2010-05-07 19:33:27 +00:00
|
|
|
# encoding: UTF-8
|
|
|
|
|
2008-12-17 16:49:06 +00:00
|
|
|
# --
|
2010-02-19 22:41:36 +00:00
|
|
|
# Copyright (C) 2008-2010 10gen Inc.
|
2008-11-22 01:00:51 +00:00
|
|
|
#
|
2009-02-15 13:24:14 +00:00
|
|
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
|
|
# you may not use this file except in compliance with the License.
|
|
|
|
# You may obtain a copy of the License at
|
2008-11-22 01:00:51 +00:00
|
|
|
#
|
2009-02-15 13:24:14 +00:00
|
|
|
# http://www.apache.org/licenses/LICENSE-2.0
|
2008-11-22 01:00:51 +00:00
|
|
|
#
|
2009-02-15 13:24:14 +00:00
|
|
|
# Unless required by applicable law or agreed to in writing, software
|
|
|
|
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
|
|
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
|
|
# See the License for the specific language governing permissions and
|
|
|
|
# limitations under the License.
|
2008-12-17 16:49:06 +00:00
|
|
|
# ++
|
2008-11-22 01:00:51 +00:00
|
|
|
|
2009-11-23 20:20:05 +00:00
|
|
|
require 'set'
|
|
|
|
require 'socket'
|
2009-12-28 18:05:45 +00:00
|
|
|
require 'thread'
|
2009-11-23 20:20:05 +00:00
|
|
|
|
2009-08-20 14:50:48 +00:00
|
|
|
module Mongo
|
|
|
|
|
2010-01-07 17:37:53 +00:00
|
|
|
# Instantiates and manages connections to MongoDB.
|
2009-08-20 22:48:09 +00:00
|
|
|
class Connection
|
2010-08-15 06:04:12 +00:00
|
|
|
# Local aliases for core socket/synchronization classes. Aliasing them as
# constants on the class allows alternate implementations to be substituted
# (e.g., in tests or non-blocking drivers).
TCPSocket = ::TCPSocket
Mutex = ::Mutex
ConditionVariable = ::ConditionVariable

# Abort connections if a ConnectionError is raised.
Thread.abort_on_exception = true

# Default MongoDB server port.
DEFAULT_PORT = 27017

# Wire-protocol header sizes, in bytes.
STANDARD_HEADER_SIZE = 16
RESPONSE_HEADER_SIZE = 20

# Regex used to parse each host spec in a MongoDB URI; see MONGODB_URI_SPEC
# for the overall format this is matched against.
MONGODB_URI_MATCHER = /(([-_.\w\d]+):([-_\w\d]+)@)?([-.\w\d]+)(:([\w\d]+))?(\/([-\d\w]+))?/
MONGODB_URI_SPEC = "mongodb://[username:password@]host1[:port1][,host2[:port2],...[,hostN[:portN]]][/database]"

# NOTE(review): a :size reader is declared, but no @size assignment is
# visible in this chunk of the file — confirm it is set elsewhere.
attr_reader :logger, :size, :nodes, :auths, :primary, :secondaries, :arbiters,
  :safe, :primary_pool, :read_pool, :secondary_pools

# Counter for generating unique request ids.
@@current_request_id = 0
|
2009-08-20 14:50:48 +00:00
|
|
|
|
2010-11-24 18:49:34 +00:00
|
|
|
# Create a connection to a single MongoDB instance.
#
# You may specify whether connection to slave is permitted.
# In all cases, the default host is "localhost" and the default port is 27017.
#
# To specify more than one host pair to be used as seeds in a replica set,
# use Connection.multi.
#
# Once connected to a replica set, you can find out which nodes are primary, secondary, and
# arbiters with the corresponding accessors: Connection#primary, Connection#secondaries, and
# Connection#arbiters. This is useful if your application needs to connect manually to nodes other
# than the primary.
#
# @param [String, Hash] host
# @param [Integer] port specify a port number here if only one host is being specified.
#
# @option options [Boolean, Hash] :safe (false) Set the default safe-mode options
#   propagated to DB objects instantiated off of this Connection. This
#   default can be overridden upon instantiation of any DB by explicitly setting a :safe value
#   on initialization.
# @option options [Boolean] :slave_ok (false) Must be set to +true+ when connecting
#   to a single, slave node.
# @option options [Logger, #debug] :logger (nil) Logger instance to receive driver operation log.
# @option options [Integer] :pool_size (1) The maximum number of socket connections allowed per
#   connection pool. Note: this setting is relevant only for multi-threaded applications.
# @option options [Float] :timeout (5.0) When all of the connections in a pool are checked out,
#   this is the number of seconds to wait for a new connection to be released before throwing an exception.
#   Note: this setting is relevant only for multi-threaded applications.
#
# @example localhost, 27017
#   Connection.new
#
# @example localhost, 27017
#   Connection.new("localhost")
#
# @example localhost, 3000, max 5 connections, with max 5 seconds of wait time.
#   Connection.new("localhost", 3000, :pool_size => 5, :timeout => 5)
#
# @example localhost, 3000, where this node may be a slave
#   Connection.new("localhost", 3000, :slave_ok => true)
#
# @see http://api.mongodb.org/ruby/current/file.REPLICA_SETS.html Replica sets in Ruby
#
# @raise [ReplicaSetConnectionError] This is raised if a replica set name is specified and the
#   driver fails to connect to a replica set with that name.
#
# @core connections
def initialize(host=nil, port=nil, options={})
  @auths = []

  # A block may supply the seed list directly; this path is used by
  # Connection.multi and Connection.from_uri.
  if block_given?
    @nodes = yield self
  else
    @nodes = format_pair(host, port)
  end

  # Host and port of current master.
  @host = @port = nil

  # Replica set name (validated at connect time when present).
  @replica_set_name = options[:rs_name]

  # Lock for request ids.
  @id_lock = Mutex.new

  # Pool size and timeout.
  @pool_size = options[:pool_size] || 1
  @timeout = options[:timeout] || 5.0

  # Mutex for synchronizing pool access.
  @connection_mutex = Mutex.new

  # Global safe option. This is false by default.
  @safe = options[:safe] || false

  # Create a mutex lazily whenever a new key (a socket) is added to the hash,
  # so each socket gets its own send/receive lock.
  @safe_mutexes = Hash.new { |h, k| h[k] = Mutex.new }

  # Condition variable for signal and wait.
  @queue = ConditionVariable.new

  # slave_ok can be true only if one node is specified.
  @slave_ok = options[:slave_ok]

  # Cache the various node types when connecting to a replica set.
  @primary = nil
  @secondaries = []
  @arbiters = []

  # Connection pool for primary node.
  @primary_pool = nil

  # Connection pools for each secondary node, plus the pool chosen for reads.
  @secondary_pools = []
  @read_pool = nil

  @logger = options[:logger] || nil

  # Connect eagerly unless :connect => false was passed.
  should_connect = options.fetch(:connect, true)
  connect if should_connect
end
|
|
|
|
|
2010-07-26 22:05:23 +00:00
|
|
|
# Initialize a connection to a MongoDB replica set using an array of seed nodes.
#
# The seed nodes specified will be used on the initial connection to the replica set, but note
# that this list of nodes will be replaced by the list of canonical nodes returned by running the
# is_master command on the replica set.
#
# @param nodes [Array] An array of arrays, each of which specifies a host and port.
# @param opts [Hash] Any of the available options that can be passed to Connection.new.
#
# @option opts [String] :rs_name (nil) The name of the replica set to connect to. An exception will be
#   raised if unable to connect to a replica set with this name.
# @option opts [Boolean] :read_secondary (false) When true, this connection object will pick a random slave
#   to send reads to.
#
# @example
#   Connection.multi([["db1.example.com", 27017], ["db2.example.com", 27017]])
#
# @example This connection will read from a random secondary node.
#   Connection.multi([["db1.example.com", 27017], ["db2.example.com", 27017], ["db3.example.com", 27017]],
#                    :read_secondary => true)
#
# @raise [MongoArgumentError] if nodes is empty or not an array of [host, port] pairs.
#
# @return [Mongo::Connection]
def self.multi(nodes, opts={})
  unless nodes.length > 0 && nodes.all? {|n| n.is_a? Array}
    raise MongoArgumentError, "Connection.multi requires at least one node to be specified."
  end

  # Block returns an array, the first element being an array of nodes and the second an array
  # of authorizations for the database.
  new(nil, nil, opts) do |con|
    nodes.map do |node|
      # NOTE(review): these flags are re-set on every iteration; setting
      # them once before the map would be equivalent.
      con.instance_variable_set(:@replica_set, true)
      con.instance_variable_set(:@read_secondary, true) if opts[:read_secondary]
      con.pair_val_to_connection(node)
    end
  end
end
|
|
|
|
|
2010-02-17 20:15:07 +00:00
|
|
|
# Build a connection from a MongoDB connection-string URI.
#
# @param uri [String]
#   A string of the format mongodb://[username:password@]host1[:port1][,host2[:port2],...[,hostN[:portN]]][/database]
# @param opts [Hash] any of the options accepted by Connection.new.
#
# @return [Mongo::Connection]
def self.from_uri(uri, opts={})
  new(nil, nil, opts) { |con| con.parse_uri(uri) }
end
|
2009-01-16 14:52:31 +00:00
|
|
|
|
2010-09-28 16:15:45 +00:00
|
|
|
# Fsync the server and then lock it against writes, leaving the datafiles
# in a state safe for snapshotting or backup.
#
# @return [BSON::OrderedHash] the command response.
def lock!
  fsync_cmd = BSON::OrderedHash.new
  fsync_cmd[:fsync] = 1
  fsync_cmd[:lock]  = true
  self['admin'].command(fsync_cmd)
end
|
|
|
|
|
2010-10-04 15:38:20 +00:00
|
|
|
# Report whether the server is currently fsync-locked against writes.
#
# @return [Boolean] true when the in-progress report shows an fsync lock.
def locked?
  in_progress = self['admin']['$cmd.sys.inprog'].find_one
  in_progress['fsyncLock'] == 1
end
|
|
|
|
|
2010-09-28 16:15:45 +00:00
|
|
|
# Release a lock previously taken with #lock!.
#
# @return [BSON::OrderedHash] the server's unlock response.
def unlock!
  admin = self['admin']
  admin['$cmd.sys.unlock'].find_one
end
|
|
|
|
|
2010-02-25 19:58:32 +00:00
|
|
|
# Re-run every authentication that has been saved on this connection.
#
# @return [Boolean] false when no credentials are stored; true once all
#   stored credentials have been applied.
#
# @raise [AuthenticationError] if any single authentication fails.
def apply_saved_authentication
  return false if @auths.empty?
  @auths.each do |creds|
    self[creds['db_name']].authenticate(creds['username'], creds['password'], false)
  end
  true
end
|
|
|
|
|
|
|
|
# Store a set of credentials on this connection so they can be re-applied
# on reconnect (see #apply_saved_authentication). Called automatically by
# DB#authenticate; does not itself issue an authentication command.
#
# Any previously saved credentials for the same database are replaced.
#
# @param [String] db_name
# @param [String] username
# @param [String] password
#
# @return [Hash] the credential hash that was stored.
def add_auth(db_name, username, password)
  remove_auth(db_name)
  auth = {
    'db_name'  => db_name,
    'username' => username,
    'password' => password
  }
  @auths << auth
  auth
end
|
|
|
|
|
|
|
|
# Discard any saved credentials for the given database.
#
# @param [String] db_name
#
# @return [Boolean] true when credentials were removed, false otherwise
#   (nil when no credential store exists yet).
def remove_auth(db_name)
  return unless @auths
  @auths.reject! { |creds| creds['db_name'] == db_name } ? true : false
end
|
|
|
|
|
|
|
|
# Remove all authentication information stored on this connection.
#
# @return [true] always succeeds.
def clear_auths
  # Reassign rather than mutate so prior holders of the array are unaffected.
  @auths = []
  true
end
|
|
|
|
|
2010-01-07 17:37:53 +00:00
|
|
|
# Build a hash mapping each database name to its size on disk in bytes.
#
# @return [Hash] database name => size on disk (Integer).
def database_info
  doc = self['admin'].command({:listDatabases => 1})
  info = {}
  doc['databases'].each do |db|
    info[db['name']] = db['sizeOnDisk'].to_i
  end
  info
end
|
2008-11-22 01:00:51 +00:00
|
|
|
|
2010-01-07 17:37:53 +00:00
|
|
|
# List the names of all databases on the server.
#
# @return [Array<String>]
def database_names
  info = database_info
  info.keys
end
|
2008-11-22 01:00:51 +00:00
|
|
|
|
2010-01-07 17:37:53 +00:00
|
|
|
# Obtain a DB handle for the named database, passing through any
# DB-level options (see DB#new for the valid option keys).
#
# @param [String] db_name a valid database name.
# @param [Hash] options options forwarded to DB.new.
#
# @return [Mongo::DB]
#
# @core databases db-instance_method
def db(db_name, options={})
  DB.new(db_name, self, options)
end
|
|
|
|
|
2010-01-07 17:37:53 +00:00
|
|
|
# Shorthand database accessor. Unlike #db, this takes no options hash;
# the connection's global :safe setting is applied.
#
# @param [String] db_name a valid database name.
#
# @return [Mongo::DB]
#
# @core databases []-instance_method
def [](db_name)
  DB.new(db_name, self, :safe => @safe)
end
|
|
|
|
|
2010-01-07 17:37:53 +00:00
|
|
|
# Drop an existing database from the server.
#
# @param [String] name name of an existing database.
def drop_database(name)
  self[name].command(:dropDatabase => 1)
end
|
2009-01-23 18:30:59 +00:00
|
|
|
|
2010-03-16 17:56:30 +00:00
|
|
|
# Copy the database +from+ to +to+ on localhost. The +from+ database is
# assumed to be on localhost, but an alternate host can be specified.
#
# @param [String] from name of the database to copy from.
# @param [String] to name of the database to copy to.
# @param [String] from_host host of the 'from' database.
# @param [String] username username for authentication against from_db (>=1.3.x).
# @param [String] password password for authentication against from_db (>=1.3.x).
#
# @raise [MongoArgumentError] if only one of username/password is given.
def copy_database(from, to, from_host="localhost", username=nil, password=nil)
  # Command documents are order-sensitive: the command key (:copydb)
  # must come first, hence the OrderedHash.
  oh = BSON::OrderedHash.new
  oh[:copydb] = 1
  oh[:fromhost] = from_host
  oh[:fromdb] = from
  oh[:todb] = to
  if username || password
    unless username && password
      raise MongoArgumentError, "Both username and password must be supplied for authentication."
    end
    # Fetch a nonce from the source host and derive the auth key from it,
    # so credentials are never sent in the clear.
    nonce_cmd = BSON::OrderedHash.new
    nonce_cmd[:copydbgetnonce] = 1
    nonce_cmd[:fromhost] = from_host
    result = self["admin"].command(nonce_cmd)
    oh[:nonce] = result["nonce"]
    oh[:username] = username
    oh[:key] = Mongo::Support.auth_key(username, password, oh[:nonce])
  end
  self["admin"].command(oh)
end
|
|
|
|
|
2010-01-07 17:37:53 +00:00
|
|
|
# Increment and return the next available request id.
#
# @return [Integer]
def get_request_id
  # Synchronize access to the class-wide counter so that concurrent
  # threads never receive the same id. The synchronize block's value
  # (the incremented counter) is returned directly; the previous
  # implementation pre-initialized a local to '' for no reason.
  @id_lock.synchronize do
    @@current_request_id += 1
  end
end
|
|
|
|
|
2010-01-07 17:37:53 +00:00
|
|
|
# Fetch the server's build information document.
#
# @return [Hash] the buildinfo command response.
def server_info
  admin_db = self["admin"]
  admin_db.command({:buildinfo => 1})
end
|
|
|
|
|
2010-01-07 17:37:53 +00:00
|
|
|
# Fetch the server's version as a comparable object.
#
# @return [Mongo::ServerVersion] wraps the version string for easy
#   comparison against other versions.
def server_version
  version_string = server_info["version"]
  ServerVersion.new(version_string)
end
|
|
|
|
|
2010-01-08 21:18:07 +00:00
|
|
|
# Is it okay to send reads to a slave/secondary node? True either when
# :read_secondary was requested (replica sets) or :slave_ok was set
# (single-node connections).
#
# @return [Boolean]
def slave_ok?
  return @read_secondary if @read_secondary
  @slave_ok
end
|
|
|
|
|
2010-01-07 17:37:53 +00:00
|
|
|
# Send a message to MongoDB, adding the necessary headers.
#
# @param [Integer] operation a MongoDB opcode.
# @param [BSON::ByteBuffer] message a message to send to the database.
# @param [String] log_message currently unused; kept for API compatibility.
#
# @return [Integer] number of bytes sent
def send_message(operation, message, log_message=nil)
  begin
    packed_message = add_message_headers(operation, message).to_s
    socket = checkout_writer
    send_message_on_socket(packed_message, socket)
  ensure
    # Fix: only check the socket back in when one was actually checked
    # out. Previously a failure before/at checkout passed nil to the pool.
    checkin_writer(socket) if socket
  end
end
|
|
|
|
|
|
|
|
# Sends a message to the database, waits for a response, and raises
# an exception if the operation has failed.
#
# @param [Integer] operation a MongoDB opcode.
# @param [BSON::ByteBuffer] message a message to send to the database.
# @param [String] db_name the name of the database. used on call to get_last_error.
# @param [String] log_message currently unused; kept for API compatibility.
# @param [Hash] last_error_params parameters to be sent to getLastError. See DB#error for
#   available options.
#
# @see DB#get_last_error for valid last error params.
#
# @raise [Mongo::OperationFailure] if the getlasterror document reports an error.
#
# @return [Hash] The document returned by the call to getlasterror.
def send_message_with_safe_check(operation, message, db_name, log_message=nil, last_error_params=false)
  message_with_headers = add_message_headers(operation, message)
  message_with_check = last_error_message(db_name, last_error_params)
  begin
    sock = checkout_writer
    packed_message = message_with_headers.append!(message_with_check).to_s
    docs = num_received = cursor_id = ''
    # Serialize send/receive on this socket so responses cannot interleave.
    @safe_mutexes[sock].synchronize do
      send_message_on_socket(packed_message, sock)
      docs, num_received, cursor_id = receive(sock)
    end
  ensure
    # Fix: guard against a nil socket when checkout itself fails;
    # previously nil could be checked into the pool.
    checkin_writer(sock) if sock
  end

  if num_received == 1 && (error = docs[0]['err'] || docs[0]['errmsg'])
    # A "not master" error means we're talking to a demoted node;
    # drop the pools so the next operation reconnects.
    close if error == "not master"
    raise Mongo::OperationFailure, docs[0]['code'].to_s + ': ' + error
  end

  docs[0]
end
|
|
|
|
|
2009-11-24 19:23:43 +00:00
|
|
|
# Sends a message to the database and waits for the response.
#
# @param [Integer] operation a MongoDB opcode.
# @param [BSON::ByteBuffer] message a message to send to the database.
# @param [String] log_message currently unused; kept for API compatibility.
# @param [Socket] socket a socket to use in lieu of checking out a new one.
# @param [Boolean] command when true, route to the primary (writer) pool.
#
# @return [Array]
#   An array whose indexes include [0] documents returned, [1] number of documents received,
#   and [2] a cursor_id.
def receive_message(operation, message, log_message=nil, socket=nil, command=false)
  packed_message = add_message_headers(operation, message).to_s
  begin
    # Commands must run against the primary; plain reads may use a secondary.
    sock = socket || (command ? checkout_writer : checkout_reader)

    result = ''
    # Serialize send/receive on this socket so responses cannot interleave.
    @safe_mutexes[sock].synchronize do
      send_message_on_socket(packed_message, sock)
      result = receive(sock)
    end
  ensure
    # Fix: guard against a nil socket when checkout itself fails;
    # previously nil could be checked into a pool.
    if sock
      command ? checkin_writer(sock) : checkin_reader(sock)
    end
  end
  result
end
|
|
|
|
|
2010-01-07 17:37:53 +00:00
|
|
|
# Create a new socket and attempt to connect to master.
# If successful, sets host and port to master and returns the socket.
#
# If connecting to a replica set, this method will replace the
# initially-provided seed list with any nodes known to the set.
#
# @raise [ConnectionFailure] if unable to connect to any host or port.
def connect
  reset_connection
  @nodes_to_try = @nodes.clone

  # Probe nodes until we're connected or the candidate list is exhausted.
  # check_is_master may append newly discovered nodes to @nodes_to_try
  # indirectly via update_node_list.
  while connecting?
    node = @nodes_to_try.shift
    config = check_is_master(node)

    if is_primary?(config)
      set_primary(node)
    else
      set_auxillary(node, config)
    end
  end

  # When secondary pools were built (:read_secondary), choose one for reads.
  pick_secondary_for_read

  raise ConnectionFailure, "failed to connect to any given host:port" unless connected?
end
|
|
|
|
|
2010-11-16 20:43:59 +00:00
|
|
|
# Still in the connection loop: either no primary has been found yet,
# or there are candidate nodes left to probe.
def connecting?
  !connected? || !@nodes_to_try.empty?
end
|
|
|
|
|
|
|
|
# Connected means a primary pool exists with a known host and port.
# NOTE: nodes raising connection failures cause all pools to be closed,
# so a live primary pool is a reasonable proxy for overall connectedness.
def connected?
  pool = @primary_pool
  pool && pool.host && pool.port
end
|
|
|
|
|
|
|
|
# Close the connection to the database: shut down the primary pool,
# clear the pool references, and close every secondary pool.
def close
  @primary_pool.close if @primary_pool
  @primary_pool = nil
  @read_pool = nil
  @secondary_pools.each { |pool| pool.close }
end
|
|
|
|
|
2010-02-17 20:15:07 +00:00
|
|
|
## Configuration helper methods
|
|
|
|
|
|
|
|
# Normalize a host/port specification into an array of host-port pairs.
# A nil host yields the default localhost pair.
#
# @private
def format_pair(pair_or_host, port)
  if pair_or_host.is_a?(String)
    [[pair_or_host, port ? port.to_i : DEFAULT_PORT]]
  elsif pair_or_host.nil?
    [['localhost', DEFAULT_PORT]]
  end
end
|
|
|
|
|
|
|
|
# Coerce a single seed value (nil, host string, port integer, or a
# ready-made pair) into a [host, port] array.
#
# @private
def pair_val_to_connection(a)
  if a.nil?
    ['localhost', DEFAULT_PORT]
  elsif a.is_a?(String)
    [a, DEFAULT_PORT]
  elsif a.is_a?(Integer)
    ['localhost', a]
  elsif a.is_a?(Array)
    a
  end
end
|
|
|
|
|
|
|
|
# Parse a MongoDB URI. This method is used by Connection.from_uri.
# Returns an array of [host, port] nodes; credentials embedded in the
# URI are stored on the connection via add_auth.
#
# @raise [MongoArgumentError] if the URI does not match MONGODB_URI_SPEC,
#   has a non-numeric port, or supplies only part of username/password/db.
#
# @private
def parse_uri(string)
  if string =~ /^mongodb:\/\//
    # Strip the leading "mongodb://" scheme.
    string = string[10..-1]
  else
    raise MongoArgumentError, "MongoDB URI must match this spec: #{MONGODB_URI_SPEC}"
  end

  nodes = []
  # NOTE(review): this local is never populated or returned; credentials
  # are saved through add_auth below instead.
  auths = []
  specs = string.split(',')
  specs.each do |spec|
    matches = MONGODB_URI_MATCHER.match(spec)
    if !matches
      raise MongoArgumentError, "MongoDB URI must match this spec: #{MONGODB_URI_SPEC}"
    end

    # Capture groups: 2 = username, 3 = password, 4 = host, 6 = port, 8 = db.
    uname = matches[2]
    pwd = matches[3]
    host = matches[4]
    port = matches[6] || DEFAULT_PORT
    if !(port.to_s =~ /^\d+$/)
      raise MongoArgumentError, "Invalid port #{port}; port must be specified as digits."
    end
    port = port.to_i
    db = matches[8]

    if uname && pwd && db
      add_auth(db, uname, pwd)
    elsif uname || pwd || db
      raise MongoArgumentError, "MongoDB URI must include all three of username, password, " +
        "and db if any one of these is specified."
    end

    nodes << [host, port]
  end

  nodes
end
|
|
|
|
|
2010-11-16 20:43:59 +00:00
|
|
|
# Check out a socket for reading. Uses the dedicated read (secondary)
# pool when one exists, otherwise falls back to the primary pool.
def checkout_reader
  connect unless connected?
  return @read_pool.checkout if @read_pool
  checkout_writer
end
|
|
|
|
|
|
|
|
# Check out a socket for writing — always taken from the primary pool,
# reconnecting first if necessary.
def checkout_writer
  connect unless connected?
  @primary_pool.checkout
end
|
|
|
|
|
|
|
|
# Return a reader socket to its pool: the read pool when configured,
# otherwise the primary pool.
def checkin_reader(socket)
  return @read_pool.checkin(socket) if @read_pool
  checkin_writer(socket)
end
|
|
|
|
|
|
|
|
# Return a writer socket to the primary pool; a no-op when no pool exists
# (e.g., after close).
def checkin_writer(socket)
  @primary_pool.checkin(socket) if @primary_pool
end
|
|
|
|
|
2009-11-24 19:23:43 +00:00
|
|
|
private
|
|
|
|
|
2010-11-19 23:26:38 +00:00
|
|
|
# Pick a pool randomly from the set of available secondaries.
#
# Fix: the previous condition required more than one secondary pool
# (`size > 1`), which silently skipped assignment when exactly one
# secondary existed — sending all reads to the primary despite
# :read_secondary being requested.
def pick_secondary_for_read
  if (size = @secondary_pools.size) > 0
    @read_pool = @secondary_pools[rand(size)]
  end
end
|
|
|
|
|
2010-07-19 16:23:12 +00:00
|
|
|
# Called when a ConnectionFailure is raised: close all pools and return
# every piece of replica-set bookkeeping to its initial state.
def reset_connection
  close
  @primary = nil
  @secondaries, @secondary_pools = [], []
  @arbiters = []
  @nodes_tried, @nodes_to_try = [], []
end
|
|
|
|
|
|
|
|
# A node counts as primary when its ismaster reply says so, or — outside
# of replica sets — when :slave_ok permits treating a slave as primary.
def is_primary?(config)
  master = config && (config['ismaster'] == 1 || config['ismaster'] == true)
  master || (!@replica_set && @slave_ok)
end
|
|
|
|
|
|
|
|
# Probe a single [host, port] node with the ismaster command and return
# the resulting config document (nil when the probe fails).
#
# Side effects: records the node in @nodes_tried, folds any 'hosts' list
# from the reply into the known-node list, and logs any server message.
# The short-lived probe socket is always closed.
def check_is_master(node)
  begin
    host, port = *node
    socket = TCPSocket.new(host, port)
    # Disable Nagle's algorithm for this small request/response exchange.
    socket.setsockopt(Socket::IPPROTO_TCP, Socket::TCP_NODELAY, 1)

    config = self['admin'].command({:ismaster => 1}, :sock => socket)

    # When :rs_name was given, verify the node belongs to that set.
    check_set_name(config, socket)
  rescue OperationFailure, SocketError, SystemCallError, IOError => ex
    # Probe failures are non-fatal: close stale pools unless a working
    # primary already exists. NOTE(review): ex is captured but unused.
    close unless connected?
  ensure
    @nodes_tried << node
    if config
      update_node_list(config['hosts']) if config['hosts']

      if config['msg'] && @logger
        @logger.warn("MONGODB #{config['msg']}")
      end
    end

    socket.close if socket
  end

  config
end
|
|
|
|
|
2010-11-03 19:12:15 +00:00
|
|
|
# Make sure that we're connected to the expected replica set.
# Only runs when a set name was supplied via :rs_name; issues
# replSetGetStatus over the probe socket and compares names.
#
# @raise [ReplicaSetConnectionError] if the status command fails or the
#   reported set name does not match @replica_set_name.
def check_set_name(config, socket)
  if @replica_set_name
    # The ismaster config passed in is intentionally replaced by the
    # replSetGetStatus response for the remainder of this check.
    config = self['admin'].command({:replSetGetStatus => 1},
      :sock => socket, :check_response => false)

    if !Mongo::Support.ok?(config)
      raise ReplicaSetConnectionError, config['errmsg']
    elsif config['set'] != @replica_set_name
      raise ReplicaSetConnectionError,
        "Attempting to connect to replica set '#{config['set']}' but expected '#{@replica_set_name}'"
    end
  end
end
|
|
|
|
|
2010-07-19 16:23:12 +00:00
|
|
|
# Record +node+ as the primary: cache its address, open the primary
# connection pool, and re-apply any saved authentication credentials.
def set_primary(node)
  address, port_number = *node
  @primary = [address, port_number]
  @primary_pool = Pool.new(self, address, port_number,
    :size => @pool_size, :timeout => @timeout)
  apply_saved_authentication
end
|
|
|
|
|
2010-08-24 15:20:54 +00:00
|
|
|
# Record a non-primary node. Secondaries are cached (and, when reading
# from secondaries is enabled, given their own pool); arbiters are
# cached so that users can easily connect manually.
def set_auxillary(node, config)
  return unless config

  if config['secondary']
    host, port = *node
    @secondaries << node unless @secondaries.include?(node)
    if @read_secondary
      @secondary_pools << Pool.new(self, host, port, :size => @pool_size, :timeout => @timeout)
    end
  elsif config['arbiterOnly']
    @arbiters << node unless @arbiters.include?(node)
  end
end
|
|
|
|
|
2010-07-19 16:23:12 +00:00
|
|
|
# Update the list of known nodes. Only applies to replica sets,
# where the response to the ismaster command will return a list
# of known hosts.
#
# Entries that cannot be parsed (non-string hosts) are warned about
# and dropped, rather than being kept as nil placeholders that would
# later fail when treated as [host, port] pairs.
#
# @param hosts [Array] a list of hosts, specified as string-encoded
#   host-port values. Example: ["myserver-1.org:27017", "myserver-1.org:27017"]
#
# @return [Array] the nodes not yet tried in this connect attempt
def update_node_list(hosts)
  new_nodes = hosts.map do |host|
    if !host.respond_to?(:split)
      warn "Could not parse host #{host.inspect}."
      next
    end

    host, port = host.split(':')
    # NOTE(review): a host string without an explicit port yields port 0
    # here; ismaster's 'hosts' entries appear to always carry a port.
    [host, port.to_i]
  end.compact # drop the nil entries produced by `next` above

  # Replace the list of seed nodes with the canonical list.
  @nodes = new_nodes.clone

  @nodes_to_try = new_nodes - @nodes_tried
end
|
|
|
|
|
2009-11-23 20:20:05 +00:00
|
|
|
# Read one complete server reply from +sock+: skip the standard header,
# parse the response header, then read the reply documents.
def receive(sock)
  receive_and_discard_header(sock)
  doc_count, cursor = receive_response_header(sock)
  read_documents(doc_count, cursor, sock)
end
|
|
|
|
|
|
|
|
# Reads the standard 16-byte message header from the socket and parses
# its four int32 fields; the last parsed field (the opcode) is the
# implicit return value.
#
# NOTE(review): size, request_id and response_to are parsed but unused;
# this method itself appears superseded by #receive_and_discard_header
# in #receive. Confirm before relying on its return value.
def receive_header(sock)
  header = BSON::ByteBuffer.new
  header.put_binary(receive_message_on_socket(16, sock))
  unless header.size == STANDARD_HEADER_SIZE
    raise "Short read for DB response header: " +
      "expected #{STANDARD_HEADER_SIZE} bytes, saw #{header.size}"
  end
  header.rewind
  # Standard header layout: messageLength, requestID, responseTo, opCode.
  size = header.get_int
  request_id = header.get_int
  response_to = header.get_int
  op = header.get_int
end
|
2010-10-30 03:18:49 +00:00
|
|
|
|
2010-09-11 17:56:21 +00:00
|
|
|
# Read and throw away the 16-byte standard message header.
#
# @return [nil]
# @raise [RuntimeError] when fewer than 16 bytes could be read.
def receive_and_discard_header(sock)
  discarded = receive_and_discard_message_on_socket(16, sock)
  return nil if discarded == STANDARD_HEADER_SIZE
  raise "Short read for DB response header: " +
        "expected #{STANDARD_HEADER_SIZE} bytes, saw #{discarded}"
end
|
2009-11-23 20:20:05 +00:00
|
|
|
|
|
|
|
# Read and parse the 20-byte OP_REPLY response header.
#
# @return [Array(Integer, Integer)] number of documents to follow and
#   the 64-bit cursor id (reassembled from its two little-endian halves).
# @raise [RuntimeError] on a short read.
def receive_response_header(sock)
  raw = receive_message_on_socket(RESPONSE_HEADER_SIZE, sock)
  if raw.length != RESPONSE_HEADER_SIZE
    raise "Short read for DB response header; " +
          "expected #{RESPONSE_HEADER_SIZE} bytes, saw #{raw.length}"
  end

  flags, cursor_lo, cursor_hi, _starting_from, number_remaining = raw.unpack('VVVVV')
  check_response_flags(flags)
  [number_remaining, (cursor_hi << 32) + cursor_lo]
end
|
|
|
|
|
2010-07-12 16:11:01 +00:00
|
|
|
# Inspect the OP_REPLY response flags and raise for a dead cursor.
#
# @raise [Mongo::OperationFailure] when CURSOR_NOT_FOUND is set.
def check_response_flags(flags)
  if (flags & Mongo::Constants::REPLY_CURSOR_NOT_FOUND) != 0
    raise Mongo::OperationFailure, "Query response returned CURSOR_NOT_FOUND. " +
      "Either an invalid cursor was specified, or the cursor may have timed out on the server."
  elsif (flags & Mongo::Constants::REPLY_QUERY_FAILURE) != 0
    # Deliberately a no-op: raising for QUERY_FAILURE produced odd failures.
  end
end
|
|
|
|
|
2009-11-23 20:20:05 +00:00
|
|
|
# Read +number_received+ BSON documents off the socket. Each document
# is length-prefixed: read its 4-byte size, then the remaining bytes,
# and deserialize the whole buffer.
#
# @return [Array(Array, Integer, Integer)] the documents, the count
#   received, and the cursor id.
def read_documents(number_received, cursor_id, sock)
  docs = []
  number_received.times do
    size_bytes = receive_message_on_socket(4, sock)
    doc_size = size_bytes.unpack('V')[0]
    size_bytes << receive_message_on_socket(doc_size - 4, sock)
    docs << BSON::BSON_CODER.deserialize(size_bytes)
  end
  [docs, number_received, cursor_id]
end
|
|
|
|
|
2010-05-04 20:00:05 +00:00
|
|
|
# Constructs a getlasterror message. This method is used exclusively by
# Connection#send_message_with_safe_check.
#
# Builds an OP_QUERY against "<db_name>.$cmd" whose query document is
# {:getlasterror => 1} merged with any safe-mode options in +opts+.
#
# @param db_name [String] database whose $cmd collection is queried.
# @param opts [Hash, Object] safe-mode options (:w, :wtimeout, :fsync);
#   merged into the command only when a Hash.
#
# @return the fully framed message (headers prepended by
#   #add_message_headers).
def last_error_message(db_name, opts)
  message = BSON::ByteBuffer.new
  message.put_int(0)  # OP_QUERY flags field
  BSON::BSON_RUBY.serialize_cstr(message, "#{db_name}.$cmd")
  message.put_int(0)  # numberToSkip
  message.put_int(-1) # numberToReturn: a single result document
  # Ordered hash so :getlasterror stays the first key of the command.
  cmd = BSON::OrderedHash.new
  cmd[:getlasterror] = 1
  if opts.is_a?(Hash)
    opts.assert_valid_keys(:w, :wtimeout, :fsync)
    cmd.merge!(opts)
  end
  message.put_binary(BSON::BSON_CODER.serialize(cmd, false).to_s)
  add_message_headers(Mongo::Constants::OP_QUERY, message)
end
|
2009-12-16 19:03:15 +00:00
|
|
|
|
2009-11-23 20:20:05 +00:00
|
|
|
# Prepares a message for transmission to MongoDB by prepending a valid
# 16-byte message header (four little-endian int32 fields).
#
# @return the message with the header prepended.
def add_message_headers(operation, message)
  header_fields = [
    16 + message.size, # total message size, including this header
    get_request_id,    # unique request id
    0,                 # responseTo: unused for requests
    operation          # opcode
  ]
  message.prepend!(header_fields.pack('VVVV'))
end
|
|
|
|
|
|
|
|
# Low-level method for sending a packed message on a socket, looping
# until every byte has been written.
#
# NOTE: on a partial first send, +packed_message+ is consumed
# destructively via slice!.
#
# @return [Integer] number of bytes sent
# @raise [ConnectionFailure] wrapping any error; the connection is
#   closed first.
def send_message_on_socket(packed_message, socket)
  sent = socket.send(packed_message, 0)
  if sent != packed_message.size
    # Partial write: drop what went out and keep pushing the remainder.
    packed_message.slice!(0, sent)
    while packed_message.size > 0
      chunk_sent = socket.send(packed_message, 0)
      sent += chunk_sent
      packed_message.slice!(0, chunk_sent)
    end
  end
  sent
rescue => ex
  close
  raise ConnectionFailure, "Operation failed with the following exception: #{ex}"
end
|
|
|
|
|
|
|
|
# Low-level method for receiving exactly +length+ bytes from a socket,
# looping on short reads.
#
# @return [String] the bytes read.
# @raise [ConnectionFailure] on EOF or any read error; the connection
#   is closed first.
def receive_message_on_socket(length, socket)
  data = socket.read(length)
  raise ConnectionFailure, "connection closed" unless data.length > 0

  if data.length < length
    # Short read: keep reading into a reusable binary buffer.
    fragment = new_binary_string
    while data.length < length
      socket.read(length - data.length, fragment)
      raise ConnectionFailure, "connection closed" unless fragment.length > 0
      data << fragment
    end
  end
  data
rescue => ex
  close
  raise ConnectionFailure, "Operation failed with the following exception: #{ex}"
end
|
2010-11-16 20:43:59 +00:00
|
|
|
|
2010-09-11 17:56:21 +00:00
|
|
|
# Low-level method for receiving data from a socket.
# Unlike #receive_message_on_socket, the bytes are immediately
# discarded; only the count of bytes read is returned.
#
# @return [Integer] number of bytes read (always +length+ on success).
# @raise [ConnectionFailure] on EOF or any read error; the connection
#   is closed first.
def receive_and_discard_message_on_socket(length, socket)
  buffer = socket.read(length)
  discarded = buffer.length
  raise ConnectionFailure, "connection closed" unless discarded > 0
  # Short read: reuse the same buffer for the remaining chunks.
  while discarded < length
    socket.read(length - discarded, buffer)
    raise ConnectionFailure, "connection closed" unless buffer.length > 0
    discarded += buffer.length
  end
  discarded
rescue => ex
  close
  raise ConnectionFailure, "Operation failed with the following exception: #{ex}"
end
|
2010-11-16 20:43:59 +00:00
|
|
|
|
2010-09-11 10:22:51 +00:00
|
|
|
# On Rubies with the Encoding API (1.9+), wire buffers must be
# binary-encoded to avoid transcoding raw bytes; older Rubies have no
# string encodings, so a plain empty string suffices.
if defined?(Encoding)
  BINARY_ENCODING = Encoding.find("binary")

  # Returns a new, empty, binary (ASCII-8BIT) encoded string.
  def new_binary_string
    "".force_encoding(BINARY_ENCODING)
  end
else
  # Pre-1.9: plain strings are already byte buffers.
  def new_binary_string
    ""
  end
end
|
2009-08-20 22:48:09 +00:00
|
|
|
end
|
2008-11-22 01:00:51 +00:00
|
|
|
end
|