2010-05-07 19:33:27 +00:00
|
|
|
# encoding: UTF-8
|
|
|
|
|
2008-12-17 16:49:06 +00:00
|
|
|
# --
|
2011-01-17 17:26:32 +00:00
|
|
|
# Copyright (C) 2008-2011 10gen Inc.
|
2008-11-22 01:00:51 +00:00
|
|
|
#
|
2009-02-15 13:24:14 +00:00
|
|
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
|
|
# you may not use this file except in compliance with the License.
|
|
|
|
# You may obtain a copy of the License at
|
2008-11-22 01:00:51 +00:00
|
|
|
#
|
2009-02-15 13:24:14 +00:00
|
|
|
# http://www.apache.org/licenses/LICENSE-2.0
|
2008-11-22 01:00:51 +00:00
|
|
|
#
|
2009-02-15 13:24:14 +00:00
|
|
|
# Unless required by applicable law or agreed to in writing, software
|
|
|
|
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
|
|
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
|
|
# See the License for the specific language governing permissions and
|
|
|
|
# limitations under the License.
|
2008-12-17 16:49:06 +00:00
|
|
|
# ++
|
2008-11-22 01:00:51 +00:00
|
|
|
|
2009-11-23 20:20:05 +00:00
|
|
|
require 'set'
|
|
|
|
require 'socket'
|
2009-12-28 18:05:45 +00:00
|
|
|
require 'thread'
|
2009-11-23 20:20:05 +00:00
|
|
|
|
2009-08-20 14:50:48 +00:00
|
|
|
module Mongo
|
|
|
|
|
2010-01-07 17:37:53 +00:00
|
|
|
# Instantiates and manages connections to MongoDB.
|
2009-08-20 22:48:09 +00:00
|
|
|
class Connection
|
2010-08-15 06:04:12 +00:00
|
|
|
# NOTE(review): core classes appear to be aliased under the driver's
# namespace, presumably so tests or alternate transports can replace
# them -- confirm against the test suite.
TCPSocket = ::TCPSocket
Mutex = ::Mutex
ConditionVariable = ::ConditionVariable

# Abort connections if a ConnectionError is raised.
Thread.abort_on_exception = true

# Default MongoDB server port.
DEFAULT_PORT = 27017
# Byte size of a standard wire-protocol message header.
STANDARD_HEADER_SIZE = 16
# Byte size of a reply header.
RESPONSE_HEADER_SIZE = 20

attr_reader :logger, :size, :auths, :primary, :safe, :primary_pool,
  :host_to_try, :pool_size, :connect_timeout

# Counter for generating unique request ids.
@@current_request_id = 0
|
2009-08-20 14:50:48 +00:00
|
|
|
|
2010-11-24 18:49:34 +00:00
|
|
|
# Create a connection to single MongoDB instance.
|
2009-08-20 14:50:48 +00:00
|
|
|
#
|
2010-11-24 18:49:34 +00:00
|
|
|
# You may specify whether connection to slave is permitted.
|
2010-01-07 17:37:53 +00:00
|
|
|
# In all cases, the default host is "localhost" and the default port is 27017.
|
2009-08-20 14:50:48 +00:00
|
|
|
#
|
2010-12-15 19:15:49 +00:00
|
|
|
# If you're connecting to a replica set, you'll need to use ReplSetConnection.new instead.
|
2010-01-08 20:43:13 +00:00
|
|
|
#
|
2010-08-24 15:20:54 +00:00
|
|
|
# Once connected to a replica set, you can find out which nodes are primary, secondary, and
|
|
|
|
# arbiters with the corresponding accessors: Connection#primary, Connection#secondaries, and
|
|
|
|
# Connection#arbiters. This is useful if your application needs to connect manually to nodes other
|
|
|
|
# than the primary.
|
|
|
|
#
|
2010-04-07 21:10:28 +00:00
|
|
|
# @param [String, Hash] host.
|
|
|
|
# @param [Integer] port specify a port number here if only one host is being specified.
|
2009-08-20 14:50:48 +00:00
|
|
|
#
|
2011-01-05 16:30:20 +00:00
|
|
|
# @option opts [Boolean, Hash] :safe (false) Set the default safe-mode options
|
2010-11-03 21:36:08 +00:00
|
|
|
# propagated to DB objects instantiated off of this Connection. This
|
|
|
|
# default can be overridden upon instantiation of any DB by explicity setting a :safe value
|
|
|
|
# on initialization.
|
2011-01-05 16:30:20 +00:00
|
|
|
# @option opts [Boolean] :slave_ok (false) Must be set to +true+ when connecting
|
2010-01-07 17:37:53 +00:00
|
|
|
# to a single, slave node.
|
2011-01-31 20:51:39 +00:00
|
|
|
# @option opts [Logger, #debug] :logger (nil) A Logger instance for debugging driver ops. Note that
|
|
|
|
# logging negatively impacts performance; therefore, it should not be used for high-performance apps.
|
2011-01-05 16:30:20 +00:00
|
|
|
# @option opts [Integer] :pool_size (1) The maximum number of socket connections allowed per
|
2010-11-24 18:49:34 +00:00
|
|
|
# connection pool. Note: this setting is relevant only for multi-threaded applications.
|
2011-06-15 20:17:42 +00:00
|
|
|
# @option opts [Float] :pool_timeout (5.0) When all of the connections a pool are checked out,
|
2010-01-07 17:37:53 +00:00
|
|
|
# this is the number of seconds to wait for a new connection to be released before throwing an exception.
|
2010-11-24 18:49:34 +00:00
|
|
|
# Note: this setting is relevant only for multi-threaded applications (which in Ruby are rare).
|
2011-03-28 15:09:27 +00:00
|
|
|
# @option opts [Float] :op_timeout (nil) The number of seconds to wait for a read operation to time out.
|
|
|
|
# Disabled by default.
|
2011-06-15 18:20:11 +00:00
|
|
|
# @option opts [Float] :connect_timeout (nil) The number of seconds to wait before timing out a
|
|
|
|
# connection attempt.
|
2009-11-23 21:03:33 +00:00
|
|
|
#
|
2010-01-07 17:37:53 +00:00
|
|
|
# @example localhost, 27017
|
|
|
|
# Connection.new
|
2009-12-16 19:03:15 +00:00
|
|
|
#
|
2010-01-07 17:37:53 +00:00
|
|
|
# @example localhost, 27017
|
|
|
|
# Connection.new("localhost")
|
2009-08-20 14:50:48 +00:00
|
|
|
#
|
2010-01-07 17:37:53 +00:00
|
|
|
# @example localhost, 3000, max 5 connections, with max 5 seconds of wait time.
|
|
|
|
# Connection.new("localhost", 3000, :pool_size => 5, :timeout => 5)
|
2009-08-20 14:50:48 +00:00
|
|
|
#
|
2010-01-07 17:37:53 +00:00
|
|
|
# @example localhost, 3000, where this node may be a slave
|
|
|
|
# Connection.new("localhost", 3000, :slave_ok => true)
|
2009-08-20 14:50:48 +00:00
|
|
|
#
|
2010-11-24 18:49:34 +00:00
|
|
|
# @see http://api.mongodb.org/ruby/current/file.REPLICA_SETS.html Replica sets in Ruby
|
2010-02-08 17:12:18 +00:00
|
|
|
#
|
2010-11-03 19:12:15 +00:00
|
|
|
# @raise [ReplicaSetConnectionError] This is raised if a replica set name is specified and the
|
|
|
|
# driver fails to connect to a replica set with that name.
|
|
|
|
#
|
2010-02-17 20:15:07 +00:00
|
|
|
# @core connections
|
2011-01-05 16:30:20 +00:00
|
|
|
def initialize(host=nil, port=nil, opts={})
  # Resolve the single target node, defaulting to localhost:27017.
  @host_to_try = format_pair(host, port)

  # Host and port of current master.
  @host = @port = nil

  # slave_ok can be true only if one node is specified
  @slave_ok = opts[:slave_ok]

  # Remaining options (pool size, timeouts, logger, :connect, etc.)
  # are handled by the shared setup helper.
  setup(opts)
end
|
|
|
|
|
2010-12-10 21:00:35 +00:00
|
|
|
# DEPRECATED
|
|
|
|
#
|
2010-07-26 22:05:23 +00:00
|
|
|
# Initialize a connection to a MongoDB replica set using an array of seed nodes.
|
2010-07-19 16:07:46 +00:00
|
|
|
#
|
2010-11-24 18:49:34 +00:00
|
|
|
# The seed nodes specified will be used on the initial connection to the replica set, but note
|
|
|
|
# that this list of nodes will be replaced by the list of canonical nodes returned by running the
|
|
|
|
# is_master command on the replica set.
|
2010-07-26 22:05:23 +00:00
|
|
|
#
|
|
|
|
# @param nodes [Array] An array of arrays, each of which specifies a host and port.
|
2010-11-24 18:49:34 +00:00
|
|
|
# @param opts [Hash] Any of the available options that can be passed to Connection.new.
|
2010-07-19 16:07:46 +00:00
|
|
|
#
|
2011-01-05 16:30:20 +00:00
|
|
|
# @option opts [String] :rs_name (nil) The name of the replica set to connect to. An exception will be
|
2010-11-24 18:49:34 +00:00
|
|
|
# raised if unable to connect to a replica set with this name.
|
2011-01-05 16:30:20 +00:00
|
|
|
# @option opts [Boolean] :read_secondary (false) When true, this connection object will pick a random slave
|
2010-11-24 18:49:34 +00:00
|
|
|
# to send reads to.
|
2010-07-19 16:07:46 +00:00
|
|
|
#
|
|
|
|
# @example
|
2010-11-24 18:49:34 +00:00
|
|
|
# Connection.multi([["db1.example.com", 27017], ["db2.example.com", 27017]])
|
|
|
|
#
|
|
|
|
# @example This connection will read from a random secondary node.
|
2010-07-26 22:05:23 +00:00
|
|
|
# Connection.multi([["db1.example.com", 27017], ["db2.example.com", 27017], ["db3.example.com", 27017]],
|
2010-11-24 18:49:34 +00:00
|
|
|
# :read_secondary => true)
|
2010-07-19 16:07:46 +00:00
|
|
|
#
|
|
|
|
# @return [Mongo::Connection]
|
2010-12-10 21:00:35 +00:00
|
|
|
#
|
|
|
|
# @deprecated
|
2010-07-19 16:07:46 +00:00
|
|
|
# DEPRECATED: use ReplSetConnection.new instead.
#
# Initialize a connection to a MongoDB replica set from an array of seed nodes.
#
# @param [Array] nodes an array of [host, port] pairs.
# @param [Hash] opts any of the options accepted by Connection.new.
#
# @return [Mongo::ReplSetConnection]
def self.multi(nodes, opts={})
  warn "Connection.multi is now deprecated. Please use ReplSetConnection.new instead."

  # Build the argument list without mutating the caller's +nodes+ array.
  # (The previous implementation appended +opts+ to it in place, so a
  # caller reusing its seed list would accumulate option hashes.)
  ReplSetConnection.new(*(nodes + [opts]))
end
|
|
|
|
|
2010-02-17 20:15:07 +00:00
|
|
|
# Initialize a connection to MongoDB using the MongoDB URI spec:
|
|
|
|
#
|
|
|
|
# @param uri [String]
|
|
|
|
# A string of the format mongodb://[username:password@]host1[:port1][,host2[:port2],...[,hostN[:portN]]][/database]
|
|
|
|
#
|
|
|
|
# @param opts Any of the options available for Connection.new
|
|
|
|
#
|
2010-12-30 20:40:50 +00:00
|
|
|
# @return [Mongo::Connection, Mongo::ReplSetConnection]
|
|
|
|
def self.from_uri(string, extra_opts={})
  uri = URIParser.new(string)
  opts = uri.connection_options
  # Options passed directly to this method override those from the URI.
  opts.merge!(extra_opts)

  if uri.nodes.length == 1
    # Single node: direct connection, carrying over any credentials
    # embedded in the URI.
    opts.merge!({:auths => uri.auths})
    Connection.new(uri.nodes[0][0], uri.nodes[0][1], opts)
  elsif uri.nodes.length > 1
    # Multiple nodes: treat them as replica-set seeds. ReplSetConnection
    # expects the options hash as its final splat argument, so append it.
    nodes = uri.nodes.clone
    nodes_with_opts = nodes << opts
    ReplSetConnection.new(*nodes_with_opts)
  else
    raise MongoArgumentError, "No nodes specified. Please ensure that you've provided at least one node."
  end
end
|
2009-01-16 14:52:31 +00:00
|
|
|
|
2011-03-23 20:34:42 +00:00
|
|
|
# The host name used for this connection.
|
|
|
|
#
|
|
|
|
# @return [String]
|
|
|
|
# Delegates to the primary pool's host.
def host
  @primary_pool.host
end
|
|
|
|
|
|
|
|
# The port used for this connection.
|
|
|
|
#
|
|
|
|
# @return [Integer]
|
|
|
|
# Delegates to the primary pool's port.
def port
  @primary_pool.port
end
|
|
|
|
|
2010-09-28 16:15:45 +00:00
|
|
|
# Fsync, then lock the mongod process against writes. Use this to get
|
|
|
|
# the datafiles in a state safe for snapshotting, backing up, etc.
|
|
|
|
#
|
|
|
|
# @return [BSON::OrderedHash] the command response
|
|
|
|
def lock!
  # Key order matters to the server: { fsync: 1, lock: true }.
  cmd = BSON::OrderedHash.new
  cmd[:fsync] = 1
  cmd[:lock] = true
  self['admin'].command(cmd)
end
|
|
|
|
|
2010-10-04 15:38:20 +00:00
|
|
|
# Is this database locked against writes?
|
|
|
|
#
|
|
|
|
# @return [Boolean]
|
|
|
|
def locked?
  # The $cmd.sys.inprog pseudo-collection reports 'fsyncLock' => 1
  # while the server is fsync-locked.
  self['admin']['$cmd.sys.inprog'].find_one['fsyncLock'] == 1
end
|
|
|
|
|
2010-09-28 16:15:45 +00:00
|
|
|
# Unlock a previously fsync-locked mongod process.
|
|
|
|
#
|
|
|
|
# @return [BSON::OrderedHash] command response
|
|
|
|
def unlock!
  # Querying the $cmd.sys.unlock pseudo-collection releases the fsync lock.
  self['admin']['$cmd.sys.unlock'].find_one
end
|
|
|
|
|
2010-02-25 19:58:32 +00:00
|
|
|
# Apply each of the saved database authentications.
|
|
|
|
#
|
|
|
|
# @return [Boolean] returns true if authentications exist and succeed, false
|
|
|
|
# if none exists.
|
|
|
|
#
|
|
|
|
# @raise [AuthenticationError] raises an exception if any one
|
|
|
|
# authentication fails.
|
2011-01-31 19:47:05 +00:00
|
|
|
def apply_saved_authentication(opts={})
  return false if @auths.empty?
  @auths.each do |auth|
    # Re-issue each saved authentication; :socket lets a caller
    # authenticate on a specific, already checked-out socket.
    self[auth['db_name']].issue_authentication(auth['username'], auth['password'], false,
      :socket => opts[:socket])
  end
  true
end
|
|
|
|
|
|
|
|
# Save an authentication to this connection. When connecting,
|
|
|
|
# the connection will attempt to re-authenticate on every db
|
2010-03-01 15:39:50 +00:00
|
|
|
# specified in the list of auths. This method is called automatically
|
|
|
|
# by DB#authenticate.
|
2010-02-25 19:58:32 +00:00
|
|
|
#
|
2010-05-04 20:06:06 +00:00
|
|
|
# Note: this method will not actually issue an authentication command. To do that,
|
|
|
|
# either run Connection#apply_saved_authentication or DB#authenticate.
|
|
|
|
#
|
2010-02-25 19:58:32 +00:00
|
|
|
# @param [String] db_name
|
|
|
|
# @param [String] username
|
|
|
|
# @param [String] password
|
|
|
|
#
|
|
|
|
# @return [Hash] a hash representing the authentication just added.
|
|
|
|
def add_auth(db_name, username, password)
  # Replace any previously saved credentials for this database.
  remove_auth(db_name)
  entry = {
    'db_name'  => db_name,
    'username' => username,
    'password' => password
  }
  @auths << entry
  entry
end
|
|
|
|
|
|
|
|
# Remove a saved authentication for this connection.
|
|
|
|
#
|
|
|
|
# @param [String] db_name
|
|
|
|
#
|
|
|
|
# @return [Boolean]
|
|
|
|
def remove_auth(db_name)
  return unless @auths
  # Array#reject! returns nil when nothing was removed.
  @auths.reject! { |entry| entry['db_name'] == db_name } ? true : false
end
|
|
|
|
|
|
|
|
# Remove all authentication information stored in this connection.
|
|
|
|
#
|
|
|
|
# @return [true] this operation return true because it always succeeds.
|
|
|
|
def clear_auths
  @auths = []
  # Always succeeds.
  true
end
|
|
|
|
|
2011-01-31 19:47:05 +00:00
|
|
|
# Re-apply saved authentications on the sockets already in the primary pool.
def authenticate_pools
  @primary_pool.authenticate_existing
end
|
|
|
|
|
|
|
|
# Log out of +db+ on the sockets already in the primary pool.
def logout_pools(db)
  @primary_pool.logout_existing(db)
end
|
|
|
|
|
2010-01-07 17:37:53 +00:00
|
|
|
# Return a hash with all database names
|
|
|
|
# and their respective sizes on disk.
|
|
|
|
#
|
|
|
|
# @return [Hash]
|
2009-08-20 14:50:48 +00:00
|
|
|
def database_info
  doc = self['admin'].command({:listDatabases => 1})
  # Map each database name to its size on disk (as an Integer).
  doc['databases'].each_with_object({}) do |db, info|
    info[db['name']] = db['sizeOnDisk'].to_i
  end
end
|
2008-11-22 01:00:51 +00:00
|
|
|
|
2010-01-07 17:37:53 +00:00
|
|
|
# Return an array of database names.
|
|
|
|
#
|
|
|
|
# @return [Array]
|
2009-08-20 14:50:48 +00:00
|
|
|
def database_names
  database_info.keys
end
|
2008-11-22 01:00:51 +00:00
|
|
|
|
2010-01-07 17:37:53 +00:00
|
|
|
# Return a database with the given name.
|
|
|
|
# See DB#new for valid options hash parameters.
|
|
|
|
#
|
|
|
|
# @param [String] db_name a valid database name.
|
2011-01-05 16:30:20 +00:00
|
|
|
# @param [Hash] opts options to be passed to the DB constructor.
|
2010-01-07 17:37:53 +00:00
|
|
|
#
|
|
|
|
# @return [Mongo::DB]
|
2010-02-08 17:12:18 +00:00
|
|
|
#
|
|
|
|
# @core databases db-instance_method
|
2011-01-05 16:30:20 +00:00
|
|
|
def db(db_name, opts={})
  DB.new(db_name, self, opts)
end
|
|
|
|
|
2010-01-07 17:37:53 +00:00
|
|
|
# Shortcut for returning a database. Use DB#db to accept options.
|
|
|
|
#
|
|
|
|
# @param [String] db_name a valid database name.
|
|
|
|
#
|
|
|
|
# @return [Mongo::DB]
|
2010-02-08 17:12:18 +00:00
|
|
|
#
|
|
|
|
# @core databases []-instance_method
|
2009-11-23 20:20:05 +00:00
|
|
|
def [](db_name)
  # Propagate this connection's default :safe setting to the DB.
  DB.new(db_name, self, :safe => @safe)
end
|
|
|
|
|
2010-01-07 17:37:53 +00:00
|
|
|
# Drop a database.
|
|
|
|
#
|
|
|
|
# @param [String] name name of an existing database.
|
2009-08-20 14:50:48 +00:00
|
|
|
def drop_database(name)
  self[name].command(:dropDatabase => 1)
end
|
2009-01-23 18:30:59 +00:00
|
|
|
|
2010-03-16 17:56:30 +00:00
|
|
|
# Copy the database +from+ to +to+ on localhost. The +from+ database is
|
|
|
|
# assumed to be on localhost, but an alternate host can be specified.
|
2010-01-07 17:37:53 +00:00
|
|
|
#
|
|
|
|
# @param [String] from name of the database to copy from.
|
|
|
|
# @param [String] to name of the database to copy to.
|
|
|
|
# @param [String] from_host host of the 'from' database.
|
2010-03-16 17:56:30 +00:00
|
|
|
# @param [String] username username for authentication against from_db (>=1.3.x).
|
|
|
|
# @param [String] password password for authentication against from_db (>=1.3.x).
|
|
|
|
def copy_database(from, to, from_host="localhost", username=nil, password=nil)
  # Key order matters to the server, hence the OrderedHash.
  oh = BSON::OrderedHash.new
  oh[:copydb] = 1
  oh[:fromhost] = from_host
  oh[:fromdb] = from
  oh[:todb] = to
  if username || password
    unless username && password
      raise MongoArgumentError, "Both username and password must be supplied for authentication."
    end
    # Authenticated copy: fetch a nonce from the source host and send a
    # derived auth key along with the copydb command.
    nonce_cmd = BSON::OrderedHash.new
    nonce_cmd[:copydbgetnonce] = 1
    nonce_cmd[:fromhost] = from_host
    result = self["admin"].command(nonce_cmd)
    oh[:nonce] = result["nonce"]
    oh[:username] = username
    oh[:key] = Mongo::Support.auth_key(username, password, oh[:nonce])
  end
  self["admin"].command(oh)
end
|
|
|
|
|
2011-03-03 19:22:32 +00:00
|
|
|
# Checks if a server is alive. This command will return immediately
|
|
|
|
# even if the server is in a lock.
|
|
|
|
#
|
|
|
|
# @return [Hash]
|
|
|
|
def ping
  self["admin"].command({:ping => 1})
end
|
|
|
|
|
|
|
|
# Get the build information for the current connection.
|
2010-01-07 17:37:53 +00:00
|
|
|
#
|
|
|
|
# @return [Hash]
|
2009-10-26 18:54:33 +00:00
|
|
|
def server_info
  self["admin"].command({:buildinfo => 1})
end
|
|
|
|
|
2011-03-03 19:22:32 +00:00
|
|
|
|
2010-01-07 17:37:53 +00:00
|
|
|
# Get the build version of the current server.
|
|
|
|
#
|
|
|
|
# @return [Mongo::ServerVersion]
|
|
|
|
# object allowing easy comparability of version.
|
2009-10-26 18:54:33 +00:00
|
|
|
def server_version
  # Wrap the raw version string for easy comparison.
  ServerVersion.new(server_info["version"])
end
|
|
|
|
|
2010-01-08 21:18:07 +00:00
|
|
|
# Is it okay to connect to a slave?
|
|
|
|
#
|
|
|
|
# @return [Boolean]
|
|
|
|
def slave_ok?
  @slave_ok
end
|
|
|
|
|
2010-01-07 17:37:53 +00:00
|
|
|
# Send a message to MongoDB, adding the necessary headers.
|
|
|
|
#
|
|
|
|
# @param [Integer] operation a MongoDB opcode.
|
2010-04-05 14:39:55 +00:00
|
|
|
# @param [BSON::ByteBuffer] message a message to send to the database.
|
2009-11-23 20:20:05 +00:00
|
|
|
#
|
2011-05-10 18:21:23 +00:00
|
|
|
# @option opts [Symbol] :connection (:writer) The connection to which
|
|
|
|
# this message should be sent. Valid options are :writer and :reader.
|
|
|
|
#
|
2010-07-28 02:19:25 +00:00
|
|
|
# @return [Integer] number of bytes sent
|
2011-05-10 18:21:23 +00:00
|
|
|
def send_message(operation, message, opts={})
  # Legacy-signature compatibility: the third argument used to be a
  # String log message; accept and discard it with a warning.
  if opts.is_a?(String)
    warn "Connection#send_message no longer takes a string log message. " +
      "Logging is now handled within the Collection and Cursor classes."
    opts = {}
  end

  connection = opts.fetch(:connection, :writer)

  begin
    add_message_headers(message, operation)
    packed_message = message.to_s

    if connection == :writer
      socket = checkout_writer
    else
      socket = checkout_reader
    end

    send_message_on_socket(packed_message, socket)
  ensure
    # Always return the socket to its pool, even if sending raised.
    # NOTE(review): if checkout itself raised, +socket+ is nil here and
    # nil is passed to checkin -- confirm the pool tolerates that.
    if connection == :writer
      checkin_writer(socket)
    else
      checkin_reader(socket)
    end
  end
end
|
|
|
|
|
|
|
|
# Sends a message to the database, waits for a response, and raises
|
2009-12-23 17:12:46 +00:00
|
|
|
# an exception if the operation has failed.
|
2009-11-24 19:23:43 +00:00
|
|
|
#
|
2010-01-07 17:37:53 +00:00
|
|
|
# @param [Integer] operation a MongoDB opcode.
|
2010-04-05 14:39:55 +00:00
|
|
|
# @param [BSON::ByteBuffer] message a message to send to the database.
|
2010-01-07 17:37:53 +00:00
|
|
|
# @param [String] db_name the name of the database. used on call to get_last_error.
|
2010-05-04 20:00:05 +00:00
|
|
|
# @param [Hash] last_error_params parameters to be sent to getLastError. See DB#error for
|
|
|
|
# available options.
|
|
|
|
#
|
2010-08-04 20:47:13 +00:00
|
|
|
# @see DB#get_last_error for valid last error params.
|
2010-01-07 17:37:53 +00:00
|
|
|
#
|
2010-11-09 18:07:01 +00:00
|
|
|
# @return [Hash] The document returned by the call to getlasterror.
|
2010-05-04 20:00:05 +00:00
|
|
|
# Note: +log_message+ is unused and retained for backward compatibility.
def send_message_with_safe_check(operation, message, db_name, log_message=nil, last_error_params=false)
  docs = num_received = cursor_id = ''
  add_message_headers(message, operation)

  # Append a getLastError query to the same buffer so the write and its
  # error check travel to the server back-to-back.
  last_error_message = BSON::ByteBuffer.new
  build_last_error_message(last_error_message, db_name, last_error_params)
  last_error_id = add_message_headers(last_error_message, Mongo::Constants::OP_QUERY)

  packed_message = message.append!(last_error_message).to_s
  begin
    sock = checkout_writer
    # Serialize send/receive per socket so the reply matches the request.
    @safe_mutexes[sock].synchronize do
      send_message_on_socket(packed_message, sock)
      docs, num_received, cursor_id = receive(sock, last_error_id)
    end
  ensure
    checkin_writer(sock)
  end

  if num_received == 1 && (error = docs[0]['err'] || docs[0]['errmsg'])
    # Losing the primary invalidates this connection entirely.
    close if error == "not master"
    error = "wtimeout" if error == "timeout"
    raise Mongo::OperationFailure, docs[0]['code'].to_s + ': ' + error
  end

  docs[0]
end
|
|
|
|
|
2009-11-24 19:23:43 +00:00
|
|
|
# Sends a message to the database and waits for the response.
|
|
|
|
#
|
2010-01-07 17:37:53 +00:00
|
|
|
# @param [Integer] operation a MongoDB opcode.
|
2010-04-05 14:39:55 +00:00
|
|
|
# @param [BSON::ByteBuffer] message a message to send to the database.
|
2011-05-10 18:35:51 +00:00
|
|
|
# @param [String] log_message this is currently a no-op and will be removed.
|
2010-01-07 17:37:53 +00:00
|
|
|
# @param [Socket] socket a socket to use in lieu of checking out a new one.
|
2011-05-10 18:35:51 +00:00
|
|
|
# @param [Boolean] command (false) indicate whether this is a command. If this is a command,
|
|
|
|
# the message will be sent to the primary node.
|
2010-01-07 17:37:53 +00:00
|
|
|
#
|
|
|
|
# @return [Array]
|
|
|
|
# An array whose indexes include [0] documents returned, [1] number of document received,
|
|
|
|
# and [2] a cursor_id.
|
2010-11-16 20:43:59 +00:00
|
|
|
# Note: +log_message+ is unused and retained for backward compatibility.
def receive_message(operation, message, log_message=nil, socket=nil, command=false)
  request_id = add_message_headers(message, operation)
  packed_message = message.to_s
  begin
    if socket
      # Caller supplied a socket; do not check it back in afterwards.
      sock = socket
      checkin = false
    else
      # Commands go to the primary (writer); plain reads may use a reader.
      sock = (command ? checkout_writer : checkout_reader)
      checkin = true
    end

    result = ''
    # Serialize send/receive per socket so replies match their requests.
    @safe_mutexes[sock].synchronize do
      send_message_on_socket(packed_message, sock)
      result = receive(sock, request_id)
    end
  ensure
    if checkin
      command ? checkin_writer(sock) : checkin_reader(sock)
    end
  end
  result
end
|
|
|
|
|
2010-01-07 17:37:53 +00:00
|
|
|
# Create a new socket and attempt to connect to master.
|
2010-01-05 22:42:52 +00:00
|
|
|
# If successful, sets host and port to master and returns the socket.
|
2010-01-07 17:37:53 +00:00
|
|
|
#
|
2010-11-03 19:32:19 +00:00
|
|
|
# If connecting to a replica set, this method will replace the
|
2010-07-19 16:07:46 +00:00
|
|
|
# initially-provided seed list with any nodes known to the set.
|
|
|
|
#
|
2010-01-07 17:37:53 +00:00
|
|
|
# @raise [ConnectionFailure] if unable to connect to any host or port.
|
2010-07-19 16:07:46 +00:00
|
|
|
def connect
  # Drop any existing pool before (re)connecting.
  close

  config = check_is_master(@host_to_try)
  if config
    # Accept the node when it is the master, or (read-only) when slave
    # reads are explicitly allowed.
    if config['ismaster'] == 1 || config['ismaster'] == true
      @read_primary = true
    elsif @slave_ok
      @read_primary = false
    end

    set_primary(@host_to_try)
  end

  if connected?
    # Tell the BSON coder this server's max document size.
    BSON::BSON_CODER.update_max_bson_size(self)
  else
    raise ConnectionFailure, "Failed to connect to a master node at #{@host_to_try[0]}:#{@host_to_try[1]}"
  end
end
alias :reconnect :connect
|
2010-07-19 16:07:46 +00:00
|
|
|
|
2010-11-16 20:43:59 +00:00
|
|
|
# Whether there are still seed nodes left to try.
# NOTE(review): @nodes_to_try is not assigned anywhere in this class --
# looks like replica-set-era state; confirm against ReplSetConnection.
def connecting?
  @nodes_to_try.length > 0
end
|
|
|
|
|
|
|
|
# Whether this connection currently has a usable primary pool.
#
# @return [Object] truthy when the primary pool exists and knows its
#   host and port (the last operand of the && chain, not literal true).
def connected?
  @primary_pool && @primary_pool.host && @primary_pool.port
end
|
|
|
|
|
2011-03-03 10:32:10 +00:00
|
|
|
# Determine if the connection is active. In a normal case the *server_info* operation
|
|
|
|
# will be performed without issues, but if the connection was dropped by the server or
|
|
|
|
# for some reason the sockets are unsynchronized, a ConnectionFailure will be raised and
|
|
|
|
# the return will be false.
|
|
|
|
#
|
|
|
|
# @return [Boolean]
|
|
|
|
def active?
  return false unless connected?

  # A round-trip ping proves the socket is still usable.
  ping
  true

rescue ConnectionFailure
  false
end
|
|
|
|
|
2010-12-29 18:01:05 +00:00
|
|
|
# Determine whether we're reading from a primary node. If false,
|
|
|
|
# this connection connects to a secondary node and @slave_ok is true.
|
|
|
|
#
|
|
|
|
# @return [Boolean]
|
|
|
|
# Set during #connect: true when the node is the master, false when
# connected to a slave with :slave_ok.
def read_primary?
  @read_primary
end
alias :primary? :read_primary?
|
2010-12-29 18:01:05 +00:00
|
|
|
|
2009-11-23 20:20:05 +00:00
|
|
|
# Close the connection to the database.
|
|
|
|
def close
  # Close pooled sockets, then discard pool and primary address.
  @primary_pool.close if @primary_pool
  @primary_pool = nil
  @primary = nil
end
|
|
|
|
|
2010-12-29 23:06:31 +00:00
|
|
|
# Returns the maximum BSON object size as returned by the core server.
|
|
|
|
# Use the 4MB default when the server doesn't report this.
|
|
|
|
#
|
|
|
|
# @return [Integer]
|
|
|
|
def max_bson_size
  config = self['admin'].command({:ismaster => 1})
  # Older servers don't report maxBsonObjectSize; use the driver default.
  config['maxBsonObjectSize'] || Mongo::DEFAULT_MAX_BSON_SIZE
end
|
|
|
|
|
2010-11-16 20:43:59 +00:00
|
|
|
# Checkout a socket for reading (i.e., a secondary node).
|
2010-12-15 17:36:43 +00:00
|
|
|
# Note: this is overridden in ReplSetConnection.
|
2010-11-16 20:43:59 +00:00
|
|
|
def checkout_reader
  connect unless connected?
  # A single-node connection reads from the primary pool too.
  @primary_pool.checkout
end
|
|
|
|
|
|
|
|
# Checkout a socket for writing (i.e., a primary node).
|
2010-12-15 17:36:43 +00:00
|
|
|
# Note: this is overridden in ReplSetConnection.
|
2010-11-16 20:43:59 +00:00
|
|
|
def checkout_writer
  connect unless connected?
  @primary_pool.checkout
end
|
|
|
|
|
|
|
|
# Checkin a socket used for reading.
|
2010-12-15 17:36:43 +00:00
|
|
|
# Note: this is overridden in ReplSetConnection.
|
2010-11-16 20:43:59 +00:00
|
|
|
def checkin_reader(socket)
  # The pool may already have been discarded by #close.
  if @primary_pool
    @primary_pool.checkin(socket)
  end
end
|
|
|
|
|
|
|
|
# Checkin a socket used for writing.
|
2010-12-15 17:36:43 +00:00
|
|
|
# Note: this is overridden in ReplSetConnection.
|
2010-11-16 20:43:59 +00:00
|
|
|
def checkin_writer(socket)
  # The pool may already have been discarded by #close.
  if @primary_pool
    @primary_pool.checkin(socket)
  end
end
|
|
|
|
|
2011-01-31 20:51:39 +00:00
|
|
|
# Run the given block, then log the operation described by +name+ and
# +payload+, returning the block's result.
# TODO: Not sure if this should take a block.
def instrument(name, payload = {}, &blk)
  yield.tap { log_operation(name, payload) }
end
|
|
|
|
|
2010-12-13 19:07:32 +00:00
|
|
|
protected
|
|
|
|
|
|
|
|
# Generic initialization code shared by the constructor (and subclasses).
def setup(opts)
  # Authentication objects
  @auths = opts.fetch(:auths, [])

  # Lock for request ids.
  @id_lock = Mutex.new

  # Pool size and timeout.
  @pool_size = opts[:pool_size] || 1
  if opts[:timeout]
    warn "The :timeout option has been deprecated " +
      "and will be removed in the 2.0 release. Use :pool_timeout instead."
  end
  @timeout = opts[:pool_timeout] || opts[:timeout] || 5.0

  # Timeout on socket read operation.
  @op_timeout = opts[:op_timeout] || nil

  # Timeout on socket connect.
  @connect_timeout = opts[:connect_timeout] || nil

  # Mutex for synchronizing pool access
  @connection_mutex = Mutex.new

  # Global safe option. This is false by default.
  @safe = opts[:safe] || false

  # Create a mutex when a new key, in this case a socket,
  # is added to the hash.
  @safe_mutexes = Hash.new { |h, k| h[k] = Mutex.new }

  # Condition variable for signal and wait
  @queue = ConditionVariable.new

  # Connection pool for primary node
  @primary = nil
  @primary_pool = nil

  @logger = opts[:logger] || nil

  if @logger
    @logger.debug("MongoDB logging. Please note that logging negatively impacts performance " +
      "and should be disabled for high-performance production apps.")
  end

  # Connect eagerly unless the caller passed :connect => false.
  should_connect = opts.fetch(:connect, true)
  connect if should_connect
end
|
|
|
|
|
|
|
|
## Configuration helper methods
|
|
|
|
|
|
|
|
# Returns a host-port pair.
|
|
|
|
#
|
|
|
|
# @return [Array]
|
|
|
|
#
|
|
|
|
# @private
|
|
|
|
def format_pair(host, port)
|
|
|
|
case host
|
|
|
|
when String
|
|
|
|
[host, port ? port.to_i : DEFAULT_PORT]
|
|
|
|
when nil
|
|
|
|
['localhost', DEFAULT_PORT]
|
2010-11-19 23:26:38 +00:00
|
|
|
end
|
|
|
|
end
|
|
|
|
|
2011-01-29 06:20:41 +00:00
|
|
|
## Logging methods
|
|
|
|
|
2011-01-31 20:51:39 +00:00
|
|
|
def log_operation(name, payload)
|
2011-01-29 06:20:41 +00:00
|
|
|
return unless @logger
|
|
|
|
msg = "#{payload[:database]}['#{payload[:collection]}'].#{name}("
|
|
|
|
msg += payload.values_at(:selector, :document, :documents, :fields ).compact.map(&:inspect).join(', ') + ")"
|
|
|
|
msg += ".skip(#{payload[:skip]})" if payload[:skip]
|
|
|
|
msg += ".limit(#{payload[:limit]})" if payload[:limit]
|
2011-03-25 18:57:22 +00:00
|
|
|
msg += ".sort(#{payload[:order]})" if payload[:order]
|
2011-01-29 06:20:41 +00:00
|
|
|
@logger.debug "MONGODB #{msg}"
|
|
|
|
end
|
|
|
|
|
2009-11-24 19:23:43 +00:00
|
|
|
private
|
|
|
|
|
2010-12-15 17:41:33 +00:00
|
|
|
    ## Methods for establishing a connection:

    # If a ConnectionFailure is raised, this method will be called
    # to close the connection and reset connection values.
    # Currently a thin alias for #close and keeps no extra state of its own.
    # TODO: evaluate whether this method is actually necessary
    def reset_connection
      close
    end
|
|
|
|
|
|
|
|
def check_is_master(node)
|
|
|
|
begin
|
|
|
|
host, port = *node
|
2011-06-15 18:20:11 +00:00
|
|
|
|
|
|
|
if @connect_timeout
|
|
|
|
Mongo::TimeoutHandler.timeout(@connect_timeout, OperationTimeout) do
|
|
|
|
socket = TCPSocket.new(host, port)
|
|
|
|
socket.setsockopt(Socket::IPPROTO_TCP, Socket::TCP_NODELAY, 1)
|
|
|
|
end
|
|
|
|
else
|
|
|
|
socket = TCPSocket.new(host, port)
|
|
|
|
socket.setsockopt(Socket::IPPROTO_TCP, Socket::TCP_NODELAY, 1)
|
|
|
|
end
|
2010-07-19 16:23:12 +00:00
|
|
|
|
2011-01-31 19:47:05 +00:00
|
|
|
config = self['admin'].command({:ismaster => 1}, :socket => socket)
|
2010-07-19 16:23:12 +00:00
|
|
|
rescue OperationFailure, SocketError, SystemCallError, IOError => ex
|
2010-12-14 18:14:45 +00:00
|
|
|
close
|
2010-07-19 16:23:12 +00:00
|
|
|
ensure
|
|
|
|
socket.close if socket
|
|
|
|
end
|
|
|
|
|
|
|
|
config
|
|
|
|
end
|
|
|
|
|
2011-01-31 19:47:05 +00:00
|
|
|
    # Set the specified node as primary.
    #
    # Records the [host, port] pair in @primary and builds a fresh
    # connection Pool for it, sized and timed by the options captured
    # in #setup (@pool_size / @timeout).
    #
    # @param node [Array] a [host, port] pair.
    def set_primary(node)
      host, port = *node
      @primary = [host, port]
      @primary_pool = Pool.new(self, host, port, :size => @pool_size, :timeout => @timeout)
    end
|
|
|
|
|
2010-12-15 17:36:43 +00:00
|
|
|
## Low-level connection methods.
|
|
|
|
|
2010-12-13 22:54:28 +00:00
|
|
|
def receive(sock, expected_response)
|
2010-12-14 20:53:59 +00:00
|
|
|
begin
|
2010-12-13 22:54:28 +00:00
|
|
|
receive_header(sock, expected_response)
|
2009-11-23 20:20:05 +00:00
|
|
|
number_received, cursor_id = receive_response_header(sock)
|
|
|
|
read_documents(number_received, cursor_id, sock)
|
2010-12-14 20:53:59 +00:00
|
|
|
rescue Mongo::ConnectionFailure => ex
|
|
|
|
close
|
|
|
|
raise ex
|
|
|
|
end
|
2009-11-23 20:20:05 +00:00
|
|
|
end
|
|
|
|
|
2010-12-13 22:54:28 +00:00
|
|
|
def receive_header(sock, expected_response)
|
|
|
|
header = receive_message_on_socket(16, sock)
|
|
|
|
size, request_id, response_to = header.unpack('VVV')
|
|
|
|
if expected_response != response_to
|
|
|
|
raise Mongo::ConnectionFailure, "Expected response #{expected_response} but got #{response_to}"
|
|
|
|
end
|
|
|
|
|
2009-11-23 20:20:05 +00:00
|
|
|
unless header.size == STANDARD_HEADER_SIZE
|
|
|
|
raise "Short read for DB response header: " +
|
2009-12-16 19:03:15 +00:00
|
|
|
"expected #{STANDARD_HEADER_SIZE} bytes, saw #{header.size}"
|
2009-11-23 20:20:05 +00:00
|
|
|
end
|
2010-12-13 22:54:28 +00:00
|
|
|
nil
|
2009-11-23 20:20:05 +00:00
|
|
|
end
|
2010-10-30 03:18:49 +00:00
|
|
|
|
2009-11-23 20:20:05 +00:00
|
|
|
def receive_response_header(sock)
|
2010-09-11 19:29:57 +00:00
|
|
|
header_buf = receive_message_on_socket(RESPONSE_HEADER_SIZE, sock)
|
2009-11-23 20:20:05 +00:00
|
|
|
if header_buf.length != RESPONSE_HEADER_SIZE
|
|
|
|
raise "Short read for DB response header; " +
|
|
|
|
"expected #{RESPONSE_HEADER_SIZE} bytes, saw #{header_buf.length}"
|
|
|
|
end
|
2010-09-11 19:29:57 +00:00
|
|
|
flags, cursor_id_a, cursor_id_b, starting_from, number_remaining = header_buf.unpack('VVVVV')
|
|
|
|
check_response_flags(flags)
|
|
|
|
cursor_id = (cursor_id_b << 32) + cursor_id_a
|
2009-11-23 20:20:05 +00:00
|
|
|
[number_remaining, cursor_id]
|
|
|
|
end
|
|
|
|
|
2010-07-12 16:11:01 +00:00
|
|
|
    # Inspect the flag bits of a reply header and raise when the server
    # reports a dead cursor.
    #
    # @param flags [Integer] flag bit vector from the response header.
    #
    # @raise [Mongo::OperationFailure] if CURSOR_NOT_FOUND is set.
    def check_response_flags(flags)
      if flags & Mongo::Constants::REPLY_CURSOR_NOT_FOUND != 0
        raise Mongo::OperationFailure, "Query response returned CURSOR_NOT_FOUND. " +
          "Either an invalid cursor was specified, or the cursor may have timed out on the server."
      elsif flags & Mongo::Constants::REPLY_QUERY_FAILURE != 0
        # Getting odd failures when an exception is raised here.
        # NOTE(review): QUERY_FAILURE is deliberately swallowed here per the
        # comment above — confirm before adding a raise.
      end
    end
|
|
|
|
|
2009-11-23 20:20:05 +00:00
|
|
|
def read_documents(number_received, cursor_id, sock)
|
|
|
|
docs = []
|
|
|
|
number_remaining = number_received
|
|
|
|
while number_remaining > 0 do
|
2010-09-11 17:46:10 +00:00
|
|
|
buf = receive_message_on_socket(4, sock)
|
|
|
|
size = buf.unpack('V')[0]
|
|
|
|
buf << receive_message_on_socket(size - 4, sock)
|
2009-11-23 20:20:05 +00:00
|
|
|
number_remaining -= 1
|
2010-04-05 14:39:55 +00:00
|
|
|
docs << BSON::BSON_CODER.deserialize(buf)
|
2009-11-23 20:20:05 +00:00
|
|
|
end
|
|
|
|
[docs, number_received, cursor_id]
|
|
|
|
end
|
|
|
|
|
2010-05-04 20:00:05 +00:00
|
|
|
    # Constructs a getlasterror message. This method is used exclusively by
    # Connection#send_message_with_safe_check.
    #
    # Appends a query for {:getlasterror => 1} (merged with any safe-mode
    # options) against "#{db_name}.$cmd" onto +message+.
    # Because it modifies message by reference, we don't need to return it.
    #
    # @param message buffer to append to, modified in place.
    #   NOTE(review): assumed to respond to #put_int / #put_binary
    #   (BSON::ByteBuffer) — confirm against callers.
    # @param db_name [String] database the getlasterror command targets.
    # @param opts [Hash] optional keys :w, :wtimeout, :fsync; any other
    #   key is rejected by assert_valid_keys.
    def build_last_error_message(message, db_name, opts)
      message.put_int(0)
      BSON::BSON_RUBY.serialize_cstr(message, "#{db_name}.$cmd")
      message.put_int(0)
      message.put_int(-1)   # NOTE(review): presumably "return a single document" — confirm against wire protocol docs
      cmd = BSON::OrderedHash.new
      cmd[:getlasterror] = 1
      if opts.is_a?(Hash)
        opts.assert_valid_keys(:w, :wtimeout, :fsync)
        cmd.merge!(opts)
      end
      message.put_binary(BSON::BSON_CODER.serialize(cmd, false).to_s)
      nil
    end
|
2009-12-16 19:03:15 +00:00
|
|
|
|
2009-11-23 20:20:05 +00:00
|
|
|
# Prepares a message for transmission to MongoDB by
|
2009-11-24 19:43:52 +00:00
|
|
|
# constructing a valid message header.
|
2010-12-15 17:12:51 +00:00
|
|
|
#
|
2010-12-15 17:41:33 +00:00
|
|
|
# Note: this method modifies message by reference.
|
|
|
|
#
|
2011-01-03 19:06:41 +00:00
|
|
|
# @return [Integer] the request id used in the header
|
2010-12-15 17:12:51 +00:00
|
|
|
def add_message_headers(message, operation)
|
2010-09-13 20:53:16 +00:00
|
|
|
headers = [
|
|
|
|
# Message size.
|
|
|
|
16 + message.size,
|
2010-11-16 20:43:59 +00:00
|
|
|
|
2010-09-13 20:53:16 +00:00
|
|
|
# Unique request id.
|
2010-12-15 17:12:51 +00:00
|
|
|
request_id = get_request_id,
|
2010-11-16 20:43:59 +00:00
|
|
|
|
2010-09-13 20:53:16 +00:00
|
|
|
# Response id.
|
|
|
|
0,
|
2010-11-16 20:43:59 +00:00
|
|
|
|
2010-09-13 20:53:16 +00:00
|
|
|
# Opcode.
|
|
|
|
operation
|
|
|
|
].pack('VVVV')
|
2010-11-16 20:43:59 +00:00
|
|
|
|
2009-11-23 20:20:05 +00:00
|
|
|
message.prepend!(headers)
|
2010-12-15 17:12:51 +00:00
|
|
|
|
|
|
|
request_id
|
2009-11-23 20:20:05 +00:00
|
|
|
end
|
|
|
|
|
2010-12-15 17:36:43 +00:00
|
|
|
# Increment and return the next available request id.
|
|
|
|
#
|
|
|
|
# return [Integer]
|
|
|
|
def get_request_id
|
|
|
|
request_id = ''
|
|
|
|
@id_lock.synchronize do
|
|
|
|
request_id = @@current_request_id += 1
|
|
|
|
end
|
|
|
|
request_id
|
|
|
|
end
|
|
|
|
|
2009-11-23 20:20:05 +00:00
|
|
|
# Low-level method for sending a message on a socket.
|
2009-12-16 19:03:15 +00:00
|
|
|
# Requires a packed message and an available socket,
|
2010-07-28 02:19:25 +00:00
|
|
|
#
|
|
|
|
# @return [Integer] number of bytes sent
|
2009-11-23 20:20:05 +00:00
|
|
|
def send_message_on_socket(packed_message, socket)
|
2009-11-24 18:55:59 +00:00
|
|
|
begin
|
2010-09-13 20:55:36 +00:00
|
|
|
total_bytes_sent = socket.send(packed_message, 0)
|
|
|
|
if total_bytes_sent != packed_message.size
|
|
|
|
packed_message.slice!(0, total_bytes_sent)
|
|
|
|
while packed_message.size > 0
|
|
|
|
byte_sent = socket.send(packed_message, 0)
|
|
|
|
total_bytes_sent += byte_sent
|
|
|
|
packed_message.slice!(0, byte_sent)
|
|
|
|
end
|
2010-06-24 17:46:06 +00:00
|
|
|
end
|
2010-07-28 02:19:25 +00:00
|
|
|
total_bytes_sent
|
2009-11-24 18:55:59 +00:00
|
|
|
rescue => ex
|
|
|
|
close
|
|
|
|
raise ConnectionFailure, "Operation failed with the following exception: #{ex}"
|
|
|
|
end
|
2009-11-23 20:20:05 +00:00
|
|
|
end
|
|
|
|
|
|
|
|
# Low-level method for receiving data from socket.
|
|
|
|
# Requires length and an available socket.
|
|
|
|
def receive_message_on_socket(length, socket)
|
2009-11-24 18:55:59 +00:00
|
|
|
begin
|
2011-03-29 15:06:46 +00:00
|
|
|
if @op_timeout
|
2011-03-29 15:46:29 +00:00
|
|
|
message = nil
|
2011-03-29 15:06:46 +00:00
|
|
|
Mongo::TimeoutHandler.timeout(@op_timeout, OperationTimeout) do
|
|
|
|
message = receive_data(length, socket)
|
2010-09-11 10:22:51 +00:00
|
|
|
end
|
2011-03-29 15:06:46 +00:00
|
|
|
else
|
|
|
|
message = receive_data(length, socket)
|
2009-11-24 18:55:59 +00:00
|
|
|
end
|
2011-03-29 15:06:46 +00:00
|
|
|
rescue => ex
|
2010-02-04 23:07:45 +00:00
|
|
|
close
|
2011-03-28 15:09:27 +00:00
|
|
|
|
|
|
|
if ex.class == OperationTimeout
|
2011-03-29 15:46:29 +00:00
|
|
|
raise OperationTimeout, "Timed out waiting on socket read."
|
2011-03-28 15:09:27 +00:00
|
|
|
else
|
|
|
|
raise ConnectionFailure, "Operation failed with the following exception: #{ex}"
|
|
|
|
end
|
2009-11-23 20:20:05 +00:00
|
|
|
end
|
|
|
|
message
|
|
|
|
end
|
2010-11-16 20:43:59 +00:00
|
|
|
|
2011-03-29 15:06:46 +00:00
|
|
|
def receive_data(length, socket)
|
|
|
|
message = new_binary_string
|
|
|
|
socket.read(length, message)
|
|
|
|
raise ConnectionFailure, "connection closed" unless message && message.length > 0
|
|
|
|
if message.length < length
|
|
|
|
chunk = new_binary_string
|
|
|
|
while message.length < length
|
|
|
|
socket.read(length - message.length, chunk)
|
|
|
|
raise ConnectionFailure, "connection closed" unless chunk.length > 0
|
|
|
|
message << chunk
|
|
|
|
end
|
|
|
|
end
|
|
|
|
message
|
|
|
|
end
|
|
|
|
|
2010-09-11 10:22:51 +00:00
|
|
|
if defined?(Encoding)
|
|
|
|
BINARY_ENCODING = Encoding.find("binary")
|
2010-11-16 20:43:59 +00:00
|
|
|
|
2010-09-11 10:22:51 +00:00
|
|
|
def new_binary_string
|
|
|
|
"".force_encoding(BINARY_ENCODING)
|
|
|
|
end
|
|
|
|
else
|
|
|
|
def new_binary_string
|
|
|
|
""
|
|
|
|
end
|
|
|
|
end
|
2009-08-20 22:48:09 +00:00
|
|
|
end
|
2008-11-22 01:00:51 +00:00
|
|
|
end
|