2008-12-17 16:49:06 +00:00
|
|
|
# --
|
2009-01-06 15:51:01 +00:00
|
|
|
# Copyright (C) 2008-2009 10gen Inc.
|
2008-11-22 01:00:51 +00:00
|
|
|
#
|
2009-02-15 13:24:14 +00:00
|
|
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
|
|
# you may not use this file except in compliance with the License.
|
|
|
|
# You may obtain a copy of the License at
|
2008-11-22 01:00:51 +00:00
|
|
|
#
|
2009-02-15 13:24:14 +00:00
|
|
|
# http://www.apache.org/licenses/LICENSE-2.0
|
2008-11-22 01:00:51 +00:00
|
|
|
#
|
2009-02-15 13:24:14 +00:00
|
|
|
# Unless required by applicable law or agreed to in writing, software
|
|
|
|
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
|
|
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
|
|
# See the License for the specific language governing permissions and
|
|
|
|
# limitations under the License.
|
2008-12-17 16:49:06 +00:00
|
|
|
# ++
|
2008-11-22 01:00:51 +00:00
|
|
|
|
|
|
|
require 'socket'
|
2009-01-21 16:52:43 +00:00
|
|
|
require 'digest/md5'
|
2009-10-19 15:46:59 +00:00
|
|
|
require 'thread'
|
2008-11-22 01:00:51 +00:00
|
|
|
require 'mongo/collection'
|
2008-12-08 16:38:42 +00:00
|
|
|
require 'mongo/util/ordered_hash.rb'
|
2009-01-07 20:36:12 +00:00
|
|
|
require 'mongo/admin'
|
2008-11-22 01:00:51 +00:00
|
|
|
|
2009-08-20 14:50:48 +00:00
|
|
|
module Mongo
|
2009-01-16 21:10:52 +00:00
|
|
|
|
2009-08-20 14:50:48 +00:00
|
|
|
# A Mongo database.
|
|
|
|
class DB
|
2009-08-13 20:26:51 +00:00
|
|
|
|
2009-11-04 22:46:15 +00:00
|
|
|
# Size in bytes of the standard wire-protocol message header.
STANDARD_HEADER_SIZE = 16
# Size in bytes of the reply-specific header that follows it.
RESPONSE_HEADER_SIZE = 20

# Well-known system collection namespaces.
SYSTEM_NAMESPACE_COLLECTION = "system.namespaces"
SYSTEM_INDEX_COLLECTION     = "system.indexes"
SYSTEM_PROFILE_COLLECTION   = "system.profile"
SYSTEM_USER_COLLECTION      = "system.users"
SYSTEM_COMMAND_COLLECTION   = "$cmd"
|
2009-08-13 20:26:51 +00:00
|
|
|
|
2009-10-19 21:14:41 +00:00
|
|
|
# Counter for generating unique request ids.
@@current_request_id = 0

# Strict mode enforces collection existence checks. When +true+, asking
# for a collection that does not exist, or trying to create a collection
# that already exists, raises an error.
#
# Strict mode is off (+false+) by default and may be changed at any time.
attr_writer :strict

# Returns the value of the +strict+ flag.
def strict?
  @strict
end

# The name of the database.
attr_reader :name

# The connection object (if any) this DB was handed at creation time.
attr_reader :connection

# Host to which we are currently connected.
attr_reader :host

# Port to which we are currently connected.
attr_reader :port

# An array of [host, port] pairs.
attr_reader :nodes

# The database's socket. For internal (and Cursor) use only.
attr_reader :socket

# The logger instance if :logger is passed to initialize.
attr_reader :logger

# Whether reading from a non-master node is acceptable.
def slave_ok?
  @slave_ok
end

# Whether the connection reconnects automatically after being closed.
def auto_reconnect?
  @auto_reconnect
end

# A primary key factory object (or +nil+). See the README.doc file or
# DB#new for details.
attr_reader :pk_factory

# Assigns the primary key factory. It may be set only once; a second
# assignment raises.
def pk_factory=(pk_factory)
  raise "error: can not change PK factory" if @pk_factory
  @pk_factory = pk_factory
end
|
2008-11-22 01:00:51 +00:00
|
|
|
|
2009-08-20 14:50:48 +00:00
|
|
|
# Instances of DB are normally obtained by calling Mongo#db.
#
# db_name :: The database name.
#
# nodes :: An array of [host, port] pairs. See Connection#new, which offers
#          a more flexible way of defining nodes.
#
# options :: A hash of options:
#
#   :strict :: If true, collections must exist to be accessed and must
#              not exist to be created. See #collection and
#              #create_collection.
#
#   :pk :: A primary key factory object that must respond to :create_pk,
#          which takes a hash and returns that hash merged with any
#          primary-key fields the factory wishes to inject. (If the object
#          already has a primary key, the factory should not inject a new
#          one; this means the object is being used in a repsert but
#          already exists.) The factory's +create_pk+ is called on every
#          insert and the resulting hash is what gets inserted.
#
#   :slave_ok :: Only used when +nodes+ contains a single host/port. If
#                false, we verify the server is the master when connecting
#                and raise if it is not.
#
#   :auto_reconnect :: If the connection is closed (for example, after a
#                      "not master" error on a server pair), automatically
#                      try to reconnect to the master or to the single
#                      server given. Defaults to +false+.
#
#   :logger :: Optional Logger instance to which driver usage information
#              will be logged.
#
# When a DB object first connects to a pair, it finds and connects to the
# master instance. On socket error or a "not master" error, the master of
# the pair is located again.
def initialize(db_name, nodes, options={})
  unless db_name.is_a?(Symbol) || db_name.is_a?(String)
    raise TypeError, "db_name must be a string or symbol"
  end

  [" ", ".", "$", "/", "\\"].each do |invalid_char|
    if db_name.include? invalid_char
      raise InvalidName, "database names cannot contain the character '#{invalid_char}'"
    end
  end
  raise InvalidName, "database name cannot be the empty string" if db_name.empty?

  @connection     = options[:connection]
  @name, @nodes   = db_name, nodes
  @strict         = options[:strict]
  @pk_factory     = options[:pk]
  # Reading from a slave is only OK when exactly one node was given.
  @slave_ok       = options[:slave_ok] && @nodes.length == 1
  @auto_reconnect = options[:auto_reconnect]
  @semaphore      = Mutex.new
  @socket         = nil
  @logger         = options[:logger]
  connect_to_master
end
|
2008-12-17 16:43:08 +00:00
|
|
|
|
2009-08-20 14:50:48 +00:00
|
|
|
# Find and connect to the master among @nodes, trying each [host, port]
# pair in order. A candidate is accepted when it is the master, or when
# @slave_ok is set. Socket-level failures close the candidate socket and
# move on to the next node. Raises ConfigurationError when connecting
# directly to a slave without :slave_ok, and a RuntimeError when no node
# could be connected to at all.
def connect_to_master
  close if @socket
  @host = @port = nil
  @nodes.detect do |node|
    @host, @port = *node
    begin
      @socket = TCPSocket.new(@host, @port)
      @socket.setsockopt(Socket::IPPROTO_TCP, Socket::TCP_NODELAY, 1)

      # Check for master. Can't call master? while holding the mutex,
      # which may already be in use during this call.
      was_locked = @semaphore.locked?
      @semaphore.unlock if was_locked
      is_master = master?
      @semaphore.lock if was_locked

      if !@slave_ok && !is_master
        raise ConfigurationError, "Trying to connect directly to slave; if this is what you want, specify :slave_ok => true."
      end
      @slave_ok || is_master
    rescue SocketError, SystemCallError, IOError
      close if @socket
      false
    end
  end
  raise "error: failed to connect to any given host:port" unless @socket
end
|
2008-12-16 22:35:31 +00:00
|
|
|
|
2009-08-20 14:50:48 +00:00
|
|
|
# Returns true if +username+ has +password+ in +SYSTEM_USER_COLLECTION+.
# +username+ is the user name, +password+ the plaintext password. Uses the
# server's nonce-based authentication handshake.
def authenticate(username, password)
  reply = db_command(:getnonce => 1)
  raise "error retrieving nonce: #{reply}" unless ok?(reply)
  nonce = reply['nonce']

  auth = OrderedHash.new
  auth['authenticate'] = 1
  auth['user']  = username
  auth['nonce'] = nonce
  # The key is MD5(nonce + username + MD5(username:mongo:password)).
  auth['key']   = Digest::MD5.hexdigest("#{nonce}#{username}#{hash_password(username, password)}")
  ok?(db_command(auth))
end

# Deauthorizes use for this database for this connection.
def logout
  reply = db_command(:logout => 1)
  raise "error logging out: #{reply.inspect}" unless ok?(reply)
end
|
2009-01-23 18:47:27 +00:00
|
|
|
|
2009-08-20 14:50:48 +00:00
|
|
|
# Returns an array of collection names in this database, with the database
# prefix stripped and internal ('$'-containing) namespaces filtered out.
def collection_names
  prefix = @name + '.'
  names = collections_info.collect { |doc| doc['name'] || '' }
  names.delete_if { |n| n.index(@name).nil? || n.index('$') }
  names.map { |n| n.sub(prefix, '') }
end

# Returns an array of Collection instances, one for each collection in
# this database.
def collections
  collection_names.map { |name| Collection.new(self, name) }
end

# Returns a cursor over query result hashes. Each hash contains a 'name'
# string and optionally an 'options' hash. If +coll_name+ is specified,
# an array of length 1 is returned.
def collections_info(coll_name=nil)
  selector = coll_name ? {:name => full_collection_name(coll_name)} : {}
  Cursor.new(Collection.new(self, SYSTEM_NAMESPACE_COLLECTION), :selector => selector)
end
|
2009-05-26 19:26:20 +00:00
|
|
|
|
2009-08-20 14:50:48 +00:00
|
|
|
# Create a collection. If +strict+ is false, returns the existing or newly
# created collection. If +strict+ is true, raises when collection +name+
# already exists.
#
# Options is an optional hash:
#
#   :capped :: Boolean. Defaults to +false+.
#   :size   :: If +capped+ is +true+, the maximum number of bytes;
#              otherwise the initial extent of the collection.
#   :max    :: Max number of records in a capped collection. Optional.
def create_collection(name, options={})
  # First check existence.
  if collection_names.include?(name)
    raise "Collection #{name} already exists. Currently in strict mode." if strict?
    return Collection.new(self, name)
  end

  # Create a new collection; the :create key must come first.
  cmd = OrderedHash.new
  cmd[:create] = name
  reply = db_command(cmd.merge(options || {}))
  ok = reply['ok']
  # ok == 0 is tolerated here (matches historical driver behavior).
  return Collection.new(self, name, @pk_factory) if ok.kind_of?(Numeric) && (ok.to_i == 1 || ok.to_i == 0)
  raise "Error creating collection: #{reply.inspect}"
end
|
2009-01-13 18:08:04 +00:00
|
|
|
|
2009-08-20 14:50:48 +00:00
|
|
|
# Returns an Admin helper bound to this database.
def admin
  Admin.new(self)
end

# Return a collection. If +strict+ is false, returns an existing or new
# collection. If +strict+ is true, raises when collection +name+ does not
# already exist.
def collection(name)
  if !strict? || collection_names.include?(name)
    Collection.new(self, name, @pk_factory)
  else
    raise "Collection #{name} doesn't exist. Currently in strict mode."
  end
end
alias_method :[], :collection
|
2008-11-22 01:00:51 +00:00
|
|
|
|
2009-08-20 14:50:48 +00:00
|
|
|
# Drop collection +name+. Returns +true+ on success or if the collection
# does not exist, +false+ otherwise.
def drop_collection(name)
  return true unless collection_names.include?(name)
  ok?(db_command(:drop => name))
end

# Returns the error message from the most recently executed database
# operation for this connection, or +nil+ if there was no error.
def error
  reply = db_command(:getlasterror => 1)
  raise "error retrieving last error: #{reply}" unless ok?(reply)
  reply['err']
end

# Get status information from the last operation on this connection.
def last_status
  db_command(:getlasterror => 1)
end

# Returns +true+ if an error was caused by the most recently executed
# database operation.
def error?
  !error.nil?
end
|
2008-11-22 01:00:51 +00:00
|
|
|
|
2009-08-20 14:50:48 +00:00
|
|
|
# Get the most recent error to have occurred on this database.
#
# Only returns errors that have occurred since the last call to
# DB#reset_error_history - returns +nil+ if there is no such error.
def previous_error
  reply = db_command(:getpreverror => 1)
  reply["err"] ? reply : nil
end

# Reset the error history of this database.
#
# Calls to DB#previous_error will only return errors that have occurred
# since the most recent call to this method.
def reset_error_history
  db_command(:reseterror => 1)
end
|
2008-12-17 16:43:08 +00:00
|
|
|
|
2009-08-20 14:50:48 +00:00
|
|
|
# Returns true if this database is a master (or is not paired with any
# other database), false if it is a slave.
def master?
  reply = db_command(:ismaster => 1)
  flag = reply['ismaster']
  ok?(reply) && flag.kind_of?(Numeric) && flag.to_i == 1
end

# Returns a string of the form "host:port" that points to the master
# database. Works even if this is the master database.
def master
  reply = db_command(:ismaster => 1)
  flag = reply['ismaster']
  unless ok?(reply) && flag.kind_of?(Numeric)
    raise "Error retrieving master database: #{reply.inspect}"
  end
  # When we are the master, report our own address; otherwise the server
  # names the master in the 'remote' field.
  flag.to_i == 1 ? "#{@host}:#{@port}" : reply['remote']
end
|
2008-11-22 01:00:51 +00:00
|
|
|
|
2009-08-20 14:50:48 +00:00
|
|
|
# Close the connection to the database. Clears @socket before closing so
# concurrent readers see the connection as gone immediately.
def close
  return unless @socket
  sock, @socket = @socket, nil
  sock.close
end

# Whether a socket connection is currently open.
def connected?
  !@socket.nil?
end
|
2009-04-28 18:55:36 +00:00
|
|
|
|
2009-08-20 14:50:48 +00:00
|
|
|
# Read exactly +length+ bytes from the database's socket, blocking until
# all of them have arrived. Raises a RuntimeError ("connection closed")
# if the peer closes the connection before +length+ bytes are read.
#
# NOTE(review): this duplicates the private #receive_data_on_socket
# helper; it is kept public since Cursor uses the socket directly.
def receive_full(length)
  message = ""
  while message.length < length
    chunk = @socket.recv(length - message.length)
    raise "connection closed" unless chunk.length > 0
    # In-place append; the original `message += chunk` allocated a fresh
    # string on every chunk.
    message << chunk
  end
  message
end
|
2009-03-13 15:03:52 +00:00
|
|
|
|
2009-08-20 14:50:48 +00:00
|
|
|
# Returns a Cursor over the query results.
#
# Note that the query is sent lazily: the cursor issues it only when the
# caller first requests an object, so an unused cursor never hits the
# server.
def query(collection, query, admin=false)
  Cursor.new(self, collection, query, admin)
end

# Dereference a DBRef, returning the document it points to (or nil if
# the target does not exist).
def dereference(dbref)
  collection(dbref.namespace).find_one("_id" => dbref.object_id)
end
|
2008-12-16 22:35:31 +00:00
|
|
|
|
2009-08-20 14:50:48 +00:00
|
|
|
# Evaluate a JavaScript expression on MongoDB.
# +code+ should be a string or Code instance containing a JavaScript
# expression. Additional arguments are passed to that expression when it
# runs on the server. Returns the expression's return value, or raises
# OperationFailure.
def eval(code, *args)
  code = Code.new(code) unless code.is_a?(Code)

  cmd = OrderedHash.new
  cmd[:$eval] = code
  cmd[:args]  = args
  reply = db_command(cmd)
  return reply['retval'] if ok?(reply)
  raise OperationFailure, "eval failed: #{reply['errmsg']}"
end
|
2008-11-22 01:00:51 +00:00
|
|
|
|
2009-08-20 14:50:48 +00:00
|
|
|
# Rename collection +from+ to +to+. Meant to be called by
# Collection#rename. The renameCollection command must run against the
# admin database, hence the +true+ second argument.
def rename_collection(from, to)
  cmd = OrderedHash.new
  cmd[:renameCollection] = "#{@name}.#{from}"
  cmd[:to] = "#{@name}.#{to}"
  reply = db_command(cmd, true)
  raise "Error renaming collection: #{reply.inspect}" unless ok?(reply)
end

# Drop index +name+ from +collection_name+. Normally called from
# Collection#drop_index or Collection#drop_indexes.
def drop_index(collection_name, name)
  cmd = OrderedHash.new
  cmd[:deleteIndexes] = collection_name
  cmd[:index] = name
  reply = db_command(cmd)
  raise "Error with drop_index command: #{reply.inspect}" unless ok?(reply)
end
|
2009-01-21 16:26:18 +00:00
|
|
|
|
2009-08-20 14:50:48 +00:00
|
|
|
# Get information on the indexes for the collection +collection_name+.
# Normally called by Collection#index_information. Returns a hash where
# the keys are index names (as returned by Collection#create_index) and
# the values are lists of [key, direction] pairs specifying the index
# (as passed to Collection#create_index).
def index_information(collection_name)
  sel = {:ns => full_collection_name(collection_name)}
  info = {}
  Cursor.new(Collection.new(self, SYSTEM_INDEX_COLLECTION), :selector => sel).each do |index|
    # BUG FIX: was `index['key'].map` with no block, which yields an
    # Enumerator rather than the documented array of pairs.
    info[index['name']] = index['key'].to_a
  end
  info
end
|
|
|
|
|
|
|
|
# Create a new index on +collection_name+. +field_or_spec+ is either a
# single field name or an Array of [field name, direction] pairs, where
# direction is Mongo::ASCENDING or Mongo::DESCENDING. Normally called by
# Collection#create_index. If +unique+ is true the index enforces a
# uniqueness constraint.
def create_index(collection_name, field_or_spec, unique=false)
  collection(collection_name).create_index(field_or_spec, unique)
end
|
|
|
|
|
2009-10-19 21:14:41 +00:00
|
|
|
# Sends a message to MongoDB.
#
# Takes a MongoDB opcode, +operation+, a message of class ByteBuffer,
# +message+, and an optional formatted +log_message+. Adds the standard
# wire headers and writes the result to the socket under the mutex.
def send_message_with_operation(operation, message, log_message=nil)
  packed = add_message_headers(operation, message).to_s
  @logger.debug(" MONGODB #{log_message || message}") if @logger
  @semaphore.synchronize { send_message_on_socket(packed) }
end

# Like #send_message_with_operation but for messages that are already raw
# byte strings (headers are prepended without ByteBuffer conversion).
def send_message_with_operation_raw(operation, message, log_message=nil)
  packed = add_message_headers_raw(operation, message)
  @logger.debug(" MONGODB #{log_message || message}") if @logger
  @semaphore.synchronize { send_message_on_socket(packed) }
end
|
|
|
|
|
2009-11-05 20:14:48 +00:00
|
|
|
# Sends a message to the database along with a piggy-backed getlasterror
# query, waits for the response, and raises OperationFailure if the
# operation produced a server-side error.
def send_message_with_safe_check(operation, message, log_message=nil)
  packed = add_message_headers(operation, message)
  error_check = last_error_message
  @logger.debug(" MONGODB #{log_message || message}") if @logger
  @semaphore.synchronize do
    send_message_on_socket(packed.append!(error_check).to_s)
    docs, num_received, cursor_id = receive
    if num_received == 1 && error = docs[0]['err']
      raise Mongo::OperationFailure, error
    end
  end
end
|
|
|
|
|
2009-11-05 21:14:37 +00:00
|
|
|
# Send a message to the database and wait for the response. Returns the
# [docs, number_received, cursor_id] triple produced by #receive.
def receive_message_with_operation(operation, message, log_message=nil)
  packed = add_message_headers(operation, message).to_s
  @logger.debug(" MONGODB #{log_message || message}") if @logger
  @semaphore.synchronize do
    send_message_on_socket(packed)
    receive
  end
end
|
|
|
|
|
2009-11-05 21:14:37 +00:00
|
|
|
# Return +true+ if +doc+ contains an 'ok' field whose numeric value is 1.
def ok?(doc)
  status = doc['ok']
  status.kind_of?(Numeric) && status.to_i == 1
end
|
|
|
|
|
|
|
|
# DB commands need to be ordered, so +selector+ must be an OrderedHash
# (or a Hash with only one element). What DB commands really need is
# that the "command" key comes first.
def db_command(selector, use_admin_db=false)
  unless selector.kind_of?(OrderedHash)
    if !selector.kind_of?(Hash) || selector.keys.length > 1
      raise "db_command must be given an OrderedHash when there is more than one key"
    end
  end

  # Commands are issued as a limit -1 query against the $cmd namespace.
  Cursor.new(Collection.new(self, SYSTEM_COMMAND_COLLECTION),
             :admin => use_admin_db, :limit => -1, :selector => selector).next_object
end
|
|
|
|
|
|
|
|
# Sends a command to the database.
#
# :selector (required) :: An OrderedHash, or a standard Hash with just one
#                         key, specifying the command to be performed.
#
# :admin (optional) :: If true, the command is executed on the admin
#                      collection.
#
# :check_response (optional) :: If true, raises an exception when the
#                               command fails.
#
# Note: DB commands must start with the "command" key, so any selector
# containing more than one key must be an OrderedHash.
def command(selector, admin=false, check_response=false)
  raise MongoArgumentError, "command must be given a selector" unless selector.is_a?(Hash) && !selector.empty?
  # A plain (unordered) Hash with several keys cannot guarantee key order.
  if selector.class.eql?(Hash) && selector.keys.length > 1
    raise MongoArgumentError, "DB#command requires an OrderedHash when hash contains multiple keys"
  end

  result = Cursor.new(system_command_collection, :admin => admin,
                      :limit => -1, :selector => selector).next_object

  if check_response && !ok?(result)
    raise OperationFailure, "Database command '#{selector.keys.first}' failed."
  end
  result
end
|
|
|
|
|
|
|
|
# The fully qualified "<db>.<collection>" namespace for +collection_name+.
def full_collection_name(collection_name)
  "#{@name}.#{collection_name}"
end

private
|
|
|
|
|
2009-11-04 22:46:15 +00:00
|
|
|
# Read a complete server reply: standard header, response header, then the
# returned documents. Returns [docs, number_received, cursor_id].
def receive
  receive_header
  number_received, cursor_id = receive_response_header
  read_documents(number_received, cursor_id)
end

# Read and decode the standard wire-protocol message header.
def receive_header
  header = ByteBuffer.new
  # CONSISTENCY FIX: read STANDARD_HEADER_SIZE bytes instead of a
  # hard-coded 16, so the read length always matches the check below.
  header.put_array(receive_data_on_socket(STANDARD_HEADER_SIZE).unpack("C*"))
  unless header.size == STANDARD_HEADER_SIZE
    raise "Short read for DB response header: " +
      "expected #{STANDARD_HEADER_SIZE} bytes, saw #{header.size}"
  end
  header.rewind
  # Fields are decoded in wire order; their values are currently unused.
  size = header.get_int
  request_id = header.get_int
  response_to = header.get_int
  op = header.get_int
end
|
|
|
|
|
|
|
|
# Read the reply-specific header and return [number_remaining, cursor_id].
def receive_response_header
  buf = ByteBuffer.new
  buf.put_array(receive_data_on_socket(RESPONSE_HEADER_SIZE).unpack("C*"))
  if buf.length != RESPONSE_HEADER_SIZE
    raise "Short read for DB response header; " +
      "expected #{RESPONSE_HEADER_SIZE} bytes, saw #{buf.length}"
  end
  buf.rewind
  result_flags    = buf.get_int
  cursor_id       = buf.get_long
  starting_from   = buf.get_int
  number_remaining = buf.get_int
  [number_remaining, cursor_id]
end
|
|
|
|
|
|
|
|
# Read +number_received+ BSON documents off the socket. Each document is
# prefixed on the wire by its own 4-byte length, which is read first and
# kept as the first 4 bytes of the buffer handed to the deserializer.
def read_documents(number_received, cursor_id)
  docs = []
  number_received.times do
    buf = ByteBuffer.new
    buf.put_array(receive_data_on_socket(4).unpack("C*"))
    buf.rewind
    size = buf.get_int
    buf.put_array(receive_data_on_socket(size - 4).unpack("C*"), 4)
    buf.rewind
    docs << BSON.new.deserialize(buf)
  end
  [docs, number_received, cursor_id]
end
|
|
|
|
|
2009-11-04 22:46:15 +00:00
|
|
|
# Write an already-packed message to the socket, reconnecting first when
# auto-reconnect is enabled and the connection has been lost. On any write
# failure the socket is closed and the error re-raised.
def send_message_on_socket(packed_message)
  connect_to_master if !connected? && @auto_reconnect
  begin
    @socket.print(packed_message)
    @socket.flush
  rescue => err
    close
    raise err
  end
end
|
|
|
|
|
2009-11-04 22:46:15 +00:00
|
|
|
# Receive exactly +length+ bytes on the socket, blocking until they have
# all arrived. Raises a RuntimeError ("connection closed") if the peer
# closes the connection mid-read.
def receive_data_on_socket(length)
  message = ""
  while message.length < length
    chunk = @socket.recv(length - message.length)
    raise "connection closed" unless chunk.length > 0
    # In-place append; the original `message += chunk` allocated a fresh
    # string on every chunk.
    message << chunk
  end
  message
end
|
|
|
|
|
2009-10-19 21:14:41 +00:00
|
|
|
# Prepares a message for transmission to MongoDB by prepending a valid
# wire-protocol header. Returns the message with headers prepended.
def add_message_headers(operation, message)
  headers = ByteBuffer.new
  headers.put_int(16 + message.size)  # total message size, header included
  headers.put_int(get_request_id)     # unique request id
  headers.put_int(0)                  # responseTo: zero for requests
  headers.put_int(operation)          # opcode
  message.prepend!(headers)
end
|
|
|
|
|
2009-10-19 21:14:41 +00:00
|
|
|
# Increments and then returns the next available request id.
# Note: this method should be called from within a lock.
def get_request_id
  @@current_request_id += 1
end
|
|
|
|
|
2009-11-05 20:14:48 +00:00
|
|
|
# Creates a getlasterror message, used to piggy-back error checks onto
# write operations.
def last_error_message
  generate_last_error_message
end

# Builds an OP_QUERY message that issues {:getlasterror => 1} against
# this database's $cmd namespace.
def generate_last_error_message
  buf = ByteBuffer.new
  buf.put_int(0)                                # reserved/flags
  BSON.serialize_cstr(buf, "#{@name}.$cmd")     # full collection name
  buf.put_int(0)                                # numberToSkip
  buf.put_int(-1)                               # numberToReturn: one doc
  buf.put_array(BSON_SERIALIZER.serialize({:getlasterror => 1}, false).unpack("C*"))
  add_message_headers(Mongo::Constants::OP_QUERY, buf)
end
|
|
|
|
|
|
|
|
# Returns a cached reseterror message (built once per process).
def reset_error_message
  @@reset_error_message ||= generate_reset_error_message
end

# Builds an OP_QUERY message that issues {:reseterror => 1} against this
# database's $cmd namespace.
def generate_reset_error_message
  buf = ByteBuffer.new
  buf.put_int(0)                                # reserved/flags
  BSON.serialize_cstr(buf, "#{@name}.$cmd")     # full collection name
  buf.put_int(0)                                # numberToSkip
  buf.put_int(-1)                               # numberToReturn: one doc
  buf.put_array(BSON_SERIALIZER.serialize({:reseterror => 1}, false).unpack("C*"))
  add_message_headers(Mongo::Constants::OP_QUERY, buf)
end
|
|
|
|
|
2009-10-19 21:14:41 +00:00
|
|
|
# MD5 digest of "user:mongo:password", as required by the MongoDB
# authentication protocol.
def hash_password(username, plaintext)
  Digest::MD5.hexdigest("#{username}:mongo:#{plaintext}")
end

# A Collection handle on the special "$cmd" command namespace.
def system_command_collection
  Collection.new(self, SYSTEM_COMMAND_COLLECTION)
end
|
2008-11-22 01:00:51 +00:00
|
|
|
end
|
|
|
|
end
|