# encoding: UTF-8

# --
# Copyright (C) 2008-2011 10gen Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

module Mongo

  # A named collection of documents in a database.
  class Collection
    include Mongo::Logging

    attr_reader :db, :name, :pk_factory, :hint, :safe

    # Initialize a collection object.
    #
    # @param [String, Symbol] name the name of the collection.
    # @param [DB] db a MongoDB database instance.
    #
    # @option opts [:create_pk] :pk (BSON::ObjectId) A primary key factory to use
    #   other than the default BSON::ObjectId.
    #
    # @option opts [Boolean, Hash] :safe (false) Set the default safe-mode options
    #   for the insert, update, and remove methods called on this Collection instance. If no
    #   value is provided, the default value set on this instance's DB will be used. This
    #   default can be overridden for any invocation of insert, update, or remove.
    # @option opts [:primary, :secondary] :read The default read preference for queries
    #   initiated from this connection object. If +:secondary+ is chosen, reads will be sent
    #   to one of the closest available secondary nodes. If a secondary node cannot be located, the
    #   read will be sent to the primary. If this option is left unspecified, the value of the read
    #   preference for this collection's associated Mongo::DB object will be used.
    #
    # @raise [InvalidNSName]
    #   if collection name is empty, contains '$', or starts or ends with '.'
    #
    # @raise [TypeError]
    #   if collection name is not a string or symbol
    #
    # @return [Collection]
    #
    # @core collections constructor_details
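    #
    # A minimal usage sketch; the database handle and the 'users' collection name are illustrative:
    #
    # @example Obtaining a collection with safe mode enabled by default:
    #   db    = Mongo::Connection.new.db('test')
    #   users = Mongo::Collection.new('users', db, :safe => true)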
    def initialize(name, db, opts={})
      if db.is_a?(String) && name.is_a?(Mongo::DB)
        warn "Warning: the order of parameters to initialize a collection has changed. " +
             "Please specify the collection name first, followed by the db. This will be made permanent " +
             "in v2.0."
        db, name = name, db
      end

      case name
      when Symbol, String
      else
        raise TypeError, "name must be a string or symbol"
      end

      name = name.to_s

      if name.empty? or name.include? ".."
        raise Mongo::InvalidNSName, "collection names cannot be empty"
      end
      if name.include? "$"
        raise Mongo::InvalidNSName, "collection names must not contain '$'" unless name =~ /((^\$cmd)|(oplog\.\$main))/
      end
      if name.match(/^\./) or name.match(/\.$/)
        raise Mongo::InvalidNSName, "collection names must not start or end with '.'"
      end

      if opts.respond_to?(:create_pk) || !opts.is_a?(Hash)
        warn "The method for specifying a primary key factory on a Collection has changed.\n" +
             "Please specify it as an option (e.g., :pk => PkFactory)."
        pk_factory = opts
      else
        pk_factory = nil
      end

      @db, @name  = db, name
      @connection = @db.connection
      @logger     = @connection.logger
      @cache_time = @db.cache_time
      @cache      = Hash.new(0)
      unless pk_factory
        @safe = opts.fetch(:safe, @db.safe)
        if value = opts[:read]
          Mongo::Support.validate_read_preference(value)
        else
          value = @db.read_preference
        end
        @read_preference = value.is_a?(Hash) ? value.dup : value
      end
      @pk_factory = pk_factory || opts[:pk] || BSON::ObjectId
      @hint = nil
    end

    # Indicate whether this is a capped collection.
    #
    # @raise [Mongo::OperationFailure]
    #   if the collection doesn't exist.
    #
    # @return [Boolean]
    def capped?
      @db.command({:collstats => @name})['capped'] == 1
    end

    # Return a sub-collection of this collection by name. If 'users' is a collection, then
    # 'users.comments' is a sub-collection of users.
    #
    # @param [String, Symbol] name
    #   the collection to return
    #
    # @raise [Mongo::InvalidNSName]
    #   if passed an invalid collection name
    #
    # @return [Collection]
    #   the specified sub-collection
    def [](name)
      name = "#{self.name}.#{name}"
      return Collection.new(name, db) if !db.strict? ||
        db.collection_names.include?(name.to_s)
      raise "Collection #{name} doesn't exist. Currently in strict mode."
    end

    # Set a hint field for query optimizer. Hint may be a single field
    # name, array of field names, or a hash (preferably an [OrderedHash]).
    # If using MongoDB > 1.1, you probably don't ever need to set a hint.
    #
    # @param [String, Array, OrderedHash] hint a single field, an array of
    #   fields, or a hash specifying fields
    def hint=(hint=nil)
      @hint = normalize_hint_fields(hint)
      self
    end

    # Query the database.
    #
    # The +selector+ argument is a prototype document that all results must
    # match. For example:
    #
    #   collection.find({"hello" => "world"})
    #
    # only matches documents that have a key "hello" with value "world".
    # Matches can have other keys *in addition* to "hello".
    #
    # If given an optional block +find+ will yield a Cursor to that block,
    # close the cursor, and then return nil. This guarantees that partially
    # evaluated cursors will be closed. If given no block +find+ returns a
    # cursor.
    #
    # @param [Hash] selector
    #   a document specifying elements which must be present for a
    #   document to be included in the result set. Note that, in rare cases
    #   (e.g., with $near queries), the order of keys will matter. To preserve
    #   key order on a selector, use an instance of BSON::OrderedHash (only applies
    #   to Ruby 1.8).
    #
    # @option opts [Array, Hash] :fields field names that should be returned in the result
    #   set ("_id" will be included unless explicitly excluded). By limiting results to a certain subset of fields,
    #   you can cut down on network traffic and decoding time. If using a Hash, keys should be field
    #   names and values should be either 1 or 0, depending on whether you want to include or exclude
    #   the given field.
    # @option opts [:primary, :secondary] :read The default read preference for queries
    #   initiated from this connection object. If +:secondary+ is chosen, reads will be sent
    #   to one of the closest available secondary nodes. If a secondary node cannot be located, the
    #   read will be sent to the primary. If this option is left unspecified, the value of the read
    #   preference for this Collection object will be used.
    # @option opts [Integer] :skip number of documents to skip from the beginning of the result set
    # @option opts [Integer] :limit maximum number of documents to return
    # @option opts [Array] :sort an array of [key, direction] pairs to sort by. Direction should
    #   be specified as Mongo::ASCENDING (or :ascending / :asc) or Mongo::DESCENDING (or :descending / :desc)
    # @option opts [String, Array, OrderedHash] :hint hint for query optimizer, usually not necessary if
    #   using MongoDB > 1.1
    # @option opts [Boolean] :snapshot (false) if true, snapshot mode will be used for this query.
    #   Snapshot mode assures no duplicates are returned, or objects missed, which were present at both the start and
    #   end of the query's execution.
    #   For details see http://www.mongodb.org/display/DOCS/How+to+do+Snapshotting+in+the+Mongo+Database
    # @option opts [Integer] :batch_size (100) the number of documents to be returned by the database per
    #   GETMORE operation. A value of 0 will let the database server decide how many results to return.
    #   This option can be ignored for most use cases.
    # @option opts [Boolean] :timeout (true) when +true+, the returned cursor will be subject to
    #   the normal cursor timeout behavior of the mongod process. When +false+, the returned cursor will
    #   never time out. Note that disabling timeout will only work when #find is invoked with a block.
    #   This is to prevent any inadvertent failure to close the cursor, as the cursor is explicitly
    #   closed when block code finishes.
    # @option opts [Integer] :max_scan (nil) Limit the number of items to scan on both collection scans and indexed queries.
    # @option opts [Boolean] :show_disk_loc (false) Return the disk location of each query result (for debugging).
    # @option opts [Boolean] :return_key (false) Return the index key used to obtain the result (for debugging).
    # @option opts [Block] :transformer (nil) a block for transforming returned documents.
    #   This is normally used by object mappers to convert each returned document to an instance of a class.
    #
    # @raise [ArgumentError]
    #   if timeout is set to false and find is not invoked in a block
    #
    # @raise [RuntimeError]
    #   if given unknown options
    #
    # @core find find-instance_method
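    #
    # A brief usage sketch; the 'users' collection and its fields are illustrative:
    #
    # @example Iterating over a query with a block, so the cursor is closed automatically:
    #   users.find({:age => {"$gt" => 21}}, :fields => ["name"], :sort => [[:name, Mongo::ASCENDING]]) do |cursor|
    #     cursor.each { |doc| puts doc["name"] }
    #   end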
    def find(selector={}, opts={})
      opts   = opts.dup
      fields = opts.delete(:fields)
      fields = ["_id"] if fields && fields.empty?
      skip   = opts.delete(:skip) || 0
      limit  = opts.delete(:limit) || 0
      sort   = opts.delete(:sort)
      hint   = opts.delete(:hint)
      snapshot   = opts.delete(:snapshot)
      batch_size = opts.delete(:batch_size)
      timeout    = (opts.delete(:timeout) == false) ? false : true
      max_scan   = opts.delete(:max_scan)
      return_key = opts.delete(:return_key)
      transformer   = opts.delete(:transformer)
      show_disk_loc = opts.delete(:show_disk_loc)
      read          = opts.delete(:read) || @read_preference

      if timeout == false && !block_given?
        raise ArgumentError, "Collection#find must be invoked with a block when timeout is disabled."
      end

      if hint
        hint = normalize_hint_fields(hint)
      else
        hint = @hint # assumed to be normalized already
      end

      raise RuntimeError, "Unknown options [#{opts.inspect}]" unless opts.empty?

      cursor = Cursor.new(self, {
        :selector      => selector,
        :fields        => fields,
        :skip          => skip,
        :limit         => limit,
        :order         => sort,
        :hint          => hint,
        :snapshot      => snapshot,
        :timeout       => timeout,
        :batch_size    => batch_size,
        :transformer   => transformer,
        :max_scan      => max_scan,
        :show_disk_loc => show_disk_loc,
        :return_key    => return_key,
        :read          => read
      })

      if block_given?
        yield cursor
        cursor.close
        nil
      else
        cursor
      end
    end

    # Return a single object from the database.
    #
    # @return [OrderedHash, Nil]
    #   a single document or nil if no result is found.
    #
    # @param [Hash, ObjectId, Nil] spec_or_object_id a hash specifying elements
    #   which must be present for a document to be included in the result set or an
    #   instance of ObjectId to be used as the value for an _id query.
    #   If nil, an empty selector, {}, will be used.
    #
    # @option opts [Hash]
    #   any valid options that can be sent to Collection#find
    #
    # @raise [TypeError]
    #   if the argument is of an improper type.
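    #
    # A short sketch; the 'users' collection, the query values, and the ObjectId string are illustrative:
    #
    # @example Fetching one document by selector or by _id:
    #   users.find_one({:name => "Alice"})
    #   users.find_one(BSON::ObjectId.from_string("4f1d9ce4e4b0b1e1e1e1e1e1"))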
    def find_one(spec_or_object_id=nil, opts={})
      spec = case spec_or_object_id
        when nil
          {}
        when BSON::ObjectId
          {:_id => spec_or_object_id}
        when Hash
          spec_or_object_id
        else
          raise TypeError, "spec_or_object_id must be an instance of ObjectId or Hash, or nil"
      end
      find(spec, opts.merge(:limit => -1)).next_document
    end

    # Save a document to this collection.
    #
    # @param [Hash] doc
    #   the document to be saved. If the document already has an '_id' key,
    #   then an update (upsert) operation will be performed, and any existing
    #   document with that _id is overwritten. Otherwise an insert operation is performed.
    #
    # @return [ObjectId] the _id of the saved document.
    #
    # @option opts [Boolean, Hash] :safe (+false+)
    #   run the operation in safe mode, which runs a getlasterror command on the
    #   database to report any assertion. In addition, a hash can be provided to
    #   run an fsync and/or wait for replication of the save (>= 1.5.1). See the options
    #   for DB#error.
    #
    # @raise [OperationFailure] when :safe mode fails.
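    #
    # A minimal sketch; the 'users' collection and the document fields are illustrative:
    #
    # @example Inserting on first save, then upserting once the _id is known:
    #   id = users.save({:name => "Alice"})
    #   users.save({:_id => id, :name => "Alice", :age => 30}, :safe => true)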
    def save(doc, opts={})
      if doc.has_key?(:_id) || doc.has_key?('_id')
        id = doc[:_id] || doc['_id']
        update({:_id => id}, doc, :upsert => true, :safe => opts.fetch(:safe, @safe))
        id
      else
        insert(doc, :safe => opts.fetch(:safe, @safe))
      end
    end

    # Insert one or more documents into the collection.
    #
    # @param [Hash, Array] doc_or_docs
    #   a document (as a hash) or array of documents to be inserted.
    #
    # @return [ObjectId, Array]
    #   The _id of the inserted document or a list of _ids of all inserted documents.
    #
    # @option opts [Boolean, Hash] :safe (+false+)
    #   run the operation in safe mode, which runs a getlasterror command on the
    #   database to report any assertion. In addition, a hash can be provided to
    #   run an fsync and/or wait for replication of the insert (>= 1.5.1). Safe
    #   options provided here will override any safe options set on this collection,
    #   its database object, or the current connection. See the options
    #   for DB#get_last_error.
    #
    # @option opts [Boolean] :continue_on_error (+false+) If true, then
    #   continue a bulk insert even if one of the documents inserted
    #   triggers a database assertion (as in a duplicate insert, for instance).
    #   MongoDB v2.0+.
    #
    # @core insert insert-instance_method
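    #
    # A brief sketch; the 'users' collection and documents are illustrative:
    #
    # @example Single and bulk inserts:
    #   users.insert({:name => "Alice"}, :safe => true)
    #   users.insert([{:name => "Bob"}, {:name => "Carol"}], :continue_on_error => true)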
    def insert(doc_or_docs, opts={})
      doc_or_docs = [doc_or_docs] unless doc_or_docs.is_a?(Array)
      doc_or_docs.collect! { |doc| @pk_factory.create_pk(doc) }
      safe = opts.fetch(:safe, @safe)
      result = insert_documents(doc_or_docs, @name, true, safe, opts)
      result.size > 1 ? result : result.first
    end
    alias_method :<<, :insert

    # Remove all documents from this collection.
    #
    # @param [Hash] selector
    #   If specified, only matching documents will be removed.
    #
    # @option opts [Boolean, Hash] :safe (+false+)
    #   run the operation in safe mode, which will run a getlasterror command on the
    #   database to report any assertion. In addition, a hash can be provided to
    #   run an fsync and/or wait for replication of the remove (>= 1.5.1). Safe
    #   options provided here will override any safe options set on this collection,
    #   its database, or the current connection. See the options for DB#get_last_error for more details.
    #
    # @example remove all documents from the 'users' collection:
    #   users.remove
    #   users.remove({})
    #
    # @example remove only documents that have expired:
    #   users.remove({:expire => {"$lte" => Time.now}})
    #
    # @return [Hash, true] Returns a Hash containing the last error object if running in safe mode.
    #   Otherwise, returns true.
    #
    # @raise [Mongo::OperationFailure] an exception will be raised iff safe mode is enabled
    #   and the operation fails.
    #
    # @core remove remove-instance_method
    def remove(selector={}, opts={})
      # Initial byte is 0.
      safe = opts.fetch(:safe, @safe)
      message = BSON::ByteBuffer.new("\0\0\0\0")
      BSON::BSON_RUBY.serialize_cstr(message, "#{@db.name}.#{@name}")
      message.put_int(0)
      message.put_binary(BSON::BSON_CODER.serialize(selector, false, true, @connection.max_bson_size).to_s)

      instrument(:remove, :database => @db.name, :collection => @name, :selector => selector) do
        if safe
          @connection.send_message_with_safe_check(Mongo::Constants::OP_DELETE, message, @db.name, nil, safe)
        else
          @connection.send_message(Mongo::Constants::OP_DELETE, message)
          true
        end
      end
    end

    # Update one or more documents in this collection.
    #
    # @param [Hash] selector
    #   a hash specifying elements which must be present for a document to be updated. Note:
    #   the update command currently updates only the first document matching the
    #   given selector. If you want all matching documents to be updated, be sure
    #   to specify :multi => true.
    # @param [Hash] document
    #   a hash specifying the fields to be changed in the selected document,
    #   or (in the case of an upsert) the document to be inserted
    #
    # @option opts [Boolean] :upsert (+false+) if true, performs an upsert (update or insert)
    # @option opts [Boolean] :multi (+false+) update all documents matching the selector, as opposed to
    #   just the first matching document. Note: only works in MongoDB 1.1.3 or later.
    # @option opts [Boolean] :safe (+false+)
    #   If true, check that the save succeeded. OperationFailure
    #   will be raised on an error. Note that a safe check requires an extra
    #   round-trip to the database. Safe options provided here will override any safe
    #   options set on this collection, its database object, or the current connection.
    #   See the options for DB#get_last_error for details.
    #
    # @return [Hash, true] Returns a Hash containing the last error object if running in safe mode.
    #   Otherwise, returns true.
    #
    # @core update update-instance_method
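    #
    # A short sketch; the 'users' collection and its fields are illustrative:
    #
    # @example Updating every matching document in safe mode:
    #   users.update({:age => {"$lt" => 18}}, {"$set" => {:minor => true}}, :multi => true, :safe => true)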
    def update(selector, document, opts={})
      # Initial byte is 0.
      safe = opts.fetch(:safe, @safe)
      message = BSON::ByteBuffer.new("\0\0\0\0")
      BSON::BSON_RUBY.serialize_cstr(message, "#{@db.name}.#{@name}")
      update_options = 0
      update_options += 1 if opts[:upsert]
      update_options += 2 if opts[:multi]
      message.put_int(update_options)
      message.put_binary(BSON::BSON_CODER.serialize(selector, false, true).to_s)
      message.put_binary(BSON::BSON_CODER.serialize(document, false, true, @connection.max_bson_size).to_s)

      instrument(:update, :database => @db.name, :collection => @name, :selector => selector, :document => document) do
        if safe
          @connection.send_message_with_safe_check(Mongo::Constants::OP_UPDATE, message, @db.name, nil, safe)
        else
          @connection.send_message(Mongo::Constants::OP_UPDATE, message)
        end
      end
    end

    # Create a new index.
    #
    # @param [String, Array] spec
    #   should be either a single field name or an array of
    #   [field name, direction] pairs. Directions should be specified
    #   as Mongo::ASCENDING, Mongo::DESCENDING, or Mongo::GEO2D.
    #
    # Note that geospatial indexing only works with versions of MongoDB >= 1.3.3. Keep in mind, too,
    # that in order to geo-index a given field, that field must reference either an array or a sub-object
    # where the first two values represent x- and y-coordinates. Examples can be seen below.
    #
    # Also note that it is permissible to create compound indexes that include a geospatial index as
    # long as the geospatial index comes first.
    #
    # If your code calls create_index frequently, you can use Collection#ensure_index to cache these calls
    # and thereby prevent excessive round trips to the database.
    #
    # @option opts [Boolean] :unique (false) if true, this index will enforce a uniqueness constraint.
    # @option opts [Boolean] :background (false) indicate that the index should be built in the background. This
    #   feature is only available in MongoDB >= 1.3.2.
    # @option opts [Boolean] :drop_dups (nil) If creating a unique index on a collection with pre-existing records,
    #   this option will keep the first document the database indexes and drop all subsequent documents with duplicate values.
    # @option opts [Integer] :min (nil) specify the minimum longitude and latitude for a geo index.
    # @option opts [Integer] :max (nil) specify the maximum longitude and latitude for a geo index.
    #
    # @example Creating a compound index:
    #   @posts.create_index([['subject', Mongo::ASCENDING], ['created_at', Mongo::DESCENDING]])
    #
    # @example Creating a geospatial index:
    #   @restaurants.create_index([['location', Mongo::GEO2D]])
    #
    #   # Note that this will work only if 'location' represents x,y coordinates:
    #   {'location': [0, 50]}
    #   {'location': {'x' => 0, 'y' => 50}}
    #   {'location': {'latitude' => 0, 'longitude' => 50}}
    #
    # @example A geospatial index with alternate longitude and latitude:
    #   @restaurants.create_index([['location', Mongo::GEO2D]], :min => -500, :max => 500)
    #
    # @return [String] the name of the index created.
    #
    # @core indexes create_index-instance_method
    def create_index(spec, opts={})
      opts = opts.dup
      opts[:dropDups] = opts[:drop_dups] if opts[:drop_dups]
      field_spec = parse_index_spec(spec)
      name = opts.delete(:name) || generate_index_name(field_spec)
      name = name.to_s if name

      generate_indexes(field_spec, name, opts)
      name
    end

    # Calls create_index and sets a flag to not do so again for another X minutes.
    # This time can be specified as an option when initializing a Mongo::DB object as options[:cache_time].
    # Any changes to an index will be propagated through regardless of cache time (e.g., a change of index direction).
    #
    # The parameters and options for this method are the same as those for Collection#create_index.
    #
    # @example Call sequence:
    #   Time t: @posts.ensure_index([['subject', Mongo::ASCENDING]]) -- calls create_index and
    #     sets the 5 minute cache
    #   Time t+2min : @posts.ensure_index([['subject', Mongo::ASCENDING]]) -- doesn't do anything
    #   Time t+3min : @posts.ensure_index([['something_else', Mongo::ASCENDING]]) -- calls create_index
    #     and sets 5 minute cache
    #   Time t+10min : @posts.ensure_index([['subject', Mongo::ASCENDING]]) -- calls create_index and
    #     resets the 5 minute counter
    #
    # @return [String] the name of the index.
    def ensure_index(spec, opts={})
      now = Time.now.utc.to_i
      opts[:dropDups] = opts[:drop_dups] if opts[:drop_dups]
      field_spec = parse_index_spec(spec)

      name = opts[:name] || generate_index_name(field_spec)
      name = name.to_s if name

      if !@cache[name] || @cache[name] <= now
        generate_indexes(field_spec, name, opts)
      end

      # Reset the cache here in case there are any errors inserting. Best to be safe.
      @cache[name] = now + @cache_time
      name
    end

    # Drop a specified index.
    #
    # @param [String, Array] name either the name of the index
    #   or the index specification array used to create it
    #
    # @core indexes
    def drop_index(name)
      if name.is_a?(Array)
        return drop_index(index_name(name))
      end
      @cache[name.to_s] = nil
      @db.drop_index(@name, name)
    end

    # Drop all indexes.
    #
    # @core indexes
    def drop_indexes
      @cache = {}

      # Note: calling drop_indexes with no args will drop them all.
      @db.drop_index(@name, '*')
    end

    # Drop the entire collection. USE WITH CAUTION.
    def drop
      @db.drop_collection(@name)
    end

    # Atomically update and return a document using MongoDB's findAndModify command. (MongoDB > 1.3.0)
    #
    # @option opts [Hash] :query ({}) a query selector document for matching the desired document.
    # @option opts [Hash] :update (nil) the update operation to perform on the matched document.
    # @option opts [Array, String, OrderedHash] :sort ({}) specify a sort option for the query using any
    #   of the sort options available for Cursor#sort. Sort order is important if the query will be matching
    #   multiple documents since only the first matching document will be updated and returned.
    # @option opts [Boolean] :remove (false) If true, removes the returned document from the collection.
    # @option opts [Boolean] :new (false) If true, returns the updated document; otherwise, returns the document
    #   prior to update.
    #
    # @return [Hash] the matched document.
    #
    # @core findandmodify find_and_modify-instance_method
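    #
    # A minimal sketch; the 'jobs' collection and its fields are illustrative:
    #
    # @example Atomically claiming the oldest pending job:
    #   jobs.find_and_modify(:query  => {:state => "pending"},
    #                        :sort   => [["created_at", Mongo::ASCENDING]],
    #                        :update => {"$set" => {:state => "running"}},
    #                        :new    => true)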
    def find_and_modify(opts={})
      cmd = BSON::OrderedHash.new
      cmd[:findandmodify] = @name
      cmd.merge!(opts)
      cmd[:sort] = Mongo::Support.format_order_clause(opts[:sort]) if opts[:sort]

      @db.command(cmd)['value']
    end

    # Perform a map-reduce operation on the current collection.
    #
    # @param [String, BSON::Code] map a map function, written in JavaScript.
    # @param [String, BSON::Code] reduce a reduce function, written in JavaScript.
    #
    # @option opts [Hash] :query ({}) a query selector document, like what's passed to #find, to limit
    #   the operation to a subset of the collection.
    # @option opts [Array] :sort ([]) an array of [key, direction] pairs to sort by. Direction should
    #   be specified as Mongo::ASCENDING (or :ascending / :asc) or Mongo::DESCENDING (or :descending / :desc)
    # @option opts [Integer] :limit (nil) if passing a query, number of objects to return from the collection.
    # @option opts [String, BSON::Code] :finalize (nil) a javascript function to apply to the result set after the
    #   map/reduce operation has finished.
    # @option opts [String] :out (nil) a valid output type. In versions of MongoDB prior to v1.7.6,
    #   this option takes the name of a collection for the output results. In versions 1.7.6 and later,
    #   this option specifies the output type. See the core docs for available output types.
    # @option opts [Boolean] :keeptemp (false) if true, the generated collection will be persisted. The default
    #   is false. Note that this option has no effect in versions of MongoDB > v1.7.6.
    # @option opts [Boolean] :verbose (false) if true, provides statistics on job execution time.
    # @option opts [Boolean] :raw (false) if true, return the raw result object from the map_reduce command, and not
    #   the instantiated collection that's returned by default. Note if a collection name isn't returned in the
    #   map-reduce output (as, for example, when using :out => { :inline => 1 }), then you must specify this option
    #   or an ArgumentError will be raised.
    #
    # @return [Collection, Hash] a Mongo::Collection object or a Hash with the map-reduce command's results.
    #
    # @raise ArgumentError if you specify { :out => { :inline => true }} but don't specify :raw => true.
    #
    # @see http://www.mongodb.org/display/DOCS/MapReduce Official MongoDB map/reduce documentation.
    #
    # @core mapreduce map_reduce-instance_method
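    #
    # A brief sketch; the 'comments' collection and the JavaScript function bodies are illustrative:
    #
    # @example Counting comments per author, keeping the results inline:
    #   map    = "function() { emit(this.author, 1); }"
    #   reduce = "function(key, values) { var sum = 0; values.forEach(function(v) { sum += v; }); return sum; }"
    #   comments.map_reduce(map, reduce, :out => {:inline => 1}, :raw => true)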
    def map_reduce(map, reduce, opts={})
      map    = BSON::Code.new(map) unless map.is_a?(BSON::Code)
      reduce = BSON::Code.new(reduce) unless reduce.is_a?(BSON::Code)
      raw    = opts.delete(:raw)

      hash = BSON::OrderedHash.new
      hash['mapreduce'] = self.name
      hash['map'] = map
      hash['reduce'] = reduce
      hash.merge! opts
      if hash[:sort]
        hash[:sort] = Mongo::Support.format_order_clause(hash[:sort])
      end

      result = @db.command(hash)
      unless Mongo::Support.ok?(result)
        raise Mongo::OperationFailure, "map-reduce failed: #{result['errmsg']}"
      end

      if raw
        result
      elsif result["result"]
        @db[result["result"]]
      else
        raise ArgumentError, "Could not instantiate collection from result. If you specified " +
          "{:out => {:inline => true}}, then you must also specify :raw => true to get the results."
      end
    end
    alias :mapreduce :map_reduce

    # Perform a group aggregation.
    #
    # @param [Hash] opts the options for this group operation. The minimum required are :initial
    #   and :reduce.
    #
    # @option opts [Array, String, Symbol] :key (nil) Either the name of a field or a list of fields to group by (optional).
    # @option opts [String, BSON::Code] :keyf (nil) A JavaScript function to be used to generate the grouping keys (optional).
    # @option opts [String, BSON::Code] :cond ({}) A document specifying a query for filtering the documents over
    #   which the aggregation is run (optional).
    # @option opts [Hash] :initial the initial value of the aggregation counter object (required).
    # @option opts [String, BSON::Code] :reduce (nil) a JavaScript aggregation function (required).
    # @option opts [String, BSON::Code] :finalize (nil) a JavaScript function that receives and modifies
    #   each of the resultant grouped objects. Available only when group is run with command
    #   set to true.
    #
    # @return [Array] the command response consisting of grouped items.
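    #
    # A short sketch; the 'sales' collection, its fields, and the JavaScript reduce body are illustrative:
    #
    # @example Summing an amount field per region:
    #   sales.group(:key     => :region,
    #               :initial => {:total => 0},
    #               :reduce  => "function(doc, prev) { prev.total += doc.amount; }")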
    def group(opts, condition={}, initial={}, reduce=nil, finalize=nil)
      if opts.is_a?(Hash)
        return new_group(opts)
      else
        warn "Collection#group no longer takes a list of parameters. This usage is deprecated and will be removed in v2.0. " +
             "Check out the new API at http://api.mongodb.org/ruby/current/Mongo/Collection.html#group-instance_method"
      end

      reduce = BSON::Code.new(reduce) unless reduce.is_a?(BSON::Code)

      group_command = {
        "group" => {
          "ns"      => @name,
          "$reduce" => reduce,
          "cond"    => condition,
          "initial" => initial
        }
      }

      if opts.is_a?(Symbol)
        raise MongoArgumentError, "Group takes either an array of fields to group by or a JavaScript function " +
          "in the form of a String or BSON::Code."
      end

      unless opts.nil?
        if opts.is_a? Array
          key_type  = "key"
          key_value = {}
          opts.each { |k| key_value[k] = 1 }
        else
          key_type  = "$keyf"
          key_value = opts.is_a?(BSON::Code) ? opts : BSON::Code.new(opts)
        end

        group_command["group"][key_type] = key_value
      end

      finalize = BSON::Code.new(finalize) if finalize.is_a?(String)
      if finalize.is_a?(BSON::Code)
        group_command['group']['finalize'] = finalize
      end

      result = @db.command(group_command)

      if Mongo::Support.ok?(result)
        result["retval"]
      else
        raise OperationFailure, "group command failed: #{result['errmsg']}"
      end
    end

    # The value of the read preference. This will be
    # either +:primary+, +:secondary+, or an object
    # representing the tags to be read from.
    def read_preference
      @read_preference
    end

    private

    def new_group(opts={})
      reduce   = opts[:reduce]
      finalize = opts[:finalize]
      cond     = opts.fetch(:cond, {})
      initial  = opts[:initial]

      if !(reduce && initial)
        raise MongoArgumentError, "Group requires at minimum values for initial and reduce."
      end

      cmd = {
        "group" => {
          "ns"      => @name,
          "$reduce" => reduce.to_bson_code,
          "cond"    => cond,
          "initial" => initial
        }
      }

      if finalize
        cmd['group']['finalize'] = finalize.to_bson_code
      end

      if key = opts[:key]
        if key.is_a?(String) || key.is_a?(Symbol)
          key = [key]
        end
        key_value = {}
        key.each { |k| key_value[k] = 1 }
        cmd["group"]["key"] = key_value
      elsif keyf = opts[:keyf]
        cmd["group"]["$keyf"] = keyf.to_bson_code
      end

      result = @db.command(cmd)
      result["retval"]
    end

    public

    # Return a list of distinct values for +key+ across all
    # documents in the collection. The key may use dot notation
    # to reach into an embedded object.
    #
    # @param [String, Symbol] key the key whose distinct values are returned.
    # @param [Hash] query a selector for limiting the result set over which to group.
    #
    # @example Saving zip codes and ages and returning distinct results.
    #   @collection.save({:zip => 10010, :name => {:age => 27}})
    #   @collection.save({:zip => 94108, :name => {:age => 24}})
    #   @collection.save({:zip => 10010, :name => {:age => 27}})
    #   @collection.save({:zip => 99701, :name => {:age => 24}})
    #   @collection.save({:zip => 94108, :name => {:age => 27}})
    #
    #   @collection.distinct(:zip)
    #     [10010, 94108, 99701]
    #   @collection.distinct("name.age")
    #     [27, 24]
    #
    #   # You may also pass a document selector as the second parameter
    #   # to limit the documents over which distinct is run:
    #   @collection.distinct("name.age", {"name.age" => {"$gt" => 24}})
    #     [27]
    #
    # @return [Array] an array of distinct values.
    def distinct(key, query=nil)
      raise MongoArgumentError unless [String, Symbol].include?(key.class)
      command = BSON::OrderedHash.new
      command[:distinct] = @name
      command[:key]      = key.to_s
      command[:query]    = query

      @db.command(command)["values"]
    end

    # Rename this collection.
    #
    # Note: If operating in auth mode, the client must be authorized as an admin to
    # perform this operation.
    #
    # @param [String] new_name the new name for this collection
    #
    # @return [String] the name of the new collection.
    #
    # @raise [Mongo::InvalidNSName] if +new_name+ is an invalid collection name.
    def rename(new_name)
      case new_name
      when Symbol, String
      else
        raise TypeError, "new_name must be a string or symbol"
      end

      new_name = new_name.to_s

      if new_name.empty? or new_name.include? ".."
        raise Mongo::InvalidNSName, "collection names cannot be empty"
      end
      if new_name.include? "$"
        raise Mongo::InvalidNSName, "collection names must not contain '$'"
      end
      if new_name.match(/^\./) or new_name.match(/\.$/)
        raise Mongo::InvalidNSName, "collection names must not start or end with '.'"
      end

      @db.rename_collection(@name, new_name)
      @name = new_name
    end

    # Get information on the indexes for this collection.
    #
    # @return [Hash] a hash where the keys are index names.
    #
    # @core indexes
    def index_information
      @db.index_information(@name)
    end

    # Return a hash containing options that apply to this collection.
    # For all possible keys and values, see DB#create_collection.
    #
    # @return [Hash] options that apply to this collection.
    def options
      @db.collections_info(@name).next_document['options']
    end

    # Return stats on the collection. Uses MongoDB's collstats command.
    #
    # @return [Hash]
    def stats
      @db.command({:collstats => @name})
    end

    # Get the number of documents in this collection.
    #
    # @option opts [Hash] :query ({}) A query selector for filtering the documents counted.
    # @option opts [Integer] :skip (nil) The number of documents to skip.
    # @option opts [Integer] :limit (nil) The maximum number of documents to count.
    #
    # @return [Integer]
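    #
    # A quick sketch; the 'users' collection and the query are illustrative:
    #
    # @example Counting only matching documents:
    #   users.count(:query => {:active => true}, :limit => 100)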
    def count(opts={})
      find(opts[:query],
           :skip  => opts[:skip],
           :limit => opts[:limit]).count(true)
    end

    alias :size :count

    protected

    def normalize_hint_fields(hint)
      case hint
      when String
        {hint => 1}
      when Hash
        hint
      when nil
        nil
      else
        h = BSON::OrderedHash.new
        hint.to_a.each { |k| h[k] = 1 }
        h
      end
    end

    private

    def index_name(spec)
      field_spec = parse_index_spec(spec)
      index_information.each do |index|
        return index[0] if index[1]['key'] == field_spec
      end
      nil
    end

    def parse_index_spec(spec)
      field_spec = BSON::OrderedHash.new
      if spec.is_a?(String) || spec.is_a?(Symbol)
        field_spec[spec.to_s] = 1
      elsif spec.is_a?(Array) && spec.all? {|field| field.is_a?(Array) }
        spec.each do |f|
          if [Mongo::ASCENDING, Mongo::DESCENDING, Mongo::GEO2D].include?(f[1])
            field_spec[f[0].to_s] = f[1]
          else
            raise MongoArgumentError, "Invalid index field #{f[1].inspect}; " +
              "should be one of Mongo::ASCENDING (1), Mongo::DESCENDING (-1) or Mongo::GEO2D ('2d')."
          end
        end
      else
        raise MongoArgumentError, "Invalid index specification #{spec.inspect}; " +
          "should be either a string, symbol, or an array of arrays."
      end
      field_spec
    end

    def generate_indexes(field_spec, name, opts)
      selector = {
        :name => name,
        :ns   => "#{@db.name}.#{@name}",
        :key  => field_spec
      }
      selector.merge!(opts)

      begin
        insert_documents([selector], Mongo::DB::SYSTEM_INDEX_COLLECTION, false, true)
      rescue Mongo::OperationFailure => e
        if selector[:dropDups] && e.message =~ /^11000/
          # NOP. If the user is intentionally dropping dups, we can ignore duplicate key errors.
        else
          raise Mongo::OperationFailure, "Failed to create index #{selector.inspect} with the following error: " +
            "#{e.message}"
        end
      end

      nil
    end

    # Sends a Mongo::Constants::OP_INSERT message to the database.
    # Takes an array of +documents+, an optional +collection_name+, and a
    # +check_keys+ setting.
    def insert_documents(documents, collection_name=@name, check_keys=true, safe=false, flags={})
      if flags[:continue_on_error]
        message = BSON::ByteBuffer.new
        message.put_int(1)
      else
        message = BSON::ByteBuffer.new("\0\0\0\0")
      end
      BSON::BSON_RUBY.serialize_cstr(message, "#{@db.name}.#{collection_name}")
      documents.each do |doc|
        message.put_binary(BSON::BSON_CODER.serialize(doc, check_keys, true, @connection.max_bson_size).to_s)
      end
      raise InvalidOperation, "Exceeded maximum insert size of 16,000,000 bytes" if message.size > 16_000_000

      instrument(:insert, :database => @db.name, :collection => collection_name, :documents => documents) do
        if safe
          @connection.send_message_with_safe_check(Mongo::Constants::OP_INSERT, message, @db.name, nil, safe)
        else
          @connection.send_message(Mongo::Constants::OP_INSERT, message)
        end
      end
      documents.collect { |o| o[:_id] || o['_id'] }
    end

    def generate_index_name(spec)
      indexes = []
      spec.each_pair do |field, direction|
        indexes.push("#{field}_#{direction}")
      end
      indexes.join("_")
    end

  end

end