added ensure_index

parent 5e81cf2f82
commit d33ddfb8e0
@@ -74,6 +74,8 @@ module Mongo
       @db, @name = db, name
       @connection = @db.connection
       @logger = @connection.logger
+      @cache_time = @db.cache_time
+      @cache = Hash.new(0)
       unless pk_factory
         @safe = options.has_key?(:safe) ? options[:safe] : @db.safe
       end
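The cache is a plain Hash with a default value of 0, so a field that has never been indexed always reads as already expired. A minimal sketch of the timestamp comparison that `ensure_index` performs (plain Ruby, runnable on its own):

```ruby
cache = Hash.new(0)          # missing keys read as 0
now   = Time.now.utc.to_i

cache["x_-1"] <= now         # => true: never cached, so the index gets (re)built
cache["x_-1"] = now + 300    # record a 5-minute expiry
cache["x_-1"] <= now         # => false: calls within the window are skipped
```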
@@ -407,53 +409,48 @@ module Mongo
     # @core indexes create_index-instance_method
     def create_index(spec, opts={})
       opts[:dropDups] = opts.delete(:drop_dups) if opts[:drop_dups]
-      field_spec = BSON::OrderedHash.new
-      if spec.is_a?(String) || spec.is_a?(Symbol)
-        field_spec[spec.to_s] = 1
-      elsif spec.is_a?(Array) && spec.all? {|field| field.is_a?(Array) }
-        spec.each do |f|
-          if [Mongo::ASCENDING, Mongo::DESCENDING, Mongo::GEO2D].include?(f[1])
-            field_spec[f[0].to_s] = f[1]
-          else
-            raise MongoArgumentError, "Invalid index field #{f[1].inspect}; " +
-              "should be one of Mongo::ASCENDING (1), Mongo::DESCENDING (-1) or Mongo::GEO2D ('2d')."
-          end
-        end
-      else
-        raise MongoArgumentError, "Invalid index specification #{spec.inspect}; " +
-          "should be either a string, symbol, or an array of arrays."
-      end
-
+      field_spec = parse_index_spec(spec)
       name = opts.delete(:name) || generate_index_name(field_spec)
-
-      selector = {
-        :name => name,
-        :ns => "#{@db.name}.#{@name}",
-        :key => field_spec
-      }
-      selector.merge!(opts)
-
-      begin
-      insert_documents([selector], Mongo::DB::SYSTEM_INDEX_COLLECTION, false, true)
-
-      rescue Mongo::OperationFailure => e
-        if selector[:dropDups] && e.message =~ /^11000/
-          # NOP. If the user is intentionally dropping dups, we can ignore duplicate key errors.
-        else
-          raise Mongo::OperationFailure, "Failed to create index #{selector.inspect} with the following error: " +
-            "#{e.message}"
-        end
-      end
-
+      generate_indexes(field_spec, name, opts)
       name
     end

+    # Calls create_index and sets a flag to not do so again for another X minutes.
+    # This time can be specified as an option when initializing a Mongo::DB object as options[:cache_time].
+    # Any changes to an index will be propagated regardless of cache time (e.g., if you change index direction).
+    #
+    # @example Call sequence:
+    #   Time t:       @posts.ensure_index([['subject', Mongo::ASCENDING]]) -- calls create_index and sets the 5 minute cache
+    #   Time t+2min:  @posts.ensure_index([['subject', Mongo::ASCENDING]]) -- doesn't do anything
+    #   Time t+3min:  @posts.ensure_index([['something_else', Mongo::ASCENDING]]) -- calls create_index and sets the 5 minute cache
+    #   Time t+10min: @posts.ensure_index([['subject', Mongo::ASCENDING]]) -- calls create_index and resets the 5 minute counter
+    def ensure_index(spec, opts={})
+      valid = BSON::OrderedHash.new
+      now = Time.now.utc.to_i
+      field_spec = parse_index_spec(spec)
+
+      field_spec.each do |key, value|
+        cache_key = generate_index_name({key => value}) # bit of a hack.
+        timeout = @cache[cache_key] || 0
+        valid[key] = value if timeout <= now
+      end
+
+      name = opts.delete(:name) || generate_index_name(valid)
+      generate_indexes(valid, name, opts) if valid.any?
+
+      # I do this here instead of in the above loop in case there were any errors inserting. Best to be safe.
+      name.each {|n| @cache[n] = now + @cache_time}
+      name
+    end

     # Drop a specified index.
     #
     # @param [String] name
     #
     # @core indexes
     def drop_index(name)
+      @cache[name] = nil # I do this first because there is no harm in clearing the cache.
       @db.drop_index(@name, name)
     end
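Taken together, a usage sketch of the call sequence described in the `ensure_index` comment above; the connection parameters and collection names are illustrative, and a running mongod is assumed:

```ruby
require 'mongo'

con   = Mongo::Connection.new('localhost', 27017)
db    = con.db('blog')                  # cache_time defaults to 300 seconds
posts = db.collection('posts')

posts.ensure_index([['subject', Mongo::ASCENDING]])  # builds the index and caches "subject_1"
posts.ensure_index([['subject', Mongo::ASCENDING]])  # within 5 minutes: no server round-trip
posts.ensure_index([['subject', Mongo::DESCENDING]]) # new direction, new cache key: builds again
```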
@@ -461,7 +458,7 @@ module Mongo
     #
     # @core indexes
     def drop_indexes
+      @cache = {}
       # Note: calling drop_indexes with no args will drop them all.
       @db.drop_index(@name, '*')
     end
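Because `drop_index` and `drop_indexes` clear the corresponding cache entries, a drop followed immediately by `ensure_index` rebuilds the index rather than waiting out the cache window. Continuing the illustrative example above:

```ruby
posts.drop_index('subject_1')                        # also clears the cache entry for "subject_1"
posts.ensure_index([['subject', Mongo::ASCENDING]])  # rebuilds immediately, no 5-minute wait
```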
@@ -712,6 +709,51 @@ module Mongo
     end

     private

+    def parse_index_spec(spec)
+      field_spec = BSON::OrderedHash.new
+      if spec.is_a?(String) || spec.is_a?(Symbol)
+        field_spec[spec.to_s] = 1
+      elsif spec.is_a?(Array) && spec.all? {|field| field.is_a?(Array) }
+        spec.each do |f|
+          if [Mongo::ASCENDING, Mongo::DESCENDING, Mongo::GEO2D].include?(f[1])
+            field_spec[f[0].to_s] = f[1]
+          else
+            raise MongoArgumentError, "Invalid index field #{f[1].inspect}; " +
+              "should be one of Mongo::ASCENDING (1), Mongo::DESCENDING (-1) or Mongo::GEO2D ('2d')."
+          end
+        end
+      else
+        raise MongoArgumentError, "Invalid index specification #{spec.inspect}; " +
+          "should be either a string, symbol, or an array of arrays."
+      end
+      field_spec
+    end
+
+    def generate_indexes(field_spec, name, opts)
+      selector = {
+        :name => name,
+        :ns => "#{@db.name}.#{@name}",
+        :key => field_spec
+      }
+      selector.merge!(opts)
+
+      begin
+        insert_documents([selector], Mongo::DB::SYSTEM_INDEX_COLLECTION, false, true)
+      rescue Mongo::OperationFailure => e
+        if selector[:dropDups] && e.message =~ /^11000/
+          # NOP. If the user is intentionally dropping dups, we can ignore duplicate key errors.
+        else
+          raise Mongo::OperationFailure, "Failed to create index #{selector.inspect} with the following error: " +
+            "#{e.message}"
+        end
+      end
+
+      nil
+    end
+
     # Sends a Mongo::Constants::OP_INSERT message to the database.
     # Takes an array of +documents+, an optional +collection_name+, and a
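For illustration, the normalization `parse_index_spec` performs; it is a private helper, so the sketch below calls it via `send`, and the expected return values follow directly from the code above:

```ruby
posts.send(:parse_index_spec, :subject)
# => {"subject" => 1}

posts.send(:parse_index_spec, [['subject', Mongo::ASCENDING], ['votes', Mongo::DESCENDING]])
# => {"subject" => 1, "votes" => -1}   (a BSON::OrderedHash, preserving field order)

posts.send(:parse_index_spec, 42)
# => raises MongoArgumentError
```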
@@ -51,6 +51,9 @@ module Mongo
     # The Mongo::Connection instance connecting to the MongoDB server.
     attr_reader :connection

+    # The length of time that Collection.ensure_index should cache index calls
+    attr_accessor :cache_time
+
     # Instances of DB are normally obtained by calling Mongo#db.
     #
     # @param [String] db_name the database name.
@@ -70,6 +73,7 @@ module Mongo
     #   value is provided, the default value set on this instance's Connection object will be used. This
     #   default can be overridden upon instantiation of any collection by explicitly setting a :safe value
     #   on initialization.
+    # @option options [Integer] :cache_time (300) Set the time that all ensure_index calls should cache the command.
     #
     # @core databases constructor_details
     def initialize(db_name, connection, options={})
@@ -78,6 +82,7 @@ module Mongo
       @strict = options[:strict]
       @pk_factory = options[:pk]
       @safe = options.has_key?(:safe) ? options[:safe] : @connection.safe
+      @cache_time = options[:cache_time] || 300 # 5 minutes.
     end

     # Authenticate with the given username and password. Note that mongod
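Since `cache_time` is exposed with `attr_accessor`, the window can be set at construction or changed afterwards; setting it to 0 effectively disables the cache, which the unit tests below rely on. A short sketch with an illustrative database name:

```ruby
db = con.db('blog', :cache_time => 60)  # cache ensure_index calls for one minute
db.cache_time = 0                       # from here on, every ensure_index call hits the server
```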
@@ -565,6 +565,30 @@ class TestCollection < Test::Unit::TestCase
     assert_equal 1, x
   end

+  def test_ensure_index
+    @@test.drop_indexes
+    @@test.insert("x" => "hello world")
+    assert_equal 1, @@test.index_information.keys.count # default index
+
+    @@test.ensure_index([["x", Mongo::DESCENDING]], {})
+    assert_equal 2, @@test.index_information.keys.count
+    assert @@test.index_information.keys.include? "x_-1"
+
+    @@test.ensure_index([["x", Mongo::ASCENDING]])
+    assert @@test.index_information.keys.include? "x_1"
+
+    @@test.drop_index("x_1")
+    assert_equal 2, @@test.index_information.keys.count
+    @@test.drop_index("x_-1")
+    assert_equal 1, @@test.index_information.keys.count
+
+    @@test.ensure_index([["x", Mongo::DESCENDING]], {}) # should work as not cached.
+    assert_equal 2, @@test.index_information.keys.count
+    assert @@test.index_information.keys.include? "x_-1"
+  end

   context "Grouping" do
     setup do
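The index names asserted here come from `generate_index_name`, which (as the assertions show) joins each field with its direction; the single key present on a fresh collection is the automatic `_id` index. A sketch of what the test observes:

```ruby
coll = db.collection('things')          # illustrative collection
coll.insert('x' => 1)
coll.ensure_index([['x', Mongo::DESCENDING]])
coll.index_information.keys.sort
# => ["_id_", "x_-1"]  (the default _id index plus the one just ensured)
```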
@@ -81,5 +81,54 @@ class CollectionTest < Test::Unit::TestCase
       @logger.stubs(:debug)
       @coll.update({}, {:title => 'Moby Dick'}, :safe => true)
     end

+    should "not call insert for each ensure_index call" do
+      @conn = Connection.new('localhost', 27017, :logger => @logger, :connect => false)
+      @db   = @conn['testing']
+      @coll = @db.collection('books')
+      @coll.expects(:generate_indexes).once
+
+      @coll.ensure_index [["x", Mongo::DESCENDING]]
+      @coll.ensure_index [["x", Mongo::DESCENDING]]
+    end
+
+    should "call generate_indexes for a new direction on the same field for ensure_index" do
+      @conn = Connection.new('localhost', 27017, :logger => @logger, :connect => false)
+      @db   = @conn['testing']
+      @coll = @db.collection('books')
+      @coll.expects(:generate_indexes).twice
+
+      @coll.ensure_index [["x", Mongo::DESCENDING]]
+      @coll.ensure_index [["x", Mongo::ASCENDING]]
+    end
+
+    should "call generate_indexes twice because the cache time is 0 seconds" do
+      @conn = Connection.new('localhost', 27017, :logger => @logger, :connect => false)
+      @db   = @conn['testing']
+      @db.cache_time = 0
+      @coll = @db.collection('books')
+      @coll.expects(:generate_indexes).twice
+
+      @coll.ensure_index [["x", Mongo::DESCENDING]]
+      @coll.ensure_index [["x", Mongo::DESCENDING]]
+    end
+
+    should "call generate_indexes for each key when calling ensure_indexes" do
+      @conn = Connection.new('localhost', 27017, :logger => @logger, :connect => false)
+      @db   = @conn['testing']
+      @db.cache_time = 300
+      @coll = @db.collection('books')
+      @coll.expects(:generate_indexes).once.with do |a, b, c|
+        a == {"x" => -1, "y" => -1}
+      end
+
+      @coll.ensure_index [["x", Mongo::DESCENDING], ["y", Mongo::DESCENDING]]
+    end
   end
 end