share the test create-db code between the test scripts; add a comment for the rb_thread_blocking_region helper in the C code

Roger Pack 2009-04-18 22:55:17 +00:00
parent f9c62edae3
commit e14c8b9876
4 changed files with 31 additions and 25 deletions

View File

@@ -248,6 +248,11 @@ typedef struct
} arg_holder, *arg_holder2;
// here's a helper that makes calling rb_thread_blocking_region much cleaner and easier
// syntax: param_count+2, func_pointer to call, [RUBY_UBF_IO or RUBY_UBF_PROCESS], param1, param2...
// the third parameter is the interruptor--possible values appear to be RUBY_UBF_IO or RUBY_UBF_PROCESS; see http://groups.google.com/group/comp.lang.ruby/browse_thread/thread/ad8c1326b2a8e404/00447b9aa15979be?lnk=raot
// ex: (int) returned_this = rb_thread_blocking_region_variable_params(10, &method_name, RUBY_UBF_IO, param1, param2, param3, param4, param5, param6, param7, param8)
static void call_single_function_rb_thread_blocking_region(void *arg_holder_in);
void *rb_thread_blocking_region_variable_params(int number, ...)

test/create_test_db.rb (new file, 22 lines added)
View File

@@ -0,0 +1,22 @@
# To run first execute:
=begin
create database local_test_db;
use local_test_db;
CREATE TABLE test_table (
c1 INT,
c2 VARCHAR(20)
);
=end
# This script shows the effect of using .all_hashes instead of looping on each hash
# run it by substituting in a 'long' [many row] query for the query variable and toggling use_all_hashes here at the top
# note that we load all the rows first, then run .all_hashes on the result [to see more easily the effect of all hashes]
# on my machine and a 200_000 row table, it took 3.38s versus 3.65s for the old .each_hash way [note also that .each_hash is
# almost as fast, now, as .all_hashes--they've both been optimized]
require 'mysqlplus'
puts 'initing db'
# init the DB
conn = Mysql.real_connect('localhost', 'root', '', 'local_test_db')
conn.query("delete from test_table")
200_000.times {conn.query(" insert into test_table (c1, c2) values (3, 'ABCDEFG')")}
puts 'connection pool ready'
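
The comments in this new file describe the .all_hashes-vs-.each_hash comparison that the shared setup feeds. As a rough, hypothetical sketch (not part of this commit), a benchmark script built on top of it might look like the following; the connection parameters, query, and use_all_hashes toggle mirror the test scripts in this repository, while the timing and result.free calls are only illustrative:

# hypothetical sketch -- not committed code; assumes create_test_db.rb (above)
# has already populated local_test_db.test_table
require 'mysqlplus'
require 'create_test_db'

use_all_hashes = true

conn   = Mysql.real_connect('localhost', 'root', '', 'local_test_db')
start  = Time.now
result = conn.query('select * from test_table')   # load all the rows first

saved = []
if use_all_hashes
  saved = result.all_hashes                 # mysqlplus helper: one call returns an array of hashes
else
  result.each_hash { |h| saved << h }       # older per-row style, now almost as fast
end
result.free

puts "fetched #{saved.size} rows in #{Time.now - start}s"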

View File

@@ -1,18 +1,4 @@
# To run first execute:
=begin
create database local_test_db;
use local_test_db;
CREATE TABLE test_table (
c1 INT,
c2 VARCHAR(20)
);
=end
# This script shows the effect of using .all_hashes instead of looping on each hash
# run it by substituting in a 'long' [many row] query for the query variable and toggling use_all_hashes here at the top
# note that we load all the rows first, then run .all_hashes on the result [to see more easily the effect of all hashes]
# on my machine and a 200_000 row table, it took 3.38s versus 3.65s for the old .each_hash way [note also that .each_hash is
# almost as fast, now, as .all_hashes--they've both been optimized]
require 'mysqlplus'
require 'create_test_db'
use_the_all_hashes_method = true
@@ -25,13 +11,6 @@ $count.times do
$connections << Mysql.real_connect('localhost','root', '', 'local_test_db')
end
puts 'initing db'
# init the DB
conn = Mysql.real_connect('localhost', 'root', '', 'local_test_db')
conn.query("delete from test_table")
200_000.times {conn.query(" insert into test_table (c1, c2) values (3, 'ABCDEFG')")}
puts 'connection pool ready'
$threads = []
$count.times do |i|

View File

@@ -5,7 +5,7 @@
# from .82s to .62s
# you can experiment with it by changing the query here to be a long one, and toggling the do_the_use_query_optimization variable
# this also has the interesting property of 'freeing' Ruby to do thread changes mid-query.
require 'mysqlplus'
require 'create_test_db'
do_the_use_query_optimization = true
@@ -15,7 +15,7 @@ $start = Time.now
$connections = []
$count.times do
$connections << Mysql.real_connect('localhost','root', '', 'local_leadgen_dev')
$connections << Mysql.real_connect('localhost','root', '', 'local_test_db')
end
puts 'connection pool ready'
@@ -27,7 +27,7 @@ $count.times do |i|
puts "sending query on connection #{i}"
conn = $connections[i]
saved = []
query = "select * from campus_zips"
query = "select * from test_table"
if do_the_use_query_optimization
conn.query_with_result=false
result = conn.async_query(query)
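
The hunk is cut off here; the rest of the script goes on to consume the query's rows. One plausible continuation, offered only as a sketch and not as the committed code, assumes the mysql gem's usual convention that after issuing a query with query_with_result = false the caller fetches the rows itself (here via use_result, which streams rows instead of buffering the whole set):

# hypothetical sketch -- not committed code; the follow-up calls are not shown
# in this hunk, and use_result is an assumption about how the streamed rows
# are fetched after query_with_result = false
require 'mysqlplus'
require 'create_test_db'

conn  = Mysql.real_connect('localhost', 'root', '', 'local_test_db')
query = 'select * from test_table'
saved = []

conn.query_with_result = false
conn.async_query(query)              # issue the query; other Ruby threads can run while it executes
result = conn.use_result             # stream rows as they arrive (mysql_use_result)
result.each_hash { |h| saved << h }
result.free

puts "streamed #{saved.size} rows"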