author     Antonin Kral <a.kral@bobek.cz>  2011-09-14 17:08:06 +0200
committer  Antonin Kral <a.kral@bobek.cz>  2011-09-14 17:08:06 +0200
commit     5d342a758c6095b4d30aba0750b54f13b8916f51 (patch)
tree       762e9aa84781f5e3b96db2c02d356c29cf0217c0
parent     cbe2d992e9cd1ea66af9fa91df006106775d3073 (diff)
download   mongodb-5d342a758c6095b4d30aba0750b54f13b8916f51.tar.gz
Imported Upstream version 2.0.0
-rw-r--r--  .gitignore | 8
-rwxr-xr-x  README | 31
-rw-r--r--  SConstruct | 321
-rw-r--r--  bson/bson-inl.h | 373
-rw-r--r--  bson/bson.h | 45
-rw-r--r--  bson/bsondemo/bsondemo.cpp | 6
-rw-r--r--  bson/bsondemo/bsondemo.vcxproj | 8
-rw-r--r--  bson/bsonelement.h | 50
-rw-r--r--  bson/bsonmisc.h | 16
-rw-r--r--  bson/bsonobj.h | 134
-rw-r--r--  bson/bsonobjbuilder.h | 121
-rw-r--r--  bson/bsonobjiterator.h | 54
-rw-r--r--  bson/inline_decls.h | 69
-rw-r--r--  bson/oid.cpp | 31
-rw-r--r--  bson/ordering.h | 23
-rw-r--r--  bson/stringdata.h | 52
-rw-r--r--  bson/util/atomic_int.h | 11
-rw-r--r--  bson/util/builder.h | 87
-rw-r--r--  bson/util/misc.h | 21
-rwxr-xr-x  buildscripts/errorcodes.py | 62
-rw-r--r--  buildscripts/hacks_ubuntu.py | 10
-rw-r--r--  buildscripts/makealldists.py | 291
-rw-r--r--  buildscripts/makedist.py | 940
-rw-r--r--  buildscripts/mergerepositories.py | 194
-rw-r--r--  buildscripts/packager.py | 982
-rwxr-xr-x  buildscripts/smoke.py | 42
-rw-r--r--  client/clientOnly.cpp | 5
-rw-r--r--  client/connpool.cpp | 223
-rw-r--r--  client/connpool.h | 119
-rw-r--r--  client/dbclient.cpp | 147
-rw-r--r--  client/dbclient.h | 100
-rw-r--r--  client/dbclient_rs.cpp | 335
-rw-r--r--  client/dbclient_rs.h | 86
-rw-r--r--  client/dbclientcursor.cpp | 123
-rw-r--r--  client/dbclientcursor.h | 82
-rw-r--r--  client/distlock.cpp | 921
-rw-r--r--  client/distlock.h | 164
-rw-r--r--  client/distlock_test.cpp | 394
-rw-r--r--  client/examples/clientTest.cpp | 29
-rw-r--r--  client/examples/httpClientTest.cpp | 33
-rw-r--r--  client/examples/insert_demo.cpp | 47
-rw-r--r--  client/examples/rs.cpp | 80
-rwxr-xr-x  client/examples/simple_client_demo.vcxproj | 92
-rwxr-xr-x  client/examples/simple_client_demo.vcxproj.filters | 21
-rw-r--r--  client/examples/whereExample.cpp | 3
-rw-r--r--  client/mongo_client_lib.cpp | 31
-rw-r--r--  client/parallel.cpp | 369
-rw-r--r--  client/parallel.h | 22
-rw-r--r--  client/redef_macros.h | 5
-rw-r--r--  client/simple_client_demo.cpp | 36
-rw-r--r--  client/syncclusterconnection.cpp | 42
-rw-r--r--  client/syncclusterconnection.h | 22
-rw-r--r--  client/undef_macros.h | 2
-rw-r--r--  db/btree.cpp | 1198
-rw-r--r--  db/btree.h | 942
-rw-r--r--  db/btreebuilder.cpp | 184
-rw-r--r--  db/btreebuilder.h | 53
-rw-r--r--  db/btreecursor.cpp | 394
-rw-r--r--  db/cap.cpp | 12
-rw-r--r--  db/client.cpp | 241
-rw-r--r--  db/client.h | 28
-rw-r--r--  db/clientcursor.cpp | 276
-rw-r--r--  db/clientcursor.h | 56
-rw-r--r--  db/cloner.cpp | 123
-rw-r--r--  db/cloner.h | 39
-rw-r--r--  db/cmdline.cpp | 125
-rw-r--r--  db/cmdline.h | 69
-rw-r--r--  db/commands.cpp | 51
-rw-r--r--  db/commands.h | 34
-rw-r--r--  db/commands/distinct.cpp | 29
-rw-r--r--  db/commands/find_and_modify.cpp | 153
-rw-r--r--  db/commands/group.cpp | 37
-rw-r--r--  db/commands/isself.cpp | 18
-rw-r--r--  db/commands/mr.cpp | 465
-rw-r--r--  db/commands/mr.h | 49
-rw-r--r--  db/common.cpp | 39
-rw-r--r--  db/compact.cpp | 361
-rw-r--r--  db/compact.h | 50
-rw-r--r--  db/concurrency.h | 9
-rw-r--r--  db/curop.h | 98
-rw-r--r--  db/cursor.h | 20
-rw-r--r--  db/database.cpp | 106
-rw-r--r--  db/database.h | 20
-rw-r--r--  db/db.cpp | 489
-rw-r--r--  db/db.h | 38
-rwxr-xr-x [-rw-r--r--]  db/db.vcxproj | 1627
-rwxr-xr-x  db/db.vcxproj.filters | 96
-rwxr-xr-x  db/db_10.sln | 28
-rw-r--r--  db/dbcommands.cpp | 495
-rw-r--r--  db/dbcommands_admin.cpp | 226
-rw-r--r--  db/dbcommands_generic.cpp | 150
-rw-r--r--  db/dbeval.cpp | 6
-rw-r--r--  db/dbhelpers.cpp | 36
-rw-r--r--  db/dbmessage.cpp | 108
-rw-r--r--  db/dbmessage.h | 137
-rw-r--r--  db/dbwebserver.cpp | 35
-rw-r--r--  db/diskloc.h | 16
-rw-r--r--  db/driverHelpers.cpp | 2
-rw-r--r--  db/dur.cpp | 292
-rw-r--r--  db/dur.h | 10
-rw-r--r--  db/dur_commitjob.cpp | 19
-rw-r--r--  db/dur_commitjob.h | 19
-rw-r--r--  db/dur_journal.cpp | 240
-rw-r--r--  db/dur_journal.h | 16
-rw-r--r--  db/dur_journalformat.h | 58
-rw-r--r--  db/dur_journalimpl.h | 22
-rw-r--r--  db/dur_preplogbuffer.cpp | 52
-rw-r--r--  db/dur_recover.cpp | 186
-rw-r--r--  db/dur_recover.h | 15
-rw-r--r--  db/dur_stats.h | 3
-rw-r--r--  db/dur_writetodatafiles.cpp | 24
-rw-r--r--  db/durop.cpp | 3
-rw-r--r--  db/durop.h | 2
-rw-r--r--  db/extsort.cpp | 15
-rw-r--r--  db/extsort.h | 59
-rw-r--r--  db/geo/2d.cpp | 2937
-rw-r--r--  db/geo/core.h | 90
-rw-r--r--  db/geo/haystack.cpp | 18
-rw-r--r--  db/index.cpp | 140
-rw-r--r--  db/index.h | 85
-rw-r--r--  db/indexkey.cpp | 381
-rw-r--r--  db/indexkey.h | 29
-rw-r--r--  db/instance.cpp | 285
-rw-r--r--  db/instance.h | 14
-rw-r--r--  db/introspect.cpp | 61
-rw-r--r--  db/introspect.h | 3
-rw-r--r--  db/jsobj.cpp | 477
-rw-r--r--  db/json.cpp | 21
-rw-r--r--  db/key.cpp | 671
-rw-r--r--  db/key.h | 112
-rw-r--r--  db/lasterror.cpp | 4
-rw-r--r--  db/matcher.cpp | 610
-rw-r--r--  db/matcher.h | 132
-rw-r--r--  db/matcher_covered.cpp | 32
-rw-r--r--  db/modules/mms.cpp | 4
-rw-r--r--  db/mongommf.cpp | 118
-rw-r--r--  db/mongommf.h | 5
-rw-r--r--  db/namespace-inl.h | 12
-rw-r--r--  db/namespace.cpp | 27
-rw-r--r--  db/namespace.h | 77
-rw-r--r--  db/nonce.cpp | 50
-rw-r--r--  db/nonce.h | 20
-rw-r--r--  db/oplog.cpp | 216
-rw-r--r--  db/oplog.h | 141
-rw-r--r--  db/oplogreader.h | 51
-rw-r--r--  db/ops/delete.cpp | 242
-rw-r--r--  db/ops/delete.h | 33
-rw-r--r--  db/ops/query.cpp (renamed from db/query.cpp) | 358
-rw-r--r--  db/ops/query.h (renamed from db/query.h) | 154
-rw-r--r--  db/ops/update.cpp (renamed from db/update.cpp) | 431
-rw-r--r--  db/ops/update.h (renamed from db/update.h) | 47
-rw-r--r--  db/pdfile.cpp | 880
-rw-r--r--  db/pdfile.h | 94
-rw-r--r--  db/projection.cpp | 2
-rw-r--r--  db/projection.h | 2
-rw-r--r--  db/queryoptimizer.cpp | 829
-rw-r--r--  db/queryoptimizer.h | 543
-rw-r--r--  db/queryoptimizercursor.cpp | 387
-rw-r--r--  db/querypattern.cpp | 54
-rw-r--r--  db/querypattern.h | 76
-rw-r--r--  db/queryutil-inl.h | 153
-rw-r--r--  db/queryutil.cpp | 716
-rw-r--r--  db/queryutil.h | 744
-rw-r--r--  db/record.cpp | 230
-rw-r--r--  db/repl.cpp | 832
-rw-r--r--  db/repl.h | 212
-rw-r--r--  db/repl/connections.h | 15
-rw-r--r--  db/repl/consensus.cpp | 118
-rw-r--r--  db/repl/health.cpp | 55
-rw-r--r--  db/repl/health.h | 14
-rw-r--r--  db/repl/heartbeat.cpp | 89
-rw-r--r--  db/repl/manager.cpp | 62
-rw-r--r--  db/repl/multicmd.h | 6
-rw-r--r--  db/repl/replset_commands.cpp | 97
-rw-r--r--  db/repl/rs.cpp | 270
-rw-r--r--  db/repl/rs.h | 256
-rw-r--r--  db/repl/rs_config.cpp | 348
-rw-r--r--  db/repl/rs_config.h | 144
-rw-r--r--  db/repl/rs_initialsync.cpp | 135
-rw-r--r--  db/repl/rs_initiate.cpp | 41
-rw-r--r--  db/repl/rs_member.h | 19
-rw-r--r--  db/repl/rs_rollback.cpp | 28
-rw-r--r--  db/repl/rs_sync.cpp | 341
-rw-r--r--  db/repl_block.cpp | 49
-rw-r--r--  db/repl_block.h | 1
-rw-r--r--  db/replpair.h | 238
-rw-r--r--  db/replutil.h | 98
-rw-r--r--  db/restapi.cpp | 12
-rw-r--r--  db/scanandorder.cpp | 93
-rw-r--r--  db/scanandorder.h | 120
-rw-r--r--  db/security.cpp | 74
-rwxr-xr-x [-rw-r--r--]  db/security.h | 87
-rw-r--r--  db/security_commands.cpp | 175
-rw-r--r--  db/security_common.cpp (renamed from db/security_key.cpp) | 35
-rw-r--r--  db/security_common.h | 83
-rw-r--r--  db/security_key.h | 47
-rw-r--r--  db/stats/counters.h | 2
-rw-r--r--  db/stats/snapshots.cpp | 14
-rw-r--r--  db/stats/top.cpp | 12
-rw-r--r--  dbtests/basictests.cpp | 77
-rw-r--r--  dbtests/btreetests.cpp | 1702
-rw-r--r--  dbtests/btreetests.inl | 1702
-rw-r--r--  dbtests/clienttests.cpp | 2
-rw-r--r--  dbtests/cursortests.cpp | 66
-rw-r--r--  dbtests/dbtests.cpp | 3
-rw-r--r--  dbtests/directclienttests.cpp | 31
-rw-r--r--  dbtests/framework.cpp | 65
-rw-r--r--  dbtests/jsobjtests.cpp | 381
-rw-r--r--  dbtests/jsontests.cpp | 23
-rw-r--r--  dbtests/jstests.cpp | 119
-rw-r--r--  dbtests/mockdbclient.h | 97
-rw-r--r--  dbtests/namespacetests.cpp | 500
-rw-r--r--  dbtests/pairingtests.cpp | 344
-rw-r--r--  dbtests/pdfiletests.cpp | 4
-rw-r--r--  dbtests/perf/perftest.cpp | 70
-rw-r--r--  dbtests/perftests.cpp | 691
-rw-r--r--  dbtests/queryoptimizertests.cpp | 2885
-rw-r--r--  dbtests/querytests.cpp | 179
-rw-r--r--  dbtests/queryutiltests.cpp | 989
-rw-r--r--  dbtests/repltests.cpp | 183
-rw-r--r--  dbtests/socktests.cpp | 2
-rw-r--r--  dbtests/spin_lock_test.cpp | 13
-rwxr-xr-x  dbtests/test.sln | 26
-rw-r--r--  dbtests/test.vcxproj | 1486
-rwxr-xr-x  dbtests/test.vcxproj.filters | 275
-rw-r--r--  dbtests/threadedtests.cpp | 389
-rw-r--r--  dbtests/updatetests.cpp | 17
-rw-r--r--  distsrc/README | 74
-rwxr-xr-x [-rw-r--r--]  distsrc/client/SConstruct | 11
-rw-r--r--  docs/errors.md | 1564
-rw-r--r--  doxygenConfig | 2
-rw-r--r--  jstests/ageoutjournalfiles.js | 16
-rw-r--r--  jstests/and.js | 86
-rw-r--r--  jstests/and2.js | 27
-rw-r--r--  jstests/and3.js | 66
-rw-r--r--  jstests/andor.js | 105
-rw-r--r--  jstests/apitest_dbcollection.js | 2
-rw-r--r--  jstests/array_match2.js | 25
-rw-r--r--  jstests/array_match3.js | 13
-rw-r--r--  jstests/arrayfind2.js | 3
-rw-r--r--  jstests/arrayfind4.js | 22
-rw-r--r--  jstests/arrayfind5.js | 23
-rw-r--r--  jstests/auth/auth1.js | 5
-rw-r--r--  jstests/auth/auth2.js | 23
-rw-r--r--  jstests/auth/rename.js | 40
-rw-r--r--  jstests/auth1.js | 17
-rw-r--r--  jstests/auth2.js | 6
-rw-r--r--  jstests/bench_test1.js | 16
-rw-r--r--  jstests/bench_test2.js | 41
-rw-r--r--  jstests/big_object1.js | 2
-rw-r--r--  jstests/binData.js | 14
-rw-r--r--  jstests/capped.js | 8
-rw-r--r--  jstests/capped2.js | 10
-rw-r--r--  jstests/capped5.js | 7
-rw-r--r--  jstests/capped6.js | 2
-rw-r--r--  jstests/capped8.js | 40
-rw-r--r--  jstests/capped9.js | 28
-rw-r--r--  jstests/cappeda.js | 33
-rw-r--r--  jstests/compact.js | 37
-rwxr-xr-x  jstests/compact_speed_test.js | 61
-rw-r--r--  jstests/date1.js | 5
-rw-r--r--  jstests/date2.js | 13
-rw-r--r--  jstests/date3.js | 31
-rw-r--r--  jstests/dbcase.js | 16
-rw-r--r--  jstests/dbcase2.js | 9
-rw-r--r--  jstests/dbhash.js | 10
-rw-r--r--  jstests/delx.js | 1
-rw-r--r--  jstests/disk/directoryperdb.js | 2
-rw-r--r--  jstests/disk/diskfull.js | 8
-rw-r--r--  jstests/disk/newcollection.js | 20
-rw-r--r--  jstests/disk/norepeat.js | 2
-rw-r--r--  jstests/disk/quota.js | 47
-rw-r--r--  jstests/disk/quota2.js | 38
-rw-r--r--  jstests/disk/repair3.js | 2
-rw-r--r--  jstests/disk/repair5.js | 43
-rw-r--r--  jstests/distinct1.js | 1
-rw-r--r--  jstests/distinct_index1.js | 10
-rw-r--r--  jstests/drop2.js | 2
-rw-r--r--  jstests/drop3.js | 29
-rw-r--r--  jstests/dropdb.js | 17
-rw-r--r--  jstests/dropdb_race.js | 44
-rw-r--r--  jstests/dur/closeall.js | 76
-rw-r--r--  jstests/dur/data/empty.bson | 0
-rw-r--r--  jstests/dur/diskfull.js | 51
-rw-r--r--  jstests/dur/dropdb.js | 21
-rwxr-xr-x  jstests/dur/dur1.js | 25
-rwxr-xr-x  jstests/dur/dur1_tool.js | 152
-rw-r--r--  jstests/dur/indexbg.js | 7
-rw-r--r--  jstests/dur/indexbg2.js | 19
-rwxr-xr-x  jstests/dur/manyRestart.js | 4
-rw-r--r--  jstests/eval_nolock.js | 2
-rw-r--r--  jstests/evalb.js | 2
-rw-r--r--  jstests/evalc.js | 11
-rw-r--r--  jstests/evald.js | 10
-rw-r--r--  jstests/exists3.js | 21
-rw-r--r--  jstests/exists4.js | 20
-rw-r--r--  jstests/exists5.js | 33
-rw-r--r--  jstests/exists6.js | 63
-rw-r--r--  jstests/exists7.js | 21
-rw-r--r--  jstests/exists8.js | 76
-rw-r--r--  jstests/exists9.js | 41
-rw-r--r--  jstests/find8.js | 27
-rw-r--r--  jstests/find_and_modify2.js | 6
-rw-r--r--  jstests/fsync.js | 17
-rw-r--r--  jstests/geo10.js | 21
-rw-r--r--  jstests/geo4.js | 2
-rw-r--r--  jstests/geo_array0.js | 25
-rw-r--r--  jstests/geo_array1.js | 30
-rw-r--r--  jstests/geo_array2.js | 163
-rw-r--r--  jstests/geo_borders.js | 263
-rw-r--r--  jstests/geo_center_sphere2.js | 158
-rw-r--r--  jstests/geo_distinct.js | 16
-rw-r--r--  jstests/geo_fiddly_box.js | 44
-rw-r--r--  jstests/geo_fiddly_box2.js | 32
-rw-r--r--  jstests/geo_group.js | 35
-rw-r--r--  jstests/geo_mapreduce.js | 56
-rw-r--r--  jstests/geo_mapreduce2.js | 36
-rw-r--r--  jstests/geo_multinest0.js | 63
-rw-r--r--  jstests/geo_multinest1.js | 37
-rw-r--r--  jstests/geo_oob_sphere.js | 42
-rw-r--r--  jstests/geo_poly_edge.js | 22
-rw-r--r--  jstests/geo_poly_line.js | 17
-rw-r--r--  jstests/geo_polygon1.js | 74
-rw-r--r--  jstests/geo_polygon2.js | 266
-rw-r--r--  jstests/geo_polygon3.js | 54
-rw-r--r--  jstests/geo_regex0.js | 18
-rw-r--r--  jstests/geo_small_large.js | 151
-rw-r--r--  jstests/geo_uniqueDocs.js | 38
-rw-r--r--  jstests/getlog1.js | 24
-rw-r--r--  jstests/group7.js | 43
-rw-r--r--  jstests/hint1.js | 12
-rw-r--r--  jstests/idhack.js | 23
-rw-r--r--  jstests/in8.js | 23
-rw-r--r--  jstests/in9.js | 35
-rw-r--r--  jstests/ina.js | 15
-rw-r--r--  jstests/index11.js | 30
-rw-r--r--  jstests/index9.js | 8
-rw-r--r--  jstests/index_big1.js | 39
-rwxr-xr-x  jstests/index_bigkeys.js | 78
-rw-r--r--  jstests/index_check5.js | 2
-rw-r--r--  jstests/index_check8.js | 12
-rw-r--r--  jstests/index_fornew.js | 13
-rw-r--r--  jstests/index_maxkey.js | 27
-rwxr-xr-x  jstests/indexbindata.js | 0
-rw-r--r--  jstests/indexk.js | 58
-rw-r--r--  jstests/indexl.js | 27
-rw-r--r--  jstests/indexm.js | 38
-rw-r--r--  jstests/indexn.js | 41
-rw-r--r--  jstests/indexo.js | 32
-rw-r--r--  jstests/indexp.js | 58
-rw-r--r--  jstests/indexq.js | 14
-rw-r--r--  jstests/indexr.js | 47
-rw-r--r--  jstests/indexs.js | 21
-rw-r--r--  jstests/indext.js | 21
-rw-r--r--  jstests/indexu.js | 137
-rw-r--r--  jstests/indexv.js | 18
-rw-r--r--  jstests/indexw.js | 14
-rw-r--r--  jstests/insert1.js | 3
-rw-r--r--  jstests/libs/geo_near_random.js | 37
-rw-r--r--  jstests/libs/key1 (renamed from jstests/replsets/key1) | 0
-rw-r--r--  jstests/libs/key2 (renamed from jstests/replsets/key2) | 0
-rw-r--r--  jstests/libs/testconfig | 4
-rw-r--r--  jstests/mr_errorhandling.js | 2
-rw-r--r--  jstests/mr_merge2.js | 37
-rw-r--r--  jstests/numberint.js | 92
-rw-r--r--  jstests/numberlong2.js | 32
-rw-r--r--  jstests/numberlong3.js | 25
-rw-r--r--  jstests/or1.js | 2
-rw-r--r--  jstests/or2.js | 3
-rw-r--r--  jstests/or3.js | 4
-rw-r--r--  jstests/or4.js | 2
-rw-r--r--  jstests/ord.js | 1
-rw-r--r--  jstests/org.js | 19
-rw-r--r--  jstests/orh.js | 17
-rw-r--r--  jstests/ori.js | 48
-rw-r--r--  jstests/orj.js | 121
-rw-r--r--  jstests/ork.js | 11
-rw-r--r--  jstests/orl.js | 13
-rw-r--r--  jstests/orm.js | 29
-rw-r--r--  jstests/orn.js | 22
-rw-r--r--  jstests/profile1.js | 144
-rw-r--r--  jstests/profile2.js | 19
-rw-r--r--  jstests/profile3.js | 26
-rw-r--r--  jstests/push.js | 36
-rw-r--r--  jstests/query1.js | 3
-rw-r--r--  jstests/regex2.js | 8
-rw-r--r--  jstests/regex6.js | 11
-rw-r--r--  jstests/regexa.js | 19
-rw-r--r--  jstests/remove10.js | 28
-rw-r--r--  jstests/remove2.js | 5
-rw-r--r--  jstests/remove9.js | 16
-rw-r--r--  jstests/rename.js | 19
-rw-r--r--  jstests/repl/basic1.js | 19
-rw-r--r--  jstests/repl/dbcase.js | 95
-rw-r--r--  jstests/repl/drop_dups.js | 68
-rw-r--r--  jstests/repl/mastermaster1.js | 23
-rw-r--r--  jstests/repl/mod_move.js | 69
-rw-r--r--  jstests/repl/pair1.js | 100
-rw-r--r--  jstests/repl/pair2.js | 71
-rw-r--r--  jstests/repl/pair3.js | 245
-rw-r--r--  jstests/repl/pair4.js | 160
-rw-r--r--  jstests/repl/pair5.js | 95
-rw-r--r--  jstests/repl/pair6.js | 115
-rw-r--r--  jstests/repl/pair7.js | 85
-rw-r--r--  jstests/repl/repl2.js | 29
-rw-r--r--  jstests/repl/repl3.js | 58
-rw-r--r--  jstests/repl/replacePeer1.js | 82
-rw-r--r--  jstests/repl/replacePeer2.js | 86
-rw-r--r--  jstests/repl/snapshot2.js | 72
-rw-r--r--  jstests/repl/snapshot3.js | 53
-rw-r--r--  jstests/replsets/auth1.js | 35
-rw-r--r--  jstests/replsets/cloneDb.js | 18
-rw-r--r--  jstests/replsets/config1.js | 21
-rwxr-xr-x  jstests/replsets/downstream.js | 36
-rw-r--r--  jstests/replsets/fastsync.js | 151
-rw-r--r--  jstests/replsets/initial_sync1.js | 5
-rw-r--r--  jstests/replsets/initial_sync3.js | 37
-rw-r--r--  jstests/replsets/maintenance.js | 32
-rw-r--r--  jstests/replsets/majority.js | 60
-rw-r--r--  jstests/replsets/randomcommands1.js | 29
-rw-r--r--  jstests/replsets/reconfig.js | 69
-rw-r--r--  jstests/replsets/remove1.js | 130
-rw-r--r--  jstests/replsets/replset1.js | 22
-rw-r--r--  jstests/replsets/replset3.js | 2
-rw-r--r--  jstests/replsets/replset5.js | 88
-rw-r--r--  jstests/replsets/replsetadd.js | 34
-rw-r--r--  jstests/replsets/replsetarb1.js | 33
-rw-r--r--  jstests/replsets/replsetarb2.js | 13
-rw-r--r--  jstests/replsets/replsetarb3.js | 144
-rw-r--r--  jstests/replsets/replsetfreeze.js | 4
-rw-r--r--  jstests/replsets/replsetrestart1.js | 14
-rw-r--r--  jstests/replsets/replsetrestart2.js | 8
-rw-r--r--  jstests/replsets/rollback2.js | 19
-rw-r--r--  jstests/replsets/rollback4.js | 117
-rw-r--r--  jstests/replsets/rslib.js | 44
-rw-r--r--  jstests/replsets/slavedelay1.js | 104
-rw-r--r--  jstests/replsets/stale_clustered.js | 101
-rw-r--r--  jstests/replsets/stepdown.js | 142
-rwxr-xr-x  jstests/replsets/stepdown2.js | 139
-rw-r--r--  jstests/replsets/sync1.js | 396
-rw-r--r--  jstests/replsets/sync2.js | 48
-rw-r--r--  jstests/replsets/tags.js | 154
-rw-r--r--  jstests/replsets/tags2.js | 44
-rw-r--r--  jstests/replsets/toostale.js | 34
-rw-r--r--  jstests/replsets/twosets.js | 35
-rw-r--r--  jstests/set7.js | 16
-rw-r--r--  jstests/sharding/addshard1.js | 2
-rw-r--r--  jstests/sharding/addshard4.js | 26
-rw-r--r--  jstests/sharding/array_shard_key.js | 127
-rw-r--r--  jstests/sharding/auth.js | 177
-rw-r--r--  jstests/sharding/count_slaveok.js | 69
-rw-r--r--  jstests/sharding/drop_sharded_db.js | 62
-rw-r--r--  jstests/sharding/features2.js | 11
-rw-r--r--  jstests/sharding/features3.js | 61
-rw-r--r--  jstests/sharding/group_slaveok.js | 68
-rw-r--r--  jstests/sharding/index1.js | 174
-rw-r--r--  jstests/sharding/migrateBig.js | 2
-rw-r--r--  jstests/sharding/migrateMemory.js | 54
-rw-r--r--  jstests/sharding/multi_mongos1.js | 3
-rw-r--r--  jstests/sharding/multi_mongos2.js | 61
-rw-r--r--  jstests/sharding/parallel.js | 38
-rw-r--r--  jstests/sharding/shard3.js | 12
-rw-r--r--  jstests/sharding/shard6.js | 3
-rw-r--r--  jstests/sharding/shard_insert_getlasterror_w2.js | 3
-rw-r--r--  jstests/sharding/shard_keycount.js | 45
-rw-r--r--  jstests/sharding/sharding_with_keyfile.js | 69
-rwxr-xr-x  jstests/sharding/sharding_with_keyfile.key | 3
-rw-r--r--  jstests/sharding/sync6.js | 81
-rw-r--r--  jstests/sharding/sync7.js | 63
-rw-r--r--  jstests/shell1.js | 6
-rw-r--r--  jstests/shellkillop.js | 126
-rw-r--r--  jstests/shellspawn.js | 6
-rw-r--r--  jstests/skip1.js | 15
-rw-r--r--  jstests/slowNightly/background.js | 51
-rw-r--r--  jstests/slowNightly/command_line_parsing.js | 12
-rw-r--r--  jstests/slowNightly/dur_big_atomic_update.js | 17
-rw-r--r--  jstests/slowNightly/dur_remove_old_journals.js | 27
-rw-r--r--  jstests/slowNightly/geo_axis_aligned.js | 108
-rw-r--r--  jstests/slowNightly/geo_mnypts.js | 51
-rw-r--r--  jstests/slowNightly/geo_polygon.js | 53
-rw-r--r--  jstests/slowNightly/index_check10.js | 133
-rw-r--r--  jstests/slowNightly/index_check9.js | 2
-rw-r--r--  jstests/slowNightly/replReads.js | 108
-rw-r--r--  jstests/slowNightly/replsets_priority1.js | 173
-rw-r--r--  jstests/slowNightly/sharding_balance1.js | 3
-rw-r--r--  jstests/slowNightly/sharding_balance4.js | 8
-rw-r--r--  jstests/slowNightly/sharding_migrateBigObject.js | 61
-rw-r--r--  jstests/slowNightly/sharding_multiple_ns_rs.js | 49
-rw-r--r--  jstests/slowNightly/sharding_passthrough.js | 16
-rw-r--r--  jstests/slowNightly/sharding_rs1.js | 8
-rw-r--r--  jstests/slowNightly/sharding_rs2.js | 22
-rw-r--r--  jstests/slowNightly/sharding_rs_arb1.js | 40
-rw-r--r--  jstests/slowNightly/sync6_slow.js | 82
-rw-r--r--  jstests/slowWeekly/geo_full.js | 487
-rw-r--r--  jstests/slowWeekly/geo_mnypts_plus_fields.js | 98
-rw-r--r--  jstests/slowWeekly/query_yield2.js | 2
-rw-r--r--  jstests/slowWeekly/repair2.js | 29
-rw-r--r--  jstests/slowWeekly/update_yield1.js | 2
-rw-r--r--  jstests/sort10.js | 48
-rw-r--r--  jstests/sort2.js | 22
-rw-r--r--  jstests/sort7.js | 25
-rw-r--r--  jstests/sort8.js | 30
-rw-r--r--  jstests/sort9.js | 26
-rw-r--r--  jstests/sorta.js | 26
-rw-r--r--  jstests/tool/csv1.js | 8
-rw-r--r--  jstests/tool/csvexport1.js | 45
-rw-r--r--  jstests/tool/csvexport2.js | 31
-rw-r--r--  jstests/tool/csvimport1.js | 40
-rw-r--r--  jstests/tool/data/a.tsv | 2
-rw-r--r--  jstests/tool/data/csvimport1.csv | 8
-rw-r--r--  jstests/tool/data/dumprestore6/foo.bson | bin 0 -> 44 bytes
-rw-r--r--  jstests/tool/data/dumprestore6/system.indexes.bson | bin 0 -> 144 bytes
-rw-r--r--  jstests/tool/dumprestore5.js | 36
-rw-r--r--  jstests/tool/dumprestore6.js | 27
-rw-r--r--  jstests/tool/exportimport1.js | 29
-rw-r--r--  jstests/tool/tsv1.js | 32
-rw-r--r--  jstests/type2.js | 19
-rw-r--r--  jstests/type3.js | 68
-rw-r--r--  jstests/unique2.js | 53
-rw-r--r--  jstests/uniqueness.js | 13
-rw-r--r--  jstests/update.js | 13
-rw-r--r--  jstests/update_blank1.js | 12
-rw-r--r--  jstests/update_invalid1.js | 6
-rw-r--r--  jstests/updatea.js | 6
-rw-r--r--  jstests/updatef.js | 24
-rw-r--r--  jstests/updateg.js | 17
-rw-r--r--  pch.cpp | 10
-rw-r--r--  pch.h | 36
-rw-r--r--  rpm/mongo.spec | 4
-rw-r--r--  rpm/mongod.conf | 6
-rw-r--r--  s/balance.cpp | 68
-rw-r--r--  s/balance.h | 4
-rw-r--r--  s/balancer_policy.cpp | 20
-rw-r--r--  s/chunk.cpp | 557
-rw-r--r--  s/chunk.h | 103
-rw-r--r--  s/client.cpp | 10
-rw-r--r--  s/client.h | 6
-rw-r--r--  s/commands_admin.cpp | 204
-rw-r--r--  s/commands_public.cpp | 559
-rw-r--r--  s/config.cpp | 264
-rw-r--r--  s/config.h | 19
-rw-r--r--  s/config_migrate.cpp | 2
-rw-r--r--  s/cursors.cpp | 24
-rw-r--r--  s/d_chunk_manager.cpp | 25
-rw-r--r--  s/d_chunk_manager.h | 17
-rw-r--r--  s/d_logic.cpp | 14
-rw-r--r--  s/d_logic.h | 3
-rw-r--r--  s/d_migrate.cpp | 258
-rw-r--r--  s/d_split.cpp | 100
-rw-r--r--  s/d_state.cpp | 48
-rw-r--r--  s/d_writeback.cpp | 80
-rw-r--r--  s/d_writeback.h | 41
-rw-r--r--  s/dbgrid.vcxproj | 1201
-rwxr-xr-x  s/dbgrid.vcxproj.filters | 137
-rw-r--r--  s/grid.cpp | 52
-rw-r--r--  s/mr_shard.cpp | 312
-rw-r--r--  s/mr_shard.h | 232
-rw-r--r--  s/request.cpp | 28
-rw-r--r--  s/request.h | 4
-rw-r--r--  s/s_only.cpp | 11
-rw-r--r--  s/security.cpp | 112
-rw-r--r--  s/server.cpp | 65
-rw-r--r--  s/server.h | 2
-rw-r--r--  s/shard.cpp | 53
-rw-r--r--  s/shard.h | 19
-rw-r--r--  s/shard_version.cpp | 74
-rw-r--r--  s/shard_version.h | 1
-rw-r--r--  s/shardconnection.cpp | 47
-rw-r--r--  s/shardkey.cpp | 20
-rw-r--r--  s/shardkey.h | 16
-rw-r--r--  s/strategy.cpp | 26
-rw-r--r--  s/strategy.h | 6
-rw-r--r--  s/strategy_shard.cpp | 270
-rw-r--r--  s/strategy_single.cpp | 31
-rw-r--r--  s/util.h | 2
-rw-r--r--  s/writeback_listener.cpp | 106
-rw-r--r--  s/writeback_listener.h | 6
-rw-r--r--  scripting/bench.cpp | 105
-rw-r--r--  scripting/engine.cpp | 45
-rw-r--r--  scripting/engine.h | 43
-rw-r--r--  scripting/engine_java.cpp | 10
-rw-r--r--  scripting/engine_spidermonkey.cpp | 129
-rw-r--r--  scripting/engine_spidermonkey.h | 34
-rw-r--r--  scripting/engine_v8.cpp | 1166
-rw-r--r--  scripting/engine_v8.h | 122
-rw-r--r--  scripting/sm_db.cpp | 120
-rw-r--r--  scripting/utils.cpp | 19
-rw-r--r--  scripting/v8_db.cpp | 605
-rw-r--r--  scripting/v8_db.h | 132
-rw-r--r--  scripting/v8_utils.cpp | 78
-rw-r--r--  scripting/v8_utils.h | 6
-rw-r--r--  scripting/v8_wrapper.cpp | 569
-rw-r--r--  scripting/v8_wrapper.h | 15
-rw-r--r--  server.h | 21
-rw-r--r--  shell/collection.js | 52
-rw-r--r--  shell/db.js | 77
-rw-r--r--  shell/dbshell.cpp | 198
-rw-r--r--  shell/mongo.js | 9
-rw-r--r--  shell/mongo_vstudio.cpp | 491
-rw-r--r--  shell/msvc/mongo.vcxproj | 14
-rw-r--r--  shell/msvc/mongo.vcxproj.filters | 27
-rw-r--r--  shell/query.js | 1
-rwxr-xr-x [-rw-r--r--]  shell/servers.js | 1151
-rw-r--r--  shell/shell_utils.cpp | 103
-rw-r--r--  shell/utils.js | 284
-rw-r--r--  shell/utils_sh.js | 98
-rwxr-xr-x  speed.js | 13
-rw-r--r--  third_party/README | 6
-rw-r--r--  third_party/js-1.7/Makefile.in | 388
-rw-r--r--  third_party/js-1.7/Makefile.ref | 375
-rw-r--r--  third_party/js-1.7/README.html | 826
-rw-r--r--  third_party/js-1.7/SpiderMonkey.rsp | 12
-rw-r--r--  third_party/js-1.7/Y.js | 19
-rw-r--r--  third_party/js-1.7/config.mk | 186
-rw-r--r--  third_party/js-1.7/config/AIX4.1.mk | 65
-rw-r--r--  third_party/js-1.7/config/AIX4.2.mk | 64
-rw-r--r--  third_party/js-1.7/config/AIX4.3.mk | 65
-rw-r--r--  third_party/js-1.7/config/CVS/Entries | 36
-rw-r--r--  third_party/js-1.7/config/CVS/Repository | 1
-rw-r--r--  third_party/js-1.7/config/CVS/Root | 1
-rw-r--r--  third_party/js-1.7/config/CVS/Tag | 1
-rw-r--r--  third_party/js-1.7/config/Darwin.mk | 83
-rwxr-xr-x  third_party/js-1.7/config/Darwin1.3.mk | 81
-rwxr-xr-x  third_party/js-1.7/config/Darwin1.4.mk | 41
-rwxr-xr-x  third_party/js-1.7/config/Darwin5.2.mk | 81
-rw-r--r--  third_party/js-1.7/config/Darwin5.3.mk | 81
-rw-r--r--  third_party/js-1.7/config/HP-UXB.10.10.mk | 77
-rw-r--r--  third_party/js-1.7/config/HP-UXB.10.20.mk | 77
-rw-r--r--  third_party/js-1.7/config/HP-UXB.11.00.mk | 80
-rw-r--r--  third_party/js-1.7/config/IRIX.mk | 87
-rw-r--r--  third_party/js-1.7/config/IRIX5.3.mk | 44
-rw-r--r--  third_party/js-1.7/config/IRIX6.1.mk | 44
-rw-r--r--  third_party/js-1.7/config/IRIX6.2.mk | 44
-rw-r--r--  third_party/js-1.7/config/IRIX6.3.mk | 44
-rw-r--r--  third_party/js-1.7/config/IRIX6.5.mk | 44
-rw-r--r--  third_party/js-1.7/config/Linux_All.mk | 103
-rwxr-xr-x  third_party/js-1.7/config/Mac_OS10.0.mk | 82
-rw-r--r--  third_party/js-1.7/config/OSF1V4.0.mk | 72
-rw-r--r--  third_party/js-1.7/config/OSF1V5.0.mk | 69
-rw-r--r--  third_party/js-1.7/config/SunOS4.1.4.mk | 101
-rw-r--r--  third_party/js-1.7/config/SunOS5.3.mk | 91
-rw-r--r--  third_party/js-1.7/config/SunOS5.4.mk | 92
-rw-r--r--  third_party/js-1.7/config/SunOS5.5.1.mk | 44
-rw-r--r--  third_party/js-1.7/config/SunOS5.5.mk | 87
-rw-r--r--  third_party/js-1.7/config/SunOS5.6.mk | 89
-rw-r--r--  third_party/js-1.7/config/SunOS5.7.mk | 44
-rw-r--r--  third_party/js-1.7/config/SunOS5.8.mk | 44
-rw-r--r--  third_party/js-1.7/config/SunOS5.9.mk | 44
-rw-r--r--  third_party/js-1.7/config/WINNT4.0.mk | 117
-rw-r--r--  third_party/js-1.7/config/WINNT5.0.mk | 117
-rw-r--r--  third_party/js-1.7/config/WINNT5.1.mk | 117
-rw-r--r--  third_party/js-1.7/config/WINNT5.2.mk | 117
-rw-r--r--  third_party/js-1.7/config/dgux.mk | 64
-rw-r--r--  third_party/js-1.7/fdlibm/.cvsignore | 7
-rw-r--r--  third_party/js-1.7/fdlibm/CVS/Entries | 87
-rw-r--r--  third_party/js-1.7/fdlibm/CVS/Repository | 1
-rw-r--r--  third_party/js-1.7/fdlibm/CVS/Root | 1
-rw-r--r--  third_party/js-1.7/fdlibm/CVS/Tag | 1
-rw-r--r--  third_party/js-1.7/fdlibm/Makefile.in | 127
-rw-r--r--  third_party/js-1.7/fdlibm/Makefile.ref | 192
-rw-r--r--  third_party/js-1.7/fdlibm/e_acos.c | 147
-rw-r--r--  third_party/js-1.7/fdlibm/e_acosh.c | 105
-rw-r--r--  third_party/js-1.7/fdlibm/e_asin.c | 156
-rw-r--r--  third_party/js-1.7/fdlibm/e_atan2.c | 165
-rw-r--r--  third_party/js-1.7/fdlibm/e_atanh.c | 110
-rw-r--r--  third_party/js-1.7/fdlibm/e_cosh.c | 133
-rw-r--r--  third_party/js-1.7/fdlibm/e_exp.c | 202
-rw-r--r--  third_party/js-1.7/fdlibm/e_fmod.c | 184
-rw-r--r--  third_party/js-1.7/fdlibm/e_gamma.c | 71
-rw-r--r--  third_party/js-1.7/fdlibm/e_gamma_r.c | 70
-rw-r--r--  third_party/js-1.7/fdlibm/e_hypot.c | 173
-rw-r--r--  third_party/js-1.7/fdlibm/e_j0.c | 524
-rw-r--r--  third_party/js-1.7/fdlibm/e_j1.c | 523
-rw-r--r--  third_party/js-1.7/fdlibm/e_jn.c | 315
-rw-r--r--  third_party/js-1.7/fdlibm/e_lgamma.c | 71
-rw-r--r--  third_party/js-1.7/fdlibm/e_lgamma_r.c | 347
-rw-r--r--  third_party/js-1.7/fdlibm/e_log.c | 184
-rw-r--r--  third_party/js-1.7/fdlibm/e_log10.c | 134
-rw-r--r--  third_party/js-1.7/fdlibm/e_pow.c | 386
-rw-r--r--  third_party/js-1.7/fdlibm/e_rem_pio2.c | 222
-rw-r--r--  third_party/js-1.7/fdlibm/e_remainder.c | 120
-rw-r--r--  third_party/js-1.7/fdlibm/e_scalb.c | 89
-rw-r--r--  third_party/js-1.7/fdlibm/e_sinh.c | 122
-rw-r--r--  third_party/js-1.7/fdlibm/e_sqrt.c | 497
-rw-r--r--  third_party/js-1.7/fdlibm/fdlibm.h | 273
-rw-r--r--  third_party/js-1.7/fdlibm/fdlibm.mak | 1453
-rw-r--r--  third_party/js-1.7/fdlibm/fdlibm.mdp | bin 0 -> 42143 bytes
-rw-r--r--  third_party/js-1.7/fdlibm/k_cos.c | 135
-rw-r--r--  third_party/js-1.7/fdlibm/k_rem_pio2.c | 354
-rw-r--r--  third_party/js-1.7/fdlibm/k_sin.c | 114
-rw-r--r--  third_party/js-1.7/fdlibm/k_standard.c | 785
-rw-r--r--  third_party/js-1.7/fdlibm/k_tan.c | 170
-rw-r--r--  third_party/js-1.7/fdlibm/s_asinh.c | 101
-rw-r--r--  third_party/js-1.7/fdlibm/s_atan.c | 175
-rw-r--r--  third_party/js-1.7/fdlibm/s_cbrt.c | 133
-rw-r--r--  third_party/js-1.7/fdlibm/s_ceil.c | 120
-rw-r--r--  third_party/js-1.7/fdlibm/s_copysign.c | 72
-rw-r--r--  third_party/js-1.7/fdlibm/s_cos.c | 118
-rw-r--r--  third_party/js-1.7/fdlibm/s_erf.c | 356
-rw-r--r--  third_party/js-1.7/fdlibm/s_expm1.c | 267
-rw-r--r--  third_party/js-1.7/fdlibm/s_fabs.c | 70
-rw-r--r--  third_party/js-1.7/fdlibm/s_finite.c | 71
-rw-r--r--  third_party/js-1.7/fdlibm/s_floor.c | 121
-rw-r--r--  third_party/js-1.7/fdlibm/s_frexp.c | 99
-rw-r--r--  third_party/js-1.7/fdlibm/s_ilogb.c | 85
-rw-r--r--  third_party/js-1.7/fdlibm/s_isnan.c | 74
-rw-r--r--  third_party/js-1.7/fdlibm/s_ldexp.c | 66
-rw-r--r--  third_party/js-1.7/fdlibm/s_lib_version.c | 73
-rw-r--r--  third_party/js-1.7/fdlibm/s_log1p.c | 211
-rw-r--r--  third_party/js-1.7/fdlibm/s_logb.c | 79
-rw-r--r--  third_party/js-1.7/fdlibm/s_matherr.c | 64
-rw-r--r--  third_party/js-1.7/fdlibm/s_modf.c | 132
-rw-r--r--  third_party/js-1.7/fdlibm/s_nextafter.c | 124
-rw-r--r--  third_party/js-1.7/fdlibm/s_rint.c | 131
-rw-r--r--  third_party/js-1.7/fdlibm/s_scalbn.c | 107
-rw-r--r--  third_party/js-1.7/fdlibm/s_signgam.c | 40
-rw-r--r--  third_party/js-1.7/fdlibm/s_significand.c | 68
-rw-r--r--  third_party/js-1.7/fdlibm/s_sin.c | 118
-rw-r--r--  third_party/js-1.7/fdlibm/s_tan.c | 112
-rw-r--r--  third_party/js-1.7/fdlibm/s_tanh.c | 122
-rw-r--r--  third_party/js-1.7/fdlibm/w_acos.c | 78
-rw-r--r--  third_party/js-1.7/fdlibm/w_acosh.c | 78
-rw-r--r--  third_party/js-1.7/fdlibm/w_asin.c | 80
-rw-r--r--  third_party/js-1.7/fdlibm/w_atan2.c | 79
-rw-r--r--  third_party/js-1.7/fdlibm/w_atanh.c | 81
-rw-r--r--  third_party/js-1.7/fdlibm/w_cosh.c | 77
-rw-r--r--  third_party/js-1.7/fdlibm/w_exp.c | 88
-rw-r--r--  third_party/js-1.7/fdlibm/w_fmod.c | 78
-rw-r--r--  third_party/js-1.7/fdlibm/w_gamma.c | 85
-rw-r--r--  third_party/js-1.7/fdlibm/w_gamma_r.c | 81
-rw-r--r--  third_party/js-1.7/fdlibm/w_hypot.c | 78
-rw-r--r--  third_party/js-1.7/fdlibm/w_j0.c | 105
-rw-r--r--  third_party/js-1.7/fdlibm/w_j1.c | 106
-rw-r--r--  third_party/js-1.7/fdlibm/w_jn.c | 128
-rw-r--r--  third_party/js-1.7/fdlibm/w_lgamma.c | 85
-rw-r--r--  third_party/js-1.7/fdlibm/w_lgamma_r.c | 81
-rw-r--r--  third_party/js-1.7/fdlibm/w_log.c | 78
-rw-r--r--  third_party/js-1.7/fdlibm/w_log10.c | 81
-rw-r--r--  third_party/js-1.7/fdlibm/w_pow.c | 99
-rw-r--r--  third_party/js-1.7/fdlibm/w_remainder.c | 77
-rw-r--r--  third_party/js-1.7/fdlibm/w_scalb.c | 95
-rw-r--r--  third_party/js-1.7/fdlibm/w_sinh.c | 77
-rw-r--r--  third_party/js-1.7/fdlibm/w_sqrt.c | 77
-rw-r--r--  third_party/js-1.7/js.c | 3181
-rw-r--r--  third_party/js-1.7/js.mak | 4344
-rw-r--r--  third_party/js-1.7/js.mdp | bin 0 -> 17922 bytes
-rw-r--r--  third_party/js-1.7/js.msg | 301
-rw-r--r--  third_party/js-1.7/js.pkg | 2
-rw-r--r--  third_party/js-1.7/js3240.rc | 79
-rw-r--r--  third_party/js-1.7/jsOS240.def | 654
-rw-r--r--  third_party/js-1.7/jsapi.c | 5011
-rw-r--r--  third_party/js-1.7/jsapi.h | 2220
-rw-r--r--  third_party/js-1.7/jsarena.c | 502
-rw-r--r--  third_party/js-1.7/jsarena.h | 303
-rw-r--r--  third_party/js-1.7/jsarray.c | 1864
-rw-r--r--  third_party/js-1.7/jsarray.h | 95
-rw-r--r--  third_party/js-1.7/jsatom.c | 999
-rw-r--r--  third_party/js-1.7/jsatom.h | 456
-rw-r--r--  third_party/js-1.7/jsbit.h | 195
-rw-r--r--  third_party/js-1.7/jsbool.c | 227
-rw-r--r--  third_party/js-1.7/jsbool.h | 76
-rw-r--r--  third_party/js-1.7/jsclist.h | 139
-rw-r--r--  third_party/js-1.7/jscntxt.c | 1229
-rw-r--r--  third_party/js-1.7/jscntxt.h | 1013
-rw-r--r--  third_party/js-1.7/jscompat.h | 57
-rw-r--r--  third_party/js-1.7/jsconfig.h | 208
-rw-r--r--  third_party/js-1.7/jsconfig.mk | 181
-rw-r--r--  third_party/js-1.7/jscpucfg.c | 380
-rw-r--r--  third_party/js-1.7/jscpucfg.h | 212
-rw-r--r--  third_party/js-1.7/jsdate.c | 2371
-rw-r--r--  third_party/js-1.7/jsdate.h | 120
-rw-r--r--  third_party/js-1.7/jsdbgapi.c | 1439
-rw-r--r--  third_party/js-1.7/jsdbgapi.h | 406
-rw-r--r--  third_party/js-1.7/jsdhash.c | 826
-rw-r--r--  third_party/js-1.7/jsdhash.h | 581
-rw-r--r--  third_party/js-1.7/jsdtoa.c | 3132
-rw-r--r--  third_party/js-1.7/jsdtoa.h | 130
-rw-r--r--  third_party/js-1.7/jsemit.c | 6845
-rw-r--r--  third_party/js-1.7/jsemit.h | 743
-rw-r--r--  third_party/js-1.7/jsexn.c | 1348
-rw-r--r--  third_party/js-1.7/jsexn.h | 96
-rw-r--r--  third_party/js-1.7/jsfile.c | 2735
-rw-r--r--  third_party/js-1.7/jsfile.h | 56
-rw-r--r--  third_party/js-1.7/jsfile.msg | 90
-rw-r--r--  third_party/js-1.7/jsfun.c | 2330
-rw-r--r--  third_party/js-1.7/jsfun.h | 170
-rw-r--r--  third_party/js-1.7/jsgc.c | 3201
-rw-r--r--  third_party/js-1.7/jsgc.h | 368
-rw-r--r--  third_party/js-1.7/jshash.c | 483
-rw-r--r--  third_party/js-1.7/jshash.h | 151
-rw-r--r--  third_party/js-1.7/jsify.pl | 485
-rw-r--r--  third_party/js-1.7/jsinterp.c | 6216
-rw-r--r--  third_party/js-1.7/jsinterp.h | 361
-rw-r--r--  third_party/js-1.7/jsiter.c | 1080
-rw-r--r--  third_party/js-1.7/jsiter.h | 114
-rw-r--r--  third_party/js-1.7/jskeyword.tbl | 124
-rw-r--r--  third_party/js-1.7/jskwgen.c | 460
-rw-r--r--  third_party/js-1.7/jslibmath.h | 266
-rw-r--r--  third_party/js-1.7/jslock.c | 1303
-rw-r--r--  third_party/js-1.7/jslock.h | 266
-rw-r--r--  third_party/js-1.7/jslocko.asm | 60
-rw-r--r--  third_party/js-1.7/jslog2.c | 94
-rw-r--r--  third_party/js-1.7/jslong.c | 281
-rw-r--r--  third_party/js-1.7/jslong.h | 437
-rw-r--r--  third_party/js-1.7/jsmath.c | 514
-rw-r--r--  third_party/js-1.7/jsmath.h | 57
-rw-r--r--  third_party/js-1.7/jsnum.c | 1147
-rw-r--r--  third_party/js-1.7/jsnum.h | 268
-rw-r--r--  third_party/js-1.7/jsobj.c | 5035
-rw-r--r--  third_party/js-1.7/jsobj.h | 596
-rw-r--r--  third_party/js-1.7/jsopcode.c | 4794
-rw-r--r--  third_party/js-1.7/jsopcode.h | 318
-rw-r--r--  third_party/js-1.7/jsopcode.tbl | 478
-rw-r--r--  third_party/js-1.7/jsosdep.h | 115
-rw-r--r--  third_party/js-1.7/jsotypes.h | 202
-rw-r--r--  third_party/js-1.7/jsparse.c | 6547
-rw-r--r--  third_party/js-1.7/jsparse.h | 438
-rw-r--r--  third_party/js-1.7/jsprf.c | 1264
-rw-r--r--  third_party/js-1.7/jsprf.h | 150
-rw-r--r--  third_party/js-1.7/jsproto.tbl | 116
-rw-r--r--  third_party/js-1.7/jsprvtd.h | 202
-rw-r--r--  third_party/js-1.7/jspubtd.h | 667
-rw-r--r--  third_party/js-1.7/jsregexp.c | 4206
-rw-r--r--  third_party/js-1.7/jsregexp.h | 183
-rw-r--r--  third_party/js-1.7/jsscan.c | 2101
-rw-r--r--  third_party/js-1.7/jsscan.h | 389
-rw-r--r--  third_party/js-1.7/jsscope.c | 1776
-rw-r--r--  third_party/js-1.7/jsscope.h | 407
-rw-r--r--  third_party/js-1.7/jsscript.c | 1717
-rw-r--r--  third_party/js-1.7/jsscript.h | 225
-rw-r--r--  third_party/js-1.7/jsshell.msg | 50
-rw-r--r--  third_party/js-1.7/jsstddef.h | 83
-rw-r--r--  third_party/js-1.7/jsstr.c | 4818
-rw-r--r--  third_party/js-1.7/jsstr.h | 500
-rw-r--r--  third_party/js-1.7/jstypes.h | 464
-rw-r--r--  third_party/js-1.7/jsutil.c | 198
-rw-r--r--  third_party/js-1.7/jsutil.h | 106
-rw-r--r--  third_party/js-1.7/jsxdrapi.c | 835
-rw-r--r--  third_party/js-1.7/jsxdrapi.h | 223
-rw-r--r--  third_party/js-1.7/jsxml.c | 8357
-rw-r--r--  third_party/js-1.7/jsxml.h | 332
-rw-r--r--  third_party/js-1.7/lock_SunOS.s | 114
-rw-r--r--  third_party/js-1.7/perfect.js | 39
-rw-r--r--  third_party/js-1.7/plify_jsdhash.sed | 33
-rw-r--r--  third_party/js-1.7/prmjtime.c | 439
-rw-r--r--  third_party/js-1.7/prmjtime.h | 95
-rw-r--r--  third_party/js-1.7/resource.h | 15
-rw-r--r--  third_party/js-1.7/rules.mk | 193
-rw-r--r--  third_party/js-1.7/win32.order | 391
-rw-r--r--  third_party/linenoise/Makefile | 7
-rw-r--r--  third_party/linenoise/README.markdown | 47
-rw-r--r--  third_party/linenoise/example.c | 27
-rw-r--r--  third_party/linenoise/history.txt | 3
-rw-r--r--  third_party/linenoise/linenoise.cpp | 836
-rw-r--r--  third_party/linenoise/linenoise.h | 55
-rw-r--r--  third_party/linenoise/linenoise_win32.cpp | 442
-rw-r--r--  third_party/pcre-7.4/config-cmake.h.in (renamed from pcre-7.4/config-cmake.h.in) | 0
-rw-r--r--  third_party/pcre-7.4/config.h (renamed from pcre-7.4/config.h) | 22
-rw-r--r--  third_party/pcre-7.4/config.h.generic (renamed from pcre-7.4/config.h.generic) | 0
-rw-r--r--  third_party/pcre-7.4/config.h.in (renamed from pcre-7.4/config.h.in) | 0
-rw-r--r--  third_party/pcre-7.4/dftables.c (renamed from pcre-7.4/dftables.c) | 0
-rw-r--r--  third_party/pcre-7.4/pcre.h (renamed from pcre-7.4/pcre.h) | 0
-rw-r--r--  third_party/pcre-7.4/pcre.h.generic (renamed from pcre-7.4/pcre.h.generic) | 0
-rw-r--r--  third_party/pcre-7.4/pcre.h.in (renamed from pcre-7.4/pcre.h.in) | 0
-rw-r--r--  third_party/pcre-7.4/pcre_chartables.c (renamed from pcre-7.4/pcre_chartables.c) | 0
-rw-r--r--  third_party/pcre-7.4/pcre_chartables.c.dist (renamed from pcre-7.4/pcre_chartables.c.dist) | 0
-rw-r--r--  third_party/pcre-7.4/pcre_compile.c (renamed from pcre-7.4/pcre_compile.c) | 0
-rw-r--r--  third_party/pcre-7.4/pcre_config.c (renamed from pcre-7.4/pcre_config.c) | 0
-rw-r--r--  third_party/pcre-7.4/pcre_dfa_exec.c (renamed from pcre-7.4/pcre_dfa_exec.c) | 0
-rw-r--r--  third_party/pcre-7.4/pcre_exec.c (renamed from pcre-7.4/pcre_exec.c) | 4
-rw-r--r--  third_party/pcre-7.4/pcre_fullinfo.c (renamed from pcre-7.4/pcre_fullinfo.c) | 0
-rw-r--r--  third_party/pcre-7.4/pcre_get.c (renamed from pcre-7.4/pcre_get.c) | 0
-rw-r--r--  third_party/pcre-7.4/pcre_globals.c (renamed from pcre-7.4/pcre_globals.c) | 0
-rw-r--r--  third_party/pcre-7.4/pcre_info.c (renamed from pcre-7.4/pcre_info.c) | 0
-rw-r--r--  third_party/pcre-7.4/pcre_internal.h (renamed from pcre-7.4/pcre_internal.h) | 0
-rw-r--r--  third_party/pcre-7.4/pcre_maketables.c (renamed from pcre-7.4/pcre_maketables.c) | 0
-rw-r--r--  third_party/pcre-7.4/pcre_newline.c (renamed from pcre-7.4/pcre_newline.c) | 0
-rw-r--r--  third_party/pcre-7.4/pcre_ord2utf8.c (renamed from pcre-7.4/pcre_ord2utf8.c) | 0
-rw-r--r--  third_party/pcre-7.4/pcre_refcount.c (renamed from pcre-7.4/pcre_refcount.c) | 0
-rw-r--r--  third_party/pcre-7.4/pcre_scanner.cc (renamed from pcre-7.4/pcre_scanner.cc) | 0
-rw-r--r--  third_party/pcre-7.4/pcre_scanner.h (renamed from pcre-7.4/pcre_scanner.h) | 4
-rw-r--r--  third_party/pcre-7.4/pcre_scanner_unittest.cc (renamed from pcre-7.4/pcre_scanner_unittest.cc) | 0
-rw-r--r--  third_party/pcre-7.4/pcre_stringpiece.cc (renamed from pcre-7.4/pcre_stringpiece.cc) | 0
-rw-r--r--  third_party/pcre-7.4/pcre_stringpiece.h (renamed from pcre-7.4/pcre_stringpiece.h) | 0
-rw-r--r--  third_party/pcre-7.4/pcre_stringpiece.h.in (renamed from pcre-7.4/pcre_stringpiece.h.in) | 0
-rw-r--r--  third_party/pcre-7.4/pcre_stringpiece_unittest.cc (renamed from pcre-7.4/pcre_stringpiece_unittest.cc) | 0
-rw-r--r--  third_party/pcre-7.4/pcre_study.c (renamed from pcre-7.4/pcre_study.c) | 0
-rw-r--r--  third_party/pcre-7.4/pcre_tables.c (renamed from pcre-7.4/pcre_tables.c) | 0
-rw-r--r--  third_party/pcre-7.4/pcre_try_flipped.c (renamed from pcre-7.4/pcre_try_flipped.c) | 0
-rw-r--r--  third_party/pcre-7.4/pcre_ucp_searchfuncs.c (renamed from pcre-7.4/pcre_ucp_searchfuncs.c) | 0
-rw-r--r--  third_party/pcre-7.4/pcre_valid_utf8.c (renamed from pcre-7.4/pcre_valid_utf8.c) | 0
-rw-r--r--  third_party/pcre-7.4/pcre_version.c (renamed from pcre-7.4/pcre_version.c) | 0
-rw-r--r--  third_party/pcre-7.4/pcre_xclass.c (renamed from pcre-7.4/pcre_xclass.c) | 0
-rw-r--r--  third_party/pcre-7.4/pcrecpp.cc (renamed from pcre-7.4/pcrecpp.cc) | 2
-rw-r--r--  third_party/pcre-7.4/pcrecpp.h (renamed from pcre-7.4/pcrecpp.h) | 0
-rw-r--r--  third_party/pcre-7.4/pcrecpp_internal.h (renamed from pcre-7.4/pcrecpp_internal.h) | 0
-rw-r--r--  third_party/pcre-7.4/pcrecpp_unittest.cc (renamed from pcre-7.4/pcrecpp_unittest.cc) | 0
-rw-r--r--  third_party/pcre-7.4/pcrecpparg.h (renamed from pcre-7.4/pcrecpparg.h) | 0
-rw-r--r--  third_party/pcre-7.4/pcrecpparg.h.in (renamed from pcre-7.4/pcrecpparg.h.in) | 0
-rw-r--r--  third_party/pcre-7.4/pcredemo.c (renamed from pcre-7.4/pcredemo.c) | 0
-rw-r--r--  third_party/pcre-7.4/pcregrep.c (renamed from pcre-7.4/pcregrep.c) | 0
-rw-r--r--  third_party/pcre-7.4/pcreposix.c (renamed from pcre-7.4/pcreposix.c) | 0
-rw-r--r--  third_party/pcre-7.4/pcreposix.h (renamed from pcre-7.4/pcreposix.h) | 0
-rw-r--r--  third_party/pcre-7.4/pcretest.c (renamed from pcre-7.4/pcretest.c) | 0
-rw-r--r--  third_party/pcre-7.4/ucp.h (renamed from pcre-7.4/ucp.h) | 0
-rw-r--r--  third_party/pcre-7.4/ucpinternal.h (renamed from pcre-7.4/ucpinternal.h) | 0
-rw-r--r--  third_party/pcre-7.4/ucptable.h (renamed from pcre-7.4/ucptable.h) | 0
-rw-r--r--  third_party/pcre.py | 38
-rw-r--r--  third_party/sm.py | 100
-rw-r--r--  third_party/snappy.py | 11
-rwxr-xr-x  third_party/snappy/COPYING | 28
-rwxr-xr-x  third_party/snappy/README | 135
-rwxr-xr-x  third_party/snappy/config.h | 124
-rwxr-xr-x  third_party/snappy/snappy-internal.h | 150
-rwxr-xr-x  third_party/snappy/snappy-sinksource.cc | 72
-rwxr-xr-x  third_party/snappy/snappy-sinksource.h | 136
-rwxr-xr-x  third_party/snappy/snappy-stubs-internal.cc | 42
-rwxr-xr-x  third_party/snappy/snappy-stubs-internal.h | 478
-rwxr-xr-x  third_party/snappy/snappy-stubs-public.h | 85
-rwxr-xr-x  third_party/snappy/snappy.cc | 1026
-rwxr-xr-x  third_party/snappy/snappy.h | 155
-rw-r--r--  tools/bridge.cpp | 9
-rw-r--r--  tools/dump.cpp | 134
-rw-r--r--  tools/export.cpp | 74
-rw-r--r--  tools/import.cpp | 327
-rw-r--r--  tools/restore.cpp | 43
-rw-r--r--  tools/sniffer.cpp | 30
-rw-r--r--  tools/stat.cpp | 72
-rw-r--r--  tools/tool.cpp | 82
-rw-r--r--  tools/tool.h | 4
-rw-r--r--  tools/top.cpp | 196
-rw-r--r--  util/alignedbuilder.cpp | 41
-rw-r--r--  util/alignedbuilder.h | 16
-rw-r--r--  util/array.h | 7
-rw-r--r--  util/assert_util.cpp | 71
-rw-r--r--  util/assert_util.h | 76
-rw-r--r--  util/background.cpp | 70
-rw-r--r--  util/background.h | 49
-rw-r--r--  util/bson_util.h | 42
-rw-r--r--  util/bufreader.h | 2
-rw-r--r--  util/checksum.h | 37
-rw-r--r--  util/compress.cpp | 31
-rw-r--r--  util/compress.h | 21
-rw-r--r--  util/concurrency/list.h | 28
-rw-r--r--  util/concurrency/mutex.h | 103
-rw-r--r--  util/concurrency/race.h | 49
-rw-r--r--  util/concurrency/rwlock.h | 250
-rw-r--r-- [-rwxr-xr-x]  util/concurrency/shared_mutex_win.hpp | 29
-rw-r--r--  util/concurrency/spin_lock.cpp | 45
-rw-r--r--  util/concurrency/spin_lock.h | 33
-rw-r--r--  util/concurrency/synchronization.cpp | 36
-rw-r--r--  util/concurrency/synchronization.h | 18
-rw-r--r--  util/concurrency/value.h | 51
-rw-r--r--  util/concurrency/vars.cpp | 4
-rw-r--r--  util/file.h | 76
-rw-r--r--  util/file_allocator.cpp | 73
-rw-r--r--  util/file_allocator.h | 6
-rw-r--r--  util/goodies.h | 154
-rw-r--r--  util/hashtab.h | 14
-rw-r--r--  util/log.cpp | 38
-rw-r--r--  util/log.h | 53
-rw-r--r--  util/logfile.cpp | 87
-rw-r--r--  util/logfile.h | 3
-rw-r--r--  util/message.cpp | 764
-rw-r--r--  util/mmap.cpp | 30
-rw-r--r--  util/mmap.h | 38
-rw-r--r--  util/mmap_posix.cpp | 24
-rw-r--r--  util/mmap_win.cpp | 13
-rwxr-xr-x  util/mongoutils/README | 2
-rw-r--r--  util/mongoutils/str.h | 8
-rw-r--r--  util/mongoutils/test.cpp | 2
-rw-r--r--  util/net/hostandport.h (renamed from util/hostandport.h) | 40
-rw-r--r--  util/net/httpclient.cpp (renamed from util/httpclient.cpp) | 45
-rw-r--r--  util/net/httpclient.h (renamed from util/httpclient.h) | 16
-rw-r--r--  util/net/listen.cpp | 391
-rw-r--r--  util/net/listen.h | 190
-rw-r--r--  util/net/message.cpp | 64
-rw-r--r--  util/net/message.h (renamed from util/message.h) | 210
-rw-r--r--  util/net/message_port.cpp | 298
-rw-r--r--  util/net/message_port.h | 107
-rw-r--r--  util/net/message_server.h (renamed from util/message_server.h) | 16
-rw-r--r--  util/net/message_server_asio.cpp (renamed from util/message_server_asio.cpp) | 0
-rw-r--r--  util/net/message_server_port.cpp (renamed from util/message_server_port.cpp) | 66
-rw-r--r--  util/net/miniwebserver.cpp (renamed from util/miniwebserver.cpp) | 23
-rw-r--r--  util/net/miniwebserver.h (renamed from util/miniwebserver.h) | 10
-rw-r--r--  util/net/sock.cpp | 713
-rw-r--r--  util/net/sock.h | 256
-rw-r--r--  util/optime.h | 36
-rw-r--r--  util/paths.h | 42
-rw-r--r--  util/processinfo.h | 6
-rw-r--r--  util/processinfo_darwin.cpp | 5
-rw-r--r--  util/processinfo_win32.cpp | 3
-rw-r--r--  util/queue.h | 6
-rw-r--r--  util/ramlog.cpp | 190
-rw-r--r--  util/ramlog.h | 144
-rw-r--r--  util/sock.cpp | 235
-rw-r--r--  util/sock.h | 303
-rw-r--r--  util/stringutils.h | 102
-rw-r--r--  util/time_support.h | 80
-rw-r--r--  util/timer.h | 83
-rw-r--r--  util/util.cpp | 22
-rw-r--r--  util/version.cpp | 145
-rw-r--r--  util/version.h | 2
1003 files changed, 191592 insertions, 24726 deletions
diff --git a/.gitignore b/.gitignore
index 3847ca4..26afcde 100644
--- a/.gitignore
+++ b/.gitignore
@@ -3,6 +3,7 @@
.sconsign.dblite
.sconf_temp
perf.data
+massif.out.*
*~
*.swp
@@ -77,6 +78,7 @@ mongoimport
mongosniff
mongobridge
mongostat
+mongotop
bsondump
*.tgz
@@ -115,6 +117,12 @@ debian/mongodb
#osx
.DS_Store
+#third party
+third_party/js-1.7/jsautocfg.h
+third_party/js-1.7/jsautokw.h
+third_party/js-1.7/jskwgen
+third_party/js-1.7/jscpucfg
+
# QtCreator
*.config
*.creator
diff --git a/README b/README
index 7cf7653..ed84a68 100755
--- a/README
+++ b/README
@@ -1,14 +1,21 @@
MongoDB README
-DOCUMENTATION
-
- http://www.mongodb.org/
-
+Welcome to MongoDB!
+
COMPONENTS
mongod - The database process.
mongos - Sharding controller.
mongo - The database shell (uses interactive javascript).
+
+UTILITIES
+
+ mongodump - MongoDB dump tool - for backups, snapshots, etc..
+ mongorestore - MongoDB restore a dump
+ mongoexport - Export a single collection to test (JSON, CSV)
+ mongoimport - Import from JSON or CSV
+ mongofiles - Utility for putting and getting files from MongoDB GridFS
+ mongostat - Show performance statistics
BUILDING
@@ -31,11 +38,20 @@ RUNNING
DRIVERS
- Client drivers for most programming languages are available at mongodb.org.
+ Client drivers for most programming languages are available at mongodb.org. Use the
+ shell ("mongo") for administrative tasks.
-NOTES
+DOCUMENTATION
- Mongo uses memory mapped files. If built as a 32 bit executable, you will
+ http://www.mongodb.org/
+
+MAIL LISTS AND IRC
+
+ http://www.mongodb.org/display/DOCS/Community
+
+32 BIT BUILD NOTES
+
+ MongoDB uses memory mapped files. If built as a 32 bit executable, you will
not be able to work with large (multi-gigabyte) databases. However, 32 bit
builds work fine with small development databases.
@@ -48,4 +64,3 @@ LICENSE
As an exception, the files in the client/, debian/, rpm/,
utils/mongoutils, and all subdirectories thereof are made available under
the terms of the Apache License, version 2.0.
-
diff --git a/SConstruct b/SConstruct
index 41383b1..4e46052 100644
--- a/SConstruct
+++ b/SConstruct
@@ -22,6 +22,7 @@ import urllib
import urllib2
import buildscripts
import buildscripts.bb
+import stat
from buildscripts import utils
buildscripts.bb.checkOk()
@@ -31,12 +32,12 @@ def findSettingsSetup():
sys.path.append( ".." )
sys.path.append( "../../" )
-
-
# --- options ----
options = {}
+options_topass = {}
+
def add_option( name, help , nargs , contibutesToVariantDir , dest=None ):
if dest is None:
@@ -57,7 +58,7 @@ def add_option( name, help , nargs , contibutesToVariantDir , dest=None ):
def get_option( name ):
return GetOption( name )
-def has_option( name ):
+def _has_option( name ):
x = get_option( name )
if x is None:
return False
@@ -70,6 +71,12 @@ def has_option( name ):
return True
+def has_option( name ):
+ x = _has_option(name)
+ options_topass[name] = x
+ return x
+
+
def get_variant_dir():
a = []
@@ -114,6 +121,7 @@ add_option( "64" , "whether to force 64 bit" , 0 , True , "force64" )
add_option( "32" , "whether to force 32 bit" , 0 , True , "force32" )
add_option( "cxx", "compiler to use" , 1 , True )
+add_option( "cc", "compiler to use for c" , 1 , True )
add_option( "cpppath", "Include path if you have headers in a nonstandard directory" , 1 , True )
add_option( "libpath", "Library path if you have libraries in a nonstandard directory" , 1 , True )
@@ -127,10 +135,10 @@ add_option( "staticlibpath", "comma separated list of dirs to search for staticl
add_option( "boost-compiler", "compiler used for boost (gcc41)" , 1 , True , "boostCompiler" )
add_option( "boost-version", "boost version for linking(1_38)" , 1 , True , "boostVersion" )
-
# experimental features
add_option( "mm", "use main memory instead of memory mapped files" , 0 , True )
add_option( "asio" , "Use Asynchronous IO (NOT READY YET)" , 0 , True )
+add_option( "ssl" , "Enable SSL" , 0 , True )
# library choices
add_option( "usesm" , "use spider monkey for javascript" , 0 , True )
@@ -139,20 +147,24 @@ add_option( "usev8" , "use v8 for javascript" , 0 , True )
# mongo feature options
add_option( "noshell", "don't build shell" , 0 , True )
add_option( "safeshell", "don't let shell scripts run programs (still, don't run untrusted scripts)" , 0 , True )
+add_option( "win2008plus", "use newer operating system API features" , 0 , False )
# dev tools
add_option( "d", "debug build no optimization, etc..." , 0 , True , "debugBuild" )
add_option( "dd", "debug build no optimization, additional debug logging, etc..." , 0 , False , "debugBuildAndLogging" )
add_option( "durableDefaultOn" , "have durable default to on" , 0 , True )
+add_option( "durableDefaultOff" , "have durable default to off" , 0 , True )
add_option( "pch" , "use precompiled headers to speed up the build (experimental)" , 0 , True , "usePCH" )
add_option( "distcc" , "use distcc for distributing builds" , 0 , False )
+add_option( "clang" , "use clang++ rather than g++ (experimental)" , 0 , True )
# debugging/profiling help
# to use CPUPROFILE=/tmp/profile
# to view pprof -gv mongod /tmp/profile
add_option( "pg", "link against profiler" , 0 , False , "profile" )
+add_option( "tcmalloc" , "link against tcmalloc" , 0 , False )
add_option( "gdbserver" , "build in gdb server support" , 0 , True )
add_option( "heapcheck", "link to heap-checking malloc-lib and look for memory leaks during tests" , 0 , False )
@@ -211,6 +223,13 @@ env = Environment( MSVS_ARCH=msarch , tools = ["default", "gch"], toolpath = '.'
if has_option( "cxx" ):
env["CC"] = get_option( "cxx" )
env["CXX"] = get_option( "cxx" )
+elif has_option("clang"):
+ env["CC"] = 'clang'
+ env["CXX"] = 'clang++'
+
+if has_option( "cc" ):
+ env["CC"] = get_option( "cc" )
+
env["LIBPATH"] = []
if has_option( "libpath" ):
@@ -222,13 +241,15 @@ if has_option( "cpppath" ):
env.Append( CPPDEFINES=[ "_SCONS" , "MONGO_EXPOSE_MACROS" ] )
env.Append( CPPPATH=[ "." ] )
-
if has_option( "safeshell" ):
env.Append( CPPDEFINES=[ "MONGO_SAFE_SHELL" ] )
if has_option( "durableDefaultOn" ):
env.Append( CPPDEFINES=[ "_DURABLEDEFAULTON" ] )
+if has_option( "durableDefaultOff" ):
+ env.Append( CPPDEFINES=[ "_DURABLEDEFAULTOFF" ] )
+
boostCompiler = GetOption( "boostCompiler" )
if boostCompiler is None:
boostCompiler = ""
@@ -243,6 +264,7 @@ else:
if ( not ( usesm or usev8 or justClientLib) ):
usesm = True
+ options_topass["usesm"] = True
distBuild = len( COMMAND_LINE_TARGETS ) == 1 and ( str( COMMAND_LINE_TARGETS[0] ) == "s3dist" or str( COMMAND_LINE_TARGETS[0] ) == "dist" )
@@ -305,47 +327,53 @@ if has_option( "full" ):
# ------ SOURCE FILE SETUP -----------
-commonFiles = Split( "pch.cpp buildinfo.cpp db/common.cpp db/indexkey.cpp db/jsobj.cpp bson/oid.cpp db/json.cpp db/lasterror.cpp db/nonce.cpp db/queryutil.cpp db/projection.cpp shell/mongo.cpp db/security_key.cpp" )
-commonFiles += [ "util/background.cpp" , "util/mmap.cpp" , "util/sock.cpp" , "util/util.cpp" , "util/file_allocator.cpp" , "util/message.cpp" ,
- "util/assert_util.cpp" , "util/log.cpp" , "util/httpclient.cpp" , "util/md5main.cpp" , "util/base64.cpp", "util/concurrency/vars.cpp", "util/concurrency/task.cpp", "util/debug_util.cpp",
+commonFiles = Split( "pch.cpp buildinfo.cpp db/indexkey.cpp db/jsobj.cpp bson/oid.cpp db/json.cpp db/lasterror.cpp db/nonce.cpp db/queryutil.cpp db/querypattern.cpp db/projection.cpp shell/mongo.cpp db/security_common.cpp db/security_commands.cpp" )
+commonFiles += [ "util/background.cpp" , "util/util.cpp" , "util/file_allocator.cpp" ,
+ "util/assert_util.cpp" , "util/log.cpp" , "util/ramlog.cpp" , "util/md5main.cpp" , "util/base64.cpp", "util/concurrency/vars.cpp", "util/concurrency/task.cpp", "util/debug_util.cpp",
"util/concurrency/thread_pool.cpp", "util/password.cpp", "util/version.cpp", "util/signal_handlers.cpp",
"util/histogram.cpp", "util/concurrency/spin_lock.cpp", "util/text.cpp" , "util/stringutils.cpp" ,
"util/concurrency/synchronization.cpp" ]
-commonFiles += Glob( "util/*.c" )
+commonFiles += [ "util/net/sock.cpp" , "util/net/httpclient.cpp" , "util/net/message.cpp" , "util/net/message_port.cpp" , "util/net/listen.cpp" ]
+commonFiles += Glob( "util/*.c" )
commonFiles += Split( "client/connpool.cpp client/dbclient.cpp client/dbclient_rs.cpp client/dbclientcursor.cpp client/model.cpp client/syncclusterconnection.cpp client/distlock.cpp s/shardconnection.cpp" )
#mmap stuff
+coreDbFiles = [ "db/commands.cpp" ]
+coreServerFiles = [ "util/net/message_server_port.cpp" ,
+ "client/parallel.cpp" , "db/common.cpp",
+ "util/net/miniwebserver.cpp" , "db/dbwebserver.cpp" ,
+ "db/matcher.cpp" , "db/dbcommands_generic.cpp" , "db/dbmessage.cpp" ]
+
+mmapFiles = [ "util/mmap.cpp" ]
+
if has_option( "mm" ):
- commonFiles += [ "util/mmap_mm.cpp" ]
+ mmapFiles += [ "util/mmap_mm.cpp" ]
elif os.sys.platform == "win32":
- commonFiles += [ "util/mmap_win.cpp" ]
+ mmapFiles += [ "util/mmap_win.cpp" ]
else:
- commonFiles += [ "util/mmap_posix.cpp" ]
+ mmapFiles += [ "util/mmap_posix.cpp" ]
-coreDbFiles = [ "db/commands.cpp" ]
-coreServerFiles = [ "util/message_server_port.cpp" ,
- "client/parallel.cpp" ,
- "util/miniwebserver.cpp" , "db/dbwebserver.cpp" ,
- "db/matcher.cpp" , "db/dbcommands_generic.cpp" ]
+coreServerFiles += mmapFiles
processInfoFiles = [ "util/processinfo.cpp" ]
if os.path.exists( "util/processinfo_" + os.sys.platform + ".cpp" ):
processInfoFiles += [ "util/processinfo_" + os.sys.platform + ".cpp" ]
+elif os.sys.platform == "linux3":
+ processInfoFiles += [ "util/processinfo_linux2.cpp" ]
else:
processInfoFiles += [ "util/processinfo_none.cpp" ]
coreServerFiles += processInfoFiles
-
-
if has_option( "asio" ):
- coreServerFiles += [ "util/message_server_asio.cpp" ]
+ coreServerFiles += [ "util/net/message_server_asio.cpp" ]
-serverOnlyFiles = Split( "util/logfile.cpp util/alignedbuilder.cpp db/mongommf.cpp db/dur.cpp db/durop.cpp db/dur_writetodatafiles.cpp db/dur_preplogbuffer.cpp db/dur_commitjob.cpp db/dur_recover.cpp db/dur_journal.cpp db/query.cpp db/update.cpp db/introspect.cpp db/btree.cpp db/clientcursor.cpp db/tests.cpp db/repl.cpp db/repl/rs.cpp db/repl/consensus.cpp db/repl/rs_initiate.cpp db/repl/replset_commands.cpp db/repl/manager.cpp db/repl/health.cpp db/repl/heartbeat.cpp db/repl/rs_config.cpp db/repl/rs_rollback.cpp db/repl/rs_sync.cpp db/repl/rs_initialsync.cpp db/oplog.cpp db/repl_block.cpp db/btreecursor.cpp db/cloner.cpp db/namespace.cpp db/cap.cpp db/matcher_covered.cpp db/dbeval.cpp db/restapi.cpp db/dbhelpers.cpp db/instance.cpp db/client.cpp db/database.cpp db/pdfile.cpp db/cursor.cpp db/security_commands.cpp db/security.cpp db/queryoptimizer.cpp db/extsort.cpp db/cmdline.cpp" )
+# mongod files - also files used in tools. present in dbtests, but not in mongos and not in client libs.
+serverOnlyFiles = Split( "util/compress.cpp db/key.cpp db/btreebuilder.cpp util/logfile.cpp util/alignedbuilder.cpp db/mongommf.cpp db/dur.cpp db/durop.cpp db/dur_writetodatafiles.cpp db/dur_preplogbuffer.cpp db/dur_commitjob.cpp db/dur_recover.cpp db/dur_journal.cpp db/introspect.cpp db/btree.cpp db/clientcursor.cpp db/tests.cpp db/repl.cpp db/repl/rs.cpp db/repl/consensus.cpp db/repl/rs_initiate.cpp db/repl/replset_commands.cpp db/repl/manager.cpp db/repl/health.cpp db/repl/heartbeat.cpp db/repl/rs_config.cpp db/repl/rs_rollback.cpp db/repl/rs_sync.cpp db/repl/rs_initialsync.cpp db/oplog.cpp db/repl_block.cpp db/btreecursor.cpp db/cloner.cpp db/namespace.cpp db/cap.cpp db/matcher_covered.cpp db/dbeval.cpp db/restapi.cpp db/dbhelpers.cpp db/instance.cpp db/client.cpp db/database.cpp db/pdfile.cpp db/record.cpp db/cursor.cpp db/security.cpp db/queryoptimizer.cpp db/queryoptimizercursor.cpp db/extsort.cpp db/cmdline.cpp" )
-serverOnlyFiles += [ "db/index.cpp" ] + Glob( "db/geo/*.cpp" )
+serverOnlyFiles += [ "db/index.cpp" , "db/scanandorder.cpp" ] + Glob( "db/geo/*.cpp" ) + Glob( "db/ops/*.cpp" )
serverOnlyFiles += [ "db/dbcommands.cpp" , "db/dbcommands_admin.cpp" ]
serverOnlyFiles += Glob( "db/commands/*.cpp" )
@@ -361,10 +389,8 @@ elif usev8:
else:
scriptingFiles += [ "scripting/engine_none.cpp" ]
-coreServerFiles += scriptingFiles
-
coreShardFiles = [ "s/config.cpp" , "s/grid.cpp" , "s/chunk.cpp" , "s/shard.cpp" , "s/shardkey.cpp" ]
-shardServerFiles = coreShardFiles + Glob( "s/strategy*.cpp" ) + [ "s/commands_admin.cpp" , "s/commands_public.cpp" , "s/request.cpp" , "s/client.cpp" , "s/cursors.cpp" , "s/server.cpp" , "s/config_migrate.cpp" , "s/s_only.cpp" , "s/stats.cpp" , "s/balance.cpp" , "s/balancer_policy.cpp" , "db/cmdline.cpp" , "s/writeback_listener.cpp" , "s/shard_version.cpp" ]
+shardServerFiles = coreShardFiles + Glob( "s/strategy*.cpp" ) + [ "s/commands_admin.cpp" , "s/commands_public.cpp" , "s/request.cpp" , "s/client.cpp" , "s/cursors.cpp" , "s/server.cpp" , "s/config_migrate.cpp" , "s/s_only.cpp" , "s/stats.cpp" , "s/balance.cpp" , "s/balancer_policy.cpp" , "db/cmdline.cpp" , "s/writeback_listener.cpp" , "s/shard_version.cpp", "s/mr_shard.cpp", "s/security.cpp" ]
serverOnlyFiles += coreShardFiles + [ "s/d_logic.cpp" , "s/d_writeback.cpp" , "s/d_migrate.cpp" , "s/d_state.cpp" , "s/d_split.cpp" , "client/distlock_test.cpp" , "s/d_chunk_manager.cpp" ]
serverOnlyFiles += [ "db/module.cpp" ] + Glob( "db/modules/*.cpp" )
@@ -463,7 +489,7 @@ if "darwin" == os.sys.platform:
env.Append( CPPPATH=filterExists(["/sw/include" , "/opt/local/include"]) )
env.Append( LIBPATH=filterExists(["/sw/lib/", "/opt/local/lib"]) )
-elif "linux2" == os.sys.platform:
+elif "linux2" == os.sys.platform or "linux3" == os.sys.platform:
linux = True
platform = "linux"
@@ -508,6 +534,9 @@ elif "win32" == os.sys.platform:
#if force64:
# release = True
+ if has_option( "win2008plus" ):
+ env.Append( CPPDEFINES=[ "MONGO_USE_SRW_ON_WINDOWS" ] )
+
for pathdir in env['ENV']['PATH'].split(os.pathsep):
if os.path.exists(os.path.join(pathdir, 'cl.exe')):
print( "found visual studio at " + pathdir )
@@ -541,20 +570,14 @@ elif "win32" == os.sys.platform:
boostLibs = []
- env.Append(CPPPATH=[ "js/src/" ])
- env.Append(CPPPATH=["../js/src/"])
- env.Append(LIBPATH=["../js/src"])
- env.Append(LIBPATH=["../js/"])
-
- env.Append( CPPDEFINES=[ "OLDJS" ] )
env.Append( CPPDEFINES=[ "_UNICODE" ] )
env.Append( CPPDEFINES=[ "UNICODE" ] )
winSDKHome = findVersion( [ "C:/Program Files/Microsoft SDKs/Windows/", "C:/Program Files (x86)/Microsoft SDKs/Windows/" ] ,
- [ "v7.0A", "v7.0", "v6.1", "v6.0a", "v6.0" ] )
+ [ "v7.1", "v7.0A", "v7.0", "v6.1", "v6.0a", "v6.0" ] )
print( "Windows SDK Root '" + winSDKHome + "'" )
- env.Append( CPPPATH=[ boostDir , "pcre-7.4" , winSDKHome + "/Include" ] )
+ env.Append( CPPPATH=[ boostDir , winSDKHome + "/Include" ] )
# consider adding /MP build with multiple processes option.
@@ -565,23 +588,25 @@ elif "win32" == os.sys.platform:
# some warnings we don't like:
env.Append( CPPFLAGS=" /wd4355 /wd4800 /wd4267 /wd4244 " )
- env.Append( CPPDEFINES=["WIN32","_CONSOLE","_CRT_SECURE_NO_WARNINGS","HAVE_CONFIG_H","PCRE_STATIC","SUPPORT_UCP","SUPPORT_UTF8","PSAPI_VERSION=1" ] )
+ # PSAPI_VERSION relates to process api dll Psapi.dll.
+ env.Append( CPPDEFINES=["_CONSOLE","_CRT_SECURE_NO_WARNINGS","PSAPI_VERSION=1" ] )
- #env.Append( CPPFLAGS=' /Yu"pch.h" ' ) # this would be for pre-compiled headers, could play with it later
+ # this would be for pre-compiled headers, could play with it later
+ #env.Append( CPPFLAGS=' /Yu"pch.h" ' )
- # docs say don't use /FD from command line
- # /Gy funtion level linking
+ # docs say don't use /FD from command line (minimal rebuild)
+ # /Gy function level linking
# /Gm is minimal rebuild, but may not work in parallel mode.
if release:
env.Append( CPPDEFINES=[ "NDEBUG" ] )
- env.Append( CPPFLAGS= " /O2 /MT /Gy /Zi /TP /errorReport:none " )
+ env.Append( CPPFLAGS= " /O2 /Gy " )
+ env.Append( CPPFLAGS= " /MT /Zi /TP /errorReport:none " )
# TODO: this has caused some linking problems :
# /GL whole program optimization
# /LTCG link time code generation
env.Append( CPPFLAGS= " /GL " )
env.Append( LINKFLAGS=" /LTCG " )
else:
-
# /Od disable optimization
# /ZI debug info w/edit & continue
# /TP it's a c++ file
@@ -596,10 +621,6 @@ elif "win32" == os.sys.platform:
if debugLogging:
env.Append( CPPDEFINES=[ "_DEBUG" ] )
- if os.path.exists("../readline/lib") :
- env.Append( LIBPATH=["../readline/lib"] )
- env.Append( CPPPATH=["../readline/include"] )
-
if force64 and os.path.exists( boostDir + "/lib/vs2010_64" ):
env.Append( LIBPATH=[ boostDir + "/lib/vs2010_64" ] )
elif not force64 and os.path.exists( boostDir + "/lib/vs2010_32" ):
@@ -618,26 +639,6 @@ elif "win32" == os.sys.platform:
else:
env.Append( LINKFLAGS=" /NODEFAULTLIB:MSVCPRT /NODEFAULTLIB:MSVCRT " )
- def pcreFilter(x):
- name = x.name
- if x.name.endswith( "dftables.c" ):
- return False
- if x.name.endswith( "pcredemo.c" ):
- return False
- if x.name.endswith( "pcretest.c" ):
- return False
- if x.name.endswith( "unittest.cc" ):
- return False
- if x.name.endswith( "pcregrep.c" ):
- return False
- return True
-
- pcreFiles = []
- pcreFiles += filter( pcreFilter , Glob( "pcre-7.4/*.c" ) )
- pcreFiles += filter( pcreFilter , Glob( "pcre-7.4/*.cc" ) )
- commonFiles += pcreFiles
- allClientFiles += pcreFiles
-
winLibString = "ws2_32.lib kernel32.lib advapi32.lib Psapi.lib"
if force64:
@@ -668,11 +669,15 @@ if nix:
if has_option( "distcc" ):
env["CXX"] = "distcc " + env["CXX"]
+ # -Winvalid-pch: warn if a precompiled header is found in the search path but can't be used.
env.Append( CPPFLAGS="-fPIC -fno-strict-aliasing -ggdb -pthread -Wall -Wsign-compare -Wno-unknown-pragmas -Winvalid-pch" )
# env.Append( " -Wconversion" ) TODO: this doesn't really work yet
if linux:
env.Append( CPPFLAGS=" -Werror " )
- env.Append( CPPFLAGS=" -fno-builtin-memcmp " ) # glibc's memcmp is faster than gcc's
+ if not has_option('clang'):
+ env.Append( CPPFLAGS=" -fno-builtin-memcmp " ) # glibc's memcmp is faster than gcc's
+
+ env.Append( CPPDEFINES="_FILE_OFFSET_BITS=64" )
env.Append( CXXFLAGS=" -Wnon-virtual-dtor " )
env.Append( LINKFLAGS=" -fPIC -pthread -rdynamic" )
env.Append( LIBS=[] )
@@ -688,7 +693,7 @@ if nix:
env.Append( CPPFLAGS=" -O0 -fstack-protector " );
env['ENV']['GLIBCXX_FORCE_NEW'] = 1; # play nice with valgrind
else:
- env.Append( CPPFLAGS=" -O3" )
+ env.Append( CPPFLAGS=" -O3 " )
#env.Append( CPPFLAGS=" -fprofile-generate" )
#env.Append( LINKFLAGS=" -fprofile-generate" )
# then:
@@ -717,26 +722,59 @@ if nix:
# pre-compiled headers
if usePCH and 'Gch' in dir( env ):
print( "using precompiled headers" )
+ if has_option('clang'):
+ #env['GCHSUFFIX'] = '.pch' # clang++ uses pch.h.pch rather than pch.h.gch
+ #env.Prepend( CXXFLAGS=' -include pch.h ' ) # clang++ only uses pch from command line
+ print( "ERROR: clang pch is broken for now" )
+ Exit(1)
env['Gch'] = env.Gch( [ "pch.h" ] )[0]
elif os.path.exists('pch.h.gch'):
print( "removing precompiled headers" )
os.unlink('pch.h.gch') # gcc uses the file if it exists
if usev8:
- env.Append( CPPPATH=["../v8/include/"] )
- env.Append( LIBPATH=["../v8/"] )
-
+ env.Prepend( CPPPATH=["../v8/include/"] )
+ env.Prepend( LIBPATH=["../v8/"] )
if "uname" in dir(os):
hacks = buildscripts.findHacks( os.uname() )
if hacks is not None:
hacks.insert( env , { "linux64" : linux64 } )
+if has_option( "ssl" ):
+ env.Append( CPPDEFINES=["MONGO_SSL"] )
+ env.Append( LIBS=["ssl"] )
+ if darwin:
+ env.Append( LIBS=["crypto"] )
+
try:
umask = os.umask(022)
except OSError:
pass
+if not windows:
+ for keysuffix in [ "1" , "2" ]:
+ keyfile = "jstests/libs/key%s" % keysuffix
+ os.chmod( keyfile , stat.S_IWUSR|stat.S_IRUSR )
+
+for x in os.listdir( "third_party" ):
+ if not x.endswith( ".py" ) or x.find( "#" ) >= 0:
+ continue
+
+ shortName = x.rpartition( "." )[0]
+ path = "third_party/%s" % x
+
+
+ myModule = imp.load_module( "third_party_%s" % shortName , open( path , "r" ) , path , ( ".py" , "r" , imp.PY_SOURCE ) )
+ fileLists = { "commonFiles" : commonFiles , "serverOnlyFiles" : serverOnlyFiles , "scriptingFiles" : scriptingFiles }
+
+ options_topass["windows"] = windows
+ options_topass["nix"] = nix
+
+ myModule.configure( env , fileLists , options_topass )
+
+coreServerFiles += scriptingFiles
+
# --- check system ---
def getSysInfo():
@@ -783,7 +821,7 @@ def bigLibString( myenv ):
return s
-def doConfigure( myenv , needPcre=True , shell=False ):
+def doConfigure( myenv , shell=False ):
conf = Configure(myenv)
myenv["LINKFLAGS_CLEAN"] = list( myenv["LINKFLAGS"] )
myenv["LIBS_CLEAN"] = list( myenv["LIBS"] )
@@ -836,10 +874,6 @@ def doConfigure( myenv , needPcre=True , shell=False ):
return False
- if needPcre and not conf.CheckCXXHeader( 'pcrecpp.h' ):
- print( "can't find pcre" )
- Exit(1)
-
if not conf.CheckCXXHeader( "boost/filesystem/operations.hpp" ):
print( "can't find boost headers" )
if shell:
@@ -866,10 +900,6 @@ def doConfigure( myenv , needPcre=True , shell=False ):
if not conf.CheckCXXHeader( "execinfo.h" ):
myenv.Append( CPPDEFINES=[ "NOEXECINFO" ] )
- if nix and needPcre:
- myCheckLib( "pcrecpp" , True )
- myCheckLib( "pcre" , True )
-
myenv["_HAVEPCAP"] = myCheckLib( ["pcap", "wpcap"] )
removeIfInList( myenv["LIBS"] , "pcap" )
removeIfInList( myenv["LIBS"] , "wpcap" )
@@ -880,76 +910,15 @@ def doConfigure( myenv , needPcre=True , shell=False ):
else:
m.configure( conf , myenv )
- # XP_* is for spidermonkey.
- # this is outside of usesm block so don't have to rebuild for java
- if windows:
- myenv.Append( CPPDEFINES=[ "XP_WIN" ] )
- else:
- myenv.Append( CPPDEFINES=[ "XP_UNIX" ] )
-
if solaris:
conf.CheckLib( "nsl" )
- if usesm:
-
- # see http://www.mongodb.org/pages/viewpageattachments.action?pageId=12157032
- J = [ "mozjs" , "js", "js_static" ]
- if windows:
- if msarch == "amd64":
- if release:
- J = [ "js64r", "js", "mozjs" , "js_static" ]
- else:
- J = "js64d"
- print( "looking for js64d.lib for spidermonkey. (available at mongodb.org prebuilt)" );
- else:
- if not force32:
- print( "Assuming a 32 bit build is desired" )
- if release:
- J = [ "js32r", "js", "mozjs" , "js_static" ]
- else:
- J = [ "js32d", "js", "mozjs" , "js_static" ]
-
- myCheckLib( J , True )
- mozHeader = "js"
- if bigLibString(myenv).find( "mozjs" ) >= 0:
- mozHeader = "mozjs"
-
- if not conf.CheckHeader( mozHeader + "/jsapi.h" ):
- if conf.CheckHeader( "jsapi.h" ):
- myenv.Append( CPPDEFINES=[ "OLDJS" ] )
- else:
- print( "no spider monkey headers!" )
- Exit(1)
-
if usev8:
if debugBuild:
myCheckLib( [ "v8_g" , "v8" ] , True )
else:
myCheckLib( "v8" , True )
- if shell:
- haveReadLine = False
- if darwin:
- myenv.Append( CPPDEFINES=[ "USE_READLINE" ] )
- if force64:
- myCheckLib( "readline" , True )
- myCheckLib( "ncurses" , True )
- else:
- myenv.Append( LINKFLAGS=" /usr/lib/libreadline.dylib " )
- elif openbsd:
- myenv.Append( CPPDEFINES=[ "USE_READLINE" ] )
- myCheckLib( "termcap" , True )
- myCheckLib( "readline" , True )
- elif myCheckLib( "readline" , release and nix , staticOnly=release ):
- myenv.Append( CPPDEFINES=[ "USE_READLINE" ] )
- myCheckLib( "ncurses" , staticOnly=release )
- myCheckLib( "tinfo" , staticOnly=release )
- else:
- print( "\n*** notice: no readline library, mongo shell will not have nice interactive line editing ***\n" )
-
- if linux:
- myCheckLib( "rt" , True )
-
# requires ports devel/libexecinfo to be installed
if freebsd or openbsd:
myCheckLib( "execinfo", True )
@@ -1006,6 +975,10 @@ def doConfigure( myenv , needPcre=True , shell=False ):
myenv.Append(LINKCOM=" $STATICFILES")
myenv.Append(STATICFILES=staticlibfiles)
+ if has_option( "tcmalloc" ):
+ myCheckLib( "tcmalloc" , True ); # if successful, appedded 'tcmalloc' to myenv[ LIBS ]
+
+
return conf.Finish()
env = doConfigure( env )
@@ -1076,8 +1049,13 @@ clientEnv.Prepend( LIBS=[ "mongoclient"] )
clientEnv.Prepend( LIBPATH=["."] )
clientEnv["CPPDEFINES"].remove( "MONGO_EXPOSE_MACROS" )
l = clientEnv[ "LIBS" ]
-removeIfInList( l , "pcre" )
-removeIfInList( l , "pcrecpp" )
+
+# profile guided
+#if windows:
+# if release:
+# env.Append( LINKFLAGS="/PGD:test.pgd" )
+# env.Append( LINKFLAGS="/LTCG:PGINSTRUMENT" )
+# env.Append( LINKFLAGS="/LTCG:PGOPTIMIZE" )
testEnv = env.Clone()
testEnv.Append( CPPPATH=["../"] )
@@ -1095,7 +1073,7 @@ def checkErrorCodes():
checkErrorCodes()
# main db target
-mongodOnlyFiles = [ "db/db.cpp" ]
+mongodOnlyFiles = [ "db/db.cpp", "db/compact.cpp" ]
if windows:
mongodOnlyFiles.append( "util/ntservice.cpp" )
mongod = env.Program( "mongod" , commonFiles + coreDbFiles + coreServerFiles + serverOnlyFiles + mongodOnlyFiles )
@@ -1103,7 +1081,7 @@ Default( mongod )
# tools
allToolFiles = commonFiles + coreDbFiles + coreServerFiles + serverOnlyFiles + [ "client/gridfs.cpp", "tools/tool.cpp" ]
-normalTools = [ "dump" , "restore" , "export" , "import" , "files" , "stat" ]
+normalTools = [ "dump" , "restore" , "export" , "import" , "files" , "stat" , "top" ]
env.Alias( "tools" , [ add_exe( "mongo" + x ) for x in normalTools ] )
for x in normalTools:
env.Program( "mongo" + x , allToolFiles + [ "tools/" + x + ".cpp" ] )
@@ -1133,7 +1111,7 @@ clientTests += [ clientEnv.Program( "authTest" , [ "client/examples/authTest.cpp
clientTests += [ clientEnv.Program( "httpClientTest" , [ "client/examples/httpClientTest.cpp" ] ) ]
clientTests += [ clientEnv.Program( "bsondemo" , [ "bson/bsondemo/bsondemo.cpp" ] ) ]
-# testing
+# dbtests test binary
test = testEnv.Program( "test" , Glob( "dbtests/*.cpp" ) )
if windows:
testEnv.Alias( "test" , "test.exe" )
@@ -1144,17 +1122,23 @@ clientTests += [ clientEnv.Program( "clientTest" , [ "client/examples/clientTest
mongosniff_built = False
if darwin or clientEnv["_HAVEPCAP"]:
mongosniff_built = True
- sniffEnv = clientEnv.Clone()
+ sniffEnv = env.Clone()
sniffEnv.Append( CPPDEFINES="MONGO_EXPOSE_MACROS" )
+
if not windows:
sniffEnv.Append( LIBS=[ "pcap" ] )
else:
sniffEnv.Append( LIBS=[ "wpcap" ] )
+
+ sniffEnv.Prepend( LIBPATH=["."] )
+ sniffEnv.Append( LIBS=[ "mongotestfiles" ] )
+
sniffEnv.Program( "mongosniff" , "tools/sniffer.cpp" )
# --- shell ---
-env.JSHeader( "shell/mongo.cpp" , ["shell/utils.js","shell/db.js","shell/mongo.js","shell/mr.js","shell/query.js","shell/collection.js"] )
+# note: if you add a file here, you currently also need to add it in engine.cpp
+env.JSHeader( "shell/mongo.cpp" , Glob( "shell/utils*.js" ) + [ "shell/db.js","shell/mongo.js","shell/mr.js","shell/query.js","shell/collection.js"] )
env.JSHeader( "shell/mongo-server.cpp" , [ "shell/servers.js"] )
@@ -1168,50 +1152,21 @@ if release and ( ( darwin and force64 ) or linux64 ):
if noshell:
print( "not building shell" )
elif not onlyServer:
- weird = force64 and not windows and not solaris
-
- if weird:
- shellEnv["CFLAGS"].remove("-m64")
- shellEnv["CXXFLAGS"].remove("-m64")
- shellEnv["LINKFLAGS"].remove("-m64")
- shellEnv["CPPPATH"].remove( "/usr/64/include" )
- shellEnv["LIBPATH"].remove( "/usr/64/lib" )
- shellEnv.Append( CPPPATH=filterExists(["/sw/include" , "/opt/local/include"]) )
- shellEnv.Append( LIBPATH=filterExists(["/sw/lib/", "/opt/local/lib" , "/usr/lib", "/usr/local/lib" ]) )
-
l = shellEnv["LIBS"]
- removeIfInList( l , "pcre" )
- removeIfInList( l , "pcrecpp" )
-
if windows:
shellEnv.Append( LIBS=["winmm.lib"] )
coreShellFiles = [ "shell/dbshell.cpp" , "shell/shell_utils.cpp" , "shell/mongo-server.cpp" ]
- if weird:
- shell32BitFiles = coreShellFiles
- for f in allClientFiles:
- shell32BitFiles.append( "32bit/" + str( f ) )
- for f in scriptingFiles:
- shell32BitFiles.append( "32bit/" + str( f ) )
- for f in processInfoFiles:
- shell32BitFiles.append( "32bit/" + str( f ) )
- shellEnv.VariantDir( "32bit" , "." , duplicate=1 )
- else:
- shellEnv.Prepend( LIBPATH=[ "." ] )
-
- shellEnv = doConfigure( shellEnv , needPcre=False , shell=True )
+ coreShellFiles.append( "third_party/linenoise/linenoise.cpp" )
- if weird:
- mongo = shellEnv.Program( "mongo" , shell32BitFiles )
- else:
- shellEnv.Prepend( LIBS=[ "mongoshellfiles"] )
- mongo = shellEnv.Program( "mongo" , coreShellFiles )
+ shellEnv.Prepend( LIBPATH=[ "." ] )
+
+ shellEnv = doConfigure( shellEnv , shell=True )
- if weird:
- Depends( "32bit/shell/mongo.cpp" , "shell/mongo.cpp" )
- Depends( "32bit/shell/mongo-server.cpp" , "shell/mongo-server.cpp" )
+ shellEnv.Prepend( LIBS=[ "mongoshellfiles"] )
+ mongo = shellEnv.Program( "mongo" , coreShellFiles )
# ---- RUNNING TESTS ----
@@ -1258,7 +1213,7 @@ if not onlyServer and not noshell:
addSmoketest( "smokeClone", [ "mongo", "mongod" ] )
addSmoketest( "smokeRepl", [ "mongo", "mongod", "mongobridge" ] )
addSmoketest( "smokeReplSets", [ "mongo", "mongod", "mongobridge" ] )
- addSmoketest( "smokeDur", [ add_exe( "mongo" ) , add_exe( "mongod" ) ] )
+ addSmoketest( "smokeDur", [ add_exe( "mongo" ) , add_exe( "mongod" ) , add_exe('mongorestore') ] )
addSmoketest( "smokeDisk", [ add_exe( "mongo" ), add_exe( "mongod" ), add_exe( "mongodump" ), add_exe( "mongorestore" ) ] )
addSmoketest( "smokeAuth", [ add_exe( "mongo" ), add_exe( "mongod" ) ] )
addSmoketest( "smokeParallel", [ add_exe( "mongo" ), add_exe( "mongod" ) ] )
@@ -1500,7 +1455,7 @@ env.Alias( "core" , [ add_exe( "mongo" ) , add_exe( "mongod" ) , add_exe( "mongo
#headers
if installSetup.headers:
- for id in [ "", "util/", "util/mongoutils/", "util/concurrency/", "db/" , "db/stats/" , "db/repl/" , "client/" , "bson/", "bson/util/" , "s/" , "scripting/" ]:
+ for id in [ "", "util/", "util/net/", "util/mongoutils/", "util/concurrency/", "db/" , "db/stats/" , "db/repl/" , "db/ops/" , "client/" , "bson/", "bson/util/" , "s/" , "scripting/" ]:
env.Install( installDir + "/" + installSetup.headerRoot + "/mongo/" + id , Glob( id + "*.h" ) )
env.Install( installDir + "/" + installSetup.headerRoot + "/mongo/" + id , Glob( id + "*.hpp" ) )
diff --git a/bson/bson-inl.h b/bson/bson-inl.h
index 5b4c490..b86d667 100644
--- a/bson/bson-inl.h
+++ b/bson/bson-inl.h
@@ -1,4 +1,7 @@
-// bsoninlines.h
+/** @file bson-inl.h
+ a goal here is that the most common bson methods can be used inline-only, a la boost.
+ thus some things are inline that wouldn't necessarily be otherwise.
+*/
/* Copyright 2009 10gen Inc.
*
@@ -18,18 +21,158 @@
#pragma once
#include <map>
-#include "util/atomic_int.h"
-#include "util/misc.h"
-#include "../util/hex.h"
+#include <limits>
+
+#if defined(_WIN32)
+#undef max
+#undef min
+#endif
namespace mongo {
- inline BSONObjIterator BSONObj::begin() {
+ inline bool isNaN(double d) {
+ return d != d;
+ }
+
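The self-comparison above is the standard IEEE 754 NaN test: NaN is the only value that compares unequal to itself. A minimal standalone check (illustrative only, not part of this patch):

    #include <cassert>
    #include <limits>

    int main() {
        double nan = std::numeric_limits<double>::quiet_NaN();
        assert( nan != nan );      // only NaN is unequal to itself
        assert( !(1.0 != 1.0) );   // ordinary doubles are reflexive
        return 0;
    }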
+ /* l and r must be of the same type when called, unless both sides are numbers.
+    this large function lives in the header to facilitate inline-only use of bson.
+ */
+ inline int compareElementValues(const BSONElement& l, const BSONElement& r) {
+ int f;
+
+ switch ( l.type() ) {
+ case EOO:
+ case Undefined: // EOO and Undefined are same canonicalType
+ case jstNULL:
+ case MaxKey:
+ case MinKey:
+ f = l.canonicalType() - r.canonicalType();
+ if ( f<0 ) return -1;
+ return f==0 ? 0 : 1;
+ case Bool:
+ return *l.value() - *r.value();
+ case Timestamp:
+ // unsigned compare for timestamps - note they are not really dates but (ordinal + time_t)
+ if ( l.date() < r.date() )
+ return -1;
+ return l.date() == r.date() ? 0 : 1;
+ case Date:
+ {
+ long long a = (long long) l.Date().millis;
+ long long b = (long long) r.Date().millis;
+ if( a < b )
+ return -1;
+ return a == b ? 0 : 1;
+ }
+ case NumberLong:
+ if( r.type() == NumberLong ) {
+ long long L = l._numberLong();
+ long long R = r._numberLong();
+ if( L < R ) return -1;
+ if( L == R ) return 0;
+ return 1;
+ }
+ goto dodouble;
+ case NumberInt:
+ if( r.type() == NumberInt ) {
+ int L = l._numberInt();
+ int R = r._numberInt();
+ if( L < R ) return -1;
+ return L == R ? 0 : 1;
+ }
+ // else fall through
+ case NumberDouble:
+dodouble:
+ {
+ double left = l.number();
+ double right = r.number();
+ if( left < right )
+ return -1;
+ if( left == right )
+ return 0;
+ if( isNaN(left) )
+ return isNaN(right) ? 0 : -1;
+ return 1;
+ }
+ case jstOID:
+ return memcmp(l.value(), r.value(), 12);
+ case Code:
+ case Symbol:
+ case String:
+ /* todo: a utf sort order version one day... */
+ {
+ // we use memcmp as we allow zeros in UTF8 strings
+ int lsz = l.valuestrsize();
+ int rsz = r.valuestrsize();
+ int common = min(lsz, rsz);
+ int res = memcmp(l.valuestr(), r.valuestr(), common);
+ if( res )
+ return res;
+ // longer string is the greater one
+ return lsz-rsz;
+ }
+ case Object:
+ case Array:
+ return l.embeddedObject().woCompare( r.embeddedObject() );
+ case DBRef: {
+ int lsz = l.valuesize();
+ int rsz = r.valuesize();
+ if ( lsz - rsz != 0 ) return lsz - rsz;
+ return memcmp(l.value(), r.value(), lsz);
+ }
+ case BinData: {
+ int lsz = l.objsize(); // our bin data size in bytes, not including the subtype byte
+ int rsz = r.objsize();
+ if ( lsz - rsz != 0 ) return lsz - rsz;
+ return memcmp(l.value()+4, r.value()+4, lsz+1);
+ }
+ case RegEx: {
+ int c = strcmp(l.regex(), r.regex());
+ if ( c )
+ return c;
+ return strcmp(l.regexFlags(), r.regexFlags());
+ }
+ case CodeWScope : {
+ f = l.canonicalType() - r.canonicalType();
+ if ( f )
+ return f;
+ f = strcmp( l.codeWScopeCode() , r.codeWScopeCode() );
+ if ( f )
+ return f;
+ f = strcmp( l.codeWScopeScopeData() , r.codeWScopeScopeData() );
+ if ( f )
+ return f;
+ return 0;
+ }
+ default:
+ assert( false);
+ }
+ return -1;
+ }
+
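Since compareElementValues() is now inline, a headers-only sketch of the cross-type numeric path is possible; this assumes bson/bson.h from this tree is on the include path (hypothetical driver code, not from the patch):

    #include <iostream>
    #include "bson/bson.h"
    using namespace mongo;

    int main() {
        BSONObjBuilder a, b;
        a.append( "x", 5 );     // stored as NumberInt
        b.append( "x", 5.0 );   // stored as NumberDouble
        BSONObj l = a.obj(), r = b.obj();
        // NumberInt falls through to the double comparison above, so 5 == 5.0
        std::cout << compareElementValues( l.firstElement(), r.firstElement() ) << std::endl; // 0
        return 0;
    }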
+ /* wo = "well ordered" */
+ inline int BSONElement::woCompare( const BSONElement &e,
+ bool considerFieldName ) const {
+ int lt = (int) canonicalType();
+ int rt = (int) e.canonicalType();
+ int x = lt - rt;
+ if( x != 0 && (!isNumber() || !e.isNumber()) )
+ return x;
+ if ( considerFieldName ) {
+ x = strcmp(fieldName(), e.fieldName());
+ if ( x != 0 )
+ return x;
+ }
+ x = compareElementValues(*this, e);
+ return x;
+ }
+
+ inline BSONObjIterator BSONObj::begin() const {
return BSONObjIterator(*this);
}
inline BSONObj BSONElement::embeddedObjectUserCheck() const {
- if ( isABSONObj() )
+ if ( MONGO_likely(isABSONObj()) )
return BSONObj(value());
stringstream ss;
ss << "invalid parameter: expected an object (" << fieldName() << ")";
@@ -48,6 +191,21 @@ namespace mongo {
return BSONObj( value() + 4 + 4 + strSizeWNull );
}
+ // deep (full) equality
+ inline bool BSONObj::equal(const BSONObj &rhs) const {
+ BSONObjIterator i(*this);
+ BSONObjIterator j(rhs);
+ BSONElement l,r;
+ do {
+ // so far, equal...
+ l = i.next();
+ r = j.next();
+ if ( l.eoo() )
+ return r.eoo();
+ } while( l == r );
+ return false;
+ }
+
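Continuing that sketch: equal() above is the element-wise comparison that now backs BSONObj::operator==, while binaryEqual() (renamed from woEqual in bsonobj.h later in this patch) is a raw memcmp, so the two can disagree on mixed numeric types:

    // l is { x: 5 } as NumberInt, r is { x: 5.0 } as NumberDouble
    // l.equal( r )       -> true : elements compare equal by value
    // l.binaryEqual( r ) -> false: the encoded bytes differ (type tag and width)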
inline NOINLINE_DECL void BSONObj::_assertInvalid() const {
StringBuilder ss;
int os = objsize();
@@ -64,9 +222,10 @@ namespace mongo {
 getOwned() method; the presumption being that that is better.
*/
inline NOINLINE_DECL BSONObj BSONObj::copy() const {
- char *p = (char*) malloc(objsize());
- memcpy(p, objdata(), objsize());
- return BSONObj(p, true);
+ Holder *h = (Holder*) malloc(objsize() + sizeof(unsigned));
+ h->zero();
+ memcpy(h->data, objdata(), objsize());
+ return BSONObj(h);
}
inline BSONObj BSONObj::getOwned() const {
@@ -88,16 +247,18 @@ namespace mongo {
return b.obj();
}
- inline bool BSONObj::hasElement(const char *name) const {
- if ( !isEmpty() ) {
- BSONObjIterator it(*this);
- while ( it.moreWithEOO() ) {
- BSONElement e = it.next();
- if ( strcmp(name, e.fieldName()) == 0 )
- return true;
+ inline void BSONObj::getFields(unsigned n, const char **fieldNames, BSONElement *fields) const {
+ BSONObjIterator i(*this);
+ while ( i.more() ) {
+ BSONElement e = i.next();
+ const char *p = e.fieldName();
+ for( unsigned i = 0; i < n; i++ ) {
+ if( strcmp(p, fieldNames[i]) == 0 ) {
+ fields[i] = e;
+ break;
+ }
}
}
- return false;
}
inline BSONElement BSONObj::getField(const StringData& name) const {
@@ -110,6 +271,21 @@ namespace mongo {
return BSONElement();
}
+ inline int BSONObj::getIntField(const char *name) const {
+ BSONElement e = getField(name);
+ return e.isNumber() ? (int) e.number() : std::numeric_limits< int >::min();
+ }
+
+ inline bool BSONObj::getBoolField(const char *name) const {
+ BSONElement e = getField(name);
+ return e.type() == Bool ? e.boolean() : false;
+ }
+
+ inline const char * BSONObj::getStringField(const char *name) const {
+ BSONElement e = getField(name);
+ return e.type() == String ? e.valuestr() : "";
+ }
+
/* add all the fields from the object specified to this object */
inline BSONObjBuilder& BSONObjBuilder::appendElements(BSONObj x) {
BSONObjIterator it(x);
@@ -141,7 +317,7 @@ namespace mongo {
}
- inline bool BSONObj::isValid() {
+ inline bool BSONObj::isValid() const {
int x = objsize();
return x > 0 && x <= BSONObjMaxInternalSize;
}
@@ -302,8 +478,6 @@ namespace mongo {
s << ( isArray ? " ]" : " }" );
}
- extern unsigned getRandomNumber();
-
inline void BSONElement::validate() const {
const BSONType t = type();
@@ -398,7 +572,7 @@ namespace mongo {
break;
case RegEx: {
const char *p = value();
- size_t len1 = ( maxLen == -1 ) ? strlen( p ) : mongo::strnlen( p, remain );
+ size_t len1 = ( maxLen == -1 ) ? strlen( p ) : (size_t)mongo::strnlen( p, remain );
//massert( 10318 , "Invalid regex string", len1 != -1 ); // ERH - 4/28/10 - don't think this does anything
p = p + len1 + 1;
size_t len2;
@@ -417,7 +591,7 @@ namespace mongo {
StringBuilder ss;
ss << "BSONElement: bad type " << (int) type();
string msg = ss.str();
- massert( 10320 , msg.c_str(),false);
+ massert( 13655 , msg.c_str(),false);
}
}
totalSize = x + fieldNameSize() + 1; // BSONType
@@ -425,6 +599,72 @@ namespace mongo {
return totalSize;
}
+ inline int BSONElement::size() const {
+ if ( totalSize >= 0 )
+ return totalSize;
+
+ int x = 0;
+ switch ( type() ) {
+ case EOO:
+ case Undefined:
+ case jstNULL:
+ case MaxKey:
+ case MinKey:
+ break;
+ case mongo::Bool:
+ x = 1;
+ break;
+ case NumberInt:
+ x = 4;
+ break;
+ case Timestamp:
+ case mongo::Date:
+ case NumberDouble:
+ case NumberLong:
+ x = 8;
+ break;
+ case jstOID:
+ x = 12;
+ break;
+ case Symbol:
+ case Code:
+ case mongo::String:
+ x = valuestrsize() + 4;
+ break;
+ case DBRef:
+ x = valuestrsize() + 4 + 12;
+ break;
+ case CodeWScope:
+ case Object:
+ case mongo::Array:
+ x = objsize();
+ break;
+ case BinData:
+ x = valuestrsize() + 4 + 1/*subtype*/;
+ break;
+ case RegEx:
+ {
+ const char *p = value();
+ size_t len1 = strlen(p);
+ p = p + len1 + 1;
+ size_t len2;
+ len2 = strlen( p );
+ x = (int) (len1 + 1 + len2 + 1);
+ }
+ break;
+ default:
+ {
+ StringBuilder ss;
+ ss << "BSONElement: bad type " << (int) type();
+ string msg = ss.str();
+ massert(10320 , msg.c_str(),false);
+ }
+ }
+ totalSize = x + fieldNameSize() + 1; // BSONType
+
+ return totalSize;
+ }
+
inline string BSONElement::toString( bool includeFieldName, bool full ) const {
StringBuilder s;
toString(s, includeFieldName, full);
@@ -438,7 +678,7 @@ namespace mongo {
s << "EOO";
break;
case mongo::Date:
- s << "new Date(" << date() << ')';
+ s << "new Date(" << (long long) date() << ')';
break;
case RegEx: {
s << "/" << regex() << '/';
@@ -492,8 +732,8 @@ namespace mongo {
case Symbol:
case mongo::String:
s << '"';
- if ( !full && valuestrsize() > 80 ) {
- s.write(valuestr(), 70);
+ if ( !full && valuestrsize() > 160 ) {
+ s.write(valuestr(), 150);
s << "...\"";
}
else {
@@ -662,4 +902,87 @@ namespace mongo {
b.append( q , t );
return BSONFieldValue<BSONObj>( _name , b.obj() );
}
+
+ // used by jsonString()
+ inline string escape( string s , bool escape_slash=false) {
+ StringBuilder ret;
+ for ( string::iterator i = s.begin(); i != s.end(); ++i ) {
+ switch ( *i ) {
+ case '"':
+ ret << "\\\"";
+ break;
+ case '\\':
+ ret << "\\\\";
+ break;
+ case '/':
+ ret << (escape_slash ? "\\/" : "/");
+ break;
+ case '\b':
+ ret << "\\b";
+ break;
+ case '\f':
+ ret << "\\f";
+ break;
+ case '\n':
+ ret << "\\n";
+ break;
+ case '\r':
+ ret << "\\r";
+ break;
+ case '\t':
+ ret << "\\t";
+ break;
+ default:
+ if ( *i >= 0 && *i <= 0x1f ) {
+ //TODO: these should be utf16 code-units not bytes
+ char c = *i;
+ ret << "\\u00" << toHexLower(&c, 1);
+ }
+ else {
+ ret << *i;
+ }
+ }
+ }
+ return ret.str();
+ }
+
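Expected behavior of escape(), sketched as comments (values assume the code above; note the \u00XX path for control bytes below 0x20 and the escape_slash parameter):

    // escape( "say \"hi\"\n" )    -> say \"hi\"\n   (quote and newline escaped)
    // escape( "a/b" )             -> a/b            (slash kept by default)
    // escape( "a/b", true )       -> a\/b           (escape_slash = true)
    // escape( string("\x01",1) )  -> \u0001         (control byte -> \u00XX)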
+ inline string BSONObj::hexDump() const {
+ stringstream ss;
+ const char *d = objdata();
+ int size = objsize();
+ for( int i = 0; i < size; ++i ) {
+ ss.width( 2 );
+ ss.fill( '0' );
+ ss << hex << (unsigned)(unsigned char)( d[ i ] ) << dec;
+ if ( ( d[ i ] >= '0' && d[ i ] <= '9' ) || ( d[ i ] >= 'A' && d[ i ] <= 'z' ) )
+ ss << '\'' << d[ i ] << '\'';
+ if ( i != size - 1 )
+ ss << ' ';
+ }
+ return ss.str();
+ }
+
+ inline void BSONObjBuilder::appendKeys( const BSONObj& keyPattern , const BSONObj& values ) {
+ BSONObjIterator i(keyPattern);
+ BSONObjIterator j(values);
+
+ while ( i.more() && j.more() ) {
+ appendAs( j.next() , i.next().fieldName() );
+ }
+
+ assert( ! i.more() );
+ assert( ! j.more() );
+ }
+
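appendKeys() renames each value to the corresponding key-pattern field, which is useful for building index-key documents; a hedged sketch using the BSON() macro from these headers:

    BSONObjBuilder b;
    b.appendKeys( BSON( "a" << 1 << "b" << 1 ),   // key pattern
                  BSON( "" << 4 << "" << 5 ) );   // anonymous values
    BSONObj key = b.obj();                        // { a: 4, b: 5 }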
+ inline BSONObj BSONObj::removeField(const StringData& name) const {
+ BSONObjBuilder b;
+ BSONObjIterator i(*this);
+ while ( i.more() ) {
+ BSONElement e = i.next();
+ const char *fname = e.fieldName();
+ if( strcmp(name.data(), fname) )
+ b.append(e);
+ }
+ return b.obj();
+ }
}
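removeField() copies every element except the named one into a fresh builder, so it costs a full object rebuild; usage sketch:

    BSONObj o = BSON( "a" << 1 << "b" << 2 );
    BSONObj t = o.removeField( "a" );   // { b: 2 }, a new owned object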
diff --git a/bson/bson.h b/bson/bson.h
index ba1b751..9515adf 100644
--- a/bson/bson.h
+++ b/bson/bson.h
@@ -1,11 +1,9 @@
-/* NOTE: Standalone bson header for when not using MongoDB.
- See also: bsondemo.
+/** @file bson.h
- MongoDB includes ../db/jsobj.h instead. This file, however, pulls in much less code / dependencies.
-*/
+ Main bson include file for mongodb c++ clients. MongoDB includes ../db/jsobj.h instead.
+ This file, however, pulls in much less code / dependencies.
-/** @file bson.h
- BSON classes
+ @see bsondemo
*/
/*
@@ -25,7 +23,7 @@
*/
/**
- bo and its helpers
+ Main include file for C++ BSON module when using standalone (sans MongoDB client).
"BSON" stands for "binary JSON" -- ie a binary way to represent objects that would be
represented in JSON (plus a few extensions useful for databases & other languages).
@@ -42,10 +40,11 @@
*/
#endif
+#include <cstdlib>
+#include <memory>
#include <iostream>
#include <sstream>
#include <boost/utility.hpp>
-#include "util/builder.h"
namespace bson {
@@ -56,7 +55,7 @@ namespace bson {
public:
assertion( unsigned u , const string& s )
: id( u ) , msg( s ) {
- mongo::StringBuilder ss;
+ stringstream ss;
ss << "BsonAssertion id: " << u << " " << s;
full = ss.str();
}
@@ -101,23 +100,11 @@ namespace mongo {
#endif
}
-#include "../bson/bsontypes.h"
-#include "../bson/oid.h"
-#include "../bson/bsonelement.h"
-#include "../bson/bsonobj.h"
-#include "../bson/bsonmisc.h"
-#include "../bson/bsonobjbuilder.h"
-#include "../bson/bsonobjiterator.h"
-#include "../bson/bson-inl.h"
-
-namespace mongo {
-
- inline unsigned getRandomNumber() {
-#if defined(_WIN32)
- return rand();
-#else
- return random();
-#endif
- }
-
-}
+#include "util/builder.h"
+#include "bsontypes.h"
+#include "oid.h"
+#include "bsonelement.h"
+#include "bsonobj.h"
+#include "bsonobjbuilder.h"
+#include "bsonobjiterator.h"
+#include "bson-inl.h"
diff --git a/bson/bsondemo/bsondemo.cpp b/bson/bsondemo/bsondemo.cpp
index ec83f5e..b53a7b3 100644
--- a/bson/bsondemo/bsondemo.cpp
+++ b/bson/bsondemo/bsondemo.cpp
@@ -4,6 +4,12 @@
Requires boost (headers only).
 Works headers-only (that is, for the parts exercised herein; some functions require .cpp files).
+
+ To build and run:
+ g++ -o bsondemo bsondemo.cpp
+ ./bsondemo
+
+ Windows: project files are available in this directory for bsondemo.cpp for use with Visual Studio.
*/
/*
diff --git a/bson/bsondemo/bsondemo.vcxproj b/bson/bsondemo/bsondemo.vcxproj
index bb82a50..2ad5389 100644
--- a/bson/bsondemo/bsondemo.vcxproj
+++ b/bson/bsondemo/bsondemo.vcxproj
@@ -89,7 +89,7 @@
<ClCompile>
<Optimization>Disabled</Optimization>
<AdditionalIncludeDirectories>c:\boost;\boost</AdditionalIncludeDirectories>
- <PreprocessorDefinitions>WIN32;_DEBUG;_CONSOLE;%(PreprocessorDefinitions)</PreprocessorDefinitions>
+ <PreprocessorDefinitions>_DEBUG;_CONSOLE;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<MinimalRebuild>No</MinimalRebuild>
<BasicRuntimeChecks>EnableFastChecks</BasicRuntimeChecks>
<RuntimeLibrary>MultiThreadedDebugDLL</RuntimeLibrary>
@@ -109,7 +109,7 @@
<ClCompile>
<Optimization>Disabled</Optimization>
<AdditionalIncludeDirectories>c:\boost;\boost</AdditionalIncludeDirectories>
- <PreprocessorDefinitions>WIN32;_DEBUG;_CONSOLE;%(PreprocessorDefinitions)</PreprocessorDefinitions>
+ <PreprocessorDefinitions>_DEBUG;_CONSOLE;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<BasicRuntimeChecks>EnableFastChecks</BasicRuntimeChecks>
<RuntimeLibrary>MultiThreadedDebugDLL</RuntimeLibrary>
<PrecompiledHeader>
@@ -128,7 +128,7 @@
<ClCompile>
<Optimization>MaxSpeed</Optimization>
<IntrinsicFunctions>true</IntrinsicFunctions>
- <PreprocessorDefinitions>WIN32;NDEBUG;_CONSOLE;%(PreprocessorDefinitions)</PreprocessorDefinitions>
+ <PreprocessorDefinitions>NDEBUG;_CONSOLE;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<RuntimeLibrary>MultiThreadedDLL</RuntimeLibrary>
<FunctionLevelLinking>true</FunctionLevelLinking>
<PrecompiledHeader>
@@ -151,7 +151,7 @@
<ClCompile>
<Optimization>MaxSpeed</Optimization>
<IntrinsicFunctions>true</IntrinsicFunctions>
- <PreprocessorDefinitions>WIN32;NDEBUG;_CONSOLE;%(PreprocessorDefinitions)</PreprocessorDefinitions>
+ <PreprocessorDefinitions>NDEBUG;_CONSOLE;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<RuntimeLibrary>MultiThreadedDLL</RuntimeLibrary>
<FunctionLevelLinking>true</FunctionLevelLinking>
<PrecompiledHeader>
diff --git a/bson/bsonelement.h b/bson/bsonelement.h
index 23d59fa..5487d8d 100644
--- a/bson/bsonelement.h
+++ b/bson/bsonelement.h
@@ -20,6 +20,14 @@
#include <vector>
#include <string.h>
#include "util/builder.h"
+#include "bsontypes.h"
+
+namespace mongo {
+ class OpTime;
+ class BSONObj;
+ class BSONElement;
+ class BSONObjBuilder;
+}
namespace bson {
typedef mongo::BSONElement be;
@@ -29,9 +37,6 @@ namespace bson {
namespace mongo {
- class OpTime;
- class BSONElement;
-
/* l and r MUST have same type when called: check that first. */
int compareElementValues(const BSONElement& l, const BSONElement& r);
@@ -120,7 +125,8 @@ namespace mongo {
/** Size of the element.
@param maxLen If maxLen is specified, don't scan more than maxLen bytes to calculate size.
*/
- int size( int maxLen = -1 ) const;
+ int size( int maxLen ) const;
+ int size() const;
/** Wrap this element up as a singleton object. */
BSONObj wrap() const;
@@ -155,15 +161,18 @@ namespace mongo {
return *value() ? true : false;
}
+ bool booleanSafe() const { return isBoolean() && boolean(); }
+
/** Retrieve a java style date value from the element.
Ensure element is of type Date before calling.
+ @see Bool(), trueValue()
*/
Date_t date() const {
return *reinterpret_cast< const Date_t* >( value() );
}
/** Convert the value to boolean, regardless of its type, in a javascript-like fashion
- (i.e., treat zero and null as false).
+ (i.e., treats zero, null, and eoo as false).
*/
bool trueValue() const;
@@ -203,7 +212,9 @@ namespace mongo {
}
/** Size (length) of a string element.
- You must assure of type String first. */
+ You must ensure it is of type String first.
+ @return string size including terminating null
+ */
int valuestrsize() const {
return *reinterpret_cast< const int* >( value() );
}
@@ -359,6 +370,7 @@ namespace mongo {
return *reinterpret_cast< const mongo::OID* >( start );
}
+ /** this does not use fieldName in the comparison, just the value */
bool operator<( const BSONElement& other ) const {
int x = (int)canonicalType() - (int)other.canonicalType();
if ( x < 0 ) return true;
@@ -366,19 +378,30 @@ namespace mongo {
return compareElementValues(*this,other) < 0;
}
- // If maxLen is specified, don't scan more than maxLen bytes.
- explicit BSONElement(const char *d, int maxLen = -1) : data(d) {
- fieldNameSize_ = -1;
- if ( eoo() )
+ // @param maxLen don't scan more than maxLen bytes
+ explicit BSONElement(const char *d, int maxLen) : data(d) {
+ if ( eoo() ) {
+ totalSize = 1;
fieldNameSize_ = 0;
+ }
else {
+ totalSize = -1;
+ fieldNameSize_ = -1;
if ( maxLen != -1 ) {
int size = (int) strnlen( fieldName(), maxLen - 1 );
massert( 10333 , "Invalid field name", size != -1 );
fieldNameSize_ = size + 1;
}
}
+ }
+
+ explicit BSONElement(const char *d) : data(d) {
+ fieldNameSize_ = -1;
totalSize = -1;
+ if ( eoo() ) {
+ fieldNameSize_ = 0;
+ totalSize = 1;
+ }
}
string _asCode() const;
@@ -399,7 +422,10 @@ namespace mongo {
const BSONElement& chk(int t) const {
if ( t != type() ) {
StringBuilder ss;
- ss << "wrong type for BSONElement (" << fieldName() << ") " << type() << " != " << t;
+ if( eoo() )
+ ss << "field not found, expected type " << t;
+ else
+ ss << "wrong type for field (" << fieldName() << ") " << type() << " != " << t;
uasserted(13111, ss.str() );
}
return *this;
@@ -477,7 +503,7 @@ namespace mongo {
return true;
}
- /** True if element is of a numeric type. */
+ /** @return true if element is of a numeric type. */
inline bool BSONElement::isNumber() const {
switch( type() ) {
case NumberLong:
diff --git a/bson/bsonmisc.h b/bson/bsonmisc.h
index 96be12a..8abb487 100644
--- a/bson/bsonmisc.h
+++ b/bson/bsonmisc.h
@@ -29,20 +29,16 @@ namespace mongo {
class BSONObjCmp {
public:
- BSONObjCmp( const BSONObj &_order = BSONObj() ) : order( _order ) {}
+ BSONObjCmp( const BSONObj &order = BSONObj() ) : _order( order ) {}
bool operator()( const BSONObj &l, const BSONObj &r ) const {
- return l.woCompare( r, order ) < 0;
+ return l.woCompare( r, _order ) < 0;
}
+ BSONObj order() const { return _order; }
private:
- BSONObj order;
+ BSONObj _order;
};
- class BSONObjCmpDefaultOrder : public BSONObjCmp {
- public:
- BSONObjCmpDefaultOrder() : BSONObjCmp( BSONObj() ) {}
- };
-
- typedef set< BSONObj, BSONObjCmpDefaultOrder > BSONObjSetDefaultOrder;
+ typedef set<BSONObj,BSONObjCmp> BSONObjSet;
enum FieldCompareResult {
LEFT_SUBFIELD = -2,
@@ -202,4 +198,6 @@ namespace mongo {
int _sizes[SIZE];
};
+ // considers order
+ bool fieldsMatch(const BSONObj& lhs, const BSONObj& rhs);
}
diff --git a/bson/bsonobj.h b/bson/bsonobj.h
index 3ca6b8c..9e948f3 100644
--- a/bson/bsonobj.h
+++ b/bson/bsonobj.h
@@ -17,15 +17,18 @@
#pragma once
+#include <boost/intrusive_ptr.hpp>
#include <set>
#include <list>
#include <vector>
+#include "util/atomic_int.h"
#include "util/builder.h"
#include "stringdata.h"
namespace mongo {
typedef set< BSONElement, BSONElementCmpWithoutField > BSONElementSet;
+ typedef multiset< BSONElement, BSONElementCmpWithoutField > BSONElementMSet;
/**
C++ representation of a "BSON" object -- that is, an extended JSON-style
@@ -69,11 +72,19 @@ namespace mongo {
public:
/** Construct a BSONObj from data in the proper format.
- @param ifree true if the BSONObj should free() the msgdata when
- it destructs.
+ * Use this constructor when something else owns msgdata's buffer
*/
- explicit BSONObj(const char *msgdata, bool ifree = false) {
- init(msgdata, ifree);
+ explicit BSONObj(const char *msgdata) {
+ init(msgdata);
+ }
+
+ /** Construct a BSONObj from data in the proper format.
+ * Use this constructor when you want BSONObj to free(holder) when it is no longer needed
+ * BSONObj::Holder has an extra 4 bytes for a ref-count before the start of the object
+ */
+ class Holder;
+ explicit BSONObj(Holder* holder) {
+ init(holder);
}
explicit BSONObj(const Record *r);
@@ -81,7 +92,9 @@ namespace mongo {
/** Construct an empty BSONObj -- that is, {}. */
BSONObj();
- ~BSONObj() { /*defensive:*/ _objdata = 0; }
+ ~BSONObj() {
+ _objdata = 0; // defensive
+ }
/**
A BSONObj can use a buffer it "owns" or one it does not.
@@ -113,7 +126,9 @@ namespace mongo {
*/
bool isOwned() const { return _holder.get() != 0; }
- /* make sure the data buffer is under the control of this BSONObj and not a remote buffer */
+ /** assure the data buffer is under the control of this BSONObj and not a remote buffer
+ @see isOwned()
+ */
BSONObj getOwned() const;
/** @return a new full (and owned) copy of the object. */
@@ -133,6 +148,11 @@ namespace mongo {
/** note: addFields always adds _id even if not specified */
int addFields(BSONObj& from, set<string>& fields); /* returns n added */
+ /** remove specified field and return a new object with the remaining fields.
+ slowish as it builds a full new object
+ */
+ BSONObj removeField(const StringData& name) const;
+
/** returns # of top level fields in the object
note: iterates to count the fields
*/
@@ -141,20 +161,26 @@ namespace mongo {
/** adds the field names to the fields set. does NOT clear it (appends). */
int getFieldNames(set<string>& fields) const;
- /** return has eoo() true if no match
- supports "." notation to reach into embedded objects
+ /** @return the specified element. element.eoo() will be true if not found.
+ @param name field to find. supports dot (".") notation to reach into embedded objects.
+ for example "x.y" means "in the nested object in field x, retrieve field y"
*/
BSONElement getFieldDotted(const char *name) const;
- /** return has eoo() true if no match
- supports "." notation to reach into embedded objects
+ /** @return the specified element. element.eoo() will be true if not found.
+ @param name field to find. supports dot (".") notation to reach into embedded objects.
+ for example "x.y" means "in the nested object in field x, retrieve field y"
*/
BSONElement getFieldDotted(const string& name) const {
return getFieldDotted( name.c_str() );
}
- /** Like getFieldDotted(), but expands multikey arrays and returns all matching objects
+ /** Like getFieldDotted(), but expands arrays and returns all matching objects.
+ * Turning off expandLastArray allows you to retrieve nested array objects instead of
+ * their contents.
*/
- void getFieldsDotted(const StringData& name, BSONElementSet &ret ) const;
+ void getFieldsDotted(const StringData& name, BSONElementSet &ret, bool expandLastArray = true ) const;
+ void getFieldsDotted(const StringData& name, BSONElementMSet &ret, bool expandLastArray = true ) const;
+
/** Like getFieldDotted(), but returns first array encountered while traversing the
dotted fields of name. The name variable is updated to represent field
names with respect to the returned element. */
@@ -165,6 +191,14 @@ namespace mongo {
*/
BSONElement getField(const StringData& name) const;
+ /** Get several fields at once. This is faster than separate getField() calls as the size of
+ elements iterated can then be calculated only once each.
+ @param n number of fieldNames, and number of elements in the fields array
+ @param fields if a field is found its element is stored in its corresponding position in this array.
+ if not found the array element is unchanged.
+ */
+ void getFields(unsigned n, const char **fieldNames, BSONElement *fields) const;
+
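Usage sketch for getFields(): one pass over the object fills the matching slots, and names that are absent leave their array entry as a default (eoo) element:

    const char* names[] = { "a", "z" };
    BSONElement els[2];
    obj.getFields( 2, names, els );
    // els[0] holds field "a" if present; els[1] stays eoo() when "z" is absent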
/** Get the field of the specified name. eoo() is true on the returned
element if not found.
*/
@@ -184,7 +218,9 @@ namespace mongo {
}
/** @return true if field exists */
- bool hasField( const char * name ) const { return ! getField( name ).eoo(); }
+ bool hasField( const char * name ) const { return !getField(name).eoo(); }
+ /** @return true if field exists */
+ bool hasElement(const char *name) const { return hasField(name); }
/** @return "" if DNE or wrong type */
const char * getStringField(const char *name) const;
@@ -195,7 +231,9 @@ namespace mongo {
/** @return INT_MIN if not present - does some type conversions */
int getIntField(const char *name) const;
- /** @return false if not present */
+ /** @return false if not present
+ @see BSONElement::trueValue()
+ */
bool getBoolField(const char *name) const;
/**
@@ -224,7 +262,7 @@ namespace mongo {
int objsize() const { return *(reinterpret_cast<const int*>(objdata())); }
/** performs a cursory check on the object's size only. */
- bool isValid();
+ bool isValid() const;
 /** @return true if this is a valid user doc
 criteria: isValid() and no '.' or '$' field names
@@ -255,7 +293,6 @@ namespace mongo {
int woCompare(const BSONObj& r, const BSONObj &ordering = BSONObj(),
bool considerFieldName=true) const;
-
bool operator<( const BSONObj& other ) const { return woCompare( other ) < 0; }
bool operator<=( const BSONObj& other ) const { return woCompare( other ) <= 0; }
bool operator>( const BSONObj& other ) const { return woCompare( other ) > 0; }
@@ -266,10 +303,12 @@ namespace mongo {
*/
int woSortOrder( const BSONObj& r , const BSONObj& sortKey , bool useDotted=false ) const;
+ bool equal(const BSONObj& r) const;
+
/** This is "shallow equality" -- ints and doubles won't match. for a
deep equality test use woCompare (which is slower).
*/
- bool woEqual(const BSONObj& r) const {
+ bool binaryEqual(const BSONObj& r) const {
int os = objsize();
if ( os == r.objsize() ) {
return (os == 0 || memcmp(objdata(),r.objdata(),os)==0);
@@ -280,8 +319,13 @@ namespace mongo {
/** @return first field of the object */
BSONElement firstElement() const { return BSONElement(objdata() + 4); }
- /** @return true if field exists in the object */
- bool hasElement(const char *name) const;
+ /** faster than firstElement().fieldName() - for the first element we can easily find the fieldname without
+ computing the element size.
+ */
+ const char * firstElementFieldName() const {
+ const char *p = objdata() + 4;
+ return *p == EOO ? "" : p+1;
+ }
/** Get the _id field from the object. For good performance drivers should
assure that _id is the first element of the object; however, correct operation
@@ -315,9 +359,7 @@ namespace mongo {
/** @return an md5 value for this object. */
string md5() const;
- bool operator==( const BSONObj& other ) const {
- return woCompare( other ) == 0;
- }
+ bool operator==( const BSONObj& other ) const { return equal( other ); }
enum MatchType {
Equality = 0,
@@ -376,34 +418,52 @@ namespace mongo {
...
}
*/
- BSONObjIterator begin();
+ BSONObjIterator begin() const;
void appendSelfToBufBuilder(BufBuilder& b) const {
assert( objsize() );
b.appendBuf(reinterpret_cast<const void *>( objdata() ), objsize());
}
- private:
- class Holder {
+#pragma pack(1)
+ class Holder : boost::noncopyable {
+ private:
+ Holder(); // this class should never be explicitly created
+ AtomicUInt refCount;
public:
- Holder( const char *objdata ) :
- _objdata( objdata ) {
- }
- ~Holder() {
- free((void *)_objdata);
- _objdata = 0;
+ char data[4]; // start of object
+
+ void zero() { refCount.zero(); }
+
+ // these are called automatically by boost::intrusive_ptr
+ friend void intrusive_ptr_add_ref(Holder* h) { h->refCount++; }
+ friend void intrusive_ptr_release(Holder* h) {
+#if defined(_DEBUG) // can't use dassert or DEV here
+ assert((int)h->refCount > 0); // make sure we haven't already freed the buffer
+#endif
+ if(--(h->refCount) == 0){
+#if defined(_DEBUG)
+ unsigned sz = (unsigned&) *h->data;
+ assert(sz < BSONObjMaxInternalSize * 3);
+ memset(h->data, 0xdd, sz);
+#endif
+ free(h);
+ }
}
- private:
- const char *_objdata;
};
+#pragma pack()
+ private:
const char *_objdata;
- boost::shared_ptr< Holder > _holder;
+ boost::intrusive_ptr< Holder > _holder;
void _assertInvalid() const;
- void init(const char *data, bool ifree) {
- if ( ifree )
- _holder.reset( new Holder( data ) );
+
+ void init(Holder *holder) {
+ _holder = holder; // holder is now managed by intrusive_ptr
+ init(holder->data);
+ }
+ void init(const char *data) {
_objdata = data;
if ( !isValid() )
_assertInvalid();
diff --git a/bson/bsonobjbuilder.h b/bson/bsonobjbuilder.h
index a39b529..86a52ac 100644
--- a/bson/bsonobjbuilder.h
+++ b/bson/bsonobjbuilder.h
@@ -24,10 +24,15 @@
#include <limits>
#include <cmath>
-using namespace std;
+#include <boost/static_assert.hpp>
+#include "bsonelement.h"
+#include "bsonobj.h"
+#include "bsonmisc.h"
namespace mongo {
+ using namespace std;
+
#if defined(_WIN32)
// warning: 'this' : used in base member initializer list
#pragma warning( disable : 4355 )
@@ -81,18 +86,21 @@ namespace mongo {
class BSONObjBuilder : boost::noncopyable {
public:
/** @param initsize this is just a hint as to the final size of the object */
- BSONObjBuilder(int initsize=512) : _b(_buf), _buf(initsize), _offset( 0 ), _s( this ) , _tracker(0) , _doneCalled(false) {
- _b.skip(4); /*leave room for size field*/
+ BSONObjBuilder(int initsize=512) : _b(_buf), _buf(initsize + sizeof(unsigned)), _offset( sizeof(unsigned) ), _s( this ) , _tracker(0) , _doneCalled(false) {
+ _b.appendNum((unsigned)0); // ref-count
+ _b.skip(4); /*leave room for size field and ref-count*/
}
- /* dm why do we have this/need this? not clear to me, comment please tx. */
- /** @param baseBuilder construct a BSONObjBuilder using an existing BufBuilder */
+ /** @param baseBuilder construct a BSONObjBuilder using an existing BufBuilder
+ * This is for more efficient adding of subobjects/arrays. See docs for subobjStart for example.
+ */
BSONObjBuilder( BufBuilder &baseBuilder ) : _b( baseBuilder ), _buf( 0 ), _offset( baseBuilder.len() ), _s( this ) , _tracker(0) , _doneCalled(false) {
_b.skip( 4 );
}
- BSONObjBuilder( const BSONSizeTracker & tracker ) : _b(_buf) , _buf(tracker.getSize() ), _offset(0), _s( this ) , _tracker( (BSONSizeTracker*)(&tracker) ) , _doneCalled(false) {
- _b.skip( 4 );
+ BSONObjBuilder( const BSONSizeTracker & tracker ) : _b(_buf) , _buf(tracker.getSize() + sizeof(unsigned) ), _offset( sizeof(unsigned) ), _s( this ) , _tracker( (BSONSizeTracker*)(&tracker) ) , _doneCalled(false) {
+ _b.appendNum((unsigned)0); // ref-count
+ _b.skip(4);
}
~BSONObjBuilder() {
@@ -146,9 +154,17 @@ namespace mongo {
return *this;
}
-
/** add header for a new subobject and return bufbuilder for writing to
- the subobject's body */
+ * the subobject's body
+ *
+ * example:
+ *
+ * BSONObjBuilder b;
+ * BSONObjBuilder sub (b.subobjStart("fieldName"));
+ * // use sub
+ * sub.done();
+ * // use b and convert to object
+ */
BufBuilder &subobjStart(const StringData& fieldName) {
_b.appendNum((char) Object);
_b.appendStr(fieldName);
@@ -218,7 +234,7 @@ namespace mongo {
long long x = n;
if ( x < 0 )
x = x * -1;
- if ( x < ( numeric_limits<int>::max() / 2 ) )
+ if ( x < ( (numeric_limits<int>::max)() / 2 ) ) // extra () to avoid max macro on windows
append( fieldName , (int)n );
else
append( fieldName , n );
@@ -247,14 +263,13 @@ namespace mongo {
return *this;
}
-
BSONObjBuilder& appendNumber( const StringData& fieldName , long long l ) {
static long long maxInt = (int)pow( 2.0 , 30.0 );
static long long maxDouble = (long long)pow( 2.0 , 40.0 );
-
- if ( l < maxInt )
+ long long x = l >= 0 ? l : -l;
+ if ( x < maxInt )
append( fieldName , (int)l );
- else if ( l < maxDouble )
+ else if ( x < maxDouble )
append( fieldName , (double)l );
else
append( fieldName , l );
@@ -366,12 +381,13 @@ namespace mongo {
return *this;
}
- /** Append a string element. len DOES include terminating nul */
- BSONObjBuilder& append(const StringData& fieldName, const char *str, int len) {
+ /** Append a string element.
+ @param sz size includes terminating null character */
+ BSONObjBuilder& append(const StringData& fieldName, const char *str, int sz) {
_b.appendNum((char) String);
_b.appendStr(fieldName);
- _b.appendNum((int)len);
- _b.appendBuf(str, len);
+ _b.appendNum((int)sz);
+ _b.appendBuf(str, sz);
return *this;
}
/** Append a string element */
@@ -517,6 +533,10 @@ namespace mongo {
template < class T >
BSONObjBuilder& append( const StringData& fieldName, const list< T >& vals );
+ /** Append a set of values. */
+ template < class T >
+ BSONObjBuilder& append( const StringData& fieldName, const set< T >& vals );
+
/**
* destructive
* The returned BSONObj will free the buffer when it is finished.
@@ -525,8 +545,10 @@ namespace mongo {
BSONObj obj() {
bool own = owned();
massert( 10335 , "builder does not own memory", own );
- int l;
- return BSONObj(decouple(l), true);
+ doneFast();
+ BSONObj::Holder* h = (BSONObj::Holder*)_b.buf();
+ decouple(); // sets _b.buf() to NULL
+ return BSONObj(h);
}
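The ownership split between obj() and done() matters for lifetime: obj() hands the caller the ref-counted Holder buffer, while done() returns a view into the builder's buffer. A sketch:

    BSONObjBuilder b;
    b.append( "x", 1 );
    BSONObj owned = b.obj();      // owns the buffer; valid after b is destroyed
    // BSONObj view = b.done();   // alternative: points into b; b must outlive it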
/** Fetch the object we have built.
@@ -535,7 +557,7 @@ namespace mongo {
would like the BSONObj to last longer than the builder.
*/
BSONObj done() {
- return BSONObj(_done(), /*ifree*/false);
+ return BSONObj(_done());
}
// Like 'done' above, but does not construct a BSONObj to return to the caller.
@@ -569,7 +591,7 @@ namespace mongo {
void appendKeys( const BSONObj& keyPattern , const BSONObj& values );
static string numStr( int i ) {
- if (i>=0 && i<100)
+ if (i>=0 && i<100 && numStrsReady)
return numStrs[i];
StringBuilder o;
o << i;
@@ -623,6 +645,8 @@ namespace mongo {
int len() const { return _b.len(); }
+ BufBuilder& bb() { return _b; }
+
private:
char* _done() {
if ( _doneCalled )
@@ -647,6 +671,7 @@ namespace mongo {
bool _doneCalled;
static const string numStrs[100]; // cache of 0 to 99 inclusive
+ static bool numStrsReady; // for static init safety. see comments in db/jsobj.cpp
};
class BSONArrayBuilder : boost::noncopyable {
@@ -692,7 +717,23 @@ namespace mongo {
return *this;
}
- BufBuilder &subobjStart( const StringData& name = "0" ) {
+ // These two just use next position
+ BufBuilder &subobjStart() { return _b.subobjStart( num() ); }
+ BufBuilder &subarrayStart() { return _b.subarrayStart( num() ); }
+
+ // These fill missing entries up to pos; if pos is less than the next position, pos is ignored
+ BufBuilder &subobjStart(int pos) {
+ fill(pos);
+ return _b.subobjStart( num() );
+ }
+ BufBuilder &subarrayStart(int pos) {
+ fill(pos);
+ return _b.subarrayStart( num() );
+ }
+
+ // These should only be used where you really need interface compatibility with BSONObjBuilder
+ // Currently they are only used by update.cpp and it should probably stay that way
+ BufBuilder &subobjStart( const StringData& name ) {
fill( name );
return _b.subobjStart( num() );
}
@@ -720,7 +761,16 @@ namespace mongo {
long int n = strtol( name.data(), &r, 10 );
if ( *r )
uasserted( 13048, (string)"can't append to array using string field name [" + name.data() + "]" );
- while( _i < n )
+ fill(n);
+ }
+
+ void fill (int upTo){
+ // if this is changed make sure to update error message and jstests/set7.js
+ const int maxElems = 1500000;
+ BOOST_STATIC_ASSERT(maxElems < (BSONObjMaxUserSize/10));
+ uassert(15891, "can't backfill array to larger than 1,500,000 elements", upTo <= maxElems);
+
+ while( _i < upTo )
append( nullElt() );
}
@@ -749,16 +799,27 @@ namespace mongo {
return *this;
}
- template < class T >
- inline BSONObjBuilder& BSONObjBuilder::append( const StringData& fieldName, const list< T >& vals ) {
+ template < class L >
+ inline BSONObjBuilder& _appendIt( BSONObjBuilder& _this, const StringData& fieldName, const L& vals ) {
BSONObjBuilder arrBuilder;
int n = 0;
- for( typename list< T >::const_iterator i = vals.begin(); i != vals.end(); i++ )
- arrBuilder.append( numStr(n++), *i );
- appendArray( fieldName, arrBuilder.done() );
- return *this;
+ for( typename L::const_iterator i = vals.begin(); i != vals.end(); i++ )
+ arrBuilder.append( BSONObjBuilder::numStr(n++), *i );
+ _this.appendArray( fieldName, arrBuilder.done() );
+ return _this;
}
+ template < class T >
+ inline BSONObjBuilder& BSONObjBuilder::append( const StringData& fieldName, const list< T >& vals ) {
+ return _appendIt< list< T > >( *this, fieldName, vals );
+ }
+
+ template < class T >
+ inline BSONObjBuilder& BSONObjBuilder::append( const StringData& fieldName, const set< T >& vals ) {
+ return _appendIt< set< T > >( *this, fieldName, vals );
+ }
+
+
// $or helper: OR(BSON("x" << GT << 7), BSON("y" << LT 6));
inline BSONObj OR(const BSONObj& a, const BSONObj& b)
{ return BSON( "$or" << BSON_ARRAY(a << b) ); }
diff --git a/bson/bsonobjiterator.h b/bson/bsonobjiterator.h
index 6e6a69e..39ae24d 100644
--- a/bson/bsonobjiterator.h
+++ b/bson/bsonobjiterator.h
@@ -26,6 +26,8 @@ namespace mongo {
Note each BSONObj ends with an EOO element: so you will get more() on an empty
object, although next().eoo() will be true.
+ The BSONObj must stay in scope for the duration of the iterator's execution.
+
todo: we may want to make a more stl-like iterator interface for this
with things like begin() and end()
*/
@@ -35,39 +37,44 @@ namespace mongo {
*/
BSONObjIterator(const BSONObj& jso) {
int sz = jso.objsize();
- if ( sz == 0 ) {
+ if ( MONGO_unlikely(sz == 0) ) {
_pos = _theend = 0;
return;
}
_pos = jso.objdata() + 4;
- _theend = jso.objdata() + sz;
+ _theend = jso.objdata() + sz - 1;
}
BSONObjIterator( const char * start , const char * end ) {
_pos = start + 4;
- _theend = end;
+ _theend = end - 1;
}
/** @return true if more elements exist to be enumerated. */
- bool more() { return _pos < _theend && _pos[0]; }
+ bool more() { return _pos < _theend; }
/** @return true if more elements exist to be enumerated INCLUDING the EOO element which is always at the end. */
- bool moreWithEOO() { return _pos < _theend; }
+ bool moreWithEOO() { return _pos <= _theend; }
/** @return the next element in the object. For the final element, element.eoo() will be true. */
- BSONElement next( bool checkEnd = false ) {
- assert( _pos < _theend );
- BSONElement e( _pos, checkEnd ? (int)(_theend - _pos) : -1 );
- _pos += e.size( checkEnd ? (int)(_theend - _pos) : -1 );
+ BSONElement next( bool checkEnd ) {
+ assert( _pos <= _theend );
+ BSONElement e( _pos, checkEnd ? (int)(_theend + 1 - _pos) : -1 );
+ _pos += e.size( checkEnd ? (int)(_theend + 1 - _pos) : -1 );
+ return e;
+ }
+ BSONElement next() {
+ assert( _pos <= _theend );
+ BSONElement e(_pos);
+ _pos += e.size();
return e;
}
-
void operator++() { next(); }
void operator++(int) { next(); }
BSONElement operator*() {
- assert( _pos < _theend );
- return BSONElement(_pos, -1);
+ assert( _pos <= _theend );
+ return BSONElement(_pos);
}
private:
@@ -102,6 +109,29 @@ namespace mongo {
int _cur;
};
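Basic iteration with the reworked more()/next() pair; per the class comment, the BSONObj must outlive the iterator (fragment, assumes the bson headers and <iostream>):

    BSONObj o = BSON( "a" << 1 << "b" << 2 );
    BSONObjIterator it( o );
    while ( it.more() ) {
        BSONElement e = it.next();
        std::cout << e.fieldName() << std::endl;   // prints a, then b
    }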
+ /** transform a BSON array into a vector of BSONElements.
+ we match array # positions with their vector position, and ignore
+ any fields with non-numeric field names.
+ */
+ inline vector<BSONElement> BSONElement::Array() const {
+ chk(mongo::Array);
+ vector<BSONElement> v;
+ BSONObjIterator i(Obj());
+ while( i.more() ) {
+ BSONElement e = i.next();
+ const char *f = e.fieldName();
+ try {
+ unsigned u = stringToNum(f);
+ assert( u < 1000000 );
+ if( u >= v.size() )
+ v.resize(u+1);
+ v[u] = e;
+ }
+ catch(unsigned) { }
+ }
+ return v;
+ }
+
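Sketch of Array(): numeric field names map to vector positions, so a well-formed array round-trips directly (BSON_ARRAY is the array-literal macro from bsonmisc.h):

    BSONObj o = BSON( "nums" << BSON_ARRAY( 10 << 20 << 30 ) );
    vector<BSONElement> v = o["nums"].Array();
    // v.size() == 3; v[1].numberInt() == 20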
/** Similar to BOOST_FOREACH
*
* because the iterator is defined outside of the for, you must use {} around
diff --git a/bson/inline_decls.h b/bson/inline_decls.h
index 1605611..30da9b4 100644
--- a/bson/inline_decls.h
+++ b/bson/inline_decls.h
@@ -1,20 +1,19 @@
-// inline.h
-
-/**
-* Copyright (C) 2010 10gen Inc.
-*
-* This program is free software: you can redistribute it and/or modify
-* it under the terms of the GNU Affero General Public License, version 3,
-* as published by the Free Software Foundation.
-*
-* This program is distributed in the hope that it will be useful,
-* but WITHOUT ANY WARRANTY; without even the implied warranty of
-* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-* GNU Affero General Public License for more details.
-*
-* You should have received a copy of the GNU Affero General Public License
-* along with this program. If not, see <http://www.gnu.org/licenses/>.
-*/
+// inline_decls.h
+
+/* Copyright 2010 10gen Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
#pragma once
@@ -31,3 +30,39 @@
#define NOINLINE_DECL
#endif
+
+namespace mongo {
+
+/* Note: do not clutter code with these -- ONLY use in hot spots / significant loops. */
+
+#if !defined(__GNUC__)
+
+// branch prediction. indicate we expect to be true
+# define MONGO_likely(x) ((bool)(x))
+
+// branch prediction. indicate we expect to be false
+# define MONGO_unlikely(x) ((bool)(x))
+
+# if defined(_WIN32)
+ // prefetch data from memory
+ inline void prefetch(const void *p) {
+#if defined(_MM_HINT_T0)
+ _mm_prefetch((char *) p, _MM_HINT_T0);
+#endif
+ }
+#else
+ inline void prefetch(void *p) { }
+#endif
+
+#else
+
+# define MONGO_likely(x) ( __builtin_expect((bool)(x), 1) )
+# define MONGO_unlikely(x) ( __builtin_expect((bool)(x), 0) )
+
+ inline void prefetch(void *p) {
+ __builtin_prefetch(p);
+ }
+
+#endif
+
+}
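A hedged example of the hint macros and prefetch() in a hot path (hypothetical function, not from this tree; assumes bson/inline_decls.h is on the include path):

    #include "bson/inline_decls.h"

    int sumBytes( char* p, int n ) {
        if ( MONGO_unlikely( p == 0 ) )   // error path, expected to be rare
            return -1;
        mongo::prefetch( p );             // hint that *p is about to be read
        int s = 0;
        for ( int i = 0; i < n; i++ )
            s += p[i];
        return s;
    }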
diff --git a/bson/oid.cpp b/bson/oid.cpp
index 6aa0730..3aee14a 100644
--- a/bson/oid.cpp
+++ b/bson/oid.cpp
@@ -19,6 +19,7 @@
#include "oid.h"
#include "util/atomic_int.h"
#include "../db/nonce.h"
+#include "bsonobjbuilder.h"
BOOST_STATIC_ASSERT( sizeof(mongo::OID) == 12 );
@@ -34,7 +35,7 @@ namespace mongo {
#elif defined(__linux__) || defined(__APPLE__) || defined(__sunos__)
pid = (unsigned short) getpid();
#else
- pid = (unsigned short) security.getNonce();
+ pid = (unsigned short) Security::getNonce();
#endif
return pid;
}
@@ -53,13 +54,13 @@ namespace mongo {
// this is not called often, so the following is not expensive, and gives us some
// testing that nonce generation is working right and that our OIDs are (perhaps) ok.
{
- nonce a = security.getNonce();
- nonce b = security.getNonce();
- nonce c = security.getNonce();
+ nonce64 a = Security::getNonceDuringInit();
+ nonce64 b = Security::getNonceDuringInit();
+ nonce64 c = Security::getNonceDuringInit();
assert( !(a==b && b==c) );
}
- unsigned long long n = security.getNonce();
+ unsigned long long n = Security::getNonceDuringInit();
OID::MachineAndPid x = ourMachine = (OID::MachineAndPid&) n;
foldInPid(x);
return x;
@@ -96,7 +97,7 @@ namespace mongo {
}
void OID::init() {
- static AtomicUInt inc = (unsigned) security.getNonce();
+ static AtomicUInt inc = (unsigned) Security::getNonce();
{
unsigned t = (unsigned) time(0);
@@ -151,4 +152,22 @@ namespace mongo {
return time;
}
+ const string BSONObjBuilder::numStrs[] = {
+ "0", "1", "2", "3", "4", "5", "6", "7", "8", "9",
+ "10", "11", "12", "13", "14", "15", "16", "17", "18", "19",
+ "20", "21", "22", "23", "24", "25", "26", "27", "28", "29",
+ "30", "31", "32", "33", "34", "35", "36", "37", "38", "39",
+ "40", "41", "42", "43", "44", "45", "46", "47", "48", "49",
+ "50", "51", "52", "53", "54", "55", "56", "57", "58", "59",
+ "60", "61", "62", "63", "64", "65", "66", "67", "68", "69",
+ "70", "71", "72", "73", "74", "75", "76", "77", "78", "79",
+ "80", "81", "82", "83", "84", "85", "86", "87", "88", "89",
+ "90", "91", "92", "93", "94", "95", "96", "97", "98", "99",
+ };
+
+ // This ensures that BSONObjBuilder doesn't try to use numStrs before the strings have been constructed.
+ // Making numStrs a plain char[][] was tested, but the overhead of constructing strings on each use was too high.
+ // numStrsReady will be 0 until after numStrs is initialized, because it is a static variable.
+ bool BSONObjBuilder::numStrsReady = (numStrs[0].size() > 0);
+
}
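For context, the consumer of numStrs/numStrsReady sits in bsonobjbuilder.h and looks
roughly like the sketch below; treat the exact body as an approximation of the
surrounding tree rather than part of this patch:

    // in class BSONObjBuilder
    static string numStr( int i ) {
        if ( i >= 0 && i < 100 && numStrsReady )
            return numStrs[i];     // fast path: table of pre-built strings
        StringBuilder o;           // fallback; also safe during static initialization
        o << i;
        return o.str();
    }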
diff --git a/bson/ordering.h b/bson/ordering.h
index 749e20d..bca3296 100644
--- a/bson/ordering.h
+++ b/bson/ordering.h
@@ -19,15 +19,22 @@
namespace mongo {
- /** A precomputation of a BSON key pattern.
+ // todo: ideally move to db/ instead of bson/, but eliminate any dependencies first
+
+ /** A precomputation of a BSON index or sort key pattern. That is something like:
+ { a : 1, b : -1 }
The constructor is private to make conversion more explicit so we notice where we call make().
Over time we should push this up higher and higher.
- */
+ */
class Ordering {
- const unsigned bits;
- const unsigned nkeys;
- Ordering(unsigned b,unsigned n) : bits(b),nkeys(n) { }
+ unsigned bits;
+ Ordering(unsigned b) : bits(b) { }
public:
+ Ordering(const Ordering& r) : bits(r.bits) { }
+ void operator=(const Ordering& r) {
+ bits = r.bits;
+ }
+
/** so, for key pattern { a : 1, b : -1 }
get(0) == 1
get(1) == -1
@@ -39,12 +46,12 @@ namespace mongo {
// for woCompare...
unsigned descending(unsigned mask) const { return bits & mask; }
- operator string() const {
+ /*operator string() const {
StringBuilder buf(32);
for ( unsigned i=0; i<nkeys; i++)
buf.append( get(i) > 0 ? "+" : "-" );
return buf.str();
- }
+ }*/
static Ordering make(const BSONObj& obj) {
unsigned b = 0;
@@ -59,7 +66,7 @@ namespace mongo {
b |= (1 << n);
n++;
}
- return Ordering(b,n);
+ return Ordering(b);
}
};
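A short usage sketch (assumes the BSON() macro from bsonmisc.h; per the layout above,
a set bit means the corresponding key is descending):

    BSONObj keyPattern = BSON( "a" << 1 << "b" << -1 );
    Ordering o = Ordering::make( keyPattern );
    // o.get(0) == 1 (ascending), o.get(1) == -1 (descending)
    // o.descending( 1 << 1 ) != 0, because key 1 ('b') has its bit set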
diff --git a/bson/stringdata.h b/bson/stringdata.h
index 46cdb7a..352dc51 100644
--- a/bson/stringdata.h
+++ b/bson/stringdata.h
@@ -15,8 +15,7 @@
* limitations under the License.
*/
-#ifndef BSON_STRINDATA_HEADER
-#define BSON_STRINDATA_HEADER
+#pragma once
#include <string>
#include <cstring>
@@ -25,29 +24,31 @@ namespace mongo {
using std::string;
- // A StringData object wraps a 'const string&' or a 'const char*' without
- // copying its contents. The most common usage is as a function argument that
- // takes any of the two forms of strings above. Fundamentally, this class tries
- // go around the fact that string literals in C++ are char[N]'s.
- //
- // Note that the object StringData wraps around must be alive while the StringDAta
- // is.
-
+ /** A StringData object wraps a 'const string&' or a 'const char*' without
+ * copying its contents. The most common usage is as a function argument that
+ * takes either of the two forms of strings above. Fundamentally, this class tries
+ * to get around the fact that string literals in C++ are char[N]'s.
+ *
+ * Note that the object StringData wraps must stay alive for as long as the
+ * StringData is.
+ */
class StringData {
public:
- // Construct a StringData explicilty, for the case where the lenght of
- // string is not known. 'c' must be a pointer to a null-terminated string.
+ /** Construct a StringData, for the case where the length of the
+ * string is not known. 'c' must be a pointer to a null-terminated string.
+ */
StringData( const char* c )
: _data(c), _size((unsigned) strlen(c)) {}
- // Construct a StringData explicitly, for the case where the length of the string
- // is already known. 'c' must be a pointer to a null-terminated string, and strlenOfc
- // must be the length that std::strlen(c) would return, a.k.a the index of the
- // terminator in c.
- StringData( const char* c, size_t strlenOfc )
- : _data(c), _size((unsigned) strlenOfc) {}
+ /** Construct a StringData explicitly, for the case where the length of the string
+ * is already known. 'c' must be a pointer to a null-terminated string, and 'len'
+ * must be the length that std::strlen(c) would return, a.k.a. the index of the
+ * terminator in c.
+ */
+ StringData( const char* c, unsigned len )
+ : _data(c), _size(len) {}
- // Construct a StringData explicitly, for the case of a std::string.
+ /** Construct a StringData, for the case of a std::string. */
StringData( const string& s )
: _data(s.c_str()), _size((unsigned) s.size()) {}
@@ -59,19 +60,12 @@ namespace mongo {
: _data(&val[0]), _size(N-1) {}
// accessors
-
- const char* const data() const { return _data; }
+ const char* data() const { return _data; }
const unsigned size() const { return _size; }
private:
- // There are two assumptions we use bellow.
- // '_data' *always* finishes with a null terminator
- // 'size' does *not* account for the null terminator
- // These assumptions may make it easier to minimize changes to existing code.
- const char* const _data;
- const unsigned _size;
+ const char* const _data; // is always null terminated
+ const unsigned _size; // 'size' does not include the null terminator
};
} // namespace mongo
-
-#endif // BSON_STRINGDATA_HEADER
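A sketch of the intended call pattern: one parameter type accepts all three common
string forms. firstByteOf() and demo() are hypothetical, not part of the tree.

    size_t firstByteOf( const StringData& s ) {
        return s.size() ? (unsigned char) s.data()[0] : 0;
    }

    void demo() {
        firstByteOf( "literal" );               // string literal
        const char *p = "pointer";
        firstByteOf( p );                       // plain C string
        firstByteOf( std::string("string") );   // std::string
    }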
diff --git a/bson/util/atomic_int.h b/bson/util/atomic_int.h
index 1573552..e85a023 100644
--- a/bson/util/atomic_int.h
+++ b/bson/util/atomic_int.h
@@ -36,15 +36,17 @@ namespace mongo {
inline AtomicUInt operator--(); // --prefix
inline AtomicUInt operator--(int); // postfix--
- inline void zero() { x = 0; } // TODO: this isn't thread safe
+ inline void zero();
volatile unsigned x;
};
#if defined(_WIN32)
+ void AtomicUInt::zero() {
+ InterlockedExchange((volatile long*)&x, 0);
+ }
AtomicUInt AtomicUInt::operator++() {
- // InterlockedIncrement returns the new value
- return InterlockedIncrement((volatile long*)&x); //long is 32bits in Win64
+ return InterlockedIncrement((volatile long*)&x);
}
AtomicUInt AtomicUInt::operator++(int) {
return InterlockedIncrement((volatile long*)&x)-1;
@@ -57,6 +59,7 @@ namespace mongo {
}
#elif defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4)
// this is in GCC >= 4.1
+ inline void AtomicUInt::zero() { x = 0; } // TODO: plain store; possibly not thread safe on all targets
AtomicUInt AtomicUInt::operator++() {
return __sync_add_and_fetch(&x, 1);
}
@@ -70,8 +73,8 @@ namespace mongo {
return __sync_fetch_and_add(&x, -1);
}
#elif defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__))
+ inline void AtomicUInt::zero() { x = 0; } // TODO: this isn't thread safe
// from boost 1.39 interprocess/detail/atomic.hpp
-
inline unsigned atomic_int_helper(volatile unsigned *x, int val) {
int r;
asm volatile
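Call sites are unchanged by this patch; a small single-threaded sketch of the
interface follows (the zero-initializing default constructor and the implicit
unsigned conversion are assumed from the surrounding header):

    AtomicUInt n;        // constructed at zero
    n++;                 // atomic increment (InterlockedIncrement / __sync_add_and_fetch)
    unsigned seen = n;   // implicit conversion reads the current value
    n.zero();            // with this patch, an interlocked store on Windows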
diff --git a/bson/util/builder.h b/bson/util/builder.h
index 6f4ff9e..710c2d4 100644
--- a/bson/util/builder.h
+++ b/bson/util/builder.h
@@ -20,8 +20,6 @@
#include <string>
#include <string.h>
#include <stdio.h>
-#include <boost/shared_ptr.hpp>
-
#include "../inline_decls.h"
#include "../stringdata.h"
@@ -49,11 +47,47 @@ namespace mongo {
void msgasserted(int msgid, const char *msg);
- class BufBuilder {
+ class TrivialAllocator {
+ public:
+ void* Malloc(size_t sz) { return malloc(sz); }
+ void* Realloc(void *p, size_t sz) { return realloc(p, sz); }
+ void Free(void *p) { free(p); }
+ };
+
+ class StackAllocator {
+ public:
+ enum { SZ = 512 };
+ void* Malloc(size_t sz) {
+ if( sz <= SZ ) return buf;
+ return malloc(sz);
+ }
+ void* Realloc(void *p, size_t sz) {
+ if( p == buf ) {
+ if( sz <= SZ ) return buf; // still fits in the stack buffer
+ void *d = malloc(sz);
+ memcpy(d, p, SZ); // copy the whole stack buffer; the used length isn't known here
+ return d;
+ }
+ return realloc(p, sz); // already heap-allocated
+ }
+ void Free(void *p) {
+ if( p != buf )
+ free(p);
+ }
+ private:
+ char buf[SZ];
+ };
+
+ template< class Allocator >
+ class _BufBuilder {
+ // non-copyable, non-assignable
+ _BufBuilder( const _BufBuilder& );
+ _BufBuilder& operator=( const _BufBuilder& );
+ Allocator al;
public:
- BufBuilder(int initsize = 512) : size(initsize) {
+ _BufBuilder(int initsize = 512) : size(initsize) {
if ( size > 0 ) {
- data = (char *) malloc(size);
+ data = (char *) al.Malloc(size);
if( data == 0 )
msgasserted(10000, "out of memory BufBuilder");
}
@@ -62,22 +96,23 @@ namespace mongo {
}
l = 0;
}
- ~BufBuilder() {
- kill();
- }
+ ~_BufBuilder() { kill(); }
void kill() {
if ( data ) {
- free(data);
+ al.Free(data);
data = 0;
}
}
- void reset( int maxSize = 0 ) {
+ void reset() {
+ l = 0;
+ }
+ void reset( int maxSize ) {
l = 0;
if ( maxSize && size > maxSize ) {
- free(data);
- data = (char*)malloc(maxSize);
+ al.Free(data);
+ data = (char*)al.Malloc(maxSize);
size = maxSize;
}
}
@@ -94,6 +129,9 @@ namespace mongo {
/* assume ownership of the buffer - you must then free() it */
void decouple() { data = 0; }
+ void appendUChar(unsigned char j) {
+ *((unsigned char*)grow(sizeof(unsigned char))) = j;
+ }
void appendChar(char j) {
*((char*)grow(sizeof(char))) = j;
}
@@ -131,13 +169,15 @@ namespace mongo {
appendBuf(&s, sizeof(T));
}
- void appendStr(const StringData &str , bool includeEOO = true ) {
- const int len = str.size() + ( includeEOO ? 1 : 0 );
+ void appendStr(const StringData &str , bool includeEndingNull = true ) {
+ const int len = str.size() + ( includeEndingNull ? 1 : 0 );
memcpy(grow(len), str.data(), len);
}
+ /** @return number of bytes written so far */
int len() const { return l; }
void setlen( int newLen ) { l = newLen; }
+ /** @return size of the buffer */
int getSize() const { return size; }
/* returns the pre-grow write position */
@@ -160,7 +200,7 @@ namespace mongo {
a = l + 16 * 1024;
if ( a > BufferMaxSize )
msgasserted(13548, "BufBuilder grow() > 64MB");
- data = (char *) realloc(data, a);
+ data = (char *) al.Realloc(data, a);
size= a;
}
@@ -171,6 +211,21 @@ namespace mongo {
friend class StringBuilder;
};
+ typedef _BufBuilder<TrivialAllocator> BufBuilder;
+
+ /** The StackBufBuilder builds smaller datasets on the stack instead of using malloc.
+ This can be significantly faster for small buffers. However, you cannot decouple()
+ the buffer with StackBufBuilder.
+ While designed to be a variable on the stack, if you were to dynamically allocate one,
+ nothing bad would happen. In fact in some circumstances this might make sense, say,
+ embedded in some other object.
+ */
+ class StackBufBuilder : public _BufBuilder<StackAllocator> {
+ public:
+ StackBufBuilder() : _BufBuilder<StackAllocator>(StackAllocator::SZ) { }
+ void decouple(); // not allowed. not implemented.
+ };
+
#if defined(_WIN32)
#pragma warning( push )
// warning C4996: 'sprintf': This function or variable may be unsafe. Consider using sprintf_s instead. To disable deprecation, use _CRT_SECURE_NO_WARNINGS.
@@ -236,6 +291,8 @@ namespace mongo {
void reset( int maxSize = 0 ) { _buf.reset( maxSize ); }
std::string str() const { return std::string(_buf.data, _buf.l); }
+
+ int len() const { return _buf.l; }
private:
BufBuilder _buf;
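A sketch of the intended use of StackBufBuilder; appendNum() and buf() come from the
surrounding BufBuilder interface, so verify the exact names against your headers:

    StackBufBuilder b;            // 512-byte buffer lives on the stack
    b.appendNum( (int) 42 );      // 4 bytes
    b.appendStr( "abc" );         // 4 bytes, including the terminating NUL
    const char *raw = b.buf();    // points into the on-stack buffer; no malloc occurred
    int used = b.len();           // == 8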
diff --git a/bson/util/misc.h b/bson/util/misc.h
index b31f36f..33764e3 100644
--- a/bson/util/misc.h
+++ b/bson/util/misc.h
@@ -1,4 +1,4 @@
-/* @file util.h
+/* @file misc.h
*/
/*
@@ -91,4 +91,23 @@ namespace mongo {
return i;
return -1;
}
+
+ inline bool isNumber( char c ) {
+ return c >= '0' && c <= '9';
+ }
+
+ inline unsigned stringToNum(const char *str) {
+ unsigned x = 0;
+ const char *p = str;
+ while( 1 ) {
+ if( !isNumber(*p) ) {
+ if( *p == 0 && p != str )
+ break;
+ throw 0u; // unsigned, so the catch(unsigned) in BSONElement::Array() matches
+ }
+ x = x * 10 + *p++ - '0';
+ }
+ return x;
+ }
+
}
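Behavior sketch for stringToNum(): it accepts only non-empty, all-digit strings, and
anything else throws, which BSONElement::Array() (earlier in this patch) relies on to
skip non-index field names:

    stringToNum("42");    // == 42
    stringToNum("007");   // == 7
    stringToNum("");      // throws
    stringToNum("4x");    // throws (non-digit)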
diff --git a/buildscripts/errorcodes.py b/buildscripts/errorcodes.py
index a105647..dec1030 100755
--- a/buildscripts/errorcodes.py
+++ b/buildscripts/errorcodes.py
@@ -31,22 +31,58 @@ def assignErrorCodes():
codes = []
-def readErrorCodes( callback ):
- ps = [ re.compile( "([um]asser(t|ted)) *\( *(\d+)" ) ,
- re.compile( "(User|Msg)Exceptio(n)\( *(\d+)" )
+def readErrorCodes( callback, replaceZero = False ):
+ ps = [ re.compile( "(([umsg]asser(t|ted))) *\(( *)(\d+)" ) ,
+ re.compile( "((User|Msg|MsgAssertion)Exceptio(n))\(( *)(\d+)" ) ,
+ re.compile( "(((verify))) *\(( *)(\d+)" )
]
+
for x in utils.getAllSourceFiles():
+
+ needReplace = [False]
+ lines = []
+ lastCodes = [0]
lineNum = 1
+
for line in open( x ):
+
for p in ps:
- for m in p.findall( line ):
- codes.append( ( x , lineNum , line , m[2] ) )
- callback( x , lineNum , line , m[2] )
- lineNum = lineNum + 1
+
+ def repl( m ):
+ m = m.groups()
+
+ start = m[0]
+ spaces = m[3]
+ code = m[4]
+ if code == '0' and replaceZero :
+ code = getNextCode( lastCodes )
+ lastCodes.append( code )
+ code = str( code )
+ needReplace[0] = True
+
+ print( "Adding code " + code + " to line " + x + ":" + str( lineNum ) )
+
+ else :
+ codes.append( ( x , lineNum , line , code ) )
+ callback( x , lineNum , line , code )
+
+ return start + "(" + spaces + code
+
+ line = re.sub( p, repl, line )
+ if replaceZero : lines.append( line )
+ lineNum = lineNum + 1
+
+ if replaceZero and needReplace[0] :
+ print( "Replacing file " + x )
+ of = open( x + ".tmp", 'w' )
+ of.write( "".join( lines ) )
+ of.close()
+ os.rename( x + ".tmp", x )
+
-def getNextCode():
- highest = [0]
+def getNextCode( lastCodes = [0] ):
+ highest = [max(lastCodes)]
def check( fileName , lineNum , line , code ):
code = int( code )
if code > highest[0]:
@@ -64,7 +100,7 @@ def checkErrorCodes():
print( "%s:%d:%s %s" % seen[code] )
errors.append( seen[code] )
seen[code] = ( fileName , lineNum , line , code )
- readErrorCodes( checkDups )
+ readErrorCodes( checkDups, True )
return len( errors ) == 0
def getBestMessage( err , start ):
@@ -81,13 +117,11 @@ def getBestMessage( err , start ):
def genErrorOutput():
- g = utils.getGitVersion()
-
if os.path.exists( "docs/errors.md" ):
i = open( "docs/errors.md" , "r" )
- out = open( "docs/errors.md" , 'w' )
+ out = open( "docs/errors.md" , 'wb' )
out.write( "MongoDB Error Codes\n==========\n\n\n" )
prev = ""
@@ -107,7 +141,7 @@ def genErrorOutput():
out.write( f + "\n----\n" )
prev = f
- url = "http://github.com/mongodb/mongo/blob/" + g + "/" + f + "#L" + str(l)
+ url = "http://github.com/mongodb/mongo/blob/master/" + f + "#L" + str(l)
out.write( "* " + str(num) + " [code](" + url + ") " + getBestMessage( line , str(num) ) + "\n" )
diff --git a/buildscripts/hacks_ubuntu.py b/buildscripts/hacks_ubuntu.py
index 977d2df..3de1a6f 100644
--- a/buildscripts/hacks_ubuntu.py
+++ b/buildscripts/hacks_ubuntu.py
@@ -2,11 +2,13 @@
import os
def insert( env , options ):
-
- if not foundxulrunner( env , options ):
- if os.path.exists( "usr/include/mozjs/" ):
- env.Append( CPPDEFINES=[ "MOZJS" ] )
+ # now that sm is in the source tree, don't need this
+ # if not foundxulrunner( env , options ):
+ # if os.path.exists( "usr/include/mozjs/" ):
+ # env.Append( CPPDEFINES=[ "MOZJS" ] )
+
+ return
def foundxulrunner( env , options ):
best = None
diff --git a/buildscripts/makealldists.py b/buildscripts/makealldists.py
deleted file mode 100644
index 6b6f365..0000000
--- a/buildscripts/makealldists.py
+++ /dev/null
@@ -1,291 +0,0 @@
-#!/usr/bin/env python
-
-from __future__ import with_statement
-import subprocess
-import sys
-import os
-import time
-import tempfile
-import errno
-import glob
-import shutil
-import settings
-import simples3
-
-def s3bucket():
- return simples3.S3Bucket(settings.bucket, settings.id, settings.key)
-
-def s3cp (bucket, filename, s3name):
- defaultacl="public-read"
- print "putting %s to %s" % (filename, s3name)
- bucket.put(s3name, open(filename, "rb").read(), acl=defaultacl)
-
-def pushrepo(repodir):
- files=subprocess.Popen(['find', repodir, '-type', 'f'], stdout=subprocess.PIPE).communicate()[0][:-1].split('\n')
- bucket=s3bucket()
- olddebs=[t[0] for t in bucket.listdir(prefix='distros/') if t[0].endswith('.deb')]
- newdebs=[]
- for fn in files:
- if len(fn) == 0:
- continue
- tail = fn[len(repodir):]
- # Note: be very careful not to produce s3names containing
- # sequences of repeated slashes: s3 doesn't treat a////b as
- # equivalent to a/b.
- s3name1='distros-archive/'+time.strftime('%Y%m%d')+tail
- s3name2='distros'+tail
- s3cp(bucket, fn, s3name1)
- s3cp(bucket, fn, s3name2)
- if s3name1.endswith('.deb'):
- newdebs.append(s3name1)
- # FIXME: we ought to clean out old debs eventually, but this will
- # blow away too much if we're trying to push a subset of what's
- # supposed to be available.
- #[bucket.delete(deb) for deb in set(olddebs).difference(set(newdebs))]
-
-def cat (inh, outh):
- inh.seek(0)
- for line in inh:
- outh.write(line)
- inh.close()
-
-# This generates all tuples from mixed-radix counting system, essentially.
-def gen(listlist):
- dim=len(listlist)
- a=[0 for ignore in listlist]
- while True:
- yield [listlist[i][a[i]] for i in range(dim)]
- a[0]+=1
- for j in range(dim):
- if a[j] == len(listlist[j]):
- if j<dim-1:
- a[j+1]+=1
- else:
- return
- a[j]=0
-
-def dirify(string):
- return (string if string[-1:] in '\/' else string+'/')
-def fileify(string):
- return (string if string[-1:] not in '\/' else string.rstrip('\/'))
-
-# WTF: os.makedirs errors if the leaf exists?
-def makedirs(f):
- try:
- os.makedirs(f)
- except OSError: # as exc: # Python >2.5
- exc=sys.exc_value
- if exc.errno == errno.EEXIST:
- pass
- else:
- raise exc
-
-
-
-# This is a fairly peculiar thing to want to do, but our build process
-# creates several apt repositories for each mongo version we build on
-# any given Debian/Ubutnu release. To merge repositories together, we
-# must concatenate the Packages.gz files.
-def merge_directories_concatenating_conflicts (target, sources):
- print sources
- target = dirify(target)
- for source in sources:
- source = dirify(source)
- files = subprocess.Popen(["find", source, "-type", "f"], stdout=subprocess.PIPE).communicate()[0].split('\n')
- for f in files:
- if f == '':
- continue
- rel = f[len(source):]
- o=target+rel
- makedirs(os.path.dirname(o))
- with open(f) as inh:
- with open(target+rel, "a") as outh:
- outh.write(inh.read())
-
-
-def parse_mongo_version_spec(spec):
- l = spec.split(':')
- if len(l) == 1:
- l+=['','']
- elif len(l) == 2:
- l+=['']
- return l
-
-def logfh(distro, distro_version, arch):
- prefix = "%s-%s-%s.log." % (distro, distro_version, arch)
- # This is a NamedTemporaryFile mostly so that I can tail(1) them
- # as we go.
- return tempfile.NamedTemporaryFile("w+b", -1, prefix=prefix)
-
-def spawn(distro, distro_version, arch, spec, directory, opts):
- argv = ["python", "makedist.py"] + opts + [ directory, distro, distro_version, arch ] + [ spec ]
-# cmd = "mkdir -p %s; cd %s; touch foo.deb; echo %s %s %s %s %s | tee Packages " % ( directory, directory, directory, distro, distro_version, arch, mongo_version )
-# print cmd
-# argv = ["sh", "-c", cmd]
- fh = logfh(distro, distro_version, arch)
- print >> fh, "Running %s" % argv
- # it's often handy to be able to run these things at the shell
- # manually. FIXME: this ought to be slightly less than thoroughly
- # ignorant of quoting issues (as is is now).
- print >> fh, " ".join(argv)
- fh.flush()
- proc = subprocess.Popen(argv, stdin=None, stdout=fh, stderr=fh)
- return (proc, fh, distro, distro_version, arch, spec)
-
-def win(name, logfh, winfh):
- logfh.seek(0)
- print >> winfh, "=== Winner %s ===" % name
- cat(logfh, winfh)
- print >> winfh, "=== End winner %s ===" % name
-
-def lose(name, logfh, losefh):
- logfh.seek(0)
- print >> losefh, "=== Loser %s ===" % name
- cat(logfh, losefh)
- print >> losefh, "=== End loser %s ===" % name
-
-def wait(procs, winfh, losefh, winners, losers):
- print "."
- sys.stdout.flush()
- try:
- (pid, stat) = os.wait()
- except OSError, err:
- print >> sys.stderr, "This shouldn't happen."
- print >> sys.stderr, err
- next
- if pid:
- [tup] = [tup for tup in procs if tup[0].pid == pid]
- (proc, logfh, distro, distro_version, arch, spec) = tup
- procs.remove(tup)
- name = "%s %s %s" % (distro, distro_version, arch)
- if os.WIFEXITED(stat):
- if os.WEXITSTATUS(stat) == 0:
- win(name, logfh, winfh)
- winners.append(name)
- else:
- lose(name, logfh, losefh)
- losers.append(name)
- if os.WIFSIGNALED(stat):
- lose(name, logfh, losefh)
- losers.append(name)
-
-
-
-def __main__():
- # FIXME: getopt & --help.
- print " ".join(sys.argv)
- branches = sys.argv[-1]
- makedistopts = sys.argv[1:-1]
-
- # Output from makedist.py goes here.
- outputroot=tempfile.mkdtemp()
- repodir=tempfile.mkdtemp()
-
- print "makedist output under: %s\ncombined repo: %s\n" % (outputroot, repodir)
- sys.stdout.flush()
- # Add more dist/version/architecture tuples as they're supported.
- dists = (("ubuntu", "10.10"),
- ("ubuntu", "10.4"),
- ("ubuntu", "9.10"),
- ("ubuntu", "9.4"),
- #("ubuntu", "8.10"),
- ("debian", "5.0"),
- ("centos", "5.4"),
- #("fedora", "12"),
- ("fedora", "13"),
- ("fedora", "14"))
- arches = ("x86", "x86_64")
-# mongos = branches.split(',')
- # Run a makedist for each distro/version/architecture tuple above.
- winners = []
- losers = []
- winfh=tempfile.TemporaryFile()
- losefh=tempfile.TemporaryFile()
- procs = []
- count = 0
- for ((distro, distro_version), arch, spec) in gen([dists, arches, [branches]]):
- # FIXME: no x86 fedoras on RackSpace circa 04/10.
- if distro == "fedora" and arch == "x86":
- continue
- count+=1
- opts = makedistopts
- if distro in ["debian", "ubuntu"]:
- outputdir = "%s/deb/%s" % (outputroot, distro)
- elif distro in ["centos", "fedora", "redhat"]:
- outputdir = "%s/rpm/%s/%s/os" % (outputroot, distro, distro_version)
- else:
- raise Exception("unsupported distro %s" % distro)
- #opts += ["--subdirs"]
-
- procs.append(spawn(distro, distro_version, arch, spec, outputdir, opts))
-
- if len(procs) == 8:
- wait(procs, winfh, losefh, winners, losers)
-
- while procs:
- wait(procs, winfh, losefh, winners, losers)
-
- winfh.seek(0)
- losefh.seek(0)
- nwinners=len(winners)
- nlosers=len(losers)
- print "%d winners; %d losers" % (nwinners, nlosers)
- cat(winfh, sys.stdout)
- cat(losefh, sys.stdout)
- print "%d winners; %d losers" % (nwinners, nlosers)
- if count == nwinners + nlosers:
- print "All jobs accounted for"
-# return 0
- else:
- print "Lost some jobs...?"
- return 1
-
- sys.stdout.flush()
- sys.stderr.flush()
-
- # this is sort of ridiculous, but the outputs from rpmbuild look
- # like RPM/<arch>, but the repo wants to look like
- # <arch>/RPM.
- for dist in os.listdir(outputroot+'/rpm'):
- if dist in ["centos", "fedora", "redhat"]:
- distdir="%s/rpm/%s" % (outputroot, dist)
- rpmdirs = subprocess.Popen(["find", distdir, "-type", "d", "-a", "-name", "RPMS"], stdout=subprocess.PIPE).communicate()[0].split('\n')[:-1]
- for rpmdir in rpmdirs:
- for arch in os.listdir(rpmdir):
- archdir="%s/../%s" % (rpmdir, arch)
- os.mkdir(archdir)
- os.rename("%s/%s" % (rpmdir, arch), "%s/RPMS" % (archdir,))
- os.rmdir(rpmdir)
-
-
- for flavor in os.listdir(outputroot):
- argv=["python", "mergerepositories.py", flavor, "%s/%s" % (outputroot, flavor), repodir]
- print "running %s" % argv
- print " ".join(argv)
- r = subprocess.Popen(argv).wait()
- if r != 0:
- raise Exception("mergerepositories.py exited %d" % r)
- print repodir
- #pushrepo(repodir)
- #shutil.rmtree(outputroot)
- #shutil.rmtree(repodir)
-
- return 0
-
-
-if __name__ == '__main__':
- __main__()
-
-
-# FIXME: this ought to be someplace else.
-
-# FIXME: remove this comment when the buildbot does this. After this
-# program, run something that amounts to
-#
-# find /tmp/distros -name *.deb -or -name Packages.gz | while read f; do echo "./s3cp.py $f ${f#/tmp/}"; done
-#
-# where ./s3cp.py is a trivial s3 put executable in this directory.
-
-# merge_directories_concatenating_conflicts('/tmp/distros/debian', '/tmp/distros-20100222/debian/HEAD', '/tmp/distros-20100222/debian/r1.3.2','/tmp/distros-20100222/debian/v1.2')
-
-# merge_directories_concatenating_conflicts('/tmp/distros/ubuntu', '/tmp/distros-20100222/ubuntu/HEAD', '/tmp/distros-20100222/ubuntu/r1.3.2', '/tmp/distros-20100222/ubuntu/v1.2')
diff --git a/buildscripts/makedist.py b/buildscripts/makedist.py
deleted file mode 100644
index b5387c2..0000000
--- a/buildscripts/makedist.py
+++ /dev/null
@@ -1,940 +0,0 @@
-#!/usr/bin/env python
-
-# makedist.py: make a distro package (on an EC2 (or sometimes
-# RackSpace) instance)
-
-# For ease of use, put a file called settings.py someplace in your
-# sys.path, containing something like the following:
-
-# makedist = {
-# # This gets supplied to EC2 to rig up an ssh key for
-# # the remote user.
-# "ec2_sshkey" : "key-id",
-# # And so we need to tell our ssh processes where to find the
-# # appropriate public key file.
-# "ssh_keyfile" : "/path/to/key-id-file"
-# }
-
-# Notes: although there is a Python library for accessing EC2 as a web
-# service, it seemed as if it would be less work to just shell out to
-# the three EC2 management tools we use.
-
-# To make a distribution we must:
-
-# 1. Fire up an EC2 AMI suitable for building.
-# 2. Get any build-dependencies and configurations onto the remote host.
-# 3. Fetch the mongodb source.
-# 4. Run the package building tools.
-# 5. Save the package archives someplace permanent (eventually we
-# ought to install them into a public repository for the distro).
-# Unimplemented:
-# 6. Fire up an EC2 AMI suitable for testing whether the packages
-# install.
-# 7. Check whether the packages install and run.
-
-# The implementations of steps 1, 2, 4, 5, 6, and 7 will depend on the
-# distro of host we're talking to (Ubuntu, CentOS, Debian, etc.).
-
-from __future__ import with_statement
-import subprocess
-import sys
-import signal
-import getopt
-import socket
-import time
-import os.path
-import tempfile
-import string
-import settings
-
-from libcloud.types import Provider
-from libcloud.providers import get_driver
-from libcloud.drivers.ec2 import EC2NodeDriver, NodeImage
-from libcloud.base import Node, NodeImage, NodeSize, NodeState
-from libcloud.ssh import ParamikoSSHClient
-
-# For the moment, we don't handle any of the errors we raise, so it
-# suffices to have a simple subclass of Exception that just
-# stringifies according to a desired format.
-class SimpleError(Exception):
- def __init__(self, *args):
- self.args = args
- def __str__(self):
- return self.args[0] % self.args[1:]
-
-class SubcommandError(SimpleError):
- def __init__(self, *args):
- self.status = args[2]
- super(SubcommandError, self).__init__(*args)
-
-class BaseConfigurator (object):
- def __init__ (self, **kwargs):
- self.configuration = []
- self.arch=kwargs["arch"]
- self.distro_name=kwargs["distro_name"]
- self.distro_version=kwargs["distro_version"]
-
- def lookup(self, what, dist, vers, arch):
- for (wht, seq) in self.configuration:
- if what == wht:
- for ((dpat, vpat, apat), payload) in seq:
- # For the moment, our pattern facility is just "*" or exact match.
- if ((dist == dpat or dpat == "*") and
- (vers == vpat or vpat == "*") and
- (arch == apat or apat == "*")):
- return payload
- if getattr(self, what, False):
- return getattr(self, what)
- else:
- raise SimpleError("couldn't find a%s %s configuration for dist=%s, version=%s, arch=%s",
- "n" if ("aeiouAEIOU".find(what[0]) > -1) else "",
- what, dist, vers, arch)
-
- def default(self, what):
- return self.lookup(what, self.distro_name, self.distro_version, self.arch)
- def findOrDefault(self, dict, what):
- return (dict[what] if what in dict else self.lookup(what, self.distro_name, self.distro_version, self.arch))
-
-class BaseHostConfigurator (BaseConfigurator):
- def __init__(self, **kwargs):
- super(BaseHostConfigurator, self).__init__(**kwargs)
- self.configuration += [("distro_arch",
- ((("debian", "*", "x86_64"), "amd64"),
- (("ubuntu", "*", "x86_64"), "amd64"),
- (("debian", "*", "x86"), "i386"),
- (("ubuntu", "*", "x86"), "i386"),
- (("centos", "*", "x86_64"), "x86_64"),
- (("fedora", "*", "x86_64"), "x86_64"),
- (("centos", "*", "x86"), "i386"),
- (("fedora", "*", "x86"), "i386"),
- (("*", "*", "x86_64"), "x86_64"),
- (("*", "*", "x86"), "x86"))) ,
- ]
-
-class LocalHost(object):
- @classmethod
- def runLocally(cls, argv):
- print "running %s" % argv
- r = subprocess.Popen(argv).wait()
- if r != 0:
- raise SubcommandError("subcommand %s exited %d", argv, r)
-
-class EC2InstanceConfigurator(BaseConfigurator):
- def __init__(self, **kwargs):
- super(EC2InstanceConfigurator, self).__init__(**kwargs)
- self.configuration += [("ec2_ami",
- ((("ubuntu", "10.10", "x86_64"), "ami-688c7801"),
- (("ubuntu", "10.10", "x86"), "ami-1a837773"),
- (("ubuntu", "10.4", "x86_64"), "ami-bf07ead6"),
- (("ubuntu", "10.4", "x86"), "ami-f707ea9e"),
- (("ubuntu", "9.10", "x86_64"), "ami-55739e3c"),
- (("ubuntu", "9.10", "x86"), "ami-bb709dd2"),
- (("ubuntu", "9.4", "x86_64"), "ami-eef61587"),
- (("ubuntu", "9.4", "x86"), "ami-ccf615a5"),
- (("ubuntu", "8.10", "x86"), "ami-c0f615a9"),
- (("ubuntu", "8.10", "x86_64"), "ami-e2f6158b"),
- (("ubuntu", "8.4", "x86"), "ami59b35f30"),
- (("ubuntu", "8.4", "x86_64"), "ami-27b35f4e"),
- (("debian", "5.0", "x86"), "ami-dcf615b5"),
- (("debian", "5.0", "x86_64"), "ami-f0f61599"),
- (("centos", "5.4", "x86"), "ami-f8b35e91"),
- (("centos", "5.4", "x86_64"), "ami-ccb35ea5"),
- (("fedora", "8", "x86_64"), "ami-2547a34c"),
- (("fedora", "8", "x86"), "ami-5647a33f"))),
- ("rackspace_imgname",
- ((("fedora", "12", "x86_64"), "Fedora 12"),
- (("fedora", "13", "x86_64"), "Fedora 13"),
- (("fedora", "14", "x86_64"), "Fedora 14"))),
- ("ec2_mtype",
- ((("*", "*", "x86"), "m1.small"),
- (("*", "*", "x86_64"), "m1.large"))),
- ]
-
-class nodeWrapper(object):
- def __init__(self, configurator, **kwargs):
- self.terminate = False if "no_terminate" in kwargs else True
- self.use_internal_name = False
-
- def getHostname(self):
- internal_name=self.node.private_ip[0]
- public_name=self.node.public_ip[0]
- if not (internal_name or external_name):
- raise Exception('host has no name?')
- if self.use_internal_name:
- # FIXME: by inspection, it seems this is sometimes the
- # empty string. Dunno if that's EC2 or libcloud being
- # stupid, but it's not good.
- if internal_name:
- return internal_name
- else:
- return public_name
- else:
- return public_name
-
- def initwait(self):
- print "waiting for node to spin up"
- # Wait for EC2 to tell us the node is running.
- while 1:
- n=None
- # EC2 sometimes takes a while to report a node.
- for i in range(6):
- nodes = [n for n in self.list_nodes() if (n.id==self.node.id)]
- if len(nodes)>0:
- n=nodes[0]
- break
- else:
- time.sleep(10)
- if not n:
- raise Exception("couldn't find node with id %s" % self.node.id)
- if n.state == NodeState.PENDING:
- time.sleep(10)
- else:
- self.node = n
- break
- print "ok"
- # Now wait for the node's sshd to be accepting connections.
- print "waiting for ssh"
- sshwait = True
- if sshwait == False:
- return
- while sshwait:
- s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
- try:
- try:
- s.connect((self.node.public_ip[0], 22))
- sshwait = False
- print "connected on port 22 (ssh)"
- time.sleep(15) # arbitrary timeout, in case the
- # remote sshd is slow.
- except socket.error, err:
- pass
- finally:
- s.close()
- time.sleep(3) # arbitrary timeout
- print "ok"
-
- def __enter__(self):
- self.start()
- # Note: we don't do an initwait() in __enter__ because if an
- # exception is raised during __enter__, __exit__ doesn't get
- # run (and by inspection RackSpace doesn't let you kill a node
- # that hasn't finished booting yet).
- return self
-
- def __exit__(self, type, value, traceback):
- self.stop()
-
- def stop(self):
- if self.terminate:
- print "Destroying node %s" % self.node.id
- self.node.destroy()
- else:
- print "Not terminating EC2 instance %s." % self.node.id
-
- def setup(self):
- pass
-
-class EC2Instance (nodeWrapper):
- def __init__(self, configurator, **kwargs):
- super(EC2Instance, self).__init__(configurator, **kwargs)
- # Stuff we need to start an instance: AMI name, key and cert
- # files. AMI and mtype default to configuration in this file,
- # but can be overridden.
- self.ec2_ami = configurator.findOrDefault(kwargs, "ec2_ami")
- self.ec2_mtype = configurator.findOrDefault(kwargs, "ec2_mtype")
- self.use_internal_name = True if "use_internal_name" in kwargs else False
- self.ec2_sshkey=kwargs["ec2_sshkey"]
-
- # FIXME: this needs to be a commandline option
- self.ec2_groups = ["default", "buildbot-slave", "dist-slave"]
-
-
- def start(self):
- "Fire up a fresh EC2 instance."
- EC2 = get_driver(Provider.EC2)
- self.driver = EC2NodeDriver(settings.id, settings.key)
- image = NodeImage(self.ec2_ami, self.ec2_ami, EC2)
- size = NodeSize(self.ec2_mtype, self.ec2_mtype, None, None, None, None, EC2)
- self.node = self.driver.create_node(image=image, name=self.ec2_ami, size=size, keyname=self.ec2_sshkey, securitygroup=self.ec2_groups)
- print "Created node %s" % self.node.id
-
- def list_nodes(self):
- return self.driver.list_nodes()
-
-class SshConnectionConfigurator (BaseConfigurator):
- def __init__(self, **kwargs):
- super(SshConnectionConfigurator, self).__init__(**kwargs)
- self.configuration += [("ssh_login",
- # FLAW: this actually depends more on the AMI
- # than the triple.
- ((("debian", "*", "*"), "root"),
- (("ubuntu", "10.10", "*"), "ubuntu"),
- (("ubuntu", "10.4", "*"), "ubuntu"),
- (("ubuntu", "9.10", "*"), "ubuntu"),
- (("ubuntu", "9.4", "*"), "root"),
- (("ubuntu", "8.10", "*"), "root"),
- (("ubuntu", "8.4", "*"), "ubuntu"),
- (("fedora", "*", "*"), "root"),
- (("centos", "*", "*"), "root"))),
- ]
-
-class SshConnection (object):
- def __init__(self, configurator, **kwargs):
- # Stuff we need to talk to the thing properly
- self.ssh_login = configurator.findOrDefault(kwargs, "ssh_login")
-
- self.ssh_host = kwargs["ssh_host"]
- self.ssh_keyfile=kwargs["ssh_keyfile"]
- # Gets set to False when we think we can ssh in.
- self.sshwait = True
-
- def initSsh(self):
- ctlpath="/tmp/ec2-ssh-%s-%s-%s" % (self.ssh_host, self.ssh_login, os.getpid())
- argv = ["ssh", "-o", "StrictHostKeyChecking no",
- "-M", "-o", "ControlPath %s" % ctlpath,
- "-v", "-l", self.ssh_login, "-i", self.ssh_keyfile,
- self.ssh_host]
- print "Setting up ssh master connection with %s" % argv
- self.sshproc = subprocess.Popen(argv)
- self.ctlpath = ctlpath
-
-
- def __enter__(self):
- self.initSsh()
- return self
-
- def __exit__(self, type, value, traceback):
- os.kill(self.sshproc.pid, signal.SIGTERM)
- self.sshproc.wait()
-
- def runRemotely(self, argv):
- """Run a command on the host."""
- LocalHost.runLocally(["ssh", "-o", "StrictHostKeyChecking no",
- "-S", self.ctlpath,
- "-l", self.ssh_login,
- "-i", self.ssh_keyfile,
- self.ssh_host] + argv)
-
- def sendFiles(self, files):
- for (localfile, remotefile) in files:
- LocalHost.runLocally(["scp", "-o", "StrictHostKeyChecking no",
- "-o", "ControlMaster auto",
- "-o", "ControlPath %s" % self.ctlpath,
- "-i", self.ssh_keyfile,
- "-rv", localfile,
- self.ssh_login + "@" + self.ssh_host + ":" +
- ("" if remotefile is None else remotefile) ])
-
- def recvFiles(self, files):
- for (remotefile, localfile) in files:
- LocalHost.runLocally(["scp", "-o", "StrictHostKeyChecking no",
- "-o", "ControlMaster auto",
- "-o", "ControlPath %s" % self.ctlpath,
- "-i", self.ssh_keyfile,
- "-rv",
- self.ssh_login + "@" + self.ssh_host +
- ":" + remotefile,
- "." if localfile is None else localfile ])
-
-
-class ScriptFileConfigurator (BaseConfigurator):
- deb_productdir = "dists"
- rpm_productdir = "/usr/src/redhat/RPMS" # FIXME: this could be
- # ~/redhat/RPMS or
- # something elsewhere
-
- preamble_commands = """
-set -x # verbose execution, for debugging
-set -e # errexit, stop on errors
-"""
- # Strictly speaking, we don't need to mangle debian files on rpm
- # systems (and vice versa), but (a) it doesn't hurt anything to do
- # so, and (b) mangling files the same way everywhere could
- # conceivably help uncover bugs in the hideous hideous sed
- # programs we're running here. (N.B., for POSIX wonks: POSIX sed
- # doesn't support either in-place file editing, which we use
- # below. So if we end up wanting to run these mangling commands
- # e.g., on a BSD, we'll need to make them fancier.)
- mangle_files_commands ="""
-# On debianoids, the package names in the changelog and control file
-# must agree, and only files in a subdirectory of debian/ matching the
-# package name will get included in the .deb, so we also have to mangle
-# the rules file.
-( cd "{pkg_name}{pkg_name_suffix}-{pkg_version}" && sed -i '1s/.*([^)]*)/{pkg_name}{pkg_name_suffix} ({pkg_version})/' debian/changelog ) || exit 1
-( cd "{pkg_name}{pkg_name_suffix}-{pkg_version}" && sed -i 's/^Source:.*/Source: {pkg_name}{pkg_name_suffix}/;
-s/^Package:.*mongodb/Package: {pkg_name}{pkg_name_suffix}\\
-Conflicts: {pkg_name_conflicts}/' debian/control; ) || exit 1
-( cd "{pkg_name}{pkg_name_suffix}-{pkg_version}" && sed -i 's|$(CURDIR)/debian/mongodb/|$(CURDIR)/debian/{pkg_name}{pkg_name_suffix}/|g' debian/rules) || exit 1
-( cd "{pkg_name}{pkg_name_suffix}-{pkg_version}" && sed -i 's|debian/mongodb.manpages|debian/{pkg_name}{pkg_name_suffix}.manpages|g' debian/rules) || exit 1
-( cd "{pkg_name}{pkg_name_suffix}-{pkg_version}" && sed -i '/^Name:/s/.*/Name: {pkg_name}{pkg_name_suffix}\\
-Conflicts: {pkg_name_conflicts}/; /^Version:/s/.*/Version: {pkg_version}/; /Requires.*mongo/s/mongo/{pkg_name}{pkg_name_suffix}/;' rpm/mongo.spec )
-# Debian systems require some ridiculous workarounds to get an init
-# script at /etc/init.d/mongodb when the packge name isn't the init
-# script name. Note: dh_installinit --name won't work, because that
-# option would require the init script under debian/ to be named
-# mongodb.
-( cd "{pkg_name}{pkg_name_suffix}-{pkg_version}" &&
-ln debian/init.d debian/{pkg_name}{pkg_name_suffix}.mongodb.init &&
-ln debian/mongodb.upstart debian/{pkg_name}{pkg_name_suffix}.mongodb.upstart &&
-sed -i 's/dh_installinit/dh_installinit --name=mongodb/' debian/rules) || exit 1
-( cd "{pkg_name}{pkg_name_suffix}-{pkg_version}" && cat debian/rules)
-( cd "{pkg_name}{pkg_name_suffix}-{pkg_version}" && cat rpm/mongo.spec)
-"""
-
- # If we're just packaging up nightlies, do this:
- nightly_build_mangle_files="""
-( cd "{pkg_name}{pkg_name_suffix}-{pkg_version}" && sed -i '/scons[[:space:]]*$/d; s^scons.*install^mkdir -p debian/{pkg_name}{pkg_name_suffix} \&\& wget http://downloads.mongodb.org/linux/mongodb-linux-{mongo_arch}-{mongo_pub_version}.tgz \&\& tar xzvf mongodb-linux-{mongo_arch}-{mongo_pub_version}.tgz \&\& find `tar tzf mongodb-linux-{mongo_arch}-{mongo_pub_version}.tgz | sed "s|/.*||" | sort -u | head -n1` -mindepth 1 -maxdepth 1 -type d | xargs -n1 -IARG mv -v ARG debian/{pkg_name}{pkg_name_suffix}/usr \&\& (rm debian/{pkg_name}{pkg_name_suffix}/usr/bin/mongosniff || true)^' debian/rules)
-( cd "{pkg_name}{pkg_name_suffix}-{pkg_version}" && sed -i 's/^BuildRequires:.*//; s/scons.*\ -c//; s/scons.*\ all//; s^scons.*install^(mkdir -p $RPM_BUILD_ROOT/usr ; cd /tmp \&\& curl http://downloads.mongodb.org/linux/mongodb-linux-{mongo_arch}-{mongo_pub_version}.tgz > mongodb-linux-{mongo_arch}-{mongo_pub_version}.tgz \&\& tar xzvf mongodb-linux-{mongo_arch}-{mongo_pub_version}.tgz \&\& find `tar tzf mongodb-linux-{mongo_arch}-{mongo_pub_version}.tgz | sed "s|/.*||" | sort -u | head -n1` -mindepth 1 -maxdepth 1 -type d | xargs -n1 -IARG cp -pRv ARG $RPM_BUILD_ROOT/usr \&\& (rm -r $RPM_BUILD_ROOT/usr/bin/mongosniff $RPM_BUILD_ROOT/usr/lib64/libmongoclient.a $RPM_BUILD_ROOT/usr/lib/libmongoclient.a $RPM_BUILD_ROOT/usr/include/mongo || true))^' rpm/mongo.spec)
-# Upstream nightlies no longer contain libmongoclient.
-( cd "{pkg_name}{pkg_name_suffix}-{pkg_version}" && sed -i '/%package devel/{{N;N;d;}}; /%description devel/{{N;N;N;N;N;d;}}; /%files devel/{{N;N;N;d;}};' rpm/mongo.spec )
-( cd "{pkg_name}{pkg_name_suffix}-{pkg_version}" && cat debian/rules)
-( cd "{pkg_name}{pkg_name_suffix}-{pkg_version}" && cat rpm/mongo.spec)
-"""
-#$RPM_BUILD_ROOT/usr/lib/libmongoclient.a $RPM_BUILD_ROOT/usr/lib64/libmongoclient.a
- mangle_files_for_new_deb_xulrunner_commands = """
-( cd "{pkg_name}{pkg_name_suffix}-{pkg_version}" && sed -i 's/xulrunner-dev/xulrunner-1.9.2-dev/g' debian/control )
-"""
-
- mangle_files_for_ancient_redhat_commands = """
-# Ancient RedHats ship with very old boosts and non-UTF8-aware js
-# libraries, so we need to link statically to those.
-( cd "{pkg_name}{pkg_name_suffix}-{pkg_version}" && sed -i 's|^scons.*((inst)all)|scons --prefix=$RPM_BUILD_ROOT/usr --extralib=nspr4 --staticlib=boost_system-mt,boost_thread-mt,boost_filesystem-mt,boost_program_options-mt,js $1|' rpm/mongo.spec )
-"""
-
- deb_prereq_commands = """
-# Configure debconf to never prompt us for input.
-export DEBIAN_FRONTEND=noninteractive
-apt-get update
-apt-get install -y {pkg_prereq_str}
-"""
-
- deb_build_commands="""
-mkdir -p "{pkg_product_dir}/{distro_version}/10gen/binary-{distro_arch}"
-mkdir -p "{pkg_product_dir}/{distro_version}/10gen/source"
-( cd "{pkg_name}{pkg_name_suffix}-{pkg_version}"; debuild ) || exit 1
-# Try installing it
-dpkg -i {pkg_name}{pkg_name_suffix}*.deb
-ps ax | grep mongo || {{ echo "no running mongo" >/dev/stderr; exit 1; }}
-dpkg --remove $(for f in {pkg_name}{pkg_name_suffix}*.deb ; do echo ${{f%%_*}}; done)
-dpkg --purge $(for f in {pkg_name}{pkg_name_suffix}*.deb ; do echo ${{f%%_*}}; done)
-cp {pkg_name}{pkg_name_suffix}*.deb "{pkg_product_dir}/{distro_version}/10gen/binary-{distro_arch}"
-cp {pkg_name}{pkg_name_suffix}*.dsc "{pkg_product_dir}/{distro_version}/10gen/source"
-cp {pkg_name}{pkg_name_suffix}*.tar.gz "{pkg_product_dir}/{distro_version}/10gen/source"
-dpkg-scanpackages "{pkg_product_dir}/{distro_version}/10gen/binary-{distro_arch}" /dev/null | gzip -9c > "{pkg_product_dir}/{distro_version}/10gen/binary-{distro_arch}/Packages.gz"
-dpkg-scansources "{pkg_product_dir}/{distro_version}/10gen/source" /dev/null | gzip -9c > "{pkg_product_dir}/{distro_version}/10gen/source/Sources.gz"
-"""
- centos_prereq_commands = """
-rpm -Uvh http://download.fedora.redhat.com/pub/epel/5/{distro_arch}/epel-release-5-4.noarch.rpm
-yum -y install {pkg_prereq_str}
-"""
- fedora_prereq_commands = """
-#rpm -Uvh http://download.fedora.redhat.com/pub/epel/5/{distro_arch}/epel-release-5-4.noarch.rpm
-yum -y install {pkg_prereq_str}
-"""
- rpm_build_commands="""
-for d in BUILD BUILDROOT RPMS SOURCES SPECS SRPMS; do mkdir -p {rpmbuild_dir}/$d; done
-cp -v "{pkg_name}{pkg_name_suffix}-{pkg_version}/rpm/mongo.spec" {rpmbuild_dir}/SPECS/{pkg_name}{pkg_name_suffix}.spec
-tar -cpzf {rpmbuild_dir}/SOURCES/"{pkg_name}{pkg_name_suffix}-{pkg_version}".tar.gz "{pkg_name}{pkg_name_suffix}-{pkg_version}"
-rpmbuild -ba --target={distro_arch} {rpmbuild_dir}/SPECS/{pkg_name}{pkg_name_suffix}.spec
-# FIXME: should install the rpms, check if mongod is running.
-"""
- # FIXME: this is clean, but adds 40 minutes or so to the build process.
- old_rpm_precommands = """
-yum install -y bzip2-devel python-devel libicu-devel chrpath zlib-devel nspr-devel readline-devel ncurses-devel
-# FIXME: this is just some random URL found on rpmfind some day in 01/2010.
-wget ftp://194.199.20.114/linux/EPEL/5Client/SRPMS/js-1.70-8.el5.src.rpm
-rpm -ivh js-1.70-8.el5.src.rpm
-sed -i 's/XCFLAGS.*$/XCFLAGS=\"%{{optflags}} -fPIC -DJS_C_STRINGS_ARE_UTF8\" \\\\/' /usr/src/redhat/SPECS/js.spec
-rpmbuild -ba /usr/src/redhat/SPECS/js.spec
-rpm -Uvh /usr/src/redhat/RPMS/{distro_arch}/js-1.70-8.{distro_arch}.rpm
-rpm -Uvh /usr/src/redhat/RPMS/{distro_arch}/js-devel-1.70-8.{distro_arch}.rpm
-# FIXME: this is just some random URL found on rpmfind some day in 01/2010.
-wget ftp://195.220.108.108/linux/sourceforge/g/project/gr/gridiron2/support-files/FC10%20source%20RPMs/boost-1.38.0-1.fc10.src.rpm
-rpm -ivh boost-1.38.0-1.fc10.src.rpm
-rpmbuild -ba /usr/src/redhat/SPECS/boost.spec
-rpm -ivh /usr/src/redhat/RPMS/{distro_arch}/boost-1.38.0-1.{distro_arch}.rpm
-rpm -ivh /usr/src/redhat/RPMS/{distro_arch}/boost-devel-1.38.0-1.{distro_arch}.rpm
-"""
-
- # This horribleness is an attempt to work around ways that you're
- # not really meant to package things for Debian unless you are
- # Debian.
-
- # On very old Debianoids, libboost-<foo>-dev will be some old
- # boost that's not as thready as we want, but which Eliot says
- # will work; on very new Debianoids, libbost-<foo>-dev is what we
- # want.
- unversioned_deb_boost_prereqs = ["libboost-thread-dev", "libboost-filesystem-dev", "libboost-program-options-dev", "libboost-date-time-dev", "libboost-dev"]
- # On some in-between Debianoids, libboost-<foo>-dev is still a
- # 1.34, but 1.35 packages are available, so we want those.
- versioned_deb_boost_prereqs = ["libboost-thread1.35-dev", "libboost-filesystem1.35-dev", "libboost-program-options1.35-dev", "libboost-date-time1.35-dev", "libboost1.35-dev"]
-
- new_versioned_deb_boost_prereqs = ["libboost-thread1.42-dev", "libboost-filesystem1.42-dev", "libboost-program-options1.42-dev", "libboost-date-time1.42-dev", "libboost1.42-dev"]
- unversioned_deb_xulrunner_prereqs = ["xulrunner-dev"]
-
- old_versioned_deb_xulrunner_prereqs = ["xulrunner-1.9-dev"]
- new_versioned_deb_xulrunner_prereqs = ["xulrunner-1.9.2-dev"]
-
- common_deb_prereqs = [ "build-essential", "dpkg-dev", "libreadline-dev", "libpcap-dev", "libpcre3-dev", "git-core", "scons", "debhelper", "devscripts", "git-core" ]
-
- centos_preqres = ["js-devel", "readline-devel", "pcre-devel", "gcc-c++", "scons", "rpm-build", "git" ]
- fedora_prereqs = ["js-devel", "readline-devel", "pcre-devel", "gcc-c++", "scons", "rpm-build", "git", "curl" ]
-
- def __init__(self, **kwargs):
- super(ScriptFileConfigurator, self).__init__(**kwargs)
- # FIXME: this method is disabled until we get back around to
- # actually building from source.
- if None: # kwargs["mongo_version"][0] == 'r':
- self.get_mongo_commands = """
-wget -Otarball.tgz "http://github.com/mongodb/mongo/tarball/{mongo_version}";
-tar xzf tarball.tgz
-mv "`tar tzf tarball.tgz | sed 's|/.*||' | sort -u | head -n1`" "{pkg_name}{pkg_name_suffix}-{pkg_version}"
-"""
- else:
- self.get_mongo_commands = """
-git clone git://github.com/mongodb/mongo.git
-"""
- # This is disabled for the moment. it's for building the
- # tip of some versioned branch.
- if None: #kwargs['mongo_version'][0] == 'v':
- self.get_mongo_commands +="""
-( cd mongo && git archive --prefix="{pkg_name}{pkg_name_suffix}-{pkg_version}/" "`git log origin/{mongo_version} | sed -n '1s/^commit //p;q'`" ) | tar xf -
-"""
- else:
- self.get_mongo_commands += """
-( cd mongo && git archive --prefix="{pkg_name}{pkg_name_suffix}-{pkg_version}/" "{mongo_version}" ) | tar xf -
-"""
-
- if "local_mongo_dir" in kwargs:
- self.mangle_files_commands = """( cd "{pkg_name}{pkg_name_suffix}-{pkg_version}" && rm -rf debian rpm && cp -pvR ~/pkg/* . )
-""" + self.mangle_files_commands
-
- self.configuration += [("pkg_product_dir",
- ((("ubuntu", "*", "*"), self.deb_productdir),
- (("debian", "*", "*"), self.deb_productdir),
- (("fedora", "*", "*"), "~/rpmbuild/RPMS"),
- (("centos", "*", "*"), "/usr/src/redhat/RPMS"))),
- ("pkg_prereqs",
- ((("ubuntu", "9.4", "*"),
- self.versioned_deb_boost_prereqs + self.unversioned_deb_xulrunner_prereqs + self.common_deb_prereqs),
- (("ubuntu", "9.10", "*"),
- self.unversioned_deb_boost_prereqs + self.unversioned_deb_xulrunner_prereqs + self.common_deb_prereqs),
- (("ubuntu", "10.10", "*"),
- self.new_versioned_deb_boost_prereqs + self.new_versioned_deb_xulrunner_prereqs + self.common_deb_prereqs),
- (("ubuntu", "10.4", "*"),
- self.unversioned_deb_boost_prereqs + self.new_versioned_deb_xulrunner_prereqs + self.common_deb_prereqs),
- (("ubuntu", "8.10", "*"),
- self.versioned_deb_boost_prereqs + self.unversioned_deb_xulrunner_prereqs + self.common_deb_prereqs),
- (("ubuntu", "8.4", "*"),
- self.unversioned_deb_boost_prereqs + self.old_versioned_deb_xulrunner_prereqs + self.common_deb_prereqs),
- (("debian", "5.0", "*"),
- self.versioned_deb_boost_prereqs + self.unversioned_deb_xulrunner_prereqs + self.common_deb_prereqs),
- (("fedora", "*", "*"),
- self.fedora_prereqs),
- (("centos", "5.4", "*"),
- self.centos_preqres))),
- # FIXME: this is deprecated
- ("commands",
- ((("debian", "*", "*"),
- self.deb_prereq_commands + self.get_mongo_commands + self.mangle_files_commands + self.deb_build_commands),
- (("ubuntu", "10.4", "*"),
- self.preamble_commands + self.deb_prereq_commands + self.get_mongo_commands + self.mangle_files_commands + self.mangle_files_for_new_deb_xulrunner_commands + self.deb_build_commands),
- (("ubuntu", "*", "*"),
- self.preamble_commands + self.deb_prereq_commands + self.get_mongo_commands + self.mangle_files_commands + self.deb_build_commands),
- (("centos", "*", "*"),
- self.preamble_commands + self.old_rpm_precommands + self.centos_prereq_commands + self.get_mongo_commands + self.mangle_files_commands + self.mangle_files_for_ancient_redhat_commands + self.rpm_build_commands),
- (("fedora", "*", "*"),
- self.preamble_commands + self.old_rpm_precommands + self.fedora_prereq_commands + self.get_mongo_commands + self.mangle_files_commands + self.rpm_build_commands))),
- ("preamble_commands",
- ((("*", "*", "*"), self.preamble_commands),
- )),
- ("install_prereqs",
- ((("debian", "*", "*"), self.deb_prereq_commands),
- (("ubuntu", "*", "*"), self.deb_prereq_commands),
- (("centos", "*", "*"), self.centos_prereq_commands),
- (("fedora", "*", "*"), self.fedora_prereq_commands))),
- ("get_mongo",
- ((("*", "*", "*"), self.get_mongo_commands),
- )),
- ("mangle_mongo",
- ((("debian", "*", "*"), self.mangle_files_commands),
- (("ubuntu", "10.10", "*"),
- self.mangle_files_commands + self.mangle_files_for_new_deb_xulrunner_commands),
- (("ubuntu", "10.4", "*"),
- self.mangle_files_commands + self.mangle_files_for_new_deb_xulrunner_commands),
- (("ubuntu", "*", "*"), self.mangle_files_commands),
- (("centos", "*", "*"),
- self.mangle_files_commands + self.mangle_files_for_ancient_redhat_commands),
- (("fedora", "*", "*"),
- self.mangle_files_commands))),
- ("build_prerequisites",
- ((("fedora", "*", "*"), self.old_rpm_precommands),
- (("centos", "*", "*"), self.old_rpm_precommands),
- (("*", "*", "*"), ''))),
- ("install_for_packaging",
- ((("debian", "*", "*"),""),
- (("ubuntu", "*", "*"),""),
- (("fedora", "*", "*"), ""),
- (("centos", "*", "*"),""))),
- ("build_package",
- ((("debian", "*", "*"),
- self.deb_build_commands),
- (("ubuntu", "*", "*"),
- self.deb_build_commands),
- (("fedora", "*", "*"),
- self.rpm_build_commands),
- (("centos", "*", "*"),
- self.rpm_build_commands))),
- ("pkg_name",
- ((("debian", "*", "*"), "mongodb"),
- (("ubuntu", "*", "*"), "mongodb"),
- (("centos", "*", "*"), "mongo"),
- (("fedora", "*", "*"), "mongo"))),
- # FIXME: there should be a command-line argument for this.
- ("pkg_name_conflicts",
- ((("*", "*", "*"), ["", "-stable", "-unstable", "-snapshot", "-oldstable"]),
- )),
- ("rpmbuild_dir",
- ((("fedora", "*", "*"), "~/rpmbuild"),
- (("centos", "*", "*"), "/usr/src/redhat"),
- (("*", "*","*"), ''),
- )),
- ]
-
-
-
-
-class ScriptFile(object):
- def __init__(self, configurator, **kwargs):
- self.configurator = configurator
- self.mongo_version_spec = kwargs['mongo_version_spec']
- self.mongo_arch = kwargs["arch"] if kwargs["arch"] == "x86_64" else "i686"
- self.pkg_prereqs = configurator.default("pkg_prereqs")
- self.pkg_name = configurator.default("pkg_name")
- self.pkg_product_dir = configurator.default("pkg_product_dir")
- #self.formatter = configurator.default("commands")
- self.distro_name = configurator.default("distro_name")
- self.distro_version = configurator.default("distro_version")
- self.distro_arch = configurator.default("distro_arch")
-
- def bogoformat(self, fmt, **kwargs):
- r = ''
- i = 0
- while True:
- c = fmt[i]
- if c in '{}':
- i+=1
- c2=fmt[i]
- if c2 == c:
- r+=c
- else:
- j=i
- while True:
- p=fmt[j:].find('}')
- if p == -1:
- raise Exception("malformed format string starting at %d: no closing brace" % i)
- else:
- j+=p
- if len(fmt) > (j+1) and fmt[j+1]=='}':
- j+=2
- else:
- break
- key = fmt[i:j]
- r+=kwargs[key]
- i=j
- else:
- r+=c
- i+=1
- if i==len(fmt):
- return r
-
- def fmt(self, formatter, **kwargs):
- try:
- return string.Formatter.format(formatter, kwargs)
- finally:
- return self.bogoformat(formatter, **kwargs)
-
- def genscript(self):
- script=''
- formatter = self.configurator.default("preamble_commands") + self.configurator.default("install_prereqs")
- script+=self.fmt(formatter,
- distro_name=self.distro_name,
- distro_version=self.distro_version,
- distro_arch=self.distro_arch,
- pkg_name=self.pkg_name,
- pkg_product_dir=self.pkg_product_dir,
- mongo_arch=self.mongo_arch,
- pkg_prereq_str=" ".join(self.pkg_prereqs),
- )
-
- specs=self.mongo_version_spec.split(',')
- for spec in specs:
- (version, pkg_name_suffix, pkg_version) = parse_mongo_version_spec(spec)
- mongo_version = version if version[0] != 'n' else ('HEAD' if version == 'nlatest' else 'r'+version[1:]) #'HEAD'
- mongo_pub_version = version.lstrip('n') if version[0] in 'n' else 'latest'
- pkg_name_suffix = pkg_name_suffix if pkg_name_suffix else ''
- pkg_version = pkg_version
- pkg_name_conflicts = list(self.configurator.default("pkg_name_conflicts") if pkg_name_suffix else [])
- pkg_name_conflicts.remove(pkg_name_suffix) if pkg_name_suffix and pkg_name_suffix in pkg_name_conflicts else []
- formatter = self.configurator.default("get_mongo") + self.configurator.default("mangle_mongo") + (self.configurator.nightly_build_mangle_files if version[0] == 'n' else '') +(self.configurator.default("build_prerequisites") if version[0] != 'n' else '') + self.configurator.default("install_for_packaging") + self.configurator.default("build_package")
- script+=self.fmt(formatter,
- mongo_version=mongo_version,
- distro_name=self.distro_name,
- distro_version=self.distro_version,
- distro_arch=self.distro_arch,
- pkg_prereq_str=" ".join(self.pkg_prereqs),
- pkg_name=self.pkg_name,
- pkg_name_suffix=pkg_name_suffix,
- pkg_version=pkg_version,
- pkg_product_dir=self.pkg_product_dir,
- # KLUDGE: rpm specs and deb
- # control files use
- # comma-separated conflicts,
- # but there's no reason to
- # suppose this works elsewhere
- pkg_name_conflicts = ", ".join([self.pkg_name+conflict for conflict in pkg_name_conflicts]),
- mongo_arch=self.mongo_arch,
- mongo_pub_version=mongo_pub_version,
- rpmbuild_dir=self.configurator.default('rpmbuild_dir'))
- script+='rm -rf mongo'
- return script
-
- def __enter__(self):
- self.localscript=None
- # One of tempfile or I is very stupid.
- (fh, name) = tempfile.mkstemp('', "makedist.", ".")
- try:
- pass
- finally:
- os.close(fh)
- with open(name, 'w+') as fh:
- fh.write(self.genscript())
- self.localscript=name
- return self
-
- def __exit__(self, type, value, traceback):
- if self.localscript:
- os.unlink(self.localscript)
-
-class Configurator(SshConnectionConfigurator, EC2InstanceConfigurator, ScriptFileConfigurator, BaseHostConfigurator):
- def __init__(self, **kwargs):
- super(Configurator, self).__init__(**kwargs)
-
-class rackspaceInstance(nodeWrapper):
- def __init__(self, configurator, **kwargs):
- super(rackspaceInstance, self).__init__(configurator, **kwargs)
- self.imgname=configurator.default('rackspace_imgname')
-
- def start(self):
- driver = get_driver(Provider.RACKSPACE)
- self.conn = driver(settings.rackspace_account, settings.rackspace_api_key)
- name=self.imgname+'-'+str(os.getpid())
- images=filter(lambda x: (x.name.find(self.imgname) > -1), self.conn.list_images())
- sizes=self.conn.list_sizes()
- sizes.sort(cmp=lambda x,y: int(x.ram)<int(y.ram))
- node = None
- if len(images) > 1:
- raise Exception("too many images with \"%s\" in the name" % self.imgname)
- if len(images) < 1:
- raise Exception("too few images with \"%s\" in the name" % self.imgname)
- image = images[0]
- self.node = self.conn.create_node(image=image, name=name, size=sizes[0])
- # Note: the password is available only in the response to the
- # create_node request, not in subsequent list_nodes()
- # requests; so although the node objects we get back from
- # list_nodes() are usuable for most things, we must hold onto
- # the initial password.
- self.password = self.node.extra['password']
- print self.node
-
- def list_nodes(self):
- return self.conn.list_nodes()
-
- def setup(self):
- self.putSshKey()
-
- def putSshKey(self):
- keyfile=settings.makedist['ssh_keyfile']
- ssh = ParamikoSSHClient(hostname = self.node.public_ip[0], password = self.password)
- ssh.connect()
- print "putting ssh public key"
- ssh.put(".ssh/authorized_keys", contents=open(keyfile+'.pub').read(), chmod=0600)
- print "ok"
-
-def parse_mongo_version_spec(spec):
- foo = spec.split(":")
- mongo_version = foo[0] # this can be a commit id, a
- # release id "r1.2.2", or a branch name
- # starting with v.
- pkg_name_suffix = None # default, so a bare spec like "HEAD" doesn't NameError below
- if len(foo) > 1:
- pkg_name_suffix = foo[1]
- if len(foo) > 2 and foo[2]:
- pkg_version = foo[2]
- else:
- pkg_version = time.strftime("%Y%m%d")
- if not pkg_name_suffix:
- if mongo_version[0] in ["r", "v"]:
- nums = mongo_version.split(".")
- if int(nums[1]) % 2 == 0:
- pkg_name_suffix = "-stable"
- else:
- pkg_name_suffix = "-unstable"
- else:
- pkg_name_suffix = ""
- return (mongo_version, pkg_name_suffix, pkg_version)
-
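To make the branching above concrete, here is how a few specs come out of parse_mongo_version_spec (a sketch; the inputs mirror the help text further down, and the YYYYMMDD value is whatever today's date is):

    parse_mongo_version_spec("HEAD")           # ("HEAD", "", "<today as YYYYMMDD>")
    parse_mongo_version_spec("r1.2.3")         # ("r1.2.3", "-stable", "<today>") -- even middle number
    parse_mongo_version_spec("v1.3:-foo:123")  # ("v1.3", "-foo", "123")
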
-def main():
-# checkEnvironment()
-
- (kwargs, args) = processArguments()
- (rootdir, distro_name, distro_version, arch, mongo_version_spec) = args[:5]
- # FIXME: there are a few other characters that we can't use in
- # file names on Windows, in case this program really needs to run
- # there.
- distro_name = distro_name.replace('/', '-').replace('\\', '-')
- distro_version = distro_version.replace('/', '-').replace('\\', '-')
- arch = arch.replace('/', '-').replace('\\', '-')
- try:
- import settings
- if "makedist" in dir ( settings ):
- for key in ["ec2_sshkey", "ssh_keyfile", "gpg_homedir" ]:
- if key not in kwargs and key in settings.makedist:
- kwargs[key] = settings.makedist[key]
- except Exception, err:
- print "No settings: %s. Continuing anyway..." % err
-
- kwargs["distro_name"] = distro_name
- kwargs["distro_version"] = distro_version
- kwargs["arch"] = arch
- kwargs['mongo_version_spec'] = mongo_version_spec
-
- kwargs["localdir"] = rootdir
- # FIXME: this should also include the mongo version or something.
-# if "subdirs" in kwargs:
-# kwargs["localdir"] = "%s/%s/%s/%s/%s" % (rootdir, distro_name, distro_version, arch, kwargs["mongo_version"])
-# else:
-
- kwargs['gpg_homedir'] = kwargs["gpg_homedir"] if "gpg_homedir" in kwargs else os.path.expanduser("~/.gnupg")
- configurator = Configurator(**kwargs)
- LocalHost.runLocally(["mkdir", "-p", kwargs["localdir"]])
- with ScriptFile(configurator, **kwargs) as script:
- with open(script.localscript) as f:
- print """# Going to run the following on a fresh AMI:"""
- print f.read()
- time.sleep(10)
- # FIXME: it's not the best to have two different pathways for
- # the different hosting services, but...
- with EC2Instance(configurator, **kwargs) if kwargs['distro_name'] != 'fedora' else rackspaceInstance(configurator, **kwargs) as host:
- host.initwait()
- host.setup()
- kwargs["ssh_host"] = host.getHostname()
- with SshConnection(configurator, **kwargs) as ssh:
- ssh.runRemotely(["uname -a; ls /"])
- ssh.runRemotely(["mkdir", "pkg"])
- if "local_mongo_dir" in kwargs:
- ssh.sendFiles([(kwargs["local_mongo_dir"]+'/'+d, "pkg") for d in ["rpm", "debian"]])
- ssh.sendFiles([(kwargs['gpg_homedir'], ".gnupg")])
- ssh.sendFiles([(script.localscript, "makedist.sh")])
- ssh.runRemotely((["sudo"] if ssh.ssh_login != "root" else [])+ ["sh", "makedist.sh"])
- ssh.recvFiles([(script.pkg_product_dir, kwargs['localdir'])])
-
-def processArguments():
- # flagspec [ (short, long, argument?, description, argname)* ]
- flagspec = [ ("?", "usage", False, "Print a (useless) usage message", None),
- ("h", "help", False, "Print a help message and exit", None),
- ("N", "no-terminate", False, "Leave the EC2 instance running at the end of the job", None),
- ("S", "subdirs", False, "Create subdirectories of the output directory based on distro name, version, and architecture", None),
- ("I", "use-internal-name", False, "Use the EC2 internal hostname for sshing", None),
- (None, "gpg-homedir", True, "Local directory of gpg junk", "STRING"),
- (None, "local-mongo-dir", True, "Copy packaging files from local mongo checkout", "DIRECTORY"),
- ]
- shortopts = "".join([t[0] + (":" if t[2] else "") for t in flagspec if t[0] is not None])
- longopts = [t[1] + ("=" if t[2] else "") for t in flagspec]
-
- try:
- opts, args = getopt.getopt(sys.argv[1:], shortopts, longopts)
- except getopt.GetoptError, err:
- print str(err)
- sys.exit(2)
-
- # Normalize the getopt-parsed options.
- kwargs = {}
- for (opt, arg) in opts:
- flag = opt
- opt = opt.lstrip("-")
- if flag[:2] == '--': #long opt
- kwargs[opt.replace('-', '_')] = arg
- elif flag[:1] == "-": #short opt
- ok = False
- for t in flagspec:
- if t[0] == opt:
- ok = True
- kwargs[t[1].replace('-', '_')] = arg
- break
- if not ok:
- raise SimpleError("this shouldn't happen: unrecognized option flag: %s", opt)
- else:
- raise SimpleError("this shouldn't happen: non-option returned from getopt()")
-
- if "help" in kwargs:
- print "Usage: %s [OPTIONS] DIRECTORY DISTRO DISTRO-VERSION ARCHITECTURE MONGO-VERSION-SPEC" % sys.argv[0]
- print """Build some packages on new EC2 AMI instances, leave packages under DIRECTORY.
-
-MONGO-VERSION-SPEC has the syntax
-Commit(:Pkg-Name-Suffix(:Pkg-Version)). If Commit starts with an 'r',
-build from a tagged release; if Commit starts with an 'n', package up
-a nightly build; if Commit starts with a 'v', build from the HEAD of a
-version branch; otherwise, build whatever git commit is identified by
-Commit. Pkg-Name-Suffix gets appended to the package name, and
-defaults to "-stable" and "-unstable" if Commit looks like it
-designates a stable or unstable release/branch, respectively.
-Pkg-Version is used as the package version, and defaults to YYYYMMDD.
-Examples:
-
- HEAD # build a snapshot of HEAD, name the package
- # "mongodb", use YYYYMMDD for the version
-
- HEAD:-snap # build a snapshot of HEAD, name the package
- # "mongodb-snap", use YYYYMMDD for the version
-
- HEAD:-snap:123 # build a snapshot of HEAD, name the package
- # "mongodb-snap", use 123 for the version
-
- HEAD:-suffix:1.3 # build a snapshot of HEAD, name the package
- # "mongodb-snapshot", use "1.3 for the version
-
- r1.2.3 # build a package of the 1.2.3 release, call it "mongodb-stable",
- # make the package version YYYYMMDD.
-
- v1.2:-stable: # build a package of the HEAD of the 1.2 branch
-
- decafbad:-foo:123 # build git commit "decafbad", call the package
- # "mongodb-foo" with package version 123.
-
-Options:"""
- for t in flagspec:
- print "%-20s\t%s." % ("%4s--%s%s:" % ("-%s, " % t[0] if t[0] else "", t[1], ("="+t[4]) if t[4] else ""), t[3])
- print """
-Mandatory arguments to long options are also mandatory for short
-options."""
- sys.exit(0)
-
- if "usage" in kwargs:
- print "Usage: %s [OPTIONS] OUTPUT-DIR DISTRO-NAME DISTRO-VERSION ARCHITECTURE MONGO-VERSION-SPEC" % sys.argv[0]
- sys.exit(0)
-
-
- return (kwargs, args)
-
-
-if __name__ == "__main__":
- main()
-
-# Examples:
-
-# ./makedist.py /tmp/ubuntu ubuntu 8.10 x86_64 HEAD:-snapshot,v1.4:-stable,v1.5:-unstable
-# ./makedist.py /tmp/ubuntu ubuntu 8.10 x86_64 nlatest:-snapshot,n1.4.2:-stable,n1.5.0:-unstable
diff --git a/buildscripts/mergerepositories.py b/buildscripts/mergerepositories.py
deleted file mode 100644
index 028b6e2..0000000
--- a/buildscripts/mergerepositories.py
+++ /dev/null
@@ -1,194 +0,0 @@
-#!/usr/bin/env python
-
-from __future__ import with_statement
-from libcloud.types import Provider
-from libcloud.providers import get_driver
-from libcloud.drivers.ec2 import EC2NodeDriver, NodeImage
-from libcloud.base import Node, NodeImage, NodeSize, NodeState
-
-# libcloud's SSH client seems to be one of those pointless wrappers
-# that (at the moment) both doesn't add anything to the thing it wraps
-# (Paramiko) and also fails to expose the underlying thing's features.
-# What's wrong with people?
-#from libcloud.ssh import SSHClient
-
-import time
-import sys
-import settings
-import subprocess
-import os
-import socket
-
-EC2 = get_driver(Provider.EC2)
-EC2Driver=EC2NodeDriver(settings.id, settings.key)
-
-def tryEC2():
-
- image=NodeImage('ami-bf07ead6', 'ubuntu 10.4', EC2)
- size=NodeSize('m1.large', 'large', None, None, None, None, EC2)
-
- node = None
- try:
- node = EC2Driver.create_node(image=image, name="ubuntu-test", size=size, keyname="kp1", securitygroup=['default', 'dist-slave', 'buildbot-slave'])
- print node
- print node.id
- while node.state == NodeState.PENDING:
- time.sleep(3)
- finally:
- if node:
- node.destroy()
-
-
-class node(object):
- def initWait(self):
- while 1:
- n=None
- # EC2 sometimes takes a while to report a node.
- for i in range(6):
- nodes = [n for n in self.list_nodes() if (n.id==self.node.id)]
- if len(nodes)>0:
- n=nodes[0]
- break
- else:
- time.sleep(10)
- if not n:
- raise Exception("couldn't find node with id %s" % self.node.id)
- if n.state == NodeState.PENDING:
- time.sleep(10)
- else:
- self.node = n
- break
- print "ok"
- # Now wait for the node's sshd to be accepting connections.
- print "waiting for ssh"
- sshwait = True
- while sshwait:
- s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
- try:
- try:
- s.connect((self.node.public_ip[0], 22))
- sshwait = False
- print "connected on port 22 (ssh)"
- time.sleep(15) # arbitrary timeout, in case the
- # remote sshd is slow.
- except socket.error, err:
- pass
- finally:
- s.close()
- time.sleep(3) # arbitrary timeout
- print "ok"
-
- def __enter__(self):
- return self
-
- def __exit__(self, arg0, arg1, arg2):
- print "shutting down node %s" % self.node
- self.node.destroy()
-
-# I don't think libcloud's Nodes implement __enter__ and __exit__, and
-# I like the with statement for ensuring that we don't leak nodes when
-# we don't have to.
-class ec2node(node):
- def list_nodes(self):
- return EC2Driver.list_nodes()
-
-class ubuntuNode(ec2node):
- def __init__(self):
- image=NodeImage('ami-bf07ead6', 'ubuntu 10.4', EC2)
- size=NodeSize('m1.large', 'large', None, None, None, None, EC2)
-
- self.node = EC2Driver.create_node(image=image, name="ubuntu-test", size=size, securitygroup=['default', 'dist-slave', 'buildbot-slave'], keyname='kp1')
-
-class centosNode(ec2node):
- def __init__(self):
- image=NodeImage('ami-ccb35ea5', 'centos', EC2)
- size=NodeSize('m1.large', 'large', None, None, None, None, EC2)
-
- self.node = EC2Driver.create_node(image=image, name="centos-test", size=size, securitygroup=['default', 'dist-slave', 'buildbot-slave'], keyname='kp1')
-
-class rackspaceNode(node):
- def list_nodes(self):
- return self.conn.list_nodes()
-
-class fedora11Node(rackspaceNode):
- def __init__(self):
- driver = get_driver(Provider.RACKSPACE)
- self.conn = driver(settings.rackspace_account, settings.rackspace_api_key)
- string='Fedora 11'
- images=filter(lambda x: (x.name.find(string) > -1), self.conn.list_images())
- sizes=self.conn.list_sizes()
- sizes.sort(key=lambda x: int(x.ram)) # smallest size first; a bool-returning cmp never sorted correctly
- node = None
- if len(images) != 1:
- raise Exception("expected exactly one image with \"%s\" in the name, got %d" % (string, len(images)))
- image = images[0]
- self.node = self.conn.create_node(image=image, name=string, size=sizes[0])
- print self.node
- self.password = self.node.extra['password']
-
-class Err(Exception):
- pass
-
-def merge_yum_repo(dir, outdir):
- dirtail=dir.rstrip('/').split('/')[-1]
- keyfile=settings.makedist['ssh_keyfile']
- makeyumrepo="""find . -name RPMS | while read dir; do (cd $dir/.. && createrepo .); done"""
- with centosNode() as centos:
- centos.initWait()
- print centos.node
- run_for_effect(["scp", "-o", "StrictHostKeyChecking no","-i", keyfile, "-r", dir, "root@"+centos.node.public_ip[0]+":"])
- run_for_effect(["ssh", "-o", "StrictHostKeyChecking no","-i", keyfile, "root@"+centos.node.public_ip[0], "cd ./" + dirtail + " && " + makeyumrepo])
- run_for_effect(["scp", "-o", "StrictHostKeyChecking no", "-i", keyfile, "-r", "root@"+centos.node.public_ip[0]+":./"+dirtail +'/*', outdir])
-
-
-
-def merge_apt_repo(dir, outdir):
- dirtail=dir.rstrip('/').split('/')[-1]
-
- gpgdir=settings.makedist['gpg_homedir']
- keyfile=settings.makedist['ssh_keyfile']
-
- makeaptrepo="""for x in debian ubuntu; do (cd $x; for d in `find . -name '*.deb' | sed 's|^./||; s|/[^/]*$||' | sort -u`; do dpkg-scanpackages $d > $d/Packages; gzip -9c $d/Packages > $d/Packages.gz; done) ; done"""
- makereleaseprologue="""Origin: 10gen
-Label: 10gen
-Suite: 10gen
-Codename: VVVVVV
-Version: VVVVVV
-Architectures: i386 amd64
-Components: 10gen
-Description: 10gen packages"""
- makeaptrelease="""find . -maxdepth 3 -mindepth 3 | while read d; do ( cd $d && (echo '%s' | sed s/VVVVVV/$(basename $(pwd))/; apt-ftparchive release .) > /tmp/Release && mv /tmp/Release . && gpg -r `gpg --list-keys | grep uid | awk '{print $(NF)}'` --no-secmem-warning --no-tty -abs --output Release.gpg Release ); done""" % makereleaseprologue
- with ubuntuNode() as ubuntu:
- ubuntu.initWait()
- print ubuntu.node
- run_for_effect(["ssh", "-o", "StrictHostKeyChecking no","-i", keyfile, "ubuntu@"+ubuntu.node.public_ip[0], "sudo", "sh", "-c", "\"export DEBIAN_FRONTEND=noninteractive; apt-get update; apt-get -y install debhelper\""])
- run_for_effect(["scp", "-o", "StrictHostKeyChecking no","-i", keyfile, "-r", dir, "ubuntu@"+ubuntu.node.public_ip[0]+":"])
- run_for_effect(["scp", "-o", "StrictHostKeyChecking no","-i", keyfile, "-r", gpgdir, "ubuntu@"+ubuntu.node.public_ip[0]+":.gnupg"])
- run_for_effect(["ssh", "-o", "StrictHostKeyChecking no","-i", keyfile, "ubuntu@"+ubuntu.node.public_ip[0], "sh", "-c", "\"ls -lR ./" + dirtail + "\""])
- run_for_effect(["ssh", "-o", "StrictHostKeyChecking no","-i", keyfile, "ubuntu@"+ubuntu.node.public_ip[0], "cd ./"+dirtail + " && " + makeaptrepo])
- run_for_effect(["ssh", "-o", "StrictHostKeyChecking no","-i", keyfile, "ubuntu@"+ubuntu.node.public_ip[0], "cd ./"+dirtail + " && " + makeaptrelease])
- run_for_effect(["scp", "-o", "StrictHostKeyChecking no", "-i", keyfile, "-r", "ubuntu@"+ubuntu.node.public_ip[0]+":./"+dirtail +'/*', outdir])
-
-
-def run_for_effect(argv):
- print " ".join(argv)
- r=subprocess.Popen(argv).wait()
- if r!=0:
- raise Err("subprocess %s exited %d" % (argv, r))
-
-if __name__ == "__main__":
- (flavor, dir, outdir) = sys.argv[-3:]
-
- if flavor == "deb":
- merge_apt_repo(dir, outdir)
- elif flavor == "rpm":
- merge_yum_repo(dir, outdir)
- else:
- Err("unknown pkg flavor %s" % flavor)
- # TODO: yum repositories
-
-
- #main()
- #tryRackSpace()
diff --git a/buildscripts/packager.py b/buildscripts/packager.py
new file mode 100644
index 0000000..400239c
--- /dev/null
+++ b/buildscripts/packager.py
@@ -0,0 +1,982 @@
+#!/usr/bin/python
+
+# This program makes Debian and RPM repositories for MongoDB, by
+# downloading our tarballs of statically linked executables and
+# insinuating them into Linux packages. It must be run on a
+# Debianoid, since Debian provides tools to make RPMs, but RPM-based
+# systems don't provide debian packaging crud.
+
+# Notes:
+#
+# * Almost anything that you want to be able to influence about how a
+# package gets constructed must be embedded in some file that the
+# packaging tool uses for input (e.g., debian/rules, debian/control,
+# debian/changelog; or the RPM specfile), and the precise details are
+# arbitrary and silly. So this program generates all the relevant
+# inputs to the packaging tools.
+#
+# * Once a .deb or .rpm package is made, there's a separate layer of
+# tools that makes a "repository" for use by the apt/yum layers of
+# package tools. The layouts of these repositories are arbitrary and
+# silly, too.
+#
+# * Before you run the program on a new host, these are the
+# prerequisites:
+#
+# apt-get install dpkg-dev rpm debhelper fakeroot ia32-libs createrepo git-core
+# echo "Now put the dist gnupg signing keys in ~root/.gnupg"
+
+import errno
+import getopt
+import httplib
+import os
+import re
+import stat
+import subprocess
+import sys
+import tempfile
+import time
+import urlparse
+
+# For the moment, this program runs on the host that also serves our
+# repositories to the world, so the last thing the program does is
+# move the repositories into place. Make this be the path where the
+# web server will look for repositories.
+REPOPATH="/var/www/repo"
+
+# The 10gen names for the architectures we support.
+ARCHES=["i686", "x86_64"]
+
+# Made up names for the flavors of distribution we package for.
+DISTROS=["debian-sysvinit", "ubuntu-upstart", "redhat"]
+
+# When we're preparing a directory containing packaging tool inputs
+# and our binaries, use this relative subdirectory for placing the
+# binaries.
+BINARYDIR="BINARIES"
+
+class Spec(object):
+ def __init__(self, specstr):
+ tup = specstr.split(":")
+ self.ver = tup[0]
+ # Hack: the second item in the tuple is treated as a suffix if
+ # it lacks an equals sign; otherwise it's the start of named
+ # parameters.
+ self.suf = None
+ if len(tup) > 1 and tup[1].find("=") == -1:
+ self.suf = tup[1]
+ # Catch-all for any other parameters to the packaging.
+ i = 2 if self.suf else 1
+ self.params = dict([s.split("=", 1) for s in tup[i:]])
+ for key in self.params.keys():
+ assert(key in ["suffix", "revision"])
+
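A quick illustration of the spec grammar this constructor accepts (hypothetical version strings; the results follow directly from the parsing above):

    Spec("2.0.0").suffix()                # "-10gen" (even middle number)
    Spec("1.9.1").suffix()                # "-10gen-unstable" (odd middle number)
    s = Spec("2.0.0:-custom:revision=2")  # "-custom" has no "=", so it's the suffix
    s.suffix(), s.param("revision")       # ("-custom", "2")
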
+ def version(self):
+ return self.ver
+
+ def version_better_than(self, version_string):
+ # FIXME: this is wrong, but I'm in a hurry.
+ # e.g., "1.8.2" < "1.8.10", "1.8.2" < "1.8.2-rc1"
+ return self.ver > version_string
+
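The FIXME is real: plain string comparison gets both cases wrong ("1.8.2" > "1.8.10" lexically). A minimal numeric-aware sketch, ignoring release-candidate tags (my illustration, not part of this file):

    def version_tuple(v):
        # "1.8.10-rc1" -> (1, 8, 10); rc ordering still needs real handling
        return tuple(int(x) for x in v.split("-")[0].split("."))

    version_tuple("1.8.2") < version_tuple("1.8.10")  # True, as desired
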
+ def suffix(self):
+ # suffix is what we tack on after pkgbase.
+ if self.suf:
+ return self.suf
+ elif "suffix" in self.params:
+ return self.params["suffix"]
+ else:
+ return "-10gen" if int(self.ver.split(".")[1])%2==0 else "-10gen-unstable"
+
+
+ def pversion(self, distro):
+ # Note: Debian packages have funny rules about dashes in
+ # version numbers, and RPM simply forbids dashes. pversion
+ # will be the package's version number (but we need to know
+ # our upstream version too).
+ if re.search("^(debian|ubuntu)", distro.name()):
+ return re.sub("-", "~", self.ver)
+ elif re.search("(redhat|fedora|centos)", distro.name()):
+ return re.sub("\\d+-", "", self.ver)
+ else:
+ raise Exception("BUG: unsupported platform?")
+
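Concretely, re-applying the substitutions above to a hypothetical "1.8.2-rc1" (Debian's "~" sorts before the empty string, which is exactly what pre-releases need):

    re.sub("-", "~", "1.8.2-rc1")     # "1.8.2~rc1" on debian/ubuntu
    re.sub("\\d+-", "", "1.8.2-rc1")  # "1.8.rc1" on redhat (RPM forbids dashes)
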
+ def param(self, param):
+ if param in self.params:
+ return self.params[param]
+ return None
+
+class Distro(object):
+ def __init__(self, string):
+ self.n=string
+
+ def name(self):
+ return self.n
+
+ def pkgbase(self):
+ # pkgbase is the first part of the package's name on
+ # this distro.
+ return "mongo" if re.search("(redhat|fedora|centos)", self.n) else "mongodb"
+
+ def archname(self, arch):
+ if re.search("^(debian|ubuntu)", self.n):
+ return "i386" if arch.endswith("86") else "amd64"
+ elif re.search("^(centos|redhat|fedora)", self.n):
+ return "i686" if arch.endswith("86") else "x86_64"
+ else:
+ raise Exception("BUG: unsupported platform?")
+
+ def repodir(self, arch):
+ """Return the directory where we'll place the package files for
+ (distro, distro_version) in that distro's preferred repository
+ layout (as distinct from where that distro's packaging building
+ tools place the package files)."""
+ if re.search("^(debian|ubuntu)", self.n):
+ return "repo/%s/dists/dist/10gen/binary-%s/" % (self.n, self.archname(arch))
+ elif re.search("(redhat|fedora|centos)", self.n):
+ return "repo/%s/os/%s/RPMS/" % (self.n, self.archname(arch))
+ else:
+ raise Exception("BUG: unsupported platform?")
+
+ def make_pkg(self, arch, spec, srcdir):
+ if re.search("^(debian|ubuntu)", self.n):
+ return make_deb(self, arch, spec, srcdir)
+ elif re.search("^(centos|redhat|fedora)", self.n):
+ return make_rpm(self, arch, spec, srcdir)
+ else:
+ raise Exception("BUG: unsupported platform?")
+
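By way of example, here is how one of the DISTROS entries routes through these methods (a sketch; the values follow directly from the regexes above):

    d = Distro("ubuntu-upstart")
    d.pkgbase()           # "mongodb"
    d.archname("x86_64")  # "amd64"
    d.repodir("x86_64")   # "repo/ubuntu-upstart/dists/dist/10gen/binary-amd64/"
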
+def main(argv):
+ (flags, specs) = parse_args(argv[1:])
+ distros=[Distro(distro) for distro in DISTROS]
+
+ oldcwd=os.getcwd()
+ srcdir=oldcwd+"/../"
+
+ # We do all our work in a randomly-created directory. You can set
+ # TMPDIR to influence where this program will do stuff.
+ prefix=tempfile.mkdtemp()
+ print "Working in directory %s" % prefix
+
+ # This will be a list of directories where we put packages in
+ # "repository layout".
+ repos=[]
+
+ os.chdir(prefix)
+ try:
+ # Download the binaries.
+ urlfmt="http://fastdl.mongodb.org/linux/mongodb-linux-%s-%s.tgz"
+ for (spec, arch) in crossproduct(specs, ARCHES):
+ httpget(urlfmt % (arch, spec.version()), ensure_dir(tarfile(arch, spec)))
+
+ # Build a package for each distro/spec/arch tuple, and
+ # accumulate the repository-layout directories.
+ for (distro, spec, arch) in crossproduct(distros, specs, ARCHES):
+ repos.append(make_package(distro, arch, spec, srcdir))
+
+ # Build the repos' metadata.
+ for repo in set(repos):
+ print repo
+ make_repo(repo)
+
+ finally:
+ os.chdir(oldcwd)
+ if "-n" not in flags:
+ move_repos_into_place(prefix+"/repo", REPOPATH)
+ # FIXME: try shutil.rmtree some day.
+ sysassert(["rm", "-rv", prefix])
+
+def parse_args(args):
+ if len(args) == 0:
+ print """Usage: packager.py [OPTS] SPEC1 SPEC2 ... SPECn
+
+Options:
+
+ -n: Just build the packages, don't publish them as a repo
+ or clean out the working directory
+
+Each SPEC is a mongodb version string optionally followed by a colon
+and some parameters, of the form <paramname>=<value>. Supported
+parameters:
+
+ suffix -- suffix to append to the package's base name. (If
+ unsupplied, suffixes default based on the parity of the
+ middle number in the version.)
+
+ revision -- least-significant component of the package version number
+"""
+ sys.exit(0)
+
+ try:
+ (flags, args) = getopt.getopt(args, "n")
+ except getopt.GetoptError, err:
+ print str(err)
+ sys.exit(2)
+ flags=dict(flags)
+ specs=[Spec(arg) for arg in args]
+ return (flags, specs)
+
+def crossproduct(*seqs):
+ """A generator for iterating all the tuples consisting of elements
+ of seqs."""
+ l = len(seqs)
+ if l == 0:
+ pass
+ elif l == 1:
+ for i in seqs[0]:
+ yield [i]
+ else:
+ for lst in crossproduct(*seqs[:-1]):
+ for i in seqs[-1]:
+ lst2=list(lst)
+ lst2.append(i)
+ yield lst2
+
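For example (a quick check of the recursion above):

    list(crossproduct([1, 2], ["a", "b"]))
    # [[1, 'a'], [1, 'b'], [2, 'a'], [2, 'b']]
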
+def sysassert(argv):
+ """Run argv and assert that it exited with status 0."""
+ print "In %s, running %s" % (os.getcwd(), " ".join(argv))
+ sys.stdout.flush()
+ sys.stderr.flush()
+ assert(subprocess.Popen(argv).wait()==0)
+
+def backtick(argv):
+ """Run argv and return its output string."""
+ print "In %s, running %s" % (os.getcwd(), " ".join(argv))
+ sys.stdout.flush()
+ sys.stderr.flush()
+ return subprocess.Popen(argv, stdout=subprocess.PIPE).communicate()[0]
+
+def ensure_dir(filename):
+ """Make sure that the directory that's the dirname part of
+ filename exists, and return filename."""
+ dirpart = os.path.dirname(filename)
+ try:
+ os.makedirs(dirpart)
+ except OSError: # as exc: # Python >2.5
+ exc=sys.exc_value
+ if exc.errno == errno.EEXIST:
+ pass
+ else:
+ raise exc
+ return filename
+
+
+def tarfile(arch, spec):
+ """Return the location where we store the downloaded tarball for
+ (arch, spec)"""
+ return "dl/mongodb-linux-%s-%s.tar.gz" % (spec.version(), arch)
+
+def setupdir(distro, arch, spec):
+ # The setupdir will be a directory containing all inputs to the
+ # distro's packaging tools (e.g., package metadata files, init
+ # scripts, etc.), along with the already-built binaries. In case
+ # the following format string is unclear, an example setupdir
+ # would be dst/x86_64/debian-sysvinit/mongodb-10gen-unstable/
+ return "dst/%s/%s/%s%s-%s/" % (arch, distro.name(), distro.pkgbase(), spec.suffix(), spec.pversion(distro))
+
+def httpget(url, filename):
+ """Download the contents of url to filename, return filename."""
+ print "Fetching %s to %s." % (url, filename)
+ conn = None
+ u=urlparse.urlparse(url)
+ assert(u.scheme=='http')
+ try:
+ conn = httplib.HTTPConnection(u.hostname)
+ conn.request("GET", u.path)
+ t=filename+'.TMP'
+ res = conn.getresponse()
+ # FIXME: follow redirects
+ if res.status==200:
+ f = open(t, 'wb') # binary mode: the payload is a tarball
+ try:
+ f.write(res.read())
+ finally:
+ f.close()
+
+ else:
+ raise Exception("HTTP error %d" % res.status)
+ os.rename(t, filename)
+ finally:
+ if conn:
+ conn.close()
+ return filename
+
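For the redirect FIXME above, the stdlib can already do the work; a sketch using urllib2, which follows 3xx responses itself (the helper name httpget2 is mine, not part of this file):

    import shutil
    import urllib2

    def httpget2(url, filename):
        resp = urllib2.urlopen(url)  # follows redirects, raises on HTTP errors
        try:
            f = open(filename + '.TMP', 'wb')
            try:
                shutil.copyfileobj(resp, f)
            finally:
                f.close()
        finally:
            resp.close()
        os.rename(filename + '.TMP', filename)
        return filename
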
+def unpack_binaries_into(arch, spec, where):
+ """Unpack the tarfile for (arch, spec) into directory where."""
+ rootdir=os.getcwd()
+ ensure_dir(where)
+ # Note: POSIX tar doesn't require support for gtar's "-C" option,
+ # and Python's tarfile module prior to Python 2.7 doesn't have the
+ # features to make this detail easy. So we'll just do the dumb
+ # thing and chdir into where and run tar there.
+ os.chdir(where)
+ try:
+ sysassert(["tar", "xvzf", rootdir+"/"+tarfile(arch, spec), "mongodb-linux-%s-%s/bin" % (arch, spec.version())])
+ os.rename("mongodb-linux-%s-%s/bin" % (arch, spec.version()), "bin")
+ os.rmdir("mongodb-linux-%s-%s" % (arch, spec.version()))
+ finally:
+ os.chdir(rootdir)
+
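As the comment notes, newer Pythons make the chdir dance avoidable; a sketch with the tarfile module (the import is renamed so it doesn't clash with the tarfile() helper above, and the helper name is mine):

    import tarfile as tarmod

    def unpack_binaries_into2(arch, spec, where):
        prefix = "mongodb-linux-%s-%s/bin" % (arch, spec.version())
        t = tarmod.open(tarfile(arch, spec), "r:gz")
        try:
            # extract only the bin/ subtree, directly under `where`
            members = [m for m in t.getmembers() if m.name.startswith(prefix)]
            t.extractall(path=where, members=members)
        finally:
            t.close()

The rename-to-"bin" and rmdir steps would still be needed afterwards.
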
+def make_package(distro, arch, spec, srcdir):
+ """Construct the package for (arch, distro, spec), getting
+ packaging files from srcdir and any user-specified suffix from
+ suffixes"""
+
+ sdir=setupdir(distro, arch, spec)
+ ensure_dir(sdir)
+ # Note that the RPM packages get their man pages from the debian
+ # directory, so the debian directory is needed in all cases (and
+ # innocuous in the debianoids' sdirs).
+ for pkgdir in ["debian", "rpm"]:
+ print "Copying packaging files from %s to %s" % ("%s/%s" % (srcdir, pkgdir), sdir)
+ # FIXME: sh-dash-cee is bad. See if tarfile can do this.
+ sysassert(["sh", "-c", "(cd \"%s\" && git archive r%s %s/ ) | (cd \"%s\" && tar xvf -)" % (srcdir, spec.version(), pkgdir, sdir)])
+ # Splat the binaries under sdir. The "build" stages of the
+ # packaging infrastructure will move the binaries to wherever they
+ # need to go.
+ unpack_binaries_into(arch, spec, sdir+("%s/usr/"%BINARYDIR))
+ # Remove the mongosniff binary due to libpcap dynamic
+ # linkage. FIXME: this removal should go away
+ # eventually.
+ os.unlink(sdir+("%s/usr/bin/mongosniff"%BINARYDIR))
+ return distro.make_pkg(arch, spec, srcdir)
+
+def make_repo(repodir):
+ if re.search("(debian|ubuntu)", repodir):
+ make_deb_repo(repodir)
+ elif re.search("(centos|redhat|fedora)", repodir):
+ make_rpm_repo(repodir)
+ else:
+ raise Exception("BUG: unsupported platform?")
+
+def make_deb(distro, arch, spec, srcdir):
+ # I can't remember the details anymore, but the initscript/upstart
+ # job files' names must match the package name in some way; and
+ # see also the --name flag to dh_installinit in the generated
+ # debian/rules file.
+ suffix=spec.suffix()
+ sdir=setupdir(distro, arch, spec)
+ if re.search("sysvinit", distro.name()):
+ os.link(sdir+"debian/init.d", sdir+"debian/%s%s.mongodb.init" % (distro.pkgbase(), suffix))
+ os.unlink(sdir+"debian/mongodb.upstart")
+ elif re.search("upstart", distro.name()):
+ os.link(sdir+"debian/mongodb.upstart", sdir+"debian/%s%s.upstart" % (distro.pkgbase(), suffix))
+ os.unlink(sdir+"debian/init.d")
+ else:
+ raise Exception("unknown debianoid flavor: not sysvinit or upstart?")
+ # Rewrite the control and rules files
+ write_debian_control_file(sdir+"debian/control", spec)
+ write_debian_rules_file(sdir+"debian/rules", spec)
+ write_debian_changelog(sdir+"debian/changelog", spec, srcdir)
+ distro_arch=distro.archname(arch)
+ # Do the packaging.
+ oldcwd=os.getcwd()
+ try:
+ os.chdir(sdir)
+ sysassert(["dpkg-buildpackage", "-a"+distro_arch])
+ finally:
+ os.chdir(oldcwd)
+ r=distro.repodir(arch)
+ ensure_dir(r)
+ # FIXME: see if shutil.copyfile or something can do this without
+ # much pain.
+ sysassert(["cp", "-v", sdir+"../%s%s_%s%s_%s.deb"%(distro.pkgbase(), suffix, spec.pversion(distro), "-"+spec.param("revision") if spec.param("revision") else"", distro_arch), r])
+ return r
+
+def make_deb_repo(repo):
+ # Note: the Debian repository Packages files must be generated
+ # very carefully in order to be usable.
+ oldpwd=os.getcwd()
+ os.chdir(repo+"../../../../")
+ try:
+ dirs=set([os.path.dirname(deb)[2:] for deb in backtick(["find", ".", "-name", "*.deb"]).split()])
+ for d in dirs:
+ s=backtick(["dpkg-scanpackages", d, "/dev/null"])
+ f=open(d+"/Packages", "w")
+ try:
+ f.write(s)
+ finally:
+ f.close()
+ b=backtick(["gzip", "-9c", d+"/Packages"])
+ f=open(d+"/Packages.gz", "wb")
+ try:
+ f.write(b)
+ finally:
+ f.close()
+ finally:
+ os.chdir(oldpwd)
+ # Notes: the Release{,.gpg} files must live in a special place,
+ # and must be created after all the Packages.gz files have been
+ # done.
+ s="""
+Origin: 10gen
+Label: 10gen
+Suite: 10gen
+Codename: %s
+Version: %s
+Architectures: i386 amd64
+Components: 10gen
+Description: 10gen packages
+""" % ("dist", "dist")
+ if os.path.exists(repo+"../../Release"):
+ os.unlink(repo+"../../Release")
+ if os.path.exists(repo+"../../Release.gpg"):
+ os.unlink(repo+"../../Release.gpg")
+ oldpwd=os.getcwd()
+ os.chdir(repo+"../../")
+ s2=backtick(["apt-ftparchive", "release", "."])
+ try:
+ f=open("Release", 'w')
+ try:
+ f.write(s)
+ f.write(s2)
+ finally:
+ f.close()
+
+ arg=None
+ for line in backtick(["gpg", "--list-keys"]).split("\n"):
+ tokens=line.split()
+ if len(tokens)>0 and tokens[0] == "uid":
+ arg=tokens[-1]
+ break
+ # Note: for some reason, I think --no-tty might be needed
+ # here, but maybe not.
+ sysassert(["gpg", "-r", arg, "--no-secmem-warning", "-abs", "--output", "Release.gpg", "Release"])
+ finally:
+ os.chdir(oldpwd)
+
+
+def move_repos_into_place(src, dst):
+ # Find all the stuff in src/*, move it to a freshly-created
+ # directory beside dst, then play some games with symlinks so that
+ # dst names the new stuff and dst+".old" names the previous
+ # one. This feels like a lot of hooey for something so trivial.
+
+ # First, make a crispy fresh new directory to put the stuff in.
+ i=0
+ while True:
+ date_suffix=time.strftime("%Y-%m-%d")
+ dname=dst+".%s.%d" % (date_suffix, i)
+ try:
+ os.mkdir(dname)
+ break
+ except OSError:
+ exc=sys.exc_value
+ if exc.errno == errno.EEXIST:
+ pass
+ else:
+ raise exc
+ i=i+1
+
+ # Put the stuff in our new directory.
+ for r in os.listdir(src):
+ sysassert(["cp", "-rv", src + "/" + r, dname])
+
+ # Make a symlink to the new directory; the symlink will be renamed
+ # to dst shortly.
+ i=0
+ while True:
+ tmpnam=dst+".TMP.%d" % i
+ try:
+ os.symlink(dname, tmpnam)
+ break
+ except OSError: # as exc: # Python >2.5
+ exc=sys.exc_value
+ if exc.errno == errno.EEXIST:
+ pass
+ else:
+ raise exc
+ i=i+1
+
+ # Make a symlink to the old directory; this symlink will be
+ # renamed shortly, too.
+ oldnam=None
+ if os.path.exists(dst):
+ i=0
+ while True:
+ oldnam=dst+".old.%d" % i
+ try:
+ os.symlink(os.readlink(dst), oldnam)
+ break
+ except OSError: # as exc: # Python >2.5
+ exc=sys.exc_value
+ if exc.errno == errno.EEXIST:
+ pass
+ else:
+ raise exc
+ i=i+1 # keep probing for a free ".old.%d" name, as the loops above do
+
+ os.rename(tmpnam, dst)
+ if oldnam:
+ os.rename(oldnam, dst+".old")
+
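Stripped of its retry loops, the publish step above is the classic symlink swap; rename(2) replaces dst atomically, so readers never see a half-populated tree (new_dir here is a hypothetical stand-in for dname):

    os.symlink(new_dir, dst + ".TMP")  # stage the new name off to the side
    os.rename(dst + ".TMP", dst)       # atomically repoint dst at new_dir
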
+
+def write_debian_changelog(path, spec, srcdir):
+ oldcwd=os.getcwd()
+ os.chdir(srcdir)
+ preamble=""
+ if spec.param("revision"):
+ preamble="""mongodb%s (%s-%s) unstable; urgency=low
+
+ * Bump revision number
+
+ -- Richard Kreuter <richard@10gen.com> %s
+
+""" % (spec.suffix(), spec.pversion(Distro("debian")), spec.param("revision"), time.strftime("%a, %d %b %Y %H:%m:%S %z"))
+ try:
+ s=preamble+backtick(["sh", "-c", "git archive r%s debian/changelog | tar xOf -" % spec.version()])
+ finally:
+ os.chdir(oldcwd)
+ f=open(path, 'w')
+ lines=s.split("\n")
+ # If the first line starts with "mongodb", it's a plain changelog
+ # entry rather than our revision preamble, so frob the version number.
+ lines[0]=re.sub("^mongodb \\(.*\\)", "mongodb (%s)" % (spec.pversion(Distro("debian"))), lines[0])
+ # Rewrite every changelog entry starting in mongodb<space>
+ lines=[re.sub("^mongodb ", "mongodb%s " % (spec.suffix()), l) for l in lines]
+ lines=[re.sub("^ --", " --", l) for l in lines]
+ s="\n".join(lines)
+ try:
+ f.write(s)
+ finally:
+ f.close()
+
+def write_debian_control_file(path, spec):
+ s="""Source: @@PACKAGE_BASENAME@@
+Section: devel
+Priority: optional
+Maintainer: Richard Kreuter <richard@10gen.com>
+Build-Depends:
+Standards-Version: 3.8.0
+Homepage: http://www.mongodb.org
+
+Package: @@PACKAGE_BASENAME@@
+Conflicts: @@PACKAGE_CONFLICTS@@
+Architecture: any
+Depends: libc6 (>= 2.3.2), libgcc1 (>= 1:4.1.1), libstdc++6 (>= 4.1.1)
+Description: An object/document-oriented database
+ MongoDB is a high-performance, open source, schema-free
+ document-oriented data store that's easy to deploy, manage
+ and use. It's network accessible, written in C++ and offers
+ the following features :
+ .
+ * Collection oriented storage - easy storage of object-
+ style data
+ * Full index support, including on inner objects
+ * Query profiling
+ * Replication and fail-over support
+ * Efficient storage of binary data including large
+ objects (e.g. videos)
+ * Auto-sharding for cloud-level scalability (Q209)
+ .
+ High performance, scalability, and reasonable depth of
+ functionality are the goals for the project.
+"""
+ s=re.sub("@@PACKAGE_BASENAME@@", "mongodb%s" % spec.suffix(), s)
+ conflict_suffixes=["", "-stable", "-unstable", "-nightly", "-10gen", "-10gen-unstable"]
+ conflict_suffixes.remove(spec.suffix())
+ s=re.sub("@@PACKAGE_CONFLICTS@@", ", ".join(["mongodb"+suffix for suffix in conflict_suffixes]), s)
+ f=open(path, 'w')
+ try:
+ f.write(s)
+ finally:
+ f.close()
+
+def write_debian_rules_file(path, spec):
+ # Note debian/rules is a makefile, so for visual disambiguation we
+ # make all tabs here \t.
+ s="""#!/usr/bin/make -f
+# -*- makefile -*-
+# Sample debian/rules that uses debhelper.
+# This file was originally written by Joey Hess and Craig Small.
+# As a special exception, when this file is copied by dh-make into a
+# dh-make output file, you may use that output file without restriction.
+# This special exception was added by Craig Small in version 0.37 of dh-make.
+
+# Uncomment this to turn on verbose mode.
+#export DH_VERBOSE=1
+
+
+configure: configure-stamp
+configure-stamp:
+\tdh_testdir
+ # Add here commands to configure the package.
+
+\ttouch configure-stamp
+
+
+build: build-stamp
+
+build-stamp: configure-stamp
+\tdh_testdir
+
+ # Add here commands to compile the package.
+# THE FOLLOWING LINE IS INTENTIONALLY COMMENTED.
+\t# scons
+ #docbook-to-man debian/mongodb.sgml > mongodb.1
+\tls debian/*.1 > debian/@@PACKAGE_NAME@@.manpages
+
+\ttouch $@
+
+clean:
+\tdh_testdir
+\tdh_testroot
+\trm -f build-stamp configure-stamp
+
+\t# FIXME: scons freaks out at the presence of target files
+\t# under debian/mongodb.
+\t#scons -c
+\trm -rf $(CURDIR)/debian/@@PACKAGE_NAME@@
+\trm -f config.log
+\trm -f mongo
+\trm -f mongod
+\trm -f mongoimportjson
+\trm -f mongoexport
+\trm -f mongorestore
+\trm -f mongodump
+\trm -f mongofiles
+\trm -f .sconsign.dblite
+\trm -f libmongoclient.a
+\trm -rf client/*.o
+\trm -rf tools/*.o
+\trm -rf shell/*.o
+\trm -rf .sconf_temp
+\trm -f buildscripts/*.pyc
+\trm -f *.pyc
+\trm -f buildinfo.cpp
+\tdh_clean debian/files
+
+install: build
+\tdh_testdir
+\tdh_testroot
+\tdh_prep
+\tdh_installdirs
+
+# THE FOLLOWING LINE IS INTENTIONALLY COMMENTED.
+\t# scons --prefix=$(CURDIR)/debian/mongodb/usr install
+\tcp -v $(CURDIR)/@@BINARYDIR@@/usr/bin/* $(CURDIR)/debian/@@PACKAGE_NAME@@/usr/bin
+\tmkdir -p $(CURDIR)/debian/@@PACKAGE_NAME@@/etc
+\tcp $(CURDIR)/debian/mongodb.conf $(CURDIR)/debian/@@PACKAGE_NAME@@/etc/mongodb.conf
+
+\tmkdir -p $(CURDIR)/debian/@@PACKAGE_NAME@@/usr/share/lintian/overrides/
+\tinstall -m 644 $(CURDIR)/debian/lintian-overrides \
+\t\t$(CURDIR)/debian/@@PACKAGE_NAME@@/usr/share/lintian/overrides/@@PACKAGE_NAME@@
+
+# Build architecture-independent files here.
+binary-indep: build install
+# We have nothing to do by default.
+
+# Build architecture-dependent files here.
+binary-arch: build install
+\tdh_testdir
+\tdh_testroot
+\tdh_installchangelogs
+\tdh_installdocs
+\tdh_installexamples
+#\tdh_install
+#\tdh_installmenu
+#\tdh_installdebconf\t
+#\tdh_installlogrotate
+#\tdh_installemacsen
+#\tdh_installpam
+#\tdh_installmime
+\tdh_installinit --name=@@PACKAGE_BASENAME@@
+#\tdh_installinfo
+\tdh_installman
+\tdh_link
+\tdh_strip
+\tdh_compress
+\tdh_fixperms
+\tdh_installdeb
+\tdh_shlibdeps
+\tdh_gencontrol
+\tdh_md5sums
+\tdh_builddeb
+
+binary: binary-indep binary-arch
+.PHONY: build clean binary-indep binary-arch binary install configure
+"""
+ s=re.sub("@@PACKAGE_NAME@@", "mongodb%s" % spec.suffix(), s)
+ s=re.sub("@@PACKAGE_BASENAME@@", "mongodb", s)
+ s=re.sub("@@BINARYDIR@@", BINARYDIR, s)
+ f=open(path, 'w')
+ try:
+ f.write(s)
+ finally:
+ f.close()
+ # FIXME: some versions of debianoids seem to
+ # need the rules file to be 755?
+ os.chmod(path, stat.S_IXUSR|stat.S_IWUSR|stat.S_IRUSR|stat.S_IXGRP|stat.S_IRGRP|stat.S_IXOTH|stat.S_IROTH)
+
+def make_rpm(distro, arch, spec, srcdir):
+ # Create the specfile.
+ suffix=spec.suffix()
+ sdir=setupdir(distro, arch, spec)
+ specfile=sdir+"rpm/mongo%s.spec" % suffix
+ write_rpm_spec_file(specfile, spec)
+ topdir=ensure_dir(os.getcwd()+'/rpmbuild/')
+ for subdir in ["BUILD", "RPMS", "SOURCES", "SPECS", "SRPMS"]:
+ ensure_dir("%s/%s/" % (topdir, subdir))
+ distro_arch=distro.archname(arch)
+ # RPM tools take these macro files that define variables in
+ # RPMland. Unfortunately, there's no way to tell RPM tools to use
+ # a given file *in addition* to the files that it would already
+ # load, so we have to figure out what it would normally load,
+ # augment that list, and tell RPM to use the augmented list. To
+ # figure out what macrofiles ordinarily get loaded, older RPM
+ # versions had a parameter called "macrofiles" that could be
+ # extracted from "rpm --showrc". But newer RPM versions don't
+ # have this. To tell RPM what macros to use, older versions of
+ # RPM have a --macros option that doesn't work; on these versions,
+ # you can put a "macrofiles" parameter into an rpmrc file. But
+ # that "macrofiles" setting doesn't do anything for newer RPM
+ # versions, where you have to use the --macros flag instead. And
+ # all of this is to let us do our work with some guarantee that
+ # we're not clobbering anything that doesn't belong to us. Why is
+ # RPM so braindamaged?
+ macrofiles=[l for l in backtick(["rpm", "--showrc"]).split("\n") if l.startswith("macrofiles")]
+ flags=[]
+ macropath=os.getcwd()+"/macros"
+ write_rpm_macros_file(macropath, topdir)
+ if len(macrofiles)>0:
+ macrofiles=macrofiles[0]+":"+macropath
+ rcfile=os.getcwd()+"/rpmrc"
+ write_rpmrc_file(rcfile, macrofiles)
+ flags=["--rpmrc", rcfile]
+ else:
+ # This hard-coded hooey came from some box running RPM
+ # 4.4.2.3. It may not work over time, but RPM isn't sanely
+ # configurable.
+ flags=["--macros", "/usr/lib/rpm/macros:/usr/lib/rpm/%s-linux/macros:/etc/rpm/macros.*:/etc/rpm/macros:/etc/rpm/%s-linux/macros:~/.rpmmacros:%s" % (distro_arch, distro_arch, macropath)]
+ # Put the specfile and the tar'd up binaries and stuff in
+ # place. FIXME: see if shutil.copyfile can do this without too
+ # much hassle.
+ sysassert(["cp", "-v", specfile, topdir+"SPECS/"])
+ oldcwd=os.getcwd()
+ os.chdir(sdir+"/../")
+ try:
+ sysassert(["tar", "-cpzf", topdir+"SOURCES/mongo%s-%s.tar.gz" % (suffix, spec.pversion(distro)), os.path.basename(os.path.dirname(sdir))])
+ finally:
+ os.chdir(oldcwd)
+ # Do the build.
+ sysassert(["rpmbuild", "-ba", "--target", distro_arch] + flags + ["%s/SPECS/mongo%s.spec" % (topdir, suffix)])
+ r=distro.repodir(arch)
+ ensure_dir(r)
+ # FIXME: see if some combination of shutil.copy<hoohah> and glob
+ # can do this without shelling out.
+ sysassert(["sh", "-c", "cp -v \"%s/RPMS/%s/\"*.rpm \"%s\""%(topdir, distro_arch, r)])
+ return r
+
+def make_rpm_repo(repo):
+ oldpwd=os.getcwd()
+ os.chdir(repo+"../")
+ try:
+ sysassert(["createrepo", "."])
+ finally:
+ os.chdir(oldpwd)
+
+
+def write_rpmrc_file(path, string):
+ f=open(path, 'w')
+ try:
+ f.write(string)
+ finally:
+ f.close()
+
+def write_rpm_macros_file(path, topdir):
+ f=open(path, 'w')
+ try:
+ f.write("%%_topdir %s" % topdir)
+ finally:
+ f.close()
+
+def write_rpm_spec_file(path, spec):
+ s="""Name: @@PACKAGE_BASENAME@@
+Conflicts: @@PACKAGE_CONFLICTS@@
+Obsoletes: @@PACKAGE_OBSOLETES@@
+Version: @@PACKAGE_VERSION@@
+Release: mongodb_@@PACKAGE_REVISION@@%{?dist}
+Summary: mongo client shell and tools
+License: AGPL 3.0
+URL: http://www.mongodb.org
+Group: Applications/Databases
+
+Source0: %{name}-%{version}.tar.gz
+BuildRoot: %{_tmppath}/%{name}-%{version}-%{release}-root
+
+%description
+Mongo (from "huMONGOus") is a schema-free document-oriented database.
+It features dynamic profileable queries, full indexing, replication
+and fail-over support, efficient storage of large binary data objects,
+and auto-sharding.
+
+This package provides the mongo shell, import/export tools, and other
+client utilities.
+
+%package server
+Summary: mongo server, sharding server, and support scripts
+Group: Applications/Databases
+Requires: @@PACKAGE_BASENAME@@
+
+%description server
+Mongo (from "huMONGOus") is a schema-free document-oriented database.
+
+This package provides the mongo server software, mongo sharding server
+software, default configuration files, and init.d scripts.
+
+%package devel
+Summary: Headers and libraries for mongo development.
+Group: Applications/Databases
+
+%description devel
+Mongo (from "huMONGOus") is a schema-free document-oriented database.
+
+This package provides the mongo static library and header files needed
+to develop mongo client software.
+
+%prep
+%setup
+
+%build
+#scons --prefix=$RPM_BUILD_ROOT/usr all
+# XXX really should have shared library here
+
+%install
+#scons --prefix=$RPM_BUILD_ROOT/usr install
+mkdir -p $RPM_BUILD_ROOT/usr
+cp -rv @@BINARYDIR@@/usr/bin $RPM_BUILD_ROOT/usr
+mkdir -p $RPM_BUILD_ROOT/usr/share/man/man1
+cp debian/*.1 $RPM_BUILD_ROOT/usr/share/man/man1/
+# FIXME: remove this rm when mongosniff is back in the package
+rm -v $RPM_BUILD_ROOT/usr/share/man/man1/mongosniff.1*
+mkdir -p $RPM_BUILD_ROOT/etc/rc.d/init.d
+cp -v rpm/init.d-mongod $RPM_BUILD_ROOT/etc/rc.d/init.d/mongod
+chmod a+x $RPM_BUILD_ROOT/etc/rc.d/init.d/mongod
+mkdir -p $RPM_BUILD_ROOT/etc
+cp -v rpm/mongod.conf $RPM_BUILD_ROOT/etc/mongod.conf
+mkdir -p $RPM_BUILD_ROOT/etc/sysconfig
+cp -v rpm/mongod.sysconfig $RPM_BUILD_ROOT/etc/sysconfig/mongod
+mkdir -p $RPM_BUILD_ROOT/var/lib/mongo
+mkdir -p $RPM_BUILD_ROOT/var/log/mongo
+touch $RPM_BUILD_ROOT/var/log/mongo/mongod.log
+
+%clean
+#scons -c
+rm -rf $RPM_BUILD_ROOT
+
+%pre server
+if ! /usr/bin/id -g mongod &>/dev/null; then
+ /usr/sbin/groupadd -r mongod
+fi
+if ! /usr/bin/id mongod &>/dev/null; then
+ /usr/sbin/useradd -M -r -g mongod -d /var/lib/mongo -s /bin/false \
+ -c mongod mongod > /dev/null 2>&1
+fi
+
+%post server
+if test $1 = 1
+then
+ /sbin/chkconfig --add mongod
+fi
+
+%preun server
+if test $1 = 0
+then
+ /sbin/chkconfig --del mongod
+fi
+
+%postun server
+if test $1 -ge 1
+then
+ /sbin/service mongod condrestart >/dev/null 2>&1 || :
+fi
+
+%files
+%defattr(-,root,root,-)
+#%doc README GNU-AGPL-3.0.txt
+
+%{_bindir}/bsondump
+%{_bindir}/mongo
+%{_bindir}/mongodump
+%{_bindir}/mongoexport
+%{_bindir}/mongofiles
+%{_bindir}/mongoimport
+%{_bindir}/mongorestore
+#@@VERSION>1.9@@%{_bindir}/mongotop
+%{_bindir}/mongostat
+# FIXME: uncomment when mongosniff is back in the package
+#%{_bindir}/mongosniff
+
+# FIXME: uncomment this when there's a stable release whose source
+# tree contains a bsondump man page.
+#@@VERSION>1.9@@%{_mandir}/man1/bsondump.1*
+%{_mandir}/man1/mongo.1*
+%{_mandir}/man1/mongodump.1*
+%{_mandir}/man1/mongoexport.1*
+%{_mandir}/man1/mongofiles.1*
+%{_mandir}/man1/mongoimport.1*
+%{_mandir}/man1/mongorestore.1*
+%{_mandir}/man1/mongostat.1*
+# FIXME: uncomment when mongosniff is back in the package
+#%{_mandir}/man1/mongosniff.1*
+
+%files server
+%defattr(-,root,root,-)
+%config(noreplace) /etc/mongod.conf
+%{_bindir}/mongod
+%{_bindir}/mongos
+%{_mandir}/man1/mongod.1*
+%{_mandir}/man1/mongos.1*
+/etc/rc.d/init.d/mongod
+/etc/sysconfig/mongod
+#/etc/rc.d/init.d/mongos
+%attr(0755,mongod,mongod) %dir /var/lib/mongo
+%attr(0755,mongod,mongod) %dir /var/log/mongo
+%attr(0640,mongod,mongod) %config(noreplace) %verify(not md5 size mtime) /var/log/mongo/mongod.log
+
+%changelog
+* Thu Jan 28 2010 Richard M Kreuter <richard@10gen.com>
+- Minor fixes.
+
+* Sat Oct 24 2009 Joe Miklojcik <jmiklojcik@shopwiki.com> -
+- Wrote mongo.spec.
+"""
+ suffix=spec.suffix()
+ s=re.sub("@@PACKAGE_BASENAME@@", "mongo%s" % suffix, s)
+ s=re.sub("@@PACKAGE_VERSION@@", spec.pversion(Distro("redhat")), s)
+ # FIXME, maybe: the RPM guide says that Release numbers ought to
+# be integers starting at 1, but we use "mongodb_1%{?dist}",
+ # whatever the hell that means.
+ s=re.sub("@@PACKAGE_REVISION@@", str(int(spec.param("revision"))+1) if spec.param("revision") else "1", s)
+ s=re.sub("@@BINARYDIR@@", BINARYDIR, s)
+ conflict_suffixes=["", "-10gen", "-10gen-unstable"]
+ conflict_suffixes.remove(suffix)
+ s=re.sub("@@PACKAGE_CONFLICTS@@", ", ".join(["mongo"+_ for _ in conflict_suffixes]), s)
+ if suffix == "-10gen":
+ s=re.sub("@@PACKAGE_PROVIDES@@", "mongo-stable", s)
+ s=re.sub("@@PACKAGE_OBSOLETES@@", "mongo-stable", s)
+ elif suffix == "-10gen-unstable":
+ s=re.sub("@@PACKAGE_PROVIDES@@", "mongo-unstable", s)
+ s=re.sub("@@PACKAGE_OBSOLETES@@", "mongo-unstable", s)
+ else:
+ raise Exception("BUG: unknown suffix %s" % suffix)
+
+ lines=[]
+ for line in s.split("\n"):
+ m = re.search("@@VERSION>(.*)@@(.*)", line)
+ if m and spec.version_better_than(m.group(1)):
+ lines.append(m.group(2))
+ else:
+ lines.append(line)
+ s="\n".join(lines)
+
+ f=open(path, 'w')
+ try:
+ f.write(s)
+ finally:
+ f.close()
+
+if __name__ == "__main__":
+ main(sys.argv)
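For reference, an invocation consistent with the spec grammar above might be (hypothetical versions):

    ./packager.py -n 2.0.0:revision=1 1.9.1

which would download both tarballs for every entry in ARCHES, build the mongodb-10gen/mongo-10gen 2.0.0 and -10gen-unstable 1.9.1 packages for each distro, and, because of -n, leave everything in the temp working directory rather than publishing into REPOPATH.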
diff --git a/buildscripts/smoke.py b/buildscripts/smoke.py
index 5fdd26f..c46b5d1 100755
--- a/buildscripts/smoke.py
+++ b/buildscripts/smoke.py
@@ -110,7 +110,7 @@ class mongod(object):
sock.connect(("localhost", int(port)))
sock.close()
- def did_mongod_start(self, port=mongod_port, timeout=20):
+ def did_mongod_start(self, port=mongod_port, timeout=300):
while timeout > 0:
time.sleep(1)
try:
@@ -119,6 +119,7 @@ class mongod(object):
except Exception,e:
print >> sys.stderr, e
timeout = timeout - 1
+ print >> sys.stderr, "timeout starting mongod"
return False
def start(self):
@@ -145,9 +146,13 @@ class mongod(object):
utils.ensureDir(dir_name)
argv = [mongod_executable, "--port", str(self.port), "--dbpath", dir_name]
if self.kwargs.get('small_oplog'):
- argv += ["--master", "--oplogSize", "128"]
+ argv += ["--master", "--oplogSize", "256"]
if self.slave:
argv += ['--slave', '--source', 'localhost:' + str(srcport)]
+ if self.kwargs.get('no_journal'):
+ argv += ['--nojournal']
+ if self.kwargs.get('no_preallocj'):
+ argv += ['--nopreallocj']
print "running " + " ".join(argv)
self.proc = Popen(argv)
if not self.did_mongod_start(self.port):
@@ -240,11 +245,15 @@ def check_db_hashes(master, slave):
# Blech.
def skipTest(path):
if small_oplog:
- if os.path.basename(path) in ["cursor8.js", "indexh.js"]:
+ if os.path.basename(path) in ["cursor8.js", "indexh.js", "dropdb.js"]:
return True
return False
def runTest(test):
+ # test is a tuple of ( filename , usedb<bool> )
+ # filename should be a js file to run
+ # usedb is true if the test expects a mongod to be running
+
(path, usedb) = test
(ignore, ext) = os.path.splitext(path)
if skipTest(path):
@@ -269,11 +278,26 @@ def runTest(test):
"--port", mongod_port]
else:
raise Bug("fell off in extenstion case: %s" % path)
+ sys.stderr.write( "starting test : %s \n" % os.path.basename(path) )
+ sys.stderr.flush()
print " *******************************************"
print " Test : " + os.path.basename(path) + " ..."
t1 = time.time()
# FIXME: we don't handle the case where the subprocess
# hangs... that's bad.
+ if argv[0].endswith( 'mongo' ) and not '--eval' in argv :
+ argv = argv + [ '--eval', 'TestData = new Object();' +
+ 'TestData.testPath = "' + path + '";' +
+ 'TestData.testFile = "' + os.path.basename( path ) + '";' +
+ 'TestData.testName = "' + re.sub( ".js$", "", os.path.basename( path ) ) + '";' +
+ 'TestData.noJournal = ' + ( 'true' if no_journal else 'false' ) + ";" +
+ 'TestData.noJournalPrealloc = ' + ( 'true' if no_preallocj else 'false' ) + ";" ]
+
+ if argv[0].endswith( 'test' ) and no_preallocj :
+ argv = argv + [ '--nopreallocj' ]
+
+
+ print argv
r = call(argv, cwd=test_path)
t2 = time.time()
print " " + str((t2 - t1) * 1000) + "ms"
@@ -295,7 +319,7 @@ def run_tests(tests):
# The reason we use with is so that we get __exit__ semantics
- with mongod(small_oplog=small_oplog) as master:
+ with mongod(small_oplog=small_oplog,no_journal=no_journal,no_preallocj=no_preallocj) as master:
with mongod(slave=True) if small_oplog else Nothing() as slave:
if small_oplog:
master.wait_for_repl()
@@ -415,7 +439,7 @@ def add_exe(e):
return e
def main():
- global mongod_executable, mongod_port, shell_executable, continue_on_failure, small_oplog, smoke_db_prefix, test_path
+ global mongod_executable, mongod_port, shell_executable, continue_on_failure, small_oplog, no_journal, no_preallocj, smoke_db_prefix, test_path
parser = OptionParser(usage="usage: smoke.py [OPTIONS] ARGS*")
parser.add_option('--mode', dest='mode', default='suite',
help='If "files", ARGS are filenames; if "suite", ARGS are sets of tests (%default)')
@@ -441,6 +465,12 @@ def main():
parser.add_option('--small-oplog', dest='small_oplog', default=False,
action="store_true",
help='Run tests with master/slave replication & use a small oplog')
+ parser.add_option('--nojournal', dest='no_journal', default=False,
+ action="store_true",
+ help='Do not turn on journaling in tests')
+ parser.add_option('--nopreallocj', dest='no_preallocj', default=False,
+ action="store_true",
+ help='Do not preallocate journal files in tests')
global tests
(options, tests) = parser.parse_args()
@@ -461,6 +491,8 @@ def main():
continue_on_failure = options.continue_on_failure
smoke_db_prefix = options.smoke_db_prefix
small_oplog = options.small_oplog
+ no_journal = options.no_journal
+ no_preallocj = options.no_preallocj
if options.File:
if options.File == '-':
diff --git a/client/clientOnly.cpp b/client/clientOnly.cpp
index 5725e5f..11890c8 100644
--- a/client/clientOnly.cpp
+++ b/client/clientOnly.cpp
@@ -17,7 +17,6 @@
#include "pch.h"
#include "../client/dbclient.h"
-#include "../db/dbhelpers.h"
#include "../db/cmdline.h"
#include "../s/shard.h"
@@ -29,6 +28,10 @@ namespace mongo {
bool dbexitCalled = false;
+ void exitCleanly( ExitCode code ) {
+ dbexit( code );
+ }
+
void dbexit( ExitCode returnCode, const char *whyMsg , bool tryToGetLock ) {
dbexitCalled = true;
out() << "dbexit called" << endl;
diff --git a/client/connpool.cpp b/client/connpool.cpp
index 23d14da..2d7c37b 100644
--- a/client/connpool.cpp
+++ b/client/connpool.cpp
@@ -36,8 +36,9 @@ namespace mongo {
}
}
- void PoolForHost::done( DBClientBase * c ) {
+ void PoolForHost::done( DBConnectionPool * pool, DBClientBase * c ) {
if ( _pool.size() >= _maxPerHost ) {
+ pool->onDestory( c );
delete c;
}
else {
@@ -45,16 +46,24 @@ namespace mongo {
}
}
- DBClientBase * PoolForHost::get() {
+ DBClientBase * PoolForHost::get( DBConnectionPool * pool , double socketTimeout ) {
time_t now = time(0);
-
+
while ( ! _pool.empty() ) {
StoredConnection sc = _pool.top();
_pool.pop();
- if ( sc.ok( now ) )
- return sc.conn;
- delete sc.conn;
+
+ if ( ! sc.ok( now ) ) {
+ pool->onDestory( sc.conn );
+ delete sc.conn;
+ continue;
+ }
+
+ assert( sc.conn->getSoTimeout() == socketTimeout );
+
+ return sc.conn;
+
}
return NULL;
@@ -75,14 +84,34 @@ namespace mongo {
}
}
+ void PoolForHost::getStaleConnections( vector<DBClientBase*>& stale ) {
+ time_t now = time(0);
+
+ vector<StoredConnection> all;
+ while ( ! _pool.empty() ) {
+ StoredConnection c = _pool.top();
+ _pool.pop();
+
+ if ( c.ok( now ) )
+ all.push_back( c );
+ else
+ stale.push_back( c.conn );
+ }
+
+ for ( size_t i=0; i<all.size(); i++ ) {
+ _pool.push( all[i] );
+ }
+ }
+
+
PoolForHost::StoredConnection::StoredConnection( DBClientBase * c ) {
conn = c;
when = time(0);
}
bool PoolForHost::StoredConnection::ok( time_t now ) {
- // if connection has been idle for an hour, kill it
- return ( now - when ) < 3600;
+ // if connection has been idle for 30 minutes, kill it
+ return ( now - when ) < 1800;
}
void PoolForHost::createdOne( DBClientBase * base) {
@@ -97,16 +126,23 @@ namespace mongo {
DBConnectionPool pool;
- DBClientBase* DBConnectionPool::_get(const string& ident) {
+ DBConnectionPool::DBConnectionPool()
+ : _mutex("DBConnectionPool") ,
+ _name( "dbconnectionpool" ) ,
+ _hooks( new list<DBConnectionHook*>() ) {
+ }
+
+ DBClientBase* DBConnectionPool::_get(const string& ident , double socketTimeout ) {
+ assert( ! inShutdown() );
scoped_lock L(_mutex);
- PoolForHost& p = _pools[ident];
- return p.get();
+ PoolForHost& p = _pools[PoolKey(ident,socketTimeout)];
+ return p.get( this , socketTimeout );
}
- DBClientBase* DBConnectionPool::_finishCreate( const string& host , DBClientBase* conn ) {
+ DBClientBase* DBConnectionPool::_finishCreate( const string& host , double socketTimeout , DBClientBase* conn ) {
{
scoped_lock L(_mutex);
- PoolForHost& p = _pools[host];
+ PoolForHost& p = _pools[PoolKey(host,socketTimeout)];
p.createdOne( conn );
}
@@ -116,22 +152,22 @@ namespace mongo {
return conn;
}
- DBClientBase* DBConnectionPool::get(const ConnectionString& url) {
- DBClientBase * c = _get( url.toString() );
+ DBClientBase* DBConnectionPool::get(const ConnectionString& url, double socketTimeout) {
+ DBClientBase * c = _get( url.toString() , socketTimeout );
if ( c ) {
onHandedOut( c );
return c;
}
string errmsg;
- c = url.connect( errmsg );
+ c = url.connect( errmsg, socketTimeout );
uassert( 13328 , _name + ": connect failed " + url.toString() + " : " + errmsg , c );
- return _finishCreate( url.toString() , c );
+ return _finishCreate( url.toString() , socketTimeout , c );
}
- DBClientBase* DBConnectionPool::get(const string& host) {
- DBClientBase * c = _get( host );
+ DBClientBase* DBConnectionPool::get(const string& host, double socketTimeout) {
+ DBClientBase * c = _get( host , socketTimeout );
if ( c ) {
onHandedOut( c );
return c;
@@ -141,12 +177,23 @@ namespace mongo {
ConnectionString cs = ConnectionString::parse( host , errmsg );
uassert( 13071 , (string)"invalid hostname [" + host + "]" + errmsg , cs.isValid() );
- c = cs.connect( errmsg );
+ c = cs.connect( errmsg, socketTimeout );
if ( ! c )
throw SocketException( SocketException::CONNECT_ERROR , host , 11002 , str::stream() << _name << " error: " << errmsg );
- return _finishCreate( host , c );
+ return _finishCreate( host , socketTimeout , c );
+ }
+
+ void DBConnectionPool::release(const string& host, DBClientBase *c) {
+ if ( c->isFailed() ) {
+ onDestory( c );
+ delete c;
+ return;
+ }
+ scoped_lock L(_mutex);
+ _pools[PoolKey(host,c->getSoTimeout())].done(this,c);
}
+
DBConnectionPool::~DBConnectionPool() {
// connection closing is handled by ~PoolForHost
}
@@ -160,42 +207,55 @@ namespace mongo {
}
void DBConnectionPool::addHook( DBConnectionHook * hook ) {
- _hooks.push_back( hook );
+ _hooks->push_back( hook );
}
void DBConnectionPool::onCreate( DBClientBase * conn ) {
- if ( _hooks.size() == 0 )
+ if ( _hooks->size() == 0 )
return;
- for ( list<DBConnectionHook*>::iterator i = _hooks.begin(); i != _hooks.end(); i++ ) {
+ for ( list<DBConnectionHook*>::iterator i = _hooks->begin(); i != _hooks->end(); i++ ) {
(*i)->onCreate( conn );
}
}
void DBConnectionPool::onHandedOut( DBClientBase * conn ) {
- if ( _hooks.size() == 0 )
+ if ( _hooks->size() == 0 )
return;
- for ( list<DBConnectionHook*>::iterator i = _hooks.begin(); i != _hooks.end(); i++ ) {
+ for ( list<DBConnectionHook*>::iterator i = _hooks->begin(); i != _hooks->end(); i++ ) {
(*i)->onHandedOut( conn );
}
}
+ void DBConnectionPool::onDestory( DBClientBase * conn ) {
+ if ( _hooks->size() == 0 )
+ return;
+
+ for ( list<DBConnectionHook*>::iterator i = _hooks->begin(); i != _hooks->end(); i++ ) {
+ (*i)->onDestory( conn );
+ }
+ }
+
void DBConnectionPool::appendInfo( BSONObjBuilder& b ) {
- BSONObjBuilder bb( b.subobjStart( "hosts" ) );
+
int avail = 0;
long long created = 0;
map<ConnectionString::ConnectionType,long long> createdByType;
+ set<string> replicaSets;
+
+ BSONObjBuilder bb( b.subobjStart( "hosts" ) );
{
scoped_lock lk( _mutex );
for ( PoolMap::iterator i=_pools.begin(); i!=_pools.end(); ++i ) {
if ( i->second.numCreated() == 0 )
continue;
- string s = i->first;
+ string s = str::stream() << i->first.ident << "::" << i->first.timeout;
+
BSONObjBuilder temp( bb.subobjStart( s ) );
temp.append( "available" , i->second.numAvailable() );
temp.appendNumber( "created" , i->second.numCreated() );
@@ -206,9 +266,33 @@ namespace mongo {
long long& x = createdByType[i->second.type()];
x += i->second.numCreated();
+
+ {
+ string setName = i->first.ident;
+ if ( setName.find( "/" ) != string::npos ) {
+ setName = setName.substr( 0 , setName.find( "/" ) );
+ replicaSets.insert( setName );
+ }
+ }
}
}
bb.done();
+
+
+ BSONObjBuilder setBuilder( b.subobjStart( "replicaSets" ) );
+ for ( set<string>::iterator i=replicaSets.begin(); i!=replicaSets.end(); ++i ) {
+ string rs = *i;
+ ReplicaSetMonitorPtr m = ReplicaSetMonitor::get( rs );
+ if ( ! m ) {
+ warning() << "no monitor for set: " << rs << endl;
+ continue;
+ }
+
+ BSONObjBuilder temp( setBuilder.subobjStart( rs ) );
+ m->appendInfo( temp );
+ temp.done();
+ }
+ setBuilder.done();
{
BSONObjBuilder temp( bb.subobjStart( "createdByType" ) );
@@ -223,21 +307,82 @@ namespace mongo {
}
bool DBConnectionPool::serverNameCompare::operator()( const string& a , const string& b ) const{
- string ap = str::before( a , "/" );
- string bp = str::before( b , "/" );
+ const char* ap = a.c_str();
+ const char* bp = b.c_str();
+
+ while (true){
+ if (*ap == '\0' || *ap == '/'){
+ if (*bp == '\0' || *bp == '/')
+ return false; // equal strings
+ else
+ return true; // a is shorter
+ }
+
+ if (*bp == '\0' || *bp == '/')
+ return false; // b is shorter
+
+ if ( *ap < *bp)
+ return true;
+ else if (*ap > *bp)
+ return false;
+
+ ++ap;
+ ++bp;
+ }
+ assert(false);
+ }
+
+ bool DBConnectionPool::poolKeyCompare::operator()( const PoolKey& a , const PoolKey& b ) const {
+ if (DBConnectionPool::serverNameCompare()( a.ident , b.ident ))
+ return true;
- return ap < bp;
+ if (DBConnectionPool::serverNameCompare()( b.ident , a.ident ))
+ return false;
+
+ return a.timeout < b.timeout;
+ }
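
Taken together, the two comparators key the pool map by (server-or-set ident, timeout), with idents compared only up to any '/'. Schematically (values invented; PoolKey itself is private):

    DBConnectionPool::serverNameCompare byName;
    byName( "rs1/a.example.com" , "rs1/c.example.com" ); // false both ways: same set "rs1"
    byName( "alpha/x" , "beta/y" );                      // true: "alpha" sorts before "beta"
    // poolKeyCompare breaks ident ties on timeout, so the same host with
    // timeouts 0 and 30 maps to two distinct PoolForHost buckets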
+
+
+ void DBConnectionPool::taskDoWork() {
+ vector<DBClientBase*> toDelete;
+
+ {
+ // we need to get the connections inside the lock
+ // but we can actually delete them outside
+ scoped_lock lk( _mutex );
+ for ( PoolMap::iterator i=_pools.begin(); i!=_pools.end(); ++i ) {
+ i->second.getStaleConnections( toDelete );
+ }
+ }
+
+ for ( size_t i=0; i<toDelete.size(); i++ ) {
+ try {
+ onDestory( toDelete[i] );
+ delete toDelete[i];
+ }
+ catch ( ... ) {
+ // we don't care if there was a socket error
+ }
+ }
}
// ------ ScopedDbConnection ------
ScopedDbConnection * ScopedDbConnection::steal() {
assert( _conn );
- ScopedDbConnection * n = new ScopedDbConnection( _host , _conn );
+ ScopedDbConnection * n = new ScopedDbConnection( _host , _conn, _socketTimeout );
_conn = 0;
return n;
}
+ void ScopedDbConnection::_setSocketTimeout(){
+ if( ! _conn ) return;
+ if( _conn->type() == ConnectionString::MASTER )
+ (( DBClientConnection* ) _conn)->setSoTimeout( _socketTimeout );
+ else if( _conn->type() == ConnectionString::SYNC )
+ (( SyncClusterConnection* ) _conn)->setAllSoTimeouts( _socketTimeout );
+ }
+
ScopedDbConnection::~ScopedDbConnection() {
if ( _conn ) {
if ( ! _conn->isFailed() ) {
@@ -248,12 +393,14 @@ namespace mongo {
}
}
- ScopedDbConnection::ScopedDbConnection(const Shard& shard )
- : _host( shard.getConnString() ) , _conn( pool.get(_host) ) {
+ ScopedDbConnection::ScopedDbConnection(const Shard& shard, double socketTimeout )
+ : _host( shard.getConnString() ) , _conn( pool.get(_host, socketTimeout) ), _socketTimeout( socketTimeout ) {
+ _setSocketTimeout();
}
- ScopedDbConnection::ScopedDbConnection(const Shard* shard )
- : _host( shard->getConnString() ) , _conn( pool.get(_host) ) {
+ ScopedDbConnection::ScopedDbConnection(const Shard* shard, double socketTimeout )
+ : _host( shard->getConnString() ) , _conn( pool.get(_host, socketTimeout) ), _socketTimeout( socketTimeout ) {
+ _setSocketTimeout();
}
@@ -262,7 +409,7 @@ namespace mongo {
PoolFlushCmd() : Command( "connPoolSync" , false , "connpoolsync" ) {}
virtual void help( stringstream &help ) const { help<<"internal"; }
virtual LockType locktype() const { return NONE; }
- virtual bool run(const string&, mongo::BSONObj&, std::string&, mongo::BSONObjBuilder& result, bool) {
+ virtual bool run(const string&, mongo::BSONObj&, int, std::string&, mongo::BSONObjBuilder& result, bool) {
pool.flush();
return true;
}
@@ -277,7 +424,7 @@ namespace mongo {
PoolStats() : Command( "connPoolStats" ) {}
virtual void help( stringstream &help ) const { help<<"stats about connection pool"; }
virtual LockType locktype() const { return NONE; }
- virtual bool run(const string&, mongo::BSONObj&, std::string&, mongo::BSONObjBuilder& result, bool) {
+ virtual bool run(const string&, mongo::BSONObj&, int, std::string&, mongo::BSONObjBuilder& result, bool) {
pool.appendInfo( result );
result.append( "numDBClientConnection" , DBClientConnection::getNumConnections() );
result.append( "numAScopedConnection" , AScopedConnection::getNumConnections() );
diff --git a/client/connpool.h b/client/connpool.h
index e7f59d6..a37dad7 100644
--- a/client/connpool.h
+++ b/client/connpool.h
@@ -21,9 +21,12 @@
#include "dbclient.h"
#include "redef_macros.h"
+#include "../util/background.h"
+
namespace mongo {
class Shard;
+ class DBConnectionPool;
/**
* not thread safe
@@ -44,7 +47,7 @@ namespace mongo {
int numAvailable() const { return (int)_pool.size(); }
- void createdOne( DBClientBase * base);
+ void createdOne( DBClientBase * base );
long long numCreated() const { return _created; }
ConnectionString::ConnectionType type() const { assert(_created); return _type; }
@@ -52,11 +55,13 @@ namespace mongo {
/**
* gets a connection or return NULL
*/
- DBClientBase * get();
+ DBClientBase * get( DBConnectionPool * pool , double socketTimeout );
- void done( DBClientBase * c );
+ void done( DBConnectionPool * pool , DBClientBase * c );
void flush();
+
+ void getStaleConnections( vector<DBClientBase*>& stale );
static void setMaxPerHost( unsigned max ) { _maxPerHost = max; }
static unsigned getMaxPerHost() { return _maxPerHost; }
@@ -72,6 +77,7 @@ namespace mongo {
};
std::stack<StoredConnection> _pool;
+
long long _created;
ConnectionString::ConnectionType _type;
@@ -83,6 +89,7 @@ namespace mongo {
virtual ~DBConnectionHook() {}
virtual void onCreate( DBClientBase * conn ) {}
virtual void onHandedOut( DBClientBase * conn ) {}
+ virtual void onDestory( DBClientBase * conn ) {}
};
/** Database connection pool.
@@ -100,29 +107,11 @@ namespace mongo {
c.conn()...
}
*/
- class DBConnectionPool {
+ class DBConnectionPool : public PeriodicTask {
public:
- /** compares server namees, but is smart about replica set names */
- struct serverNameCompare {
- bool operator()( const string& a , const string& b ) const;
- };
-
- private:
-
- mongo::mutex _mutex;
- typedef map<string,PoolForHost,serverNameCompare> PoolMap; // servername -> pool
- PoolMap _pools;
- list<DBConnectionHook*> _hooks;
- string _name;
-
- DBClientBase* _get( const string& ident );
-
- DBClientBase* _finishCreate( const string& ident , DBClientBase* conn );
-
- public:
- DBConnectionPool() : _mutex("DBConnectionPool") , _name( "dbconnectionpool" ) { }
+ DBConnectionPool();
~DBConnectionPool();
/** right now just controls some asserts. defaults to "dbconnectionpool" */
@@ -130,22 +119,54 @@ namespace mongo {
void onCreate( DBClientBase * conn );
void onHandedOut( DBClientBase * conn );
+ void onDestory( DBClientBase * conn );
void flush();
- DBClientBase *get(const string& host);
- DBClientBase *get(const ConnectionString& host);
+ DBClientBase *get(const string& host, double socketTimeout = 0);
+ DBClientBase *get(const ConnectionString& host, double socketTimeout = 0);
- void release(const string& host, DBClientBase *c) {
- if ( c->isFailed() ) {
- delete c;
- return;
- }
- scoped_lock L(_mutex);
- _pools[host].done(c);
- }
- void addHook( DBConnectionHook * hook );
+ void release(const string& host, DBClientBase *c);
+
+ void addHook( DBConnectionHook * hook ); // we take ownership
void appendInfo( BSONObjBuilder& b );
+
+        /** compares server names, but is smart about replica set names */
+ struct serverNameCompare {
+ bool operator()( const string& a , const string& b ) const;
+ };
+
+ virtual string taskName() const { return "DBConnectionPool-cleaner"; }
+ virtual void taskDoWork();
+
+ private:
+ DBConnectionPool( DBConnectionPool& p );
+
+ DBClientBase* _get( const string& ident , double socketTimeout );
+
+ DBClientBase* _finishCreate( const string& ident , double socketTimeout, DBClientBase* conn );
+
+ struct PoolKey {
+ PoolKey( string i , double t ) : ident( i ) , timeout( t ) {}
+ string ident;
+ double timeout;
+ };
+
+ struct poolKeyCompare {
+ bool operator()( const PoolKey& a , const PoolKey& b ) const;
+ };
+
+ typedef map<PoolKey,PoolForHost,poolKeyCompare> PoolMap; // servername -> pool
+
+ mongo::mutex _mutex;
+ string _name;
+
+ PoolMap _pools;
+
+        // pointers owned by me; right now they leak on shutdown
+        // _hooks itself is also leaked, since deleting it would create a shutdown race condition
+ list<DBConnectionHook*> * _hooks;
+
};
extern DBConnectionPool pool;
@@ -154,9 +175,15 @@ namespace mongo {
public:
AScopedConnection() { _numConnections++; }
virtual ~AScopedConnection() { _numConnections--; }
+
virtual DBClientBase* get() = 0;
virtual void done() = 0;
virtual string getHost() const = 0;
+
+ /**
+ * @return true iff this has a connection to the db
+ */
+ virtual bool ok() const = 0;
/**
* @return total number of current instances of AScopedConnection
@@ -176,19 +203,25 @@ namespace mongo {
/** the main constructor you want to use
throws UserException if can't connect
*/
- explicit ScopedDbConnection(const string& host) : _host(host), _conn( pool.get(host) ) {}
+ explicit ScopedDbConnection(const string& host, double socketTimeout = 0) : _host(host), _conn( pool.get(host, socketTimeout) ), _socketTimeout( socketTimeout ) {
+ _setSocketTimeout();
+ }
- ScopedDbConnection() : _host( "" ) , _conn(0) {}
+ ScopedDbConnection() : _host( "" ) , _conn(0), _socketTimeout( 0 ) {}
/* @param conn - bind to an existing connection */
- ScopedDbConnection(const string& host, DBClientBase* conn ) : _host( host ) , _conn( conn ) {}
+ ScopedDbConnection(const string& host, DBClientBase* conn, double socketTimeout = 0 ) : _host( host ) , _conn( conn ), _socketTimeout( socketTimeout ) {
+ _setSocketTimeout();
+ }
/** throws UserException if can't connect */
- explicit ScopedDbConnection(const ConnectionString& url ) : _host(url.toString()), _conn( pool.get(url) ) {}
+ explicit ScopedDbConnection(const ConnectionString& url, double socketTimeout = 0 ) : _host(url.toString()), _conn( pool.get(url, socketTimeout) ), _socketTimeout( socketTimeout ) {
+ _setSocketTimeout();
+ }
/** throws UserException if can't connect */
- explicit ScopedDbConnection(const Shard& shard );
- explicit ScopedDbConnection(const Shard* shard );
+ explicit ScopedDbConnection(const Shard& shard, double socketTimeout = 0 );
+ explicit ScopedDbConnection(const Shard* shard, double socketTimeout = 0 );
~ScopedDbConnection();
@@ -210,6 +243,8 @@ namespace mongo {
return _conn;
}
+ bool ok() const { return _conn > 0; }
+
string getHost() const { return _host; }
/** Force closure of the connection. You should call this if you leave it in
@@ -242,8 +277,12 @@ namespace mongo {
ScopedDbConnection * steal();
private:
+
+ void _setSocketTimeout();
+
const string _host;
DBClientBase *_conn;
+ const double _socketTimeout;
};
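
A usage sketch of the extended ScopedDbConnection (host and namespace invented): the second constructor argument both picks the timeout-specific pool bucket and is re-applied to the raw connection via _setSocketTimeout().

    {
        // 10-second socket timeout for everything done on this connection
        ScopedDbConnection conn( "localhost:27017" , 10 );
        BSONObj doc = conn->findOne( "test.users" , QUERY( "name" << "alice" ) );
        conn.done(); // return the connection to the pool
    }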
diff --git a/client/dbclient.cpp b/client/dbclient.cpp
index bb24199..dadf7e4 100644
--- a/client/dbclient.cpp
+++ b/client/dbclient.cpp
@@ -64,21 +64,23 @@ namespace mongo {
}
- DBClientBase* ConnectionString::connect( string& errmsg ) const {
+ DBClientBase* ConnectionString::connect( string& errmsg, double socketTimeout ) const {
switch ( _type ) {
case MASTER: {
DBClientConnection * c = new DBClientConnection(true);
+ c->setSoTimeout( socketTimeout );
log(1) << "creating new connection to:" << _servers[0] << endl;
if ( ! c->connect( _servers[0] , errmsg ) ) {
delete c;
return 0;
}
+ log(1) << "connected connection!" << endl;
return c;
}
case PAIR:
case SET: {
- DBClientReplicaSet * set = new DBClientReplicaSet( _setName , _servers );
+ DBClientReplicaSet * set = new DBClientReplicaSet( _setName , _servers , socketTimeout );
if( ! set->connect() ) {
delete set;
errmsg = "connect failed to set ";
@@ -93,7 +95,8 @@ namespace mongo {
list<HostAndPort> l;
for ( unsigned i=0; i<_servers.size(); i++ )
l.push_back( _servers[i] );
- return new SyncClusterConnection( l );
+ SyncClusterConnection* c = new SyncClusterConnection( l, socketTimeout );
+ return c;
}
case INVALID:
@@ -294,7 +297,7 @@ namespace mongo {
return b.obj();
}
- BSONObj getlasterrorcmdobj = fromjson("{getlasterror:1}");
+ const BSONObj getlasterrorcmdobj = fromjson("{getlasterror:1}");
BSONObj DBClientWithCommands::getLastErrorDetailed() {
BSONObj info;
@@ -314,7 +317,7 @@ namespace mongo {
return e.str();
}
- BSONObj getpreverrorcmdobj = fromjson("{getpreverror:1}");
+ const BSONObj getpreverrorcmdobj = fromjson("{getpreverror:1}");
BSONObj DBClientWithCommands::getPrevError() {
BSONObj info;
@@ -391,6 +394,7 @@ namespace mongo {
}
bool DBClientWithCommands::createCollection(const string &ns, long long size, bool capped, int max, BSONObj *info) {
+ assert(!capped||size);
BSONObj o;
if ( info == 0 ) info = &o;
BSONObjBuilder b;
@@ -529,19 +533,31 @@ namespace mongo {
return DBClientBase::auth(dbname, username, password.c_str(), errmsg, false);
}
- BSONObj DBClientInterface::findOne(const string &ns, const Query& query, const BSONObj *fieldsToReturn, int queryOptions) {
+ /** query N objects from the database into an array. makes sense mostly when you want a small number of results. if a huge number, use
+ query() and iterate the cursor.
+ */
+ void DBClientInterface::findN(vector<BSONObj>& out, const string& ns, Query query, int nToReturn, int nToSkip, const BSONObj *fieldsToReturn, int queryOptions) {
+ out.reserve(nToReturn);
+
auto_ptr<DBClientCursor> c =
- this->query(ns, query, 1, 0, fieldsToReturn, queryOptions);
+ this->query(ns, query, nToReturn, nToSkip, fieldsToReturn, queryOptions);
- uassert( 10276 , str::stream() << "DBClientBase::findOne: transport error: " << getServerAddress() << " query: " << query.toString(), c.get() );
+ uassert( 10276 , str::stream() << "DBClientBase::findN: transport error: " << getServerAddress() << " query: " << query.toString(), c.get() );
if ( c->hasResultFlag( ResultFlag_ShardConfigStale ) )
- throw StaleConfigException( ns , "findOne has stale config" );
+ throw StaleConfigException( ns , "findN stale config" );
- if ( !c->more() )
- return BSONObj();
+ for( int i = 0; i < nToReturn; i++ ) {
+ if ( !c->more() )
+ break;
+ out.push_back( c->nextSafe().copy() );
+ }
+ }
- return c->nextSafe().copy();
+ BSONObj DBClientInterface::findOne(const string &ns, const Query& query, const BSONObj *fieldsToReturn, int queryOptions) {
+ vector<BSONObj> v;
+ findN(v, ns, query, 1, 0, fieldsToReturn, queryOptions);
+ return v.empty() ? BSONObj() : v[0];
}
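
Usage sketch for findN(), given a connected DBClientConnection conn (collection name invented); as the lines above show, findOne() is now a thin wrapper over it:

    vector<BSONObj> firstFive;
    // at most 5 documents, skipping none; throws on transport error, like findOne()
    conn.findN( firstFive , "test.foo" , Query() , 5 );
    for ( size_t i = 0; i < firstFive.size(); i++ )
        cout << firstFive[i].toString() << endl;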
bool DBClientConnection::connect(const HostAndPort& server, string& errmsg) {
@@ -558,39 +574,50 @@ namespace mongo {
p.reset(new MessagingPort( _so_timeout, _logLevel ));
if (server->getAddr() == "0.0.0.0") {
- failed = true;
+ _failed = true;
return false;
}
+ // if( _so_timeout == 0 ){
+ // printStackTrace();
+ // log() << "Connecting to server " << _serverString << " timeout " << _so_timeout << endl;
+ // }
if ( !p->connect(*server) ) {
stringstream ss;
ss << "couldn't connect to server " << _serverString;
errmsg = ss.str();
- failed = true;
+ _failed = true;
return false;
}
+
+#ifdef MONGO_SSL
+ if ( cmdLine.sslOnNormalPorts ) {
+ p->secure( sslManager() );
+ }
+#endif
+
return true;
}
void DBClientConnection::_checkConnection() {
- if ( !failed )
+ if ( !_failed )
return;
if ( lastReconnectTry && time(0)-lastReconnectTry < 2 ) {
// we wait a little before reconnect attempt to avoid constant hammering.
// but we throw because we don't want to try to use a connection in a bad state
- throw SocketException(SocketException::FAILED_STATE);
+ throw SocketException( SocketException::FAILED_STATE , toString() );
}
if ( !autoReconnect )
- throw SocketException(SocketException::FAILED_STATE);
+ throw SocketException( SocketException::FAILED_STATE , toString() );
lastReconnectTry = time(0);
log(_logLevel) << "trying reconnect to " << _serverString << endl;
string errmsg;
- failed = false;
+ _failed = false;
if ( ! _connect(errmsg) ) {
- failed = true;
+ _failed = true;
log(_logLevel) << "reconnect " << _serverString << " failed " << errmsg << endl;
- throw SocketException(SocketException::CONNECT_ERROR);
+ throw SocketException( SocketException::CONNECT_ERROR , toString() );
}
log(_logLevel) << "reconnect " << _serverString << " ok" << endl;
@@ -675,7 +702,7 @@ namespace mongo {
/* connection CANNOT be used anymore as more data may be on the way from the server.
we have to reconnect.
*/
- failed = true;
+ _failed = true;
p->shutdown();
throw;
}
@@ -683,12 +710,11 @@ namespace mongo {
return n;
}
- void DBClientBase::insert( const string & ns , BSONObj obj ) {
+ void DBClientBase::insert( const string & ns , BSONObj obj , int flags) {
Message toSend;
BufBuilder b;
- int opts = 0;
- b.appendNum( opts );
+ b.appendNum( flags );
b.appendStr( ns );
obj.appendSelfToBufBuilder( b );
@@ -697,12 +723,11 @@ namespace mongo {
say( toSend );
}
- void DBClientBase::insert( const string & ns , const vector< BSONObj > &v ) {
+ void DBClientBase::insert( const string & ns , const vector< BSONObj > &v , int flags) {
Message toSend;
BufBuilder b;
- int opts = 0;
- b.appendNum( opts );
+ b.appendNum( flags );
b.appendStr( ns );
for( vector< BSONObj >::const_iterator i = v.begin(); i != v.end(); ++i )
i->appendSelfToBufBuilder( b );
@@ -750,8 +775,12 @@ namespace mongo {
toSend.setData( dbUpdate , b.buf() , b.len() );
say( toSend );
+
+
}
+
+
auto_ptr<DBClientCursor> DBClientWithCommands::getIndexes( const string &ns ) {
return query( Namespace( ns.c_str() ).getSisterNS( "system.indexes" ).c_str() , BSON( "ns" << ns ) );
}
@@ -816,7 +845,7 @@ namespace mongo {
return ss.str();
}
- bool DBClientWithCommands::ensureIndex( const string &ns , BSONObj keys , bool unique, const string & name , bool cache ) {
+ bool DBClientWithCommands::ensureIndex( const string &ns , BSONObj keys , bool unique, const string & name , bool cache, bool background, int version ) {
BSONObjBuilder toSave;
toSave.append( "ns" , ns );
toSave.append( "key" , keys );
@@ -834,9 +863,15 @@ namespace mongo {
cacheKey += nn;
}
+ if( version >= 0 )
+ toSave.append("v", version);
+
if ( unique )
toSave.appendBool( "unique", unique );
+ if( background )
+ toSave.appendBool( "background", true );
+
if ( _seenIndexes.count( cacheKey ) )
return 0;
@@ -874,13 +909,13 @@ namespace mongo {
toSend.setData(dbQuery, b.buf(), b.len());
}
- void DBClientConnection::say( Message &toSend ) {
+ void DBClientConnection::say( Message &toSend, bool isRetry ) {
checkConnection();
try {
port().say( toSend );
}
catch( SocketException & ) {
- failed = true;
+ _failed = true;
throw;
}
}
@@ -889,8 +924,8 @@ namespace mongo {
port().piggyBack( toSend );
}
- void DBClientConnection::recv( Message &m ) {
- port().recv(m);
+ bool DBClientConnection::recv( Message &m ) {
+ return port().recv(m);
}
bool DBClientConnection::call( Message &toSend, Message &response, bool assertOk , string * actualServer ) {
@@ -900,7 +935,7 @@ namespace mongo {
*/
try {
if ( !port().call(toSend, response) ) {
- failed = true;
+ _failed = true;
if ( assertOk )
uasserted( 10278 , str::stream() << "dbclient error communicating with server: " << getServerAddress() );
@@ -908,21 +943,46 @@ namespace mongo {
}
}
catch( SocketException & ) {
- failed = true;
+ _failed = true;
throw;
}
return true;
}
- void DBClientConnection::checkResponse( const char *data, int nReturned ) {
+ BSONElement getErrField(const BSONObj& o) {
+ BSONElement first = o.firstElement();
+ if( strcmp(first.fieldName(), "$err") == 0 )
+ return first;
+
+ // temp - will be DEV only later
+ /*DEV*/
+ if( 1 ) {
+ BSONElement e = o["$err"];
+ if( !e.eoo() ) {
+ wassert(false);
+ }
+ return e;
+ }
+
+ return BSONElement();
+ }
+
+ bool hasErrField( const BSONObj& o ){
+ return ! getErrField( o ).eoo();
+ }
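
Sketch of the intended use of the new helpers (reply contents invented); they centralize the convention that an error reply carries $err as its first element:

    BSONObj reply = fromjson( "{ \"$err\" : \"not master\" , \"code\" : 10058 }" );
    if ( hasErrField( reply ) ) {
        BSONElement e = getErrField( reply );
        log() << "server reported error: " << e.String() << endl;
    }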
+
+ void DBClientConnection::checkResponse( const char *data, int nReturned, bool* retry, string* host ) {
/* check for errors. the only one we really care about at
* this stage is "not master"
*/
+ *retry = false;
+ *host = _serverString;
+
if ( clientSet && nReturned ) {
assert(data);
BSONObj o(data);
- BSONElement e = o["$err"];
+ BSONElement e = getErrField(o);
if ( e.type() == String && str::contains( e.valuestr() , "not master" ) ) {
clientSet->isntMaster();
}
@@ -930,7 +990,7 @@ namespace mongo {
}
void DBClientConnection::killCursor( long long cursorId ) {
- BufBuilder b;
+ StackBufBuilder b;
b.appendNum( (int)0 ); // reserved
b.appendNum( (int)1 ); // number
b.appendNum( cursorId );
@@ -944,6 +1004,19 @@ namespace mongo {
say(m);
}
+#ifdef MONGO_SSL
+ SSLManager* DBClientConnection::sslManager() {
+ if ( _sslManager )
+ return _sslManager;
+
+ SSLManager* s = new SSLManager(true);
+ _sslManager = s;
+ return s;
+ }
+
+ SSLManager* DBClientConnection::_sslManager = 0;
+#endif
+
AtomicUInt DBClientConnection::_numConnections;
bool DBClientConnection::_lazyKillCursor = true;
diff --git a/client/dbclient.h b/client/dbclient.h
index 9bc71fd..2b4bb85 100644
--- a/client/dbclient.h
+++ b/client/dbclient.h
@@ -1,4 +1,7 @@
-/** @file dbclient.h - connect to a Mongo database as a database, from C++ */
+/** @file dbclient.h
+
+ Core MongoDB C++ driver interfaces are defined here.
+*/
/* Copyright 2009 10gen Inc.
*
@@ -18,7 +21,8 @@
#pragma once
#include "../pch.h"
-#include "../util/message.h"
+#include "../util/net/message.h"
+#include "../util/net/message_port.h"
#include "../db/jsobj.h"
#include "../db/json.h"
#include <stack>
@@ -100,6 +104,15 @@ namespace mongo {
RemoveOption_Broadcast = 1 << 1
};
+
+ /**
+     * need to put in DbMessage::ReservedOptions as well
+ */
+ enum InsertOptions {
+        /** With multi-insert, keep processing inserts if one fails */
+ InsertOption_ContinueOnError = 1 << 0
+ };
+
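
A usage sketch (collection invented): with the flag set, the server is asked to keep applying the remaining documents of a multi-insert after an error such as a duplicate key, with getLastError() reporting the failure afterwards.

    vector<BSONObj> docs;
    docs.push_back( BSON( "_id" << 1 ) );
    docs.push_back( BSON( "_id" << 1 ) ); // duplicate: would normally abort the batch
    docs.push_back( BSON( "_id" << 2 ) );
    conn.insert( "test.bulk" , docs , InsertOption_ContinueOnError );
    string err = conn.getLastError(); // dup key reported; _id 2 was still attempted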
class DBClientBase;
/**
@@ -174,7 +187,7 @@ namespace mongo {
string toString() const { return _string; }
- DBClientBase* connect( string& errmsg ) const;
+ DBClientBase* connect( string& errmsg, double socketTimeout = 0 ) const;
string getSetName() const { return _setName; }
@@ -296,7 +309,7 @@ namespace mongo {
Query& where(const string &jscode) { return where(jscode, BSONObj()); }
/**
- * if this query has an orderby, hint, or some other field
+ * @return true if this query has an orderby, hint, or some other field
*/
bool isComplex( bool * hasDollar = 0 ) const;
@@ -332,12 +345,15 @@ namespace mongo {
virtual ~DBConnector() {}
/** actualServer is set to the actual server where they call went if there was a choice (SlaveOk) */
virtual bool call( Message &toSend, Message &response, bool assertOk=true , string * actualServer = 0 ) = 0;
- virtual void say( Message &toSend ) = 0;
+ virtual void say( Message &toSend, bool isRetry = false ) = 0;
virtual void sayPiggyBack( Message &toSend ) = 0;
- virtual void checkResponse( const char* data, int nReturned ) {}
-
/* used by QueryOption_Exhaust. To use that your subclass must implement this. */
- virtual void recv( Message& m ) { assert(false); }
+ virtual bool recv( Message& m ) { assert(false); return false; }
+        // In general, for lazy queries, we'll need to say(), then recv(), then checkResponse()
+ virtual void checkResponse( const char* data, int nReturned, bool* retry = NULL, string* targetHost = NULL ) {
+ if( retry ) *retry = false; if( targetHost ) *targetHost = "";
+ }
+ virtual bool lazySupported() const = 0;
};
/**
@@ -348,12 +364,9 @@ namespace mongo {
virtual auto_ptr<DBClientCursor> query(const string &ns, Query query, int nToReturn = 0, int nToSkip = 0,
const BSONObj *fieldsToReturn = 0, int queryOptions = 0 , int batchSize = 0 ) = 0;
- /** don't use this - called automatically by DBClientCursor for you */
- virtual auto_ptr<DBClientCursor> getMore( const string &ns, long long cursorId, int nToReturn = 0, int options = 0 ) = 0;
-
- virtual void insert( const string &ns, BSONObj obj ) = 0;
+ virtual void insert( const string &ns, BSONObj obj , int flags=0) = 0;
- virtual void insert( const string &ns, const vector< BSONObj >& v ) = 0;
+ virtual void insert( const string &ns, const vector< BSONObj >& v , int flags=0) = 0;
virtual void remove( const string &ns , Query query, bool justOne = 0 ) = 0;
@@ -367,8 +380,15 @@ namespace mongo {
*/
virtual BSONObj findOne(const string &ns, const Query& query, const BSONObj *fieldsToReturn = 0, int queryOptions = 0);
+ /** query N objects from the database into an array. makes sense mostly when you want a small number of results. if a huge number, use
+ query() and iterate the cursor.
+ */
+ void findN(vector<BSONObj>& out, const string&ns, Query query, int nToReturn, int nToSkip = 0, const BSONObj *fieldsToReturn = 0, int queryOptions = 0);
+
virtual string getServerAddress() const = 0;
+ /** don't use this - called automatically by DBClientCursor for you */
+ virtual auto_ptr<DBClientCursor> getMore( const string &ns, long long cursorId, int nToReturn = 0, int options = 0 ) = 0;
};
/**
@@ -449,15 +469,19 @@ namespace mongo {
*/
bool createCollection(const string &ns, long long size = 0, bool capped = false, int max = 0, BSONObj *info = 0);
- /** Get error result from the last operation on this connection.
+ /** Get error result from the last write operation (insert/update/delete) on this connection.
@return error message text, or empty string if no error.
*/
string getLastError();
- /** Get error result from the last operation on this connection.
+
+ /** Get error result from the last write operation (insert/update/delete) on this connection.
@return full error object.
*/
virtual BSONObj getLastErrorDetailed();
+ /** Can be called with the returned value from getLastErrorDetailed to extract an error string.
+ If all you need is the string, just call getLastError() instead.
+ */
static string getLastErrorString( const BSONObj& res );
/** Return the last error which has occurred, even if not the very last operation.
@@ -640,13 +664,15 @@ namespace mongo {
@param ns collection to be indexed
@param keys the "key pattern" for the index. e.g., { name : 1 }
@param unique if true, indicates that key uniqueness should be enforced for this index
- @param name if not isn't specified, it will be created from the keys (recommended)
+ @param name if not specified, it will be created from the keys automatically (which is recommended)
@param cache if set to false, the index cache for the connection won't remember this call
+ @param background build index in the background (see mongodb docs/wiki for details)
+ @param v index version. leave at default value. (unit tests set this parameter.)
@return whether or not sent message to db.
should be true on first call, false on subsequent unless resetIndexCache was called
*/
virtual bool ensureIndex( const string &ns , BSONObj keys , bool unique = false, const string &name = "",
- bool cache = true );
+ bool cache = true, bool background = false, int v = -1 );
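
Call sketch for the extended signature (namespace invented); v is normally left at its default of -1 so the server chooses the index version:

    // background, unique index on { email : 1 }; name is derived from the keys
    conn.ensureIndex( "test.users" , BSON( "email" << 1 ) ,
                      /*unique*/ true , /*name*/ "" ,
                      /*cache*/ true , /*background*/ true );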
/**
clears the index cache, so the subsequent call to ensureIndex for any index will go to the server
@@ -748,12 +774,12 @@ namespace mongo {
/**
insert an object into the database
*/
- virtual void insert( const string &ns , BSONObj obj );
+ virtual void insert( const string &ns , BSONObj obj , int flags=0);
/**
insert a vector of objects into the database
*/
- virtual void insert( const string &ns, const vector< BSONObj >& v );
+ virtual void insert( const string &ns, const vector< BSONObj >& v , int flags=0);
/**
remove matching objects from the database
@@ -772,9 +798,10 @@ namespace mongo {
virtual bool callRead( Message& toSend , Message& response ) = 0;
// virtual bool callWrite( Message& toSend , Message& response ) = 0; // TODO: add this if needed
- virtual void say( Message& toSend ) = 0;
-
+
virtual ConnectionString::ConnectionType type() const = 0;
+
+ virtual double getSoTimeout() const = 0;
}; // DBClientBase
@@ -798,7 +825,7 @@ namespace mongo {
Connect timeout is fixed, but short, at 5 seconds.
*/
DBClientConnection(bool _autoReconnect=false, DBClientReplicaSet* cp=0, double so_timeout=0) :
- clientSet(cp), failed(false), autoReconnect(_autoReconnect), lastReconnectTry(0), _so_timeout(so_timeout) {
+ clientSet(cp), _failed(false), autoReconnect(_autoReconnect), lastReconnectTry(0), _so_timeout(so_timeout) {
_numConnections++;
}
@@ -869,14 +896,14 @@ namespace mongo {
@return true if this connection is currently in a failed state. When autoreconnect is on,
a connection will transition back to an ok state after reconnecting.
*/
- bool isFailed() const { return failed; }
+ bool isFailed() const { return _failed; }
- MessagingPort& port() { return *p; }
+ MessagingPort& port() { assert(p); return *p; }
string toStringLong() const {
stringstream ss;
ss << _serverString;
- if ( failed ) ss << " failed";
+ if ( _failed ) ss << " failed";
return ss.str();
}
@@ -887,11 +914,15 @@ namespace mongo {
virtual void killCursor( long long cursorID );
virtual bool callRead( Message& toSend , Message& response ) { return call( toSend , response ); }
- virtual void say( Message &toSend );
+ virtual void say( Message &toSend, bool isRetry = false );
+ virtual bool recv( Message& m );
+ virtual void checkResponse( const char *data, int nReturned, bool* retry = NULL, string* host = NULL );
virtual bool call( Message &toSend, Message &response, bool assertOk = true , string * actualServer = 0 );
virtual ConnectionString::ConnectionType type() const { return ConnectionString::MASTER; }
- virtual void checkResponse( const char *data, int nReturned );
void setSoTimeout(double to) { _so_timeout = to; }
+ double getSoTimeout() const { return _so_timeout; }
+
+ virtual bool lazySupported() const { return true; }
static int getNumConnections() {
return _numConnections;
@@ -899,16 +930,15 @@ namespace mongo {
static void setLazyKillCursor( bool lazy ) { _lazyKillCursor = lazy; }
static bool getLazyKillCursor() { return _lazyKillCursor; }
-
+
protected:
friend class SyncClusterConnection;
- virtual void recv( Message& m );
virtual void sayPiggyBack( Message &toSend );
DBClientReplicaSet *clientSet;
boost::scoped_ptr<MessagingPort> p;
boost::scoped_ptr<SockAddr> server;
- bool failed;
+ bool _failed;
const bool autoReconnect;
time_t lastReconnectTry;
HostAndPort _server; // remember for reconnects
@@ -916,7 +946,7 @@ namespace mongo {
void _checkConnection();
// throws SocketException if in failed state and not reconnecting or if waiting to reconnect
- void checkConnection() { if( failed ) _checkConnection(); }
+ void checkConnection() { if( _failed ) _checkConnection(); }
map< string, pair<string,string> > authCache;
double _so_timeout;
@@ -924,6 +954,11 @@ namespace mongo {
static AtomicUInt _numConnections;
static bool _lazyKillCursor; // lazy means we piggy back kill cursors on next op
+
+#ifdef MONGO_SSL
+ static SSLManager* sslManager();
+ static SSLManager* _sslManager;
+#endif
};
/** pings server to check if it's up
@@ -932,6 +967,9 @@ namespace mongo {
DBClientBase * createDirectClient();
+ BSONElement getErrField( const BSONObj& result );
+ bool hasErrField( const BSONObj& result );
+
} // namespace mongo
#include "dbclientcursor.h"
diff --git a/client/dbclient_rs.cpp b/client/dbclient_rs.cpp
index 37f6225..2cab1f7 100644
--- a/client/dbclient_rs.cpp
+++ b/client/dbclient_rs.cpp
@@ -54,9 +54,9 @@ namespace mongo {
void run() {
log() << "starting" << endl;
while ( ! inShutdown() ) {
- sleepsecs( 20 );
+ sleepsecs( 10 );
try {
- ReplicaSetMonitor::checkAll();
+ ReplicaSetMonitor::checkAll( true );
}
catch ( std::exception& e ) {
error() << "check failed: " << e.what() << endl;
@@ -99,17 +99,14 @@ namespace mongo {
}
_nodes.push_back( Node( servers[i] , conn.release() ) );
-
+
+ int myLoc = _nodes.size() - 1;
string maybePrimary;
- if (_checkConnection( _nodes[_nodes.size()-1].conn , maybePrimary, false)) {
- break;
- }
+ _checkConnection( _nodes[myLoc].conn.get() , maybePrimary, false, myLoc );
}
}
ReplicaSetMonitor::~ReplicaSetMonitor() {
- for ( unsigned i=0; i<_nodes.size(); i++ )
- delete _nodes[i].conn;
_nodes.clear();
_master = -1;
}
@@ -125,7 +122,16 @@ namespace mongo {
return m;
}
- void ReplicaSetMonitor::checkAll() {
+ ReplicaSetMonitorPtr ReplicaSetMonitor::get( const string& name ) {
+ scoped_lock lk( _setsLock );
+ map<string,ReplicaSetMonitorPtr>::const_iterator i = _sets.find( name );
+ if ( i == _sets.end() )
+ return ReplicaSetMonitorPtr();
+ return i->second;
+ }
+
+
+ void ReplicaSetMonitor::checkAll( bool checkAllSecondaries ) {
set<string> seen;
while ( true ) {
@@ -146,7 +152,7 @@ namespace mongo {
if ( ! m )
break;
- m->check();
+ m->check( checkAllSecondaries );
}
@@ -202,7 +208,7 @@ namespace mongo {
return _nodes[_master].addr;
}
- _check();
+ _check( false );
scoped_lock lk( _lock );
uassert( 10009 , str::stream() << "ReplicaSetMonitor no master found for set: " << _name , _master >= 0 );
@@ -210,34 +216,70 @@ namespace mongo {
}
HostAndPort ReplicaSetMonitor::getSlave( const HostAndPort& prev ) {
- // make sure its valid
- if ( prev.port() > 0 ) {
+ // make sure its valid
+
+ bool wasFound = false;
+
+        // This is always true, since port() checks it
+ assert( prev.port() >= 0 );
+ if( prev.host().size() ){
scoped_lock lk( _lock );
for ( unsigned i=0; i<_nodes.size(); i++ ) {
if ( prev != _nodes[i].addr )
continue;
- if ( _nodes[i].ok )
+ wasFound = true;
+
+ if ( _nodes[i].okForSecondaryQueries() )
return prev;
+
break;
}
}
+ if( prev.host().size() ){
+ if( wasFound ){ LOG(1) << "slave '" << prev << "' is no longer ok to use" << endl; }
+ else{ LOG(1) << "slave '" << prev << "' was not found in the replica set" << endl; }
+ }
+ else LOG(1) << "slave '" << prev << "' is not initialized or invalid" << endl;
+
return getSlave();
}
HostAndPort ReplicaSetMonitor::getSlave() {
- scoped_lock lk( _lock );
- for ( unsigned i=0; i<_nodes.size(); i++ ) {
- _nextSlave = ( _nextSlave + 1 ) % _nodes.size();
- if ( _nextSlave == _master )
- continue;
- if ( _nodes[ _nextSlave ].ok )
- return _nodes[ _nextSlave ].addr;
+ LOG(2) << "selecting new slave from replica set " << getServerAddress() << endl;
+
+        // Logic is to retry three times for any secondary node; if we can't find any secondary, we'll take
+ // any "ok" node
+ // TODO: Could this query hidden nodes?
+ const int MAX = 3;
+ for ( int xxx=0; xxx<MAX; xxx++ ) {
+
+ {
+ scoped_lock lk( _lock );
+
+ unsigned i = 0;
+ for ( ; i<_nodes.size(); i++ ) {
+ _nextSlave = ( _nextSlave + 1 ) % _nodes.size();
+ if ( _nextSlave == _master ){
+ LOG(2) << "not selecting " << _nodes[_nextSlave] << " as it is the current master" << endl;
+ continue;
+ }
+ if ( _nodes[ _nextSlave ].okForSecondaryQueries() || ( _nodes[ _nextSlave ].ok && ( xxx + 1 ) >= MAX ) )
+ return _nodes[ _nextSlave ].addr;
+
+ LOG(2) << "not selecting " << _nodes[_nextSlave] << " as it is not ok to use" << endl;
+ }
+
+ }
+
+ check(false);
}
+
+ LOG(2) << "no suitable slave nodes found, returning default node " << _nodes[ 0 ] << endl;
- return _nodes[ 0 ].addr;
+ return _nodes[0].addr;
}
/**
@@ -266,7 +308,7 @@ namespace mongo {
string host = member["name"].String();
int m = -1;
- if ((m = _find(host)) <= 0) {
+ if ((m = _find(host)) < 0) {
continue;
}
@@ -309,16 +351,34 @@ namespace mongo {
- bool ReplicaSetMonitor::_checkConnection( DBClientConnection * c , string& maybePrimary , bool verbose ) {
+ bool ReplicaSetMonitor::_checkConnection( DBClientConnection * c , string& maybePrimary , bool verbose , int nodesOffset ) {
scoped_lock lk( _checkConnectionLock );
bool isMaster = false;
bool changed = false;
try {
+ Timer t;
BSONObj o;
c->isMaster(isMaster, &o);
+
+ if ( o["setName"].type() != String || o["setName"].String() != _name ) {
+ warning() << "node: " << c->getServerAddress() << " isn't a part of set: " << _name
+ << " ismaster: " << o << endl;
+ if ( nodesOffset >= 0 )
+ _nodes[nodesOffset].ok = false;
+ return false;
+ }
- log( ! verbose ) << "ReplicaSetMonitor::_checkConnection: " << c->toString() << ' ' << o << endl;
+ if ( nodesOffset >= 0 ) {
+ _nodes[nodesOffset].pingTimeMillis = t.millis();
+ _nodes[nodesOffset].hidden = o["hidden"].trueValue();
+ _nodes[nodesOffset].secondary = o["secondary"].trueValue();
+ _nodes[nodesOffset].ismaster = o["ismaster"].trueValue();
+
+ _nodes[nodesOffset].lastIsMaster = o.copy();
+ }
+ log( ! verbose ) << "ReplicaSetMonitor::_checkConnection: " << c->toString() << ' ' << o << endl;
+
// add other nodes
if ( o["hosts"].type() == Array ) {
if ( o["primary"].type() == String )
@@ -329,11 +389,14 @@ namespace mongo {
if (o.hasField("passives") && o["passives"].type() == Array) {
_checkHosts(o["passives"].Obj(), changed);
}
-
+
_checkStatus(c);
+
+
}
catch ( std::exception& e ) {
log( ! verbose ) << "ReplicaSetMonitor::_checkConnection: caught exception " << c->toString() << ' ' << e.what() << endl;
+ _nodes[nodesOffset].ok = false;
}
if ( changed && _hook )
@@ -342,24 +405,28 @@ namespace mongo {
return isMaster;
}
- void ReplicaSetMonitor::_check() {
+ void ReplicaSetMonitor::_check( bool checkAllSecondaries ) {
bool triedQuickCheck = false;
LOG(1) << "_check : " << getServerAddress() << endl;
+ int newMaster = -1;
+
for ( int retry = 0; retry < 2; retry++ ) {
for ( unsigned i=0; i<_nodes.size(); i++ ) {
- DBClientConnection * c;
+ shared_ptr<DBClientConnection> c;
{
scoped_lock lk( _lock );
c = _nodes[i].conn;
}
string maybePrimary;
- if ( _checkConnection( c , maybePrimary , retry ) ) {
+ if ( _checkConnection( c.get() , maybePrimary , retry , i ) ) {
_master = i;
- return;
+ newMaster = i;
+ if ( ! checkAllSecondaries )
+ return;
}
if ( ! triedQuickCheck && maybePrimary.size() ) {
@@ -367,36 +434,44 @@ namespace mongo {
if ( x >= 0 ) {
triedQuickCheck = true;
string dummy;
- DBClientConnection * testConn;
+ shared_ptr<DBClientConnection> testConn;
{
scoped_lock lk( _lock );
testConn = _nodes[x].conn;
}
- if ( _checkConnection( testConn , dummy , false ) ) {
+ if ( _checkConnection( testConn.get() , dummy , false , x ) ) {
_master = x;
- return;
+ newMaster = x;
+ if ( ! checkAllSecondaries )
+ return;
}
}
}
}
+
+ if ( newMaster >= 0 )
+ return;
+
sleepsecs(1);
}
}
- void ReplicaSetMonitor::check() {
+ void ReplicaSetMonitor::check( bool checkAllSecondaries ) {
// first see if the current master is fine
if ( _master >= 0 ) {
string temp;
- if ( _checkConnection( _nodes[_master].conn , temp , false ) ) {
- // current master is fine, so we're done
- return;
+ if ( _checkConnection( _nodes[_master].conn.get() , temp , false , _master ) ) {
+ if ( ! checkAllSecondaries ) {
+ // current master is fine, so we're done
+ return;
+ }
}
}
// we either have no master, or the current is dead
- _check();
+ _check( checkAllSecondaries );
}
int ReplicaSetMonitor::_find( const string& server ) const {
@@ -419,7 +494,26 @@ namespace mongo {
return i;
return -1;
}
-
+
+ void ReplicaSetMonitor::appendInfo( BSONObjBuilder& b ) const {
+ scoped_lock lk( _lock );
+ BSONArrayBuilder hosts( b.subarrayStart( "hosts" ) );
+ for ( unsigned i=0; i<_nodes.size(); i++ ) {
+ hosts.append( BSON( "addr" << _nodes[i].addr <<
+ // "lastIsMaster" << _nodes[i].lastIsMaster << // this is a potential race, so only used when debugging
+ "ok" << _nodes[i].ok <<
+ "ismaster" << _nodes[i].ismaster <<
+ "hidden" << _nodes[i].hidden <<
+ "secondary" << _nodes[i].secondary <<
+ "pingTimeMillis" << _nodes[i].pingTimeMillis ) );
+
+ }
+ hosts.done();
+
+ b.append( "master" , _master );
+ b.append( "nextSlave" , _nextSlave );
+ }
+
mongo::mutex ReplicaSetMonitor::_setsLock( "ReplicaSetMonitor" );
map<string,ReplicaSetMonitorPtr> ReplicaSetMonitor::_sets;
@@ -428,8 +522,9 @@ namespace mongo {
// ----- DBClientReplicaSet ---------
// --------------------------------
- DBClientReplicaSet::DBClientReplicaSet( const string& name , const vector<HostAndPort>& servers )
- : _monitor( ReplicaSetMonitor::get( name , servers ) ) {
+ DBClientReplicaSet::DBClientReplicaSet( const string& name , const vector<HostAndPort>& servers, double so_timeout )
+ : _monitor( ReplicaSetMonitor::get( name , servers ) ),
+ _so_timeout( so_timeout ) {
}
DBClientReplicaSet::~DBClientReplicaSet() {
@@ -446,7 +541,7 @@ namespace mongo {
}
_masterHost = _monitor->getMaster();
- _master.reset( new DBClientConnection( true , this ) );
+ _master.reset( new DBClientConnection( true , this , _so_timeout ) );
string errmsg;
if ( ! _master->connect( _masterHost , errmsg ) ) {
_monitor->notifyFailure( _masterHost );
@@ -464,12 +559,12 @@ namespace mongo {
return _slave.get();
_monitor->notifySlaveFailure( _slaveHost );
_slaveHost = _monitor->getSlave();
- }
+ }
else {
_slaveHost = h;
}
- _slave.reset( new DBClientConnection( true , this ) );
+ _slave.reset( new DBClientConnection( true , this , _so_timeout ) );
_slave->connect( _slaveHost );
_auth( _slave.get() );
return _slave.get();
@@ -522,12 +617,12 @@ namespace mongo {
// ------------- simple functions -----------------
- void DBClientReplicaSet::insert( const string &ns , BSONObj obj ) {
- checkMaster()->insert(ns, obj);
+ void DBClientReplicaSet::insert( const string &ns , BSONObj obj , int flags) {
+ checkMaster()->insert(ns, obj, flags);
}
- void DBClientReplicaSet::insert( const string &ns, const vector< BSONObj >& v ) {
- checkMaster()->insert(ns, v);
+ void DBClientReplicaSet::insert( const string &ns, const vector< BSONObj >& v , int flags) {
+ checkMaster()->insert(ns, v, flags);
}
void DBClientReplicaSet::remove( const string &ns , Query obj , bool justOne ) {
@@ -545,12 +640,12 @@ namespace mongo {
// we're ok sending to a slave
// we'll try 2 slaves before just using master
// checkSlave will try a different slave automatically after a failure
- for ( int i=0; i<2; i++ ) {
+ for ( int i=0; i<3; i++ ) {
try {
return checkSlaveQueryResult( checkSlave()->query(ns,query,nToReturn,nToSkip,fieldsToReturn,queryOptions,batchSize) );
}
catch ( DBException &e ) {
- log() << "can't query replica set slave " << i << " : " << _slaveHost << e.what() << endl;
+ LOG(1) << "can't query replica set slave " << i << " : " << _slaveHost << causedBy( e ) << endl;
}
}
}
@@ -563,12 +658,12 @@ namespace mongo {
// we're ok sending to a slave
// we'll try 2 slaves before just using master
// checkSlave will try a different slave automatically after a failure
- for ( int i=0; i<2; i++ ) {
+ for ( int i=0; i<3; i++ ) {
try {
return checkSlave()->findOne(ns,query,fieldsToReturn,queryOptions);
}
catch ( DBException &e ) {
- LOG(1) << "can't findone replica set slave " << i << " : " << _slaveHost << e.what() << endl;
+ LOG(1) << "can't findone replica set slave " << i << " : " << _slaveHost << causedBy( e ) << endl;
}
}
}
@@ -584,23 +679,22 @@ namespace mongo {
assert(0);
}
- auto_ptr<DBClientCursor> DBClientReplicaSet::checkSlaveQueryResult( auto_ptr<DBClientCursor> result ){
+ void DBClientReplicaSet::isntMaster() {
+ log() << "got not master for: " << _masterHost << endl;
+ _monitor->notifyFailure( _masterHost );
+ _master.reset();
+ }
- bool isError = result->hasResultFlag( ResultFlag_ErrSet );
+ auto_ptr<DBClientCursor> DBClientReplicaSet::checkSlaveQueryResult( auto_ptr<DBClientCursor> result ){
+ BSONObj error;
+ bool isError = result->peekError( &error );
if( ! isError ) return result;
- BSONObj error = result->peekOne();
-
- BSONElement code = error["code"];
- if( code.eoo() || ! code.isNumber() ){
- warning() << "no code for error from secondary host " << _slaveHost << ", error was " << error << endl;
- return result;
- }
-
// We only check for "not master or secondary" errors here
// If the error code here ever changes, we need to change this code also
- if( code.Int() == 13436 /* not master or secondary */ ){
+ BSONElement code = error["code"];
+ if( code.isNumber() && code.Int() == 13436 /* not master or secondary */ ){
isntSecondary();
throw DBException( str::stream() << "slave " << _slaveHost.toString() << " is no longer secondary", 14812 );
}
@@ -615,20 +709,123 @@ namespace mongo {
_slave.reset();
}
+ void DBClientReplicaSet::say( Message& toSend, bool isRetry ) {
- void DBClientReplicaSet::isntMaster() {
- log() << "got not master for: " << _masterHost << endl;
- _monitor->notifyFailure( _masterHost );
- _master.reset();
+ if( ! isRetry )
+ _lazyState = LazyState();
+
+ int lastOp = -1;
+ bool slaveOk = false;
+
+ if ( ( lastOp = toSend.operation() ) == dbQuery ) {
+ // TODO: might be possible to do this faster by changing api
+ DbMessage dm( toSend );
+ QueryMessage qm( dm );
+ if ( ( slaveOk = ( qm.queryOptions & QueryOption_SlaveOk ) ) ) {
+
+ for ( int i = _lazyState._retries; i < 3; i++ ) {
+ try {
+ DBClientConnection* slave = checkSlave();
+ slave->say( toSend );
+
+ _lazyState._lastOp = lastOp;
+ _lazyState._slaveOk = slaveOk;
+ _lazyState._retries = i;
+ _lazyState._lastClient = slave;
+ return;
+ }
+ catch ( DBException &e ) {
+ LOG(1) << "can't callLazy replica set slave " << i << " : " << _slaveHost << causedBy( e ) << endl;
+ }
+ }
+ }
+ }
+
+ DBClientConnection* master = checkMaster();
+ master->say( toSend );
+
+ _lazyState._lastOp = lastOp;
+ _lazyState._slaveOk = slaveOk;
+ _lazyState._retries = 3;
+ _lazyState._lastClient = master;
+ return;
+ }
+
+ bool DBClientReplicaSet::recv( Message& m ) {
+
+ assert( _lazyState._lastClient );
+
+ // TODO: It would be nice if we could easily wrap a conn error as a result error
+ try {
+ return _lazyState._lastClient->recv( m );
+ }
+ catch( DBException& e ){
+ log() << "could not receive data from " << _lazyState._lastClient << causedBy( e ) << endl;
+ return false;
+ }
+ }
+
+ void DBClientReplicaSet::checkResponse( const char* data, int nReturned, bool* retry, string* targetHost ){
+
+ // For now, do exactly as we did before, so as not to break things. In general though, we
+ // should fix this so checkResponse has a more consistent contract.
+ if( ! retry ){
+ if( _lazyState._lastClient )
+ return _lazyState._lastClient->checkResponse( data, nReturned );
+ else
+ return checkMaster()->checkResponse( data, nReturned );
+ }
+
+ *retry = false;
+ if( targetHost && _lazyState._lastClient ) *targetHost = _lazyState._lastClient->getServerAddress();
+ else if (targetHost) *targetHost = "";
+
+ if( ! _lazyState._lastClient ) return;
+ if( nReturned != 1 && nReturned != -1 ) return;
+
+ BSONObj dataObj;
+ if( nReturned == 1 ) dataObj = BSONObj( data );
+
+ // Check if we should retry here
+ if( _lazyState._lastOp == dbQuery && _lazyState._slaveOk ){
+
+ // Check the error code for a slave not secondary error
+ if( nReturned == -1 ||
+ ( hasErrField( dataObj ) && ! dataObj["code"].eoo() && dataObj["code"].Int() == 13436 ) ){
+
+ bool wasMaster = false;
+ if( _lazyState._lastClient == _slave.get() ){
+ isntSecondary();
+ }
+ else if( _lazyState._lastClient == _master.get() ){
+ wasMaster = true;
+ isntMaster();
+ }
+ else
+ warning() << "passed " << dataObj << " but last rs client " << _lazyState._lastClient->toString() << " is not master or secondary" << endl;
+
+ if( _lazyState._retries < 3 ){
+ _lazyState._retries++;
+ *retry = true;
+ }
+ else{
+ (void)wasMaster; // silence set-but-not-used warning
+ // assert( wasMaster );
+ // printStackTrace();
+ log() << "too many retries (" << _lazyState._retries << "), could not get data from replica set" << endl;
+ }
+ }
+ }
}
+
bool DBClientReplicaSet::call( Message &toSend, Message &response, bool assertOk , string * actualServer ) {
if ( toSend.operation() == dbQuery ) {
// TODO: might be possible to do this faster by changing api
DbMessage dm( toSend );
QueryMessage qm( dm );
if ( qm.queryOptions & QueryOption_SlaveOk ) {
- for ( int i=0; i<2; i++ ) {
+ for ( int i=0; i<3; i++ ) {
try {
DBClientConnection* s = checkSlave();
if ( actualServer )
@@ -636,7 +833,7 @@ namespace mongo {
return s->call( toSend , response , assertOk );
}
catch ( DBException &e ) {
- LOG(1) << "can't call replica set slave " << i << " : " << _slaveHost << e.what() << endl;
+ LOG(1) << "can't call replica set slave " << i << " : " << _slaveHost << causedBy( e ) << endl;
if ( actualServer )
*actualServer = "";
}
diff --git a/client/dbclient_rs.h b/client/dbclient_rs.h
index 548b46a..b6948a0 100644
--- a/client/dbclient_rs.h
+++ b/client/dbclient_rs.h
@@ -1,4 +1,4 @@
-/** @file dbclient_rs.h - connect to a Replica Set, from C++ */
+/** @file dbclient_rs.h Connect to a Replica Set, from C++ */
/* Copyright 2009 10gen Inc.
*
@@ -43,10 +43,16 @@ namespace mongo {
static ReplicaSetMonitorPtr get( const string& name , const vector<HostAndPort>& servers );
/**
+         * gets a cached Monitor by name, or returns an empty pointer if none exists
+ */
+ static ReplicaSetMonitorPtr get( const string& name );
+
+
+ /**
* checks all sets for current master and new secondaries
* usually only called from a BackgroundJob
*/
- static void checkAll();
+ static void checkAll( bool checkAllSecondaries );
/**
* this is called whenever the config of any replica set changes
@@ -81,13 +87,15 @@ namespace mongo {
/**
* checks for current master and new secondaries
*/
- void check();
+ void check( bool checkAllSecondaries );
string getName() const { return _name; }
string getServerAddress() const;
bool contains( const string& server ) const;
+
+ void appendInfo( BSONObjBuilder& b ) const;
private:
/**
@@ -98,7 +106,7 @@ namespace mongo {
*/
ReplicaSetMonitor( const string& name , const vector<HostAndPort>& servers );
- void _check();
+ void _check( bool checkAllSecondaries );
/**
* Use replSetGetStatus command to make sure hosts in host list are up
@@ -119,9 +127,10 @@ namespace mongo {
* @param c the connection to check
* @param maybePrimary OUT
* @param verbose
+         * @param nodesOffset offset into the _nodes array; -1 if the connection is not in it
* @return if the connection is good
*/
- bool _checkConnection( DBClientConnection * c , string& maybePrimary , bool verbose );
+ bool _checkConnection( DBClientConnection * c , string& maybePrimary , bool verbose , int nodesOffset );
int _find( const string& server ) const ;
int _find_inlock( const string& server ) const ;
@@ -132,14 +141,44 @@ namespace mongo {
string _name;
struct Node {
- Node( const HostAndPort& a , DBClientConnection* c ) : addr( a ) , conn(c) , ok(true) {}
+ Node( const HostAndPort& a , DBClientConnection* c )
+ : addr( a ) , conn(c) , ok(true) ,
+ ismaster(false), secondary( false ) , hidden( false ) , pingTimeMillis(0) {
+ }
+
+ bool okForSecondaryQueries() const {
+ return ok && secondary && ! hidden;
+ }
+
+ BSONObj toBSON() const {
+ return BSON( "addr" << addr.toString() <<
+ "isMaster" << ismaster <<
+ "secondary" << secondary <<
+ "hidden" << hidden <<
+ "ok" << ok );
+ }
+
+ string toString() const {
+ return toBSON().toString();
+ }
+
HostAndPort addr;
- DBClientConnection* conn;
+ shared_ptr<DBClientConnection> conn;
// if this node is in a failure state
// used for slave routing
// this is too simple, should make it better
bool ok;
+
+ // as reported by ismaster
+ BSONObj lastIsMaster;
+
+ bool ismaster;
+ bool secondary;
+ bool hidden;
+
+ int pingTimeMillis;
+
};
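
The eligibility rule the monitor now applies, restated schematically (Node is internal to ReplicaSetMonitor, so this free function is illustrative only):

    // schematic restatement of Node::okForSecondaryQueries()
    bool eligibleForSecondaryReads( bool ok , bool secondary , bool hidden ) {
        return ok && secondary && ! hidden;
    }
    // (true,  true,  false) -> true   healthy, visible secondary
    // (true,  false, false) -> false  the primary is routed separately
    // (true,  true,  true ) -> false  hidden members never serve reads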
/**
@@ -168,7 +207,7 @@ namespace mongo {
public:
/** Call connect() after constructing. autoReconnect is always on for DBClientReplicaSet connections. */
- DBClientReplicaSet( const string& name , const vector<HostAndPort>& servers );
+ DBClientReplicaSet( const string& name , const vector<HostAndPort>& servers, double so_timeout=0 );
virtual ~DBClientReplicaSet();
/** Returns false if no member of the set was reachable, or neither is
@@ -191,11 +230,11 @@ namespace mongo {
/** throws userassertion "no master found" */
virtual BSONObj findOne(const string &ns, const Query& query, const BSONObj *fieldsToReturn = 0, int queryOptions = 0);
- virtual void insert( const string &ns , BSONObj obj );
+ virtual void insert( const string &ns , BSONObj obj , int flags=0);
/** insert multiple objects. Note that single object insert is asynchronous, so this version
is only nominally faster and not worth a special effort to try to use. */
- virtual void insert( const string &ns, const vector< BSONObj >& v );
+ virtual void insert( const string &ns, const vector< BSONObj >& v , int flags=0);
virtual void remove( const string &ns , Query obj , bool justOne = 0 );
@@ -210,11 +249,14 @@ namespace mongo {
// ---- callback pieces -------
- virtual void checkResponse( const char *data, int nReturned ) { checkMaster()->checkResponse( data , nReturned ); }
+ virtual void say( Message &toSend, bool isRetry = false );
+ virtual bool recv( Message &toRecv );
+ virtual void checkResponse( const char* data, int nReturned, bool* retry = NULL, string* targetHost = NULL );
/* this is the callback from our underlying connections to notify us that we got a "not master" error.
*/
void isntMaster();
+
/* this is used to indicate we got a "not master or secondary" error from a secondary.
*/
void isntSecondary();
@@ -225,16 +267,18 @@ namespace mongo {
// ----- informational ----
+ double getSoTimeout() const { return _so_timeout; }
+
string toString() { return getServerAddress(); }
string getServerAddress() const { return _monitor->getServerAddress(); }
virtual ConnectionString::ConnectionType type() const { return ConnectionString::SET; }
+ virtual bool lazySupported() const { return true; }
// ---- low level ------
virtual bool call( Message &toSend, Message &response, bool assertOk=true , string * actualServer = 0 );
- virtual void say( Message &toSend ) { checkMaster()->say( toSend ); }
virtual bool callRead( Message& toSend , Message& response ) { return checkMaster()->callRead( toSend , response ); }
@@ -258,6 +302,8 @@ namespace mongo {
HostAndPort _slaveHost;
scoped_ptr<DBClientConnection> _slave;
+
+ double _so_timeout;
/**
* for storing authentication info
@@ -277,6 +323,22 @@ namespace mongo {
// this could be a security issue, as the password is stored in memory
// not sure if/how we should handle
list<AuthInfo> _auths;
+
+ protected:
+
+ /**
+ * for storing (non-threadsafe) information between lazy calls
+ */
+ class LazyState {
+ public:
+ LazyState() : _lastClient( NULL ), _lastOp( -1 ), _slaveOk( false ), _retries( 0 ) {}
+ DBClientConnection* _lastClient;
+ int _lastOp;
+ bool _slaveOk;
+ int _retries;
+
+ } _lazyState;
+
};
diff --git a/client/dbclientcursor.cpp b/client/dbclientcursor.cpp
index 6c6afc0..5db360e 100644
--- a/client/dbclientcursor.cpp
+++ b/client/dbclientcursor.cpp
@@ -37,8 +37,7 @@ namespace mongo {
return batchSize < nToReturn ? batchSize : nToReturn;
}
- bool DBClientCursor::init() {
- Message toSend;
+ void DBClientCursor::_assembleInit( Message& toSend ) {
if ( !cursorId ) {
assembleRequest( ns, query, nextBatchSize() , nToSkip, fieldsToReturn, opts, toSend );
}
@@ -50,12 +49,18 @@ namespace mongo {
b.appendNum( cursorId );
toSend.setData( dbGetMore, b.buf(), b.len() );
}
- if ( !_client->call( toSend, *m, false ) ) {
+ }
+
+ bool DBClientCursor::init() {
+ Message toSend;
+ _assembleInit( toSend );
+
+ if ( !_client->call( toSend, *b.m, false ) ) {
// log msg temp?
log() << "DBClientCursor::init call() failed" << endl;
return false;
}
- if ( m->empty() ) {
+ if ( b.m->empty() ) {
// log msg temp?
log() << "DBClientCursor::init message from call() was empty" << endl;
return false;
@@ -63,12 +68,41 @@ namespace mongo {
dataReceived();
return true;
}
+
+ void DBClientCursor::initLazy( bool isRetry ) {
+ verify( 15875 , _client->lazySupported() );
+ Message toSend;
+ _assembleInit( toSend );
+ _client->say( toSend, isRetry );
+ }
+
+ bool DBClientCursor::initLazyFinish( bool& retry ) {
+
+ bool recvd = _client->recv( *b.m );
+
+ // If we get a bad response, return false
+ if ( ! recvd || b.m->empty() ) {
+
+ if( !recvd )
+ log() << "DBClientCursor::init lazy say() failed" << endl;
+ if( b.m->empty() )
+ log() << "DBClientCursor::init message from say() was empty" << endl;
+
+ _client->checkResponse( NULL, -1, &retry, &_lazyHost );
+
+ return false;
+
+ }
+
+ dataReceived( retry, _lazyHost );
+ return ! retry;
+ }
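
Schematic caller-side flow for the lazy pair (error handling trimmed; assumes a DBClientCursor named cursor over a lazy-capable connector): initLazy() sends via say(), and initLazyFinish() receives, then lets the connector's checkResponse() decide whether to resend to another member:

    bool retry = false;
    cursor.initLazy();                       // first send
    while ( ! cursor.initLazyFinish( retry ) ) {
        if ( ! retry )
            break;                           // hard failure; give up
        cursor.initLazy( /*isRetry*/ true ); // replica-set client picks another node
    }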
void DBClientCursor::requestMore() {
- assert( cursorId && pos == nReturned );
+ assert( cursorId && b.pos == b.nReturned );
if (haveLimit) {
- nToReturn -= nReturned;
+ nToReturn -= b.nReturned;
assert(nToReturn > 0);
}
BufBuilder b;
@@ -83,7 +117,7 @@ namespace mongo {
if ( _client ) {
_client->call( toSend, *response );
- m = response;
+ this->b.m = response;
dataReceived();
}
else {
@@ -91,7 +125,7 @@ namespace mongo {
ScopedDbConnection conn( _scopedHost );
conn->call( toSend , *response );
_client = conn.get();
- m = response;
+ this->b.m = response;
dataReceived();
_client = 0;
conn.done();
@@ -100,19 +134,24 @@ namespace mongo {
/** with QueryOption_Exhaust, the server just blasts data at us (marked at end with cursorid==0). */
void DBClientCursor::exhaustReceiveMore() {
- assert( cursorId && pos == nReturned );
+ assert( cursorId && b.pos == b.nReturned );
assert( !haveLimit );
auto_ptr<Message> response(new Message());
assert( _client );
_client->recv(*response);
- m = response;
+ b.m = response;
dataReceived();
}
- void DBClientCursor::dataReceived() {
- QueryResult *qr = (QueryResult *) m->singleData();
+ void DBClientCursor::dataReceived( bool& retry, string& host ) {
+
+ QueryResult *qr = (QueryResult *) b.m->singleData();
resultFlags = qr->resultFlags();
+ if ( qr->resultFlags() & ResultFlag_ErrSet ) {
+ wasError = true;
+ }
+
if ( qr->resultFlags() & ResultFlag_CursorNotFound ) {
// cursor id no longer valid at the server.
assert( qr->cursorId == 0 );
@@ -127,11 +166,12 @@ namespace mongo {
cursorId = qr->cursorId;
}
- nReturned = qr->nReturned;
- pos = 0;
- data = qr->data();
+ b.nReturned = qr->nReturned;
+ b.pos = 0;
+ b.data = qr->data();
+
+ _client->checkResponse( b.data, b.nReturned, &retry, &host ); // watches for "not master"
- _client->checkResponse( data, nReturned );
/* this assert would fire the way we currently work:
assert( nReturned || cursorId == 0 );
*/
@@ -144,17 +184,17 @@ namespace mongo {
if ( !_putBack.empty() )
return true;
- if (haveLimit && pos >= nToReturn)
+ if (haveLimit && b.pos >= nToReturn)
return false;
- if ( pos < nReturned )
+ if ( b.pos < b.nReturned )
return true;
if ( cursorId == 0 )
return false;
requestMore();
- return pos < nReturned;
+ return b.pos < b.nReturned;
}
BSONObj DBClientCursor::next() {
@@ -165,11 +205,11 @@ namespace mongo {
return ret;
}
- uassert(13422, "DBClientCursor next() called but more() is false", pos < nReturned);
+ uassert(13422, "DBClientCursor next() called but more() is false", b.pos < b.nReturned);
- pos++;
- BSONObj o(data);
- data += o.objsize();
+ b.pos++;
+ BSONObj o(b.data);
+ b.data += o.objsize();
/* todo would be good to make data null at end of batch for safety */
return o;
}
@@ -187,9 +227,9 @@ namespace mongo {
}
*/
- int p = pos;
- const char *d = data;
- while( m && p < nReturned ) {
+ int p = b.pos;
+ const char *d = b.data;
+ while( m && p < b.nReturned ) {
BSONObj o(d);
d += o.objsize();
p++;
@@ -198,6 +238,19 @@ namespace mongo {
}
}
+ bool DBClientCursor::peekError(BSONObj* error){
+ if( ! wasError ) return false;
+
+ vector<BSONObj> v;
+ peek(v, 1);
+
+ assert( v.size() == 1 );
+ assert( hasErrField( v[0] ) );
+
+ if( error ) *error = v[0].getOwned();
+ return true;
+ }
+
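peekError() complements nextSafe(): it reports a { $err : ... } reply without consuming it from the batch, and hands back an owned copy of the error object. A hedged usage sketch (names illustrative):

    BSONObj err;
    if ( cursor->peekError( &err ) ) {
        // err is owned (getOwned() above), so it stays valid after the cursor is gone
        log() << "query returned error: " << err << endl;
    }
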
void DBClientCursor::attach( AScopedConnection * conn ) {
assert( _scopedHost.size() == 0 );
assert( conn );
@@ -205,14 +258,20 @@ namespace mongo {
if ( conn->get()->type() == ConnectionString::SET ||
conn->get()->type() == ConnectionString::SYNC ) {
- _scopedHost = _client->getServerAddress();
+ if( _lazyHost.size() > 0 )
+ _scopedHost = _lazyHost;
+ else if( _client )
+ _scopedHost = _client->getServerAddress();
+ else
+ massert(14821, "No client or lazy client specified, cannot store multi-host connection.", false);
}
else {
_scopedHost = conn->getHost();
}
-
+
conn->done();
_client = 0;
+ _lazyHost = "";
}
DBClientCursor::~DBClientCursor() {
@@ -221,12 +280,12 @@ namespace mongo {
DESTRUCTOR_GUARD (
- if ( cursorId && _ownCursor ) {
- BufBuilder b;
- b.appendNum( (int)0 ); // reserved
+ if ( cursorId && _ownCursor && ! inShutdown() ) {
+ BufBuilder b;
+ b.appendNum( (int)0 ); // reserved
b.appendNum( (int)1 ); // number
b.appendNum( cursorId );
-
+
Message m;
m.setData( dbKillCursors , b.buf() , b.len() );
diff --git a/client/dbclientcursor.h b/client/dbclientcursor.h
index d176b89..977bd30 100644
--- a/client/dbclientcursor.h
+++ b/client/dbclientcursor.h
@@ -18,7 +18,7 @@
#pragma once
#include "../pch.h"
-#include "../util/message.h"
+#include "../util/net/message.h"
#include "../db/jsobj.h"
#include "../db/json.h"
#include <stack>
@@ -52,7 +52,7 @@ namespace mongo {
if you want to exhaust whatever data has been fetched to the client already but
then perhaps stop.
*/
- int objsLeftInBatch() const { _assertIfNull(); return _putBack.size() + nReturned - pos; }
+ int objsLeftInBatch() const { _assertIfNull(); return _putBack.size() + b.nReturned - b.pos; }
bool moreInCurrentBatch() { return objsLeftInBatch() > 0; }
/** next
@@ -71,11 +71,11 @@ namespace mongo {
/** throws AssertionException if get back { $err : ... } */
BSONObj nextSafe() {
BSONObj o = next();
- BSONElement e = o.firstElement();
- if( strcmp(e.fieldName(), "$err") == 0 ) {
+ if( strcmp(o.firstElementFieldName(), "$err") == 0 ) {
+ string s = "nextSafe(): " + o.toString();
if( logLevel >= 5 )
- log() << "nextSafe() error " << o.toString() << endl;
- uassert(13106, "nextSafe(): " + o.toString(), false);
+ log() << s << endl;
+ uasserted(13106, s);
}
return o;
}
@@ -86,11 +86,11 @@ namespace mongo {
WARNING: no support for _putBack yet!
*/
void peek(vector<BSONObj>&, int atMost);
- BSONObj peekOne(){
- vector<BSONObj> v;
- peek( v, 1 );
- return v.size() > 0 ? v[0] : BSONObj();
- }
+
+ /**
+ * peek ahead and see if an error occurred, and get the error if so.
+ */
+ bool peekError(BSONObj* error = NULL);
/**
iterate the rest of the cursor and return the number of items
@@ -109,13 +109,9 @@ namespace mongo {
'dead' may be preset yet some data still queued and locally
available from the dbclientcursor.
*/
- bool isDead() const {
- return !this || cursorId == 0;
- }
+ bool isDead() const { return !this || cursorId == 0; }
- bool tailable() const {
- return (opts & QueryOption_CursorTailable) != 0;
- }
+ bool tailable() const { return (opts & QueryOption_CursorTailable) != 0; }
/** see ResultFlagType (constants.h) for flag values
mostly these flags are for internal purposes -
@@ -137,12 +133,9 @@ namespace mongo {
fieldsToReturn(_fieldsToReturn),
opts(queryOptions),
batchSize(bs==1?2:bs),
- m(new Message()),
cursorId(),
- nReturned(),
- pos(),
- data(),
- _ownCursor( true ) {
+ _ownCursor( true ),
+ wasError( false ) {
}
DBClientCursor( DBClientBase* client, const string &_ns, long long _cursorId, int _nToReturn, int options ) :
@@ -151,11 +144,7 @@ namespace mongo {
nToReturn( _nToReturn ),
haveLimit( _nToReturn > 0 && !(options & QueryOption_CursorTailable)),
opts( options ),
- m(new Message()),
- cursorId( _cursorId ),
- nReturned(),
- pos(),
- data(),
+ cursorId(_cursorId),
_ownCursor( true ) {
}
@@ -170,11 +159,31 @@ namespace mongo {
void attach( AScopedConnection * conn );
+ /**
+ * actually does the query
+ */
+ bool init();
+
+ void initLazy( bool isRetry = false );
+ bool initLazyFinish( bool& retry );
+
+ class Batch : boost::noncopyable {
+ friend class DBClientCursor;
+ auto_ptr<Message> m;
+ int nReturned;
+ int pos;
+ const char *data;
+ public:
+ Batch() : m( new Message() ), nReturned(), pos(), data() { }
+ };
+
private:
friend class DBClientBase;
friend class DBClientConnection;
- bool init();
+
int nextBatchSize();
+
+ Batch b;
DBClientBase* _client;
string ns;
BSONObj query;
@@ -184,18 +193,18 @@ namespace mongo {
const BSONObj *fieldsToReturn;
int opts;
int batchSize;
- auto_ptr<Message> m;
stack< BSONObj > _putBack;
int resultFlags;
long long cursorId;
- int nReturned;
- int pos;
- const char *data;
- void dataReceived();
- void requestMore();
- void exhaustReceiveMore(); // for exhaust
bool _ownCursor; // see decouple()
string _scopedHost;
+ string _lazyHost;
+ bool wasError;
+
+ void dataReceived() { bool retry; string lazyHost; dataReceived( retry, lazyHost ); }
+ void dataReceived( bool& retry, string& lazyHost );
+ void requestMore();
+ void exhaustReceiveMore(); // for exhaust
// Don't call from a virtual function
void _assertIfNull() const { uassert(13348, "connection died", this); }
@@ -203,6 +212,9 @@ namespace mongo {
// non-copyable , non-assignable
DBClientCursor( const DBClientCursor& );
DBClientCursor& operator=( const DBClientCursor& );
+
+ // init pieces
+ void _assembleInit( Message& toSend );
};
/** iterate over objects in current batch only - will not cause a network call
diff --git a/client/distlock.cpp b/client/distlock.cpp
index 9ec98ea..cb71159 100644
--- a/client/distlock.cpp
+++ b/client/distlock.cpp
@@ -21,8 +21,7 @@
namespace mongo {
- static string lockPingNS = "config.lockpings";
- static string locksNS = "config.locks";
+ LabeledLevel DistributedLock::logLvl( 1 );
ThreadLocalValue<string> distLockIds("");
@@ -36,7 +35,7 @@ namespace mongo {
static void initModule() {
// cache process string
stringstream ss;
- ss << getHostName() << ":" << time(0) << ":" << rand();
+ ss << getHostName() << ":" << cmdLine.port << ":" << time(0) << ":" << rand();
_cachedProcessString = new string( ss.str() );
}
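
Embedding cmdLine.port in the cached process string makes it unique per server instance rather than per host, which matters once the per-process ping documents below drive lock takeover. With hypothetical values, the format changes roughly like this:

    // before:  "db1.example.com:1316011234:8372"         (host:time:rand)
    // after:   "db1.example.com:27017:1316011234:8372"   (host:port:time:rand)
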
@@ -59,116 +58,406 @@ namespace mongo {
return s;
}
- void _distLockPingThread( ConnectionString addr ) {
- setThreadName( "LockPinger" );
-
- log() << "creating dist lock ping thread for: " << addr << endl;
- static int loops = 0;
- while( ! inShutdown() ) {
+ class DistributedLockPinger {
+ public:
- string process = getDistLockProcess();
- log(4) << "dist_lock about to ping for: " << process << endl;
+ DistributedLockPinger()
+ : _mutex( "DistributedLockPinger" ) {
+ }
- try {
- ScopedDbConnection conn( addr );
-
- // refresh the entry corresponding to this process in the lockpings collection
- conn->update( lockPingNS ,
- BSON( "_id" << process ) ,
- BSON( "$set" << BSON( "ping" << DATENOW ) ) ,
- true );
- string err = conn->getLastError();
- if ( ! err.empty() ) {
- warning() << "dist_lock process: " << process << " pinging: " << addr << " failed: "
- << err << endl;
- conn.done();
- sleepsecs(30);
- continue;
- }
+ void _distLockPingThread( ConnectionString addr, string process, unsigned long long sleepTime ) {
+
+ setThreadName( "LockPinger" );
+
+ string pingId = pingThreadId( addr, process );
+
+ log( DistributedLock::logLvl - 1 ) << "creating distributed lock ping thread for " << addr
+ << " and process " << process
+ << " (sleeping for " << sleepTime << "ms)" << endl;
+
+ static int loops = 0;
+ while( ! inShutdown() && ! shouldKill( addr, process ) ) {
+
+ log( DistributedLock::logLvl + 2 ) << "distributed lock pinger '" << pingId << "' about to ping." << endl;
+
+ Date_t pingTime;
+
+ try {
+ ScopedDbConnection conn( addr );
+
+ pingTime = jsTime();
- // remove really old entries from the lockpings collection if they're not holding a lock
- // (this may happen if an instance of a process was taken down and no new instance came up to
- // replace it for a quite a while)
- // if the lock is taken, the take-over mechanism should handle the situation
- auto_ptr<DBClientCursor> c = conn->query( locksNS , BSONObj() );
- vector<string> pids;
- while ( c->more() ) {
- BSONObj lock = c->next();
- if ( ! lock["process"].eoo() ) {
- pids.push_back( lock["process"].valuestrsafe() );
+ // refresh the entry corresponding to this process in the lockpings collection
+ conn->update( DistributedLock::lockPingNS ,
+ BSON( "_id" << process ) ,
+ BSON( "$set" << BSON( "ping" << pingTime ) ) ,
+ true );
+
+ string err = conn->getLastError();
+ if ( ! err.empty() ) {
+ warning() << "pinging failed for distributed lock pinger '" << pingId << "'."
+ << causedBy( err ) << endl;
+ conn.done();
+
+ // Sleep for normal ping time
+ sleepmillis(sleepTime);
+ continue;
+ }
+
+ // remove really old entries from the lockpings collection if they're not holding a lock
+ // (this may happen if an instance of a process was taken down and no new instance came up to
+            // replace it for quite a while)
+ // if the lock is taken, the take-over mechanism should handle the situation
+ auto_ptr<DBClientCursor> c = conn->query( DistributedLock::locksNS , BSONObj() );
+ set<string> pids;
+ while ( c->more() ) {
+ BSONObj lock = c->next();
+ if ( ! lock["process"].eoo() ) {
+ pids.insert( lock["process"].valuestrsafe() );
+ }
+ }
+
+ Date_t fourDays = pingTime - ( 4 * 86400 * 1000 ); // 4 days
+ conn->remove( DistributedLock::lockPingNS , BSON( "_id" << BSON( "$nin" << pids ) << "ping" << LT << fourDays ) );
+ err = conn->getLastError();
+ if ( ! err.empty() ) {
+                        warning() << "ping cleanup for distributed lock pinger '" << pingId << "' failed."
+ << causedBy( err ) << endl;
+ conn.done();
+
+ // Sleep for normal ping time
+ sleepmillis(sleepTime);
+ continue;
+ }
+
+ // create index so remove is fast even with a lot of servers
+ if ( loops++ == 0 ) {
+ conn->ensureIndex( DistributedLock::lockPingNS , BSON( "ping" << 1 ) );
+ }
+
+ log( DistributedLock::logLvl - ( loops % 10 == 0 ? 1 : 0 ) ) << "cluster " << addr << " pinged successfully at " << pingTime
+ << " by distributed lock pinger '" << pingId
+ << "', sleeping for " << sleepTime << "ms" << endl;
+
+ // Remove old locks, if possible
+ // Make sure no one else is adding to this list at the same time
+ scoped_lock lk( _mutex );
+
+ int numOldLocks = _oldLockOIDs.size();
+ if( numOldLocks > 0 )
+ log( DistributedLock::logLvl - 1 ) << "trying to delete " << _oldLockOIDs.size() << " old lock entries for process " << process << endl;
+
+ bool removed = false;
+ for( list<OID>::iterator i = _oldLockOIDs.begin(); i != _oldLockOIDs.end();
+ i = ( removed ? _oldLockOIDs.erase( i ) : ++i ) ) {
+ removed = false;
+ try {
+ // Got OID from lock with id, so we don't need to specify id again
+ conn->update( DistributedLock::locksNS ,
+ BSON( "ts" << *i ),
+ BSON( "$set" << BSON( "state" << 0 ) ) );
+
+ // Either the update went through or it didn't, either way we're done trying to
+ // unlock
+ log( DistributedLock::logLvl - 1 ) << "handled late remove of old distributed lock with ts " << *i << endl;
+ removed = true;
+ }
+ catch( UpdateNotTheSame& ) {
+ log( DistributedLock::logLvl - 1 ) << "partially removed old distributed lock with ts " << *i << endl;
+ removed = true;
+ }
+ catch ( std::exception& e) {
+ warning() << "could not remove old distributed lock with ts " << *i
+ << causedBy( e ) << endl;
+ }
+
+ }
+
+ if( numOldLocks > 0 && _oldLockOIDs.size() > 0 ){
+ log( DistributedLock::logLvl - 1 ) << "not all old lock entries could be removed for process " << process << endl;
}
- }
- Date_t fourDays = jsTime() - ( 4 * 86400 * 1000 ); // 4 days
- conn->remove( lockPingNS , BSON( "_id" << BSON( "$nin" << pids ) << "ping" << LT << fourDays ) );
- err = conn->getLastError();
- if ( ! err.empty() ) {
- warning() << "dist_lock cleanup request from process: " << process << " to: " << addr
- << " failed: " << err << endl;
conn.done();
- sleepsecs(30);
- continue;
- }
- // create index so remove is fast even with a lot of servers
- if ( loops++ == 0 ) {
- conn->ensureIndex( lockPingNS , BSON( "ping" << 1 ) );
+ }
+ catch ( std::exception& e ) {
+ warning() << "distributed lock pinger '" << pingId << "' detected an exception while pinging."
+ << causedBy( e ) << endl;
}
- conn.done();
+ sleepmillis(sleepTime);
+ }
+
+ warning() << "removing distributed lock ping thread '" << pingId << "'" << endl;
+
+
+ if( shouldKill( addr, process ) )
+ finishKill( addr, process );
+
+ }
+
+ void distLockPingThread( ConnectionString addr, long long clockSkew, string processId, unsigned long long sleepTime ) {
+ try {
+ jsTimeVirtualThreadSkew( clockSkew );
+ _distLockPingThread( addr, processId, sleepTime );
}
catch ( std::exception& e ) {
- warning() << "dist_lock exception during ping: " << e.what() << endl;
+ error() << "unexpected error while running distributed lock pinger for " << addr << ", process " << processId << causedBy( e ) << endl;
}
+ catch ( ... ) {
+ error() << "unknown error while running distributed lock pinger for " << addr << ", process " << processId << endl;
+ }
+ }
- log( loops % 10 == 0 ? 0 : 1) << "dist_lock pinged successfully for: " << process << endl;
- sleepsecs(30);
+ string pingThreadId( const ConnectionString& conn, const string& processId ) {
+ return conn.toString() + "/" + processId;
}
- }
- void distLockPingThread( ConnectionString addr ) {
- try {
- _distLockPingThread( addr );
+ string got( DistributedLock& lock, unsigned long long sleepTime ) {
+
+ // Make sure we don't start multiple threads for a process id
+ scoped_lock lk( _mutex );
+
+ const ConnectionString& conn = lock.getRemoteConnection();
+ const string& processId = lock.getProcessId();
+ string s = pingThreadId( conn, processId );
+
+ // Ignore if we already have a pinging thread for this process.
+ if ( _seen.count( s ) > 0 ) return "";
+
+ // Check our clock skew
+ try {
+ if( lock.isRemoteTimeSkewed() ) {
+ throw LockException( str::stream() << "clock skew of the cluster " << conn.toString() << " is too far out of bounds to allow distributed locking." , 13650 );
+ }
+ }
+ catch( LockException& e) {
+ throw LockException( str::stream() << "error checking clock skew of cluster " << conn.toString() << causedBy( e ) , 13651);
+ }
+
+ boost::thread t( boost::bind( &DistributedLockPinger::distLockPingThread, this, conn, getJSTimeVirtualThreadSkew(), processId, sleepTime) );
+
+ _seen.insert( s );
+
+ return s;
}
- catch ( std::exception& e ) {
- error() << "unexpected error in distLockPingThread: " << e.what() << endl;
+
+ void addUnlockOID( const OID& oid ) {
+ // Modifying the lock from some other thread
+ scoped_lock lk( _mutex );
+ _oldLockOIDs.push_back( oid );
}
- catch ( ... ) {
- error() << "unexpected unknown error in distLockPingThread" << endl;
+
+ bool willUnlockOID( const OID& oid ) {
+ scoped_lock lk( _mutex );
+ return find( _oldLockOIDs.begin(), _oldLockOIDs.end(), oid ) != _oldLockOIDs.end();
}
- }
+ void kill( const ConnectionString& conn, const string& processId ) {
+ // Make sure we're in a consistent state before other threads can see us
+ scoped_lock lk( _mutex );
- class DistributedLockPinger {
- public:
- DistributedLockPinger()
- : _mutex( "DistributedLockPinger" ) {
+ string pingId = pingThreadId( conn, processId );
+
+ assert( _seen.count( pingId ) > 0 );
+ _kill.insert( pingId );
+
+ }
+
+ bool shouldKill( const ConnectionString& conn, const string& processId ) {
+ return _kill.count( pingThreadId( conn, processId ) ) > 0;
}
- void got( const ConnectionString& conn ) {
- string s = conn.toString();
+ void finishKill( const ConnectionString& conn, const string& processId ) {
+ // Make sure we're in a consistent state before other threads can see us
scoped_lock lk( _mutex );
- if ( _seen.count( s ) > 0 )
- return;
- boost::thread t( boost::bind( &distLockPingThread , conn ) );
- _seen.insert( s );
+
+ string pingId = pingThreadId( conn, processId );
+
+ _kill.erase( pingId );
+ _seen.erase( pingId );
+
}
+ set<string> _kill;
set<string> _seen;
mongo::mutex _mutex;
+ list<OID> _oldLockOIDs;
} distLockPinger;
- DistributedLock::DistributedLock( const ConnectionString& conn , const string& name , unsigned takeoverMinutes )
- : _conn(conn),_name(name),_takeoverMinutes(takeoverMinutes) {
- _id = BSON( "_id" << name );
- _ns = "config.locks";
- distLockPinger.got( conn );
+
+ const string DistributedLock::lockPingNS = "config.lockpings";
+ const string DistributedLock::locksNS = "config.locks";
+
+ /**
+     * Create a new distributed lock, potentially with a custom timeout. The ping interval and the
+     * allowed clock skew are both derived from the lock timeout ( lockTimeout / LOCK_SKEW_FACTOR ).
+ */
+ DistributedLock::DistributedLock( const ConnectionString& conn , const string& name , unsigned long long lockTimeout, bool asProcess )
+ : _conn(conn) , _name(name) , _id( BSON( "_id" << name ) ), _processId( asProcess ? getDistLockId() : getDistLockProcess() ),
+ _lockTimeout( lockTimeout == 0 ? LOCK_TIMEOUT : lockTimeout ), _maxClockSkew( _lockTimeout / LOCK_SKEW_FACTOR ), _maxNetSkew( _maxClockSkew ), _lockPing( _maxClockSkew ),
+ _mutex( "DistributedLock" )
+ {
+ log( logLvl - 1 ) << "created new distributed lock for " << name << " on " << conn
+ << " ( lock timeout : " << _lockTimeout
+ << ", ping interval : " << _lockPing << ", process : " << asProcess << " )" << endl;
+ }
+
+ Date_t DistributedLock::getRemoteTime() {
+ return DistributedLock::remoteTime( _conn, _maxNetSkew );
+ }
+
+ bool DistributedLock::isRemoteTimeSkewed() {
+ return !DistributedLock::checkSkew( _conn, NUM_LOCK_SKEW_CHECKS, _maxClockSkew, _maxNetSkew );
+ }
+
+ const ConnectionString& DistributedLock::getRemoteConnection() {
+ return _conn;
+ }
+
+ const string& DistributedLock::getProcessId() {
+ return _processId;
+ }
+
+ /**
+ * Returns the remote time as reported by the cluster or server. The maximum difference between the reported time
+ * and the actual time on the remote server (at the completion of the function) is the maxNetSkew
+ */
+ Date_t DistributedLock::remoteTime( const ConnectionString& cluster, unsigned long long maxNetSkew ) {
+
+ ConnectionString server( *cluster.getServers().begin() );
+ ScopedDbConnection conn( server );
+
+ BSONObj result;
+ long long delay;
+
+ try {
+ Date_t then = jsTime();
+ bool success = conn->runCommand( string("admin"), BSON( "serverStatus" << 1 ), result );
+ delay = jsTime() - then;
+
+ if( !success )
+ throw TimeNotFoundException( str::stream() << "could not get status from server "
+ << server.toString() << " in cluster " << cluster.toString()
+ << " to check time", 13647 );
+
+ // Make sure that our delay is not more than 2x our maximum network skew, since this is the max our remote
+ // time value can be off by if we assume a response in the middle of the delay.
+ if( delay > (long long) (maxNetSkew * 2) )
+ throw TimeNotFoundException( str::stream() << "server " << server.toString()
+ << " in cluster " << cluster.toString()
+ << " did not respond within max network delay of "
+ << maxNetSkew << "ms", 13648 );
+ }
+ catch(...) {
+ conn.done();
+ throw;
+ }
+
+ conn.done();
+
+ return result["localTime"].Date() - (delay / 2);
+
+ }
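
remoteTime() assumes the serverStatus reply was generated at the midpoint of the round trip, so the returned estimate can be off by at most half the measured delay; the delay > 2 * maxNetSkew guard above keeps that bound within maxNetSkew. A quick check with hypothetical numbers:

    // measured delay = 40ms, server reports localTime = 12:00:00.000
    // estimate = 12:00:00.000 - 40/2 ms = 11:59:59.980
    // worst-case error = delay / 2 = 20ms, which is <= maxNetSkew (30000ms by default)
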
+
+ bool DistributedLock::checkSkew( const ConnectionString& cluster, unsigned skewChecks, unsigned long long maxClockSkew, unsigned long long maxNetSkew ) {
+
+ vector<HostAndPort> servers = cluster.getServers();
+
+ if(servers.size() < 1) return true;
+
+ vector<long long> avgSkews;
+
+ for(unsigned i = 0; i < skewChecks; i++) {
+
+ // Find the average skew for each server
+ unsigned s = 0;
+ for(vector<HostAndPort>::iterator si = servers.begin(); si != servers.end(); ++si,s++) {
+
+ if(i == 0) avgSkews.push_back(0);
+
+ // Could check if this is self, but shouldn't matter since local network connection should be fast.
+ ConnectionString server( *si );
+
+ vector<long long> skew;
+
+ BSONObj result;
+
+ Date_t remote = remoteTime( server, maxNetSkew );
+ Date_t local = jsTime();
+
+ // Remote time can be delayed by at most MAX_NET_SKEW
+
+ // Skew is how much time we'd have to add to local to get to remote
+ avgSkews[s] += (long long) (remote - local);
+
+ log( logLvl + 1 ) << "skew from remote server " << server << " found: " << (long long) (remote - local) << endl;
+
+ }
+ }
+
+ // Analyze skews
+
+ long long serverMaxSkew = 0;
+ long long serverMinSkew = 0;
+
+ for(unsigned s = 0; s < avgSkews.size(); s++) {
+
+ long long avgSkew = (avgSkews[s] /= skewChecks);
+
+ // Keep track of max and min skews
+ if(s == 0) {
+ serverMaxSkew = avgSkew;
+ serverMinSkew = avgSkew;
+ }
+ else {
+ if(avgSkew > serverMaxSkew)
+ serverMaxSkew = avgSkew;
+ if(avgSkew < serverMinSkew)
+ serverMinSkew = avgSkew;
+ }
+
+ }
+
+ long long totalSkew = serverMaxSkew - serverMinSkew;
+
+ // Make sure our max skew is not more than our pre-set limit
+ if(totalSkew > (long long) maxClockSkew) {
+ log( logLvl + 1 ) << "total clock skew of " << totalSkew << "ms for servers " << cluster << " is out of " << maxClockSkew << "ms bounds." << endl;
+ return false;
+ }
+
+ log( logLvl + 1 ) << "total clock skew of " << totalSkew << "ms for servers " << cluster << " is in " << maxClockSkew << "ms bounds." << endl;
+ return true;
+ }
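
checkSkew() averages each server's observed skew over skewChecks rounds, then compares the spread of those averages (max minus min) against maxClockSkew, so only the relative skew between config servers matters, not their offset from this host. A small worked example with hypothetical averages:

    // three config servers with average skews of -10ms, +5ms and +25ms:
    //   serverMinSkew = -10ms, serverMaxSkew = +25ms
    //   totalSkew     = 25 - (-10) = 35ms
    // with the default maxClockSkew of 30000ms this is comfortably in bounds
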
+
+    // For use in testing; in practice the ping thread should run indefinitely.
+ bool DistributedLock::killPinger( DistributedLock& lock ) {
+ if( lock._threadId == "") return false;
+
+ distLockPinger.kill( lock._conn, lock._processId );
+ return true;
}
+ // Semantics of this method are basically that if the lock cannot be acquired, returns false, can be retried.
+    // Semantics of this method: if the lock cannot be acquired, it returns false and may be retried.
+ // If we are only trying to re-enter a currently held lock, reenter should be true.
+ // Note: reenter doesn't actually make this lock re-entrant in the normal sense, since it can still only
+ // be unlocked once, instead it is used to verify that the lock is already held.
+ bool DistributedLock::lock_try( const string& why , bool reenter, BSONObj * other ) {
+
+ // TODO: Start pinging only when we actually get the lock?
+ // If we don't have a thread pinger, make sure we shouldn't have one
+ if( _threadId == "" ){
+ scoped_lock lk( _mutex );
+ _threadId = distLockPinger.got( *this, _lockPing );
+ }
+
+ // This should always be true, if not, we are using the lock incorrectly.
+ assert( _name != "" );
- bool DistributedLock::lock_try( string why , BSONObj * other ) {
// write to dummy if 'other' is null
BSONObj dummyOther;
if ( other == NULL )
@@ -182,93 +471,240 @@ namespace mongo {
{
// make sure its there so we can use simple update logic below
- BSONObj o = conn->findOne( _ns , _id ).getOwned();
+ BSONObj o = conn->findOne( locksNS , _id ).getOwned();
+
+ // Case 1: No locks
if ( o.isEmpty() ) {
try {
- log(4) << "dist_lock inserting initial doc in " << _ns << " for lock " << _name << endl;
- conn->insert( _ns , BSON( "_id" << _name << "state" << 0 << "who" << "" ) );
+ log( logLvl ) << "inserting initial doc in " << locksNS << " for lock " << _name << endl;
+ conn->insert( locksNS , BSON( "_id" << _name << "state" << 0 << "who" << "" ) );
}
catch ( UserException& e ) {
- log() << "dist_lock could not insert initial doc: " << e << endl;
+ warning() << "could not insert initial doc for distributed lock " << _name << causedBy( e ) << endl;
}
}
-
+
+ // Case 2: A set lock that we might be able to force
else if ( o["state"].numberInt() > 0 ) {
+
+ string lockName = o["_id"].String() + string("/") + o["process"].String();
+
+ bool canReenter = reenter && o["process"].String() == _processId && ! distLockPinger.willUnlockOID( o["ts"].OID() ) && o["state"].numberInt() == 2;
+ if( reenter && ! canReenter ) {
+ log( logLvl - 1 ) << "not re-entering distributed lock " << lockName;
+ if( o["process"].String() != _processId ) log( logLvl - 1 ) << ", different process " << _processId << endl;
+                else if( o["state"].numberInt() != 2 ) log( logLvl - 1 ) << ", state not finalized" << endl;
+ else log( logLvl - 1 ) << ", ts " << o["ts"].OID() << " scheduled for late unlock" << endl;
+
+ // reset since we've been bounced by a previous lock not being where we thought it was,
+ // and should go through full forcing process if required.
+ // (in theory we should never see a ping here if used correctly)
+ *other = o; other->getOwned(); conn.done(); resetLastPing();
+ return false;
+ }
+
BSONObj lastPing = conn->findOne( lockPingNS , o["process"].wrap( "_id" ) );
if ( lastPing.isEmpty() ) {
- // if a lock is taken but there's no ping for it, we're in an inconsistent situation
- // if the lock holder (mongos or d) does not exist anymore, the lock could safely be removed
- // but we'd require analysis of the situation before a manual intervention
- error() << "config.locks: " << _name << " lock is taken by old process? "
- << "remove the following lock if the process is not active anymore: " << o << endl;
- *other = o;
- conn.done();
- return false;
+ log( logLvl ) << "empty ping found for process in lock '" << lockName << "'" << endl;
+                    // TODO: Using 0 as a "no time found" value will fail if dates roll over, but then, so will a lot.
+ lastPing = BSON( "_id" << o["process"].String() << "ping" << (Date_t) 0 );
}
- unsigned long long now = jsTime();
- unsigned long long pingTime = lastPing["ping"].Date();
-
- if ( now < pingTime ) {
- // clock skew
- warning() << "dist_lock has detected clock skew of " << ( pingTime - now ) << "ms" << endl;
- *other = o;
- conn.done();
- return false;
+ unsigned long long elapsed = 0;
+ unsigned long long takeover = _lockTimeout;
+
+ log( logLvl ) << "checking last ping for lock '" << lockName << "'" << " against process " << _lastPingCheck.get<0>() << " and ping " << _lastPingCheck.get<1>() << endl;
+
+ try {
+
+ Date_t remote = remoteTime( _conn );
+
+ // Timeout the elapsed time using comparisons of remote clock
+ // For non-finalized locks, timeout 15 minutes since last seen (ts)
+ // For finalized locks, timeout 15 minutes since last ping
+ bool recPingChange = o["state"].numberInt() == 2 && ( _lastPingCheck.get<0>() != lastPing["_id"].String() || _lastPingCheck.get<1>() != lastPing["ping"].Date() );
+ bool recTSChange = _lastPingCheck.get<3>() != o["ts"].OID();
+
+ if( recPingChange || recTSChange ) {
+ // If the ping has changed since we last checked, mark the current date and time
+ scoped_lock lk( _mutex );
+ _lastPingCheck = boost::tuple<string, Date_t, Date_t, OID>( lastPing["_id"].String().c_str(), lastPing["ping"].Date(), remote, o["ts"].OID() );
+ }
+ else {
+
+ // GOTCHA! Due to network issues, it is possible that the current time
+ // is less than the remote time. We *have* to check this here, otherwise
+ // we overflow and our lock breaks.
+ if(_lastPingCheck.get<2>() >= remote)
+ elapsed = 0;
+ else
+ elapsed = remote - _lastPingCheck.get<2>();
+ }
+
}
-
- unsigned long long elapsed = now - pingTime;
- elapsed = elapsed / ( 1000 * 60 ); // convert to minutes
-
- if ( elapsed > ( 60 * 24 * 365 * 100 ) /* 100 years */ ) {
- warning() << "distlock elapsed time seems impossible: " << lastPing << endl;
+ catch( LockException& e ) {
+
+ // Remote server cannot be found / is not responsive
+ warning() << "Could not get remote time from " << _conn << causedBy( e );
+ // If our config server is having issues, forget all the pings until we can see it again
+ resetLastPing();
+
}
-
- if ( elapsed <= _takeoverMinutes ) {
- log(1) << "dist_lock lock failed because taken by: " << o << " elapsed minutes: " << elapsed << endl;
- *other = o;
- conn.done();
+
+ if ( elapsed <= takeover && ! canReenter ) {
+ log( logLvl ) << "could not force lock '" << lockName << "' because elapsed time " << elapsed << " <= takeover time " << takeover << endl;
+ *other = o; other->getOwned(); conn.done();
return false;
}
-
- log() << "dist_lock forcefully taking over from: " << o << " elapsed minutes: " << elapsed << endl;
- conn->update( _ns , _id , BSON( "$set" << BSON( "state" << 0 ) ) );
- string err = conn->getLastError();
- if ( ! err.empty() ) {
- warning() << "dist_lock take over from: " << o << " failed: " << err << endl;
- *other = o.getOwned();
- other->getOwned();
- conn.done();
+ else if( elapsed > takeover && canReenter ) {
+                    log( logLvl - 1 ) << "not re-entering distributed lock '" << lockName << "' because elapsed time " << elapsed << " > takeover time " << takeover << endl;
+ *other = o; other->getOwned(); conn.done();
return false;
}
+ log( logLvl - 1 ) << ( canReenter ? "re-entering" : "forcing" ) << " lock '" << lockName << "' because "
+ << ( canReenter ? "re-entering is allowed, " : "" )
+ << "elapsed time " << elapsed << " > takeover time " << takeover << endl;
+
+ if( elapsed > takeover ) {
+
+                    // Lock may be forced; reset our timer whether that succeeds or fails.
+                    // Ensures that another timeout must elapse if something goes wrong here, and resets our
+                    // pristine ping state if acquired.
+ resetLastPing();
+
+ try {
+
+ // Check the clock skew again. If we check this before we get a lock
+ // and after the lock times out, we can be pretty sure the time is
+ // increasing at the same rate on all servers and therefore our
+ // timeout is accurate
+ uassert( 14023, str::stream() << "remote time in cluster " << _conn.toString() << " is now skewed, cannot force lock.", !isRemoteTimeSkewed() );
+
+ // Make sure we break the lock with the correct "ts" (OID) value, otherwise
+ // we can overwrite a new lock inserted in the meantime.
+ conn->update( locksNS , BSON( "_id" << _id["_id"].String() << "state" << o["state"].numberInt() << "ts" << o["ts"] ),
+ BSON( "$set" << BSON( "state" << 0 ) ) );
+
+ BSONObj err = conn->getLastErrorDetailed();
+ string errMsg = DBClientWithCommands::getLastErrorString(err);
+
+ // TODO: Clean up all the extra code to exit this method, probably with a refactor
+ if ( !errMsg.empty() || !err["n"].type() || err["n"].numberInt() < 1 ) {
+ ( errMsg.empty() ? log( logLvl - 1 ) : warning() ) << "Could not force lock '" << lockName << "' "
+ << ( !errMsg.empty() ? causedBy(errMsg) : string("(another force won)") ) << endl;
+ *other = o; other->getOwned(); conn.done();
+ return false;
+ }
+
+ }
+ catch( UpdateNotTheSame& ) {
+ // Ok to continue since we know we forced at least one lock document, and all lock docs
+ // are required for a lock to be held.
+ warning() << "lock forcing " << lockName << " inconsistent" << endl;
+ }
+ catch( std::exception& e ) {
+ conn.done();
+ throw LockException( str::stream() << "exception forcing distributed lock "
+ << lockName << causedBy( e ), 13660);
+ }
+
+ }
+ else {
+
+ assert( canReenter );
+
+ // Lock may be re-entered, reset our timer if succeeds or fails
+ // Not strictly necessary, but helpful for small timeouts where thread scheduling is significant.
+ // This ensures that two attempts are still required for a force if not acquired, and resets our
+ // state if we are acquired.
+ resetLastPing();
+
+ // Test that the lock is held by trying to update the finalized state of the lock to the same state
+ // if it does not update or does not update on all servers, we can't re-enter.
+ try {
+
+ // Test the lock with the correct "ts" (OID) value
+ conn->update( locksNS , BSON( "_id" << _id["_id"].String() << "state" << 2 << "ts" << o["ts"] ),
+ BSON( "$set" << BSON( "state" << 2 ) ) );
+
+ BSONObj err = conn->getLastErrorDetailed();
+ string errMsg = DBClientWithCommands::getLastErrorString(err);
+
+ // TODO: Clean up all the extra code to exit this method, probably with a refactor
+ if ( ! errMsg.empty() || ! err["n"].type() || err["n"].numberInt() < 1 ) {
+ ( errMsg.empty() ? log( logLvl - 1 ) : warning() ) << "Could not re-enter lock '" << lockName << "' "
+ << ( !errMsg.empty() ? causedBy(errMsg) : string("(not sure lock is held)") )
+ << " gle: " << err
+ << endl;
+ *other = o; other->getOwned(); conn.done();
+ return false;
+ }
+
+ }
+ catch( UpdateNotTheSame& ) {
+ // NOT ok to continue since our lock isn't held by all servers, so isn't valid.
+ warning() << "inconsistent state re-entering lock, lock " << lockName << " not held" << endl;
+ *other = o; other->getOwned(); conn.done();
+ return false;
+ }
+ catch( std::exception& e ) {
+ conn.done();
+ throw LockException( str::stream() << "exception re-entering distributed lock "
+ << lockName << causedBy( e ), 13660);
+ }
+
+ log( logLvl - 1 ) << "re-entered distributed lock '" << lockName << "'" << endl;
+ *other = o; other->getOwned(); conn.done();
+ return true;
+
+ }
+
+ log( logLvl - 1 ) << "lock '" << lockName << "' successfully forced" << endl;
+
+ // We don't need the ts value in the query, since we will only ever replace locks with state=0.
}
+ // Case 3: We have an expired lock
else if ( o["ts"].type() ) {
queryBuilder.append( o["ts"] );
}
}
- OID ts;
- ts.init();
+ // Always reset our ping if we're trying to get a lock, since getting a lock implies the lock state is open
+ // and no locks need to be forced. If anything goes wrong, we don't want to remember an old lock.
+ resetLastPing();
bool gotLock = false;
- BSONObj now;
+ BSONObj currLock;
- BSONObj lockDetails = BSON( "state" << 1 << "who" << getDistLockId() << "process" << getDistLockProcess() <<
- "when" << DATENOW << "why" << why << "ts" << ts );
+ BSONObj lockDetails = BSON( "state" << 1 << "who" << getDistLockId() << "process" << _processId <<
+ "when" << jsTime() << "why" << why << "ts" << OID::gen() );
BSONObj whatIWant = BSON( "$set" << lockDetails );
+
+ BSONObj query = queryBuilder.obj();
+
+ string lockName = _name + string("/") + _processId;
+
try {
- log(4) << "dist_lock about to aquire lock: " << lockDetails << endl;
- conn->update( _ns , queryBuilder.obj() , whatIWant );
+ // Main codepath to acquire lock
+
+            log( logLvl ) << "about to acquire distributed lock '" << lockName << "':\n"
+ << lockDetails.jsonString(Strict, true) << "\n"
+ << query.jsonString(Strict, true) << endl;
+
+ conn->update( locksNS , query , whatIWant );
+
+ BSONObj err = conn->getLastErrorDetailed();
+ string errMsg = DBClientWithCommands::getLastErrorString(err);
- BSONObj o = conn->getLastErrorDetailed();
- now = conn->findOne( _ns , _id );
+ currLock = conn->findOne( locksNS , _id );
- if ( o["n"].numberInt() == 0 ) {
- *other = now;
+ if ( !errMsg.empty() || !err["n"].type() || err["n"].numberInt() < 1 ) {
+ ( errMsg.empty() ? log( logLvl - 1 ) : warning() ) << "could not acquire lock '" << lockName << "' "
+ << ( !errMsg.empty() ? causedBy( errMsg ) : string("(another update won)") ) << endl;
+ *other = currLock;
other->getOwned();
- log() << "dist_lock error trying to aquire lock: " << lockDetails << " error: " << o << endl;
gotLock = false;
}
else {
@@ -277,63 +713,234 @@ namespace mongo {
}
catch ( UpdateNotTheSame& up ) {
+
// this means our update got through on some, but not others
- log(4) << "dist_lock lock did not propagate properly" << endl;
+            warning() << "distributed lock '" << lockName << "' did not propagate properly." << causedBy( up ) << endl;
+
+ // Overall protection derives from:
+ // All unlocking updates use the ts value when setting state to 0
+ // This ensures that during locking, we can override all smaller ts locks with
+ // our own safe ts value and not be unlocked afterward.
+ for ( unsigned i = 0; i < up.size(); i++ ) {
+
+ ScopedDbConnection indDB( up[i].first );
+ BSONObj indUpdate;
+
+ try {
+
+ indUpdate = indDB->findOne( locksNS , _id );
+
+ // If we override this lock in any way, grab and protect it.
+ // We assume/ensure that if a process does not have all lock documents, it is no longer
+ // holding the lock.
+ // Note - finalized locks may compete too, but we know they've won already if competing
+ // in this round. Cleanup of crashes during finalizing may take a few tries.
+ if( indUpdate["ts"] < lockDetails["ts"] || indUpdate["state"].numberInt() == 0 ) {
+
+ BSONObj grabQuery = BSON( "_id" << _id["_id"].String() << "ts" << indUpdate["ts"].OID() );
+
+ // Change ts so we won't be forced, state so we won't be relocked
+ BSONObj grabChanges = BSON( "ts" << lockDetails["ts"].OID() << "state" << 1 );
+
+ // Either our update will succeed, and we'll grab the lock, or it will fail b/c some other
+ // process grabbed the lock (which will change the ts), but the lock will be set until forcing
+ indDB->update( locksNS, grabQuery, BSON( "$set" << grabChanges ) );
+
+ indUpdate = indDB->findOne( locksNS, _id );
+
+ // Our lock should now be set until forcing.
+ assert( indUpdate["state"].numberInt() == 1 );
+
+ }
+ // else our lock is the same, in which case we're safe, or it's a bigger lock,
+ // in which case we won't need to protect anything since we won't have the lock.
+
+ }
+ catch( std::exception& e ) {
+ conn.done();
+ throw LockException( str::stream() << "distributed lock " << lockName
+ << " had errors communicating with individual server "
+                                          << up[i].first << causedBy( e ), 13661 );
+ }
- for ( unsigned i=0; i<up.size(); i++ ) {
- ScopedDbConnection temp( up[i].first );
- BSONObj temp2 = temp->findOne( _ns , _id );
+ assert( !indUpdate.isEmpty() );
- if ( now.isEmpty() || now["ts"] < temp2["ts"] ) {
- now = temp2.getOwned();
+ // Find max TS value
+ if ( currLock.isEmpty() || currLock["ts"] < indUpdate["ts"] ) {
+ currLock = indUpdate.getOwned();
}
- temp.done();
+ indDB.done();
+
}
- if ( now["ts"].OID() == ts ) {
- log(4) << "dist_lock completed lock propagation" << endl;
+ // Locks on all servers are now set and safe until forcing
+
+ if ( currLock["ts"] == lockDetails["ts"] ) {
+ log( logLvl - 1 ) << "lock update won, completing lock propagation for '" << lockName << "'" << endl;
gotLock = true;
- conn->update( _ns , _id , whatIWant );
}
else {
- log() << "dist_lock error trying to complete propagation" << endl;
+ log( logLvl - 1 ) << "lock update lost, lock '" << lockName << "' not propagated." << endl;
+
+ // Register the lock for deletion, to speed up failover
+ // Not strictly necessary, but helpful
+ distLockPinger.addUnlockOID( lockDetails["ts"].OID() );
+
gotLock = false;
}
}
+ catch( std::exception& e ) {
+ conn.done();
+ throw LockException( str::stream() << "exception creating distributed lock "
+ << lockName << causedBy( e ), 13663 );
+ }
- conn.done();
+ // Complete lock propagation
+ if( gotLock ) {
+
+ // This is now safe, since we know that no new locks will be placed on top of the ones we've checked for at
+ // least 15 minutes. Sets the state = 2, so that future clients can determine that the lock is truly set.
+ // The invariant for rollbacks is that we will never force locks with state = 2 and active pings, since that
+ // indicates the lock is active, but this means the process creating/destroying them must explicitly poll
+ // when something goes wrong.
+ try {
+
+ BSONObjBuilder finalLockDetails;
+ BSONObjIterator bi( lockDetails );
+ while( bi.more() ) {
+ BSONElement el = bi.next();
+ if( (string) ( el.fieldName() ) == "state" )
+ finalLockDetails.append( "state", 2 );
+ else finalLockDetails.append( el );
+ }
+
+ conn->update( locksNS , _id , BSON( "$set" << finalLockDetails.obj() ) );
+
+ BSONObj err = conn->getLastErrorDetailed();
+ string errMsg = DBClientWithCommands::getLastErrorString(err);
+
+ currLock = conn->findOne( locksNS , _id );
- log(2) << "dist_lock lock gotLock: " << gotLock << " now: " << now << endl;
+ if ( !errMsg.empty() || !err["n"].type() || err["n"].numberInt() < 1 ) {
+ warning() << "could not finalize winning lock " << lockName
+ << ( !errMsg.empty() ? causedBy( errMsg ) : " (did not update lock) " ) << endl;
+ gotLock = false;
+ }
+ else {
+ // SUCCESS!
+ gotLock = true;
+ }
+
+ }
+ catch( std::exception& e ) {
+ conn.done();
+
+ // Register the bad final lock for deletion, in case it exists
+ distLockPinger.addUnlockOID( lockDetails["ts"].OID() );
+
+ throw LockException( str::stream() << "exception finalizing winning lock"
+ << causedBy( e ), 13662 );
+ }
+
+ }
+
+ *other = currLock;
+ other->getOwned();
+
+ // Log our lock results
+ if(gotLock)
+ log( logLvl - 1 ) << "distributed lock '" << lockName << "' acquired, ts : " << currLock["ts"].OID() << endl;
+ else
+ log( logLvl - 1 ) << "distributed lock '" << lockName << "' was not acquired." << endl;
+
+ conn.done();
return gotLock;
}
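
lock_try() above is easiest to follow as a small state machine kept in the lock document's integer "state" field; a summary of the states and transitions as this file uses them (labels are descriptive only):

    // state 0 : unlocked              -- any process may try to take the lock
    // state 1 : locked, not finalized -- the initial $set won, but propagation can still fail
    // state 2 : locked, finalized     -- safe to re-enter; forceable only after _lockTimeout
    //
    // transitions: 0 -> 1 (acquire), 1 -> 2 (finalize), 1 or 2 -> 0 (unlock, or force after timeout)
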
- void DistributedLock::unlock() {
+ // Unlock now takes an optional pointer to the lock, so you can be specific about which
+ // particular lock you want to unlock. This is required when the config server is down,
+ // and so cannot tell you what lock ts you should try later.
+ void DistributedLock::unlock( BSONObj* oldLockPtr ) {
+
+ assert( _name != "" );
+
+ string lockName = _name + string("/") + _processId;
+
const int maxAttempts = 3;
int attempted = 0;
+
+ BSONObj oldLock;
+ if( oldLockPtr ) oldLock = *oldLockPtr;
+
while ( ++attempted <= maxAttempts ) {
+ ScopedDbConnection conn( _conn );
+
try {
- ScopedDbConnection conn( _conn );
- conn->update( _ns , _id, BSON( "$set" << BSON( "state" << 0 ) ) );
- log(2) << "dist_lock unlock: " << conn->findOne( _ns , _id ) << endl;
- conn.done();
- return;
+ if( oldLock.isEmpty() )
+ oldLock = conn->findOne( locksNS, _id );
+
+ if( oldLock["state"].eoo() || oldLock["state"].numberInt() != 2 || oldLock["ts"].eoo() ) {
+ warning() << "cannot unlock invalid distributed lock " << oldLock << endl;
+ conn.done();
+ break;
+ }
+ // Use ts when updating lock, so that new locks can be sure they won't get trampled.
+ conn->update( locksNS ,
+ BSON( "_id" << _id["_id"].String() << "ts" << oldLock["ts"].OID() ),
+ BSON( "$set" << BSON( "state" << 0 ) ) );
+ // Check that the lock was actually unlocked... if not, try again
+ BSONObj err = conn->getLastErrorDetailed();
+ string errMsg = DBClientWithCommands::getLastErrorString(err);
+
+ if ( !errMsg.empty() || !err["n"].type() || err["n"].numberInt() < 1 ){
+ warning() << "distributed lock unlock update failed, retrying "
+ << ( errMsg.empty() ? causedBy( "( update not registered )" ) : causedBy( errMsg ) ) << endl;
+ conn.done();
+ continue;
+ }
+
+ log( logLvl - 1 ) << "distributed lock '" << lockName << "' unlocked. " << endl;
+ conn.done();
+ return;
+ }
+ catch( UpdateNotTheSame& ) {
+ log( logLvl - 1 ) << "distributed lock '" << lockName << "' unlocked (messily). " << endl;
+ conn.done();
+ break;
}
catch ( std::exception& e) {
- log( LL_WARNING ) << "dist_lock " << _name << " failed to contact config server in unlock attempt "
- << attempted << ": " << e.what() << endl;
+ warning() << "distributed lock '" << lockName << "' failed unlock attempt."
+ << causedBy( e ) << endl;
- sleepsecs(1 << attempted);
+ conn.done();
+ // TODO: If our lock timeout is small, sleeping this long may be unsafe.
+ if( attempted != maxAttempts) sleepsecs(1 << attempted);
}
}
- log( LL_WARNING ) << "dist_lock couldn't consumate unlock request. " << "Lock " << _name
- << " will be taken over after " << _takeoverMinutes << " minutes timeout" << endl;
+ if( attempted > maxAttempts && ! oldLock.isEmpty() && ! oldLock["ts"].eoo() ) {
+
+ log( logLvl - 1 ) << "could not unlock distributed lock with ts " << oldLock["ts"].OID()
+ << ", will attempt again later" << endl;
+
+ // We couldn't unlock the lock at all, so try again later in the pinging thread...
+ distLockPinger.addUnlockOID( oldLock["ts"].OID() );
+ }
+ else if( attempted > maxAttempts ) {
+ warning() << "could not unlock untracked distributed lock, a manual force may be required" << endl;
+ }
+
+ warning() << "distributed lock '" << lockName << "' couldn't consummate unlock request. "
+ << "lock may be taken over after " << ( _lockTimeout / (60 * 1000) )
+ << " minutes timeout." << endl;
}
+
+
}
diff --git a/client/distlock.h b/client/distlock.h
index 753a241..8985672 100644
--- a/client/distlock.h
+++ b/client/distlock.h
@@ -23,9 +23,42 @@
#include "redef_macros.h"
#include "syncclusterconnection.h"
+#define LOCK_TIMEOUT (15 * 60 * 1000)
+#define LOCK_SKEW_FACTOR (30)
+#define LOCK_PING (LOCK_TIMEOUT / LOCK_SKEW_FACTOR)
+#define MAX_LOCK_NET_SKEW (LOCK_TIMEOUT / LOCK_SKEW_FACTOR)
+#define MAX_LOCK_CLOCK_SKEW (LOCK_TIMEOUT / LOCK_SKEW_FACTOR)
+#define NUM_LOCK_SKEW_CHECKS (3)
+
+// The maximum clock skew we need to handle between config servers is
+// 2 * MAX_LOCK_NET_SKEW + MAX_LOCK_CLOCK_SKEW.
+
+// Net effect of *this* clock being slow is effectively a multiplier on the max net skew
+// and a linear increase or decrease of the max clock skew.
+
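With the defaults above, the derived intervals work out as follows (a quick check of the arithmetic):

    // LOCK_TIMEOUT                      = 15 * 60 * 1000    = 900000 ms (15 minutes)
    // LOCK_PING = MAX_LOCK_NET_SKEW
    //           = MAX_LOCK_CLOCK_SKEW   = 900000 / 30       =  30000 ms (30 seconds)
    // max handled config clock skew     = 2 * 30000 + 30000 =  90000 ms
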
namespace mongo {
/**
+ * Exception class to encapsulate exceptions while managing distributed locks
+ */
+ class LockException : public DBException {
+ public:
+ LockException( const char * msg , int code ) : DBException( msg, code ) {}
+ LockException( const string& msg, int code ) : DBException( msg, code ) {}
+ virtual ~LockException() throw() { }
+ };
+
+ /**
+ * Indicates an error in retrieving time values from remote servers.
+ */
+ class TimeNotFoundException : public LockException {
+ public:
+ TimeNotFoundException( const char * msg , int code ) : LockException( msg, code ) {}
+ TimeNotFoundException( const string& msg, int code ) : LockException( msg, code ) {}
+ virtual ~TimeNotFoundException() throw() { }
+ };
+
+ /**
* The distributed lock is a configdb backed way of synchronizing system-wide tasks. A task must be identified by a
* unique name across the system (e.g., "balancer"). A lock is taken by writing a document in the configdb's locks
* collection with that name.
@@ -36,53 +69,155 @@ namespace mongo {
class DistributedLock {
public:
+ static LabeledLevel logLvl;
+
/**
* The constructor does not connect to the configdb yet and constructing does not mean the lock was acquired.
* Construction does trigger a lock "pinging" mechanism, though.
*
* @param conn address of config(s) server(s)
* @param name identifier for the lock
- * @param takeoverMinutes how long can the log go "unpinged" before a new attempt to lock steals it (in minutes)
+         * @param lockTimeout how long the lock can go "unpinged" before a new attempt to lock steals it
+         *        (in ms; 0 means the default LOCK_TIMEOUT)
+         * @param asProcess take the lock per process (getDistLockId) rather than per server (getDistLockProcess)
+ *
*/
- DistributedLock( const ConnectionString& conn , const string& name , unsigned takeoverMinutes = 15 );
+ DistributedLock( const ConnectionString& conn , const string& name , unsigned long long lockTimeout = 0, bool asProcess = false );
+ ~DistributedLock(){};
/**
- * Attempts to aquire 'this' lock, checking if it could or should be stolen from the previous holder. Please
+ * Attempts to acquire 'this' lock, checking if it could or should be stolen from the previous holder. Please
* consider using the dist_lock_try construct to acquire this lock in an exception safe way.
*
* @param why human readable description of why the lock is being taken (used to log)
- * @param other configdb's lock document that is currently holding the lock, if lock is taken
+         * @param reenter whether this is a lock re-entry or a new lock
+ * @param other configdb's lock document that is currently holding the lock, if lock is taken, or our own lock
+ * details if not
* @return true if it managed to grab the lock
*/
- bool lock_try( string why , BSONObj * other = 0 );
+ bool lock_try( const string& why , bool reenter = false, BSONObj * other = 0 );
/**
* Releases a previously taken lock.
*/
- void unlock();
+ void unlock( BSONObj* oldLockPtr = NULL );
+
+ Date_t getRemoteTime();
+
+ bool isRemoteTimeSkewed();
+
+ const string& getProcessId();
+
+ const ConnectionString& getRemoteConnection();
+
+ /**
+ * Check the skew between a cluster of servers
+ */
+ static bool checkSkew( const ConnectionString& cluster, unsigned skewChecks = NUM_LOCK_SKEW_CHECKS, unsigned long long maxClockSkew = MAX_LOCK_CLOCK_SKEW, unsigned long long maxNetSkew = MAX_LOCK_NET_SKEW );
+
+ /**
+ * Get the remote time from a server or cluster
+ */
+ static Date_t remoteTime( const ConnectionString& cluster, unsigned long long maxNetSkew = MAX_LOCK_NET_SKEW );
+
+ static bool killPinger( DistributedLock& lock );
+
+ /**
+ * Namespace for lock pings
+ */
+ static const string lockPingNS;
+
+ /**
+ * Namespace for locks
+ */
+ static const string locksNS;
+
+ const ConnectionString _conn;
+ const string _name;
+ const BSONObj _id;
+ const string _processId;
+
+ // Timeout for lock, usually LOCK_TIMEOUT
+ const unsigned long long _lockTimeout;
+ const unsigned long long _maxClockSkew;
+ const unsigned long long _maxNetSkew;
+ const unsigned long long _lockPing;
private:
- ConnectionString _conn;
- string _name;
- unsigned _takeoverMinutes;
- string _ns;
- BSONObj _id;
+ void resetLastPing(){
+ scoped_lock lk( _mutex );
+ _lastPingCheck = boost::tuple<string, Date_t, Date_t, OID>();
+ }
+
+ mongo::mutex _mutex;
+
+ // Data from last check of process with ping time
+ boost::tuple<string, Date_t, Date_t, OID> _lastPingCheck;
+ // May or may not exist, depending on startup
+ string _threadId;
+
};
class dist_lock_try {
public:
+
+ dist_lock_try() : _lock(NULL), _got(false) {}
+
+ dist_lock_try( const dist_lock_try& that ) : _lock(that._lock), _got(that._got), _other(that._other) {
+ _other.getOwned();
+
+ // Make sure the lock ownership passes to this object,
+ // so we only unlock once.
+ ((dist_lock_try&) that)._got = false;
+ ((dist_lock_try&) that)._lock = NULL;
+ ((dist_lock_try&) that)._other = BSONObj();
+ }
+
+ // Needed so we can handle lock exceptions in context of lock try.
+ dist_lock_try& operator=( const dist_lock_try& that ){
+
+ if( this == &that ) return *this;
+
+ _lock = that._lock;
+ _got = that._got;
+ _other = that._other;
+ _other.getOwned();
+ _why = that._why;
+
+ // Make sure the lock ownership passes to this object,
+ // so we only unlock once.
+ ((dist_lock_try&) that)._got = false;
+ ((dist_lock_try&) that)._lock = NULL;
+ ((dist_lock_try&) that)._other = BSONObj();
+
+ return *this;
+ }
+
dist_lock_try( DistributedLock * lock , string why )
- : _lock(lock) {
- _got = _lock->lock_try( why , &_other );
+ : _lock(lock), _why(why) {
+ _got = _lock->lock_try( why , false , &_other );
}
~dist_lock_try() {
if ( _got ) {
- _lock->unlock();
+ assert( ! _other.isEmpty() );
+ _lock->unlock( &_other );
}
}
+ bool reestablish(){
+ return retry();
+ }
+
+ bool retry() {
+ assert( _lock );
+ assert( _got );
+ assert( ! _other.isEmpty() );
+
+ return _got = _lock->lock_try( _why , true, &_other );
+ }
+
bool got() const { return _got; }
BSONObj other() const { return _other; }
@@ -90,6 +225,7 @@ namespace mongo {
DistributedLock * _lock;
bool _got;
BSONObj _other;
+ string _why;
};
}
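
dist_lock_try is the exception-safe way to take a DistributedLock; the copy constructor and assignment operator above transfer ownership so that unlock() runs exactly once, in the destructor of whichever copy ends up owning the acquisition. A minimal usage sketch (the connection string, lock name and why-string are all illustrative):

    ConnectionString configCS( "cfg1.example.net,cfg2.example.net,cfg3.example.net", ConnectionString::SYNC );
    DistributedLock balancerLock( configCS, "balancer" );

    dist_lock_try attempt( &balancerLock, "rebalancing chunks" );
    if ( attempt.got() ) {
        // ... critical section; the lock is released when 'attempt' goes out of scope ...
    }
    else {
        log() << "lock busy, currently held by: " << attempt.other() << endl;
    }
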
diff --git a/client/distlock_test.cpp b/client/distlock_test.cpp
index 83d143f..42a1c48 100644
--- a/client/distlock_test.cpp
+++ b/client/distlock_test.cpp
@@ -15,85 +15,123 @@
* limitations under the License.
*/
+#include <iostream>
#include "../pch.h"
#include "dbclient.h"
#include "distlock.h"
#include "../db/commands.h"
+#include "../util/bson_util.h"
+
+// Modify some config options for the RNG, since they cause MSVC to fail
+#include <boost/config.hpp>
+
+#if defined(BOOST_MSVC) && defined(BOOST_NO_MEMBER_TEMPLATE_FRIENDS)
+#undef BOOST_NO_MEMBER_TEMPLATE_FRIENDS
+#define BOOST_RNG_HACK
+#endif
+
+// Well, sort-of cross-platform RNG
+#include <boost/random/mersenne_twister.hpp>
+
+#ifdef BOOST_RNG_HACK
+#define BOOST_NO_MEMBER_TEMPLATE_FRIENDS
+#undef BOOST_RNG_HACK
+#endif
+
+
+#include <boost/random/uniform_int.hpp>
+#include <boost/random/variate_generator.hpp>
+
+
+// TODO: Make a method in BSONObj if useful, don't modify for now
+#define string_field(obj, name, def) ( obj.hasField(name) ? obj[name].String() : def )
+#define number_field(obj, name, def) ( obj.hasField(name) ? obj[name].Number() : def )
namespace mongo {
- class TestDistLockWithSync : public Command {
+ class TestDistLockWithSync: public Command {
public:
- TestDistLockWithSync() : Command( "_testDistLockWithSyncCluster" ) {}
- virtual void help( stringstream& help ) const {
+ TestDistLockWithSync() :
+ Command("_testDistLockWithSyncCluster") {
+ }
+ virtual void help(stringstream& help) const {
help << "should not be calling this directly" << endl;
}
- virtual bool slaveOk() const { return false; }
- virtual bool adminOnly() const { return true; }
- virtual LockType locktype() const { return NONE; }
+ virtual bool slaveOk() const {
+ return false;
+ }
+ virtual bool adminOnly() const {
+ return true;
+ }
+ virtual LockType locktype() const {
+ return NONE;
+ }
static void runThread() {
- while ( keepGoing ) {
- if ( current->lock_try( "test" ) ) {
+ while (keepGoing) {
+ if (current->lock_try( "test" )) {
count++;
int before = count;
- sleepmillis( 3 );
+ sleepmillis(3);
int after = count;
-
- if ( after != before ) {
- error() << " before: " << before << " after: " << after << endl;
+
+ if (after != before) {
+ error() << " before: " << before << " after: " << after
+ << endl;
}
-
+
current->unlock();
}
}
}
-
- bool run(const string& , BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool) {
+
+ bool run(const string&, BSONObj& cmdObj, int, string& errmsg,
+ BSONObjBuilder& result, bool) {
Timer t;
- DistributedLock lk( ConnectionString( cmdObj["host"].String() , ConnectionString::SYNC ), "testdistlockwithsync" );
+ DistributedLock lk(ConnectionString(cmdObj["host"].String(),
+ ConnectionString::SYNC), "testdistlockwithsync", 0, 0);
current = &lk;
count = 0;
gotit = 0;
errors = 0;
keepGoing = true;
-
+
vector<shared_ptr<boost::thread> > l;
- for ( int i=0; i<4; i++ ) {
- l.push_back( shared_ptr<boost::thread>( new boost::thread( runThread ) ) );
+ for (int i = 0; i < 4; i++) {
+ l.push_back(
+ shared_ptr<boost::thread> (new boost::thread(runThread)));
}
-
+
int secs = 10;
- if ( cmdObj["secs"].isNumber() )
+ if (cmdObj["secs"].isNumber())
secs = cmdObj["secs"].numberInt();
- sleepsecs( secs );
+ sleepsecs(secs);
keepGoing = false;
- for ( unsigned i=0; i<l.size(); i++ )
+ for (unsigned i = 0; i < l.size(); i++)
l[i]->join();
current = 0;
- result.append( "count" , count );
- result.append( "gotit" , gotit );
- result.append( "errors" , errors );
- result.append( "timeMS" , t.millis() );
+ result.append("count", count);
+ result.append("gotit", gotit);
+ result.append("errors", errors);
+ result.append("timeMS", t.millis());
return errors == 0;
}
-
+
// variables for test
static DistributedLock * current;
static int gotit;
static int errors;
static AtomicUInt count;
-
+
static bool keepGoing;
} testDistLockWithSyncCmd;
-
DistributedLock * TestDistLockWithSync::current;
AtomicUInt TestDistLockWithSync::count;
int TestDistLockWithSync::gotit;
@@ -101,4 +139,300 @@ namespace mongo {
bool TestDistLockWithSync::keepGoing;
+
+ class TestDistLockWithSkew: public Command {
+ public:
+
+ static const int logLvl = 1;
+
+ TestDistLockWithSkew() :
+ Command("_testDistLockWithSkew") {
+ }
+ virtual void help(stringstream& help) const {
+ help << "should not be calling this directly" << endl;
+ }
+
+ virtual bool slaveOk() const {
+ return false;
+ }
+ virtual bool adminOnly() const {
+ return true;
+ }
+ virtual LockType locktype() const {
+ return NONE;
+ }
+
+ void runThread(ConnectionString& hostConn, unsigned threadId, unsigned seed,
+ BSONObj& cmdObj, BSONObjBuilder& result) {
+
+ stringstream ss;
+ ss << "thread-" << threadId;
+ setThreadName(ss.str().c_str());
+
+ // Lock name
+ string lockName = string_field(cmdObj, "lockName", this->name + "_lock");
+
+ // Range of clock skew in diff threads
+ int skewRange = (int) number_field(cmdObj, "skewRange", 1);
+
+ // How long to wait with the lock
+ int threadWait = (int) number_field(cmdObj, "threadWait", 30);
+ if(threadWait <= 0) threadWait = 1;
+
+ // Max amount of time (ms) a thread waits before checking the lock again
+ int threadSleep = (int) number_field(cmdObj, "threadSleep", 30);
+ if(threadSleep <= 0) threadSleep = 1;
+
+            // How long (in ms) until the lock may be forcibly taken over; compared locally only
+ unsigned long long takeoverMS = (unsigned long long) number_field(cmdObj, "takeoverMS", 0);
+
+            // Whether some threads should hang while holding the lock (thread ids divisible by hangThreads never unlock)
+ int hangThreads = (int) number_field(cmdObj, "hangThreads", 0);
+
+
+ boost::mt19937 gen((boost::mt19937::result_type) seed);
+
+ boost::variate_generator<boost::mt19937&, boost::uniform_int<> > randomSkew(gen, boost::uniform_int<>(0, skewRange));
+ boost::variate_generator<boost::mt19937&, boost::uniform_int<> > randomWait(gen, boost::uniform_int<>(1, threadWait));
+ boost::variate_generator<boost::mt19937&, boost::uniform_int<> > randomSleep(gen, boost::uniform_int<>(1, threadSleep));
+
+
+ int skew = 0;
+ if (!lock.get()) {
+
+                // Pick a skew; the first two threads pin the extremes of the range
+ if(threadId == 0)
+ skew = -skewRange / 2;
+ else if(threadId == 1)
+ skew = skewRange / 2;
+ else skew = randomSkew() - (skewRange / 2);
+
+ // Skew this thread
+ jsTimeVirtualThreadSkew( skew );
+
+ log() << "Initializing lock with skew of " << skew << " for thread " << threadId << endl;
+
+ lock.reset(new DistributedLock(hostConn, lockName, takeoverMS, true ));
+
+ log() << "Skewed time " << jsTime() << " for thread " << threadId << endl
+ << " max wait (with lock: " << threadWait << ", after lock: " << threadSleep << ")" << endl
+                      << "  takeover in " << takeoverMS << " (ms remote)" << endl;
+
+ }
+
+ DistributedLock* myLock = lock.get();
+
+ bool errors = false;
+ BSONObj lockObj;
+ while (keepGoing) {
+ try {
+
+ if (myLock->lock_try("Testing distributed lock with skew.", false, &lockObj )) {
+
+ log() << "**** Locked for thread " << threadId << " with ts " << lockObj["ts"] << endl;
+
+ if( count % 2 == 1 && ! myLock->lock_try( "Testing lock re-entry.", true ) ) {
+ errors = true;
+ log() << "**** !Could not re-enter lock already held" << endl;
+ break;
+ }
+
+ if( count % 3 == 1 && myLock->lock_try( "Testing lock non-re-entry.", false ) ) {
+ errors = true;
+ log() << "**** !Invalid lock re-entry" << endl;
+ break;
+ }
+
+ count++;
+ int before = count;
+ int sleep = randomWait();
+ sleepmillis(sleep);
+ int after = count;
+
+ if(after != before) {
+ errors = true;
+ log() << "**** !Bad increment while sleeping with lock for: " << sleep << "ms" << endl;
+ break;
+ }
+
+                        // Unlock unless this thread is designated to hang with the lock held...
+ if(hangThreads == 0 || threadId % hangThreads != 0) {
+ log() << "**** Unlocking for thread " << threadId << " with ts " << lockObj["ts"] << endl;
+ myLock->unlock( &lockObj );
+ }
+ else {
+ log() << "**** Not unlocking for thread " << threadId << endl;
+ DistributedLock::killPinger( *myLock );
+ // We're simulating a crashed process...
+ break;
+ }
+ }
+
+ }
+ catch( LockException& e ) {
+ log() << "*** !Could not try distributed lock." << causedBy( e ) << endl;
+ break;
+ }
+
+ sleepmillis(randomSleep());
+ }
+
+ result << "errors" << errors
+ << "skew" << skew
+ << "takeover" << (long long) takeoverMS
+ << "localTimeout" << (takeoverMS > 0);
+
+ }
+
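To make the tunables parsed at the top of runThread concrete, a hypothetical command document exercising all of them might look like this (every value invented for illustration):

    BSONObj cmd = BSON( "_testDistLockWithSkew" << 1
                        << "host" << "cfg1:30000,cfg2:30000,cfg3:30000"
                        << "lockName" << "_testDistLockWithSkew_lock"
                        << "skewRange" << 500    // ms of virtual clock skew spread over threads
                        << "threadWait" << 30    // max ms slept while holding the lock
                        << "threadSleep" << 30   // max ms between lock attempts
                        << "takeoverMS" << 10000 // lock may be forced after ~10s
                        << "hangThreads" << 4 ); // every 4th thread id "crashes" with the lock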
+        // Stub; not currently exercised by run() below.
+        void test(ConnectionString& hostConn, string& lockName, unsigned seed) {
+            return;
+        }
+
+ bool run(const string&, BSONObj& cmdObj, int, string& errmsg,
+ BSONObjBuilder& result, bool) {
+
+ Timer t;
+
+ ConnectionString hostConn(cmdObj["host"].String(),
+ ConnectionString::SYNC);
+
+ unsigned seed = (unsigned) number_field(cmdObj, "seed", 0);
+ int numThreads = (int) number_field(cmdObj, "numThreads", 4);
+ int wait = (int) number_field(cmdObj, "wait", 10000);
+
+            log() << "Starting " << this->name << " with:" << endl
+ << " seed: " << seed << endl
+ << " numThreads: " << numThreads << endl
+ << " total wait: " << wait << endl << endl;
+
+ // Skew host clocks if needed
+ try {
+ skewClocks( hostConn, cmdObj );
+ }
+            catch( DBException& e ) { // catch by reference to avoid slicing
+ errmsg = str::stream() << "Clocks could not be skewed." << causedBy( e );
+ return false;
+ }
+
+ count = 0;
+ keepGoing = true;
+
+ vector<shared_ptr<boost::thread> > threads;
+ vector<shared_ptr<BSONObjBuilder> > results;
+ for (int i = 0; i < numThreads; i++) {
+ results.push_back(shared_ptr<BSONObjBuilder> (new BSONObjBuilder()));
+ threads.push_back(shared_ptr<boost::thread> (new boost::thread(
+ boost::bind(&TestDistLockWithSkew::runThread, this,
+ hostConn, (unsigned) i, seed + i, boost::ref(cmdObj),
+ boost::ref(*(results[i].get()))))));
+ }
+
+ sleepsecs(wait / 1000);
+ keepGoing = false;
+
+ bool errors = false;
+ for (unsigned i = 0; i < threads.size(); i++) {
+ threads[i]->join();
+ errors = errors || results[i].get()->obj()["errors"].Bool();
+ }
+
+ result.append("count", count);
+ result.append("errors", errors);
+ result.append("timeMS", t.millis());
+
+ return !errors;
+
+ }
+
+ /**
+ * Skews the clocks of a remote cluster by a particular amount, specified by
+ * the "skewHosts" element in a BSONObj.
+ */
+ static void skewClocks( ConnectionString& cluster, BSONObj& cmdObj ) {
+
+ vector<long long> skew;
+ if(cmdObj.hasField("skewHosts")) {
+ bsonArrToNumVector<long long>(cmdObj["skewHosts"], skew);
+ }
+ else {
+ log( logLvl ) << "No host clocks to skew." << endl;
+ return;
+ }
+
+ log( logLvl ) << "Skewing clocks of hosts " << cluster << endl;
+
+ unsigned s = 0;
+ for(vector<long long>::iterator i = skew.begin(); i != skew.end(); ++i,s++) {
+
+ ConnectionString server( cluster.getServers()[s] );
+ ScopedDbConnection conn( server );
+
+ BSONObj result;
+ try {
+ bool success = conn->runCommand( string("admin"), BSON( "_skewClockCommand" << 1 << "skew" << *i ), result );
+
+ uassert(13678, str::stream() << "Could not communicate with server " << server.toString() << " in cluster " << cluster.toString() << " to change skew by " << *i, success );
+
+ log( logLvl + 1 ) << " Skewed host " << server << " clock by " << *i << endl;
+ }
+ catch(...) {
+ conn.done();
+ throw;
+ }
+
+ conn.done();
+
+ }
+
+ }
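A hedged sketch of feeding skewClocks through the enclosing command: "skewHosts" is an array carrying one millisecond offset per host, matched positionally to cluster.getServers(). The offsets below are invented.

    BSONObj cmd = BSON( "_testDistLockWithSkew" << 1
                        << "host" << "cfg1:30000,cfg2:30000,cfg3:30000"
                        << "skewHosts" << BSON_ARRAY( -500 << 0 << 500 ) );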
+
+ // variables for test
+ thread_specific_ptr<DistributedLock> lock;
+ AtomicUInt count;
+ bool keepGoing;
+
+ } testDistLockWithSkewCmd;
+
+
+ /**
+ * Utility command to virtually skew the clock of a mongo server a particular amount.
+ * This skews the clock globally, per-thread skew is also possible.
+ */
+ class SkewClockCommand: public Command {
+ public:
+ SkewClockCommand() :
+ Command("_skewClockCommand") {
+ }
+ virtual void help(stringstream& help) const {
+ help << "should not be calling this directly" << endl;
+ }
+
+ virtual bool slaveOk() const {
+ return false;
+ }
+ virtual bool adminOnly() const {
+ return true;
+ }
+ virtual LockType locktype() const {
+ return NONE;
+ }
+
+ bool run(const string&, BSONObj& cmdObj, int, string& errmsg,
+ BSONObjBuilder& result, bool) {
+
+ long long skew = (long long) number_field(cmdObj, "skew", 0);
+
+ log() << "Adjusting jsTime() clock skew to " << skew << endl;
+
+ jsTimeVirtualSkew( skew );
+
+ log() << "JSTime adjusted, now is " << jsTime() << endl;
+
+ return true;
+
+ }
+
+ } testSkewClockCommand;
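And a matching sketch for the clock-skew utility itself: "skew" is in milliseconds, the target value is hypothetical, and conn is assumed to be an already-connected DBClientConnection as in the earlier sketch.

    // Shift this server's virtual jsTime() by +250ms (admin database, per adminOnly()).
    BSONObj res;
    conn.runCommand( "admin", BSON( "_skewClockCommand" << 1 << "skew" << 250 ), res );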
+
}
+
diff --git a/client/examples/clientTest.cpp b/client/examples/clientTest.cpp
index 96c014e..aaea6bd 100644
--- a/client/examples/clientTest.cpp
+++ b/client/examples/clientTest.cpp
@@ -246,5 +246,34 @@ int main( int argc, const char **argv ) {
//MONGO_PRINT(out);
}
+ {
+ // test timeouts
+
+        // autoReconnect on; the third argument is a 2-second socket timeout
+        DBClientConnection conn( true , 0 , 2 );
+ if ( ! conn.connect( string( "127.0.0.1:" ) + port , errmsg ) ) {
+ cout << "couldn't connect : " << errmsg << endl;
+ throw -11;
+ }
+ conn.insert( "test.totest" , BSON( "x" << 1 ) );
+ BSONObj res;
+
+ bool gotError = false;
+ assert( conn.eval( "test" , "return db.totest.findOne().x" , res ) );
+ try {
+ conn.eval( "test" , "sleep(5000); return db.totest.findOne().x" , res );
+ }
+ catch ( std::exception& e ) {
+ gotError = true;
+ log() << e.what() << endl;
+ }
+ assert( gotError );
+        // sleep so the server-side eval (5s) has finished and the db is no longer locked
+ sleepsecs( 4 );
+
+ assert( conn.eval( "test" , "return db.totest.findOne().x" , res ) );
+
+
+ }
+
cout << "client test finished!" << endl;
}
diff --git a/client/examples/httpClientTest.cpp b/client/examples/httpClientTest.cpp
index 4fa5fd8..4055d44 100644
--- a/client/examples/httpClientTest.cpp
+++ b/client/examples/httpClientTest.cpp
@@ -18,10 +18,27 @@
#include <iostream>
#include "client/dbclient.h"
-#include "util/httpclient.h"
+#include "util/net/httpclient.h"
using namespace mongo;
+void play( string url ) {
+ cout << "[" << url << "]" << endl;
+
+ HttpClient c;
+ HttpClient::Result r;
+ MONGO_assert( c.get( url , &r ) == 200 );
+
+ HttpClient::Headers h = r.getHeaders();
+ MONGO_assert( h["Content-Type"].find( "text/html" ) == 0 );
+
+ cout << "\tHeaders" << endl;
+ for ( HttpClient::Headers::iterator i = h.begin() ; i != h.end(); ++i ) {
+ cout << "\t\t" << i->first << "\t" << i->second << endl;
+ }
+
+}
+
int main( int argc, const char **argv ) {
int port = 27017;
@@ -32,12 +49,10 @@ int main( int argc, const char **argv ) {
}
port += 1000;
- stringstream ss;
- ss << "http://localhost:" << port << "/";
- string url = ss.str();
-
- cout << "[" << url << "]" << endl;
-
- HttpClient c;
- MONGO_assert( c.get( url ) == 200 );
+ play( str::stream() << "http://localhost:" << port << "/" );
+
+#ifdef MONGO_SSL
+ play( "https://www.10gen.com/" );
+#endif
+
}
diff --git a/client/examples/insert_demo.cpp b/client/examples/insert_demo.cpp
new file mode 100644
index 0000000..14ac79e
--- /dev/null
+++ b/client/examples/insert_demo.cpp
@@ -0,0 +1,47 @@
+/*
+ C++ client program which inserts documents in a MongoDB database.
+
+ How to build and run:
+
+ Using mongo_client_lib.cpp:
+ g++ -I .. -I ../.. insert_demo.cpp ../mongo_client_lib.cpp -lboost_thread-mt -lboost_filesystem
+ ./a.out
+*/
+
+#include <iostream>
+#include "dbclient.h" // the mongo c++ driver
+
+using namespace std;
+using namespace mongo;
+using namespace bson;
+
+int main() {
+ try {
+ cout << "connecting to localhost..." << endl;
+ DBClientConnection c;
+ c.connect("localhost");
+ cout << "connected ok" << endl;
+
+ bo o = BSON( "hello" << "world" );
+
+ cout << "inserting..." << endl;
+
+ time_t start = time(0);
+ for( unsigned i = 0; i < 1000000; i++ ) {
+ c.insert("test.foo", o);
+ }
+
+ // wait until all operations applied
+        // getLastError blocks until all preceding writes on this connection have been applied
+
+ time_t done = time(0);
+        time_t dt = done-start;
+        if( dt == 0 ) dt = 1; // guard against division by zero on a sub-second run
+        cout << dt << " seconds " << 1000000/dt << " per second" << endl;
+ }
+ catch(DBException& e) {
+ cout << "caught DBException " << e.toString() << endl;
+ return 1;
+ }
+
+ return 0;
+}
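One design note on the demo above: inserts are fire-and-forget on the wire, so the getLastError round trip is what forces the million writes to be applied before the clock stops. Assuming getLastErrorDetailed() is available in this driver version, the full status document can be inspected instead:

    // Hypothetical variant: fetch the whole getLastError document.
    BSONObj gle = c.getLastErrorDetailed();
    cout << "gle: " << gle.jsonString() << endl;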
diff --git a/client/examples/rs.cpp b/client/examples/rs.cpp
index 7813ec6..3307d87 100644
--- a/client/examples/rs.cpp
+++ b/client/examples/rs.cpp
@@ -21,11 +21,62 @@
#include "client/dbclient.h"
#include <iostream>
+#include <vector>
using namespace mongo;
using namespace std;
+void workerThread( string collName , bool print , DBClientReplicaSet * conn ) {
+
+ while ( true ) {
+ try {
+ conn->update( collName , BSONObj() , BSON( "$inc" << BSON( "x" << 1 ) ) , true );
+
+ BSONObj x = conn->findOne( collName , BSONObj() );
+
+ if ( print ) {
+ cout << x << endl;
+ }
+
+ BSONObj a = conn->slaveConn().findOne( collName , BSONObj() , 0 , QueryOption_SlaveOk );
+ BSONObj b = conn->findOne( collName , BSONObj() , 0 , QueryOption_SlaveOk );
+
+ if ( print ) {
+ cout << "\t A " << a << endl;
+ cout << "\t B " << b << endl;
+ }
+ }
+ catch ( std::exception& e ) {
+ cout << "ERROR: " << e.what() << endl;
+ }
+ sleepmillis( 10 );
+ }
+}
+
int main( int argc , const char ** argv ) {
+
+ unsigned nThreads = 1;
+ bool print = false;
+ bool testTimeout = false;
+
+ for ( int i=1; i<argc; i++ ) {
+ if ( mongoutils::str::equals( "--threads" , argv[i] ) ) {
+ nThreads = atoi( argv[++i] );
+ }
+ else if ( mongoutils::str::equals( "--print" , argv[i] ) ) {
+ print = true;
+ }
+ // Run a special mode to demonstrate the DBClientReplicaSet so_timeout option.
+ else if ( mongoutils::str::equals( "--testTimeout" , argv[i] ) ) {
+ testTimeout = true;
+ }
+ else {
+ cerr << "unknown option: " << argv[i] << endl;
+ return 1;
+ }
+
+ }
+
string errmsg;
ConnectionString cs = ConnectionString::parse( "foo/127.0.0.1" , errmsg );
if ( ! cs.isValid() ) {
@@ -33,7 +84,7 @@ int main( int argc , const char ** argv ) {
return 1;
}
- DBClientReplicaSet * conn = (DBClientReplicaSet*)cs.connect( errmsg );
+ DBClientReplicaSet * conn = dynamic_cast<DBClientReplicaSet*>(cs.connect( errmsg, testTimeout ? 10 : 0 ));
if ( ! conn ) {
cout << "error connecting: " << errmsg << endl;
return 2;
@@ -42,17 +93,26 @@ int main( int argc , const char ** argv ) {
string collName = "test.rs1";
conn->dropCollection( collName );
- while ( true ) {
+
+ if ( testTimeout ) {
+ conn->insert( collName, BSONObj() );
try {
- conn->update( collName , BSONObj() , BSON( "$inc" << BSON( "x" << 1 ) ) , true );
- cout << conn->findOne( collName , BSONObj() ) << endl;
- cout << "\t A" << conn->slaveConn().findOne( collName , BSONObj() , 0 , QueryOption_SlaveOk ) << endl;
- cout << "\t B " << conn->findOne( collName , BSONObj() , 0 , QueryOption_SlaveOk ) << endl;
- }
- catch ( std::exception& e ) {
- cout << "ERROR: " << e.what() << endl;
+ conn->count( collName, BSON( "$where" << "sleep(40000)" ) );
+ } catch( DBException& ) {
+ return 0;
}
- sleepsecs( 1 );
+        cout << "expected a socket exception but none was thrown" << endl;
+ return 1;
+ }
+
+ vector<boost::shared_ptr<boost::thread> > threads;
+ for ( unsigned i=0; i<nThreads; i++ ) {
+ string errmsg;
+ threads.push_back( boost::shared_ptr<boost::thread>( new boost::thread( boost::bind( workerThread , collName , print , (DBClientReplicaSet*)cs.connect(errmsg) ) ) ) );
+ }
+
+ for ( unsigned i=0; i<threads.size(); i++ ) {
+ threads[i]->join();
}
}
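For reference on the --testTimeout path above: the second argument to cs.connect() is a socket timeout in seconds (10 here), so a $where that sleeps ~40s on the server should surface as a socket-level DBException on the client instead of blocking. A compressed sketch of just that interaction, under the same assumptions:

    string errmsg;
    DBClientBase * c = cs.connect( errmsg, 10 ); // 10-second socket timeout
    try {
        c->count( "test.rs1", BSON( "$where" << "sleep(40000)" ) );
    }
    catch ( DBException& ) {
        // expected: the socket times out long before the server returns
    }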
diff --git a/client/examples/simple_client_demo.vcxproj b/client/examples/simple_client_demo.vcxproj
new file mode 100755
index 0000000..4658a42
--- /dev/null
+++ b/client/examples/simple_client_demo.vcxproj
@@ -0,0 +1,92 @@
+<?xml version="1.0" encoding="utf-8"?>
+<Project DefaultTargets="Build" ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
+ <ItemGroup Label="ProjectConfigurations">
+ <ProjectConfiguration Include="Debug|Win32">
+ <Configuration>Debug</Configuration>
+ <Platform>Win32</Platform>
+ </ProjectConfiguration>
+ <ProjectConfiguration Include="Release|Win32">
+ <Configuration>Release</Configuration>
+ <Platform>Win32</Platform>
+ </ProjectConfiguration>
+ </ItemGroup>
+ <PropertyGroup Label="Globals">
+ <ProjectGuid>{89C30BC3-2874-4F2C-B4DA-EB04E9782236}</ProjectGuid>
+ <Keyword>Win32Proj</Keyword>
+ <RootNamespace>simple_client_demo</RootNamespace>
+ </PropertyGroup>
+ <Import Project="$(VCTargetsPath)\Microsoft.Cpp.Default.props" />
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'" Label="Configuration">
+ <ConfigurationType>Application</ConfigurationType>
+ <UseDebugLibraries>true</UseDebugLibraries>
+ <CharacterSet>Unicode</CharacterSet>
+ </PropertyGroup>
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'" Label="Configuration">
+ <ConfigurationType>Application</ConfigurationType>
+ <UseDebugLibraries>false</UseDebugLibraries>
+ <WholeProgramOptimization>true</WholeProgramOptimization>
+ <CharacterSet>Unicode</CharacterSet>
+ </PropertyGroup>
+ <Import Project="$(VCTargetsPath)\Microsoft.Cpp.props" />
+ <ImportGroup Label="ExtensionSettings">
+ </ImportGroup>
+ <ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
+ <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
+ </ImportGroup>
+ <ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
+ <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
+ </ImportGroup>
+ <PropertyGroup Label="UserMacros" />
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
+ <LinkIncremental>true</LinkIncremental>
+ <IncludePath>..\..;..\..\pcre-7.4;$(IncludePath)</IncludePath>
+ <LibraryPath>\boost\lib\vs2010_32;$(LibraryPath)</LibraryPath>
+ </PropertyGroup>
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
+ <LinkIncremental>false</LinkIncremental>
+ <IncludePath>..\..;..\..\pcre-7.4;$(IncludePath)</IncludePath>
+ <LibraryPath>\boost\lib\vs2010_32;$(LibraryPath)</LibraryPath>
+ </PropertyGroup>
+ <ItemDefinitionGroup Cond