-rw-r--r--  .gitignore  16
-rwxr-xr-x[-rw-r--r--]  README  94
-rw-r--r--  SConstruct  854
-rw-r--r--  bson/README  7
-rw-r--r--  bson/bson.h  123
-rw-r--r--  bson/bson_db.h  70
-rw-r--r--  bson/bsondemo/bsondemo.cpp  107
-rw-r--r--  bson/bsondemo/bsondemo.vcproj (renamed from msvc/mongos/mongos.vcproj)  105
-rw-r--r--  bson/bsondemo/bsondemo.vcxproj  193
-rw-r--r--  bson/bsondemo/bsondemo.vcxproj.filters  52
-rw-r--r--  bson/bsonelement.h  549
-rw-r--r--  bson/bsoninlines.h  597
-rw-r--r--  bson/bsonmisc.h  195
-rw-r--r--  bson/bsonobj.h  394
-rw-r--r--  bson/bsonobjbuilder.h  749
-rw-r--r--  bson/bsonobjiterator.h  131
-rw-r--r--  bson/bsontypes.h  107
-rw-r--r--  bson/inline_decls.h  33
-rw-r--r--  bson/oid.h  113
-rw-r--r--  bson/ordering.h  66
-rw-r--r--  bson/stringdata.h  64
-rw-r--r--  bson/util/atomic_int.h (renamed from util/atomic_int.h)  3
-rw-r--r--  bson/util/builder.h (renamed from util/builder.h)  131
-rw-r--r--  bson/util/misc.h  94
-rw-r--r--  buildscripts/bb.py  2
-rw-r--r--  buildscripts/buildboost.bat  54
-rw-r--r--  buildscripts/buildboost64.bat  61
-rw-r--r--  buildscripts/cleanbb.py  59
-rw-r--r--  buildscripts/confluence_export.py  29
-rw-r--r--  buildscripts/distmirror.py  139
-rw-r--r--  buildscripts/docs.py  120
-rwxr-xr-x[-rw-r--r--]  buildscripts/errorcodes.py  59
-rw-r--r--  buildscripts/makealldists.py  289
-rw-r--r--  buildscripts/makedist.py  645
-rw-r--r--  buildscripts/mergerepositories.py  194
-rwxr-xr-x  buildscripts/smoke.py  522
-rw-r--r--  buildscripts/utils.py  80
-rw-r--r--  client/clientOnly.cpp  18
-rw-r--r--  client/connpool.cpp  171
-rw-r--r--  client/connpool.h  88
-rw-r--r--  client/constants.h  26
-rw-r--r--  client/dbclient.cpp  722
-rw-r--r--  client/dbclient.h  543
-rw-r--r--  client/dbclientcursor.cpp  232
-rw-r--r--  client/dbclientcursor.h  204
-rw-r--r--  client/distlock.cpp  225
-rw-r--r--  client/distlock.h  91
-rw-r--r--  client/distlock_test.cpp  80
-rw-r--r--  client/examples/clientTest.cpp  21
-rw-r--r--  client/examples/tail.cpp  35
-rw-r--r--  client/gridfs.cpp  51
-rw-r--r--  client/gridfs.h  32
-rw-r--r--  client/model.cpp  53
-rw-r--r--  client/model.h  7
-rw-r--r--  client/parallel.cpp  316
-rw-r--r--  client/parallel.h  176
-rw-r--r--  client/redef_macros.h  55
-rw-r--r--  client/syncclusterconnection.cpp  179
-rw-r--r--  client/syncclusterconnection.h  84
-rw-r--r--  client/undef_macros.h  58
-rw-r--r--  db/btree.cpp  284
-rw-r--r--  db/btree.h  110
-rw-r--r--  db/btreecursor.cpp  125
-rw-r--r--  db/cap.cpp  393
-rw-r--r--  db/client.cpp  258
-rw-r--r--  db/client.h  87
-rw-r--r--  db/clientcursor.cpp  165
-rw-r--r--  db/clientcursor.h  149
-rw-r--r--  db/cloner.cpp  517
-rw-r--r--  db/cmdline.cpp  92
-rw-r--r--  db/cmdline.h  18
-rw-r--r--  db/commands.cpp  108
-rw-r--r--  db/commands.h  32
-rw-r--r--  db/common.cpp  19
-rw-r--r--  db/concurrency.h  162
-rw-r--r--  db/curop.h  43
-rw-r--r--  db/cursor.cpp  2
-rw-r--r--  db/cursor.h  39
-rw-r--r--  db/database.cpp  66
-rw-r--r--  db/database.h  46
-rw-r--r--  db/db.cpp  500
-rw-r--r--  db/db.h  25
-rwxr-xr-x[-rw-r--r--]  db/db.rc  73
-rw-r--r--  db/db.sln  56
-rw-r--r--  db/db.vcproj  3776
-rw-r--r--  db/db.vcxproj  618
-rwxr-xr-x  db/db.vcxproj.filters  892
-rw-r--r--  db/db_10.sln  129
-rw-r--r--  db/dbcommands.cpp  1018
-rw-r--r--  db/dbcommands_admin.cpp  79
-rw-r--r--  db/dbcommands_generic.cpp  222
-rw-r--r--  db/dbeval.cpp  26
-rw-r--r--  db/dbhelpers.cpp  190
-rw-r--r--  db/dbhelpers.h  83
-rw-r--r--  db/dbmessage.h  96
-rw-r--r--  db/dbwebserver.cpp  754
-rw-r--r--  db/dbwebserver.h  90
-rw-r--r--  db/diskloc.h  11
-rw-r--r--  db/driverHelpers.cpp  11
-rw-r--r--  db/extsort.cpp  6
-rw-r--r--  db/extsort.h  2
-rw-r--r--  db/flushtest.cpp  2
-rw-r--r--  db/geo/2d.cpp (renamed from db/index_geo2d.cpp)  735
-rw-r--r--  db/geo/core.h  427
-rw-r--r--  db/geo/haystack.cpp  317
-rw-r--r--  db/helpers/dblogger.h  31
-rw-r--r--  db/index.cpp  243
-rw-r--r--  db/index.h  153
-rw-r--r--  db/indexkey.cpp  238
-rw-r--r--  db/indexkey.h  174
-rw-r--r--  db/instance.cpp  282
-rw-r--r--  db/instance.h  37
-rw-r--r--  db/introspect.cpp  4
-rw-r--r--  db/introspect.h  2
-rw-r--r--  db/jsobj.cpp  733
-rw-r--r--  db/jsobj.h  2029
-rw-r--r--  db/jsobjmanipulator.h  4
-rw-r--r--  db/json.cpp  75
-rw-r--r--  db/json.h  5
-rw-r--r--  db/lasterror.cpp  51
-rw-r--r--  db/lasterror.h  85
-rw-r--r--  db/matcher.cpp  218
-rw-r--r--  db/matcher.h  63
-rw-r--r--  db/matcher_covered.cpp  80
-rw-r--r--  db/module.cpp  2
-rw-r--r--  db/module.h  2
-rw-r--r--  db/modules/mms.cpp  4
-rwxr-xr-x  db/mongo.ico  bin 0 -> 51262 bytes
-rw-r--r--  db/mr.cpp  326
-rw-r--r--  db/namespace.cpp  388
-rw-r--r--  db/namespace.h  313
-rw-r--r--  db/nonce.cpp  9
-rw-r--r--  db/oplog.cpp  601
-rw-r--r--  db/oplog.h  214
-rw-r--r--  db/oplogreader.h  109
-rw-r--r--  db/pdfile.cpp  265
-rw-r--r--  db/pdfile.h  112
-rw-r--r--  db/query.cpp  696
-rw-r--r--  db/query.h  66
-rw-r--r--  db/queryoptimizer.cpp  431
-rw-r--r--  db/queryoptimizer.h  312
-rw-r--r--  db/queryutil.cpp  727
-rw-r--r--  db/queryutil.h  441
-rw-r--r--  db/rec.h  20
-rw-r--r--  db/reccache.cpp  8
-rw-r--r--  db/reccache.h  26
-rw-r--r--  db/reci.h  19
-rw-r--r--  db/recstore.h  2
-rw-r--r--  db/repl.cpp  678
-rw-r--r--  db/repl.h  191
-rw-r--r--  db/repl/connections.h  91
-rw-r--r--  db/repl/consensus.cpp  342
-rw-r--r--  db/repl/health.cpp  389
-rw-r--r--  db/repl/health.h  50
-rw-r--r--  db/repl/heartbeat.cpp  257
-rw-r--r--  db/repl/manager.cpp  179
-rw-r--r--  db/repl/multicmd.h  70
-rw-r--r--  db/repl/replset_commands.cpp  293
-rw-r--r--  db/repl/rs.cpp  500
-rw-r--r--  db/repl/rs.h  415
-rw-r--r--  db/repl/rs_config.cpp  315
-rw-r--r--  db/repl/rs_config.h  88
-rwxr-xr-x  db/repl/rs_exception.h  17
-rw-r--r--  db/repl/rs_initialsync.cpp  214
-rw-r--r--  db/repl/rs_initiate.cpp  238
-rw-r--r--  db/repl/rs_member.h  91
-rw-r--r--  db/repl/rs_optime.h  58
-rw-r--r--  db/repl/rs_rollback.cpp  481
-rw-r--r--  db/repl/rs_sync.cpp  328
-rw-r--r--  db/repl/test.html  11
-rw-r--r--  db/repl/testing.js  42
-rw-r--r--  db/repl_block.cpp  207
-rw-r--r--  db/repl_block.h  34
-rw-r--r--  db/replpair.h (renamed from db/replset.h)  61
-rwxr-xr-x[-rw-r--r--]  db/resource.h  50
-rw-r--r--  db/restapi.cpp  310
-rw-r--r--  db/scanandorder.h  38
-rw-r--r--  db/security.cpp  6
-rw-r--r--  db/security.h  6
-rw-r--r--  db/security_commands.cpp  42
-rw-r--r--  db/stats/counters.cpp  34
-rw-r--r--  db/stats/counters.h  38
-rw-r--r--  db/stats/fine_clock.h  66
-rw-r--r--  db/stats/service_stats.cpp  68
-rw-r--r--  db/stats/service_stats.h  66
-rw-r--r--  db/stats/snapshots.cpp  108
-rw-r--r--  db/stats/snapshots.h  3
-rw-r--r--  db/stats/top.cpp  19
-rw-r--r--  db/stats/top.h  4
-rw-r--r--  db/storage.cpp  22
-rw-r--r--  db/tests.cpp  2
-rw-r--r--  db/update.cpp  376
-rw-r--r--  db/update.h  34
-rw-r--r--  dbtests/basictests.cpp  160
-rw-r--r--  dbtests/btreetests.cpp  178
-rw-r--r--  dbtests/clienttests.cpp  12
-rw-r--r--  dbtests/commandtests.cpp  98
-rw-r--r--  dbtests/cursortests.cpp  173
-rw-r--r--  dbtests/dbtests.cpp  2
-rw-r--r--  dbtests/dbtests.h  1
-rw-r--r--  dbtests/framework.cpp  48
-rw-r--r--  dbtests/framework.h  25
-rw-r--r--  dbtests/histogram_test.cpp  94
-rw-r--r--  dbtests/jsobjtests.cpp  284
-rw-r--r--  dbtests/jsontests.cpp  96
-rw-r--r--  dbtests/jstests.cpp  96
-rw-r--r--  dbtests/macrotests.cpp  47
-rw-r--r--  dbtests/matchertests.cpp  11
-rw-r--r--  dbtests/mockdbclient.h  14
-rw-r--r--  dbtests/namespacetests.cpp  87
-rw-r--r--  dbtests/pairingtests.cpp  6
-rw-r--r--  dbtests/pdfiletests.cpp  10
-rw-r--r--  dbtests/perf/perftest.cpp  10
-rw-r--r--  dbtests/queryoptimizertests.cpp  785
-rw-r--r--  dbtests/querytests.cpp  83
-rw-r--r--  dbtests/repltests.cpp  36
-rw-r--r--  dbtests/sharding.cpp  2
-rw-r--r--  dbtests/socktests.cpp  3
-rw-r--r--  dbtests/spin_lock_test.cpp  115
-rw-r--r--  dbtests/test.vcproj  1030
-rw-r--r--  dbtests/test.vcxproj  441
-rwxr-xr-x  dbtests/test.vcxproj.filters  707
-rw-r--r--  dbtests/threadedtests.cpp  23
-rw-r--r--  dbtests/updatetests.cpp  54
-rw-r--r--  debian/changelog  92
-rw-r--r--  debian/files  1
-rw-r--r--  debian/mongo.1  8
-rw-r--r--  debian/mongodb.upstart  15
-rw-r--r--  debian/preinst  37
-rw-r--r--  distsrc/THIRD-PARTY-NOTICES  25
-rw-r--r--  distsrc/client/LICENSE.txt  203
-rw-r--r--  distsrc/client/SConstruct  72
-rw-r--r--  docs/building.debian.etch.ec2.md  3
-rw-r--r--  docs/building.md  81
-rw-r--r--  docs/building.opensolaris.ec2.md  17
-rw-r--r--  docs/index.md  9
-rw-r--r--  doxygenConfig  6
-rw-r--r--  gch.py  7
-rw-r--r--  jstests/_fail.js  4
-rw-r--r--  jstests/_runner.js  8
-rw-r--r--  jstests/apitest_db.js  14
-rw-r--r--  jstests/apply_ops1.js  51
-rw-r--r--  jstests/arrayfind2.js  8
-rw-r--r--  jstests/capped3.js  7
-rw-r--r--  jstests/capped6.js  82
-rw-r--r--  jstests/capped7.js  72
-rw-r--r--  jstests/clone/clonecollection.js  150
-rw-r--r--  jstests/conc_update.js  45
-rw-r--r--  jstests/copydb-auth.js (renamed from jstests/copydb2.js)  0
-rw-r--r--  jstests/cursor8.js  12
-rw-r--r--  jstests/cursora.js  34
-rw-r--r--  jstests/datasize.js  8
-rw-r--r--  jstests/datasize2.js  27
-rw-r--r--  jstests/dbadmin.js  5
-rw-r--r--  jstests/dbcase.js  23
-rw-r--r--  jstests/dbhash.js  9
-rw-r--r--  jstests/disk/directoryperdb.js  8
-rw-r--r--  jstests/disk/repair.js  37
-rw-r--r--  jstests/disk/repair2.js  47
-rw-r--r--  jstests/disk/repair3.js  52
-rw-r--r--  jstests/disk/repair4.js  44
-rw-r--r--  jstests/distinct_array1.js  24
-rw-r--r--  jstests/distinct_speed1.js  26
-rw-r--r--  jstests/drop.js  2
-rw-r--r--  jstests/evalb.js  3
-rw-r--r--  jstests/explain2.js  2
-rw-r--r--  jstests/find_and_modify.js  4
-rw-r--r--  jstests/find_and_modify2.js  10
-rw-r--r--  jstests/find_and_modify3.js  21
-rw-r--r--  jstests/find_and_modify4.js  55
-rw-r--r--  jstests/fm4.js  16
-rw-r--r--  jstests/geo2.js  1
-rw-r--r--  jstests/geo3.js  2
-rw-r--r--  jstests/geo_box3.js  36
-rw-r--r--  jstests/geo_circle2.js  23
-rw-r--r--  jstests/geo_circle3.js  28
-rw-r--r--  jstests/geo_circle4.js  24
-rw-r--r--  jstests/geo_circle5.js  28
-rw-r--r--  jstests/geo_haystack1.js  59
-rw-r--r--  jstests/geo_haystack2.js  60
-rw-r--r--  jstests/geod.js  14
-rw-r--r--  jstests/geoe.js  32
-rw-r--r--  jstests/group6.js  31
-rw-r--r--  jstests/hint1.js  4
-rw-r--r--  jstests/in3.js  4
-rw-r--r--  jstests/in4.js  53
-rw-r--r--  jstests/in5.js  56
-rw-r--r--  jstests/in6.js  13
-rw-r--r--  jstests/in7.js  6
-rw-r--r--  jstests/index1.js  6
-rw-r--r--  jstests/index10.js  4
-rw-r--r--  jstests/index6.js  2
-rw-r--r--  jstests/index7.js  18
-rw-r--r--  jstests/index_check2.js  2
-rw-r--r--  jstests/index_check6.js  52
-rw-r--r--  jstests/index_check8.js  15
-rw-r--r--  jstests/index_elemmatch1.js  28
-rw-r--r--  jstests/index_many.js  75
-rw-r--r--  jstests/index_many2.js  29
-rw-r--r--  jstests/indexapi.js  2
-rw-r--r--  jstests/indexe.js  2
-rw-r--r--  jstests/indexh.js  34
-rw-r--r--  jstests/maxscan.js  14
-rw-r--r--  jstests/not2.js  15
-rw-r--r--  jstests/numberlong.js  55
-rw-r--r--  jstests/objid5.js  12
-rw-r--r--  jstests/objid6.js  14
-rw-r--r--  jstests/objid7.js  13
-rw-r--r--  jstests/or1.js  57
-rw-r--r--  jstests/or2.js  68
-rw-r--r--  jstests/or3.js  64
-rw-r--r--  jstests/or4.js  98
-rw-r--r--  jstests/or5.js  107
-rw-r--r--  jstests/or6.js  31
-rw-r--r--  jstests/or7.js  41
-rw-r--r--  jstests/or8.js  16
-rw-r--r--  jstests/or9.js  54
-rw-r--r--  jstests/ora.js  17
-rw-r--r--  jstests/orb.js  17
-rw-r--r--  jstests/pullall2.js  20
-rw-r--r--  jstests/ref3.js  22
-rw-r--r--  jstests/regex5.js  22
-rw-r--r--  jstests/repair.js  2
-rw-r--r--  jstests/repl/basic1.js  53
-rw-r--r--  jstests/repl/block1.js  24
-rw-r--r--  jstests/repl/block2.js  45
-rw-r--r--  jstests/repl/mastermaster1.js  34
-rw-r--r--  jstests/repl/repair.js  14
-rw-r--r--  jstests/repl/replacePeer2.js  14
-rw-r--r--  jstests/repl/snapshot2.js  13
-rw-r--r--  jstests/repl/snapshot3.js  17
-rw-r--r--  jstests/replsets/replset1.js  115
-rw-r--r--  jstests/replsets/replset2.js  111
-rw-r--r--  jstests/replsets/replset3.js  56
-rw-r--r--  jstests/replsets/replset4.js  29
-rw-r--r--  jstests/replsets/replset_remove_node.js  57
-rw-r--r--  jstests/replsets/replsetadd.js  31
-rw-r--r--  jstests/replsets/replsetarb1.js  33
-rw-r--r--  jstests/replsets/replsetarb2.js  45
-rw-r--r--  jstests/replsets/replsetprio1.js  53
-rw-r--r--  jstests/replsets/replsetrestart1.js  57
-rw-r--r--  jstests/replsets/replsetrestart2.js  46
-rw-r--r--  jstests/replsets/rollback.js  129
-rw-r--r--  jstests/replsets/rollback2.js  199
-rw-r--r--  jstests/replsets/sync1.js  192
-rw-r--r--  jstests/replsets/twosets.js  36
-rw-r--r--  jstests/rs/rs_basic.js  177
-rw-r--r--  jstests/rs/test_framework.js  30
-rw-r--r--  jstests/sharding/addshard1.js  56
-rw-r--r--  jstests/sharding/auto1.js  34
-rw-r--r--  jstests/sharding/auto2.js  105
-rw-r--r--  jstests/sharding/bigMapReduce.js  17
-rw-r--r--  jstests/sharding/count1.js  64
-rw-r--r--  jstests/sharding/count2.js  43
-rw-r--r--  jstests/sharding/cursor1.js  60
-rw-r--r--  jstests/sharding/diffservers1.js  2
-rw-r--r--  jstests/sharding/error1.js  24
-rw-r--r--  jstests/sharding/features1.js  27
-rw-r--r--  jstests/sharding/features2.js  53
-rw-r--r--  jstests/sharding/features3.js  86
-rw-r--r--  jstests/sharding/findandmodify1.js  42
-rw-r--r--  jstests/sharding/key_many.js  41
-rw-r--r--  jstests/sharding/movePrimary1.js  19
-rw-r--r--  jstests/sharding/moveshard1.js  39
-rw-r--r--  jstests/sharding/presplit.js  37
-rw-r--r--  jstests/sharding/remove1.js  16
-rw-r--r--  jstests/sharding/rename.js  26
-rw-r--r--  jstests/sharding/shard1.js  14
-rw-r--r--  jstests/sharding/shard2.js  45
-rw-r--r--  jstests/sharding/shard3.js  48
-rw-r--r--  jstests/sharding/shard6.js  75
-rw-r--r--  jstests/sharding/sort1.js  81
-rw-r--r--  jstests/sharding/splitpick.js  14
-rw-r--r--  jstests/sharding/stats.js  60
-rw-r--r--  jstests/sharding/sync1.js  5
-rw-r--r--  jstests/sharding/sync2.js  70
-rw-r--r--  jstests/sharding/sync3.js  10
-rw-r--r--  jstests/sharding/sync4.js  19
-rw-r--r--  jstests/sharding/update1.js  19
-rw-r--r--  jstests/shellkillop.js  2
-rw-r--r--  jstests/shellspawn.js  6
-rw-r--r--  jstests/slice1.js  68
-rw-r--r--  jstests/slowNightly/remove9.js  12
-rw-r--r--  jstests/slowNightly/run_sharding_passthrough.js  94
-rw-r--r--  jstests/slowNightly/sharding_balance1.js  55
-rw-r--r--  jstests/slowNightly/sharding_balance2.js  54
-rw-r--r--  jstests/slowNightly/sharding_balance3.js  57
-rw-r--r--  jstests/slowNightly/sharding_balance4.js  122
-rw-r--r--  jstests/slowNightly/sharding_cursors1.js  71
-rw-r--r--  jstests/slowNightly/sharding_rs1.js  61
-rw-r--r--  jstests/slowWeekly/conc_update.js  51
-rw-r--r--  jstests/slowWeekly/indexbg1.js (renamed from jstests/slow/indexbg1.js)  0
-rw-r--r--  jstests/slowWeekly/indexbg2.js (renamed from jstests/slow/indexbg2.js)  0
-rw-r--r--  jstests/slowWeekly/ns1.js (renamed from jstests/slow/ns1.js)  0
-rw-r--r--  jstests/slowWeekly/query_yield1.js  73
-rw-r--r--  jstests/slowWeekly/query_yield2.js  73
-rw-r--r--  jstests/slowWeekly/update_yield1.js  78
-rw-r--r--  jstests/splitvector.js  81
-rw-r--r--  jstests/tempCleanup.js  16
-rw-r--r--  jstests/tool/csv1.js  8
-rw-r--r--  jstests/tool/exportimport1.js  13
-rw-r--r--  jstests/tool/files1.js  27
-rw-r--r--  jstests/update_addToSet2.js  11
-rw-r--r--  jstests/update_arraymatch4.js  18
-rw-r--r--  jstests/update_arraymatch5.js  15
-rw-r--r--  jstests/update_multi4.js  18
-rw-r--r--  jstests/update_multi5.js  17
-rw-r--r--  jstests/upsert1.js  14
-rw-r--r--  jstests/where3.js  10
-rw-r--r--  lib/libboost_thread-gcc41-mt-d-1_34_1.a  bin 0 -> 692920 bytes
-rw-r--r--  mongo.xcodeproj/project.pbxproj  134
-rw-r--r--  msvc/README  54
-rw-r--r--  msvc/bin/Debug/README  1
-rw-r--r--  msvc/bin/Release/README  1
-rw-r--r--  msvc/core_server/core_server.vcproj  240
-rw-r--r--  msvc/lib/Debug/README  1
-rw-r--r--  msvc/lib/Release/README  1
-rw-r--r--  msvc/mongo.sln  138
-rw-r--r--  msvc/mongo/mongo.vcproj  312
-rw-r--r--  msvc/mongo_app.vsprops  22
-rw-r--r--  msvc/mongo_common/mongo_common.vcproj  940
-rw-r--r--  msvc/mongo_lib.vsprops  22
-rw-r--r--  msvc/mongobridge/mongobridge.vcproj  296
-rw-r--r--  msvc/mongoclient/mongoclient.vcproj  240
-rw-r--r--  msvc/mongod/mongod.vcproj  232
-rw-r--r--  msvc/mongodump/mongodump.vcproj  296
-rw-r--r--  msvc/mongoexport/mongoexport.vcproj  296
-rw-r--r--  msvc/mongofiles/mongofiles.vcproj  296
-rw-r--r--  msvc/mongoimportjson/mongoimportjson.vcproj  296
-rw-r--r--  msvc/mongorestore/mongorestore.vcproj  296
-rw-r--r--  msvc/server_only/server_only.vcproj  362
-rw-r--r--  msvc/shard_server/shard_server.vcproj  262
-rw-r--r--  pch.cpp (renamed from stdafx.cpp)  11
-rw-r--r--  pch.h (renamed from stdafx.h)  153
-rw-r--r--  pcre-7.4/config.h  2
-rw-r--r--  rpm/init.d-mongod  6
-rw-r--r--  rpm/mongo.spec  9
-rw-r--r--  rpm/mongod.conf  2
-rw-r--r--  s/balance.cpp  295
-rw-r--r--  s/balance.h  80
-rw-r--r--  s/balancer_policy.cpp  296
-rw-r--r--  s/balancer_policy.h  84
-rw-r--r--  s/chunk.cpp  1203
-rw-r--r--  s/chunk.h  251
-rw-r--r--  s/commands_admin.cpp  746
-rw-r--r--  s/commands_public.cpp  620
-rw-r--r--  s/config.cpp  630
-rw-r--r--  s/config.h  219
-rw-r--r--  s/config_migrate.cpp  196
-rw-r--r--  s/cursors.cpp  199
-rw-r--r--  s/cursors.h  41
-rw-r--r--  s/d_logic.cpp  503
-rw-r--r--  s/d_logic.h  139
-rw-r--r--  s/d_migrate.cpp  970
-rw-r--r--  s/d_split.cpp  197
-rw-r--r--  s/d_state.cpp  620
-rw-r--r--  s/d_util.cpp  9
-rw-r--r--  s/d_writeback.cpp  80
-rw-r--r--  s/dbgrid.vcproj  946
-rw-r--r--  s/dbgrid.vcxproj  770
-rwxr-xr-x  s/dbgrid.vcxproj.filters  365
-rw-r--r--  s/grid.cpp  295
-rw-r--r--  s/grid.h  106
-rw-r--r--  s/request.cpp  133
-rw-r--r--  s/request.h  85
-rw-r--r--  s/s_only.cpp  63
-rw-r--r--  s/server.cpp  128
-rw-r--r--  s/server.h  1
-rw-r--r--  s/shard.cpp  237
-rw-r--r--  s/shard.h  251
-rw-r--r--  s/shardconnection.cpp  266
-rw-r--r--  s/shardkey.cpp  267
-rw-r--r--  s/shardkey.h  58
-rw-r--r--  s/stats.cpp  28
-rw-r--r--  s/stats.h  30
-rw-r--r--  s/strategy.cpp  254
-rw-r--r--  s/strategy.h  14
-rw-r--r--  s/strategy_shard.cpp  201
-rw-r--r--  s/strategy_single.cpp  185
-rw-r--r--  s/util.h  120
-rw-r--r--  scripting/engine.cpp  24
-rw-r--r--  scripting/engine.h  4
-rw-r--r--  scripting/engine_java.cpp  2
-rw-r--r--  scripting/engine_java.h  3
-rw-r--r--  scripting/engine_spidermonkey.cpp  237
-rw-r--r--  scripting/engine_spidermonkey.h  2
-rw-r--r--  scripting/engine_v8.cpp  10
-rw-r--r--  scripting/sm_db.cpp  254
-rw-r--r--  scripting/utils.cpp  3
-rw-r--r--  scripting/v8_db.cpp  148
-rw-r--r--  scripting/v8_db.h  1
-rw-r--r--  scripting/v8_utils.cpp  7
-rw-r--r--  scripting/v8_utils.h  1
-rw-r--r--  scripting/v8_wrapper.cpp  40
-rw-r--r--  shell/collection.js  83
-rw-r--r--  shell/db.js  108
-rw-r--r--  shell/dbshell.cpp  190
-rw-r--r--  shell/mongo.js  5
-rw-r--r--  shell/mongo_vstudio.cpp  3295
-rwxr-xr-x  shell/msvc/mongo.ico  bin 0 -> 1078 bytes
-rw-r--r--  shell/msvc/mongo.sln  20
-rw-r--r--  shell/msvc/mongo.vcxproj  253
-rw-r--r--  shell/msvc/mongo.vcxproj.filters  262
-rw-r--r--  shell/query.js  77
-rw-r--r--  shell/servers.js  774
-rw-r--r--  shell/shell_utils.cpp (renamed from shell/utils.cpp)  298
-rw-r--r--  shell/utils.js  271
-rw-r--r--  tools/bridge.cpp  29
-rw-r--r--  tools/bsondump.cpp  132
-rw-r--r--  tools/dump.cpp  22
-rw-r--r--  tools/export.cpp  22
-rw-r--r--  tools/files.cpp  6
-rw-r--r--  tools/import.cpp  133
-rw-r--r--  tools/restore.cpp  112
-rw-r--r--  tools/sniffer.cpp  183
-rw-r--r--  tools/stat.cpp  115
-rw-r--r--  tools/tool.cpp  196
-rw-r--r--  tools/tool.h  32
-rw-r--r--  util/allocator.h  6
-rw-r--r--  util/array.h  18
-rw-r--r--  util/assert_util.cpp  142
-rw-r--r--  util/assert_util.h  146
-rw-r--r--  util/background.cpp  71
-rw-r--r--  util/background.h  71
-rw-r--r--  util/base64.cpp  2
-rw-r--r--  util/concurrency/list.h  81
-rw-r--r--  util/concurrency/msg.h  61
-rw-r--r--  util/concurrency/mutex.h  179
-rw-r--r--  util/concurrency/mvar.h (renamed from util/mvar.h)  0
-rw-r--r--  util/concurrency/readme.txt  15
-rw-r--r--  util/concurrency/rwlock.h  220
-rw-r--r--  util/concurrency/spin_lock.cpp  66
-rw-r--r--  util/concurrency/spin_lock.h  48
-rw-r--r--  util/concurrency/task.cpp  171
-rw-r--r--  util/concurrency/task.h  72
-rw-r--r--  util/concurrency/thread_pool.cpp  138
-rw-r--r--  util/concurrency/thread_pool.h (renamed from util/thread_pool.h)  8
-rw-r--r--  util/concurrency/value.h  85
-rw-r--r--  util/concurrency/vars.cpp  52
-rw-r--r--  util/debug_util.cpp  2
-rw-r--r--  util/debug_util.h  52
-rw-r--r--  util/embedded_builder.h  3
-rw-r--r--  util/file.h  15
-rw-r--r--  util/file_allocator.h  95
-rw-r--r--  util/goodies.h  262
-rw-r--r--  util/hashtab.h  59
-rw-r--r--  util/hex.h  32
-rw-r--r--  util/histogram.cpp  129
-rw-r--r--  util/histogram.h  128
-rw-r--r--  util/hostandport.h  142
-rw-r--r--  util/httpclient.cpp  40
-rw-r--r--  util/httpclient.h  18
-rw-r--r--  util/log.cpp  127
-rw-r--r--  util/log.h  247
-rw-r--r--  util/lruishmap.h  2
-rw-r--r--  util/md5main.cpp  2
-rw-r--r--  util/message.cpp  627
-rw-r--r--  util/message.h  320
-rw-r--r--  util/message_server.h  23
-rw-r--r--  util/message_server_asio.cpp  18
-rw-r--r--  util/message_server_port.cpp  76
-rw-r--r--  util/miniwebserver.cpp  89
-rw-r--r--  util/miniwebserver.h  24
-rw-r--r--  util/mmap.cpp  152
-rw-r--r--  util/mmap.h  156
-rw-r--r--  util/mmap_mm.cpp  6
-rw-r--r--  util/mmap_posix.cpp  45
-rw-r--r--  util/mmap_win.cpp  67
-rwxr-xr-x  util/mongoutils/README  7
-rw-r--r--  util/mongoutils/checksum.h (renamed from msvc/msvc_scripting.cpp)  26
-rw-r--r--  util/mongoutils/html.h  158
-rwxr-xr-x  util/mongoutils/mongoutils.vcxproj  73
-rwxr-xr-x  util/mongoutils/mongoutils.vcxproj.filters  10
-rw-r--r--  util/mongoutils/str.h  118
-rwxr-xr-x  util/mongoutils/test.cpp  34
-rw-r--r--  util/ntservice.cpp  138
-rw-r--r--  util/ntservice.h  2
-rw-r--r--  util/optime.h  48
-rw-r--r--  util/password.cpp  92
-rw-r--r--  util/password.h  61
-rw-r--r--  util/processinfo.cpp  47
-rw-r--r--  util/processinfo.h  5
-rw-r--r--  util/processinfo_darwin.cpp  4
-rw-r--r--  util/processinfo_linux2.cpp  7
-rw-r--r--  util/processinfo_none.cpp  2
-rw-r--r--  util/processinfo_win32.cpp  2
-rw-r--r--  util/queue.h  6
-rw-r--r--  util/ramlog.h  142
-rw-r--r--  util/ramstore.cpp  93
-rw-r--r--  util/ramstore.h  86
-rw-r--r--  util/sock.cpp  290
-rw-r--r--  util/sock.h  234
-rw-r--r--  util/stringutils.cpp  44
-rw-r--r--  util/stringutils.h  43
-rw-r--r--  util/text.cpp  117
-rw-r--r--  util/text.h  142
-rw-r--r--  util/thread_pool.cpp  139
-rw-r--r--  util/util.cpp  101
-rw-r--r--  util/version.cpp  86
-rw-r--r--  util/version.h  24
-rw-r--r--  util/winutil.h  44
-rw-r--r--  valgrind.suppressions  8
602 files changed, 59037 insertions, 24119 deletions
diff --git a/.gitignore b/.gitignore
index 0d83f60..2c7d1bd 100644
--- a/.gitignore
+++ b/.gitignore
@@ -2,8 +2,10 @@
.dbshell
.sconsign.dblite
.sconf_temp
+perf.data
*~
+*.swp
*.o
*.os
*.obj
@@ -27,16 +29,22 @@
*.idb
*.pdb
*.manifest
+*.user
*.gch
+*.*sdf
+*.psess
*#
.#*
shell/mongo.cpp
shell/mongo-server.cpp
-db/Debug
-db/oplog*
+*/Debug/
+*/*/Debug/
+*/Release/
+*/*/Release/
db/.gdb*
db/makefile.local
+db/_ReSharper.db
config.log
settings.py
buildinfo.cpp
@@ -49,6 +57,7 @@ log
logs
docs/html
docs/latex
+docs/doxygen
32bit
scratch
@@ -68,16 +77,19 @@ mongoimport
mongosniff
mongobridge
mongostat
+bsondump
*.tgz
*.zip
*.tar.gz
mongodb-*
+mongo-cxx-driver-*
#libs
libmongoclient.*
libmongotestfiles.*
+libmongoshellfiles.*
# examples
firstExample
diff --git a/README b/README
index 0341176..7cf7653 100644..100755
--- a/README
+++ b/README
@@ -1,43 +1,51 @@
-MongoDB README
-
-DOCUMENTATION
-
- http://www.mongodb.org/
-
-COMPONENTS
-
- mongod - The database process.
- mongos - Sharding controller.
- mongo - The database shell (uses interactive javascript).
-
-BUILDING
-
- see docs/building.md
-
-
-RUNNING
-
- ./mongod
-
- runs the database. Use
-
- ./mongod --help
-
- to see command line options.
-
-NOTES
-
- Mongo uses memory mapped files. If built as a 32 bit executable, you will
- not be able to work with large (multi-gigabyte) databases. However, 32 bit
- builds work fine with small development databases.
-
-
-LICENSING
-
- Most MongoDB source files are made available under the terms of the
- GNU Affero General Public License (AGPL). See individual files for
- details.
-
- As an exception, the files in the debian/ directory, the rpm/
- directory, and all subdirectories thereof are made available under
- the terms of the Apache License, version 2.0.
+MongoDB README
+
+DOCUMENTATION
+
+ http://www.mongodb.org/
+
+COMPONENTS
+
+ mongod - The database process.
+ mongos - Sharding controller.
+ mongo - The database shell (uses interactive javascript).
+
+BUILDING
+
+ See docs/building.md; also see www.mongodb.org and search for "Building".
+
+RUNNING
+
+ For command line options invoke:
+
+ $ ./mongod --help
+
+ To run a single server database:
+
+ $ mkdir /data/db
+ $ ./mongod
+ $
+ $ # The mongo javascript shell connects to localhost and test database by default:
+ $ ./mongo
+ > help
+
+DRIVERS
+
+ Client drivers for most programming languages are available at mongodb.org.
+
+NOTES
+
+ Mongo uses memory mapped files. If built as a 32 bit executable, you will
+ not be able to work with large (multi-gigabyte) databases. However, 32 bit
+ builds work fine with small development databases.
+
+LICENSE
+
+ Most MongoDB source files are made available under the terms of the
+ GNU Affero General Public License (AGPL). See individual files for
+ details.
+
+ As an exception, the files in the client/, debian/, rpm/,
+ utils/mongoutils, and all subdirectories thereof are made available under
+ the terms of the Apache License, version 2.0.
+
diff --git a/SConstruct b/SConstruct
index 65457d2..b759094 100644
--- a/SConstruct
+++ b/SConstruct
@@ -1,6 +1,6 @@
# -*- mode: python; -*-
-# build file for 10gen db
-# this request scons
+# build file for MongoDB
+# this requires scons
# you can get from http://www.scons.org
# then just type scons
@@ -49,7 +49,6 @@ AddOption('--distmod',
metavar='DIR',
help='additional piece for full dist name')
-
AddOption( "--64",
dest="force64",
type="string",
@@ -90,23 +89,6 @@ AddOption( "--static",
help="fully static build")
-AddOption('--java',
- dest='javaHome',
- type='string',
- default="/opt/java/",
- nargs=1,
- action='store',
- metavar='DIR',
- help='java home')
-
-AddOption('--nojni',
- dest='nojni',
- type="string",
- nargs=0,
- action="store",
- help="turn off jni support" )
-
-
AddOption('--usesm',
dest='usesm',
type="string",
@@ -121,13 +103,6 @@ AddOption('--usev8',
action="store",
help="use v8 for javascript" )
-AddOption('--usejvm',
- dest='usejvm',
- type="string",
- nargs=0,
- action="store",
- help="use java for javascript" )
-
AddOption('--asio',
dest='asio',
type="string",
@@ -175,14 +150,14 @@ AddOption( "--extrapath",
type="string",
nargs=1,
action="store",
- help="comma seperated list of add'l paths (--extrapath /opt/foo/,/foo) static linking" )
+ help="comma separated list of add'l paths (--extrapath /opt/foo/,/foo) static linking" )
AddOption( "--extrapathdyn",
dest="extrapathdyn",
type="string",
nargs=1,
action="store",
- help="comma seperated list of add'l paths (--extrapath /opt/foo/,/foo) dynamic linking" )
+ help="comma separated list of add'l paths (--extrapath /opt/foo/,/foo) dynamic linking" )
AddOption( "--extralib",
@@ -190,21 +165,21 @@ AddOption( "--extralib",
type="string",
nargs=1,
action="store",
- help="comma seperated list of libraries (--extralib js_static,readline" )
+ help="comma separated list of libraries (--extralib js_static,readline)" )
AddOption( "--staticlib",
dest="staticlib",
type="string",
nargs=1,
action="store",
+ help="comma separated list of libs to link statically (--staticlib js_static,boost_program_options-mt,...)" )
+ help="comma separated list of libs to link statically (--staticlib js_static,boost_program_options-mt,..." )
AddOption( "--staticlibpath",
dest="staticlibpath",
type="string",
nargs=1,
action="store",
- help="comma seperated list of dirs to search for staticlib arguments" )
+ help="comma separated list of dirs to search for staticlib arguments" )
AddOption( "--cxx",
dest="cxx",
@@ -228,6 +203,20 @@ AddOption( "--boost-version",
action="store",
help="boost version for linking(1_38)" )
+AddOption( "--cpppath",
+ dest="cpppath",
+ type="string",
+ nargs=1,
+ action="store",
+ help="Include path if you have headers in a nonstandard directory" )
+
+AddOption( "--libpath",
+ dest="libpath",
+ type="string",
+ nargs=1,
+ action="store",
+ help="Library path if you have libraries in a nonstandard directory" )
+
#
# to use CPUPROFILE=/tmp/profile
# to view pprof -gv mongod /tmp/profile
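The --cpppath and --libpath values above are appended verbatim to CPPPATH and LIBPATH, and the CPUPROFILE comment refers to the google-perftools workflow. A plausible session looks like this; the directory values are placeholders, only the flags and the pprof invocation come from this file:

    $ scons --cpppath=/opt/local/include --libpath=/opt/local/lib mongod
    $ CPUPROFILE=/tmp/profile ./mongod     # profile is written when mongod exits
    $ pprof -gv mongod /tmp/profile        # view the profile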
@@ -251,9 +240,26 @@ AddOption("--nostrip",
AddOption("--sharedclient",
dest="sharedclient",
- action="store",
+ action="store_true",
help="build a libmongoclient.so/.dll")
+AddOption("--full",
+ dest="full",
+ action="store_true",
+ help="include client and headers when doing scons install")
+
+AddOption("--smokedbprefix",
+ dest="smokedbprefix",
+ action="store",
+ help="prefix to dbpath et al. for smoke tests")
+
+AddOption( "--pch",
+ dest="usePCH",
+ type="string",
+ nargs=0,
+ action="store",
+ help="use precompiled headers to speed up the build (experimental)" )
+
# --- environment setup ---
def removeIfInList( lst , thing ):
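The options added in this hunk resurface later in the file: --full widens the InstallSetup below to include headers and libraries, --pch gates the precompiled-header block, and --smokedbprefix is passed through to the smoke tests as --smoke-db-prefix. A hypothetical combined invocation (the target names follow how they are wired up elsewhere in this diff):

    $ scons --full --sharedclient --pch install
    $ scons --smokedbprefix=/tmp smoke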
@@ -271,12 +277,12 @@ boostLibs = [ "thread" , "filesystem" , "program_options" ]
onlyServer = len( COMMAND_LINE_TARGETS ) == 0 or ( len( COMMAND_LINE_TARGETS ) == 1 and str( COMMAND_LINE_TARGETS[0] ) in [ "mongod" , "mongos" , "test" ] )
nix = False
-useJavaHome = False
linux = False
linux64 = False
darwin = False
windows = False
freebsd = False
+openbsd = False
solaris = False
force64 = not GetOption( "force64" ) is None
if not force64 and os.getcwd().endswith( "mongo-64" ):
@@ -293,14 +299,14 @@ static = not GetOption( "static" ) is None
debugBuild = ( not GetOption( "debugBuild" ) is None ) or ( not GetOption( "debugBuildAndLogging" ) is None )
debugLogging = not GetOption( "debugBuildAndLogging" ) is None
noshell = not GetOption( "noshell" ) is None
-nojni = not GetOption( "nojni" ) is None
usesm = not GetOption( "usesm" ) is None
usev8 = not GetOption( "usev8" ) is None
-usejvm = not GetOption( "usejvm" ) is None
asio = not GetOption( "asio" ) is None
+usePCH = not GetOption( "usePCH" ) is None
+
justClientLib = (COMMAND_LINE_TARGETS == ['mongoclient'])
env = Environment( MSVS_ARCH=msarch , tools = ["default", "gch"], toolpath = '.' )
@@ -309,9 +315,15 @@ if GetOption( "cxx" ) is not None:
env["CXX"] = GetOption( "cxx" )
env["LIBPATH"] = []
+if GetOption( "libpath" ) is not None:
+ env["LIBPATH"] = [GetOption( "libpath" )]
+
+if GetOption( "cpppath" ) is not None:
+ env["CPPPATH"] = [GetOption( "cpppath" )]
+
if GetOption( "recstore" ) != None:
env.Append( CPPDEFINES=[ "_RECSTORE" ] )
-env.Append( CPPDEFINES=[ "_SCONS" ] )
+env.Append( CPPDEFINES=[ "_SCONS" , "MONGO_EXPOSE_MACROS" ] )
env.Append( CPPPATH=[ "." ] )
@@ -330,13 +342,11 @@ if boostVersion is None:
else:
boostVersion = "-" + boostVersion
-if ( usesm and usejvm ):
- print( "can't say usesm and usejvm at the same time" )
- Exit(1)
-
-if ( not ( usesm or usejvm or usev8 or justClientLib) ):
+if ( not ( usesm or usev8 or justClientLib) ):
usesm = True
+distBuild = len( COMMAND_LINE_TARGETS ) == 1 and ( str( COMMAND_LINE_TARGETS[0] ) == "s3dist" or str( COMMAND_LINE_TARGETS[0] ) == "dist" )
+
extraLibPlaces = []
def addExtraLibs( s ):
@@ -357,15 +367,52 @@ if GetOption( "extralib" ) is not None:
for x in GetOption( "extralib" ).split( "," ):
env.Append( LIBS=[ x ] )
+class InstallSetup:
+ binaries = False
+ clientSrc = False
+ headers = False
+ bannerDir = None
+ headerRoot = "include"
+
+ def __init__(self):
+ self.default()
+
+ def default(self):
+ self.binaries = True
+ self.libraries = False
+ self.clientSrc = False
+ self.headers = False
+ self.bannerDir = None
+ self.headerRoot = "include"
+ self.clientTestsDir = None
+
+ def justClient(self):
+ self.binaries = False
+ self.libraries = False
+ self.clientSrc = True
+ self.headers = True
+ self.bannerDir = "distsrc/client/"
+ self.headerRoot = ""
+ self.clientTestsDir = "client/examples/"
+
+installSetup = InstallSetup()
+if distBuild:
+ installSetup.bannerDir = "distsrc"
+
+if GetOption( "full" ):
+ installSetup.headers = True
+ installSetup.libraries = True
+
+
# ------ SOURCE FILE SETUP -----------
-commonFiles = Split( "stdafx.cpp buildinfo.cpp db/common.cpp db/jsobj.cpp db/json.cpp db/lasterror.cpp db/nonce.cpp db/queryutil.cpp shell/mongo.cpp" )
-commonFiles += [ "util/background.cpp" , "util/mmap.cpp" , "util/sock.cpp" , "util/util.cpp" , "util/message.cpp" ,
- "util/assert_util.cpp" , "util/httpclient.cpp" , "util/md5main.cpp" , "util/base64.cpp", "util/debug_util.cpp",
- "util/thread_pool.cpp" ]
+commonFiles = Split( "pch.cpp buildinfo.cpp db/common.cpp db/jsobj.cpp db/json.cpp db/lasterror.cpp db/nonce.cpp db/queryutil.cpp shell/mongo.cpp" )
+commonFiles += [ "util/background.cpp" , "util/mmap.cpp" , "util/ramstore.cpp", "util/sock.cpp" , "util/util.cpp" , "util/message.cpp" ,
+ "util/assert_util.cpp" , "util/log.cpp" , "util/httpclient.cpp" , "util/md5main.cpp" , "util/base64.cpp", "util/concurrency/vars.cpp", "util/concurrency/task.cpp", "util/debug_util.cpp",
+ "util/concurrency/thread_pool.cpp", "util/password.cpp", "util/version.cpp",
+ "util/histogram.cpp", "util/concurrency/spin_lock.cpp", "util/text.cpp" , "util/stringutils.cpp" , "util/processinfo.cpp" ]
commonFiles += Glob( "util/*.c" )
-commonFiles += Split( "client/connpool.cpp client/dbclient.cpp client/model.cpp client/parallel.cpp client/syncclusterconnection.cpp" )
-commonFiles += [ "scripting/engine.cpp" , "scripting/utils.cpp" ]
+commonFiles += Split( "client/connpool.cpp client/dbclient.cpp client/dbclientcursor.cpp client/model.cpp client/syncclusterconnection.cpp client/distlock.cpp s/shardconnection.cpp" )
#mmap stuff
@@ -382,39 +429,47 @@ else:
commonFiles += [ "util/processinfo_none.cpp" ]
coreDbFiles = [ "db/commands.cpp" ]
-coreServerFiles = [ "util/message_server_port.cpp" , "util/message_server_asio.cpp" ]
+coreServerFiles = [ "util/message_server_port.cpp" ,
+ "client/parallel.cpp" ,
+ "util/miniwebserver.cpp" , "db/dbwebserver.cpp" ,
+ "db/matcher.cpp" , "db/indexkey.cpp" , "db/dbcommands_generic.cpp" ]
+
+if GetOption( "asio" ) != None:
+ coreServerFiles += [ "util/message_server_asio.cpp" ]
-serverOnlyFiles = Split( "db/query.cpp db/update.cpp db/introspect.cpp db/btree.cpp db/clientcursor.cpp db/tests.cpp db/repl.cpp db/btreecursor.cpp db/cloner.cpp db/namespace.cpp db/matcher.cpp db/dbeval.cpp db/dbwebserver.cpp db/dbhelpers.cpp db/instance.cpp db/database.cpp db/pdfile.cpp db/cursor.cpp db/security_commands.cpp db/client.cpp db/security.cpp util/miniwebserver.cpp db/storage.cpp db/reccache.cpp db/queryoptimizer.cpp db/extsort.cpp db/mr.cpp s/d_util.cpp db/cmdline.cpp" )
-serverOnlyFiles += [ "db/index.cpp" ] + Glob( "db/index_*.cpp" )
+serverOnlyFiles = Split( "db/query.cpp db/update.cpp db/introspect.cpp db/btree.cpp db/clientcursor.cpp db/tests.cpp db/repl.cpp db/repl/rs.cpp db/repl/consensus.cpp db/repl/rs_initiate.cpp db/repl/replset_commands.cpp db/repl/manager.cpp db/repl/health.cpp db/repl/heartbeat.cpp db/repl/rs_config.cpp db/repl/rs_rollback.cpp db/repl/rs_sync.cpp db/repl/rs_initialsync.cpp db/oplog.cpp db/repl_block.cpp db/btreecursor.cpp db/cloner.cpp db/namespace.cpp db/cap.cpp db/matcher_covered.cpp db/dbeval.cpp db/restapi.cpp db/dbhelpers.cpp db/instance.cpp db/client.cpp db/database.cpp db/pdfile.cpp db/cursor.cpp db/security_commands.cpp db/security.cpp db/storage.cpp db/queryoptimizer.cpp db/extsort.cpp db/mr.cpp s/d_util.cpp db/cmdline.cpp" )
-serverOnlyFiles += Glob( "db/dbcommands*.cpp" )
-serverOnlyFiles += Glob( "db/stats/*.cpp" )
+serverOnlyFiles += [ "db/index.cpp" ] + Glob( "db/geo/*.cpp" )
+
+serverOnlyFiles += [ "db/dbcommands.cpp" , "db/dbcommands_admin.cpp" ]
+coreServerFiles += Glob( "db/stats/*.cpp" )
serverOnlyFiles += [ "db/driverHelpers.cpp" ]
+scriptingFiles = [ "scripting/engine.cpp" , "scripting/utils.cpp" ]
+
if usesm:
- commonFiles += [ "scripting/engine_spidermonkey.cpp" ]
- nojni = True
+ scriptingFiles += [ "scripting/engine_spidermonkey.cpp" ]
elif usev8:
- commonFiles += [ Glob( "scripting/*v8*.cpp" ) ]
- nojni = True
-elif not (nojni or justClientLib) :
- commonFiles += [ "scripting/engine_java.cpp" ]
+ scriptingFiles += [ Glob( "scripting/*v8*.cpp" ) ]
else:
- commonFiles += [ "scripting/engine_none.cpp" ]
- nojni = True
+ scriptingFiles += [ "scripting/engine_none.cpp" ]
-coreShardFiles = []
-shardServerFiles = coreShardFiles + Glob( "s/strategy*.cpp" ) + [ "s/commands_admin.cpp" , "s/commands_public.cpp" , "s/request.cpp" , "s/cursors.cpp" , "s/server.cpp" , "s/chunk.cpp" , "s/shardkey.cpp" , "s/config.cpp" , "s/s_only.cpp" , "db/cmdline.cpp" ]
-serverOnlyFiles += coreShardFiles + [ "s/d_logic.cpp" ]
+coreServerFiles += scriptingFiles
+
+coreShardFiles = [ "s/config.cpp" , "s/grid.cpp" , "s/chunk.cpp" , "s/shard.cpp" , "s/shardkey.cpp" ]
+shardServerFiles = coreShardFiles + Glob( "s/strategy*.cpp" ) + [ "s/commands_admin.cpp" , "s/commands_public.cpp" , "s/request.cpp" , "s/cursors.cpp" , "s/server.cpp" , "s/config_migrate.cpp" , "s/s_only.cpp" , "s/stats.cpp" , "s/balance.cpp" , "s/balancer_policy.cpp" , "db/cmdline.cpp" ]
+serverOnlyFiles += coreShardFiles + [ "s/d_logic.cpp" , "s/d_writeback.cpp" , "s/d_migrate.cpp" , "s/d_state.cpp" , "s/d_split.cpp" , "client/distlock_test.cpp" ]
serverOnlyFiles += [ "db/module.cpp" ] + Glob( "db/modules/*.cpp" )
modules = []
+moduleNames = []
for x in os.listdir( "db/modules/" ):
if x.find( "." ) >= 0:
continue
print( "adding module: " + x )
+ moduleNames.append( x )
modRoot = "db/modules/" + x + "/"
serverOnlyFiles += Glob( modRoot + "src/*.cpp" )
modBuildFile = modRoot + "build.py"
@@ -423,8 +478,6 @@ for x in os.listdir( "db/modules/" ):
allClientFiles = commonFiles + coreDbFiles + [ "client/clientOnly.cpp" , "client/gridfs.cpp" , "s/d_util.cpp" ];
-allCXXFiles = allClientFiles + coreShardFiles + shardServerFiles + serverOnlyFiles;
-
# ---- other build setup -----
platform = os.sys.platform
@@ -438,23 +491,24 @@ if force32:
if force64:
processor = "x86_64"
-DEFAULT_INSTALl_DIR = "/usr/local"
-installDir = DEFAULT_INSTALl_DIR
+DEFAULT_INSTALL_DIR = "/usr/local"
+installDir = DEFAULT_INSTALL_DIR
nixLibPrefix = "lib"
distName = GetOption( "distname" )
dontReplacePackage = False
-javaHome = GetOption( "javaHome" )
-javaVersion = "i386";
-javaLibs = []
-
-distBuild = len( COMMAND_LINE_TARGETS ) == 1 and ( str( COMMAND_LINE_TARGETS[0] ) == "s3dist" or str( COMMAND_LINE_TARGETS[0] ) == "dist" )
if distBuild:
release = True
+def isDriverBuild():
+ return GetOption( "prefix" ) and GetOption( "prefix" ).find( "mongo-cxx-driver" ) >= 0
+
if GetOption( "prefix" ):
installDir = GetOption( "prefix" )
+ if isDriverBuild():
+ installSetup.justClient()
+
def findVersion( root , choices ):
if not isinstance(root, list):
@@ -478,12 +532,6 @@ if "darwin" == os.sys.platform:
darwin = True
platform = "osx" # prettier than darwin
- if usejvm:
- env.Append( CPPPATH=[ "-I/System/Library/Frameworks/JavaVM.framework/Versions/CurrentJDK/Headers/" ] )
-
- if not nojni:
- env.Append( FRAMEWORKS=["JavaVM"] )
-
if env["CXX"] is None:
if os.path.exists( "/usr/bin/g++-4.2" ):
env["CXX"] = "g++-4.2"
@@ -493,7 +541,7 @@ if "darwin" == os.sys.platform:
if force64:
env.Append( CPPPATH=["/usr/64/include"] )
env.Append( LIBPATH=["/usr/64/lib"] )
- if installDir == DEFAULT_INSTALl_DIR and not distBuild:
+ if installDir == DEFAULT_INSTALL_DIR and not distBuild:
installDir = "/usr/64/"
else:
env.Append( CPPPATH=filterExists(["/sw/include" , "/opt/local/include"]) )
@@ -501,15 +549,10 @@ if "darwin" == os.sys.platform:
elif "linux2" == os.sys.platform:
linux = True
- useJavaHome = True
- javaOS = "linux"
platform = "linux"
- javaHome = choosePathExist( [ javaHome , "/usr/lib/jvm/java/" , os.environ.get( "JAVA_HOME" ) ] , "/usr/lib/jvm/java/" )
-
if os.uname()[4] == "x86_64" and not force32:
linux64 = True
- javaVersion = "amd64"
nixLibPrefix = "lib64"
env.Append( LIBPATH=["/usr/lib64" , "/lib64" ] )
env.Append( LIBS=["pthread"] )
@@ -527,10 +570,7 @@ elif "linux2" == os.sys.platform:
elif "sunos5" == os.sys.platform:
nix = True
solaris = True
- useJavaHome = True
- javaHome = "/usr/lib/jvm/java-6-sun/"
- javaOS = "solaris"
- env.Append( CPPDEFINES=[ "__linux__" , "__sunos__" ] )
+ env.Append( CPPDEFINES=[ "__sunos__" ] )
env.Append( LIBS=["socket","resolv"] )
elif os.sys.platform.startswith( "freebsd" ):
@@ -540,13 +580,21 @@ elif os.sys.platform.startswith( "freebsd" ):
env.Append( LIBPATH=[ "/usr/local/lib" ] )
env.Append( CPPDEFINES=[ "__freebsd__" ] )
+elif os.sys.platform.startswith( "openbsd" ):
+ nix = True
+ openbsd = True
+ env.Append( CPPPATH=[ "/usr/local/include" ] )
+ env.Append( LIBPATH=[ "/usr/local/lib" ] )
+ env.Append( CPPDEFINES=[ "__openbsd__" ] )
+
elif "win32" == os.sys.platform:
windows = True
- if force64:
- release = True
+ #if force64:
+ # release = True
for pathdir in env['ENV']['PATH'].split(os.pathsep):
if os.path.exists(os.path.join(pathdir, 'cl.exe')):
+ print( "found visual studio at " + pathdir )
break
else:
#use current environment
@@ -554,75 +602,101 @@ elif "win32" == os.sys.platform:
def find_boost():
for x in ('', ' (x86)'):
+ boostDir = "C:/Program Files" + x + "/boost/latest"
+ if os.path.exists( boostDir ):
+ return boostDir
for bv in reversed( range(33,50) ):
for extra in ('', '_0', '_1'):
boostDir = "C:/Program Files" + x + "/Boost/boost_1_" + str(bv) + extra
if os.path.exists( boostDir ):
return boostDir
if os.path.exists( "C:/boost" ):
- return "C:/boost"
+ return "C:/boost"
if os.path.exists( "/boost" ):
- return "/boost"
+ return "/boost"
return None
-
boostDir = find_boost()
if boostDir is None:
print( "can't find boost" )
Exit(1)
-
- if force64 and os.path.exists( boostDir + "/lib/vs2010_64" ):
- env.Append( LIBPATH=[ boostDir + "/lib/vs2010_64" ] )
- elif not force64 and os.path.exists( boostDir + "/lib/vs2010_32" ):
- env.Append( LIBPATH=[ boostDir + "/lib/vs2010_32" ] )
else:
- env.Append( LIBPATH=[ boostDir + "/Lib" ] )
-
+ print( "boost found at '" + boostDir + "'" )
serverOnlyFiles += [ "util/ntservice.cpp" ]
boostLibs = []
- if usesm:
- env.Append( CPPPATH=[ "js/src/" ] )
- env.Append(CPPPATH=["../js/src/"])
- env.Append(LIBPATH=["../js/src"])
- env.Append( CPPDEFINES=[ "OLDJS" ] )
- elif not justClientLib:
- javaHome = findVersion( "C:/Program Files/java/" ,
- [ "jdk" , "jdk1.6.0_10" ] )
- env.Append( CPPPATH=[ javaHome + "/include" , javaHome + "/include/win32" ] )
- env.Append( LIBPATH=[ javaHome + "/Lib" ] )
- javaLibs += [ "jvm" ];
+ env.Append(CPPPATH=[ "js/src/" ])
+ env.Append(CPPPATH=["../js/src/"])
+ env.Append(LIBPATH=["../js/src"])
+ env.Append(LIBPATH=["../js/"])
+
+ env.Append( CPPDEFINES=[ "OLDJS" ] )
+ env.Append( CPPDEFINES=[ "_UNICODE" ] )
+ env.Append( CPPDEFINES=[ "UNICODE" ] )
winSDKHome = findVersion( [ "C:/Program Files/Microsoft SDKs/Windows/", "C:/Program Files (x86)/Microsoft SDKs/Windows/" ] ,
- [ "v6.0" , "v6.0a" , "v6.1", "v7.0A" ] )
+ [ "v7.0A", "v7.0", "v6.1", "v6.0a", "v6.0" ] )
+ print( "Windows SDK Root '" + winSDKHome + "'" )
env.Append( CPPPATH=[ boostDir , "pcre-7.4" , winSDKHome + "/Include" ] )
+ # consider adding /MP (build with multiple processes) option.
+
+ # /EHsc exception handling style for visual studio
+ # /W3 warning level
env.Append( CPPFLAGS=" /EHsc /W3 " )
- env.Append( CPPFLAGS=" /wd4355 /wd4800 " ) #some warnings we don't like
- env.Append( CPPDEFINES=["WIN32","_CONSOLE","_CRT_SECURE_NO_WARNINGS","HAVE_CONFIG_H","PCRE_STATIC","_UNICODE","UNICODE","SUPPORT_UCP","SUPPORT_UTF8,PSAPI_VERSION=1" ] )
- #env.Append( CPPFLAGS=' /Yu"stdafx.h" ' ) # this would be for pre-compiled headers, could play with it later
+ # some warnings we don't like:
+ env.Append( CPPFLAGS=" /wd4355 /wd4800 /wd4267 /wd4244 " )
+
+ env.Append( CPPDEFINES=["WIN32","_CONSOLE","_CRT_SECURE_NO_WARNINGS","HAVE_CONFIG_H","PCRE_STATIC","SUPPORT_UCP","SUPPORT_UTF8,PSAPI_VERSION=1" ] )
+
+ #env.Append( CPPFLAGS=' /Yu"pch.h" ' ) # this would be for pre-compiled headers, could play with it later
+ # docs say don't use /FD from command line
+ # /Gy function level linking
+ # /Gm is minimal rebuild, but may not work in parallel mode.
if release:
env.Append( CPPDEFINES=[ "NDEBUG" ] )
- env.Append( CPPFLAGS= " /O2 /Oi /FD /MT /Gy /nologo /Zi /TP /errorReport:prompt /Gm " )
- #env.Append( CPPFLAGS= " /GL " ) # TODO: this has caused some linking problems
+ env.Append( CPPFLAGS= " /O2 /MT /Gy /Zi /TP /errorReport:none " )
+ # TODO: this has caused some linking problems :
+ # /GL whole program optimization
+ # /LTCG link time code generation
+ env.Append( CPPFLAGS= " /GL " )
+ env.Append( LINKFLAGS=" /LTCG " )
else:
env.Append( CPPDEFINES=[ "_DEBUG" ] )
- env.Append( CPPFLAGS=" /Od /Gm /RTC1 /MDd /ZI " )
+ # /Od disable optimization
+ # /ZI debug info w/edit & continue
+ # /TP it's a c++ file
+ # RTC1 /GZ (Enable Stack Frame Run-Time Error Checking)
+ env.Append( CPPFLAGS=" /Od /RTC1 /MDd /Z7 /TP /errorReport:none " )
env.Append( CPPFLAGS=' /Fd"mongod.pdb" ' )
- env.Append( LINKFLAGS=" /incremental:yes /debug " )
+ env.Append( LINKFLAGS=" /debug " )
+
+ if os.path.exists("../readline/lib") :
+ env.Append( LIBPATH=["../readline/lib"] )
+ env.Append( CPPPATH=["../readline/include"] )
+
+ if force64 and os.path.exists( boostDir + "/lib/vs2010_64" ):
+ env.Append( LIBPATH=[ boostDir + "/lib/vs2010_64" ] )
+ elif not force64 and os.path.exists( boostDir + "/lib/vs2010_32" ):
+ env.Append( LIBPATH=[ boostDir + "/lib/vs2010_32" ] )
+ else:
+ env.Append( LIBPATH=[ boostDir + "/Lib" ] )
- env.Append( LIBPATH=[ boostDir + "/Lib" ] )
if force64:
env.Append( LIBPATH=[ winSDKHome + "/Lib/x64" ] )
- env.Append( LINKFLAGS=" /NODEFAULTLIB:MSVCPRT /NODEFAULTLIB:MSVCRT " )
else:
env.Append( LIBPATH=[ winSDKHome + "/Lib" ] )
+ if release:
+ #env.Append( LINKFLAGS=" /NODEFAULTLIB:MSVCPRT /NODEFAULTLIB:MSVCRTD " )
+ env.Append( LINKFLAGS=" /NODEFAULTLIB:MSVCPRT " )
+ else:
+ env.Append( LINKFLAGS=" /NODEFAULTLIB:MSVCPRT /NODEFAULTLIB:MSVCRT " )
def pcreFilter(x):
name = x.name
@@ -647,17 +721,21 @@ elif "win32" == os.sys.platform:
winLibString = "ws2_32.lib kernel32.lib advapi32.lib Psapi.lib"
if force64:
- winLibString += " LIBCMT LIBCPMT "
+
+ winLibString += ""
+ #winLibString += " LIBCMT LIBCPMT "
+
else:
winLibString += " user32.lib gdi32.lib winspool.lib comdlg32.lib shell32.lib ole32.lib oleaut32.lib "
winLibString += " odbc32.lib odbccp32.lib uuid.lib "
env.Append( LIBS=Split(winLibString) )
- if force64:
- env.Append( CPPDEFINES=["_AMD64_=1"] )
- else:
- env.Append( CPPDEFINES=["_X86_=1"] )
+ # dm these should automatically be defined by the compiler. commenting out to see if it works. jun2010
+ #if force64:
+ # env.Append( CPPDEFINES=["_AMD64_=1"] )
+ #else:
+ # env.Append( CPPDEFINES=["_X86_=1"] )
env.Append( CPPPATH=["../winpcap/Include"] )
env.Append( LIBPATH=["../winpcap/Lib"] )
@@ -665,24 +743,20 @@ elif "win32" == os.sys.platform:
else:
print( "No special config for [" + os.sys.platform + "] which probably means it won't work" )
-if not nojni and useJavaHome:
- env.Append( CPPPATH=[ javaHome + "include" , javaHome + "include/" + javaOS ] )
- env.Append( LIBPATH=[ javaHome + "jre/lib/" + javaVersion + "/server" , javaHome + "jre/lib/" + javaVersion ] )
-
- if not nojni:
- javaLibs += [ "java" , "jvm" ]
-
- env.Append( LINKFLAGS="-Xlinker -rpath -Xlinker " + javaHome + "jre/lib/" + javaVersion + "/server" )
- env.Append( LINKFLAGS="-Xlinker -rpath -Xlinker " + javaHome + "jre/lib/" + javaVersion )
-
if nix:
env.Append( CPPFLAGS="-fPIC -fno-strict-aliasing -ggdb -pthread -Wall -Wsign-compare -Wno-unknown-pragmas -Winvalid-pch" )
+ if linux:
+ env.Append( CPPFLAGS=" -Werror " )
env.Append( CXXFLAGS=" -Wnon-virtual-dtor " )
env.Append( LINKFLAGS=" -fPIC -pthread -rdynamic" )
env.Append( LIBS=[] )
+ if linux and GetOption( "sharedclient" ):
+ env.Append( LINKFLAGS=" -Wl,--as-needed -Wl,-zdefs " )
+
if debugBuild:
env.Append( CPPFLAGS=" -O0 -fstack-protector " );
+ env['ENV']['GLIBCXX_FORCE_NEW'] = 1; # play nice with valgrind
else:
env.Append( CPPFLAGS=" -O3" )
@@ -706,11 +780,12 @@ if nix:
env.Append( CPPDEFINES=["USE_GDBSERVER"] )
# pre-compiled headers
- if False and 'Gch' in dir( env ):
+ if usePCH and 'Gch' in dir( env ):
print( "using precompiled headers" )
- env['Gch'] = env.Gch( [ "stdafx.h" ] )[0]
- #Depends( "stdafx.o" , "stdafx.h.gch" )
- #SideEffect( "dummyGCHSideEffect" , "stdafx.h.gch" )
+ env['Gch'] = env.Gch( [ "pch.h" ] )[0]
+ elif os.path.exists('pch.h.gch'):
+ print( "removing precompiled headers" )
+ os.unlink('pch.h.gch') # gcc uses the file if it exists
if usev8:
env.Append( CPPPATH=["../v8/include/"] )
@@ -729,46 +804,6 @@ except OSError:
# --- check system ---
-def getGitBranch():
- if not os.path.exists( ".git" ):
- return None
-
- version = open( ".git/HEAD" ,'r' ).read().strip()
- if not version.startswith( "ref: " ):
- return version
- version = version.split( "/" )
- version = version[len(version)-1]
- return version
-
-def getGitBranchString( prefix="" , postfix="" ):
- t = re.compile( '[/\\\]' ).split( os.getcwd() )
- if len(t) > 2 and t[len(t)-1] == "mongo":
- par = t[len(t)-2]
- m = re.compile( ".*_([vV]\d+\.\d+)$" ).match( par )
- if m is not None:
- return prefix + m.group(1).lower() + postfix
- if par.find("Nightly") > 0:
- return ""
-
-
- b = getGitBranch()
- if b == None or b == "master":
- return ""
- return prefix + b + postfix
-
-def getGitVersion():
- if not os.path.exists( ".git" ):
- return "nogitversion"
-
- version = open( ".git/HEAD" ,'r' ).read().strip()
- if not version.startswith( "ref: " ):
- return version
- version = version[5:]
- f = ".git/" + version
- if not os.path.exists( f ):
- return version
- return open( f , 'r' ).read().strip()
-
def getSysInfo():
if windows:
return "windows " + str( sys.getwindowsversion() )
@@ -781,14 +816,16 @@ def add_exe(target):
return target
def setupBuildInfoFile( outFile ):
- version = getGitVersion()
+ version = utils.getGitVersion()
+ if len(moduleNames) > 0:
+ version = version + " modules: " + ','.join( moduleNames )
sysInfo = getSysInfo()
contents = '\n'.join([
- '#include "stdafx.h"',
+ '#include "pch.h"',
'#include <iostream>',
'#include <boost/version.hpp>',
'namespace mongo { const char * gitVersion(){ return "' + version + '"; } }',
- 'namespace mongo { const char * sysInfo(){ return "' + sysInfo + ' BOOST_LIB_VERSION=" BOOST_LIB_VERSION ; } }',
+ 'namespace mongo { string sysInfo(){ return "' + sysInfo + ' BOOST_LIB_VERSION=" BOOST_LIB_VERSION ; } }',
])
contents += '\n';
@@ -811,7 +848,7 @@ def bigLibString( myenv ):
return s
-def doConfigure( myenv , needJava=True , needPcre=True , shell=False ):
+def doConfigure( myenv , needPcre=True , shell=False ):
conf = Configure(myenv)
myenv["LINKFLAGS_CLEAN"] = list( myenv["LINKFLAGS"] )
myenv["LIBS_CLEAN"] = list( myenv["LIBS"] )
@@ -826,7 +863,7 @@ def doConfigure( myenv , needJava=True , needPcre=True , shell=False ):
print( "can't find stdc++ library which is needed" );
Exit(1)
- def myCheckLib( poss , failIfNotFound=False , java=False , staticOnly=False):
+ def myCheckLib( poss , failIfNotFound=False , staticOnly=False):
if type( poss ) != types.ListType :
poss = [poss]
@@ -847,7 +884,7 @@ def doConfigure( myenv , needJava=True , needPcre=True , shell=False ):
return True
- if release and not java and not windows and failIfNotFound:
+ if release and not windows and failIfNotFound:
print( "ERROR: can't find static version of: " + str( poss ) + " in: " + str( allPlaces ) )
Exit(1)
@@ -856,7 +893,10 @@ def doConfigure( myenv , needJava=True , needPcre=True , shell=False ):
return True
if failIfNotFound:
- print( "can't find library " + str( poss ) + " in " + str( myenv["LIBPATH"] ) )
+ print( "can't find or link against library " + str( poss ) + " in " + str( myenv["LIBPATH"] ) )
+ print( "see config.log for more information" )
+ if windows:
+ print( "use scons --64 when cl.exe is 64 bit compiler" )
Exit(1)
return False
@@ -878,7 +918,7 @@ def doConfigure( myenv , needJava=True , needPcre=True , shell=False ):
else:
print( "WARNING: old version of boost - you should consider upgrading" )
- # this will add it iff it exists and works
+ # this will add it if it exists and works
myCheckLib( [ "boost_system" + boostCompiler + "-mt" + boostVersion ,
"boost_system" + boostCompiler + boostVersion ] )
@@ -891,10 +931,6 @@ def doConfigure( myenv , needJava=True , needPcre=True , shell=False ):
if not conf.CheckCXXHeader( "execinfo.h" ):
myenv.Append( CPPDEFINES=[ "NOEXECINFO" ] )
- if needJava:
- for j in javaLibs:
- myCheckLib( j , True , True )
-
if nix and needPcre:
myCheckLib( "pcrecpp" , True )
myCheckLib( "pcre" , True )
@@ -917,7 +953,24 @@ def doConfigure( myenv , needJava=True , needPcre=True , shell=False ):
if usesm:
- myCheckLib( [ "mozjs" , "js", "js_static" ] , True )
+ # see http://www.mongodb.org/pages/viewpageattachments.action?pageId=12157032
+ J = [ "mozjs" , "js", "js_static" ]
+ if windows:
+ if msarch == "amd64":
+ if release:
+ J = [ "js64r", "js", "mozjs" , "js_static" ]
+ else:
+ J = "js64d"
+ print( "looking for js64d.lib for spidermonkey. (available at mongodb.org prebuilt)" );
+ else:
+ if not force32:
+ print( "Assuming a 32 bit build is desired" )
+ if release:
+ J = [ "js32r", "js", "mozjs" , "js_static" ]
+ else:
+ J = [ "js32d", "js", "mozjs" , "js_static" ]
+
+ myCheckLib( J , True )
mozHeader = "js"
if bigLibString(myenv).find( "mozjs" ) >= 0:
mozHeader = "mozjs"
@@ -944,18 +997,22 @@ def doConfigure( myenv , needJava=True , needPcre=True , shell=False ):
myCheckLib( "ncurses" , True )
else:
myenv.Append( LINKFLAGS=" /usr/lib/libreadline.dylib " )
+ elif openbsd:
+ myenv.Append( CPPDEFINES=[ "USE_READLINE" ] )
+ myCheckLib( "termcap" , True )
+ myCheckLib( "readline" , True )
elif myCheckLib( "readline" , release and nix , staticOnly=release ):
myenv.Append( CPPDEFINES=[ "USE_READLINE" ] )
myCheckLib( "ncurses" , staticOnly=release )
myCheckLib( "tinfo" , staticOnly=release )
else:
- print( "warning: no readline, shell will be a bit ugly" )
+ print( "\n*** notice: no readline library, mongo shell will not have nice interactive line editing ***\n" )
if linux:
myCheckLib( "rt" , True )
# requires ports devel/libexecinfo to be installed
- if freebsd:
+ if freebsd or openbsd:
myCheckLib( "execinfo", True )
env.Append( LIBS=[ "execinfo" ] )
@@ -1005,7 +1062,13 @@ def concatjs(target, source, env):
for s in source:
f = open( str(s) , 'r' )
for l in f:
- l = l.split("//")[0].strip()
+
+ #strip comments. special case if // is potentially in a string
+ parts = l.split("//", 1)
+ if (len(parts) > 1) and ('"' not in parts[1]) and ("'" not in parts[1]):
+ l = parts[0]
+
+ l = l.strip()
if len ( l ) == 0:
continue
@@ -1047,7 +1110,6 @@ def jsToH(target, source, env):
for l in open( str(source[0]) , 'r' ):
l = l.strip()
- l = l.split( "//" )[0]
l = l.replace( '\\' , "\\\\" )
l = l.replace( '"' , "\\\"" )
@@ -1058,6 +1120,13 @@ def jsToH(target, source, env):
out = open( outFile , 'w' )
out.write( h )
+ out.close()
+
+ # mongo_vstudio.cpp is in git as the .vcproj doesn't generate this file.
+ if outFile.find( "mongo.cpp" ) >= 0:
+ out = open( outFile.replace( "mongo" , "mongo_vstudio" ) , 'w' )
+ out.write( h )
+ out.close()
return None
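The quote check added to concatjs above is what keeps a // inside a string literal (typically a URL) from being stripped as a comment, now that jsToH no longer splits on // at all. A standalone sketch of the rule, runnable outside SCons:

    def strip_js_comment( l ):
        # drop everything after the first "//", unless the tail contains a
        # quote character, in which case the "//" may sit inside a string
        parts = l.split( "//", 1 )
        if ( len(parts) > 1 ) and ( '"' not in parts[1] ) and ( "'" not in parts[1] ):
            l = parts[0]
        return l.strip()

    strip_js_comment( 'x = 1; // counter' )            # -> 'x = 1;'
    strip_js_comment( 'u = "http://mongodb.org/";' )   # stays intact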
@@ -1074,6 +1143,7 @@ clientEnv = env.Clone();
clientEnv.Append( CPPPATH=["../"] )
clientEnv.Prepend( LIBS=[ "mongoclient"] )
clientEnv.Prepend( LIBPATH=["."] )
+#clientEnv["CPPDEFINES"].remove( "MONGO_EXPOSE_MACROS" )
l = clientEnv[ "LIBS" ]
removeIfInList( l , "pcre" )
removeIfInList( l , "pcrecpp" )
@@ -1083,7 +1153,6 @@ testEnv.Append( CPPPATH=["../"] )
testEnv.Prepend( LIBS=[ "mongotestfiles" ] )
testEnv.Prepend( LIBPATH=["."] )
-
# ----- TARGETS ------
def checkErrorCodes():
@@ -1095,16 +1164,18 @@ def checkErrorCodes():
checkErrorCodes()
# main db target
-mongod = env.Program( "mongod" , commonFiles + coreDbFiles + serverOnlyFiles + [ "db/db.cpp" ] )
+mongod = env.Program( "mongod" , commonFiles + coreDbFiles + coreServerFiles + serverOnlyFiles + [ "db/db.cpp" ] )
Default( mongod )
# tools
-allToolFiles = commonFiles + coreDbFiles + serverOnlyFiles + [ "client/gridfs.cpp", "tools/tool.cpp" ]
+allToolFiles = commonFiles + coreDbFiles + coreServerFiles + serverOnlyFiles + [ "client/gridfs.cpp", "tools/tool.cpp" ]
normalTools = [ "dump" , "restore" , "export" , "import" , "files" , "stat" ]
-env.Alias( "tools" , [ "mongo" + x for x in normalTools ] )
+env.Alias( "tools" , [ add_exe( "mongo" + x ) for x in normalTools ] )
for x in normalTools:
env.Program( "mongo" + x , allToolFiles + [ "tools/" + x + ".cpp" ] )
+#some special tools
+env.Program( "bsondump" , allToolFiles + [ "tools/bsondump.cpp" ] )
env.Program( "mongobridge" , allToolFiles + [ "tools/bridge.cpp" ] )
# mongos
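With add_exe applied in the alias above, "scons tools" now names mongodump.exe and friends on Windows as well. The new bsondump program is built from the same tool sources but kept out of the normal tools list; an illustrative build line (targets as defined just above):

    $ scons tools bsondump mongobridge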
@@ -1114,7 +1185,8 @@ mongos = env.Program( "mongos" , commonFiles + coreDbFiles + coreServerFiles + s
clientLibName = str( env.Library( "mongoclient" , allClientFiles )[0] )
if GetOption( "sharedclient" ):
sharedClientLibName = str( env.SharedLibrary( "mongoclient" , allClientFiles )[0] )
-env.Library( "mongotestfiles" , commonFiles + coreDbFiles + serverOnlyFiles + ["client/gridfs.cpp"])
+env.Library( "mongotestfiles" , commonFiles + coreDbFiles + coreServerFiles + serverOnlyFiles + ["client/gridfs.cpp"])
+env.Library( "mongoshellfiles" , allClientFiles + coreServerFiles )
clientTests = []
@@ -1124,9 +1196,12 @@ clientTests += [ clientEnv.Program( "secondExample" , [ "client/examples/second.
clientTests += [ clientEnv.Program( "whereExample" , [ "client/examples/whereExample.cpp" ] ) ]
clientTests += [ clientEnv.Program( "authTest" , [ "client/examples/authTest.cpp" ] ) ]
clientTests += [ clientEnv.Program( "httpClientTest" , [ "client/examples/httpClientTest.cpp" ] ) ]
+# clientTests += [ clientEnv.Program( "bsondemo" , [ "bson/bsondemo/bsondemo.cpp" ] ) ] #TODO
# testing
test = testEnv.Program( "test" , Glob( "dbtests/*.cpp" ) )
+if windows:
+ testEnv.Alias( "test" , "test.exe" )
perftest = testEnv.Program( "perftest", [ "dbtests/framework.cpp" , "dbtests/perf/perftest.cpp" ] )
clientTests += [ clientEnv.Program( "clientTest" , [ "client/examples/clientTest.cpp" ] ) ]
@@ -1149,7 +1224,6 @@ env.JSHeader( "shell/mongo.jsall" )
env.JSConcat( "shell/mongo-server.jsall" , [ "shell/servers.js"] )
env.JSHeader( "shell/mongo-server.jsall" )
-
shellEnv = env.Clone();
if release and ( ( darwin and force64 ) or linux64 ):
@@ -1172,9 +1246,6 @@ elif not onlyServer:
shellEnv.Append( LIBPATH=filterExists(["/sw/lib/", "/opt/local/lib" , "/usr/lib"]) )
l = shellEnv["LIBS"]
- if linux64:
- removeIfInList( l , "java" )
- removeIfInList( l , "jvm" )
removeIfInList( l , "pcre" )
removeIfInList( l , "pcrecpp" )
@@ -1182,22 +1253,25 @@ elif not onlyServer:
if windows:
shellEnv.Append( LIBS=["winmm.lib"] )
- coreShellFiles = [ "shell/dbshell.cpp" , "shell/utils.cpp" , "shell/mongo-server.cpp" ]
+ coreShellFiles = [ "shell/dbshell.cpp" , "shell/shell_utils.cpp" , "shell/mongo-server.cpp" ]
if weird:
shell32BitFiles = coreShellFiles
for f in allClientFiles:
shell32BitFiles.append( "32bit/" + str( f ) )
+ for f in scriptingFiles:
+ shell32BitFiles.append( "32bit/" + str( f ) )
shellEnv.VariantDir( "32bit" , "." )
+ shellEnv.Append( CPPPATH=["32bit/"] )
else:
shellEnv.Prepend( LIBPATH=[ "." ] )
- shellEnv = doConfigure( shellEnv , needPcre=False , needJava=False , shell=True )
+ shellEnv = doConfigure( shellEnv , needPcre=False , shell=True )
if weird:
mongo = shellEnv.Program( "mongo" , shell32BitFiles )
else:
- shellEnv.Prepend( LIBS=[ "mongoclient"] )
+ shellEnv.Prepend( LIBS=[ "mongoshellfiles"] )
mongo = shellEnv.Program( "mongo" , coreShellFiles )
if weird:
@@ -1207,185 +1281,85 @@ elif not onlyServer:
# ---- RUNNING TESTS ----
-testEnv.Alias( "dummySmokeSideEffect", [], [] )
+smokeEnv = testEnv.Clone()
+smokeEnv['ENV']['PATH']=os.environ['PATH']
+smokeEnv.Alias( "dummySmokeSideEffect", [], [] )
-def addSmoketest( name, deps, actions ):
- if type( actions ) == type( list() ):
- actions = [ testSetup ] + actions
- else:
- actions = [ testSetup, actions ]
- testEnv.Alias( name, deps, actions )
- testEnv.AlwaysBuild( name )
- # Prevent smoke tests from running in parallel
- testEnv.SideEffect( "dummySmokeSideEffect", name )
-
-def ensureDir( name ):
- d = os.path.dirname( name )
- if not os.path.exists( d ):
- print( "Creating dir: " + name );
- os.makedirs( d )
- if not os.path.exists( d ):
- print( "Failed to create dir: " + name );
- Exit( 1 )
-
-def ensureTestDirs():
- ensureDir( "/tmp/unittest/" )
- ensureDir( "/data/" )
- ensureDir( "/data/db/" )
-
-def testSetup( env , target , source ):
- ensureTestDirs()
-
-if len( COMMAND_LINE_TARGETS ) == 1 and str( COMMAND_LINE_TARGETS[0] ) == "test":
- ensureDir( "/tmp/unittest/" );
-
-addSmoketest( "smoke", [ add_exe( "test" ) ] , [ test[ 0 ].abspath ] )
-addSmoketest( "smokePerf", [ "perftest" ] , [ perftest[ 0 ].abspath ] )
-
-clientExec = [ x[0].abspath for x in clientTests ]
-def runClientTests( env, target, source ):
- global clientExec
- global mongodForTestsPort
- import subprocess
- for i in clientExec:
- if subprocess.call( [ i, "--port", mongodForTestsPort ] ) != 0:
- return True
- if subprocess.Popen( [ mongod[0].abspath, "msg", "ping", mongodForTestsPort ], stdout=subprocess.PIPE ).communicate()[ 0 ].count( "****ok" ) == 0:
- return True
- if subprocess.call( [ mongod[0].abspath, "msg", "ping", mongodForTestsPort ] ) != 0:
- return True
- return False
-addSmoketest( "smokeClient" , clientExec, runClientTests )
-addSmoketest( "mongosTest" , [ mongos[0].abspath ] , [ mongos[0].abspath + " --test" ] )
+smokeFlags = []
-def jsSpec( suffix ):
- import os.path
- args = [ os.path.dirname( mongo[0].abspath ), "jstests" ] + suffix
- return apply( os.path.join, args )
+# Ugh. Frobbing the smokeFlags must precede using them to construct
+# actions, I think.
+if GetOption( 'smokedbprefix') is not None:
+ smokeFlags += ['--smoke-db-prefix', GetOption( 'smokedbprefix')]
-def jsDirTestSpec( dir ):
- path = jsSpec( [ dir + '/*.js' ] )
- paths = [x.abspath for x in Glob( path ) ]
- return mongo[0].abspath + " --nodb " + ' '.join( paths )
+if 'startMongodSmallOplog' in COMMAND_LINE_TARGETS:
+ smokeFlags += ["--small-oplog"]
-def runShellTest( env, target, source ):
- global mongodForTestsPort
- import subprocess
- target = str( target[0] )
- if target == "smokeJs":
- spec = [ jsSpec( [ "_runner.js" ] ) ]
- elif target == "smokeQuota":
- g = Glob( jsSpec( [ "quota/*.js" ] ) )
- spec = [ x.abspath for x in g ]
- elif target == "smokeJsPerf":
- g = Glob( jsSpec( [ "perf/*.js" ] ) )
- spec = [ x.abspath for x in g ]
- elif target == "smokeJsSlow":
- spec = [x.abspath for x in Glob(jsSpec(["slow/*"]))]
- elif target == "smokeParallel":
- spec = [x.abspath for x in Glob(jsSpec(["parallel/*"]))]
- else:
- print( "invalid target for runShellTest()" )
- Exit( 1 )
- return subprocess.call( [ mongo[0].abspath, "--port", mongodForTestsPort ] + spec )
+def addTest(name, deps, actions):
+ smokeEnv.Alias( name, deps, actions )
+ smokeEnv.AlwaysBuild( name )
+ # Prevent smoke tests from running in parallel
+ smokeEnv.SideEffect( "dummySmokeSideEffect", name )
-# These tests require the mongo shell
-if not onlyServer and not noshell:
- addSmoketest( "smokeJs", [add_exe("mongo")], runShellTest )
- addSmoketest( "smokeClone", [ "mongo", "mongod" ], [ jsDirTestSpec( "clone" ) ] )
- addSmoketest( "smokeRepl", [ "mongo", "mongod", "mongobridge" ], [ jsDirTestSpec( "repl" ) ] )
- addSmoketest( "smokeDisk", [ add_exe( "mongo" ), add_exe( "mongod" ) ], [ jsDirTestSpec( "disk" ) ] )
- addSmoketest( "smokeAuth", [ add_exe( "mongo" ), add_exe( "mongod" ) ], [ jsDirTestSpec( "auth" ) ] )
- addSmoketest( "smokeParallel", [ add_exe( "mongo" ), add_exe( "mongod" ) ], runShellTest )
- addSmoketest( "smokeSharding", [ "mongo", "mongod", "mongos" ], [ jsDirTestSpec( "sharding" ) ] )
- addSmoketest( "smokeJsPerf", [ "mongo" ], runShellTest )
- addSmoketest("smokeJsSlow", [add_exe("mongo")], runShellTest)
- addSmoketest( "smokeQuota", [ "mongo" ], runShellTest )
- addSmoketest( "smokeTool", [ add_exe( "mongo" ) ], [ jsDirTestSpec( "tool" ) ] )
-
-mongodForTests = None
-mongodForTestsPort = "27017"
-
-def startMongodWithArgs(*args):
- global mongodForTests
- global mongodForTestsPort
- global mongod
- if mongodForTests:
- return
- mongodForTestsPort = "32000"
- import os
- ensureTestDirs()
- dirName = "/data/db/sconsTests/"
- ensureDir( dirName )
- from subprocess import Popen
- mongodForTests = Popen([mongod[0].abspath, "--port", mongodForTestsPort,
- "--dbpath", dirName] + list(args))
-
- if not utils.didMongodStart( 32000 ):
- print( "Failed to start mongod" )
- mongodForTests = None
- Exit( 1 )
-
-def startMongodForTests( env, target, source ):
- return startMongodWithArgs()
-
-def startMongodSmallOplog(env, target, source):
- return startMongodWithArgs("--master", "--oplogSize", "10")
-
-def stopMongodForTests():
- global mongodForTests
- if not mongodForTests:
- return
- if mongodForTests.poll() is not None:
- print( "Failed to start mongod" )
- mongodForTests = None
- Exit( 1 )
- try:
- # This function not available in Python 2.5
- mongodForTests.terminate()
- except AttributeError:
- if windows:
- import win32process
- win32process.TerminateProcess(mongodForTests._handle, -1)
- else:
- from os import kill
- kill( mongodForTests.pid, 15 )
- mongodForTests.wait()
+def addSmoketest( name, deps ):
+ addTest(name, deps, [ "python buildscripts/smoke.py " + " ".join(smokeFlags) + ' ' + name ])
-testEnv.Alias( "startMongod", [add_exe("mongod")], [startMongodForTests] );
-testEnv.AlwaysBuild( "startMongod" );
-testEnv.SideEffect( "dummySmokeSideEffect", "startMongod" )
+addSmoketest( "smoke", [ add_exe( "test" ) ] )
+addSmoketest( "smokePerf", [ "perftest" ] )
+addSmoketest( "smokeClient" , clientTests )
+addSmoketest( "mongosTest" , [ mongos[0].abspath ] )
-testEnv.Alias( "startMongodSmallOplog", [add_exe("mongod")], [startMongodSmallOplog] );
-testEnv.AlwaysBuild( "startMongodSmallOplog" );
-testEnv.SideEffect( "dummySmokeSideEffect", "startMongodSmallOplog" )
+# These tests require the mongo shell
+if not onlyServer and not noshell:
+ addSmoketest( "smokeJs", [add_exe("mongo")] )
+ addSmoketest( "smokeClone", [ "mongo", "mongod" ] )
+ addSmoketest( "smokeRepl", [ "mongo", "mongod", "mongobridge" ] )
+ addSmoketest( "smokeReplSets", [ "mongo", "mongod", "mongobridge" ] )
+ addSmoketest( "smokeDisk", [ add_exe( "mongo" ), add_exe( "mongod" ) ] )
+ addSmoketest( "smokeAuth", [ add_exe( "mongo" ), add_exe( "mongod" ) ] )
+ addSmoketest( "smokeParallel", [ add_exe( "mongo" ), add_exe( "mongod" ) ] )
+ addSmoketest( "smokeSharding", [ "mongo", "mongod", "mongos" ] )
+ addSmoketest( "smokeJsPerf", [ "mongo" ] )
+ addSmoketest("smokeJsSlowNightly", [add_exe("mongo")])
+ addSmoketest("smokeJsSlowWeekly", [add_exe("mongo")])
+ addSmoketest( "smokeQuota", [ "mongo" ] )
+ addSmoketest( "smokeTool", [ add_exe( "mongo" ) ] )
+
+# Note: although the test running logic has been moved to
+# buildscripts/smoke.py, the interface to running the tests has been
+# something like 'scons startMongod <suite>'; startMongod is now a
+# no-op, and should go away eventually.
+smokeEnv.Alias( "startMongod", [add_exe("mongod")]);
+smokeEnv.AlwaysBuild( "startMongod" );
+smokeEnv.SideEffect( "dummySmokeSideEffect", "startMongod" )
+
+smokeEnv.Alias( "startMongodSmallOplog", [add_exe("mongod")], [] );
+smokeEnv.AlwaysBuild( "startMongodSmallOplog" );
+smokeEnv.SideEffect( "dummySmokeSideEffect", "startMongodSmallOplog" )
def addMongodReqTargets( env, target, source ):
- mongodReqTargets = [ "smokeClient", "smokeJs", "smokeQuota" ]
+ mongodReqTargets = [ "smokeClient", "smokeJs" ]
for target in mongodReqTargets:
- testEnv.Depends( target, "startMongod" )
- testEnv.Depends( "smokeAll", target )
+ smokeEnv.Depends( target, "startMongod" )
+ smokeEnv.Depends( "smokeAll", target )
-testEnv.Alias( "addMongodReqTargets", [], [addMongodReqTargets] )
-testEnv.AlwaysBuild( "addMongodReqTargets" )
+smokeEnv.Alias( "addMongodReqTargets", [], [addMongodReqTargets] )
+smokeEnv.AlwaysBuild( "addMongodReqTargets" )
-testEnv.Alias( "smokeAll", [ "smoke", "mongosTest", "smokeClone", "smokeRepl", "addMongodReqTargets", "smokeDisk", "smokeAuth", "smokeSharding", "smokeTool" ] )
-testEnv.AlwaysBuild( "smokeAll" )
+smokeEnv.Alias( "smokeAll", [ "smoke", "mongosTest", "smokeClone", "smokeRepl", "addMongodReqTargets", "smokeDisk", "smokeAuth", "smokeSharding", "smokeTool" ] )
+smokeEnv.AlwaysBuild( "smokeAll" )
def addMongodReqNoJsTargets( env, target, source ):
mongodReqTargets = [ "smokeClient" ]
for target in mongodReqTargets:
- testEnv.Depends( target, "startMongod" )
- testEnv.Depends( "smokeAllNoJs", target )
-
-testEnv.Alias( "addMongodReqNoJsTargets", [], [addMongodReqNoJsTargets] )
-testEnv.AlwaysBuild( "addMongodReqNoJsTargets" )
+ smokeEnv.Depends( target, "startMongod" )
+ smokeEnv.Depends( "smokeAllNoJs", target )
-testEnv.Alias( "smokeAllNoJs", [ "smoke", "mongosTest", "addMongodReqNoJsTargets" ] )
-testEnv.AlwaysBuild( "smokeAllNoJs" )
+smokeEnv.Alias( "addMongodReqNoJsTargets", [], [addMongodReqNoJsTargets] )
+smokeEnv.AlwaysBuild( "addMongodReqNoJsTargets" )
-import atexit
-atexit.register( stopMongodForTests )
+smokeEnv.Alias( "smokeAllNoJs", [ "smoke", "mongosTest", "addMongodReqNoJsTargets" ] )
+smokeEnv.AlwaysBuild( "smokeAllNoJs" )
def recordPerformance( env, target, source ):
from buildscripts import benchmark_tools
@@ -1406,7 +1380,7 @@ def recordPerformance( env, target, source ):
sub = { "benchmark": { "project": "http://github.com/mongodb/mongo", "description": "" }, "trial": {} }
sub[ "benchmark" ][ "name" ] = name
sub[ "benchmark" ][ "tags" ] = [ "c++", re.match( "(.*)__", name ).group( 1 ) ]
- sub[ "trial" ][ "server_hash" ] = getGitVersion()
+ sub[ "trial" ][ "server_hash" ] = utils.getGitVersion()
sub[ "trial" ][ "client_hash" ] = ""
sub[ "trial" ][ "result" ] = val
try:
@@ -1416,7 +1390,7 @@ def recordPerformance( env, target, source ):
print( sys.exc_info() )
return False
-addSmoketest( "recordPerf", [ "perftest" ] , [ recordPerformance ] )
+addTest( "recordPerf", [ "perftest" ] , [ recordPerformance ] )
def run_shell_tests(env, target, source):
from buildscripts import test_shell
@@ -1426,14 +1400,27 @@ def run_shell_tests(env, target, source):
env.Alias("test_shell", [], [run_shell_tests])
env.AlwaysBuild("test_shell")
+# ---- Docs ----
+def build_docs(env, target, source):
+ from buildscripts import docs
+ docs.main()
+
+env.Alias("docs", [], [build_docs])
+env.AlwaysBuild("docs")
+
# ---- INSTALL -------
def getSystemInstallName():
n = platform + "-" + processor
if static:
n += "-static"
+ if GetOption("nostrip"):
+ n += "-debugsymbols"
if nix and os.uname()[2].startswith( "8." ):
n += "-tiger"
+
+ if len(moduleNames) > 0:
+ n += "-" + "-".join( moduleNames )
try:
import settings
@@ -1450,13 +1437,16 @@ def getSystemInstallName():
return n
def getCodeVersion():
- fullSource = open( "stdafx.cpp" , "r" ).read()
+ fullSource = open( "util/version.cpp" , "r" ).read()
allMatches = re.findall( r"versionString.. = \"(.*?)\"" , fullSource );
if len(allMatches) != 1:
print( "can't find version # in code" )
return None
return allMatches[0]
+if getCodeVersion() == None:
+ Exit(-1)
+
def getDistName( sofar ):
global distName
global dontReplacePackage
@@ -1473,15 +1463,18 @@ def getDistName( sofar ):
return version
- return getGitBranchString( "" , "-" ) + today.strftime( "%Y-%m-%d" )
+ return utils.getGitBranchString( "" , "-" ) + today.strftime( "%Y-%m-%d" )
if distBuild:
- from datetime import date
- today = date.today()
- installDir = "mongodb-" + getSystemInstallName() + "-"
- installDir += getDistName( installDir )
- print "going to make dist: " + installDir
+ if isDriverBuild():
+ installDir = GetOption( "prefix" )
+ else:
+ from datetime import date
+ today = date.today()
+ installDir = "mongodb-" + getSystemInstallName() + "-"
+ installDir += getDistName( installDir )
+ print "going to make dist: " + installDir
# binaries
@@ -1496,6 +1489,9 @@ def checkGlibc(target,source,env):
allBinaries = []
def installBinary( e , name ):
+ if not installSetup.binaries:
+ return
+
global allBinaries
if windows:
@@ -1518,6 +1514,7 @@ def installBinary( e , name ):
for x in normalTools:
installBinary( env , "mongo" + x )
+installBinary( env , "bsondump" )
if mongosniff_built:
installBinary(env, "mongosniff")
@@ -1529,36 +1526,50 @@ if not noshell:
installBinary( env , "mongo" )
env.Alias( "all" , allBinaries )
-
-
-# NOTE: In some cases scons gets confused between installation targets and build
-# dependencies. Here, we use InstallAs instead of Install to prevent such confusion
-# on a case-by-case basis.
+env.Alias( "core" , [ add_exe( "mongo" ) , add_exe( "mongod" ) , add_exe( "mongos" ) ] )
#headers
-for id in [ "", "util/", "db/" , "client/" ]:
- env.Install( installDir + "/include/mongo/" + id , Glob( id + "*.h" ) )
+if installSetup.headers:
+ for id in [ "", "util/", "util/mongoutils/", "util/concurrency/", "db/" , "db/stats/" , "db/repl/" , "client/" , "bson/", "bson/util/" , "s/" , "scripting/" ]:
+ env.Install( installDir + "/" + installSetup.headerRoot + "/mongo/" + id , Glob( id + "*.h" ) )
+ env.Install( installDir + "/" + installSetup.headerRoot + "/mongo/" + id , Glob( id + "*.hpp" ) )
+
+if installSetup.clientSrc:
+ for x in allClientFiles:
+ x = str(x)
+ env.Install( installDir + "/mongo/" + x.rpartition( "/" )[0] , x )
#lib
-env.Install( installDir + "/" + nixLibPrefix, clientLibName )
-if usejvm:
- env.Install( installDir + "/" + nixLibPrefix + "/mongo/jars" , Glob( "jars/*" ) )
+if installSetup.libraries:
+ env.Install( installDir + "/" + nixLibPrefix, clientLibName )
+ if GetOption( "sharedclient" ):
+ env.Install( installDir + "/" + nixLibPrefix, sharedClientLibName )
+
#textfiles
-if distBuild or release:
- #don't want to install these /usr/local/ for example
- env.Install( installDir , "distsrc/README" )
- env.Install( installDir , "distsrc/THIRD-PARTY-NOTICES" )
- env.Install( installDir , "distsrc/GNU-AGPL-3.0" )
+if installSetup.bannerDir:
+ for x in os.listdir( installSetup.bannerDir ):
+ full = installSetup.bannerDir + "/" + x
+ if os.path.isdir( full ):
+ continue
+ if x.find( "~" ) >= 0:
+ continue
+ env.Install( installDir , full )
+
+if installSetup.clientTestsDir:
+ for x in os.listdir( installSetup.clientTestsDir ):
+ full = installSetup.clientTestsDir + "/" + x
+ if os.path.isdir( full ):
+ continue
+ if x.find( "~" ) >= 0:
+ continue
+ env.Install( installDir + '/' + installSetup.clientTestsDir , full )
#final alias
env.Alias( "install" , installDir )
# aliases
-if windows:
- env.Alias( "mongoclient" , "mongoclient.lib" )
-else:
- env.Alias( "mongoclient" , "libmongoclient.a" )
+env.Alias( "mongoclient" , GetOption( "sharedclient" ) and sharedClientLibName or clientLibName )
# ---- CONVENIENCE ----
@@ -1590,7 +1601,7 @@ def s3push( localName , remoteName=None , remotePrefix=None , fixName=True , pla
if remotePrefix is None:
if distName is None:
- remotePrefix = getGitBranchString( "-" ) + "-latest"
+ remotePrefix = utils.getGitBranchString( "-" ) + "-latest"
else:
remotePrefix = "-" + distName
@@ -1615,8 +1626,10 @@ def s3push( localName , remoteName=None , remotePrefix=None , fixName=True , pla
name = name.lower()
else:
name = remoteName
-
- if platformDir:
+
+ if isDriverBuild():
+ name = "cxx-driver/" + name
+ elif platformDir:
name = platform + "/" + name
print( "uploading " + localName + " to http://s3.amazonaws.com/" + s.name + "/" + name )
@@ -1637,16 +1650,35 @@ def s3dist( env , target , source ):
s3push( distFile , "mongodb" )
env.Append( TARFLAGS=" -z " )
-if windows:
- distFile = installDir + ".zip"
- env.Zip( distFile , installDir )
-else:
- distFile = installDir + ".tgz"
- env.Tar( distFile , installDir )
-env.Alias( "dist" , distFile )
-env.Alias( "s3dist" , [ "install" , distFile ] , [ s3dist ] )
-env.AlwaysBuild( "s3dist" )
+if installDir[-1] != "/":
+ if windows:
+ distFile = installDir + ".zip"
+ env.Zip( distFile , installDir )
+ else:
+ distFile = installDir + ".tgz"
+ env.Tar( distFile , installDir )
+
+ env.Alias( "dist" , distFile )
+ env.Alias( "s3dist" , [ "install" , distFile ] , [ s3dist ] )
+ env.AlwaysBuild( "s3dist" )
+
+
+# client dist
+def build_and_test_client(env, target, source):
+ from subprocess import call
+
+ if GetOption("extrapath") is not None:
+ scons_command = ["scons", "--extrapath=" + GetOption("extrapath")]
+ else:
+ scons_command = ["scons"]
+
+ call(scons_command + ["libmongoclient.a", "clientTests"], cwd=installDir)
+
+ return bool(call(["python", "buildscripts/smoke.py",
+ "--test-path", installDir, "smokeClient"]))
+env.Alias("clientBuild", [mongod, installDir], [build_and_test_client])
+env.AlwaysBuild("clientBuild")
def clean_old_dist_builds(env, target, source):
prefix = "mongodb-%s-%s" % (platform, processor)
diff --git a/bson/README b/bson/README
new file mode 100644
index 0000000..01ed654
--- /dev/null
+++ b/bson/README
@@ -0,0 +1,7 @@
+"BSON" stands for "binary JSON" - a binary storage format that is JSON inspired
+(and adds a couple extra types such as Date).
+
+This is the C++ implementation. Implementations which translate BSON<->JSON
+are available for most languages at bsonspec.org.
+
+
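As a quick illustration of the API this README points at — a minimal sketch, assuming only the standalone bson/bson.h header added below and a headers-only Boost install on the include path — building and reading back a document looks like:

    #include <iostream>
    #include "bson/bson.h"   // the standalone header added below

    int main() {
        // build { name : "joe", age : 33 } with the BSON() macro
        mongo::BSONObj o = BSON( "name" << "joe" << "age" << 33 );

        // operator[] returns a BSONElement; String()/Int() throw on a type mismatch
        std::cout << o["name"].String() << ' ' << o["age"].Int() << std::endl;
        return 0;
    }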
diff --git a/bson/bson.h b/bson/bson.h
new file mode 100644
index 0000000..3d92831
--- /dev/null
+++ b/bson/bson.h
@@ -0,0 +1,123 @@
+/* NOTE: Standalone bson header for when not using MongoDB.
+ See also: bsondemo.
+
+ MongoDB includes ../db/jsobj.h instead. This file, however, pulls in much less code / dependencies.
+*/
+
+/** @file bson.h
+ BSON classes
+*/
+
+/*
+ * Copyright 2009 10gen Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ BSONObj and its helpers
+
+ "BSON" stands for "binary JSON" -- ie a binary way to represent objects that would be
+ represented in JSON (plus a few extensions useful for databases & other languages).
+
+ http://www.bsonspec.org/
+*/
+
+#pragma once
+
+#if defined(MONGO_EXPOSE_MACROS)
+#error this header is for client programs, not the mongo database itself. include jsobj.h instead.
+/* because we define simplistic assert helpers here that don't pull in a bunch of util -- so that
+ BSON can be used header only.
+ */
+#endif
+
+#include <iostream>
+#include <sstream>
+#include <boost/utility.hpp>
+#include "util/builder.h"
+
+namespace bson {
+
+ using std::string;
+ using std::stringstream;
+
+ class assertion : public std::exception {
+ public:
+ assertion( unsigned u , const string& s )
+ : id( u ) , msg( s ){
+ mongo::StringBuilder ss;
+ ss << "BsonAssertion id: " << u << " " << s;
+ full = ss.str();
+ }
+
+ virtual ~assertion() throw() {}
+
+ virtual const char* what() const throw() { return full.c_str(); }
+
+ unsigned id;
+ string msg;
+ string full;
+ };
+}
+
+namespace mongo {
+#if !defined(assert)
+ inline void assert(bool expr) {
+ if(!expr) {
+ throw bson::assertion( 0 , "assertion failure in bson library" );
+ }
+ }
+#endif
+#if !defined(uassert)
+ inline void uasserted(unsigned msgid, std::string s) {
+ throw bson::assertion( msgid , s );
+ }
+
+ inline void uassert(unsigned msgid, std::string msg, bool expr) {
+ if( !expr )
+ uasserted( msgid , msg );
+ }
+ inline void msgasserted(int msgid, const char *msg) {
+ throw bson::assertion( msgid , msg );
+ }
+ inline void msgasserted(int msgid, const std::string &msg) { msgasserted(msgid, msg.c_str()); }
+ inline void massert(unsigned msgid, std::string msg, bool expr) {
+ if(!expr) {
+ std::cout << "assertion failure in bson library: " << msgid << ' ' << msg << std::endl;
+ throw bson::assertion( msgid , msg );
+ }
+ }
+#endif
+}
+
+#include "../bson/bsontypes.h"
+#include "../bson/oid.h"
+#include "../bson/bsonelement.h"
+#include "../bson/bsonobj.h"
+#include "../bson/bsonmisc.h"
+#include "../bson/bsonobjbuilder.h"
+#include "../bson/bsonobjiterator.h"
+#include "../bson/bsoninlines.h"
+
+namespace mongo {
+
+ inline unsigned getRandomNumber() {
+#if defined(_WIN32)
+ return rand();
+#else
+ return random();
+#endif
+ }
+
+}
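Because the inline helpers above throw bson::assertion instead of pulling in the server's assertion machinery, a standalone client can trap BSON misuse as an ordinary exception. A minimal sketch, under the same standalone-header assumptions as earlier:

    #include <iostream>
    #include "bson/bson.h"

    int main() {
        mongo::BSONObj o = BSON( "n" << 1 );
        try {
            o["n"].String();            // wrong type: the chk() helper calls uasserted()
        }
        catch( bson::assertion& e ) {   // thrown by the inline uasserted() defined in bson.h
            std::cout << e.what() << std::endl;
        }
        return 0;
    }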
diff --git a/bson/bson_db.h b/bson/bson_db.h
new file mode 100644
index 0000000..18cd59f
--- /dev/null
+++ b/bson/bson_db.h
@@ -0,0 +1,70 @@
+/** @file bson_db.h
+
+ This file contains the implementation of BSON-related methods that are required
+ by the MongoDB database server.
+
+ Normally, for standalone BSON usage, you do not want this file - it will tend to
+ pull in some other files from the MongoDB project. Thus, bson.h (the main file
+ one would use) does not include this file.
+*/
+
+/* Copyright 2009 10gen Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include "../util/optime.h"
+
+namespace mongo {
+
+ /**
+ Timestamps are a special BSON datatype that is used internally for replication.
+       Append a timestamp element to the object being built.
+ @param time - in millis (but stored in seconds)
+ */
+ inline BSONObjBuilder& BSONObjBuilder::appendTimestamp( const StringData& fieldName , unsigned long long time , unsigned int inc ){
+ OpTime t( (unsigned) (time / 1000) , inc );
+ appendTimestamp( fieldName , t.asDate() );
+ return *this;
+ }
+
+ inline OpTime BSONElement::_opTime() const {
+ if( type() == mongo::Date || type() == Timestamp )
+ return OpTime( *reinterpret_cast< const unsigned long long* >( value() ) );
+ return OpTime();
+ }
+
+ inline string BSONElement::_asCode() const {
+ switch( type() ){
+ case mongo::String:
+ case Code:
+ return string(valuestr(), valuestrsize()-1);
+ case CodeWScope:
+ return string(codeWScopeCode(), *(int*)(valuestr())-1);
+ default:
+ log() << "can't convert type: " << (int)(type()) << " to code" << endl;
+ }
+ uassert( 10062 , "not code" , 0 );
+ return "";
+ }
+
+ inline BSONObjBuilder& BSONObjBuilderValueStream::operator<<(DateNowLabeler& id){
+ _builder->appendDate(_fieldName, jsTime());
+ _fieldName = 0;
+ return *_builder;
+ }
+
+
+}
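To make the millis-to-seconds conversion in appendTimestamp() concrete, here is a sketch using a hypothetical timestamp value (note that, as the comment above says, this file pulls in ../util/optime.h from the server tree, so it will not build against the standalone bson.h):

    mongo::BSONObjBuilder b;
    unsigned long long millis = 1288893600500ULL;  // hypothetical wall-clock time in ms
    b.appendTimestamp( "ts", millis, 7 );
    // stored as OpTime( (unsigned)(millis / 1000), 7 )
    //         = OpTime( 1288893600 seconds, increment 7 ) -- the 500 ms are dropped
    mongo::BSONObj o = b.obj();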
diff --git a/bson/bsondemo/bsondemo.cpp b/bson/bsondemo/bsondemo.cpp
new file mode 100644
index 0000000..b0da1b8
--- /dev/null
+++ b/bson/bsondemo/bsondemo.cpp
@@ -0,0 +1,107 @@
+/** @file bsondemo.cpp
+
+ Example of use of BSON from C++.
+
+ Requires boost (headers only).
+   Works header-only (that is, for the parts actually exercised herein; some functions require .cpp files).
+*/
+
+/*
+ * Copyright 2010 10gen Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "../bson.h"
+#include <iostream>
+#include <vector>
+
+using namespace std;
+using namespace bson;
+
+void iter(bo o) {
+ /* iterator example */
+ cout << "\niter()\n";
+ for( bo::iterator i(o); i.more(); ) {
+ cout << ' ' << i.next().toString() << '\n';
+ }
+}
+
+int main()
+{
+ cout << "build bits: " << 8 * sizeof(char *) << '\n' << endl;
+
+ /* a bson object defaults on construction to { } */
+ bo empty;
+ cout << "empty: " << empty << endl;
+
+ /* make a simple { name : 'joe', age : 33.7 } object */
+ {
+ bob b;
+ b.append("name", "joe");
+ b.append("age", 33.7);
+ b.obj();
+ }
+
+ /* make { name : 'joe', age : 33.7 } with a more compact notation. */
+ bo x = bob().append("name", "joe").append("age", 33.7).obj();
+
+ /* convert from bson to json */
+ string json = x.toString();
+ cout << "json for x:" << json << endl;
+
+ /* access some fields of bson object x */
+ cout << "Some x things: " << x["name"] << ' ' << x["age"].Number() << ' ' << x.isEmpty() << endl;
+
+ /* make a bit more complex object with some nesting
+ { x : 'asdf', y : true, subobj : { z : 3, q : 4 } }
+ */
+ bo y = BSON( "x" << "asdf" << "y" << true << "subobj" << BSON( "z" << 3 << "q" << 4 ) );
+
+ /* print it */
+ cout << "y: " << y << endl;
+
+ /* reach in and get subobj.z */
+ cout << "subobj.z: " << y.getFieldDotted("subobj.z").Number() << endl;
+
+ /* alternate syntax: */
+ cout << "subobj.z: " << y["subobj"]["z"].Number() << endl;
+
+ /* fetch all *top level* elements from object y into a vector */
+ vector<be> v;
+ y.elems(v);
+ cout << v[0] << endl;
+
+ /* into an array */
+ list<be> L;
+ y.elems(L);
+
+ bo sub = y["subobj"].Obj();
+
+    /* grab all the ints that were in subobj. if it had elements that were not ints, we throw an exception
+       (capital V on Vals() means an exception is thrown if a wrong type is found)
+    */
+ vector<int> myints;
+ sub.Vals(myints);
+ cout << "my ints: " << myints[0] << ' ' << myints[1] << endl;
+
+ /* grab all the string values from x. if the field isn't of string type, just skip it --
+       lowercase v on vals() indicates skip, don't throw.
+ */
+ vector<string> strs;
+ x.vals(strs);
+ cout << strs.size() << " strings, first one: " << strs[0] << endl;
+
+ iter(y);
+ return 0;
+}
diff --git a/msvc/mongos/mongos.vcproj b/bson/bsondemo/bsondemo.vcproj
index 058fa1e..8432ceb 100644
--- a/msvc/mongos/mongos.vcproj
+++ b/bson/bsondemo/bsondemo.vcproj
@@ -2,9 +2,9 @@
<VisualStudioProject
ProjectType="Visual C++"
Version="9.00"
- Name="mongos"
- ProjectGUID="{942113AE-678B-4C7B-BC78-D91AB9C52390}"
- RootNamespace="mongos"
+ Name="bsondemo"
+ ProjectGUID="{C9DB5EB7-81AA-4185-BAA1-DA035654402F}"
+ RootNamespace="bsondemo"
Keyword="Win32Proj"
TargetFrameworkVersion="196613"
>
@@ -18,10 +18,9 @@
<Configurations>
<Configuration
Name="Debug|Win32"
- OutputDirectory="$(ConfigurationName)"
+ OutputDirectory="$(SolutionDir)$(ConfigurationName)"
IntermediateDirectory="$(ConfigurationName)"
ConfigurationType="1"
- InheritedPropertySheets="..\mongo_app.vsprops"
CharacterSet="1"
>
<Tool
@@ -42,11 +41,12 @@
<Tool
Name="VCCLCompilerTool"
Optimization="0"
+ AdditionalIncludeDirectories="c:\program files\boost\latest;c:\boost;\boost"
PreprocessorDefinitions="WIN32;_DEBUG;_CONSOLE"
MinimalRebuild="true"
BasicRuntimeChecks="3"
RuntimeLibrary="3"
- UsePrecompiledHeader="2"
+ UsePrecompiledHeader="0"
WarningLevel="3"
DebugInformationFormat="4"
/>
@@ -61,7 +61,6 @@
/>
<Tool
Name="VCLinkerTool"
- AdditionalDependencies="mongo_common.lib core_server.lib shard_server.lib ws2_32.lib"
LinkIncremental="2"
GenerateDebugInformation="true"
SubSystem="1"
@@ -87,15 +86,13 @@
/>
<Tool
Name="VCPostBuildEventTool"
- CommandLine="copy &quot;$(OutDir)\$(ProjectName).exe&quot; &quot;..\bin\$(ConfigurationName)\.&quot;"
/>
</Configuration>
<Configuration
Name="Release|Win32"
- OutputDirectory="$(ConfigurationName)"
+ OutputDirectory="$(SolutionDir)$(ConfigurationName)"
IntermediateDirectory="$(ConfigurationName)"
ConfigurationType="1"
- InheritedPropertySheets="..\mongo_app.vsprops"
CharacterSet="1"
WholeProgramOptimization="1"
>
@@ -118,11 +115,11 @@
Name="VCCLCompilerTool"
Optimization="2"
EnableIntrinsicFunctions="true"
- WholeProgramOptimization="false"
+ AdditionalIncludeDirectories="c:\program files\boost\latest;c:\boost;\boost"
PreprocessorDefinitions="WIN32;NDEBUG;_CONSOLE"
RuntimeLibrary="2"
EnableFunctionLevelLinking="true"
- UsePrecompiledHeader="2"
+ UsePrecompiledHeader="0"
WarningLevel="3"
DebugInformationFormat="3"
/>
@@ -137,7 +134,6 @@
/>
<Tool
Name="VCLinkerTool"
- AdditionalDependencies="mongo_common.lib core_server.lib shard_server.lib ws2_32.lib"
LinkIncremental="1"
GenerateDebugInformation="true"
SubSystem="1"
@@ -165,7 +161,6 @@
/>
<Tool
Name="VCPostBuildEventTool"
- CommandLine="copy &quot;$(OutDir)\$(ProjectName).exe&quot; &quot;..\bin\$(ConfigurationName)\.&quot;"
/>
</Configuration>
</Configurations>
@@ -173,54 +168,74 @@
</References>
<Files>
<Filter
- Name="Header Files"
- Filter="h;hpp;hxx;hm;inl;inc;xsd"
- UniqueIdentifier="{93995380-89BD-4b04-88EB-625FBE52EBFB}"
+ Name="Source Files"
+ Filter="cpp;c;cc;cxx;def;odl;idl;hpj;bat;asm;asmx"
+ UniqueIdentifier="{4FC737F1-C7A5-4376-A066-2A32D752A2FF}"
>
<File
- RelativePath="..\..\stdafx.h"
- >
- </File>
- <File
- RelativePath="..\..\targetver.h"
+ RelativePath=".\bsondemo.cpp"
>
</File>
</Filter>
<Filter
- Name="s"
+ Name="bson"
>
<File
- RelativePath="..\..\s\server.cpp"
+ RelativePath="..\bson.h"
>
</File>
<File
- RelativePath="..\..\s\server.h"
+ RelativePath="..\bson_db.h"
+ >
+ </File>
+ <File
+ RelativePath="..\bsonelement.h"
+ >
+ </File>
+ <File
+ RelativePath="..\bsoninlines.h"
>
</File>
- </Filter>
- <Filter
- Name="Source Files"
- >
<File
- RelativePath="..\..\stdafx.cpp"
+ RelativePath="..\bsonmisc.h"
>
- <FileConfiguration
- Name="Debug|Win32"
+ </File>
+ <File
+ RelativePath="..\bsonobj.h"
+ >
+ </File>
+ <File
+ RelativePath="..\bsonobjbuilder.h"
+ >
+ </File>
+ <File
+ RelativePath="..\bsonobjiterator.h"
+ >
+ </File>
+ <File
+ RelativePath="..\bsontypes.h"
+ >
+ </File>
+ <File
+ RelativePath="..\oid.h"
+ >
+ </File>
+ <File
+ RelativePath="..\ordering.h"
+ >
+ </File>
+ <Filter
+ Name="util"
+ >
+ <File
+ RelativePath="..\util\builder.h"
>
- <Tool
- Name="VCCLCompilerTool"
- UsePrecompiledHeader="1"
- />
- </FileConfiguration>
- <FileConfiguration
- Name="Release|Win32"
+ </File>
+ <File
+ RelativePath="..\util\misc.h"
>
- <Tool
- Name="VCCLCompilerTool"
- UsePrecompiledHeader="1"
- />
- </FileConfiguration>
- </File>
+ </File>
+ </Filter>
</Filter>
</Files>
<Globals>
diff --git a/bson/bsondemo/bsondemo.vcxproj b/bson/bsondemo/bsondemo.vcxproj
new file mode 100644
index 0000000..bb82a50
--- /dev/null
+++ b/bson/bsondemo/bsondemo.vcxproj
@@ -0,0 +1,193 @@
+<?xml version="1.0" encoding="utf-8"?>
+<Project DefaultTargets="Build" ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
+ <ItemGroup Label="ProjectConfigurations">
+ <ProjectConfiguration Include="Debug|Win32">
+ <Configuration>Debug</Configuration>
+ <Platform>Win32</Platform>
+ </ProjectConfiguration>
+ <ProjectConfiguration Include="Debug|x64">
+ <Configuration>Debug</Configuration>
+ <Platform>x64</Platform>
+ </ProjectConfiguration>
+ <ProjectConfiguration Include="Release|Win32">
+ <Configuration>Release</Configuration>
+ <Platform>Win32</Platform>
+ </ProjectConfiguration>
+ <ProjectConfiguration Include="Release|x64">
+ <Configuration>Release</Configuration>
+ <Platform>x64</Platform>
+ </ProjectConfiguration>
+ </ItemGroup>
+ <PropertyGroup Label="Globals">
+ <ProjectGuid>{C9DB5EB7-81AA-4185-BAA1-DA035654402F}</ProjectGuid>
+ <RootNamespace>bsondemo</RootNamespace>
+ <Keyword>Win32Proj</Keyword>
+ </PropertyGroup>
+ <Import Project="$(VCTargetsPath)\Microsoft.Cpp.Default.props" />
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'" Label="Configuration">
+ <ConfigurationType>Application</ConfigurationType>
+ <CharacterSet>Unicode</CharacterSet>
+ <WholeProgramOptimization>true</WholeProgramOptimization>
+ </PropertyGroup>
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'" Label="Configuration">
+ <ConfigurationType>Application</ConfigurationType>
+ <CharacterSet>Unicode</CharacterSet>
+ <WholeProgramOptimization>true</WholeProgramOptimization>
+ </PropertyGroup>
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'" Label="Configuration">
+ <ConfigurationType>Application</ConfigurationType>
+ <CharacterSet>Unicode</CharacterSet>
+ </PropertyGroup>
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'" Label="Configuration">
+ <ConfigurationType>Application</ConfigurationType>
+ <CharacterSet>Unicode</CharacterSet>
+ </PropertyGroup>
+ <Import Project="$(VCTargetsPath)\Microsoft.Cpp.props" />
+ <ImportGroup Label="ExtensionSettings">
+ </ImportGroup>
+ <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'" Label="PropertySheets">
+ <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
+ </ImportGroup>
+ <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'" Label="PropertySheets">
+ <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
+ </ImportGroup>
+ <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'" Label="PropertySheets">
+ <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
+ </ImportGroup>
+ <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'" Label="PropertySheets">
+ <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
+ </ImportGroup>
+ <PropertyGroup Label="UserMacros" />
+ <PropertyGroup>
+ <_ProjectFileVersion>10.0.30319.1</_ProjectFileVersion>
+ <OutDir Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">$(SolutionDir)$(Configuration)\</OutDir>
+ <OutDir Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">$(SolutionDir)$(Configuration)\</OutDir>
+ <IntDir Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">$(Configuration)\</IntDir>
+ <IntDir Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">$(Configuration)\</IntDir>
+ <LinkIncremental Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">true</LinkIncremental>
+ <LinkIncremental Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">true</LinkIncremental>
+ <OutDir Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">$(SolutionDir)$(Configuration)\</OutDir>
+ <OutDir Condition="'$(Configuration)|$(Platform)'=='Release|x64'">$(SolutionDir)$(Configuration)\</OutDir>
+ <IntDir Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">$(Configuration)\</IntDir>
+ <IntDir Condition="'$(Configuration)|$(Platform)'=='Release|x64'">$(Configuration)\</IntDir>
+ <LinkIncremental Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">false</LinkIncremental>
+ <LinkIncremental Condition="'$(Configuration)|$(Platform)'=='Release|x64'">false</LinkIncremental>
+ <CodeAnalysisRuleSet Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">AllRules.ruleset</CodeAnalysisRuleSet>
+ <CodeAnalysisRuleSet Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">AllRules.ruleset</CodeAnalysisRuleSet>
+ <CodeAnalysisRules Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'" />
+ <CodeAnalysisRules Condition="'$(Configuration)|$(Platform)'=='Debug|x64'" />
+ <CodeAnalysisRuleAssemblies Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'" />
+ <CodeAnalysisRuleAssemblies Condition="'$(Configuration)|$(Platform)'=='Debug|x64'" />
+ <CodeAnalysisRuleSet Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">AllRules.ruleset</CodeAnalysisRuleSet>
+ <CodeAnalysisRuleSet Condition="'$(Configuration)|$(Platform)'=='Release|x64'">AllRules.ruleset</CodeAnalysisRuleSet>
+ <CodeAnalysisRules Condition="'$(Configuration)|$(Platform)'=='Release|Win32'" />
+ <CodeAnalysisRules Condition="'$(Configuration)|$(Platform)'=='Release|x64'" />
+ <CodeAnalysisRuleAssemblies Condition="'$(Configuration)|$(Platform)'=='Release|Win32'" />
+ <CodeAnalysisRuleAssemblies Condition="'$(Configuration)|$(Platform)'=='Release|x64'" />
+ </PropertyGroup>
+ <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
+ <ClCompile>
+ <Optimization>Disabled</Optimization>
+ <AdditionalIncludeDirectories>c:\boost;\boost</AdditionalIncludeDirectories>
+ <PreprocessorDefinitions>WIN32;_DEBUG;_CONSOLE;%(PreprocessorDefinitions)</PreprocessorDefinitions>
+ <MinimalRebuild>No</MinimalRebuild>
+ <BasicRuntimeChecks>EnableFastChecks</BasicRuntimeChecks>
+ <RuntimeLibrary>MultiThreadedDebugDLL</RuntimeLibrary>
+ <PrecompiledHeader>
+ </PrecompiledHeader>
+ <WarningLevel>Level3</WarningLevel>
+ <DebugInformationFormat>EditAndContinue</DebugInformationFormat>
+ <MultiProcessorCompilation>true</MultiProcessorCompilation>
+ </ClCompile>
+ <Link>
+ <GenerateDebugInformation>true</GenerateDebugInformation>
+ <SubSystem>Console</SubSystem>
+ <TargetMachine>MachineX86</TargetMachine>
+ </Link>
+ </ItemDefinitionGroup>
+ <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
+ <ClCompile>
+ <Optimization>Disabled</Optimization>
+ <AdditionalIncludeDirectories>c:\boost;\boost</AdditionalIncludeDirectories>
+ <PreprocessorDefinitions>WIN32;_DEBUG;_CONSOLE;%(PreprocessorDefinitions)</PreprocessorDefinitions>
+ <BasicRuntimeChecks>EnableFastChecks</BasicRuntimeChecks>
+ <RuntimeLibrary>MultiThreadedDebugDLL</RuntimeLibrary>
+ <PrecompiledHeader>
+ </PrecompiledHeader>
+ <WarningLevel>Level3</WarningLevel>
+ <DebugInformationFormat>ProgramDatabase</DebugInformationFormat>
+ <MinimalRebuild>No</MinimalRebuild>
+ <MultiProcessorCompilation>true</MultiProcessorCompilation>
+ </ClCompile>
+ <Link>
+ <GenerateDebugInformation>true</GenerateDebugInformation>
+ <SubSystem>Console</SubSystem>
+ </Link>
+ </ItemDefinitionGroup>
+ <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
+ <ClCompile>
+ <Optimization>MaxSpeed</Optimization>
+ <IntrinsicFunctions>true</IntrinsicFunctions>
+ <PreprocessorDefinitions>WIN32;NDEBUG;_CONSOLE;%(PreprocessorDefinitions)</PreprocessorDefinitions>
+ <RuntimeLibrary>MultiThreadedDLL</RuntimeLibrary>
+ <FunctionLevelLinking>true</FunctionLevelLinking>
+ <PrecompiledHeader>
+ </PrecompiledHeader>
+ <WarningLevel>Level3</WarningLevel>
+ <DebugInformationFormat>ProgramDatabase</DebugInformationFormat>
+ <AdditionalIncludeDirectories>c:\boost;\boost</AdditionalIncludeDirectories>
+ <MinimalRebuild>No</MinimalRebuild>
+ <MultiProcessorCompilation>true</MultiProcessorCompilation>
+ </ClCompile>
+ <Link>
+ <GenerateDebugInformation>true</GenerateDebugInformation>
+ <SubSystem>Console</SubSystem>
+ <OptimizeReferences>true</OptimizeReferences>
+ <EnableCOMDATFolding>true</EnableCOMDATFolding>
+ <TargetMachine>MachineX86</TargetMachine>
+ </Link>
+ </ItemDefinitionGroup>
+ <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
+ <ClCompile>
+ <Optimization>MaxSpeed</Optimization>
+ <IntrinsicFunctions>true</IntrinsicFunctions>
+ <PreprocessorDefinitions>WIN32;NDEBUG;_CONSOLE;%(PreprocessorDefinitions)</PreprocessorDefinitions>
+ <RuntimeLibrary>MultiThreadedDLL</RuntimeLibrary>
+ <FunctionLevelLinking>true</FunctionLevelLinking>
+ <PrecompiledHeader>
+ </PrecompiledHeader>
+ <WarningLevel>Level3</WarningLevel>
+ <DebugInformationFormat>ProgramDatabase</DebugInformationFormat>
+ <AdditionalIncludeDirectories>c:\boost;\boost</AdditionalIncludeDirectories>
+ <MinimalRebuild>No</MinimalRebuild>
+ <MultiProcessorCompilation>true</MultiProcessorCompilation>
+ </ClCompile>
+ <Link>
+ <GenerateDebugInformation>true</GenerateDebugInformation>
+ <SubSystem>Console</SubSystem>
+ <OptimizeReferences>true</OptimizeReferences>
+ <EnableCOMDATFolding>true</EnableCOMDATFolding>
+ </Link>
+ </ItemDefinitionGroup>
+ <ItemGroup>
+ <ClCompile Include="bsondemo.cpp" />
+ </ItemGroup>
+ <ItemGroup>
+ <ClInclude Include="..\bson.h" />
+ <ClInclude Include="..\bson_db.h" />
+ <ClInclude Include="..\bsonelement.h" />
+ <ClInclude Include="..\bsoninlines.h" />
+ <ClInclude Include="..\bsonmisc.h" />
+ <ClInclude Include="..\bsonobj.h" />
+ <ClInclude Include="..\bsonobjbuilder.h" />
+ <ClInclude Include="..\bsonobjiterator.h" />
+ <ClInclude Include="..\bsontypes.h" />
+ <ClInclude Include="..\oid.h" />
+ <ClInclude Include="..\ordering.h" />
+ <ClInclude Include="..\util\builder.h" />
+ <ClInclude Include="..\util\misc.h" />
+ </ItemGroup>
+ <Import Project="$(VCTargetsPath)\Microsoft.Cpp.targets" />
+ <ImportGroup Label="ExtensionTargets">
+ </ImportGroup>
+</Project>
diff --git a/bson/bsondemo/bsondemo.vcxproj.filters b/bson/bsondemo/bsondemo.vcxproj.filters
new file mode 100644
index 0000000..35f14d5
--- /dev/null
+++ b/bson/bsondemo/bsondemo.vcxproj.filters
@@ -0,0 +1,52 @@
+<?xml version="1.0" encoding="utf-8"?>
+<Project ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
+ <ItemGroup>
+ <ClCompile Include="bsondemo.cpp" />
+ </ItemGroup>
+ <ItemGroup>
+ <ClInclude Include="..\ordering.h">
+ <Filter>bson</Filter>
+ </ClInclude>
+ <ClInclude Include="..\bsonelement.h">
+ <Filter>bson</Filter>
+ </ClInclude>
+ <ClInclude Include="..\bsoninlines.h">
+ <Filter>bson</Filter>
+ </ClInclude>
+ <ClInclude Include="..\bsonmisc.h">
+ <Filter>bson</Filter>
+ </ClInclude>
+ <ClInclude Include="..\bsonobj.h">
+ <Filter>bson</Filter>
+ </ClInclude>
+ <ClInclude Include="..\bsonobjbuilder.h">
+ <Filter>bson</Filter>
+ </ClInclude>
+ <ClInclude Include="..\bsonobjiterator.h">
+ <Filter>bson</Filter>
+ </ClInclude>
+ <ClInclude Include="..\bsontypes.h">
+ <Filter>bson</Filter>
+ </ClInclude>
+ <ClInclude Include="..\util\builder.h">
+ <Filter>bson</Filter>
+ </ClInclude>
+ <ClInclude Include="..\util\misc.h">
+ <Filter>bson</Filter>
+ </ClInclude>
+ <ClInclude Include="..\oid.h">
+ <Filter>bson</Filter>
+ </ClInclude>
+ <ClInclude Include="..\bson_db.h">
+ <Filter>bson</Filter>
+ </ClInclude>
+ <ClInclude Include="..\bson.h">
+ <Filter>bson</Filter>
+ </ClInclude>
+ </ItemGroup>
+ <ItemGroup>
+ <Filter Include="bson">
+ <UniqueIdentifier>{ea599740-3c6f-40dd-a121-e825d82ae4aa}</UniqueIdentifier>
+ </Filter>
+ </ItemGroup>
+</Project>
diff --git a/bson/bsonelement.h b/bson/bsonelement.h
new file mode 100644
index 0000000..2bbc640
--- /dev/null
+++ b/bson/bsonelement.h
@@ -0,0 +1,549 @@
+// BSONElement
+
+/* Copyright 2009 10gen Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <vector>
+#include <string.h>
+#include "util/builder.h"
+
+namespace bson {
+ typedef mongo::BSONElement be;
+ typedef mongo::BSONObj bo;
+ typedef mongo::BSONObjBuilder bob;
+}
+
+namespace mongo {
+
+ class OpTime;
+ class BSONElement;
+
+ /* l and r MUST have same type when called: check that first. */
+ int compareElementValues(const BSONElement& l, const BSONElement& r);
+
+
+/** BSONElement represents an "element" in a BSONObj. So for the object { a : 3, b : "abc" },
+ 'a : 3' is the first element (key+value).
+
+ The BSONElement object points into the BSONObj's data. Thus the BSONObj must stay in scope
+ for the life of the BSONElement.
+
+ internals:
+ <type><fieldName ><value>
+ -------- size() ------------
+ -fieldNameSize-
+ value()
+ type()
+*/
+class BSONElement {
+public:
+ /** These functions, which start with a capital letter, throw a UserException if the
+ element is not of the required type. Example:
+
+ string foo = obj["foo"].String(); // exception if not a string type or DNE
+ */
+ string String() const { return chk(mongo::String).valuestr(); }
+ Date_t Date() const { return chk(mongo::Date).date(); }
+ double Number() const { return chk(isNumber()).number(); }
+ double Double() const { return chk(NumberDouble)._numberDouble(); }
+ long long Long() const { return chk(NumberLong)._numberLong(); }
+ int Int() const { return chk(NumberInt)._numberInt(); }
+ bool Bool() const { return chk(mongo::Bool).boolean(); }
+ BSONObj Obj() const;
+ vector<BSONElement> Array() const; // see implementation for detailed comments
+ mongo::OID OID() const { return chk(jstOID).__oid(); }
+ void Null() const { chk(isNull()); }
+ void OK() const { chk(ok()); }
+
+ /** populate v with the value of the element. If type does not match, throw exception.
+ useful in templates -- see also BSONObj::Vals().
+ */
+ void Val(Date_t& v) const { v = Date(); }
+ void Val(long long& v) const { v = Long(); }
+ void Val(bool& v) const { v = Bool(); }
+ void Val(BSONObj& v) const;
+ void Val(mongo::OID& v) const { v = OID(); }
+ void Val(int& v) const { v = Int(); }
+ void Val(double& v) const { v = Double(); }
+ void Val(string& v) const { v = String(); }
+
+ /** Use ok() to check if a value is assigned:
+ if( myObj["foo"].ok() ) ...
+ */
+ bool ok() const { return !eoo(); }
+
+ string toString( bool includeFieldName = true, bool full=false) const;
+ void toString(StringBuilder& s, bool includeFieldName = true, bool full=false) const;
+ string jsonString( JsonStringFormat format, bool includeFieldNames = true, int pretty = 0 ) const;
+ operator string() const { return toString(); }
+
+ /** Returns the type of the element */
+ BSONType type() const { return (BSONType) *data; }
+
+ /** retrieve a field within this element
+ throws exception if *this is not an embedded object
+ */
+ BSONElement operator[] (const string& field) const;
+
+    /** returns the canonical type of the element, i.e. one value shared by a family of related types;
+        the main purpose is numbers: any numeric type will return the same value as NumberDouble.
+        Note: if the order changes, indexes have to be rebuilt or there can be corruption
+    */
+ int canonicalType() const;
+
+ /** Indicates if it is the end-of-object element, which is present at the end of
+ every BSON object.
+ */
+ bool eoo() const { return type() == EOO; }
+
+ /** Size of the element.
+ @param maxLen If maxLen is specified, don't scan more than maxLen bytes to calculate size.
+ */
+ int size( int maxLen = -1 ) const;
+
+ /** Wrap this element up as a singleton object. */
+ BSONObj wrap() const;
+
+ /** Wrap this element up as a singleton object with a new name. */
+ BSONObj wrap( const char* newName) const;
+
+ /** field name of the element. e.g., for
+ name : "Joe"
+ "name" is the fieldname
+ */
+ const char * fieldName() const {
+ if ( eoo() ) return ""; // no fieldname for it.
+ return data + 1;
+ }
+
+ /** raw data of the element's value (so be careful). */
+ const char * value() const {
+ return (data + fieldNameSize() + 1);
+ }
+ /** size in bytes of the element's value (when applicable). */
+ int valuesize() const {
+ return size() - fieldNameSize() - 1;
+ }
+
+ bool isBoolean() const { return type() == mongo::Bool; }
+
+ /** @return value of a boolean element.
+        You must ensure the element is a boolean before
+        calling. */
+ bool boolean() const {
+ return *value() ? true : false;
+ }
+
+ /** Retrieve a java style date value from the element.
+ Ensure element is of type Date before calling.
+ */
+ Date_t date() const {
+ return *reinterpret_cast< const Date_t* >( value() );
+ }
+
+ /** Convert the value to boolean, regardless of its type, in a javascript-like fashion
+ (i.e., treat zero and null as false).
+ */
+ bool trueValue() const;
+
+ /** True if number, string, bool, date, OID */
+ bool isSimpleType() const;
+
+ /** True if element is of a numeric type. */
+ bool isNumber() const;
+
+ /** Return double value for this field. MUST be NumberDouble type. */
+ double _numberDouble() const {return *reinterpret_cast< const double* >( value() ); }
+    /** Return int value for this field. MUST be NumberInt type. */
+ int _numberInt() const {return *reinterpret_cast< const int* >( value() ); }
+    /** Return long long value for this field. MUST be NumberLong type. */
+ long long _numberLong() const {return *reinterpret_cast< const long long* >( value() ); }
+
+ /** Retrieve int value for the element safely. Zero returned if not a number. */
+ int numberInt() const;
+ /** Retrieve long value for the element safely. Zero returned if not a number. */
+ long long numberLong() const;
+ /** Retrieve the numeric value of the element. If not of a numeric type, returns 0.
+        Note: casts to double, so data loss may occur with large (>52 bit) NumberLong values.
+ */
+ double numberDouble() const;
+ /** Retrieve the numeric value of the element. If not of a numeric type, returns 0.
+        Note: casts to double, so data loss may occur with large (>52 bit) NumberLong values.
+ */
+ double number() const { return numberDouble(); }
+
+ /** Retrieve the object ID stored in the object.
+ You must ensure the element is of type jstOID first. */
+ const mongo::OID &__oid() const { return *reinterpret_cast< const mongo::OID* >( value() ); }
+
+ /** True if element is null. */
+ bool isNull() const {
+ return type() == jstNULL;
+ }
+
+ /** Size (length) of a string element.
+        You must ensure it is of type String first. */
+ int valuestrsize() const {
+ return *reinterpret_cast< const int* >( value() );
+ }
+
+ // for objects the size *includes* the size of the size field
+ int objsize() const {
+ return *reinterpret_cast< const int* >( value() );
+ }
+
+ /** Get a string's value. Also gives you start of the real data for an embedded object.
+        You must ensure the data is of an appropriate type first -- see also valuestrsafe().
+ */
+ const char * valuestr() const {
+ return value() + 4;
+ }
+
+ /** Get the string value of the element. If not a string returns "". */
+ const char *valuestrsafe() const {
+ return type() == mongo::String ? valuestr() : "";
+ }
+ /** Get the string value of the element. If not a string returns "". */
+ string str() const {
+ return type() == mongo::String ? string(valuestr(), valuestrsize()-1) : string();
+ }
+
+ /** Get javascript code of a CodeWScope data element. */
+ const char * codeWScopeCode() const {
+ return value() + 8;
+ }
+ /** Get the scope SavedContext of a CodeWScope data element. */
+ const char * codeWScopeScopeData() const {
+ // TODO fix
+ return codeWScopeCode() + strlen( codeWScopeCode() ) + 1;
+ }
+
+ /** Get the embedded object this element holds. */
+ BSONObj embeddedObject() const;
+
+ /* uasserts if not an object */
+ BSONObj embeddedObjectUserCheck() const;
+
+ BSONObj codeWScopeObject() const;
+
+ /** Get raw binary data. Element must be of type BinData. Doesn't handle type 2 specially */
+ const char *binData(int& len) const {
+ // BinData: <int len> <byte subtype> <byte[len] data>
+ assert( type() == BinData );
+ len = valuestrsize();
+ return value() + 5;
+ }
+ /** Get binary data. Element must be of type BinData. Handles type 2 */
+ const char *binDataClean(int& len) const {
+ // BinData: <int len> <byte subtype> <byte[len] data>
+ if (binDataType() != ByteArrayDeprecated){
+ return binData(len);
+ } else {
+ // Skip extra size
+ len = valuestrsize() - 4;
+ return value() + 5 + 4;
+ }
+ }
+
+ BinDataType binDataType() const {
+ // BinData: <int len> <byte subtype> <byte[len] data>
+ assert( type() == BinData );
+ unsigned char c = (value() + 4)[0];
+ return (BinDataType)c;
+ }
+
+ /** Retrieve the regex string for a Regex element */
+ const char *regex() const {
+ assert(type() == RegEx);
+ return value();
+ }
+
+ /** Retrieve the regex flags (options) for a Regex element */
+ const char *regexFlags() const {
+ const char *p = regex();
+ return p + strlen(p) + 1;
+ }
+
+ /** like operator== but doesn't check the fieldname,
+ just the value.
+ */
+ bool valuesEqual(const BSONElement& r) const {
+ return woCompare( r , false ) == 0;
+ }
+
+ /** Returns true if elements are equal. */
+ bool operator==(const BSONElement& r) const {
+ return woCompare( r , true ) == 0;
+ }
+
+ /** Well ordered comparison.
+ @return <0: l<r. 0:l==r. >0:l>r
+ order by type, field name, and field value.
+ If considerFieldName is true, pay attention to the field name.
+ */
+ int woCompare( const BSONElement &e, bool considerFieldName = true ) const;
+
+ const char * rawdata() const {
+ return data;
+ }
+
+ /** 0 == Equality, just not defined yet */
+ int getGtLtOp( int def = 0 ) const;
+
+ /** Constructs an empty element */
+ BSONElement();
+
+ /** Check that data is internally consistent. */
+ void validate() const;
+
+ /** True if this element may contain subobjects. */
+ bool mayEncapsulate() const {
+ switch ( type() ){
+ case Object:
+ case mongo::Array:
+ case CodeWScope:
+ return true;
+ default:
+ return false;
+ }
+ }
+
+ /** True if this element can be a BSONObj */
+ bool isABSONObj() const {
+ switch( type() ){
+ case Object:
+ case mongo::Array:
+ return true;
+ default:
+ return false;
+ }
+ }
+
+ Date_t timestampTime() const{
+ unsigned long long t = ((unsigned int*)(value() + 4 ))[0];
+ return t * 1000;
+ }
+ unsigned int timestampInc() const{
+ return ((unsigned int*)(value() ))[0];
+ }
+
+ const char * dbrefNS() const {
+ uassert( 10063 , "not a dbref" , type() == DBRef );
+ return value() + 4;
+ }
+
+ const mongo::OID& dbrefOID() const {
+ uassert( 10064 , "not a dbref" , type() == DBRef );
+ const char * start = value();
+ start += 4 + *reinterpret_cast< const int* >( start );
+ return *reinterpret_cast< const mongo::OID* >( start );
+ }
+
+ bool operator<( const BSONElement& other ) const {
+ int x = (int)canonicalType() - (int)other.canonicalType();
+ if ( x < 0 ) return true;
+ else if ( x > 0 ) return false;
+ return compareElementValues(*this,other) < 0;
+ }
+
+ // If maxLen is specified, don't scan more than maxLen bytes.
+ explicit BSONElement(const char *d, int maxLen = -1) : data(d) {
+ fieldNameSize_ = -1;
+ if ( eoo() )
+ fieldNameSize_ = 0;
+ else {
+ if ( maxLen != -1 ) {
+ int size = (int) strnlen( fieldName(), maxLen - 1 );
+ massert( 10333 , "Invalid field name", size != -1 );
+ fieldNameSize_ = size + 1;
+ }
+ }
+ totalSize = -1;
+ }
+
+ string _asCode() const;
+ OpTime _opTime() const;
+
+private:
+ const char *data;
+ mutable int fieldNameSize_; // cached value
+ int fieldNameSize() const {
+ if ( fieldNameSize_ == -1 )
+ fieldNameSize_ = (int)strlen( fieldName() ) + 1;
+ return fieldNameSize_;
+ }
+ mutable int totalSize; /* caches the computed size */
+
+ friend class BSONObjIterator;
+ friend class BSONObj;
+ const BSONElement& chk(int t) const {
+ if ( t != type() ){
+ StringBuilder ss;
+ ss << "wrong type for BSONElement (" << fieldName() << ") " << type() << " != " << t;
+ uasserted(13111, ss.str() );
+ }
+ return *this;
+ }
+ const BSONElement& chk(bool expr) const {
+ uassert(13118, "unexpected or missing type value in BSON object", expr);
+ return *this;
+ }
+};
+
+
+ inline int BSONElement::canonicalType() const {
+ BSONType t = type();
+ switch ( t ){
+ case MinKey:
+ case MaxKey:
+ return t;
+ case EOO:
+ case Undefined:
+ return 0;
+ case jstNULL:
+ return 5;
+ case NumberDouble:
+ case NumberInt:
+ case NumberLong:
+ return 10;
+ case mongo::String:
+ case Symbol:
+ return 15;
+ case Object:
+ return 20;
+ case mongo::Array:
+ return 25;
+ case BinData:
+ return 30;
+ case jstOID:
+ return 35;
+ case mongo::Bool:
+ return 40;
+ case mongo::Date:
+ case Timestamp:
+ return 45;
+ case RegEx:
+ return 50;
+ case DBRef:
+ return 55;
+ case Code:
+ return 60;
+ case CodeWScope:
+ return 65;
+ default:
+ assert(0);
+ return -1;
+ }
+ }
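+    /* (illustrative) canonicalType() collapses types that should compare as
+       equivalent: NumberInt, NumberLong and NumberDouble all map to 10, and
+       String and Symbol both map to 15, so { n : 1 } and { n : 1.0 } order
+       identically. */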
+
+ inline bool BSONElement::trueValue() const {
+ switch( type() ) {
+ case NumberLong:
+ return *reinterpret_cast< const long long* >( value() ) != 0;
+ case NumberDouble:
+ return *reinterpret_cast< const double* >( value() ) != 0;
+ case NumberInt:
+ return *reinterpret_cast< const int* >( value() ) != 0;
+ case mongo::Bool:
+ return boolean();
+ case EOO:
+ case jstNULL:
+ case Undefined:
+ return false;
+
+ default:
+ ;
+ }
+ return true;
+ }
+
+ /** True if element is of a numeric type. */
+ inline bool BSONElement::isNumber() const {
+ switch( type() ) {
+ case NumberLong:
+ case NumberDouble:
+ case NumberInt:
+ return true;
+ default:
+ return false;
+ }
+ }
+
+ inline bool BSONElement::isSimpleType() const {
+ switch( type() ){
+ case NumberLong:
+ case NumberDouble:
+ case NumberInt:
+ case mongo::String:
+ case mongo::Bool:
+ case mongo::Date:
+ case jstOID:
+ return true;
+ default:
+ return false;
+ }
+ }
+
+ inline double BSONElement::numberDouble() const {
+ switch( type() ) {
+ case NumberDouble:
+ return _numberDouble();
+ case NumberInt:
+ return *reinterpret_cast< const int* >( value() );
+ case NumberLong:
+ return (double) *reinterpret_cast< const long long* >( value() );
+ default:
+ return 0;
+ }
+ }
+
+ /** Retrieve int value for the element safely. Zero returned if not a number. */
+ inline int BSONElement::numberInt() const {
+ switch( type() ) {
+ case NumberDouble:
+ return (int) _numberDouble();
+ case NumberInt:
+ return _numberInt();
+ case NumberLong:
+ return (int) _numberLong();
+ default:
+ return 0;
+ }
+ }
+
+ /** Retrieve long value for the element safely. Zero returned if not a number. */
+ inline long long BSONElement::numberLong() const {
+ switch( type() ) {
+ case NumberDouble:
+ return (long long) _numberDouble();
+ case NumberInt:
+ return _numberInt();
+ case NumberLong:
+ return _numberLong();
+ default:
+ return 0;
+ }
+ }
+
+ inline BSONElement::BSONElement() {
+ static char z = 0;
+ data = &z;
+ fieldNameSize_ = 0;
+ totalSize = 1;
+ }
+
+}
diff --git a/bson/bsoninlines.h b/bson/bsoninlines.h
new file mode 100644
index 0000000..f4140a3
--- /dev/null
+++ b/bson/bsoninlines.h
@@ -0,0 +1,597 @@
+// bsoninlines.h
+
+/* Copyright 2009 10gen Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <map>
+#include "util/atomic_int.h"
+#include "util/misc.h"
+#include "../util/hex.h"
+
+namespace mongo {
+
+ inline BSONObjIterator BSONObj::begin() {
+ return BSONObjIterator(*this);
+ }
+
+ inline BSONObj BSONElement::embeddedObjectUserCheck() const {
+ uassert( 10065 , "invalid parameter: expected an object", isABSONObj() );
+ return BSONObj(value());
+ }
+
+ inline BSONObj BSONElement::embeddedObject() const {
+ assert( isABSONObj() );
+ return BSONObj(value());
+ }
+
+ inline BSONObj BSONElement::codeWScopeObject() const {
+ assert( type() == CodeWScope );
+ int strSizeWNull = *(int *)( value() + 4 );
+ return BSONObj( value() + 4 + 4 + strSizeWNull );
+ }
+
+ inline BSONObj BSONObj::copy() const {
+ char *p = (char*) malloc(objsize());
+ memcpy(p, objdata(), objsize());
+ return BSONObj(p, true);
+ }
+
+ // wrap this element up as a singleton object.
+ inline BSONObj BSONElement::wrap() const {
+ BSONObjBuilder b(size()+6);
+ b.append(*this);
+ return b.obj();
+ }
+
+ inline BSONObj BSONElement::wrap( const char * newName ) const {
+ BSONObjBuilder b(size()+6+(int)strlen(newName));
+ b.appendAs(*this,newName);
+ return b.obj();
+ }
+
+
+ inline bool BSONObj::hasElement(const char *name) const {
+ if ( !isEmpty() ) {
+ BSONObjIterator it(*this);
+ while ( it.moreWithEOO() ) {
+ BSONElement e = it.next();
+ if ( strcmp(name, e.fieldName()) == 0 )
+ return true;
+ }
+ }
+ return false;
+ }
+
+ inline BSONElement BSONObj::getField(const StringData& name) const {
+ BSONObjIterator i(*this);
+ while ( i.more() ) {
+ BSONElement e = i.next();
+ if ( strcmp(e.fieldName(), name.data()) == 0 )
+ return e;
+ }
+ return BSONElement();
+ }
+
+ /* add all the fields from the object specified to this object */
+ inline BSONObjBuilder& BSONObjBuilder::appendElements(BSONObj x) {
+ BSONObjIterator it(x);
+ while ( it.moreWithEOO() ) {
+ BSONElement e = it.next();
+ if ( e.eoo() ) break;
+ append(e);
+ }
+ return *this;
+ }
+
+ inline bool BSONObj::isValid(){
+ int x = objsize();
+ return x > 0 && x <= 1024 * 1024 * 8;
+ }
+
+ inline bool BSONObj::getObjectID(BSONElement& e) const {
+ BSONElement f = getField("_id");
+ if( !f.eoo() ) {
+ e = f;
+ return true;
+ }
+ return false;
+ }
+
+ inline BSONObjBuilderValueStream::BSONObjBuilderValueStream( BSONObjBuilder * builder ) {
+ _fieldName = 0;
+ _builder = builder;
+ }
+
+ template<class T>
+ inline BSONObjBuilder& BSONObjBuilderValueStream::operator<<( T value ) {
+ _builder->append(_fieldName, value);
+ _fieldName = 0;
+ return *_builder;
+ }
+
+ inline BSONObjBuilder& BSONObjBuilderValueStream::operator<<( const BSONElement& e ) {
+ _builder->appendAs( e , _fieldName );
+ _fieldName = 0;
+ return *_builder;
+ }
+
+ inline Labeler BSONObjBuilderValueStream::operator<<( const Labeler::Label &l ) {
+ return Labeler( l, this );
+ }
+
+ inline void BSONObjBuilderValueStream::endField( const char *nextFieldName ) {
+ if ( _fieldName && haveSubobj() ) {
+ _builder->append( _fieldName, subobj()->done() );
+ }
+ _subobj.reset();
+ _fieldName = nextFieldName;
+ }
+
+ inline BSONObjBuilder *BSONObjBuilderValueStream::subobj() {
+ if ( !haveSubobj() )
+ _subobj.reset( new BSONObjBuilder() );
+ return _subobj.get();
+ }
+
+ template<class T> inline
+ BSONObjBuilder& Labeler::operator<<( T value ) {
+ s_->subobj()->append( l_.l_, value );
+ return *s_->_builder;
+ }
+
+ inline
+ BSONObjBuilder& Labeler::operator<<( const BSONElement& e ) {
+ s_->subobj()->appendAs( e, l_.l_ );
+ return *s_->_builder;
+ }
+
+ // {a: {b:1}} -> {a.b:1}
+ void nested2dotted(BSONObjBuilder& b, const BSONObj& obj, const string& base="");
+ inline BSONObj nested2dotted(const BSONObj& obj){
+ BSONObjBuilder b;
+ nested2dotted(b, obj);
+ return b.obj();
+ }
+
+ // {a.b:1} -> {a: {b:1}}
+ void dotted2nested(BSONObjBuilder& b, const BSONObj& obj);
+ inline BSONObj dotted2nested(const BSONObj& obj){
+ BSONObjBuilder b;
+ dotted2nested(b, obj);
+ return b.obj();
+ }
+
+ inline BSONObjIterator BSONObjBuilder::iterator() const {
+ const char * s = _b.buf() + _offset;
+ const char * e = _b.buf() + _b.len();
+ return BSONObjIterator( s , e );
+ }
+
+ /* WARNING: nested/dotted conversions are not 100% reversible
+ * nested2dotted(dotted2nested({a.b: {c:1}})) -> {a.b.c: 1}
+ * also, dotted2nested ignores order
+ */
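+    /* usage sketch:
+       BSONObj flat = nested2dotted( BSON( "a" << BSON( "b" << 1 ) ) ); // { "a.b" : 1 }
+       BSONObj deep = dotted2nested( flat );                            // { a : { b : 1 } }
+    */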
+
+ typedef map<string, BSONElement> BSONMap;
+ inline BSONMap bson2map(const BSONObj& obj){
+ BSONMap m;
+ BSONObjIterator it(obj);
+ while (it.more()){
+ BSONElement e = it.next();
+ m[e.fieldName()] = e;
+ }
+ return m;
+ }
+
+    struct BSONElementFieldNameCmp {
+        bool operator()( const BSONElement &l, const BSONElement &r ) const {
+            // must be a strict weak ordering (strcmp < 0, not <= 0) for use with std::set
+            return strcmp( l.fieldName() , r.fieldName() ) < 0;
+        }
+    };
+
+ typedef set<BSONElement, BSONElementFieldNameCmp> BSONSortedElements;
+ inline BSONSortedElements bson2set( const BSONObj& obj ){
+ BSONSortedElements s;
+ BSONObjIterator it(obj);
+ while ( it.more() )
+ s.insert( it.next() );
+ return s;
+ }
+
+ inline string BSONObj::toString( bool isArray, bool full ) const {
+ if ( isEmpty() ) return "{}";
+ StringBuilder s;
+ toString(s, isArray, full);
+ return s.str();
+ }
+ inline void BSONObj::toString(StringBuilder& s, bool isArray, bool full ) const {
+ if ( isEmpty() ){
+ s << "{}";
+ return;
+ }
+
+ s << ( isArray ? "[ " : "{ " );
+ BSONObjIterator i(*this);
+ bool first = true;
+ while ( 1 ) {
+ massert( 10327 , "Object does not end with EOO", i.moreWithEOO() );
+ BSONElement e = i.next( true );
+ massert( 10328 , "Invalid element size", e.size() > 0 );
+ massert( 10329 , "Element too large", e.size() < ( 1 << 30 ) );
+ int offset = (int) (e.rawdata() - this->objdata());
+ massert( 10330 , "Element extends past end of object",
+ e.size() + offset <= this->objsize() );
+ e.validate();
+ bool end = ( e.size() + offset == this->objsize() );
+ if ( e.eoo() ) {
+ massert( 10331 , "EOO Before end of object", end );
+ break;
+ }
+ if ( first )
+ first = false;
+ else
+ s << ", ";
+ e.toString(s, !isArray, full );
+ }
+ s << ( isArray ? " ]" : " }" );
+ }
+
+ extern unsigned getRandomNumber();
+
+ inline void BSONElement::validate() const {
+ const BSONType t = type();
+
+ switch( t ) {
+ case DBRef:
+ case Code:
+ case Symbol:
+ case mongo::String: {
+ int x = valuestrsize();
+ if ( x > 0 && valuestr()[x-1] == 0 )
+ return;
+ StringBuilder buf;
+ buf << "Invalid dbref/code/string/symbol size: " << x << " strnlen:" << mongo::strnlen( valuestr() , x );
+ msgasserted( 10321 , buf.str() );
+ break;
+ }
+ case CodeWScope: {
+ int totalSize = *( int * )( value() );
+ massert( 10322 , "Invalid CodeWScope size", totalSize >= 8 );
+ int strSizeWNull = *( int * )( value() + 4 );
+ massert( 10323 , "Invalid CodeWScope string size", totalSize >= strSizeWNull + 4 + 4 );
+ massert( 10324 , "Invalid CodeWScope string size",
+ strSizeWNull > 0 &&
+ (strSizeWNull - 1) == mongo::strnlen( codeWScopeCode(), strSizeWNull ) );
+ massert( 10325 , "Invalid CodeWScope size", totalSize >= strSizeWNull + 4 + 4 + 4 );
+ int objSize = *( int * )( value() + 4 + 4 + strSizeWNull );
+ massert( 10326 , "Invalid CodeWScope object size", totalSize == 4 + 4 + strSizeWNull + objSize );
+ // Subobject validation handled elsewhere.
+ }
+ case Object:
+ // We expect Object size validation to be handled elsewhere.
+ default:
+ break;
+ }
+ }
+
+ inline int BSONElement::size( int maxLen ) const {
+ if ( totalSize >= 0 )
+ return totalSize;
+
+ int remain = maxLen - fieldNameSize() - 1;
+
+ int x = 0;
+ switch ( type() ) {
+ case EOO:
+ case Undefined:
+ case jstNULL:
+ case MaxKey:
+ case MinKey:
+ break;
+ case mongo::Bool:
+ x = 1;
+ break;
+ case NumberInt:
+ x = 4;
+ break;
+ case Timestamp:
+ case mongo::Date:
+ case NumberDouble:
+ case NumberLong:
+ x = 8;
+ break;
+ case jstOID:
+ x = 12;
+ break;
+ case Symbol:
+ case Code:
+ case mongo::String:
+ massert( 10313 , "Insufficient bytes to calculate element size", maxLen == -1 || remain > 3 );
+ x = valuestrsize() + 4;
+ break;
+ case CodeWScope:
+ massert( 10314 , "Insufficient bytes to calculate element size", maxLen == -1 || remain > 3 );
+ x = objsize();
+ break;
+
+ case DBRef:
+ massert( 10315 , "Insufficient bytes to calculate element size", maxLen == -1 || remain > 3 );
+ x = valuestrsize() + 4 + 12;
+ break;
+ case Object:
+ case mongo::Array:
+ massert( 10316 , "Insufficient bytes to calculate element size", maxLen == -1 || remain > 3 );
+ x = objsize();
+ break;
+ case BinData:
+ massert( 10317 , "Insufficient bytes to calculate element size", maxLen == -1 || remain > 3 );
+ x = valuestrsize() + 4 + 1/*subtype*/;
+ break;
+ case RegEx:
+ {
+ const char *p = value();
+ size_t len1 = ( maxLen == -1 ) ? strlen( p ) : mongo::strnlen( p, remain );
+ //massert( 10318 , "Invalid regex string", len1 != -1 ); // ERH - 4/28/10 - don't think this does anything
+ p = p + len1 + 1;
+ size_t len2 = ( maxLen == -1 ) ? strlen( p ) : mongo::strnlen( p, remain - len1 - 1 );
+ //massert( 10319 , "Invalid regex options string", len2 != -1 ); // ERH - 4/28/10 - don't think this does anything
+ x = (int) (len1 + 1 + len2 + 1);
+ }
+ break;
+ default: {
+ StringBuilder ss;
+ ss << "BSONElement: bad type " << (int) type();
+ string msg = ss.str();
+ massert( 10320 , msg.c_str(),false);
+ }
+ }
+ totalSize = x + fieldNameSize() + 1; // BSONType
+
+ return totalSize;
+ }
+
+ inline string BSONElement::toString( bool includeFieldName, bool full ) const {
+ StringBuilder s;
+ toString(s, includeFieldName, full);
+ return s.str();
+ }
+ inline void BSONElement::toString(StringBuilder& s, bool includeFieldName, bool full ) const {
+ if ( includeFieldName && type() != EOO )
+ s << fieldName() << ": ";
+ switch ( type() ) {
+ case EOO:
+ s << "EOO";
+ break;
+ case mongo::Date:
+ s << "new Date(" << date() << ')';
+ break;
+ case RegEx:
+ {
+ s << "/" << regex() << '/';
+ const char *p = regexFlags();
+ if ( p ) s << p;
+ }
+ break;
+ case NumberDouble:
+ {
+ stringstream tmp;
+ tmp.precision( 16 );
+ tmp << number();
+ string n = tmp.str();
+ s << n;
+ // indicate this is a double:
+ if( strchr(n.c_str(), '.') == 0 && strchr(n.c_str(), 'E') == 0 && strchr(n.c_str(), 'N') == 0 )
+ s << ".0";
+ }
+ break;
+ case NumberLong:
+ s << _numberLong();
+ break;
+ case NumberInt:
+ s << _numberInt();
+ break;
+ case mongo::Bool:
+ s << ( boolean() ? "true" : "false" );
+ break;
+ case Object:
+ embeddedObject().toString(s, false, full);
+ break;
+ case mongo::Array:
+ embeddedObject().toString(s, true, full);
+ break;
+ case Undefined:
+ s << "undefined";
+ break;
+ case jstNULL:
+ s << "null";
+ break;
+ case MaxKey:
+ s << "MaxKey";
+ break;
+ case MinKey:
+ s << "MinKey";
+ break;
+ case CodeWScope:
+ s << "CodeWScope( "
+ << codeWScopeCode() << ", " << codeWScopeObject().toString(false, full) << ")";
+ break;
+ case Code:
+ if ( !full && valuestrsize() > 80 ) {
+ s.write(valuestr(), 70);
+ s << "...";
+ } else {
+ s.write(valuestr(), valuestrsize()-1);
+ }
+ break;
+ case Symbol:
+ case mongo::String:
+ s << '"';
+ if ( !full && valuestrsize() > 80 ) {
+ s.write(valuestr(), 70);
+ s << "...\"";
+ } else {
+ s.write(valuestr(), valuestrsize()-1);
+ s << '"';
+ }
+ break;
+ case DBRef:
+ s << "DBRef('" << valuestr() << "',";
+ {
+ mongo::OID *x = (mongo::OID *) (valuestr() + valuestrsize());
+ s << *x << ')';
+ }
+ break;
+ case jstOID:
+ s << "ObjectId('";
+ s << __oid() << "')";
+ break;
+ case BinData:
+ s << "BinData";
+ if (full){
+ int len;
+ const char* data = binDataClean(len);
+ s << '(' << binDataType() << ", " << toHex(data, len) << ')';
+ }
+ break;
+ case Timestamp:
+ s << "Timestamp " << timestampTime() << "|" << timestampInc();
+ break;
+ default:
+ s << "?type=" << type();
+ break;
+ }
+ }
+
+    /* the returned element has eoo() true if there is no match;
+       supports "." notation to reach into embedded objects
+    */
+ inline BSONElement BSONObj::getFieldDotted(const char *name) const {
+ BSONElement e = getField( name );
+ if ( e.eoo() ) {
+ const char *p = strchr(name, '.');
+ if ( p ) {
+ string left(name, p-name);
+ BSONObj sub = getObjectField(left.c_str());
+ return sub.isEmpty() ? BSONElement() : sub.getFieldDotted(p+1);
+ }
+ }
+
+ return e;
+ }
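+    /* e.g. (sketch): for obj = { a : { b : 1 } },
+       obj.getFieldDotted( "a.b" ) yields the element b : 1, while
+       obj.getFieldDotted( "a.c" ) yields an element with eoo() true. */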
+
+ inline BSONObj BSONObj::getObjectField(const char *name) const {
+ BSONElement e = getField(name);
+ BSONType t = e.type();
+ return t == Object || t == Array ? e.embeddedObject() : BSONObj();
+ }
+
+ inline int BSONObj::nFields() const {
+ int n = 0;
+ BSONObjIterator i(*this);
+ while ( i.moreWithEOO() ) {
+ BSONElement e = i.next();
+ if ( e.eoo() )
+ break;
+ n++;
+ }
+ return n;
+ }
+
+ inline BSONObj::BSONObj() {
+ /* LITTLE ENDIAN */
+ static char p[] = { 5, 0, 0, 0, 0 };
+ _objdata = p;
+ }
+
+ inline BSONObj BSONElement::Obj() const { return embeddedObjectUserCheck(); }
+
+ inline BSONElement BSONElement::operator[] (const string& field) const {
+ BSONObj o = Obj();
+ return o[field];
+ }
+
+ inline void BSONObj::elems(vector<BSONElement> &v) const {
+ BSONObjIterator i(*this);
+ while( i.more() )
+ v.push_back(i.next());
+ }
+
+ inline void BSONObj::elems(list<BSONElement> &v) const {
+ BSONObjIterator i(*this);
+ while( i.more() )
+ v.push_back(i.next());
+ }
+
+ template <class T>
+ void BSONObj::Vals(vector<T>& v) const {
+ BSONObjIterator i(*this);
+ while( i.more() ) {
+ T t;
+ i.next().Val(t);
+ v.push_back(t);
+ }
+ }
+ template <class T>
+ void BSONObj::Vals(list<T>& v) const {
+ BSONObjIterator i(*this);
+ while( i.more() ) {
+ T t;
+ i.next().Val(t);
+ v.push_back(t);
+ }
+ }
+
+ template <class T>
+ void BSONObj::vals(vector<T>& v) const {
+ BSONObjIterator i(*this);
+ while( i.more() ) {
+ try {
+ T t;
+ i.next().Val(t);
+ v.push_back(t);
+ } catch(...) { }
+ }
+ }
+ template <class T>
+ void BSONObj::vals(list<T>& v) const {
+ BSONObjIterator i(*this);
+ while( i.more() ) {
+ try {
+ T t;
+ i.next().Val(t);
+ v.push_back(t);
+ } catch(...) { }
+ }
+ }
+
+ inline ostream& operator<<( ostream &s, const BSONObj &o ) {
+ return s << o.toString();
+ }
+
+ inline ostream& operator<<( ostream &s, const BSONElement &e ) {
+ return s << e.toString();
+ }
+
+ inline void BSONElement::Val(BSONObj& v) const { v = Obj(); }
+
+ template<typename T>
+ inline BSONFieldValue<BSONObj> BSONField<T>::query( const char * q , const T& t ) const {
+ BSONObjBuilder b;
+ b.append( q , t );
+ return BSONFieldValue<BSONObj>( _name , b.obj() );
+ }
+}
diff --git a/bson/bsonmisc.h b/bson/bsonmisc.h
new file mode 100644
index 0000000..40ec6d3
--- /dev/null
+++ b/bson/bsonmisc.h
@@ -0,0 +1,195 @@
+// @file bsonmisc.h
+
+/* Copyright 2009 10gen Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+namespace mongo {
+
+ int getGtLtOp(const BSONElement& e);
+
+ struct BSONElementCmpWithoutField {
+ bool operator()( const BSONElement &l, const BSONElement &r ) const {
+ return l.woCompare( r, false ) < 0;
+ }
+ };
+
+ class BSONObjCmp {
+ public:
+ BSONObjCmp( const BSONObj &_order = BSONObj() ) : order( _order ) {}
+ bool operator()( const BSONObj &l, const BSONObj &r ) const {
+ return l.woCompare( r, order ) < 0;
+ }
+ private:
+ BSONObj order;
+ };
+
+ class BSONObjCmpDefaultOrder : public BSONObjCmp {
+ public:
+ BSONObjCmpDefaultOrder() : BSONObjCmp( BSONObj() ) {}
+ };
+
+ typedef set< BSONObj, BSONObjCmpDefaultOrder > BSONObjSetDefaultOrder;
+
+ enum FieldCompareResult {
+ LEFT_SUBFIELD = -2,
+ LEFT_BEFORE = -1,
+ SAME = 0,
+ RIGHT_BEFORE = 1 ,
+ RIGHT_SUBFIELD = 2
+ };
+
+ FieldCompareResult compareDottedFieldNames( const string& l , const string& r );
+
+/** Use BSON macro to build a BSONObj from a stream
+
+ e.g.,
+ BSON( "name" << "joe" << "age" << 33 )
+
+ with auto-generated object id:
+ BSON( GENOID << "name" << "joe" << "age" << 33 )
+
+ The labels GT, GTE, LT, LTE, NE can be helpful for stream-oriented construction
+ of a BSONObj, particularly when assembling a Query. For example,
+ BSON( "a" << GT << 23.4 << NE << 30 << "b" << 2 ) produces the object
+ { a: { \$gt: 23.4, \$ne: 30 }, b: 2 }.
+*/
+#define BSON(x) (( mongo::BSONObjBuilder(64) << x ).obj())
+
+/** Use BSON_ARRAY macro like BSON macro, but without keys
+
+ BSONArray arr = BSON_ARRAY( "hello" << 1 << BSON( "foo" << BSON_ARRAY( "bar" << "baz" << "qux" ) ) );
+
+ */
+#define BSON_ARRAY(x) (( mongo::BSONArrayBuilder() << x ).arr())
+
+ /* Utility class to auto assign object IDs.
+ Example:
+ cout << BSON( GENOID << "z" << 3 ); // { _id : ..., z : 3 }
+ */
+ extern struct GENOIDLabeler { } GENOID;
+
+ /* Utility class to add a Date element with the current time
+ Example:
+ cout << BSON( "created" << DATENOW ); // { created : "2009-10-09 11:41:42" }
+ */
+ extern struct DateNowLabeler { } DATENOW;
+
+ // Utility class to implement GT, GTE, etc as described above.
+ class Labeler {
+ public:
+ struct Label {
+ Label( const char *l ) : l_( l ) {}
+ const char *l_;
+ };
+ Labeler( const Label &l, BSONObjBuilderValueStream *s ) : l_( l ), s_( s ) {}
+ template<class T>
+ BSONObjBuilder& operator<<( T value );
+
+ /* the value of the element e is appended i.e. for
+ "age" << GT << someElement
+ one gets
+ { age : { $gt : someElement's value } }
+ */
+ BSONObjBuilder& operator<<( const BSONElement& e );
+ private:
+ const Label &l_;
+ BSONObjBuilderValueStream *s_;
+ };
+
+ extern Labeler::Label GT;
+ extern Labeler::Label GTE;
+ extern Labeler::Label LT;
+ extern Labeler::Label LTE;
+ extern Labeler::Label NE;
+ extern Labeler::Label SIZE;
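+    /* e.g. (sketch): BSON( "a" << NE << 1 << "arr" << SIZE << 3 ) produces
+       { a : { $ne : 1 }, arr : { $size : 3 } } */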
+
+
+ // $or helper: OR(BSON("x" << GT << 7), BSON("y" << LT << 6));
+ // becomes : {$or: [{x: {$gt: 7}}, {y: {$lt: 6}}]}
+ inline BSONObj OR(const BSONObj& a, const BSONObj& b);
+ inline BSONObj OR(const BSONObj& a, const BSONObj& b, const BSONObj& c);
+ inline BSONObj OR(const BSONObj& a, const BSONObj& b, const BSONObj& c, const BSONObj& d);
+ inline BSONObj OR(const BSONObj& a, const BSONObj& b, const BSONObj& c, const BSONObj& d, const BSONObj& e);
+ inline BSONObj OR(const BSONObj& a, const BSONObj& b, const BSONObj& c, const BSONObj& d, const BSONObj& e, const BSONObj& f);
+ // definitions in bsonobjbuilder.h b/c of incomplete types
+
+ // Utility class to implement BSON( key << val ) as described above.
+ class BSONObjBuilderValueStream : public boost::noncopyable {
+ public:
+ friend class Labeler;
+ BSONObjBuilderValueStream( BSONObjBuilder * builder );
+
+ BSONObjBuilder& operator<<( const BSONElement& e );
+
+ template<class T>
+ BSONObjBuilder& operator<<( T value );
+
+ BSONObjBuilder& operator<<(DateNowLabeler& id);
+
+ Labeler operator<<( const Labeler::Label &l );
+
+ void endField( const char *nextFieldName = 0 );
+ bool subobjStarted() const { return _fieldName != 0; }
+
+ private:
+ const char * _fieldName;
+ BSONObjBuilder * _builder;
+
+ bool haveSubobj() const { return _subobj.get() != 0; }
+ BSONObjBuilder *subobj();
+ auto_ptr< BSONObjBuilder > _subobj;
+ };
+
+ /**
+       used in conjunction with BSONObjBuilder; tracks the sizes of built objects so an appropriate initial buffer size can be chosen, avoiding excessive memory usage
+ */
+ class BSONSizeTracker {
+ public:
+ BSONSizeTracker(){
+ _pos = 0;
+ for ( int i=0; i<SIZE; i++ )
+ _sizes[i] = 512; // this is the default, so just be consistent
+ }
+
+ ~BSONSizeTracker(){
+ }
+
+ void got( int size ){
+ _sizes[_pos++] = size;
+ if ( _pos >= SIZE )
+ _pos = 0;
+ }
+
+        /**
+         * @return a buffer size hint -- currently the largest size seen so far
+         */
+ int getSize() const {
+ int x = 16; // sane min
+ for ( int i=0; i<SIZE; i++ ){
+ if ( _sizes[i] > x )
+ x = _sizes[i];
+ }
+ return x;
+ }
+
+ private:
+ enum { SIZE = 10 };
+ int _pos;
+ int _sizes[SIZE];
+ };
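+    /* usage sketch: share one tracker across builds of similar objects so
+       later builders start with a buffer sized from recent history:
+
+       BSONSizeTracker tracker;
+       for ( int i = 0; i < 1000; i++ ) {
+           BSONObjBuilder b( tracker );  // initial size = tracker.getSize()
+           // ... append fields ...
+           BSONObj o = b.obj();          // completion reports the size via got()
+       }
+    */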
+
+}
diff --git a/bson/bsonobj.h b/bson/bsonobj.h
new file mode 100644
index 0000000..0e99f28
--- /dev/null
+++ b/bson/bsonobj.h
@@ -0,0 +1,394 @@
+// @file bsonobj.h
+
+/* Copyright 2009 10gen Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <set>
+#include <list>
+#include <vector>
+#include "util/builder.h"
+#include "stringdata.h"
+
+namespace mongo {
+
+ typedef set< BSONElement, BSONElementCmpWithoutField > BSONElementSet;
+
+ /**
+ C++ representation of a "BSON" object -- that is, an extended JSON-style
+ object in a binary representation.
+
+ See bsonspec.org.
+
+ Note that BSONObj's have a smart pointer capability built in -- so you can
+ pass them around by value. The reference counts used to implement this
+ do not use locking, so copying and destroying BSONObj's are not thread-safe
+ operations.
+
+ BSON object format:
+
+ \code
+ <unsigned totalSize> {<byte BSONType><cstring FieldName><Data>}* EOO
+
+ totalSize includes itself.
+
+ Data:
+ Bool: <byte>
+ EOO: nothing follows
+ Undefined: nothing follows
+ OID: an OID object
+ NumberDouble: <double>
+ NumberInt: <int32>
+ String: <unsigned32 strsizewithnull><cstring>
+ Date: <8bytes>
+ Regex: <cstring regex><cstring options>
+ Object: a nested object, leading with its entire size, which terminates with EOO.
+ Array: same as object
+       DBRef: <int32 strsizewithnull> <cstring ns> <oid> -- a database
+              reference: a collection name plus an Object ID
+ BinData: <int len> <byte subtype> <byte[len] data>
+ Code: a function (not a closure): same format as String.
+ Symbol: a language symbol (say a python symbol). same format as String.
+ Code With Scope: <total size><String><Object>
+ \endcode
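+
+      For example, the object { x : 1 } with x stored as NumberInt encodes as
+      these 12 little-endian bytes:
+
+      \code
+      0c 00 00 00    int32 totalSize = 12 (includes itself)
+      10             BSONType NumberInt
+      78 00          cstring field name "x"
+      01 00 00 00    int32 value 1
+      00             EOO
+      \endcode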
+ */
+ class BSONObj {
+ public:
+ /** Construct a BSONObj from data in the proper format.
+ @param ifree true if the BSONObj should free() the msgdata when
+ it destructs.
+ */
+ explicit BSONObj(const char *msgdata, bool ifree = false) {
+ init(msgdata, ifree);
+ }
+ BSONObj(const Record *r);
+ /** Construct an empty BSONObj -- that is, {}. */
+ BSONObj();
+ // defensive
+ ~BSONObj() { _objdata = 0; }
+
+ void appendSelfToBufBuilder(BufBuilder& b) const {
+ assert( objsize() );
+ b.appendBuf(reinterpret_cast<const void *>( objdata() ), objsize());
+ }
+
+ /** Readable representation of a BSON object in an extended JSON-style notation.
+ This is an abbreviated representation which might be used for logging.
+ */
+ string toString( bool isArray = false, bool full=false ) const;
+ void toString(StringBuilder& s, bool isArray = false, bool full=false ) const;
+
+ /** Properly formatted JSON string.
+ @param pretty if true we try to add some lf's and indentation
+ */
+ string jsonString( JsonStringFormat format = Strict, int pretty = 0 ) const;
+
+ /** note: addFields always adds _id even if not specified */
+ int addFields(BSONObj& from, set<string>& fields); /* returns n added */
+
+ /** returns # of top level fields in the object
+ note: iterates to count the fields
+ */
+ int nFields() const;
+
+ /** adds the field names to the fields set. does NOT clear it (appends). */
+ int getFieldNames(set<string>& fields) const;
+
+        /** the returned element has eoo() true if there is no match
+            supports "." notation to reach into embedded objects
+        */
+ BSONElement getFieldDotted(const char *name) const;
+        /** the returned element has eoo() true if there is no match
+            supports "." notation to reach into embedded objects
+        */
+ BSONElement getFieldDotted(const string& name) const {
+ return getFieldDotted( name.c_str() );
+ }
+
+ /** Like getFieldDotted(), but expands multikey arrays and returns all matching objects
+ */
+ void getFieldsDotted(const StringData& name, BSONElementSet &ret ) const;
+ /** Like getFieldDotted(), but returns first array encountered while traversing the
+ dotted fields of name. The name variable is updated to represent field
+ names with respect to the returned element. */
+ BSONElement getFieldDottedOrArray(const char *&name) const;
+
+ /** Get the field of the specified name. eoo() is true on the returned
+ element if not found.
+ */
+ BSONElement getField(const StringData& name) const;
+
+ /** Get the field of the specified name. eoo() is true on the returned
+ element if not found.
+ */
+ BSONElement operator[] (const char *field) const {
+ return getField(field);
+ }
+
+ BSONElement operator[] (const string& field) const {
+ return getField(field);
+ }
+
+ BSONElement operator[] (int field) const {
+ StringBuilder ss;
+ ss << field;
+ string s = ss.str();
+ return getField(s.c_str());
+ }
+
+ /** @return true if field exists */
+ bool hasField( const char * name )const {
+ return ! getField( name ).eoo();
+ }
+
+ /** @return "" if DNE or wrong type */
+ const char * getStringField(const char *name) const;
+
+ /** @return subobject of the given name */
+ BSONObj getObjectField(const char *name) const;
+
+ /** @return INT_MIN if not present - does some type conversions */
+ int getIntField(const char *name) const;
+
+ /** @return false if not present */
+ bool getBoolField(const char *name) const;
+
+ /**
+ sets element field names to empty string
+ If a field in pattern is missing, it is omitted from the returned
+ object.
+ */
+ BSONObj extractFieldsUnDotted(BSONObj pattern) const;
+
+ /** extract items from object which match a pattern object.
+ e.g., if pattern is { x : 1, y : 1 }, builds an object with
+ x and y elements of this object, if they are present.
+ returns elements with original field names
+ */
+ BSONObj extractFields(const BSONObj &pattern , bool fillWithNull=false) const;
+
+ BSONObj filterFieldsUndotted(const BSONObj &filter, bool inFilter) const;
+
+ BSONElement getFieldUsingIndexNames(const char *fieldName, const BSONObj &indexKey) const;
+
+ /** @return the raw data of the object */
+ const char *objdata() const {
+ return _objdata;
+ }
+ /** @return total size of the BSON object in bytes */
+ int objsize() const {
+ return *(reinterpret_cast<const int*>(objdata()));
+ }
+
+ /** performs a cursory check on the object's size only. */
+ bool isValid();
+
+        /** @return true if this is a valid document for storage:
+            isValid() and no field names containing '.' or '$'
+        */
+ bool okForStorage() const;
+
+ /** @return true if object is empty -- i.e., {} */
+ bool isEmpty() const {
+ return objsize() <= 5;
+ }
+
+ void dump() const;
+
+ /** Alternative output format */
+ string hexDump() const;
+
+        /** wo='well ordered'. fields must be in same order in each object.
+ Ordering is with respect to the signs of the elements
+ and allows ascending / descending key mixing.
+ @return <0 if l<r. 0 if l==r. >0 if l>r
+ */
+ int woCompare(const BSONObj& r, const Ordering &o,
+ bool considerFieldName=true) const;
+
+        /** wo='well ordered'. fields must be in same order in each object.
+ Ordering is with respect to the signs of the elements
+ and allows ascending / descending key mixing.
+ @return <0 if l<r. 0 if l==r. >0 if l>r
+ */
+ int woCompare(const BSONObj& r, const BSONObj &ordering = BSONObj(),
+ bool considerFieldName=true) const;
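+        /* e.g. (sketch): with ordering BSON( "a" << 1 << "b" << -1 ),
+           { a:1, b:9 } compares less than { a:1, b:3 } because b is descending. */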
+
+
+ bool operator<( const BSONObj& other ) const { return woCompare( other ) < 0; }
+ bool operator<=( const BSONObj& other ) const { return woCompare( other ) <= 0; }
+ bool operator>( const BSONObj& other ) const { return woCompare( other ) > 0; }
+ bool operator>=( const BSONObj& other ) const { return woCompare( other ) >= 0; }
+
+ /**
+ * @param useDotted whether to treat sort key fields as possibly dotted and expand into them
+ */
+ int woSortOrder( const BSONObj& r , const BSONObj& sortKey , bool useDotted=false ) const;
+
+ /** This is "shallow equality" -- ints and doubles won't match. for a
+ deep equality test use woCompare (which is slower).
+ */
+ bool woEqual(const BSONObj& r) const {
+ int os = objsize();
+ if ( os == r.objsize() ) {
+ return (os == 0 || memcmp(objdata(),r.objdata(),os)==0);
+ }
+ return false;
+ }
+
+ /** @return first field of the object */
+ BSONElement firstElement() const {
+ return BSONElement(objdata() + 4);
+ }
+
+ /** @return true if field exists in the object */
+ bool hasElement(const char *name) const;
+
+ /** Get the _id field from the object. For good performance drivers should
+ assure that _id is the first element of the object; however, correct operation
+ is assured regardless.
+ @return true if found
+ */
+ bool getObjectID(BSONElement& e) const;
+
+ /** makes a copy of the object. */
+ BSONObj copy() const;
+
+ /* make sure the data buffer is under the control of this BSONObj and not a remote buffer */
+ BSONObj getOwned() const{
+ if ( !isOwned() )
+ return copy();
+ return *this;
+ }
+ bool isOwned() const { return _holder.get() != 0; }
+
+ /** @return A hash code for the object */
+ int hash() const {
+ unsigned x = 0;
+ const char *p = objdata();
+ for ( int i = 0; i < objsize(); i++ )
+ x = x * 131 + p[i];
+ return (x & 0x7fffffff) | 0x8000000; // must be > 0
+ }
+
+ // Return a version of this object where top level elements of types
+ // that are not part of the bson wire protocol are replaced with
+ // string identifier equivalents.
+ // TODO Support conversion of element types other than min and max.
+ BSONObj clientReadable() const;
+
+ /** Return new object with the field names replaced by those in the
+ passed object. */
+ BSONObj replaceFieldNames( const BSONObj &obj ) const;
+
+ /** true unless corrupt */
+ bool valid() const;
+
+ /** @return an md5 value for this object. */
+ string md5() const;
+
+ bool operator==( const BSONObj& other ) const{
+ return woCompare( other ) == 0;
+ }
+
+ enum MatchType {
+ Equality = 0,
+ LT = 0x1,
+ LTE = 0x3,
+ GTE = 0x6,
+ GT = 0x4,
+ opIN = 0x8, // { x : { $in : [1,2,3] } }
+ NE = 0x9,
+ opSIZE = 0x0A,
+ opALL = 0x0B,
+ NIN = 0x0C,
+ opEXISTS = 0x0D,
+ opMOD = 0x0E,
+ opTYPE = 0x0F,
+ opREGEX = 0x10,
+ opOPTIONS = 0x11,
+ opELEM_MATCH = 0x12,
+ opNEAR = 0x13,
+ opWITHIN = 0x14,
+ opMAX_DISTANCE=0x15
+ };
+
+ /** add all elements of the object to the specified vector */
+ void elems(vector<BSONElement> &) const;
+ /** add all elements of the object to the specified list */
+ void elems(list<BSONElement> &) const;
+
+ /** add all values of the object to the specified vector. If type mismatches, exception. */
+ template <class T>
+ void Vals(vector<T> &) const;
+ /** add all values of the object to the specified list. If type mismatches, exception. */
+ template <class T>
+ void Vals(list<T> &) const;
+
+ /** add all values of the object to the specified vector. If type mismatches, skip. */
+ template <class T>
+ void vals(vector<T> &) const;
+ /** add all values of the object to the specified list. If type mismatches, skip. */
+ template <class T>
+ void vals(list<T> &) const;
+
+ friend class BSONObjIterator;
+ typedef BSONObjIterator iterator;
+ BSONObjIterator begin();
+
+private:
+ class Holder {
+ public:
+ Holder( const char *objdata ) :
+ _objdata( objdata ) {
+ }
+ ~Holder() {
+ free((void *)_objdata);
+ _objdata = 0;
+ }
+ private:
+ const char *_objdata;
+ };
+ const char *_objdata;
+ boost::shared_ptr< Holder > _holder;
+ void init(const char *data, bool ifree) {
+ if ( ifree )
+ _holder.reset( new Holder( data ) );
+ _objdata = data;
+ if ( ! isValid() ){
+ StringBuilder ss;
+ int os = objsize();
+ ss << "Invalid BSONObj spec size: " << os << " (" << toHex( &os, 4 ) << ")";
+ try {
+ BSONElement e = firstElement();
+ ss << " first element:" << e.toString() << " ";
+ }
+ catch ( ... ){}
+ string s = ss.str();
+ massert( 10334 , s , 0 );
+ }
+ }
+ };
+ ostream& operator<<( ostream &s, const BSONObj &o );
+ ostream& operator<<( ostream &s, const BSONElement &e );
+
+ struct BSONArray : BSONObj {
+ // Don't add anything other than forwarding constructors!!!
+ BSONArray(): BSONObj() {}
+ explicit BSONArray(const BSONObj& obj): BSONObj(obj) {}
+ };
+
+}
diff --git a/bson/bsonobjbuilder.h b/bson/bsonobjbuilder.h
new file mode 100644
index 0000000..fdfe4de
--- /dev/null
+++ b/bson/bsonobjbuilder.h
@@ -0,0 +1,749 @@
+/* bsonobjbuilder.h
+
+ Classes in this file:
+ BSONObjBuilder
+ BSONArrayBuilder
+*/
+
+/* Copyright 2009 10gen Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <limits>
+#include <cmath>
+using namespace std;
+
+namespace mongo {
+
+#if defined(_WIN32)
+// warning: 'this' : used in base member initializer list
+#pragma warning( disable : 4355 )
+#endif
+
+ template<typename T>
+ class BSONFieldValue {
+ public:
+ BSONFieldValue( const string& name , const T& t ){
+ _name = name;
+ _t = t;
+ }
+
+ const T& value() const { return _t; }
+ const string& name() const { return _name; }
+
+ private:
+ string _name;
+ T _t;
+ };
+
+ template<typename T>
+ class BSONField {
+ public:
+ BSONField( const string& name , const string& longName="" )
+ : _name(name), _longName(longName){}
+ const string& name() const { return _name; }
+ operator string() const { return _name; }
+
+ BSONFieldValue<T> make( const T& t ) const {
+ return BSONFieldValue<T>( _name , t );
+ }
+
+ BSONFieldValue<BSONObj> gt( const T& t ) const { return query( "$gt" , t ); }
+ BSONFieldValue<BSONObj> lt( const T& t ) const { return query( "$lt" , t ); }
+
+ BSONFieldValue<BSONObj> query( const char * q , const T& t ) const;
+
+ BSONFieldValue<T> operator()( const T& t ) const {
+ return BSONFieldValue<T>( _name , t );
+ }
+
+ private:
+ string _name;
+ string _longName;
+ };
+
+ /** Utility for creating a BSONObj.
+ See also the BSON() and BSON_ARRAY() macros.
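+
+        A minimal usage sketch:
+        \code
+        BSONObjBuilder b;
+        b.append( "name" , "joe" );
+        b.append( "age" , 33 );
+        BSONObj o = b.obj(); // b's buffer is handed off to o
+        \endcode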
+ */
+ class BSONObjBuilder : boost::noncopyable {
+ public:
+ /** @param initsize this is just a hint as to the final size of the object */
+ BSONObjBuilder(int initsize=512) : _b(_buf), _buf(initsize), _offset( 0 ), _s( this ) , _tracker(0) , _doneCalled(false) {
+ _b.skip(4); /*leave room for size field*/
+ }
+
+ /** @param baseBuilder construct a BSONObjBuilder using an existing BufBuilder */
+ BSONObjBuilder( BufBuilder &baseBuilder ) : _b( baseBuilder ), _buf( 0 ), _offset( baseBuilder.len() ), _s( this ) , _tracker(0) , _doneCalled(false) {
+ _b.skip( 4 );
+ }
+
+ BSONObjBuilder( const BSONSizeTracker & tracker ) : _b(_buf) , _buf(tracker.getSize() ), _offset(0), _s( this ) , _tracker( (BSONSizeTracker*)(&tracker) ) , _doneCalled(false) {
+ _b.skip( 4 );
+ }
+
+ ~BSONObjBuilder(){
+ if ( !_doneCalled && _b.buf() && _buf.getSize() == 0 ){
+ _done();
+ }
+ }
+
+ /** add all the fields from the object specified to this object */
+ BSONObjBuilder& appendElements(BSONObj x);
+
+ /** append element to the object we are building */
+ BSONObjBuilder& append( const BSONElement& e) {
+ assert( !e.eoo() ); // do not append eoo, that would corrupt us. the builder auto appends when done() is called.
+ _b.appendBuf((void*) e.rawdata(), e.size());
+ return *this;
+ }
+
+ /** append an element but with a new name */
+ BSONObjBuilder& appendAs(const BSONElement& e, const StringData& fieldName) {
+ assert( !e.eoo() ); // do not append eoo, that would corrupt us. the builder auto appends when done() is called.
+ _b.appendNum((char) e.type());
+ _b.appendStr(fieldName);
+ _b.appendBuf((void *) e.value(), e.valuesize());
+ return *this;
+ }
+
+ /** add a subobject as a member */
+ BSONObjBuilder& append(const StringData& fieldName, BSONObj subObj) {
+ _b.appendNum((char) Object);
+ _b.appendStr(fieldName);
+ _b.appendBuf((void *) subObj.objdata(), subObj.objsize());
+ return *this;
+ }
+
+ /** add a subobject as a member */
+ BSONObjBuilder& appendObject(const StringData& fieldName, const char * objdata , int size = 0 ){
+ assert( objdata );
+ if ( size == 0 ){
+ size = *((int*)objdata);
+ }
+
+ assert( size > 4 && size < 100000000 );
+
+ _b.appendNum((char) Object);
+ _b.appendStr(fieldName);
+ _b.appendBuf((void*)objdata, size );
+ return *this;
+ }
+
+
+ /** add header for a new subobject and return bufbuilder for writing to
+ the subobject's body */
+ BufBuilder &subobjStart(const StringData& fieldName) {
+ _b.appendNum((char) Object);
+ _b.appendStr(fieldName);
+ return _b;
+ }
+
+ /** add a subobject as a member with type Array. Thus arr object should have "0", "1", ...
+ style fields in it.
+ */
+ BSONObjBuilder& appendArray(const StringData& fieldName, const BSONObj &subObj) {
+ _b.appendNum((char) Array);
+ _b.appendStr(fieldName);
+ _b.appendBuf((void *) subObj.objdata(), subObj.objsize());
+ return *this;
+ }
+ BSONObjBuilder& append(const StringData& fieldName, BSONArray arr) {
+ return appendArray(fieldName, arr);
+ }
+
+ /** add header for a new subarray and return bufbuilder for writing to
+ the subarray's body */
+ BufBuilder &subarrayStart(const StringData& fieldName) {
+ _b.appendNum((char) Array);
+ _b.appendStr(fieldName);
+ return _b;
+ }
+
+ /** Append a boolean element */
+ BSONObjBuilder& appendBool(const StringData& fieldName, int val) {
+ _b.appendNum((char) Bool);
+ _b.appendStr(fieldName);
+ _b.appendNum((char) (val?1:0));
+ return *this;
+ }
+
+ /** Append a boolean element */
+ BSONObjBuilder& append(const StringData& fieldName, bool val) {
+ _b.appendNum((char) Bool);
+ _b.appendStr(fieldName);
+ _b.appendNum((char) (val?1:0));
+ return *this;
+ }
+
+ /** Append a 32 bit integer element */
+ BSONObjBuilder& append(const StringData& fieldName, int n) {
+ _b.appendNum((char) NumberInt);
+ _b.appendStr(fieldName);
+ _b.appendNum(n);
+ return *this;
+ }
+
+ /** Append a 32 bit unsigned element - cast to a signed int. */
+ BSONObjBuilder& append(const StringData& fieldName, unsigned n) {
+ return append(fieldName, (int) n);
+ }
+
+ /** Append a NumberLong */
+ BSONObjBuilder& append(const StringData& fieldName, long long n) {
+ _b.appendNum((char) NumberLong);
+ _b.appendStr(fieldName);
+ _b.appendNum(n);
+ return *this;
+ }
+
+        /** appends a number. if |n| < max(int)/2 then uses int, otherwise long long */
+ BSONObjBuilder& appendIntOrLL( const StringData& fieldName , long long n ){
+ long long x = n;
+ if ( x < 0 )
+ x = x * -1;
+ if ( x < ( numeric_limits<int>::max() / 2 ) )
+ append( fieldName , (int)n );
+ else
+ append( fieldName , n );
+ return *this;
+ }
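+        /* e.g.: appendIntOrLL( "n" , 5 ) stores a NumberInt, while
+           appendIntOrLL( "n" , 1LL << 40 ) stores a NumberLong. */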
+
+        /**
+         * appendNumber is a family of methods for appending the smallest sensible numeric type
+         * mostly for JS
+         */
+ BSONObjBuilder& appendNumber( const StringData& fieldName , int n ){
+ return append( fieldName , n );
+ }
+
+ BSONObjBuilder& appendNumber( const StringData& fieldName , double d ){
+ return append( fieldName , d );
+ }
+
+ BSONObjBuilder& appendNumber( const StringData& fieldName , long long l ){
+ static long long maxInt = (int)pow( 2.0 , 30.0 );
+ static long long maxDouble = (long long)pow( 2.0 , 40.0 );
+
+ if ( l < maxInt )
+ append( fieldName , (int)l );
+ else if ( l < maxDouble )
+ append( fieldName , (double)l );
+ else
+ append( fieldName , l );
+ return *this;
+ }
+
+ /** Append a double element */
+ BSONObjBuilder& append(const StringData& fieldName, double n) {
+ _b.appendNum((char) NumberDouble);
+ _b.appendStr(fieldName);
+ _b.appendNum(n);
+ return *this;
+ }
+
+ /** tries to append the data as a number
+ * @return true if the data was able to be converted to a number
+ */
+ bool appendAsNumber( const StringData& fieldName , const string& data );
+
+        /** Append a BSON Object ID (OID type).
+            @deprecated Generally, it is preferred to use the append(name, oid)
+            method for this.
+        */
+ BSONObjBuilder& appendOID(const StringData& fieldName, OID *oid = 0 , bool generateIfBlank = false ) {
+ _b.appendNum((char) jstOID);
+ _b.appendStr(fieldName);
+ if ( oid )
+ _b.appendBuf( (void *) oid, 12 );
+ else {
+ OID tmp;
+ if ( generateIfBlank )
+ tmp.init();
+ else
+ tmp.clear();
+ _b.appendBuf( (void *) &tmp, 12 );
+ }
+ return *this;
+ }
+
+ /**
+ Append a BSON Object ID.
+ @param fieldName Field name, e.g., "_id".
+ @returns the builder object
+ */
+ BSONObjBuilder& append( const StringData& fieldName, OID oid ) {
+ _b.appendNum((char) jstOID);
+ _b.appendStr(fieldName);
+ _b.appendBuf( (void *) &oid, 12 );
+ return *this;
+ }
+
+ /**
+ Generate and assign an object id for the _id field.
+ _id should be the first element in the object for good performance.
+ */
+ BSONObjBuilder& genOID() {
+ return append("_id", OID::gen());
+ }
+
+ /** Append a time_t date.
+ @param dt a C-style 32 bit date value, that is
+ the number of seconds since January 1, 1970, 00:00:00 GMT
+ */
+ BSONObjBuilder& appendTimeT(const StringData& fieldName, time_t dt) {
+ _b.appendNum((char) Date);
+ _b.appendStr(fieldName);
+ _b.appendNum(static_cast<unsigned long long>(dt) * 1000);
+ return *this;
+ }
+ /** Append a date.
+ @param dt a Java-style 64 bit date value, that is
+ the number of milliseconds since January 1, 1970, 00:00:00 GMT
+ */
+ BSONObjBuilder& appendDate(const StringData& fieldName, Date_t dt) {
+ /* easy to pass a time_t to this and get a bad result. thus this warning. */
+#if defined(_DEBUG) && defined(MONGO_EXPOSE_MACROS)
+ if( dt > 0 && dt <= 0xffffffff ) {
+ static int n;
+ if( n++ == 0 )
+ log() << "DEV WARNING appendDate() called with a tiny (but nonzero) date" << endl;
+ }
+#endif
+ _b.appendNum((char) Date);
+ _b.appendStr(fieldName);
+ _b.appendNum(dt);
+ return *this;
+ }
+ BSONObjBuilder& append(const StringData& fieldName, Date_t dt) {
+ return appendDate(fieldName, dt);
+ }
+
+        /** Append a regular expression value
+            @param regex the regular expression pattern
+            @param options regex options such as "i" or "g"
+        */
+ BSONObjBuilder& appendRegex(const StringData& fieldName, const char *regex, const char *options = "") {
+ _b.appendNum((char) RegEx);
+ _b.appendStr(fieldName);
+ _b.appendStr(regex);
+ _b.appendStr(options);
+ return *this;
+ }
+        /** Append a regular expression value
+            @param regex the regular expression pattern
+            @param options regex options such as "i" or "g"
+        */
+ BSONObjBuilder& appendRegex(const StringData& fieldName, string regex, string options = "") {
+ return appendRegex(fieldName, regex.c_str(), options.c_str());
+ }
+ BSONObjBuilder& appendCode(const StringData& fieldName, const char *code) {
+ _b.appendNum((char) Code);
+ _b.appendStr(fieldName);
+ _b.appendNum((int) strlen(code)+1);
+ _b.appendStr(code);
+ return *this;
+ }
+ /** Append a string element. len DOES include terminating nul */
+ BSONObjBuilder& append(const StringData& fieldName, const char *str, int len) {
+ _b.appendNum((char) String);
+ _b.appendStr(fieldName);
+ _b.appendNum((int)len);
+ _b.appendBuf(str, len);
+ return *this;
+ }
+ /** Append a string element */
+ BSONObjBuilder& append(const StringData& fieldName, const char *str) {
+ return append(fieldName, str, (int) strlen(str)+1);
+ }
+ /** Append a string element */
+ BSONObjBuilder& append(const StringData& fieldName, string str) {
+ return append(fieldName, str.c_str(), (int) str.size()+1);
+ }
+ BSONObjBuilder& appendSymbol(const StringData& fieldName, const char *symbol) {
+ _b.appendNum((char) Symbol);
+ _b.appendStr(fieldName);
+ _b.appendNum((int) strlen(symbol)+1);
+ _b.appendStr(symbol);
+ return *this; }
+
+ /** Append a Null element to the object */
+ BSONObjBuilder& appendNull( const StringData& fieldName ) {
+ _b.appendNum( (char) jstNULL );
+ _b.appendStr( fieldName );
+ return *this; }
+
+ // Append an element that is less than all other keys.
+ BSONObjBuilder& appendMinKey( const StringData& fieldName ) {
+ _b.appendNum( (char) MinKey );
+ _b.appendStr( fieldName );
+ return *this;
+ }
+ // Append an element that is greater than all other keys.
+ BSONObjBuilder& appendMaxKey( const StringData& fieldName ) {
+ _b.appendNum( (char) MaxKey );
+ _b.appendStr( fieldName );
+ return *this;
+ }
+
+ // Append a Timestamp field -- will be updated to next OpTime on db insert.
+ BSONObjBuilder& appendTimestamp( const StringData& fieldName ) {
+ _b.appendNum( (char) Timestamp );
+ _b.appendStr( fieldName );
+ _b.appendNum( (unsigned long long) 0 );
+ return *this;
+ }
+
+ BSONObjBuilder& appendTimestamp( const StringData& fieldName , unsigned long long val ) {
+ _b.appendNum( (char) Timestamp );
+ _b.appendStr( fieldName );
+ _b.appendNum( val );
+ return *this;
+ }
+
+        /**
+        Timestamps are a special BSON datatype that is used internally for replication.
+        Append a timestamp element to the object being built.
+        @param time - in millis (but stored in seconds)
+        */
+ BSONObjBuilder& appendTimestamp( const StringData& fieldName , unsigned long long time , unsigned int inc );
+
+ /*
+ Append an element of the deprecated DBRef type.
+ @deprecated
+ */
+ BSONObjBuilder& appendDBRef( const StringData& fieldName, const char *ns, const OID &oid ) {
+ _b.appendNum( (char) DBRef );
+ _b.appendStr( fieldName );
+ _b.appendNum( (int) strlen( ns ) + 1 );
+ _b.appendStr( ns );
+ _b.appendBuf( (void *) &oid, 12 );
+ return *this;
+ }
+
+ /** Append a binary data element
+ @param fieldName name of the field
+ @param len length of the binary data in bytes
+ @param subtype subtype information for the data. @see enum BinDataType in bsontypes.h.
+ Use BinDataGeneral if you don't care about the type.
+ @param data the byte array
+ */
+ BSONObjBuilder& appendBinData( const StringData& fieldName, int len, BinDataType type, const char *data ) {
+ _b.appendNum( (char) BinData );
+ _b.appendStr( fieldName );
+ _b.appendNum( len );
+ _b.appendNum( (char) type );
+ _b.appendBuf( (void *) data, len );
+ return *this;
+ }
+ BSONObjBuilder& appendBinData( const StringData& fieldName, int len, BinDataType type, const unsigned char *data ) {
+ return appendBinData(fieldName, len, type, (const char *) data);
+ }
+
+ /**
+ Subtype 2 is deprecated.
+ Append a BSON bindata bytearray element.
+ @param data a byte array
+ @param len the length of data
+ */
+ BSONObjBuilder& appendBinDataArrayDeprecated( const char * fieldName , const char * data , int len ){
+ _b.appendNum( (char) BinData );
+ _b.appendStr( fieldName );
+ _b.appendNum( len + 4 );
+ _b.appendNum( (char)0x2 );
+ _b.appendNum( len );
+ _b.appendBuf( (void *) data, len );
+ return *this;
+ }
+
+ /** Append to the BSON object a field of type CodeWScope. This is a javascript code
+ fragment accompanied by some scope that goes with it.
+ */
+ BSONObjBuilder& appendCodeWScope( const StringData& fieldName, const char *code, const BSONObj &scope ) {
+ _b.appendNum( (char) CodeWScope );
+ _b.appendStr( fieldName );
+ _b.appendNum( ( int )( 4 + 4 + strlen( code ) + 1 + scope.objsize() ) );
+ _b.appendNum( ( int ) strlen( code ) + 1 );
+ _b.appendStr( code );
+ _b.appendBuf( ( void * )scope.objdata(), scope.objsize() );
+ return *this;
+ }
+
+ void appendUndefined( const StringData& fieldName ) {
+ _b.appendNum( (char) Undefined );
+ _b.appendStr( fieldName );
+ }
+
+ /* helper function -- see Query::where() for primary way to do this. */
+ void appendWhere( const char *code, const BSONObj &scope ){
+ appendCodeWScope( "$where" , code , scope );
+ }
+ void appendWhere( const string &code, const BSONObj &scope ){
+ appendWhere( code.c_str(), scope );
+ }
+
+ /**
+ these are the min/max when comparing, not strict min/max elements for a given type
+ */
+ void appendMinForType( const StringData& fieldName , int type );
+ void appendMaxForType( const StringData& fieldName , int type );
+
+ /** Append an array of values. */
+ template < class T >
+ BSONObjBuilder& append( const StringData& fieldName, const vector< T >& vals );
+
+ template < class T >
+ BSONObjBuilder& append( const StringData& fieldName, const list< T >& vals );
+
+ /** The returned BSONObj will free the buffer when it is finished. */
+ BSONObj obj() {
+ bool own = owned();
+ massert( 10335 , "builder does not own memory", own );
+ int l;
+ return BSONObj(decouple(l), true);
+ }
+
+ /** Fetch the object we have built.
+ BSONObjBuilder still frees the object when the builder goes out of
+ scope -- very important to keep in mind. Use obj() if you
+ would like the BSONObj to last longer than the builder.
+ */
+ BSONObj done() {
+ return BSONObj(_done());
+ }
+
+ // Like 'done' above, but does not construct a BSONObj to return to the caller.
+ void doneFast() {
+ (void)_done();
+ }
+
+ /** Peek at what is in the builder, but leave the builder ready for more appends.
+ The returned object is only valid until the next modification or destruction of the builder.
+ Intended use case: append a field if not already there.
+ */
+ BSONObj asTempObj() {
+ BSONObj temp(_done());
+ _b.setlen(_b.len()-1); //next append should overwrite the EOO
+ _doneCalled = false;
+ return temp;
+ }
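+        /* e.g. (sketch): append _id only if it is not already present:
+           if ( b.asTempObj()["_id"].eoo() )
+               b.genOID();
+        */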
+
+ /* assume ownership of the buffer - you must then free it (with free()) */
+ char* decouple(int& l) {
+ char *x = _done();
+ assert( x );
+ l = _b.len();
+ _b.decouple();
+ return x;
+ }
+ void decouple() {
+ _b.decouple(); // post done() call version. be sure jsobj frees...
+ }
+
+ void appendKeys( const BSONObj& keyPattern , const BSONObj& values );
+
+ static string numStr( int i ) {
+ if (i>=0 && i<100)
+ return numStrs[i];
+ StringBuilder o;
+ o << i;
+ return o.str();
+ }
+
+ /** Stream oriented way to add field names and values. */
+ BSONObjBuilderValueStream &operator<<(const char * name ) {
+ _s.endField( name );
+ return _s;
+ }
+
+ /** Stream oriented way to add field names and values. */
+ BSONObjBuilder& operator<<( GENOIDLabeler ) { return genOID(); }
+
+ // prevent implicit string conversions which would allow bad things like BSON( BSON( "foo" << 1 ) << 2 )
+ struct ForceExplicitString {
+ ForceExplicitString( const string &str ) : str_( str ) {}
+ string str_;
+ };
+
+ /** Stream oriented way to add field names and values. */
+ BSONObjBuilderValueStream &operator<<( const ForceExplicitString& name ) {
+ return operator<<( name.str_.c_str() );
+ }
+
+ Labeler operator<<( const Labeler::Label &l ) {
+ massert( 10336 , "No subobject started", _s.subobjStarted() );
+ return _s << l;
+ }
+
+ template<typename T>
+ BSONObjBuilderValueStream& operator<<( const BSONField<T>& f ) {
+ _s.endField( f.name().c_str() );
+ return _s;
+ }
+
+ template<typename T>
+ BSONObjBuilder& operator<<( const BSONFieldValue<T>& v ) {
+ append( v.name().c_str() , v.value() );
+ return *this;
+ }
+
+
+ /** @return true if we are using our own bufbuilder, and not an alternate that was given to us in our constructor */
+ bool owned() const { return &_b == &_buf; }
+
+ BSONObjIterator iterator() const ;
+
+ private:
+ char* _done() {
+ if ( _doneCalled )
+ return _b.buf() + _offset;
+
+ _doneCalled = true;
+ _s.endField();
+ _b.appendNum((char) EOO);
+ char *data = _b.buf() + _offset;
+ int size = _b.len() - _offset;
+ *((int*)data) = size;
+ if ( _tracker )
+ _tracker->got( size );
+ return data;
+ }
+
+ BufBuilder &_b;
+ BufBuilder _buf;
+ int _offset;
+ BSONObjBuilderValueStream _s;
+ BSONSizeTracker * _tracker;
+ bool _doneCalled;
+
+ static const string numStrs[100]; // cache of 0 to 99 inclusive
+ };
+
+ class BSONArrayBuilder : boost::noncopyable {
+ public:
+ BSONArrayBuilder() : _i(0), _b() {}
+ BSONArrayBuilder( BufBuilder &_b ) : _i(0), _b(_b) {}
+
+ template <typename T>
+ BSONArrayBuilder& append(const T& x){
+ _b.append(num().c_str(), x);
+ return *this;
+ }
+
+ BSONArrayBuilder& append(const BSONElement& e){
+ _b.appendAs(e, num());
+ return *this;
+ }
+
+ template <typename T>
+ BSONArrayBuilder& operator<<(const T& x){
+ return append(x);
+ }
+
+ void appendNull() {
+ _b.appendNull(num().c_str());
+ }
+
+ BSONArray arr(){ return BSONArray(_b.obj()); }
+
+ BSONObj done() { return _b.done(); }
+
+ void doneFast() { _b.doneFast(); }
+
+ template <typename T>
+ BSONArrayBuilder& append(const StringData& name, const T& x){
+ fill( name );
+ append( x );
+ return *this;
+ }
+
+ BufBuilder &subobjStart( const char *name = "0" ) {
+ fill( name );
+ return _b.subobjStart( num().c_str() );
+ }
+
+ BufBuilder &subarrayStart( const char *name ) {
+ fill( name );
+ return _b.subarrayStart( num().c_str() );
+ }
+
+ void appendArray( const StringData& name, BSONObj subObj ) {
+ fill( name );
+ _b.appendArray( num().c_str(), subObj );
+ }
+
+ void appendAs( const BSONElement &e, const char *name ) {
+ fill( name );
+ append( e );
+ }
+
+ private:
+ void fill( const StringData& name ) {
+ char *r;
+ int n = strtol( name.data(), &r, 10 );
+ if ( *r )
+ uasserted( 13048, (string)"can't append to array using string field name [" + name.data() + "]" );
+ while( _i < n )
+ append( nullElt() );
+ }
+
+ static BSONElement nullElt() {
+ static BSONObj n = nullObj();
+ return n.firstElement();
+ }
+
+ static BSONObj nullObj() {
+ BSONObjBuilder _b;
+ _b.appendNull( "" );
+ return _b.obj();
+ }
+
+ string num(){ return _b.numStr(_i++); }
+ int _i;
+ BSONObjBuilder _b;
+ };
+
+ template < class T >
+ inline BSONObjBuilder& BSONObjBuilder::append( const StringData& fieldName, const vector< T >& vals ) {
+ BSONObjBuilder arrBuilder;
+ for ( unsigned int i = 0; i < vals.size(); ++i )
+ arrBuilder.append( numStr( i ), vals[ i ] );
+ appendArray( fieldName, arrBuilder.done() );
+ return *this;
+ }
+
+ template < class T >
+ inline BSONObjBuilder& BSONObjBuilder::append( const StringData& fieldName, const list< T >& vals ) {
+ BSONObjBuilder arrBuilder;
+ int n = 0;
+ for( typename list< T >::const_iterator i = vals.begin(); i != vals.end(); i++ )
+ arrBuilder.append( numStr(n++), *i );
+ appendArray( fieldName, arrBuilder.done() );
+ return *this;
+ }
+
+ // $or helper: OR(BSON("x" << GT << 7), BSON("y" << LT << 6));
+ inline BSONObj OR(const BSONObj& a, const BSONObj& b)
+ { return BSON( "$or" << BSON_ARRAY(a << b) ); }
+ inline BSONObj OR(const BSONObj& a, const BSONObj& b, const BSONObj& c)
+ { return BSON( "$or" << BSON_ARRAY(a << b << c) ); }
+ inline BSONObj OR(const BSONObj& a, const BSONObj& b, const BSONObj& c, const BSONObj& d)
+ { return BSON( "$or" << BSON_ARRAY(a << b << c << d) ); }
+ inline BSONObj OR(const BSONObj& a, const BSONObj& b, const BSONObj& c, const BSONObj& d, const BSONObj& e)
+ { return BSON( "$or" << BSON_ARRAY(a << b << c << d << e) ); }
+ inline BSONObj OR(const BSONObj& a, const BSONObj& b, const BSONObj& c, const BSONObj& d, const BSONObj& e, const BSONObj& f)
+ { return BSON( "$or" << BSON_ARRAY(a << b << c << d << e << f) ); }
+
+}
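
For instance, the two-argument OR() form above expands to a $or query (GT and LT are the query-operator labels defined in the bson headers):

    BSONObj q = OR( BSON( "x" << GT << 7 ), BSON( "y" << LT << 6 ) );
    // q == { $or : [ { x : { $gt : 7 } }, { y : { $lt : 6 } } ] }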
diff --git a/bson/bsonobjiterator.h b/bson/bsonobjiterator.h
new file mode 100644
index 0000000..c8224d2
--- /dev/null
+++ b/bson/bsonobjiterator.h
@@ -0,0 +1,131 @@
+// bsonobjiterator.h
+
+/* Copyright 2009 10gen Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <boost/preprocessor/cat.hpp> // like the ## operator but works with __LINE__
+
+namespace mongo {
+ /** iterator for a BSONObj
+
+ Note each BSONObj ends with an EOO element: moreWithEOO() is therefore true even on an
+ empty object (where next().eoo() will be true), while more() stops before the EOO.
+
+ todo: we may want to make a more stl-like iterator interface for this
+ with things like begin() and end()
+ */
+ class BSONObjIterator {
+ public:
+ /** Create an iterator for a BSON object.
+ */
+ BSONObjIterator(const BSONObj& jso) {
+ int sz = jso.objsize();
+ if ( sz == 0 ) {
+ _pos = _theend = 0;
+ return;
+ }
+ _pos = jso.objdata() + 4;
+ _theend = jso.objdata() + sz;
+ }
+
+ BSONObjIterator( const char * start , const char * end ){
+ _pos = start + 4;
+ _theend = end;
+ }
+
+ /** @return true if more elements exist to be enumerated. */
+ bool moreWithEOO() {
+ return _pos < _theend;
+ }
+ bool more(){
+ return _pos < _theend && _pos[0];
+ }
+ /** @return the next element in the object. For the final element, element.eoo() will be true. */
+ BSONElement next( bool checkEnd = false ) {
+ assert( _pos < _theend );
+ BSONElement e( _pos, checkEnd ? (int)(_theend - _pos) : -1 );
+ _pos += e.size( checkEnd ? (int)(_theend - _pos) : -1 );
+ return e;
+ }
+
+ void operator++() { next(); }
+ void operator++(int) { next(); }
+
+ BSONElement operator*() {
+ assert( _pos < _theend );
+ return BSONElement(_pos, -1);
+ }
+
+ private:
+ const char* _pos;
+ const char* _theend;
+ };
+
+ class BSONObjIteratorSorted {
+ public:
+ BSONObjIteratorSorted( const BSONObj& o );
+
+ ~BSONObjIteratorSorted(){
+ assert( _fields );
+ delete[] _fields;
+ _fields = 0;
+ }
+
+ bool more(){
+ return _cur < _nfields;
+ }
+
+ BSONElement next(){
+ assert( _fields );
+ if ( _cur < _nfields )
+ return BSONElement( _fields[_cur++] );
+ return BSONElement();
+ }
+
+ private:
+ const char ** _fields;
+ int _nfields;
+ int _cur;
+ };
+
+/** Similar to BOOST_FOREACH
+ *
+ * because the iterator is defined outside of the for, you must use {} around
+ * the surrounding scope. Don't do this:
+ *
+ * if (foo)
+ * BSONForEach(e, obj)
+ * doSomething(e);
+ *
+ * but this is OK:
+ *
+ * if (foo) {
+ * BSONForEach(e, obj)
+ * doSomething(e);
+ * }
+ *
+ */
+
+#define BSONForEach(e, obj) \
+ BSONObjIterator BOOST_PP_CAT(it_,__LINE__)(obj); \
+ for ( BSONElement e; \
+ (BOOST_PP_CAT(it_,__LINE__).more() ? \
+ (e = BOOST_PP_CAT(it_,__LINE__).next(), true) : \
+ false) ; \
+ /*nothing*/ )
+
+}
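
A short iteration sketch using both the raw iterator and the macro (object contents illustrative):

    BSONObj o = BSON( "a" << 1 << "b" << "two" );

    BSONObjIterator it( o );
    while ( it.more() ) {
        BSONElement e = it.next();
        cout << e.fieldName() << endl;   // prints "a", then "b"
    }

    BSONForEach( e, o ) {                // macro form, equivalent
        cout << e.fieldName() << endl;
    }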
diff --git a/bson/bsontypes.h b/bson/bsontypes.h
new file mode 100644
index 0000000..27f2aaf
--- /dev/null
+++ b/bson/bsontypes.h
@@ -0,0 +1,107 @@
+// bsontypes.h
+
+/* Copyright 2009 10gen Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include "util/misc.h"
+
+namespace bson { }
+
+namespace mongo {
+
+ using namespace std;
+
+ class BSONArrayBuilder;
+ class BSONElement;
+ class BSONObj;
+ class BSONObjBuilder;
+ class BSONObjBuilderValueStream;
+ class BSONObjIterator;
+ class Ordering;
+ class Record;
+ struct BSONArray; // empty subclass of BSONObj useful for overloading
+ struct BSONElementCmpWithoutField;
+
+ extern BSONObj maxKey;
+ extern BSONObj minKey;
+
+/**
+ the complete list of valid BSON types
+ see also bsonspec.org
+*/
+enum BSONType {
+ /** smaller than all other types */
+ MinKey=-1,
+ /** end of object */
+ EOO=0,
+ /** double precision floating point value */
+ NumberDouble=1,
+ /** character string, stored in utf8 */
+ String=2,
+ /** an embedded object */
+ Object=3,
+ /** an embedded array */
+ Array=4,
+ /** binary data */
+ BinData=5,
+ /** Undefined type */
+ Undefined=6,
+ /** ObjectId */
+ jstOID=7,
+ /** boolean type */
+ Bool=8,
+ /** date type */
+ Date=9,
+ /** null type */
+ jstNULL=10,
+ /** regular expression, a pattern with options */
+ RegEx=11,
+ /** deprecated / will be redesigned */
+ DBRef=12,
+ /** deprecated / use CodeWScope */
+ Code=13,
+ /** a programming language (e.g., Python) symbol */
+ Symbol=14,
+ /** javascript code that can execute on the database server, with SavedContext */
+ CodeWScope=15,
+ /** 32 bit signed integer */
+ NumberInt = 16,
+ /** Updated to a Date with value next OpTime on insert */
+ Timestamp = 17,
+ /** 64 bit integer */
+ NumberLong = 18,
+ /** max type that is not MaxKey */
+ JSTypeMax=18,
+ /** larger than all other types */
+ MaxKey=127
+};
+
+ /* subtypes of BinData.
+ bdtCustom and above are ones that the JS compiler understands, but are
+ opaque to the database.
+ */
+ enum BinDataType {
+ BinDataGeneral=0,
+ Function=1,
+ ByteArrayDeprecated=2, /* use BinDataGeneral instead */
+ bdtUUID = 3,
+ MD5Type=5,
+ bdtCustom=128
+ };
+
+}
+
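A sketch of dispatching on these type tags (the object and field name are hypothetical):

    BSONElement e = obj["x"];    // obj is some BSONObj
    switch ( e.type() ) {
    case NumberInt:
    case NumberLong:
    case NumberDouble:
        cout << "numeric: " << e.number() << endl;   // number() coerces all three
        break;
    case String:
        cout << "string: " << e.valuestr() << endl;
        break;
    default:
        cout << "other: " << e.toString() << endl;
    }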
diff --git a/bson/inline_decls.h b/bson/inline_decls.h
new file mode 100644
index 0000000..aab9810
--- /dev/null
+++ b/bson/inline_decls.h
@@ -0,0 +1,33 @@
+// inline_decls.h
+
+/**
+* Copyright (C) 2010 10gen Inc.
+*
+* This program is free software: you can redistribute it and/or modify
+* it under the terms of the GNU Affero General Public License, version 3,
+* as published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU Affero General Public License for more details.
+*
+* You should have received a copy of the GNU Affero General Public License
+* along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+#pragma once
+
+#if defined(__GNUC__)
+
+#define NOINLINE_DECL __attribute__((noinline))
+
+#elif defined(_MSC_VER)
+
+#define NOINLINE_DECL __declspec(noinline)
+
+#else
+
+#define NOINLINE_DECL
+
+#endif
diff --git a/bson/oid.h b/bson/oid.h
new file mode 100644
index 0000000..c1bf34d
--- /dev/null
+++ b/bson/oid.h
@@ -0,0 +1,113 @@
+// oid.h
+
+/* Copyright 2009 10gen Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include "../util/hex.h"
+
+namespace mongo {
+
+#pragma pack(1)
+ /** Object ID type.
+ BSON objects typically have an _id field for the object id. This field should be the first
+ member of the object when present. class OID is a special type: a 12 byte id that
+ is likely to be unique to the system. You may also use other types for _id's.
+ If the _id field is missing from a BSON object, the database may add one
+ automatically on insert in certain circumstances.
+
+ Warning: You must call OID::newState() after a fork().
+ */
+ class OID {
+ union {
+ struct{
+ long long a;
+ unsigned b;
+ };
+ unsigned char data[12];
+ };
+ static unsigned _machine;
+ public:
+ /** call this after a fork */
+ static void newState();
+
+ /** initialize to 'null' */
+ void clear() { a = 0; b = 0; }
+
+ const unsigned char *getData() const { return data; }
+
+ bool operator==(const OID& r) {
+ return a==r.a&&b==r.b;
+ }
+ bool operator!=(const OID& r) {
+ return a!=r.a||b!=r.b;
+ }
+
+ /** The object ID output as 24 hex digits. */
+ string str() const {
+ return toHexLower(data, 12);
+ }
+
+ string toString() const { return str(); }
+
+ static OID gen() { OID o; o.init(); return o; }
+
+ static unsigned staticMachine(){ return _machine; }
+ /**
+ sets the contents to a new oid / randomized value
+ */
+ void init();
+
+ /** Set to the hex string value specified. */
+ void init( string s );
+
+ /** Set to the min/max OID that could be generated at given timestamp. */
+ void init( Date_t date, bool max=false );
+
+ time_t asTimeT();
+ Date_t asDateT() { return asTimeT() * (long long)1000; }
+
+ bool isSet() const { return a || b; }
+
+ int compare( const OID& other ) const { return memcmp( data , other.data , 12 ); }
+
+ bool operator<( const OID& other ) const { return compare( other ) < 0; }
+ };
+#pragma pack()
+
+ ostream& operator<<( ostream &s, const OID &o );
+ inline StringBuilder& operator<< (StringBuilder& s, const OID& o) { return (s << o.str()); }
+
+ /** Formatting mode for generating JSON from BSON.
+ See <http://mongodb.onconfluence.com/display/DOCS/Mongo+Extended+JSON>
+ for details.
+ */
+ enum JsonStringFormat {
+ /** strict RFC format */
+ Strict,
+ /** 10gen format, which is close to JS format. This form is understandable by
+ javascript running inside the Mongo server via eval() */
+ TenGen,
+ /** Javascript JSON compatible */
+ JS
+ };
+
+ inline ostream& operator<<( ostream &s, const OID &o ) {
+ s << o.str();
+ return s;
+ }
+
+}
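
A usage sketch for OID:

    OID id = OID::gen();     // fresh 12-byte id
    string hex = id.str();   // 24 lowercase hex digits
    OID copy;
    copy.init( hex );        // parse back from the hex form
    // now copy == id and copy.compare( id ) == 0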
diff --git a/bson/ordering.h b/bson/ordering.h
new file mode 100644
index 0000000..fbbfbec
--- /dev/null
+++ b/bson/ordering.h
@@ -0,0 +1,66 @@
+// ordering.h
+
+/* Copyright 2009 10gen Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+namespace mongo {
+
+ /** A precomputation of a BSON key pattern.
+ The constructor is private to make conversion more explicit so we notice where we call make().
+ Over time we should push this up higher and higher.
+ */
+ class Ordering {
+ const unsigned bits;
+ const unsigned nkeys;
+ Ordering(unsigned b,unsigned n) : bits(b),nkeys(n) { }
+ public:
+ /** so, for key pattern { a : 1, b : -1 }
+ get(0) == 1
+ get(1) == -1
+ */
+ int get(int i) const {
+ return ((1 << i) & bits) ? -1 : 1;
+ }
+
+ // for woCompare...
+ unsigned descending(unsigned mask) const { return bits & mask; }
+
+ operator string() const {
+ StringBuilder buf(32);
+ for ( unsigned i=0; i<nkeys; i++)
+ buf.append( get(i) > 0 ? "+" : "-" );
+ return buf.str();
+ }
+
+ static Ordering make(const BSONObj& obj) {
+ unsigned b = 0;
+ BSONObjIterator k(obj);
+ unsigned n = 0;
+ while( 1 ) {
+ BSONElement e = k.next();
+ if( e.eoo() )
+ break;
+ uassert( 13103, "too many compound keys", n <= 31 );
+ if( e.number() < 0 )
+ b |= (1 << n);
+ n++;
+ }
+ return Ordering(b,n);
+ }
+ };
+
+}
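
Continuing the comment's { a : 1, b : -1 } example, a short sketch (assumes the BSON() macro from the bson headers):

    Ordering ord = Ordering::make( BSON( "a" << 1 << "b" << -1 ) );
    // ord.get(0) == 1, ord.get(1) == -1
    // string(ord) == "+-"
    // ord.descending( 1 << 1 ) is nonzero: key 1 is descending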
diff --git a/bson/stringdata.h b/bson/stringdata.h
new file mode 100644
index 0000000..ccf30f7
--- /dev/null
+++ b/bson/stringdata.h
@@ -0,0 +1,64 @@
+// stringdata.h
+
+/* Copyright 2010 10gen Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef BSON_STRINGDATA_HEADER
+#define BSON_STRINGDATA_HEADER
+
+#include <string>
+#include <cstring>
+
+namespace mongo {
+
+ using std::string;
+
+ class StringData {
+ public:
+ StringData( const char* c )
+ : _data(c), _size((unsigned) strlen(c)) {}
+
+ StringData( const string& s )
+ : _data(s.c_str()), _size((unsigned) s.size()) {}
+
+ struct LiteralTag {};
+ template<size_t N>
+ StringData( const char (&val)[N], LiteralTag )
+ : _data(&val[0]), _size(N-1) {}
+
+ // Construct a StringData explicitly, for the case where the
+ // length of the string is already known. 'c' must be a
+ // pointer to a null-terminated string, and strlenOfc must be
+ // the length that std::strlen(c) would return, i.e. the
+ // index of the terminator in c.
+ StringData( const char* c, size_t strlenOfc )
+ : _data(c), _size((unsigned) strlenOfc) {}
+
+ const char* const data() const { return _data; }
+ const unsigned size() const { return _size; }
+
+ private:
+ // TODO - Hook this class up in the BSON machinery
+ // There are two assumptions here that we may want to review then.
+ // '_data' *always* finishes with a null terminator
+ // 'size' does *not* account for the null terminator
+ // These assumptions may make it easier to minimize changes to existing code
+ const char* const _data;
+ const unsigned _size;
+ };
+
+} // namespace mongo
+
+#endif // BSON_STRINGDATA_HEADER
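
A sketch of the three construction paths (takesStringData is a hypothetical consumer):

    void takesStringData( const StringData& s );   // hypothetical

    string dyn = "abc";
    takesStringData( dyn );         // from std::string: pointer plus cached size
    takesStringData( "literal" );   // from const char*: strlen() at construction
    StringData lit( "literal", StringData::LiteralTag() );   // size known at compile time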
diff --git a/util/atomic_int.h b/bson/util/atomic_int.h
index de50560..f4d2749 100644
--- a/util/atomic_int.h
+++ b/bson/util/atomic_int.h
@@ -22,8 +22,7 @@
# include <windows.h>
#endif
-namespace mongo{
-
+namespace mongo {
struct AtomicUInt{
AtomicUInt() : x(0) {}
diff --git a/util/builder.h b/bson/util/builder.h
index f9d3514..75a1ad8 100644
--- a/util/builder.h
+++ b/bson/util/builder.h
@@ -17,19 +17,27 @@
#pragma once
-#include "../stdafx.h"
+#include <string>
#include <string.h>
+#include <stdio.h>
+#include <boost/shared_ptr.hpp>
+
+#include "../inline_decls.h"
+#include "../stringdata.h"
namespace mongo {
class StringBuilder;
+ void msgasserted(int msgid, const char *msg);
+
class BufBuilder {
public:
BufBuilder(int initsize = 512) : size(initsize) {
if ( size > 0 ) {
data = (char *) malloc(size);
- assert(data);
+ if( data == 0 )
+ msgasserted(10000, "out of memory BufBuilder");
} else {
data = 0;
}
@@ -52,58 +60,54 @@ namespace mongo {
free(data);
data = (char*)malloc(maxSize);
size = maxSize;
- }
-
+ }
}
/* leave room for some stuff later */
- void skip(int n) {
- grow(n);
- }
+ char* skip(int n) { return grow(n); }
/* note this may be deallocated (realloced) if you keep writing. */
- char* buf() {
- return data;
- }
+ char* buf() { return data; }
+ const char* buf() const { return data; }
- /* assume ownership of the buffer - you must then free it */
- void decouple() {
- data = 0;
- }
+ /* assume ownership of the buffer - you must then free() it */
+ void decouple() { data = 0; }
- template<class T> void append(T j) {
- *((T*)grow(sizeof(T))) = j;
+ void appendChar(char j){
+ *((char*)grow(sizeof(char))) = j;
}
- void append(short j) {
- append<short>(j);
+ void appendNum(char j){
+ *((char*)grow(sizeof(char))) = j;
}
- void append(int j) {
- append<int>(j);
+ void appendNum(short j) {
+ *((short*)grow(sizeof(short))) = j;
}
- void append(unsigned j) {
- append<unsigned>(j);
+ void appendNum(int j) {
+ *((int*)grow(sizeof(int))) = j;
}
- void append(bool j) {
- append<bool>(j);
+ void appendNum(unsigned j) {
+ *((unsigned*)grow(sizeof(unsigned))) = j;
}
- void append(double j) {
- append<double>(j);
+ void appendNum(bool j) {
+ *((bool*)grow(sizeof(bool))) = j;
}
-
- void append(const void *src, size_t len) {
- memcpy(grow(len), src, len);
+ void appendNum(double j) {
+ *((double*)grow(sizeof(double))) = j;
}
-
- void append(const char *str) {
- append((void*) str, strlen(str)+1);
+ void appendNum(long long j) {
+ *((long long*)grow(sizeof(long long))) = j;
}
-
- void append(const string &str) {
- append( (void *)str.c_str(), str.length() + 1 );
+ void appendNum(unsigned long long j) {
+ *((unsigned long long*)grow(sizeof(unsigned long long))) = j;
+ }
+
+ void appendBuf(const void *src, size_t len) {
+ memcpy(grow((int) len), src, len);
}
- void append( int val , int padding ){
-
+ void appendStr(const StringData &str , bool includeEOO = true ) {
+ const int len = str.size() + ( includeEOO ? 1 : 0 );
+ memcpy(grow(len), str.data(), len);
}
int len() const {
@@ -114,24 +118,32 @@ namespace mongo {
l = newLen;
}
- private:
/* returns the pre-grow write position */
- char* grow(int by) {
+ inline char* grow(int by) {
int oldlen = l;
l += by;
if ( l > size ) {
- int a = size * 2;
- if ( a == 0 )
- a = 512;
- if ( l > a )
- a = l + 16 * 1024;
- assert( a < 64 * 1024 * 1024 );
- data = (char *) realloc(data, a);
- size= a;
+ grow_reallocate();
}
return data + oldlen;
}
+ int getSize() const { return size; }
+
+ private:
+ /* "slow" portion of 'grow()' */
+ void NOINLINE_DECL grow_reallocate(){
+ int a = size * 2;
+ if ( a == 0 )
+ a = 512;
+ if ( l > a )
+ a = l + 16 * 1024;
+ if( a > 64 * 1024 * 1024 )
+ msgasserted(10000, "BufBuilder grow() > 64MB");
+ data = (char *) realloc(data, a);
+ size= a;
+ }
+
char *data;
int l;
int size;
@@ -139,6 +151,10 @@ namespace mongo {
friend class StringBuilder;
};
+#if defined(_WIN32)
+#pragma warning( disable : 4996 )
+#endif
+
class StringBuilder {
public:
StringBuilder( int initsize=256 )
@@ -150,7 +166,6 @@ namespace mongo {
int z = sprintf( _buf.grow(maxSize) , macro , (val) ); \
_buf.l = prev + z; \
return *this;
-
StringBuilder& operator<<( double x ){
SBNUM( x , 25 , "%g" );
@@ -180,17 +195,17 @@ namespace mongo {
_buf.grow( 1 )[0] = c;
return *this;
}
+#undef SBNUM
- void append( const char * str ){
- int x = strlen( str );
- memcpy( _buf.grow( x ) , str , x );
+ void write( const char* buf, int len){
+ memcpy( _buf.grow( len ) , buf , len );
}
- StringBuilder& operator<<( const char * str ){
- append( str );
- return *this;
+
+ void append( const StringData& str ){
+ memcpy( _buf.grow( str.size() ) , str.data() , str.size() );
}
- StringBuilder& operator<<( const string& s ){
- append( s.c_str() );
+ StringBuilder& operator<<( const StringData& str ){
+ append( str );
return *this;
}
@@ -200,8 +215,8 @@ namespace mongo {
_buf.reset( maxSize );
}
- string str(){
- return string(_buf.data, _buf.l);
+ std::string str(){
+ return std::string(_buf.data, _buf.l);
}
private:
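
A sketch of the renamed append API and the string builder (values illustrative; appendNum writes raw host-order bytes, which matches BSON's little-endian layout on the usual platforms):

    BufBuilder b;
    b.appendNum( (int) 1234 );   // 4 raw bytes
    b.appendStr( "hello" );      // 6 bytes: the trailing NUL is included by default
    int written = b.len();       // 10

    StringBuilder s;
    s << "x=" << 3.25 << ',';
    string out = s.str();        // "x=3.25,"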
diff --git a/bson/util/misc.h b/bson/util/misc.h
new file mode 100644
index 0000000..cad9a28
--- /dev/null
+++ b/bson/util/misc.h
@@ -0,0 +1,94 @@
+/* @file util.h
+*/
+
+/*
+ * Copyright 2009 10gen Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <ctime>
+
+namespace mongo {
+
+ using namespace std;
+
+ inline void time_t_to_String(time_t t, char *buf) {
+#if defined(_WIN32)
+ ctime_s(buf, 32, &t);
+#else
+ ctime_r(&t, buf);
+#endif
+ buf[24] = 0; // don't want the \n
+ }
+
+ inline string time_t_to_String(time_t t = time(0) ){
+ char buf[64];
+#if defined(_WIN32)
+ ctime_s(buf, sizeof(buf), &t);
+#else
+ ctime_r(&t, buf);
+#endif
+ buf[24] = 0; // don't want the \n
+ return buf;
+ }
+
+ inline string time_t_to_String_no_year(time_t t) {
+ char buf[64];
+#if defined(_WIN32)
+ ctime_s(buf, sizeof(buf), &t);
+#else
+ ctime_r(&t, buf);
+#endif
+ buf[19] = 0;
+ return buf;
+ }
+
+ inline string time_t_to_String_short(time_t t) {
+ char buf[64];
+#if defined(_WIN32)
+ ctime_s(buf, sizeof(buf), &t);
+#else
+ ctime_r(&t, buf);
+#endif
+ buf[19] = 0;
+ if( buf[0] && buf[1] && buf[2] && buf[3] )
+ return buf + 4; // skip day of week
+ return buf;
+ }
+
+ struct Date_t {
+ // TODO: make signed (and look for related TODO's)
+ unsigned long long millis;
+ Date_t(): millis(0) {}
+ Date_t(unsigned long long m): millis(m) {}
+ operator unsigned long long&() { return millis; }
+ operator const unsigned long long&() const { return millis; }
+ string toString() const {
+ char buf[64];
+ time_t_to_String(millis/1000, buf);
+ return buf;
+ }
+ };
+
+ // Like strlen, but only scans up to n bytes.
+ // Returns -1 if no '\0' found.
+ inline int strnlen( const char *s, int n ) {
+ for( int i = 0; i < n; ++i )
+ if ( !s[ i ] )
+ return i;
+ return -1;
+ }
+}
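
A sketch of the time helpers and Date_t above (output format follows ctime(3)):

    time_t now = time(0);
    cout << time_t_to_String( now ) << endl;         // "Www Mmm dd hh:mm:ss yyyy"
    cout << time_t_to_String_short( now ) << endl;   // "Mmm dd hh:mm:ss" -- day-of-week and year dropped

    Date_t d( (unsigned long long) now * 1000 );     // millis since the epoch
    cout << d.toString() << endl;                    // same format, seconds resolution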
diff --git a/buildscripts/bb.py b/buildscripts/bb.py
index e1e36f6..1e87828 100644
--- a/buildscripts/bb.py
+++ b/buildscripts/bb.py
@@ -16,7 +16,7 @@ def checkOk():
print( "excpted version [" + m + "]" )
from subprocess import Popen, PIPE
- diff = Popen( [ "git", "diff", "origin/v1.4" ], stdout=PIPE ).communicate()[ 0 ]
+ diff = Popen( [ "git", "diff", "origin/v1.2" ], stdout=PIPE ).communicate()[ 0 ]
if len(diff) > 0:
print( diff )
raise Exception( "build bot broken?" )
diff --git a/buildscripts/buildboost.bat b/buildscripts/buildboost.bat
new file mode 100644
index 0000000..b1b8ad0
--- /dev/null
+++ b/buildscripts/buildboost.bat
@@ -0,0 +1,54 @@
+@echo off
+
+cls
+echo This script builds the boost libs that MongoDB requires on Windows.
+echo We assume boost source is in machine's \boost directory.
+echo You can get boost at www.boost.org.
+echo .
+echo Note: you will want boost v1.42 or higher with VS2010.
+echo .
+echo We assume you have bjam. To build bjam:
+echo cd tools\jam\src
+echo build.bat
+echo .
+
+cd \boost
+echo bin\bjam --version
+bin\bjam --version
+
+echo .
+echo .
+echo .
+echo About to build release libraries
+pause
+cls
+bin\bjam variant=release runtime-link=static link=static --with-filesystem --with-thread --with-date_time --with-program_options --layout=versioned threading=multi toolset=msvc
+echo .
+echo .
+echo .
+echo About to try to move libs from /boost/stage/lib to /boost/lib/
+pause
+cls
+rem bjam makes extra copies without the ver #; we kill those:
+del stage\lib\*s.lib
+move stage\lib\* lib\
+
+echo .
+echo .
+echo .
+echo About to build debug libraries
+pause
+cls
+bin\bjam variant=debug --with-filesystem --with-thread --with-date_time --with-program_options --layout=versioned threading=multi toolset=msvc
+
+echo .
+echo .
+echo .
+echo About to try to move libs from /boost/stage/lib to /boost/lib/
+pause
+cls
+rem bjam makes extra copies without the ver #; we kill those:
+del stage\lib\*-gd.lib
+move stage\lib\* lib\
+
+echo Done - try running "dir \boost\lib\"
diff --git a/buildscripts/buildboost64.bat b/buildscripts/buildboost64.bat
new file mode 100644
index 0000000..86f3e11
--- /dev/null
+++ b/buildscripts/buildboost64.bat
@@ -0,0 +1,61 @@
+@echo off
+
+rem 64 bit version
+rem address-model=64
+
+rem run
+rem bin\bjam --clean
+rem if you switch compilers etc.
+
+cls
+echo This script builds the (64 bit) boost libs that MongoDB requires on Windows.
+echo We assume boost source is in machine's \boost directory.
+echo You can get boost at www.boost.org.
+echo .
+echo Note: you will want boost v1.42 or higher with VS2010.
+echo .
+echo We assume you have bjam. To build bjam:
+echo cd tools\jam\src
+echo build.bat
+echo .
+
+cd \boost
+echo bin\bjam --version
+bin\bjam --version
+
+echo .
+echo .
+echo .
+echo About to build release libraries
+pause
+cls
+bin\bjam --build-dir=c:\temp\boost64 address-model=64 variant=release runtime-link=static link=static --with-filesystem --with-thread --with-date_time --with-program_options --layout=versioned threading=multi toolset=msvc
+echo .
+echo .
+echo .
+echo About to try to move libs from /boost/stage/lib to /boost/lib/
+pause
+cls
+rem bjam makes extra copies without the ver #; we kill those:
+del stage\lib\*s.lib
+move stage\lib\* lib\
+
+echo .
+echo .
+echo .
+echo About to build debug libraries
+pause
+cls
+bin\bjam --build-dir=c:\temp\boost64 address-model=64 variant=debug --with-filesystem --with-thread --with-date_time --with-program_options --layout=versioned threading=multi toolset=msvc
+
+echo .
+echo .
+echo .
+echo About to try to move libs from /boost/stage/lib to /boost/lib/
+pause
+cls
+rem bjam makes extra copies without the ver #; we kill those:
+del stage\lib\*-gd.lib
+move stage\lib\* lib\
+
+echo Done - try running "dir \boost\lib\"
diff --git a/buildscripts/cleanbb.py b/buildscripts/cleanbb.py
index 68a8012..261519a 100644
--- a/buildscripts/cleanbb.py
+++ b/buildscripts/cleanbb.py
@@ -3,17 +3,39 @@ import sys
import os
import utils
import time
+from optparse import OptionParser
+
+cwd = os.getcwd();
+if cwd.find("buildscripts" ) > 0 :
+ cwd = cwd.partition( "buildscripts" )[0]
+
+print( "cwd [" + cwd + "]" )
+
+def shouldKill( c ):
+ if c.find( cwd ) >= 0:
+ return True
+
+ if ( c.find( "buildbot" ) >= 0 or c.find( "slave" ) ) and c.find( "/mongo/" ) >= 0:
+ return True
+
+ return False
def killprocs( signal="" ):
- cwd = os.getcwd();
- if cwd.find("buildscripts" ) > 0 :
- cwd = cwd.partition( "buildscripts" )[0]
killed = 0
- for x in utils.getprocesslist():
+ l = utils.getprocesslist()
+ print( "num procs:" + str( len( l ) ) )
+ if len(l) == 0:
+ print( "no procs" )
+ try:
+ print( utils.execsys( "/sbin/ifconfig -a" ) )
+ except Exception,e:
+ print( "can't get interfaces: " + str( e ) )
+
+ for x in l:
x = x.lstrip()
- if x.find( cwd ) < 0:
+ if not shouldKill( x ):
continue
pid = x.partition( " " )[0]
@@ -24,20 +46,31 @@ def killprocs( signal="" ):
return killed
-def cleanup( root ):
+def cleanup( root , nokill ):
+ if nokill:
+ print "nokill requested, not killing anybody"
+ else:
+ if killprocs() > 0:
+ time.sleep(3)
+ killprocs("-9")
+
# delete all regular files, directories can stay
# NOTE: if we delete directories later, we can't delete diskfulltest
for ( dirpath , dirnames , filenames ) in os.walk( root , topdown=False ):
for x in filenames:
- os.remove( dirpath + "/" + x )
+ foo = dirpath + "/" + x
+ print( "removing: " + foo )
+ os.remove( foo )
- if killprocs() > 0:
- time.sleep(3)
- killprocs("-9")
if __name__ == "__main__":
+ parser = OptionParser(usage="read the script")
+ parser.add_option("--nokill", dest='nokill', default=False, action='store_true')
+ (options, args) = parser.parse_args()
+
root = "/data/db/"
- if len( sys.argv ) > 1:
- root = sys.argv[1]
- cleanup( root )
+ if len(args) > 0:
+ root = args[0]
+
+ cleanup( root , options.nokill )
diff --git a/buildscripts/confluence_export.py b/buildscripts/confluence_export.py
index 956605b..29cdde6 100644
--- a/buildscripts/confluence_export.py
+++ b/buildscripts/confluence_export.py
@@ -14,10 +14,15 @@ import shutil
import subprocess
import sys
import urllib2
+sys.path[0:0] = [""]
+import simples3
from suds.client import Client
-SOAP_URI = "http://mongodb.onconfluence.com/rpc/soap-axis/confluenceservice-v1?wsdl"
+import settings
+
+HTML_URI = "http://mongodb.onconfluence.com/rpc/soap-axis/confluenceservice-v1?wsdl"
+PDF_URI = "http://www.mongodb.org/rpc/soap-axis/pdfexport?wsdl"
USERNAME = "soap"
PASSWORD = "soap"
AUTH_URI = "http://www.mongodb.org/login.action?os_authType=basic"
@@ -25,12 +30,18 @@ TMP_DIR = "confluence-tmp"
TMP_FILE = "confluence-tmp.zip"
-def export_and_get_uri():
- client = Client(SOAP_URI)
+def export_html_and_get_uri():
+ client = Client(HTML_URI)
auth = client.service.login(USERNAME, PASSWORD)
return client.service.exportSpace(auth, "DOCS", "TYPE_HTML")
+def export_pdf_and_get_uri():
+ client = Client(PDF_URI)
+ auth = client.service.login(USERNAME, PASSWORD)
+ return client.service.exportSpace(auth, "DOCS")
+
+
def login_and_download(docs):
cookie_jar = cookielib.CookieJar()
cookie_handler = urllib2.HTTPCookieProcessor(cookie_jar)
@@ -69,11 +80,21 @@ def overwrite(src, dest):
os.symlink(os.path.abspath(target), os.path.abspath(current))
+def write_to_s3(pdf):
+ s3 = simples3.S3Bucket(settings.bucket, settings.id, settings.key)
+ name = "docs/mongodb-docs-%s.pdf" % datetime.date.today()
+ s3.put(name, pdf, acl="public-read")
+
+
def main(dir):
+ # HTML
rmdir(TMP_DIR)
- extract_to_dir(login_and_download(export_and_get_uri()), TMP_DIR)
+ extract_to_dir(login_and_download(export_html_and_get_uri()), TMP_DIR)
overwrite("%s/DOCS/" % TMP_DIR, dir)
+ # PDF
+ write_to_s3(login_and_download(export_pdf_and_get_uri()).read())
+
if __name__ == "__main__":
try:
diff --git a/buildscripts/distmirror.py b/buildscripts/distmirror.py
new file mode 100644
index 0000000..1902e2a
--- /dev/null
+++ b/buildscripts/distmirror.py
@@ -0,0 +1,139 @@
+#!/usr/bin/python
+
+# Download mongodb stuff (at present builds, sources, docs, but not
+# drivers).
+
+# Usage: <progname> [directory] # directory defaults to cwd.
+
+# FIXME: this script is fairly sloppy.
+import sys
+import os
+import urllib2
+import time
+import hashlib
+import warnings
+
+written_files = []
+def get(url, filename):
+ # A little safety check.
+ if filename in written_files:
+ raise Exception('not overwriting file %s (already written in this session)' % filename)
+ else:
+ written_files.append(filename)
+ print "downloading %s to %s" % (url, filename)
+ open(filename, 'w').write(urllib2.urlopen(url).read())
+
+
+def checkmd5(md5str, filename):
+ m = hashlib.md5()
+ m.update(open(filename, 'rb').read())
+ d = m.hexdigest()
+ if d != md5str:
+ warnings.warn("md5sum mismatch for file %s: wanted %s; got %s" % (filename, md5str, d))
+
+osarches=(("osx", ("i386", "i386-tiger", "x86_64"), ("tgz", )),
+ ("linux", ("i686", "x86_64"), ("tgz", )),
+ ("win32", ("i386", "x86_64"), ("zip", )),
+ ("sunos5", ("i86pc", "x86_64"), ("tgz", )),
+ ("src", ("src", ), ("tar.gz", "zip")), )
+
+# KLUDGE: this will need constant editing.
+versions = ("1.4.2", "1.5.1", "latest")
+
+url_format = "http://downloads.mongodb.org/%s/mongodb-%s-%s.%s"
+filename_format = "mongodb-%s-%s.%s"
+
+def core_server():
+ for version in versions:
+ for (os, architectures, archives) in osarches:
+ for architecture in architectures:
+ for archive in archives:
+ osarch = os + '-' + architecture if architecture != 'src' else 'src'
+ # ugh.
+ if architecture == 'src' and version == 'latest':
+ if archive == 'tar.gz':
+ archive2 = 'tarball'
+ elif archive == 'zip':
+ archive2 = 'zipball'
+ url = "http://github.com/mongodb/mongo/"+archive2+"/master"
+ version2 = "master"
+ else:
+ version2 = version if architecture != 'src' else 'r'+version
+ url = url_format % (os, osarch, version2, archive)
+ # ugh ugh
+ md5url = url+'.md5' if architecture != 'src' else None
+ filename = filename_format % (osarch, version2, archive)
+ get(url, filename)
+ if md5url:
+ print "fetching md5 url " + md5url
+ md5str = urllib2.urlopen(md5url).read()
+ checkmd5(md5str, filename)
+
+def drivers():
+ # Drivers... FIXME: drivers.
+ driver_url_format = "http://github.com/mongodb/mongo-%s-driver/%s/%s"
+ driver_filename_format = "mongo-%s-driver-%s.%s"
+ drivers=(("python", ("1.6", "master"), ("zipball", "tarball"), None),
+ ("ruby", ("0.20", "master"), ("zipball", "tarball"), None),
+ ("c", ("v0.1", "master"), ("zipball", "tarball"), None),
+ # FIXME: PHP, Java, and Csharp also have zips and jars of
+ # precompiled releases.
+ ("php", ("1.0.6", "master"), ("zipball", "tarball"), None),
+ ("java", ("r1.4", "r2.0rc1", "master"), ("zipball", "tarball"), None),
+ # And Csharp is in a different github place, too.
+ ("csharp", ("0.82.2", "master"), ("zipball", "tarball"),
+ "http://github.com/samus/mongodb-%s/%s/%s"),
+ )
+
+ for (lang, releases, archives, url_format) in drivers:
+ for release in releases:
+ for archive in archives:
+ url = (url_format if url_format else driver_url_format) % (lang, archive, release)
+ if archive == 'zipball':
+ extension = 'zip'
+ elif archive == 'tarball':
+ extension = 'tgz'
+ else:
+ raise Exception('unknown archive format %s' % archive)
+ filename = driver_filename_format % (lang, release, extension)
+ get(url, filename)
+ # ugh ugh ugh
+ if lang == 'csharp' and release != 'master':
+ url = 'http://github.com/downloads/samus/mongodb-csharp/MongoDBDriver-Release-%s.zip' % (release)
+ filename = 'MongoDBDriver-Release-%s.zip' % (release)
+ get(url, filename)
+ if lang == 'java' and release != 'master':
+ get('http://github.com/downloads/mongodb/mongo-java-driver/mongo-%s.jar' % (release), 'mongo-%s.jar' % (release))
+ # I have no idea what's going on with the PHP zipfiles.
+ if lang == 'php' and release == '1.0.6':
+ get('http://github.com/downloads/mongodb/mongo-php-driver/mongo-1.0.6-php5.2-osx.zip', 'mongo-1.0.6-php5.2-osx.zip')
+ get('http://github.com/downloads/mongodb/mongo-php-driver/mongo-1.0.6-php5.3-osx.zip', 'mongo-1.0.6-php5.3-osx.zip')
+
+def docs():
+ # FIXME: in principle, the doc PDFs could be out of date.
+ docs_url = time.strftime("http://downloads.mongodb.org/docs/mongodb-docs-%Y-%m-%d.pdf")
+ docs_filename = time.strftime("mongodb-docs-%Y-%m-%d.pdf")
+ get(docs_url, docs_filename)
+
+def extras():
+ # Extras
+ extras = ("http://media.mongodb.org/zips.json", )
+ for extra in extras:
+ if extra.rfind('/') > -1:
+ filename = extra[extra.rfind('/')+1:]
+ else:
+ raise Exception('URL %s lacks a slash?' % extra)
+ get(extra, filename)
+
+if len(sys.argv) > 1:
+ dir=sys.argv[1]
+ os.makedirs(dir)
+ os.chdir(dir)
+
+print """NOTE: the md5sums for all the -latest tarballs are out of
+date. You will probably see warnings as this script runs. (If you
+don't, feel free to delete this note.)"""
+core_server()
+drivers()
+docs()
+extras()
diff --git a/buildscripts/docs.py b/buildscripts/docs.py
new file mode 100644
index 0000000..719b5af
--- /dev/null
+++ b/buildscripts/docs.py
@@ -0,0 +1,120 @@
+"""Build the C++ client docs and the MongoDB server docs.
+"""
+
+from __future__ import with_statement
+import os
+import shutil
+import socket
+import subprocess
+import time
+import urllib2
+
+import markdown
+
+
+def clean_dir(dir):
+ try:
+ shutil.rmtree(dir)
+ except:
+ pass
+ os.makedirs(dir)
+
+
+def convert_dir(source, dest):
+ clean_dir(dest)
+
+ for x in os.listdir(source + "/"):
+ if not x.endswith(".md"):
+ continue
+
+ with open("%s/%s" % (source, x)) as f:
+ raw = f.read()
+
+ html = markdown.markdown(raw)
+ print(x)
+
+ with open("%s/%s" % (dest, x.replace(".md", ".html")), 'w') as o:
+ o.write(html)
+
+
+def check_mongo():
+ sock = socket.socket()
+ sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
+ sock.settimeout(1)
+ sock.connect(("localhost", 31999))
+ sock.close()
+
+def did_mongod_start(timeout=20):
+ while timeout > 0:
+ time.sleep(1)
+ try:
+ check_mongo()
+ return True
+ except Exception,e:
+ print e
+ timeout = timeout - 1
+ return False
+
+def stop(proc):
+ try:
+ proc.terminate()
+ except AttributeError:
+ os.kill(proc.pid, 15)
+
+def commands_list(out):
+ clean_dir("dummy_data_dir")
+ with open("/dev/null") as null:
+ try:
+ p = subprocess.Popen(["./mongod", "--dbpath", "dummy_data_dir",
+ "--port", "31999", "--rest"], stdout=null, stderr=null)
+ except:
+ print "No mongod? Skipping..."
+ return
+ if not did_mongod_start():
+ print "Slow mongod? Skipping..."
+ stop(p)
+ return
+ print "Started mongod"
+
+ with open(out, "w") as f:
+ f.write("<base href='http://localhost:28017'/>")
+ f.write(urllib2.urlopen("http://localhost:32999/_commands").read())
+
+ print "Stopping mongod"
+ stop(p)
+
+def gen_cplusplus(dir):
+ clean_dir(dir)
+ clean_dir("docs/doxygen")
+
+ # Too noisy...
+ with open("/dev/null") as null:
+ subprocess.call(["doxygen", "doxygenConfig"], stdout=null, stderr=null)
+
+ os.rename("docs/doxygen/html", dir)
+
+
+def version():
+ """Get the server version from doxygenConfig.
+ """
+ with open("doxygenConfig") as f:
+ for line in f.readlines():
+ if line.startswith("PROJECT_NUMBER"):
+ return line.split("=")[1].strip()
+
+
+def main():
+ v = version()
+ print("Generating server docs in docs/html/internal/%s" % v)
+ convert_dir("docs", "docs/html/internal/%s" % v)
+ print("Generating commands list")
+ commands_list("docs/html/internal/%s/commands.html" % v)
+ shutil.rmtree("dummy_data_dir")
+ print("Generating C++ docs in docs/html/cplusplus/%s" % v)
+ gen_cplusplus("docs/html/cplusplus/%s" % v)
+
+
+if __name__ == "__main__":
+ main()
+
+
diff --git a/buildscripts/errorcodes.py b/buildscripts/errorcodes.py
index 7a7e017..d87b7ad 100644..100755
--- a/buildscripts/errorcodes.py
+++ b/buildscripts/errorcodes.py
@@ -3,23 +3,24 @@
import os
import sys
import re
+import utils
def getAllSourceFiles( arr=None , prefix="." ):
if arr is None:
arr = []
for x in os.listdir( prefix ):
- if x.startswith( "." ) or x.startswith( "pcre-" ) or x.startswith( "32bit" ) or x.startswith( "mongodb-" ):
+ if x.startswith( "." ) or x.startswith( "pcre-" ) or x.startswith( "32bit" ) or x.startswith( "mongodb-" ) or x.startswith("debian") or x.startswith( "mongo-cxx-driver" ):
continue
full = prefix + "/" + x
- if os.path.isdir( full ):
+ if os.path.isdir( full ) and not os.path.islink( full ):
getAllSourceFiles( arr , full )
else:
if full.endswith( ".cpp" ) or full.endswith( ".h" ) or full.endswith( ".c" ):
arr.append( full )
return arr
-
+
assertNames = [ "uassert" , "massert" ]
def assignErrorCodes():
@@ -43,6 +44,8 @@ def assignErrorCodes():
out.close()
+codes = []
+
def readErrorCodes( callback ):
ps = [ re.compile( "([um]asser(t|ted)) *\( *(\d+)" ) ,
re.compile( "(User|Msg)Exceptio(n)\( *(\d+)" )
@@ -52,6 +55,7 @@ def readErrorCodes( callback ):
for line in open( x ):
for p in ps:
for m in p.findall( line ):
+ codes.append( ( x , lineNum , line , m[2] ) )
callback( x , lineNum , line , m[2] )
lineNum = lineNum + 1
@@ -78,8 +82,57 @@ def checkErrorCodes():
readErrorCodes( checkDups )
return len( errors ) == 0
+def getBestMessage( err , start ):
+ err = err.partition( start )[2]
+ if not err:
+ return ""
+ err = err.partition( "\"" )[2]
+ if not err:
+ return ""
+ err = err.rpartition( "\"" )[0]
+ if not err:
+ return ""
+ return err
+
+def genErrorOutput():
+
+ g = utils.getGitVersion()
+
+ if os.path.exists( "docs/errors.md" ):
+ i = open( "docs/errors.md" , "r" )
+
+
+ out = open( "docs/errors.md" , 'w' )
+ out.write( "MongoDB Error Codes\n==========\n\n\n" )
+
+ prev = ""
+ seen = {}
+
+ codes.sort( key=lambda x: x[0]+"-"+x[3] )
+ for f,l,line,num in codes:
+ if num in seen:
+ continue
+ seen[num] = True
+
+ if f.startswith( "./" ):
+ f = f[2:]
+
+ if f != prev:
+ out.write( "\n\n" )
+ out.write( f + "\n----\n" )
+ prev = f
+
+ url = "http://github.com/mongodb/mongo/blob/" + g + "/" + f + "#L" + str(l)
+
+ out.write( "* " + str(num) + " [code](" + url + ") " + getBestMessage( line , str(num) ) + "\n" )
+
+ out.write( "\n" )
+ out.close()
+
if __name__ == "__main__":
ok = checkErrorCodes()
print( "ok:" + str( ok ) )
print( "next: " + str( getNextCode() ) )
+ if ok:
+ genErrorOutput()
diff --git a/buildscripts/makealldists.py b/buildscripts/makealldists.py
new file mode 100644
index 0000000..762700e
--- /dev/null
+++ b/buildscripts/makealldists.py
@@ -0,0 +1,289 @@
+#!/usr/bin/python
+
+from __future__ import with_statement
+import subprocess
+import sys
+import os
+import time
+import tempfile
+import errno
+import glob
+import shutil
+import settings
+import simples3
+
+def s3bucket():
+ return simples3.S3Bucket(settings.bucket, settings.id, settings.key)
+
+def s3cp (bucket, filename, s3name):
+ defaultacl="public-read"
+ print "putting %s to %s" % (filename, s3name)
+ bucket.put(s3name, open(filename, "rb").read(), acl=defaultacl)
+
+def pushrepo(repodir):
+ files=subprocess.Popen(['find', repodir, '-type', 'f'], stdout=subprocess.PIPE).communicate()[0][:-1].split('\n')
+ bucket=s3bucket()
+ olddebs=[t[0] for t in bucket.listdir(prefix='distros/') if t[0].endswith('.deb')]
+ newdebs=[]
+ for fn in files:
+ if len(fn) == 0:
+ continue
+ tail = fn[len(repodir):]
+ # Note: be very careful not to produce s3names containing
+ # sequences of repeated slashes: s3 doesn't treat a////b as
+ # equivalent to a/b.
+ s3name1='distros-archive/'+time.strftime('%Y%m%d')+tail
+ s3name2='distros'+tail
+ s3cp(bucket, fn, s3name1)
+ s3cp(bucket, fn, s3name2)
+ if s3name1.endswith('.deb'):
+ newdebs.append(s3name1)
+ # FIXME: we ought to clean out old debs eventually, but this will
+ # blow away too much if we're trying to push a subset of what's
+ # supposed to be available.
+ #[bucket.delete(deb) for deb in set(olddebs).difference(set(newdebs))]
+
+def cat (inh, outh):
+ inh.seek(0)
+ for line in inh:
+ outh.write(line)
+ inh.close()
+
+# This generates all tuples from mixed-radix counting system, essentially.
+def gen(listlist):
+ dim=len(listlist)
+ a=[0 for ignore in listlist]
+ while True:
+ yield [listlist[i][a[i]] for i in range(dim)]
+ a[0]+=1
+ for j in range(dim):
+ if a[j] == len(listlist[j]):
+ if j<dim-1:
+ a[j+1]+=1
+ else:
+ return
+ a[j]=0
+
+def dirify(string):
+ return (string if string[-1:] in '\/' else string+'/')
+def fileify(string):
+ return (string if string[-1:] not in '\/' else string.rstrip('\/'))
+
+# WTF: os.makedirs errors if the leaf exists?
+def makedirs(f):
+ try:
+ os.makedirs(f)
+ except OSError: # as exc: # Python >2.5
+ exc=sys.exc_value
+ if exc.errno == errno.EEXIST:
+ pass
+ else:
+ raise exc
+
+
+
+# This is a fairly peculiar thing to want to do, but our build process
+# creates several apt repositories for each mongo version we build on
+# any given Debian/Ubuntu release. To merge repositories together, we
+# must concatenate the Packages.gz files.
+def merge_directories_concatenating_conflicts (target, sources):
+ print sources
+ target = dirify(target)
+ for source in sources:
+ source = dirify(source)
+ files = subprocess.Popen(["find", source, "-type", "f"], stdout=subprocess.PIPE).communicate()[0].split('\n')
+ for f in files:
+ if f == '':
+ continue
+ rel = f[len(source):]
+ o=target+rel
+ makedirs(os.path.dirname(o))
+ with open(f) as inh:
+ with open(target+rel, "a") as outh:
+ outh.write(inh.read())
+
+
+def parse_mongo_version_spec(spec):
+ l = spec.split(':')
+ if len(l) == 1:
+ l+=['','']
+ elif len(l) == 2:
+ l+=['']
+ return l
+
+def logfh(distro, distro_version, arch):
+ prefix = "%s-%s-%s.log." % (distro, distro_version, arch)
+ # This is a NamedTemporaryFile mostly so that I can tail(1) them
+ # as we go.
+ return tempfile.NamedTemporaryFile("w+b", -1, prefix=prefix)
+
+def spawn(distro, distro_version, arch, spec, directory, opts):
+ argv = ["python", "makedist.py"] + opts + [ directory, distro, distro_version, arch ] + [ spec ]
+# cmd = "mkdir -p %s; cd %s; touch foo.deb; echo %s %s %s %s %s | tee Packages " % ( directory, directory, directory, distro, distro_version, arch, mongo_version )
+# print cmd
+# argv = ["sh", "-c", cmd]
+ fh = logfh(distro, distro_version, arch)
+ print >> fh, "Running %s" % argv
+ # it's often handy to be able to run these things at the shell
+ # manually. FIXME: this ought to be slightly less than thoroughly
+ # ignorant of quoting issues (as it is now).
+ print >> fh, " ".join(argv)
+ fh.flush()
+ proc = subprocess.Popen(argv, stdin=None, stdout=fh, stderr=fh)
+ return (proc, fh, distro, distro_version, arch, spec)
+
+def win(name, logfh, winfh):
+ logfh.seek(0)
+ print >> winfh, "=== Winner %s ===" % name
+ cat(logfh, winfh)
+ print >> winfh, "=== End winner %s ===" % name
+
+def lose(name, logfh, losefh):
+ logfh.seek(0)
+ print >> losefh, "=== Loser %s ===" % name
+ cat(logfh, losefh)
+ print >> losefh, "=== End loser %s ===" % name
+
+def wait(procs, winfh, losefh, winners, losers):
+ print "."
+ sys.stdout.flush()
+ try:
+ (pid, stat) = os.wait()
+ except OSError, err:
+ print >> sys.stderr, "This shouldn't happen."
+ print >> sys.stderr, err
+ next
+ if pid:
+ [tup] = [tup for tup in procs if tup[0].pid == pid]
+ (proc, logfh, distro, distro_version, arch, spec) = tup
+ procs.remove(tup)
+ name = "%s %s %s" % (distro, distro_version, arch)
+ if os.WIFEXITED(stat):
+ if os.WEXITSTATUS(stat) == 0:
+ win(name, logfh, winfh)
+ winners.append(name)
+ else:
+ lose(name, logfh, losefh)
+ losers.append(name)
+ if os.WIFSIGNALED(stat):
+ lose(name, logfh, losefh)
+ losers.append(name)
+
+
+
+def __main__():
+ # FIXME: getopt & --help.
+ print " ".join(sys.argv)
+ branches = sys.argv[-1]
+ makedistopts = sys.argv[1:-1]
+
+ # Output from makedist.py goes here.
+ outputroot=tempfile.mkdtemp()
+ repodir=tempfile.mkdtemp()
+
+ print "makedist output under: %s\ncombined repo: %s\n" % (outputroot, repodir)
+ sys.stdout.flush()
+ # Add more dist/version/architecture tuples as they're supported.
+ dists = (("ubuntu", "10.4"),
+ ("ubuntu", "9.10"),
+ ("ubuntu", "9.4"),
+ ("ubuntu", "8.10"),
+ ("debian", "5.0"),
+ ("centos", "5.4"),
+ ("fedora", "11"),
+ ("fedora", "12"))
+ arches = ("x86", "x86_64")
+# mongos = branches.split(',')
+ # Run a makedist for each distro/version/architecture tuple above.
+ winners = []
+ losers = []
+ winfh=tempfile.TemporaryFile()
+ losefh=tempfile.TemporaryFile()
+ procs = []
+ count = 0
+ for ((distro, distro_version), arch, spec) in gen([dists, arches, [branches]]):
+ # FIXME: no x86 fedoras on RackSpace circa 04/10.
+ if distro == "fedora" and arch == "x86":
+ continue
+ count+=1
+ opts = makedistopts
+ if distro in ["debian", "ubuntu"]:
+ outputdir = "%s/deb/%s" % (outputroot, distro)
+ elif distro in ["centos", "fedora", "redhat"]:
+ outputdir = "%s/rpm/%s/%s/os" % (outputroot, distro, distro_version)
+ else:
+ raise Exception("unsupported distro %s" % distro)
+ #opts += ["--subdirs"]
+
+ procs.append(spawn(distro, distro_version, arch, spec, outputdir, opts))
+
+ if len(procs) == 8:
+ wait(procs, winfh, losefh, winners, losers)
+
+ while procs:
+ wait(procs, winfh, losefh, winners, losers)
+
+ winfh.seek(0)
+ losefh.seek(0)
+ nwinners=len(winners)
+ nlosers=len(losers)
+ print "%d winners; %d losers" % (nwinners, nlosers)
+ cat(winfh, sys.stdout)
+ cat(losefh, sys.stdout)
+ print "%d winners; %d losers" % (nwinners, nlosers)
+ if count == nwinners + nlosers:
+ print "All jobs accounted for"
+# return 0
+ else:
+ print "Lost some jobs...?"
+ return 1
+
+ sys.stdout.flush()
+ sys.stderr.flush()
+
+ # this is sort of ridiculous, but the outputs from rpmbuild look
+ # like RPM/<arch>, but the repo wants to look like
+ # <arch>/RPM.
+ for dist in os.listdir(outputroot+'/rpm'):
+ if dist in ["centos", "fedora", "redhat"]:
+ distdir="%s/rpm/%s" % (outputroot, dist)
+ rpmdirs = subprocess.Popen(["find", distdir, "-type", "d", "-a", "-name", "RPMS"], stdout=subprocess.PIPE).communicate()[0].split('\n')[:-1]
+ for rpmdir in rpmdirs:
+ for arch in os.listdir(rpmdir):
+ archdir="%s/../%s" % (rpmdir, arch)
+ os.mkdir(archdir)
+ os.rename("%s/%s" % (rpmdir, arch), "%s/RPMS" % (archdir,))
+ os.rmdir(rpmdir)
+
+
+ for flavor in os.listdir(outputroot):
+ argv=["python", "mergerepositories.py", flavor, "%s/%s" % (outputroot, flavor), repodir]
+ print "running %s" % argv
+ print " ".join(argv)
+ r = subprocess.Popen(argv).wait()
+ if r != 0:
+ raise Exception("mergerepositories.py exited %d" % r)
+ print repodir
+ pushrepo(repodir)
+ shutil.rmtree(outputroot)
+ shutil.rmtree(repodir)
+
+ return 0
+
+
+if __name__ == '__main__':
+ __main__()
+
+
+# FIXME: this ought to be someplace else.
+
+# FIXME: remove this comment when the buildbot does this. After this
+# program, run something that amounts to
+#
+# find /tmp/distros -name *.deb -or -name Packages.gz | while read f; do echo "./s3cp.py $f ${f#/tmp/}"; done
+#
+# where ./s3cp.py is a trivial s3 put executable in this directory.
+
+# merge_directories_concatenating_conflicts('/tmp/distros/debian', '/tmp/distros-20100222/debian/HEAD', '/tmp/distros-20100222/debian/r1.3.2','/tmp/distros-20100222/debian/v1.2')
+
+# merge_directories_concatenating_conflicts('/tmp/distros/ubuntu', '/tmp/distros-20100222/ubuntu/HEAD', '/tmp/distros-20100222/ubuntu/r1.3.2', '/tmp/distros-20100222/ubuntu/v1.2')
diff --git a/buildscripts/makedist.py b/buildscripts/makedist.py
index 35383b9..1928b76 100644
--- a/buildscripts/makedist.py
+++ b/buildscripts/makedist.py
@@ -1,23 +1,13 @@
#!/usr/bin/env python
-# makedist.py: make a distro package (on an EC2 instance)
+# makedist.py: make a distro package (on an EC2 (or sometimes
+# RackSpace) instance)
# For ease of use, put a file called settings.py someplace in your
# sys.path, containing something like the following:
# makedist = {
-# # ec2-api-tools needs the following two set in the process
-# # environment.
-# "EC2_HOME": "/path/to/ec2-api-tools",
-# # The EC2 tools won't run at all unless this variable is set to a directory
-# # relative to which a "bin/java" exists.
-# "JAVA_HOME" : "/usr",
-# # All the ec2-api-tools take these two as arguments.
-# # Alternatively, you can set the environment variables EC2_PRIVATE_KEY and EC2_CERT
-# # respectively, leave these two out of settings.py, and let the ec2 tools default.
-# "ec2_pkey": "/path/to/pk-file.pem"
-# "ec2_cert" : "/path/to/cert-file.pem"
-# # This gets supplied to ec2-run-instances to rig up an ssh key for
+# # This gets supplied to EC2 to rig up an ssh key for
# # the remote user.
# "ec2_sshkey" : "key-id",
# # And so we need to tell our ssh processes where to find the
@@ -54,6 +44,14 @@ import socket
import time
import os.path
import tempfile
+import string
+import settings
+
+from libcloud.types import Provider
+from libcloud.providers import get_driver
+from libcloud.drivers.ec2 import EC2NodeDriver, NodeImage
+from libcloud.base import Node, NodeImage, NodeSize, NodeState
+from libcloud.ssh import ParamikoSSHClient
# For the moment, we don't handle any of the errors we raise, so it
# suffices to have a simple subclass of Exception that just
@@ -141,139 +139,125 @@ class EC2InstanceConfigurator(BaseConfigurator):
(("centos", "5.4", "x86_64"), "ami-ccb35ea5"),
(("fedora", "8", "x86_64"), "ami-2547a34c"),
(("fedora", "8", "x86"), "ami-5647a33f"))),
+ ("rackspace_imgname",
+ ((("fedora", "11", "x86_64"), "Fedora 11"),
+ (("fedora", "12", "x86_64"), "Fedora 12"),
+ (("fedora", "13", "x86_64"), "Fedora 13"))),
("ec2_mtype",
((("*", "*", "x86"), "m1.small"),
(("*", "*", "x86_64"), "m1.large"))),
]
+class nodeWrapper(object):
+ def __init__(self, configurator, **kwargs):
+ self.terminate = False if "no_terminate" in kwargs else True
+ self.use_internal_name = False
+
+ def getHostname(self):
+ internal_name=self.node.private_ip[0]
+ public_name=self.node.public_ip[0]
+ if not (internal_name or public_name):
+ raise Exception('host has no name?')
+ if self.use_internal_name:
+ # FIXME: by inspection, it seems this is sometimes the
+ # empty string. Dunno if that's EC2 or libcloud being
+ # stupid, but it's not good.
+ if internal_name:
+ return internal_name
+ else:
+ return public_name
+ else:
+ return public_name
+
+ def initwait(self):
+ print "waiting for node to spin up"
+ # Wait for EC2 to tell us the node is running.
+ while 1:
+ n=None
+ # EC2 sometimes takes a while to report a node.
+ for i in range(6):
+ nodes = [n for n in self.list_nodes() if (n.id==self.node.id)]
+ if len(nodes)>0:
+ n=nodes[0]
+ break
+ else:
+ time.sleep(10)
+ if not n:
+ raise Exception("couldn't find node with id %s" % self.node.id)
+ if n.state == NodeState.PENDING:
+ time.sleep(10)
+ else:
+ self.node = n
+ break
+ print "ok"
+ # Now wait for the node's sshd to be accepting connections.
+ print "waiting for ssh"
+ sshwait = True
+ if sshwait == False:
+ return
+ while sshwait:
+ s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+ try:
+ try:
+ s.connect((self.node.public_ip[0], 22))
+ sshwait = False
+ print "connected on port 22 (ssh)"
+ time.sleep(15) # arbitrary timeout, in case the
+ # remote sshd is slow.
+ except socket.error, err:
+ pass
+ finally:
+ s.close()
+ time.sleep(3) # arbitrary timeout
+ print "ok"
+
+ def __enter__(self):
+ self.start()
+ # Note: we don't do an initwait() in __enter__ because if an
+ # exception is raised during __enter__, __exit__ doesn't get
+ # run (and by inspection RackSpace doesn't let you kill a node
+ # that hasn't finished booting yet).
+ return self
+
+ def __exit__(self, type, value, traceback):
+ self.stop()
-class EC2Instance (object):
+ def stop(self):
+ if self.terminate:
+ print "Destroying node %s" % self.node.id
+ self.node.destroy()
+ else:
+ print "Not terminating EC2 instance %s." % self.node.id
+
+ def setup(self):
+ pass
+
+class EC2Instance (nodeWrapper):
def __init__(self, configurator, **kwargs):
+ super(EC2Instance, self).__init__(configurator, **kwargs)
# Stuff we need to start an instance: AMI name, key and cert
# files. AMI and mtype default to configuration in this file,
# but can be overridden.
self.ec2_ami = configurator.findOrDefault(kwargs, "ec2_ami")
self.ec2_mtype = configurator.findOrDefault(kwargs, "ec2_mtype")
-
self.use_internal_name = True if "use_internal_name" in kwargs else False
-
- # Authentication stuff defaults according to the conventions
- # of the ec2-api-tools.
- self.ec2_cert=kwargs["ec2_cert"]
- self.ec2_pkey=kwargs["ec2_pkey"]
self.ec2_sshkey=kwargs["ec2_sshkey"]
# FIXME: this needs to be a commandline option
self.ec2_groups = ["default", "buildbot-slave", "dist-slave"]
- self.terminate = False if "no_terminate" in kwargs else True
- def parsedesc (self, hdl):
- line1=hdl.readline()
- splitline1=line1.split()
- (_, reservation, unknown1, groupstr) = splitline1[:4]
- groups = groupstr.split(',')
- self.ec2_reservation = reservation
- self.ec2_unknown1 = unknown1
- self.ec2_groups = groups
- # I haven't seen more than 4 data fields in one of these
- # descriptions, but what do I know?
- if len(splitline1)>4:
- print >> sys.stderr, "more than 4 fields in description line 1\n%s\n" % line1
- self.ec2_extras1 = splitline1[4:]
- line2=hdl.readline()
- splitline2=line2.split()
- # The jerks make it tricky to parse line 2: the fields are
- # dependent on the instance's state.
- (_, instance, ami, status_or_hostname) = splitline2[:4]
- self.ec2_instance = instance
- if ami != self.ec2_ami:
- print >> sys.stderr, "warning: AMI in description isn't AMI we invoked\nwe started %s, but got\n%s", (self.ec2_ami, line2)
- # FIXME: are there other non-running statuses?
- if status_or_hostname in ["pending", "terminated"]:
- self.ec2_status = status_or_hostname
- self.ec2_running = False
- index = 4
- self.ec2_storage = splitline2[index+8]
- else:
- self.ec2_running = True
- index = 6
- self.ec2_status = splitline2[5]
- self.ec2_external_hostname = splitline2[3]
- self.ec2_internal_hostname = splitline2[4]
- self.ec2_external_ipaddr = splitline2[index+8]
- self.ec2_internal_ipaddr = splitline2[index+9]
- self.ec2_storage = splitline2[index+10]
- (sshkey, unknown2, mtype, starttime, zone, unknown3, unknown4, monitoring) = splitline2[index:index+8]
- # FIXME: potential disagreement with the supplied sshkey?
- self.ec2_sshkey = sshkey
- self.ec2_unknown2 = unknown2
- # FIXME: potential disagreement with the supplied mtype?
- self.ec2_mtype = mtype
- self.ec2_starttime = starttime
- self.ec2_zone = zone
- self.ec2_unknown3 = unknown3
- self.ec2_unknown4 = unknown4
- self.ec2_monitoring = monitoring
def start(self):
"Fire up a fresh EC2 instance."
- groups = reduce(lambda x, y : x+y, [["-g", i] for i in self.ec2_groups], [])
- argv = ["ec2-run-instances",
- self.ec2_ami, "-K", self.ec2_pkey, "-C", self.ec2_cert,
- "-k", self.ec2_sshkey, "-t", self.ec2_mtype] + groups
- self.ec2_running = False
- print "running %s" % argv
- proc = subprocess.Popen(argv, stdout=subprocess.PIPE)
- try:
- self.parsedesc(proc.stdout)
- if self.ec2_instance == "":
- raise SimpleError("instance id is empty")
- else:
- print "Instance id: %s" % self.ec2_instance
- finally:
- r = proc.wait()
- if r != 0:
- raise SimpleError("ec2-run-instances exited %d", r)
-
- def initwait(self):
- # poll the instance description until we get a hostname.
- # Note: it seems there can be a time interval after
- # ec2-run-instance finishes during which EC2 will tell us that
- # the instance ID doesn't exist. This is sort of bad.
- state = "pending"
- numtries = 0
- giveup = 5
-
- while not self.ec2_running:
- time.sleep(15) # arbitrary
- argv = ["ec2-describe-instances", "-K", self.ec2_pkey, "-C", self.ec2_cert, self.ec2_instance]
- proc = subprocess.Popen(argv, stdout=subprocess.PIPE)
- try:
- self.parsedesc(proc.stdout)
- except Exception, e:
- r = proc.wait()
- if r < giveup:
- print sys.stderr, str(e)
- continue
- else:
- raise SimpleError("ec2-describe-instances exited %d", r)
- numtries+=1
-
- def stop(self):
- if self.terminate:
- LocalHost.runLocally(["ec2-terminate-instances", "-K", self.ec2_pkey, "-C", self.ec2_cert, self.ec2_instance])
- else:
- print "Not terminating EC2 instance %s." % self.ec2_instance
-
- def __enter__(self):
- self.start()
- return self
-
- def __exit__(self, type, value, traceback):
- self.stop()
-
- def getHostname(self):
- return self.ec2_internal_hostname if self.use_internal_name else self.ec2_external_hostname
+ EC2 = get_driver(Provider.EC2)
+ self.driver = EC2NodeDriver(settings.id, settings.key)
+ image = NodeImage(self.ec2_ami, self.ec2_ami, EC2)
+ size = NodeSize(self.ec2_mtype, self.ec2_mtype, None, None, None, None, EC2)
+ self.node = self.driver.create_node(image=image, name=self.ec2_ami, size=size, keyname=self.ec2_sshkey, securitygroup=self.ec2_groups)
+ print "Created node %s" % self.node.id
+
+ def list_nodes(self):
+ return self.driver.list_nodes()
class SshConnectionConfigurator (BaseConfigurator):
def __init__(self, **kwargs):
@@ -287,6 +271,7 @@ class SshConnectionConfigurator (BaseConfigurator):
(("ubuntu", "9.4", "*"), "root"),
(("ubuntu", "8.10", "*"), "root"),
(("ubuntu", "8.4", "*"), "ubuntu"),
+ (("fedora", "*", "*"), "root"),
(("centos", "*", "*"), "root"))),
]
@@ -300,28 +285,7 @@ class SshConnection (object):
# Gets set to False when we think we can ssh in.
self.sshwait = True
- def sshWait(self):
- "Poll until somebody's listening on port 22"
-
- if self.sshwait == False:
- return
- while self.sshwait:
- s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
- try:
- try:
- s.connect((self.ssh_host, 22))
- self.sshwait = False
- print "connected on port 22 (ssh)"
- time.sleep(15) # arbitrary timeout, in case the
- # remote sshd is slow.
- except socket.error, err:
- pass
- finally:
- s.close()
- time.sleep(3) # arbitrary timeout
-
def initSsh(self):
- self.sshWait()
ctlpath="/tmp/ec2-ssh-%s-%s-%s" % (self.ssh_host, self.ssh_login, os.getpid())
argv = ["ssh", "-o", "StrictHostKeyChecking no",
"-M", "-o", "ControlPath %s" % ctlpath,
@@ -349,7 +313,6 @@ class SshConnection (object):
self.ssh_host] + argv)
def sendFiles(self, files):
- self.sshWait()
for (localfile, remotefile) in files:
LocalHost.runLocally(["scp", "-o", "StrictHostKeyChecking no",
"-o", "ControlMaster auto",
@@ -360,8 +323,6 @@ class SshConnection (object):
("" if remotefile is None else remotefile) ])
def recvFiles(self, files):
- self.sshWait()
- print files
for (remotefile, localfile) in files:
LocalHost.runLocally(["scp", "-o", "StrictHostKeyChecking no",
"-o", "ControlMaster auto",
@@ -402,7 +363,8 @@ s/^Package:.*mongodb/Package: {pkg_name}{pkg_name_suffix}\\
Conflicts: {pkg_name_conflicts}/' debian/control; ) || exit 1
( cd "{pkg_name}{pkg_name_suffix}-{pkg_version}" && sed -i 's|$(CURDIR)/debian/mongodb/|$(CURDIR)/debian/{pkg_name}{pkg_name_suffix}/|g' debian/rules) || exit 1
( cd "{pkg_name}{pkg_name_suffix}-{pkg_version}" && sed -i 's|debian/mongodb.manpages|debian/{pkg_name}{pkg_name_suffix}.manpages|g' debian/rules) || exit 1
-( cd "{pkg_name}{pkg_name_suffix}-{pkg_version}" && sed -i '/^Name:/s/.*/Name: {pkg_name}{pkg_name_suffix}/; /^Version:/s/.*/Version: {pkg_version}/;' rpm/mongo.spec )
+( cd "{pkg_name}{pkg_name_suffix}-{pkg_version}" && sed -i '/^Name:/s/.*/Name: {pkg_name}{pkg_name_suffix}\\
+Conflicts: {pkg_name_conflicts}/; /^Version:/s/.*/Version: {pkg_version}/; /Requires.*mongo/s/mongo/{pkg_name}{pkg_name_suffix}/;' rpm/mongo.spec )
# Debian systems require some ridiculous workarounds to get an init
# script at /etc/init.d/mongodb when the package name isn't the init
# script name. Note: dh_installinit --name won't work, because that
@@ -412,6 +374,22 @@ Conflicts: {pkg_name_conflicts}/' debian/control; ) || exit 1
ln debian/init.d debian/{pkg_name}{pkg_name_suffix}.mongodb.init &&
ln debian/mongodb.upstart debian/{pkg_name}{pkg_name_suffix}.mongodb.upstart &&
sed -i 's/dh_installinit/dh_installinit --name=mongodb/' debian/rules) || exit 1
+( cd "{pkg_name}{pkg_name_suffix}-{pkg_version}" && cat debian/rules)
+( cd "{pkg_name}{pkg_name_suffix}-{pkg_version}" && cat rpm/mongo.spec)
+"""
+
+ # If we're just packaging up nightlies, do this:
+ nightly_build_mangle_files="""
+( cd "{pkg_name}{pkg_name_suffix}-{pkg_version}" && sed -i '/scons[[:space:]]*$/d; s^scons.*install^mkdir -p debian/{pkg_name}{pkg_name_suffix} \&\& wget http://downloads.mongodb.org/linux/mongodb-linux-{mongo_arch}-{mongo_pub_version}.tgz \&\& tar xzvf mongodb-linux-{mongo_arch}-{mongo_pub_version}.tgz \&\& find `tar tzf mongodb-linux-{mongo_arch}-{mongo_pub_version}.tgz | sed "s|/.*||" | sort -u | head -n1` -mindepth 1 -maxdepth 1 -type d | xargs -n1 -IARG mv -v ARG debian/{pkg_name}{pkg_name_suffix}/usr \&\& (rm debian/{pkg_name}{pkg_name_suffix}/usr/bin/mongosniff || true)^' debian/rules)
+( cd "{pkg_name}{pkg_name_suffix}-{pkg_version}" && sed -i 's/^BuildRequires:.*//; s/scons.*\ -c//; s/scons.*\ all//; s^scons.*install^(mkdir -p $RPM_BUILD_ROOT/usr ; cd /tmp \&\& curl http://downloads.mongodb.org/linux/mongodb-linux-{mongo_arch}-{mongo_pub_version}.tgz > mongodb-linux-{mongo_arch}-{mongo_pub_version}.tgz \&\& tar xzvf mongodb-linux-{mongo_arch}-{mongo_pub_version}.tgz \&\& find `tar tzf mongodb-linux-{mongo_arch}-{mongo_pub_version}.tgz | sed "s|/.*||" | sort -u | head -n1` -mindepth 1 -maxdepth 1 -type d | xargs -n1 -IARG cp -pRv ARG $RPM_BUILD_ROOT/usr \&\& (rm -r $RPM_BUILD_ROOT/usr/bin/mongosniff $RPM_BUILD_ROOT/usr/lib64/libmongoclient.a $RPM_BUILD_ROOT/usr/lib/libmongoclient.a $RPM_BUILD_ROOT/usr/include/mongo || true))^' rpm/mongo.spec)
+# Upstream nightlies no longer contain libmongoclient.
+( cd "{pkg_name}{pkg_name_suffix}-{pkg_version}" && sed -i '/%package devel/{{N;N;d;}}; /%description devel/{{N;N;N;N;N;d;}}; /%files devel/{{N;N;N;d;}};' rpm/mongo.spec )
+( cd "{pkg_name}{pkg_name_suffix}-{pkg_version}" && cat debian/rules)
+( cd "{pkg_name}{pkg_name_suffix}-{pkg_version}" && cat rpm/mongo.spec)
+"""
+#$RPM_BUILD_ROOT/usr/lib/libmongoclient.a $RPM_BUILD_ROOT/usr/lib64/libmongoclient.a
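+    # What the nightly mangle above boils down to (illustrative, with
+    # assumed values): the scons build/install steps in debian/rules
+    # and rpm/mongo.spec get replaced with a download of the prebuilt
+    # tarball -- e.g. for mongo_arch=x86_64 and mongo_pub_version=1.4.2
+    # that's http://downloads.mongodb.org/linux/mongodb-linux-x86_64-1.4.2.tgz --
+    # which is unpacked into the package staging area, with mongosniff
+    # and the libmongoclient/devel bits stripped out.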
+ mangle_files_for_new_deb_xulrunner_commands = """
+( cd "{pkg_name}{pkg_name_suffix}-{pkg_version}" && sed -i 's/xulrunner-dev/xulrunner-1.9.2-dev/g' debian/control )
"""
mangle_files_for_ancient_redhat_commands = """
@@ -432,8 +410,10 @@ mkdir -p "{pkg_product_dir}/{distro_version}/10gen/binary-{distro_arch}"
mkdir -p "{pkg_product_dir}/{distro_version}/10gen/source"
( cd "{pkg_name}{pkg_name_suffix}-{pkg_version}"; debuild ) || exit 1
# Try installing it
-dpkg -i *.deb
+dpkg -i {pkg_name}{pkg_name_suffix}*.deb
ps ax | grep mongo || {{ echo "no running mongo" >/dev/stderr; exit 1; }}
+dpkg --remove $(for f in {pkg_name}{pkg_name_suffix}*.deb ; do echo ${{f%%_*}}; done)
+dpkg --purge $(for f in {pkg_name}{pkg_name_suffix}*.deb ; do echo ${{f%%_*}}; done)
cp {pkg_name}{pkg_name_suffix}*.deb "{pkg_product_dir}/{distro_version}/10gen/binary-{distro_arch}"
cp {pkg_name}{pkg_name_suffix}*.dsc "{pkg_product_dir}/{distro_version}/10gen/source"
cp {pkg_name}{pkg_name_suffix}*.tar.gz "{pkg_product_dir}/{distro_version}/10gen/source"
@@ -445,10 +425,11 @@ rpm -Uvh http://download.fedora.redhat.com/pub/epel/5/{distro_arch}/epel-release
yum -y install {pkg_prereq_str}
"""
rpm_build_commands="""
-for d in BUILD BUILDROOT RPMS SOURCES SPECS SRPMS; do mkdir -p /usr/src/redhat/$d; done
-cp -v "{pkg_name}{pkg_name_suffix}-{pkg_version}/rpm/mongo.spec" /usr/src/redhat/SPECS
-tar -cpzf /usr/src/redhat/SOURCES/"{pkg_name}{pkg_name_suffix}-{pkg_version}".tar.gz "{pkg_name}{pkg_name_suffix}-{pkg_version}"
-rpmbuild -ba /usr/src/redhat/SPECS/mongo.spec
+for d in BUILD BUILDROOT RPMS SOURCES SPECS SRPMS; do mkdir -p {rpmbuild_dir}/$d; done
+cp -v "{pkg_name}{pkg_name_suffix}-{pkg_version}/rpm/mongo.spec" {rpmbuild_dir}/SPECS/{pkg_name}{pkg_name_suffix}.spec
+tar -cpzf {rpmbuild_dir}/SOURCES/"{pkg_name}{pkg_name_suffix}-{pkg_version}".tar.gz "{pkg_name}{pkg_name_suffix}-{pkg_version}"
+rpmbuild -ba --target={distro_arch} {rpmbuild_dir}/SPECS/{pkg_name}{pkg_name_suffix}.spec
+# FIXME: should install the rpms, check if mongod is running.
"""
# FIXME: this is clean, but adds 40 minutes or so to the build process.
old_rpm_precommands = """
@@ -474,25 +455,28 @@ rpm -ivh /usr/src/redhat/RPMS/{distro_arch}/boost-devel-1.38.0-1.{distro_arch}.r
# On very old Debianoids, libboost-<foo>-dev will be some old
# boost that's not as thready as we want, but which Eliot says
- # will work.
- very_old_deb_prereqs = ["libboost-thread-dev", "libboost-filesystem-dev", "libboost-program-options-dev", "libboost-date-time-dev", "libboost-dev", "xulrunner1.9-dev"]
+    # will work; on very new Debianoids, libboost-<foo>-dev is what we
+ # want.
+ unversioned_deb_boost_prereqs = ["libboost-thread-dev", "libboost-filesystem-dev", "libboost-program-options-dev", "libboost-date-time-dev", "libboost-dev"]
+ # On some in-between Debianoids, libboost-<foo>-dev is still a
+ # 1.34, but 1.35 packages are available, so we want those.
+ versioned_deb_boost_prereqs = ["libboost-thread1.35-dev", "libboost-filesystem1.35-dev", "libboost-program-options1.35-dev", "libboost-date-time1.35-dev", "libboost1.35-dev"]
- # On less old (but still old!) Debianoids, libboost-<foo>-dev is
- # still a 1.34, but 1.35 packages are available, so we want those.
- old_deb_prereqs = ["libboost-thread1.35-dev", "libboost-filesystem1.35-dev", "libboost-program-options1.35-dev", "libboost-date-time1.35-dev", "libboost1.35-dev", "xulrunner-dev"]
+ unversioned_deb_xulrunner_prereqs = ["xulrunner-dev"]
- # On newer Debianoids, libbost-<foo>-dev is some sufficiently new
- # thing.
- new_deb_prereqs = [ "libboost-thread-dev", "libboost-filesystem-dev", "libboost-program-options-dev", "libboost-date-time-dev", "libboost-dev", "xulrunner-dev" ]
+ old_versioned_deb_xulrunner_prereqs = ["xulrunner-1.9-dev"]
+ new_versioned_deb_xulrunner_prereqs = ["xulrunner-1.9.2-dev"]
common_deb_prereqs = [ "build-essential", "dpkg-dev", "libreadline-dev", "libpcap-dev", "libpcre3-dev", "git-core", "scons", "debhelper", "devscripts", "git-core" ]
centos_preqres = ["js-devel", "readline-devel", "pcre-devel", "gcc-c++", "scons", "rpm-build", "git" ]
- fedora_prereqs = ["js-devel", "readline-devel", "pcre-devel", "gcc-c++", "scons", "rpm-build", "git" ]
+ fedora_prereqs = ["js-devel", "readline-devel", "pcre-devel", "gcc-c++", "scons", "rpm-build", "git", "curl" ]
def __init__(self, **kwargs):
super(ScriptFileConfigurator, self).__init__(**kwargs)
- if kwargs["mongo_version"][0] == 'r':
+ # FIXME: this method is disabled until we get back around to
+ # actually building from source.
+ if None: # kwargs["mongo_version"][0] == 'r':
self.get_mongo_commands = """
wget -Otarball.tgz "http://github.com/mongodb/mongo/tarball/{mongo_version}";
tar xzf tarball.tgz
@@ -502,7 +486,9 @@ mv "`tar tzf tarball.tgz | sed 's|/.*||' | sort -u | head -n1`" "{pkg_name}{pkg_
self.get_mongo_commands = """
git clone git://github.com/mongodb/mongo.git
"""
- if kwargs['mongo_version'][0] == 'v':
+ # This is disabled for the moment. it's for building the
+ # tip of some versioned branch.
+ if None: #kwargs['mongo_version'][0] == 'v':
self.get_mongo_commands +="""
( cd mongo && git archive --prefix="{pkg_name}{pkg_name_suffix}-{pkg_version}/" "`git log origin/{mongo_version} | sed -n '1s/^commit //p;q'`" ) | tar xf -
"""
@@ -518,81 +504,188 @@ git clone git://github.com/mongodb/mongo.git
self.configuration += [("pkg_product_dir",
((("ubuntu", "*", "*"), self.deb_productdir),
(("debian", "*", "*"), self.deb_productdir),
- (("fedora", "*", "*"), self.rpm_productdir),
- (("centos", "*", "*"), self.rpm_productdir))),
+ (("fedora", "*", "*"), "~/rpmbuild/RPMS"),
+ (("centos", "*", "*"), "/usr/src/redhat/RPMS"))),
("pkg_prereqs",
((("ubuntu", "9.4", "*"),
- self.old_deb_prereqs + self.common_deb_prereqs),
+ self.versioned_deb_boost_prereqs + self.unversioned_deb_xulrunner_prereqs + self.common_deb_prereqs),
(("ubuntu", "9.10", "*"),
- self.new_deb_prereqs + self.common_deb_prereqs),
+ self.unversioned_deb_boost_prereqs + self.unversioned_deb_xulrunner_prereqs + self.common_deb_prereqs),
(("ubuntu", "10.4", "*"),
- self.new_deb_prereqs + self.common_deb_prereqs),
+ self.unversioned_deb_boost_prereqs + self.new_versioned_deb_xulrunner_prereqs + self.common_deb_prereqs),
(("ubuntu", "8.10", "*"),
- self.old_deb_prereqs + self.common_deb_prereqs),
+ self.versioned_deb_boost_prereqs + self.unversioned_deb_xulrunner_prereqs + self.common_deb_prereqs),
(("ubuntu", "8.4", "*"),
- self.very_old_deb_prereqs + self.common_deb_prereqs),
+ self.unversioned_deb_boost_prereqs + self.old_versioned_deb_xulrunner_prereqs + self.common_deb_prereqs),
(("debian", "5.0", "*"),
- self.old_deb_prereqs + self.common_deb_prereqs),
- (("fedora", "8", "*"),
+ self.versioned_deb_boost_prereqs + self.unversioned_deb_xulrunner_prereqs + self.common_deb_prereqs),
+ (("fedora", "*", "*"),
self.fedora_prereqs),
(("centos", "5.4", "*"),
                       self.centos_prereqs))),
+ # FIXME: this is deprecated
("commands",
((("debian", "*", "*"),
- self.preamble_commands + self.deb_prereq_commands + self.get_mongo_commands + self.mangle_files_commands + self.deb_build_commands),
- (("ubuntu", "*", "*"),
+ self.deb_prereq_commands + self.get_mongo_commands + self.mangle_files_commands + self.deb_build_commands),
+ (("ubuntu", "10.4", "*"),
+ self.preamble_commands + self.deb_prereq_commands + self.get_mongo_commands + self.mangle_files_commands + self.mangle_files_for_new_deb_xulrunner_commands + self.deb_build_commands),
+ (("ubuntu", "*", "*"),
self.preamble_commands + self.deb_prereq_commands + self.get_mongo_commands + self.mangle_files_commands + self.deb_build_commands),
(("centos", "*", "*"),
self.preamble_commands + self.old_rpm_precommands + self.rpm_prereq_commands + self.get_mongo_commands + self.mangle_files_commands + self.mangle_files_for_ancient_redhat_commands + self.rpm_build_commands),
(("fedora", "*", "*"),
self.preamble_commands + self.old_rpm_precommands + self.rpm_prereq_commands + self.get_mongo_commands + self.mangle_files_commands + self.rpm_build_commands))),
+ ("preamble_commands",
+ ((("*", "*", "*"), self.preamble_commands),
+ )),
+ ("install_prereqs",
+ ((("debian", "*", "*"), self.deb_prereq_commands),
+ (("ubuntu", "*", "*"), self.deb_prereq_commands),
+ (("centos", "*", "*"), self.rpm_prereq_commands),
+ (("fedora", "*", "*"), self.rpm_prereq_commands))),
+ ("get_mongo",
+ ((("*", "*", "*"), self.get_mongo_commands),
+ )),
+ ("mangle_mongo",
+ ((("debian", "*", "*"), self.mangle_files_commands),
+ (("ubuntu", "10.4", "*"),
+ self.mangle_files_commands + self.mangle_files_for_new_deb_xulrunner_commands),
+ (("ubuntu", "*", "*"), self.mangle_files_commands),
+ (("centos", "*", "*"),
+ self.mangle_files_commands + self.mangle_files_for_ancient_redhat_commands),
+ (("fedora", "*", "*"),
+ self.mangle_files_commands))),
+ ("build_prerequisites",
+ ((("fedora", "*", "*"), self.old_rpm_precommands),
+ (("centos", "*", "*"), self.old_rpm_precommands),
+ (("*", "*", "*"), ''))),
+ ("install_for_packaging",
+ ((("debian", "*", "*"),""),
+ (("ubuntu", "*", "*"),""),
+ (("fedora", "*", "*"), ""),
+ (("centos", "*", "*"),""))),
+ ("build_package",
+ ((("debian", "*", "*"),
+ self.deb_build_commands),
+ (("ubuntu", "*", "*"),
+ self.deb_build_commands),
+ (("fedora", "*", "*"),
+ self.rpm_build_commands),
+ (("centos", "*", "*"),
+ self.rpm_build_commands))),
("pkg_name",
((("debian", "*", "*"), "mongodb"),
(("ubuntu", "*", "*"), "mongodb"),
(("centos", "*", "*"), "mongo"),
-
- (("fedora", "*", "*"), "mongo")
- )),
+ (("fedora", "*", "*"), "mongo"))),
+ # FIXME: there should be a command-line argument for this.
("pkg_name_conflicts",
- ((("*", "*", "*"), ["", "-stable", "-unstable", "-snapshot"]),
- ))
- ]
+ ((("*", "*", "*"), ["", "-stable", "-unstable", "-snapshot", "-oldstable"]),
+ )),
+ ("rpmbuild_dir",
+ ((("fedora", "*", "*"), "~/rpmbuild"),
+ (("centos", "*", "*"), "/usr/src/redhat"),
+ (("*", "*","*"), ''),
+ )),
+ ]
class ScriptFile(object):
def __init__(self, configurator, **kwargs):
- self.mongo_version = kwargs["mongo_version"]
- self.pkg_version = kwargs["pkg_version"]
- self.pkg_name_suffix = kwargs["pkg_name_suffix"] if "pkg_name_suffix" in kwargs else ""
+ self.configurator = configurator
+ self.mongo_version_spec = kwargs['mongo_version_spec']
+ self.mongo_arch = kwargs["arch"] if kwargs["arch"] == "x86_64" else "i686"
self.pkg_prereqs = configurator.default("pkg_prereqs")
self.pkg_name = configurator.default("pkg_name")
self.pkg_product_dir = configurator.default("pkg_product_dir")
- self.pkg_name_conflicts = configurator.default("pkg_name_conflicts") if self.pkg_name_suffix else []
- self.pkg_name_conflicts.remove(self.pkg_name_suffix) if self.pkg_name_suffix and self.pkg_name_suffix in self.pkg_name_conflicts else []
- self.formatter = configurator.default("commands")
+ #self.formatter = configurator.default("commands")
self.distro_name = configurator.default("distro_name")
self.distro_version = configurator.default("distro_version")
self.distro_arch = configurator.default("distro_arch")
+ def bogoformat(self, fmt, **kwargs):
+ r = ''
+ i = 0
+ while True:
+ c = fmt[i]
+ if c in '{}':
+ i+=1
+ c2=fmt[i]
+ if c2 == c:
+ r+=c
+ else:
+ j=i
+ while True:
+ p=fmt[j:].find('}')
+ if p == -1:
+ raise Exception("malformed format string starting at %d: no closing brace" % i)
+ else:
+ j+=p
+ if len(fmt) > (j+1) and fmt[j+1]=='}':
+ j+=2
+ else:
+ break
+ key = fmt[i:j]
+ r+=kwargs[key]
+ i=j
+ else:
+ r+=c
+ i+=1
+ if i==len(fmt):
+ return r
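+    # bogoformat() is a brace-aware stand-in for str.format(): doubled
+    # braces collapse to literal single braces and {key} is looked up
+    # in kwargs, so (made-up values)
+    #     self.bogoformat('x {{y}} {pkg_name}', pkg_name='mongo')
+    # yields 'x {y} mongo'.  This matters because the shell and sed
+    # fragments above are full of literal braces.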
+
+    def fmt(self, formatter, **kwargs):
+        # string.Formatter.format() chokes on the literal braces in
+        # the shell fragments, so use the brace-aware bogoformat()
+        # instead.
+        return self.bogoformat(formatter, **kwargs)
+
def genscript(self):
- return self.formatter.format(mongo_version=self.mongo_version,
- distro_name=self.distro_name,
- distro_version=self.distro_version,
- distro_arch=self.distro_arch,
- pkg_prereq_str=" ".join(self.pkg_prereqs),
- pkg_name=self.pkg_name,
- pkg_name_suffix=self.pkg_name_suffix,
- pkg_version=self.pkg_version,
- pkg_product_dir=self.pkg_product_dir,
- # KLUDGE: rpm specs and deb
- # control files use
- # comma-separated conflicts,
- # but there's no reason to
- # suppose this works elsewhere
- pkg_name_conflicts = ", ".join([self.pkg_name+conflict for conflict in self.pkg_name_conflicts])
- )
+ script=''
+ formatter = self.configurator.default("preamble_commands") + self.configurator.default("install_prereqs")
+ script+=self.fmt(formatter,
+ distro_name=self.distro_name,
+ distro_version=self.distro_version,
+ distro_arch=self.distro_arch,
+ pkg_name=self.pkg_name,
+ pkg_product_dir=self.pkg_product_dir,
+ mongo_arch=self.mongo_arch,
+ pkg_prereq_str=" ".join(self.pkg_prereqs),
+ )
+
+ specs=self.mongo_version_spec.split(',')
+ for spec in specs:
+ (version, pkg_name_suffix, pkg_version) = parse_mongo_version_spec(spec)
+ mongo_version = version if version[0] != 'n' else ('HEAD' if version == 'nlatest' else 'r'+version[1:]) #'HEAD'
+ mongo_pub_version = version.lstrip('n') if version[0] in 'n' else 'latest'
+        pkg_name_suffix = pkg_name_suffix if pkg_name_suffix else ''
+ pkg_name_conflicts = list(self.configurator.default("pkg_name_conflicts") if pkg_name_suffix else [])
+        if pkg_name_suffix and pkg_name_suffix in pkg_name_conflicts:
+            pkg_name_conflicts.remove(pkg_name_suffix)
+ formatter = self.configurator.default("get_mongo") + self.configurator.default("mangle_mongo") + (self.configurator.nightly_build_mangle_files if version[0] == 'n' else '') +(self.configurator.default("build_prerequisites") if version[0] != 'n' else '') + self.configurator.default("install_for_packaging") + self.configurator.default("build_package")
+ script+=self.fmt(formatter,
+ mongo_version=mongo_version,
+ distro_name=self.distro_name,
+ distro_version=self.distro_version,
+ distro_arch=self.distro_arch,
+ pkg_prereq_str=" ".join(self.pkg_prereqs),
+ pkg_name=self.pkg_name,
+ pkg_name_suffix=pkg_name_suffix,
+ pkg_version=pkg_version,
+ pkg_product_dir=self.pkg_product_dir,
+ # KLUDGE: rpm specs and deb
+ # control files use
+ # comma-separated conflicts,
+ # but there's no reason to
+ # suppose this works elsewhere
+ pkg_name_conflicts = ", ".join([self.pkg_name+conflict for conflict in pkg_name_conflicts]),
+ mongo_arch=self.mongo_arch,
+ mongo_pub_version=mongo_pub_version,
+ rpmbuild_dir=self.configurator.default('rpmbuild_dir'))
+ script+='rm -rf mongo'
+ return script
def __enter__(self):
self.localscript=None
@@ -614,6 +707,69 @@ class ScriptFile(object):
class Configurator(SshConnectionConfigurator, EC2InstanceConfigurator, ScriptFileConfigurator, BaseHostConfigurator):
def __init__(self, **kwargs):
super(Configurator, self).__init__(**kwargs)
+
+class rackspaceInstance(nodeWrapper):
+ def __init__(self, configurator, **kwargs):
+ super(rackspaceInstance, self).__init__(configurator, **kwargs)
+ self.imgname=configurator.default('rackspace_imgname')
+
+ def start(self):
+ driver = get_driver(Provider.RACKSPACE)
+ self.conn = driver(settings.rackspace_account, settings.rackspace_api_key)
+ name=self.imgname+'-'+str(os.getpid())
+ images=filter(lambda x: (x.name.find(self.imgname) > -1), self.conn.list_images())
+ sizes=self.conn.list_sizes()
+        sizes.sort(cmp=lambda x,y: cmp(int(x.ram), int(y.ram)))  # smallest RAM first
+ node = None
+ if len(images) > 1:
+ raise Exception("too many images with \"%s\" in the name" % self.imgname)
+ if len(images) < 1:
+ raise Exception("too few images with \"%s\" in the name" % self.imgname)
+ image = images[0]
+ self.node = self.conn.create_node(image=image, name=name, size=sizes[0])
+ # Note: the password is available only in the response to the
+ # create_node request, not in subsequent list_nodes()
+ # requests; so although the node objects we get back from
+        # list_nodes() are usable for most things, we must hold onto
+ # the initial password.
+ self.password = self.node.extra['password']
+ print self.node
+
+ def list_nodes(self):
+ return self.conn.list_nodes()
+
+ def setup(self):
+ self.putSshKey()
+
+ def putSshKey(self):
+ keyfile=settings.makedist['ssh_keyfile']
+ ssh = ParamikoSSHClient(hostname = self.node.public_ip[0], password = self.password)
+ ssh.connect()
+ print "putting ssh public key"
+ ssh.put(".ssh/authorized_keys", contents=open(keyfile+'.pub').read(), chmod=0600)
+ print "ok"
+
+def parse_mongo_version_spec (spec):
+ foo = spec.split(":")
+ mongo_version = foo[0] # this can be a commit id, a
+ # release id "r1.2.2", or a branch name
+ # starting with v.
+    if len(foo) > 1:
+        pkg_name_suffix = foo[1]
+    else:
+        pkg_name_suffix = ""
+ if len(foo) > 2 and foo[2]:
+ pkg_version = foo[2]
+ else:
+ pkg_version = time.strftime("%Y%m%d")
+ if not pkg_name_suffix:
+ if mongo_version[0] in ["r", "v"]:
+ nums = mongo_version.split(".")
+ if int(nums[1]) % 2 == 0:
+ pkg_name_suffix = "-stable"
+ else:
+ pkg_name_suffix = "-unstable"
+ else:
+ pkg_name_suffix = ""
+ return (mongo_version, pkg_name_suffix, pkg_version)
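+# Worked examples of the spec syntax (pkg_version defaults to today's
+# date, shown here as YYYYMMDD):
+#   "r1.2.2"           -> ("r1.2.2", "-stable", "YYYYMMDD")   # minor 2 is even, hence stable
+#   "HEAD:-snapshot"   -> ("HEAD", "-snapshot", "YYYYMMDD")
+#   "n1.4.2::20100601" -> ("n1.4.2", "", "20100601")
+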
def main():
# checkEnvironment()
@@ -629,59 +785,28 @@ def main():
try:
import settings
if "makedist" in dir ( settings ):
- for key in ["EC2_HOME", "JAVA_HOME"]:
- if key in settings.makedist:
- os.environ[key] = settings.makedist[key]
- for key in ["ec2_pkey", "ec2_cert", "ec2_sshkey", "ssh_keyfile" ]:
+ for key in ["ec2_sshkey", "ssh_keyfile", "gpg_homedir" ]:
if key not in kwargs and key in settings.makedist:
kwargs[key] = settings.makedist[key]
except Exception, err:
print "No settings: %s. Continuing anyway..." % err
pass
- # Ensure that PATH contains $EC2_HOME/bin
- vars = ["EC2_HOME", "JAVA_HOME"]
- for var in vars:
- if os.getenv(var) == None:
- raise SimpleError("Environment variable %s is unset; did you create a settings.py?", var)
-
- if len([True for x in os.environ["PATH"].split(":") if x.find(os.environ["EC2_HOME"]) > -1]) == 0:
- os.environ["PATH"]=os.environ["EC2_HOME"]+"/bin:"+os.environ["PATH"]
-
-
kwargs["distro_name"] = distro_name
kwargs["distro_version"] = distro_version
kwargs["arch"] = arch
-
- foo = mongo_version_spec.split(":")
- kwargs["mongo_version"] = foo[0] # this can be a commit id, a
- # release id "r1.2.2", or a
- # branch name starting with v.
- if len(foo) > 1:
- kwargs["pkg_name_suffix"] = foo[1]
- if len(foo) > 2 and foo[2]:
- kwargs["pkg_version"] = foo[2]
- else:
- kwargs["pkg_version"] = time.strftime("%Y%m%d")
-
+ kwargs['mongo_version_spec'] = mongo_version_spec
+
+ kwargs["localdir"] = rootdir
# FIXME: this should also include the mongo version or something.
- if "subdirs" in kwargs:
- kwargs["localdir"] = "%s/%s/%s/%s" % (rootdir, distro_name, distro_version, arch, kwargs["mongo_version"])
- else:
- kwargs["localdir"] = rootdir
+# if "subdirs" in kwargs:
+# kwargs["localdir"] = "%s/%s/%s/%s/%s" % (rootdir, distro_name, distro_version, arch, kwargs["mongo_version"])
+# else:
+
- if "pkg_name_suffix" not in kwargs:
- if kwargs["mongo_version"][0] in ["r", "v"]:
- nums = kwargs["mongo_version"].split(".")
- if int(nums[1]) % 2 == 0:
- kwargs["pkg_name_suffix"] = "-stable"
- else:
- kwargs["pkg_name_suffix"] = "-unstable"
- else:
- kwargs["pkg_name_suffix"] = ""
- kwargs['local_gpg_dir'] = kwargs["local_gpg_dir"] if "local_gpg_dir" in kwargs else os.path.expanduser("~/.gnupg")
+ kwargs['gpg_homedir'] = kwargs["gpg_homedir"] if "gpg_homedir" in kwargs else os.path.expanduser("~/.gnupg")
configurator = Configurator(**kwargs)
LocalHost.runLocally(["mkdir", "-p", kwargs["localdir"]])
with ScriptFile(configurator, **kwargs) as script:
@@ -689,15 +814,18 @@ def main():
print """# Going to run the following on a fresh AMI:"""
print f.read()
time.sleep(10)
- with EC2Instance(configurator, **kwargs) as ec2:
- ec2.initwait()
- kwargs["ssh_host"] = ec2.getHostname()
+ # FIXME: it's not the best to have two different pathways for
+ # the different hosting services, but...
+ with EC2Instance(configurator, **kwargs) if kwargs['distro_name'] != 'fedora' else rackspaceInstance(configurator, **kwargs) as host:
+ host.initwait()
+ host.setup()
+ kwargs["ssh_host"] = host.getHostname()
with SshConnection(configurator, **kwargs) as ssh:
ssh.runRemotely(["uname -a; ls /"])
ssh.runRemotely(["mkdir", "pkg"])
if "local_mongo_dir" in kwargs:
ssh.sendFiles([(kwargs["local_mongo_dir"]+'/'+d, "pkg") for d in ["rpm", "debian"]])
- ssh.sendFiles([(kwargs['local_gpg_dir'], ".gnupg")])
+ ssh.sendFiles([(kwargs['gpg_homedir'], ".gnupg")])
ssh.sendFiles([(script.localscript, "makedist.sh")])
ssh.runRemotely((["sudo"] if ssh.ssh_login != "root" else [])+ ["sh", "makedist.sh"])
ssh.recvFiles([(script.pkg_product_dir, kwargs['localdir'])])
@@ -709,7 +837,7 @@ def processArguments():
("N", "no-terminate", False, "Leave the EC2 instance running at the end of the job", None),
("S", "subdirs", False, "Create subdirectories of the output directory based on distro name, version, and architecture", None),
("I", "use-internal-name", False, "Use the EC2 internal hostname for sshing", None),
- (None, "local-gpg-dir", True, "Local directory of gpg junk", "STRING"),
+ (None, "gpg-homedir", True, "Local directory of gpg junk", "STRING"),
(None, "local-mongo-dir", True, "Copy packaging files from local mongo checkout", "DIRECTORY"),
]
shortopts = "".join([t[0] + (":" if t[2] else "") for t in flagspec if t[0] is not None])
@@ -746,11 +874,12 @@ def processArguments():
MONGO-VERSION-SPEC has the syntax
Commit(:Pkg-Name-Suffix(:Pkg-Version)). If Commit starts with an 'r',
-build from a tagged release; if Commit starts with a 'v', build from
-the HEAD of a version branch; otherwise, build whatever git commit is
-identified by Commit. Pkg-Name-Suffix gets appended to the package
-name, and defaults to "-stable" and "-unstable" if Commit looks like
-it designates a stable or unstable release/branch, respectively.
+build from a tagged release; if Commit starts with an 'n', package up
+a nightly build; if Commit starts with a 'v', build from the HEAD of a
+version branch; otherwise, build whatever git commit is identified by
+Commit. Pkg-Name-Suffix gets appended to the package name, and
+defaults to "-stable" and "-unstable" if Commit looks like it
+designates a stable or unstable release/branch, respectively.
Pkg-Version is used as the package version, and defaults to YYYYMMDD.
Examples:
@@ -779,8 +908,7 @@ Options:"""
print "%-20s\t%s." % ("%4s--%s%s:" % ("-%s, " % t[0] if t[0] else "", t[1], ("="+t[4]) if t[4] else ""), t[3])
print """
Mandatory arguments to long options are also mandatory for short
-options. Some EC2 arguments default to (and override) environment
-variables; see the ec2-api-tools documentation."""
+options."""
sys.exit(0)
if "usage" in kwargs:
@@ -796,4 +924,5 @@ if __name__ == "__main__":
# Examples:
-# ./makedist.py --local-gpg-dir=$HOME/10gen/dst/dist-gnupg /tmp/ubuntu ubuntu 8.10 x86_64 HEAD:-snapshot
+# ./makedist.py /tmp/ubuntu ubuntu 8.10 x86_64 HEAD:-snapshot,v1.4:-stable,v1.5:-unstable
+# ./makedist.py /tmp/ubuntu ubuntu 8.10 x86_64 nlatest:-snapshot,n1.4.2:-stable,n1.5.0:-unstable
diff --git a/buildscripts/mergerepositories.py b/buildscripts/mergerepositories.py
new file mode 100644
index 0000000..bc50d08
--- /dev/null
+++ b/buildscripts/mergerepositories.py
@@ -0,0 +1,194 @@
+#!/usr/bin/python
+
+from __future__ import with_statement
+from libcloud.types import Provider
+from libcloud.providers import get_driver
+from libcloud.drivers.ec2 import EC2NodeDriver, NodeImage
+from libcloud.base import Node, NodeImage, NodeSize, NodeState
+
+# libcloud's SSH client seems to be one of those pointless wrappers
+# that (at the moment) both doesn't add anything to the thing it wraps
+# (Paramiko) and also fails to expose the underlying thing's features.
+# What's wrong with people?
+#from libcloud.ssh import SSHClient
+
+import time
+import sys
+import settings
+import subprocess
+import os
+import socket
+
+EC2 = get_driver(Provider.EC2)
+EC2Driver=EC2NodeDriver(settings.id, settings.key)
+
+def tryEC2():
+
+ image=NodeImage('ami-bf07ead6', 'ubuntu 10.4', EC2)
+ size=NodeSize('m1.large', 'large', None, None, None, None, EC2)
+
+ node = None
+ try:
+ node = EC2Driver.create_node(image=image, name="ubuntu-test", size=size, keyname="kp1", securitygroup=['default', 'dist-slave', 'buildbot-slave'])
+ print node
+ print node.id
+ while node.state == NodeState.PENDING:
+ time.sleep(3)
+ finally:
+ if node:
+ node.destroy()
+
+
+class node(object):
+ def initWait(self):
+ while 1:
+ n=None
+ # EC2 sometimes takes a while to report a node.
+ for i in range(6):
+ nodes = [n for n in self.list_nodes() if (n.id==self.node.id)]
+ if len(nodes)>0:
+ n=nodes[0]
+ break
+ else:
+ time.sleep(10)
+ if not n:
+ raise Exception("couldn't find node with id %s" % self.node.id)
+ if n.state == NodeState.PENDING:
+ time.sleep(10)
+ else:
+ self.node = n
+ break
+ print "ok"
+ # Now wait for the node's sshd to be accepting connections.
+ print "waiting for ssh"
+ sshwait = True
+ while sshwait:
+ s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+ try:
+ try:
+ s.connect((self.node.public_ip[0], 22))
+ sshwait = False
+ print "connected on port 22 (ssh)"
+ time.sleep(15) # arbitrary timeout, in case the
+ # remote sshd is slow.
+ except socket.error, err:
+ pass
+ finally:
+ s.close()
+ time.sleep(3) # arbitrary timeout
+ print "ok"
+
+ def __enter__(self):
+ return self
+
+ def __exit__(self, arg0, arg1, arg2):
+ print "shutting down node %s" % self.node
+ self.node.destroy()
+
+# I don't think libcloud's Nodes implement __enter__ and __exit__, and
+# I like the with statement for ensuring that we don't leak nodes when
+# we don't have to.
+class ec2node(node):
+ def list_nodes(self):
+ return EC2Driver.list_nodes()
+
+class ubuntuNode(ec2node):
+ def __init__(self):
+ image=NodeImage('ami-bf07ead6', 'ubuntu 10.4', EC2)
+ size=NodeSize('m1.large', 'large', None, None, None, None, EC2)
+
+ self.node = EC2Driver.create_node(image=image, name="ubuntu-test", size=size, securitygroup=['default', 'dist-slave', 'buildbot-slave'], keyname='kp1')
+
+class centosNode(ec2node):
+ def __init__(self):
+        image=NodeImage('ami-ccb35ea5', 'centos', EC2)
+        size=NodeSize('m1.large', 'large', None, None, None, None, EC2)
+
+        self.node = EC2Driver.create_node(image=image, name="centos-test", size=size, securitygroup=['default', 'dist-slave', 'buildbot-slave'], keyname='kp1')
+
+class rackspaceNode(node):
+ def list_nodes(self):
+        return self.conn.list_nodes()
+
+class fedora11Node(rackspaceNode):
+ def __init__(self):
+ driver = get_driver(Provider.RACKSPACE)
+ self.conn = driver(settings.rackspace_account, settings.rackspace_api_key)
+ string='Fedora 11'
+ images=filter(lambda x: (x.name.find(string) > -1), self.conn.list_images())
+ sizes=self.conn.list_sizes()
+        sizes.sort(cmp=lambda x,y: cmp(int(x.ram), int(y.ram)))  # smallest RAM first
+ node = None
+        if len(images) != 1:
+            raise Exception("expected exactly one image with \"%s\" in the name, got %d" % (string, len(images)))
+ image = images[0]
+ self.node = self.conn.create_node(image=image, name=string, size=sizes[0])
+ print self.node
+ self.password = self.node.extra['password']
+
+class Err(Exception):
+ pass
+
+def merge_yum_repo(dir, outdir):
+ dirtail=dir.rstrip('\/').split('/')[-1]
+ keyfile=settings.makedist['ssh_keyfile']
+ makeyumrepo="""find . -name RPMS | while read dir; do (cd $dir/.. && createrepo .); done"""
+ with centosNode() as centos:
+ centos.initWait()
+ print centos.node
+ run_for_effect(["scp", "-o", "StrictHostKeyChecking no","-i", keyfile, "-r", dir, "root@"+centos.node.public_ip[0]+":"])
+ run_for_effect(["ssh", "-o", "StrictHostKeyChecking no","-i", keyfile, "root@"+centos.node.public_ip[0], "cd ./" + dirtail + " && " + makeyumrepo])
+ run_for_effect(["scp", "-o", "StrictHostKeyChecking no", "-i", keyfile, "-r", "root@"+centos.node.public_ip[0]+":./"+dirtail +'/*', outdir])
+
+
+def merge_apt_repo(dir, outdir):
+ dirtail=dir.rstrip('\/').split('/')[-1]
+
+ gpgdir=settings.makedist['gpg_homedir']
+ keyfile=settings.makedist['ssh_keyfile']
+
+ makeaptrepo="""for x in debian ubuntu; do (cd $x; for d in `find . -name *.deb | sed 's|^./||; s|/[^/]*$||' | sort -u`; do dpkg-scanpackages $d > $d/Packages; gzip -9c $d/Packages > $d/Packages.gz; done) ; done"""
+ makereleaseprologue="""Origin: 10gen
+Label: 10gen
+Suite: 10gen
+Codename: VVVVVV
+Version: VVVVVV
+Architectures: i386 amd64
+Components: 10gen
+Description: 10gen packages"""
+ makeaptrelease="""find . -maxdepth 3 -mindepth 3 | while read d; do ( cd $d && (echo '%s' | sed s/VVVVVV/$(basename $(pwd))/; apt-ftparchive release .) > /tmp/Release && mv /tmp/Release . && gpg -r `gpg --list-keys | grep uid | awk '{print $(NF)}'` --no-secmem-warning --no-tty -abs --output Release.gpg Release ); done""" % makereleaseprologue
+ with ubuntuNode() as ubuntu:
+ ubuntu.initWait()
+ print ubuntu.node
+ run_for_effect(["ssh", "-o", "StrictHostKeyChecking no","-i", keyfile, "ubuntu@"+ubuntu.node.public_ip[0], "sudo", "sh", "-c", "\"export DEBIAN_FRONTEND=noninteractive; apt-get update; apt-get -y install debhelper\""])
+ run_for_effect(["scp", "-o", "StrictHostKeyChecking no","-i", keyfile, "-r", dir, "ubuntu@"+ubuntu.node.public_ip[0]+":"])
+ run_for_effect(["scp", "-o", "StrictHostKeyChecking no","-i", keyfile, "-r", gpgdir, "ubuntu@"+ubuntu.node.public_ip[0]+":.gnupg"])
+ run_for_effect(["ssh", "-o", "StrictHostKeyChecking no","-i", keyfile, "ubuntu@"+ubuntu.node.public_ip[0], "sh", "-c", "\"ls -lR ./" + dirtail + "\""])
+ run_for_effect(["ssh", "-o", "StrictHostKeyChecking no","-i", keyfile, "ubuntu@"+ubuntu.node.public_ip[0], "cd ./"+dirtail + " && " + makeaptrepo])
+ run_for_effect(["ssh", "-o", "StrictHostKeyChecking no","-i", keyfile, "ubuntu@"+ubuntu.node.public_ip[0], "cd ./"+dirtail + " && " + makeaptrelease])
+ run_for_effect(["scp", "-o", "StrictHostKeyChecking no", "-i", keyfile, "-r", "ubuntu@"+ubuntu.node.public_ip[0]+":./"+dirtail +'/*', outdir])
+
+
+def run_for_effect(argv):
+ print " ".join(argv)
+ r=subprocess.Popen(argv).wait()
+ if r!=0:
+ raise Err("subprocess %s exited %d" % (argv, r))
+
+if __name__ == "__main__":
+ (flavor, dir, outdir) = sys.argv[-3:]
+
+ if flavor == "deb":
+ merge_apt_repo(dir, outdir)
+ elif flavor == "rpm":
+ merge_yum_repo(dir, outdir)
+ else:
+ Err("unknown pkg flavor %s" % flavor)
+ # TODO: yum repositories
+
+
+ #main()
+ #tryRackSpace()
diff --git a/buildscripts/smoke.py b/buildscripts/smoke.py
new file mode 100755
index 0000000..0023226
--- /dev/null
+++ b/buildscripts/smoke.py
@@ -0,0 +1,522 @@
+#!/usr/bin/python
+
+# smoke.py: run some mongo tests.
+
+# Bugs, TODOs:
+
+# 0 Some tests hard-code pathnames relative to the mongo repository,
+# so the smoke.py process and all its children must be run with the
+# mongo repo as current working directory. That's kinda icky.
+
+# 1 The tests that are implemented as standalone executables ("test",
+# "perftest"), don't take arguments for the dbpath, but
+# unconditionally use "/tmp/unittest".
+
+# 2 mongod output gets intermingled with mongo output, and it's often
+# hard to find error messages in the slop. Maybe have smoke.py do
+# some fancier wrangling of child process output?
+
+# 3 Some test suites run their own mongods, and so don't need us to
+# run any mongods around their execution. (It's harmless to do so,
+# but adds noise in the output.)
+
+# 4 Running a separate mongo shell for each js file is slower than
+# loading js files into one mongo shell process. Maybe have runTest
+# queue up all filenames ending in ".js" and run them in one mongo
+# shell at the "end" of testing?
+
+# 5 Right now small-oplog implies master/slave replication. Maybe
+# running with replication should be an orthogonal concern. (And
+# maybe test replica set replication, too.)
+
+# 6 We use cleanbb.py to clear out the dbpath, but cleanbb.py kills
+# off all mongods on a box, which means you can't run two smoke.py
+# jobs on the same host at once. So something's gotta change.
+
+from __future__ import with_statement
+from subprocess import Popen, PIPE, call
+import os
+import sys
+import utils
+import time
+import socket
+from optparse import OptionParser
+import atexit
+import glob
+import shutil
+import re
+
+mongoRepo = os.getcwd() #'./'
+testPath = None
+
+mongodExecutable = "./mongod"
+mongodPort = "32000"
+shellExecutable = "./mongo"
+continueOnFailure = False
+oneMongodPerTest = False
+
+tests = []
+winners = []
+losers = {}
+
+# Finally, atexit functions seem to be a little oblivious to whether
+# Python is exiting because of an error, so we'll use this to
+# communicate with the report() function.
+exit_bad = True
+
+# For replication hash checking
+replicated_dbs = []
+lost_in_slave = []
+lost_in_master = []
+screwy_in_slave = {}
+
+smokeDbPrefix = ''
+smallOplog = False
+
+# This class just implements the with statement API, for a sneaky
+# purpose below.
+class nothing(object):
+ def __enter__(self):
+ return self
+ def __exit__(self, type, value, traceback):
+ return not isinstance(value, Exception)
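+# For example, runTests() below does things like
+#     with mongod(smallOplog=smallOplog) if oneMongodPerTest else nothing() as master:
+# so the body of the with statement runs either way, with a real
+# mongod started only when we want one per test.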
+
+class mongod(object):
+ def __init__(self, **kwargs):
+ self.kwargs = kwargs
+ self.proc = None
+
+ def __enter__(self):
+ self.start()
+ return self
+
+ def __exit__(self, type, value, traceback):
+ try:
+ self.stop()
+ except Exception, e:
+ print >> sys.stderr, "error shutting down mongod"
+ print >> sys.stderr, e
+ return not isinstance(value, Exception)
+
+ def ensureTestDirs(self):
+ utils.ensureDir( smokeDbPrefix + "/tmp/unittest/" )
+ utils.ensureDir( smokeDbPrefix + "/data/" )
+ utils.ensureDir( smokeDbPrefix + "/data/db/" )
+
+ def checkMongoPort( self, port=27017 ):
+ sock = socket.socket()
+ sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
+ sock.settimeout(1)
+ sock.connect(("localhost", int(port)))
+ sock.close()
+
+ def didMongodStart( self, port=mongodPort, timeout=20 ):
+ while timeout > 0:
+ time.sleep( 1 )
+ try:
+ self.checkMongoPort( int(port) )
+ return True
+ except Exception,e:
+ print >> sys.stderr, e
+ timeout = timeout - 1
+ return False
+
+ def start(self):
+ global mongodPort
+ global mongod
+ if self.proc:
+ print >> sys.stderr, "probable bug: self.proc already set in start()"
+ return
+ self.ensureTestDirs()
+ dirName = smokeDbPrefix + "/data/db/sconsTests/"
+ self.port = int(mongodPort)
+ self.slave = False
+ if 'slave' in self.kwargs:
+ dirName = smokeDbPrefix + '/data/db/sconsTestsSlave/'
+ srcport = mongodPort
+ self.port += 1
+ self.slave = True
+ if os.path.exists ( dirName ):
+ if 'slave' in self.kwargs:
+ argv = ["python", "buildscripts/cleanbb.py", '--nokill', dirName]
+ else:
+ argv = ["python", "buildscripts/cleanbb.py", dirName]
+ call( argv )
+ utils.ensureDir( dirName )
+ argv = [mongodExecutable, "--port", str(self.port), "--dbpath", dirName]
+ if self.kwargs.get('smallOplog'):
+ argv += ["--master", "--oplogSize", "10"]
+ if self.slave:
+ argv += ['--slave', '--source', 'localhost:'+str(srcport)]
+ print "running " + " ".join(argv)
+ self.proc = Popen(argv)
+ if not self.didMongodStart( self.port ):
+ raise Exception( "Failed to start mongod" )
+
+ if self.slave:
+ while True:
+ argv = [shellExecutable, "--port", str(self.port), "--quiet", "--eval", 'db.printSlaveReplicationInfo()']
+ res = Popen(argv, stdout=PIPE).communicate()[0]
+ if res.find('initial sync') < 0:
+ break
+
+ def stop(self):
+ if not self.proc:
+ print >> sys.stderr, "probable bug: self.proc unset in stop()"
+ return
+ try:
+ # This function not available in Python 2.5
+ self.proc.terminate()
+ except AttributeError:
+ if os.sys.platform == "win32":
+ import win32process
+ win32process.TerminateProcess(self.proc._handle, -1)
+ else:
+ from os import kill
+ kill( self.proc.pid, 15 )
+ self.proc.wait()
+ sys.stderr.flush()
+ sys.stdout.flush()
+
+class Bug(Exception):
+ def __str__(self):
+ return 'bug in smoke.py: ' + super(Bug, self).__str__()
+
+class TestFailure(Exception):
+ pass
+
+class TestExitFailure(TestFailure):
+ def __init__(self, *args):
+ self.path = args[0]
+ self.status=args[1]
+ def __str__(self):
+ return "test %s exited with status %d" % (self.path, self.status)
+
+class TestServerFailure(TestFailure):
+ def __init__(self, *args):
+ self.path = args[0]
+ self.status = -1 # this is meaningless as an exit code, but
+ # that's the point.
+ def __str__(self):
+ return 'mongod not running after executing test %s' % self.path
+
+def checkDbHashes(master, slave):
+ # Need to pause a bit so a slave might catch up...
+ if not slave.slave:
+ raise(Bug("slave instance doesn't have slave attribute set"))
+
+ print "waiting for slave to catch up..."
+ ARB=10 # ARBITRARY
+ time.sleep(ARB)
+ while True:
+ # FIXME: it's probably better to do an empty insert and a
+ # getLastError() to force a sync.
+ argv = [shellExecutable, "--port", str(slave.port), "--quiet", "--eval", 'db.printSlaveReplicationInfo()']
+ res = Popen(argv, stdout=PIPE).communicate()[0]
+ m = re.search('(\d+)secs ', res)
+ if int(m.group(1)) > ARB: #res.find('initial sync') < 0:
+ break
+ time.sleep(3)
+
+ # FIXME: maybe make this run dbhash on all databases?
+ for mongod in [master, slave]:
+ argv = [shellExecutable, "--port", str(mongod.port), "--quiet", "--eval", "x=db.runCommand('dbhash'); printjson(x.collections)"]
+ hashstr = Popen(argv, stdout=PIPE).communicate()[0]
+ # WARNING FIXME KLUDGE et al.: this is sleazy and unsafe.
+ mongod.dict = eval(hashstr)
+
+ global lost_in_slave, lost_in_master, screwy_in_slave, replicated_dbs
+
+ for db in replicated_dbs:
+        if db not in slave.dict:
+            lost_in_slave.append(db)
+            continue
+ mhash = master.dict[db]
+ shash = slave.dict[db]
+ if mhash != shash:
+ screwy_in_slave[db] = mhash + "/" + shash
+ for db in slave.dict.keys():
+ if db not in master.dict:
+ lost_in_master.append(db)
+ replicated_dbs += master.dict.keys()
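+# (Assumed shape of the eval'd dbhash output, for illustration only:
+# something like {'test.foo': '06f3...', 'test.bar': '9a2b...'}, i.e. a
+# dict mapping collection names to hash strings, which is what the
+# comparisons above rely on.)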
+
+# Blech.
+def skipTest(path):
+ if smallOplog:
+ if os.path.basename(path) in ["cursor8.js", "indexh.js"]:
+ return True
+ return False
+
+def runTest(test):
+ (path, usedb) = test
+ (ignore, ext) = os.path.splitext(path)
+ if skipTest(path):
+ print "skippping " + path
+ return
+ if ext == ".js":
+ argv=[shellExecutable, "--port", mongodPort]
+ if not usedb:
+ argv += ["--nodb"]
+ if smallOplog:
+ argv += ["--eval", 'testingReplication = true;']
+ argv += [path]
+ elif ext in ["", ".exe"]:
+ # Blech.
+ if os.path.basename(path) in ["test", "test.exe", "perftest", "perftest.exe"]:
+ argv=[path]
+ # more blech
+ elif os.path.basename(path) == 'mongos':
+ argv=[path, "--test"]
+ else:
+ argv=[testPath and os.path.abspath(os.path.join(testPath, path)) or path,
+ "--port", mongodPort]
+ else:
+ raise Bug("fell off in extenstion case: %s" % path)
+ print " *******************************************"
+ print " Test : " + os.path.basename(path) + " ..."
+ t1=time.time()
+ # FIXME: we don't handle the case where the subprocess
+ # hangs... that's bad.
+ r = call(argv, cwd=testPath)
+ t2=time.time()
+ print " " + str((t2-t1)*1000) + "ms"
+ if r != 0:
+ raise TestExitFailure(path, r)
+ if Popen( [ mongodExecutable, "msg", "ping", mongodPort ], stdout=PIPE ).communicate()[0].count( "****ok" ) == 0:
+ raise TestServerFailure(path)
+ if call( [ mongodExecutable, "msg", "ping", mongodPort ] ) != 0:
+ raise TestServerFailure(path)
+ print ""
+
+def runTests(tests):
+ # If we're in one-mongo-per-test mode, we instantiate a nothing
+ # around the loop, and a mongod inside the loop.
+
+ # FIXME: some suites of tests start their own mongod, so don't
+ # need this. (So long as there are no conflicts with port,
+ # dbpath, etc., and so long as we shut ours down properly,
+ # starting this mongod shouldn't break anything, though.)
+ with nothing() if oneMongodPerTest else mongod(smallOplog=smallOplog) as master1:
+ with nothing() if oneMongodPerTest else (mongod(slave=True) if smallOplog else nothing()) as slave1:
+ for test in tests:
+ try:
+ with mongod(smallOplog=smallOplog) if oneMongodPerTest else nothing() as master2:
+ with mongod(slave=True) if oneMongodPerTest and smallOplog else nothing() as slave2:
+ runTest(test)
+ winners.append(test)
+ if isinstance(slave2, mongod):
+ checkDbHashes(master2, slave2)
+ except TestFailure, f:
+ try:
+ print f
+ # Record the failing test and re-raise.
+ losers[f.path] = f.status
+ raise f
+ except TestServerFailure, f:
+ if not oneMongodPerTest:
+ return 2
+ except TestFailure, f:
+ if not continueOnFailure:
+ return 1
+ if isinstance(slave1, mongod):
+ checkDbHashes(master1, slave1)
+
+ return 0
+
+def report():
+ print "%d test%s succeeded" % (len(winners), '' if len(winners) == 1 else 's')
+ num_missed = len(tests) - (len(winners) + len(losers.keys()))
+ if num_missed:
+ print "%d tests didn't get run" % num_missed
+ if losers:
+ print "The following tests failed (with exit code):"
+ for loser in losers:
+ print "%s\t%d" % (loser, losers[loser])
+
+ def missing(lst, src, dst):
+ if lst:
+ print """The following collections were present in the %s but not the %s
+at the end of testing:""" % (src, dst)
+ for db in lst:
+ print db
+ missing(lost_in_slave, "master", "slave")
+ missing(lost_in_master, "slave", "master")
+ if screwy_in_slave:
+ print """The following collections has different hashes in master and slave
+at the end of testing:"""
+ for db in screwy_in_slave.keys():
+ print "%s\t %s" % (db, screwy_in_slave[db])
+ if smallOplog and not (lost_in_master or lost_in_slave or screwy_in_slave):
+ print "replication ok for %d collections" % (len(replicated_dbs))
+ if (exit_bad or losers or lost_in_slave or lost_in_master or screwy_in_slave):
+ status = 1
+ else:
+ status = 0
+ exit (status)
+
+def expandSuites(suites):
+ globstr = None
+ global mongoRepo, tests
+ for suite in suites:
+ if suite == 'smokeAll':
+ tests = []
+            expandSuites(['smoke', 'smokePerf', 'smokeClient', 'smokeJs', 'smokeJsPerf', 'smokeJsSlowNightly', 'smokeJsSlowWeekly', 'smokeParallel', 'smokeClone', 'smokeRepl', 'smokeAuth', 'smokeSharding', 'smokeTool'])
+ break
+ if suite == 'smoke':
+ if os.sys.platform == "win32":
+ program = 'test.exe'
+ else:
+ program = 'test'
+ (globstr, usedb) = (program, False)
+ elif suite == 'smokePerf':
+ if os.sys.platform == "win32":
+ program = 'perftest.exe'
+ else:
+ program = 'perftest'
+ (globstr, usedb) = (program, False)
+ elif suite == 'smokeJs':
+ # FIXME: _runner.js seems equivalent to "[!_]*.js".
+ #(globstr, usedb) = ('_runner.js', True)
+ (globstr, usedb) = ('[!_]*.js', True)
+ elif suite == 'smokeQuota':
+ (globstr, usedb) = ('quota/*.js', True)
+ elif suite == 'smokeJsPerf':
+ (globstr, usedb) = ('perf/*.js', True)
+ elif suite == 'smokeDisk':
+ (globstr, usedb) = ('disk/*.js', True)
+ elif suite == 'smokeJsSlowNightly':
+ (globstr, usedb) = ('slowNightly/*.js', True)
+ elif suite == 'smokeJsSlowWeekly':
+ (globstr, usedb) = ('slowWeekly/*.js', True)
+ elif suite == 'smokeParallel':
+ (globstr, usedb) = ('parallel/*.js', True)
+ elif suite == 'smokeClone':
+ (globstr, usedb) = ('clone/*.js', False)
+ elif suite == 'smokeRepl':
+ (globstr, usedb) = ('repl/*.js', False)
+ elif suite == 'smokeReplSets':
+ (globstr, usedb) = ('replsets/*.js', False)
+ elif suite == 'smokeAuth':
+ (globstr, usedb) = ('auth/*.js', False)
+ elif suite == 'smokeSharding':
+ (globstr, usedb) = ('sharding/*.js', False)
+ elif suite == 'smokeTool':
+ (globstr, usedb) = ('tool/*.js', False)
+ # well, the above almost works for everything...
+ elif suite == 'smokeClient':
+ paths = ["firstExample", "secondExample", "whereExample", "authTest", "clientTest", "httpClientTest"]
+ if os.sys.platform == "win32":
+ paths = [path+'.exe' for path in paths]
+ # hack
+ tests += [(testPath and path or os.path.join(mongoRepo, path), False) for path in paths]
+ elif suite == 'mongosTest':
+ if os.sys.platform == "win32":
+ program = 'mongos.exe'
+ else:
+ program = 'mongos'
+ tests += [(os.path.join(mongoRepo, program), False)]
+ else:
+ raise Exception('unknown test suite %s' % suite)
+
+ if globstr:
+ globstr = os.path.join(mongoRepo, (os.path.join(('jstests/' if globstr.endswith('.js') else ''), globstr)))
+ paths = glob.glob(globstr)
+ paths.sort()
+ tests += [(path, usedb) for path in paths]
+ if not tests:
+ raise Exception( "no tests found" )
+ return tests
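+# Illustrative call (actual paths depend on the checkout):
+#   expandSuites(['smokeJs'])
+# globs jstests/[!_]*.js under mongoRepo and returns (path, usedb)
+# tuples like ('<repo>/jstests/foo.js', True).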
+
+def main():
+ parser = OptionParser(usage="usage: smoke.py [OPTIONS] ARGS*")
+ parser.add_option('--mode', dest='mode', default='suite',
+ help='If "files", ARGS are filenames; if "suite", ARGS are sets of tests. (default "suite")')
+    # Some of our tests hard-code pathnames, e.g., to execute, so until
+    # that changes we don't have the freedom to run from anyplace.
+# parser.add_option('--mongo-repo', dest='mongoRepo', default=None,
+# help='Top-level directory of mongo checkout to use. (default: script will make a guess)')
+ parser.add_option('--test-path', dest='testPath', default=None,
+ help="Path to the test executables to run "
+ "(currently only used for smokeClient)")
+ parser.add_option('--mongod', dest='mongodExecutable', #default='./mongod',
+ help='Path to mongod to run (default "./mongod")')
+ parser.add_option('--port', dest='mongodPort', default="32000",
+ help='Port the mongod will bind to (default 32000)')
+ parser.add_option('--mongo', dest='shellExecutable', #default="./mongo",
+ help='Path to mongo, for .js test files (default "./mongo")')
+ parser.add_option('--continue-on-failure', dest='continueOnFailure',
+ action="store_true", default=False,
+ help='If supplied, continue testing even after a test fails')
+ parser.add_option('--one-mongod-per-test', dest='oneMongodPerTest',
+ action="store_true", default=False,
+ help='If supplied, run each test in a fresh mongod')
+ parser.add_option('--from-file', dest='File',
+ help="Run tests/suites named in FILE, one test per line, '-' means stdin")
+ parser.add_option('--smoke-db-prefix', dest='smokeDbPrefix', default='',
+ help="Prefix to use for the mongods' dbpaths.")
+ parser.add_option('--small-oplog', dest='smallOplog', default=False,
+ action="store_true",
+ help='Run tests with master/slave replication & use a small oplog')
+ global tests
+ (options, tests) = parser.parse_args()
+
+# global mongoRepo
+# if options.mongoRepo:
+# pass
+# mongoRepo = options.mongoRepo
+# else:
+# prefix = ''
+# while True:
+# if os.path.exists(prefix+'buildscripts'):
+# mongoRepo = os.path.normpath(prefix)
+# break
+# else:
+# prefix += '../'
+# # FIXME: will this be a device's root directory on
+# # Windows?
+# if os.path.samefile('/', prefix):
+# raise Exception("couldn't guess the mongo repository path")
+
+ print tests
+
+ global mongoRepo, mongodExecutable, mongodPort, shellExecutable, continueOnFailure, oneMongodPerTest, smallOplog, smokeDbPrefix, testPath
+ testPath = options.testPath
+ mongodExecutable = options.mongodExecutable if options.mongodExecutable else os.path.join(mongoRepo, 'mongod')
+ mongodPort = options.mongodPort if options.mongodPort else mongodPort
+ shellExecutable = options.shellExecutable if options.shellExecutable else os.path.join(mongoRepo, 'mongo')
+ continueOnFailure = options.continueOnFailure if options.continueOnFailure else continueOnFailure
+ oneMongodPerTest = options.oneMongodPerTest if options.oneMongodPerTest else oneMongodPerTest
+ smokeDbPrefix = options.smokeDbPrefix
+ smallOplog = options.smallOplog
+
+ if options.File:
+ if options.File == '-':
+ tests = sys.stdin.readlines()
+ else:
+ with open(options.File) as f:
+ tests = f.readlines()
+ tests = [t.rstrip('\n') for t in tests]
+
+ if not tests:
+ raise Exception( "no tests specified" )
+ # If we're in suite mode, tests is a list of names of sets of tests.
+ if options.mode == 'suite':
+ # Suites: smoke, smokePerf, smokeJs, smokeQuota, smokeJsPerf,
+        # smokeJsSlow, smokeParallel, smokeClone, smokeRepl, smokeDisk
+ suites = tests
+ tests = []
+ expandSuites(suites)
+ elif options.mode == 'files':
+ tests = [(os.path.abspath(test), True) for test in tests]
+
+ runTests(tests)
+ global exit_bad
+ exit_bad = False
+
+atexit.register(report)
+
+if __name__ == "__main__":
+ main()
diff --git a/buildscripts/utils.py b/buildscripts/utils.py
index 41d6767..1ca2fdd 100644
--- a/buildscripts/utils.py
+++ b/buildscripts/utils.py
@@ -2,9 +2,50 @@
import re
import socket
import time
-
+import os
# various utilities that are handy
+def getGitBranch():
+ if not os.path.exists( ".git" ):
+ return None
+
+ version = open( ".git/HEAD" ,'r' ).read().strip()
+ if not version.startswith( "ref: " ):
+ return version
+ version = version.split( "/" )
+ version = version[len(version)-1]
+ return version
+
+def getGitBranchString( prefix="" , postfix="" ):
+ t = re.compile( '[/\\\]' ).split( os.getcwd() )
+ if len(t) > 2 and t[len(t)-1] == "mongo":
+ par = t[len(t)-2]
+ m = re.compile( ".*_([vV]\d+\.\d+)$" ).match( par )
+ if m is not None:
+ return prefix + m.group(1).lower() + postfix
+ if par.find("Nightly") > 0:
+ return ""
+
+
+ b = getGitBranch()
+ if b == None or b == "master":
+ return ""
+ return prefix + b + postfix
+
+def getGitVersion():
+ if not os.path.exists( ".git" ):
+ return "nogitversion"
+
+ version = open( ".git/HEAD" ,'r' ).read().strip()
+ if not version.startswith( "ref: " ):
+ return version
+ version = version[5:]
+ f = ".git/" + version
+ if not os.path.exists( f ):
+ return version
+ return open( f , 'r' ).read().strip()
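+# Example: if .git/HEAD contains "ref: refs/heads/master",
+# getGitVersion() returns the commit hash read from
+# .git/refs/heads/master; on a detached HEAD (no "ref: " prefix) the
+# hash in .git/HEAD is returned directly.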
+
+
def execsys( args ):
import subprocess
if isinstance( args , str ):
@@ -24,6 +65,40 @@ def getprocesslist():
r = re.compile( "[\r\n]+" )
return r.split( raw )
+
+def removeIfInList( lst , thing ):
+ if thing in lst:
+ lst.remove( thing )
+
+def findVersion( root , choices ):
+ for c in choices:
+ if ( os.path.exists( root + c ) ):
+ return root + c
+ raise "can't find a version of [" + root + "] choices: " + choices
+
+def choosePathExist( choices , default=None):
+ for c in choices:
+        if c is not None and os.path.exists( c ):
+ return c
+ return default
+
+def filterExists(paths):
+ return filter(os.path.exists, paths)
+
+def ensureDir( name ):
+ d = os.path.dirname( name )
+ if not os.path.exists( d ):
+ print( "Creating dir: " + name );
+ os.makedirs( d )
+ if not os.path.exists( d ):
+ raise "Failed to create dir: " + name
+
+
+def distinctAsString( arr ):
+ s = set()
+ for x in arr:
+ s.add( str(x) )
+ return list(s)
def checkMongoPort( port=27017 ):
sock = socket.socket()
@@ -32,6 +107,7 @@ def checkMongoPort( port=27017 ):
sock.connect(("localhost", port))
sock.close()
+
def didMongodStart( port=27017 , timeout=20 ):
while timeout > 0:
time.sleep( 1 )
@@ -41,7 +117,5 @@ def didMongodStart( port=27017 , timeout=20 ):
except Exception,e:
print( e )
timeout = timeout - 1
-
return False
-
diff --git a/client/clientOnly.cpp b/client/clientOnly.cpp
index 566095a..6178257 100644
--- a/client/clientOnly.cpp
+++ b/client/clientOnly.cpp
@@ -15,10 +15,11 @@
* limitations under the License.
*/
-#include "stdafx.h"
+#include "pch.h"
#include "../client/dbclient.h"
#include "../db/dbhelpers.h"
#include "../db/cmdline.h"
+#include "../s/shard.h"
namespace mongo {
@@ -57,10 +58,15 @@ namespace mongo {
uassert( 10256 , "no createDirectClient in clientOnly" , 0 );
return 0;
}
-/*
- auto_ptr<CursorIterator> Helpers::find( const char *ns , BSONObj query , bool requireIndex ){
- uassert( 10000 , "Helpers::find can't be used in client" , 0 );
- return auto_ptr<CursorIterator>(0);
+
+ void Shard::getAllShards( vector<Shard>& all ){
+ assert(0);
+ }
+
+ bool Shard::isAShard( const string& ident ){
+ assert(0);
+ return false;
}
-*/
+
+
}
diff --git a/client/connpool.cpp b/client/connpool.cpp
index 5a08483..dae13f6 100644
--- a/client/connpool.cpp
+++ b/client/connpool.cpp
@@ -18,76 +18,99 @@
// _ todo: reconnect?
-#include "stdafx.h"
+#include "pch.h"
#include "connpool.h"
#include "../db/commands.h"
#include "syncclusterconnection.h"
+#include "../s/shard.h"
namespace mongo {
DBConnectionPool pool;
+ DBClientBase* DBConnectionPool::_get(const string& ident) {
+ scoped_lock L(_mutex);
+
+ PoolForHost& p = _pools[ident];
+ if ( p.pool.empty() )
+ return 0;
+
+ DBClientBase *c = p.pool.top();
+ p.pool.pop();
+ return c;
+ }
+
+ DBClientBase* DBConnectionPool::_finishCreate( const string& host , DBClientBase* conn ){
+ {
+ scoped_lock L(_mutex);
+ PoolForHost& p = _pools[host];
+ p.created++;
+ }
+
+ onCreate( conn );
+ onHandedOut( conn );
+
+ return conn;
+ }
+
+ DBClientBase* DBConnectionPool::get(const ConnectionString& url) {
+ DBClientBase * c = _get( url.toString() );
+ if ( c ){
+ onHandedOut( c );
+ return c;
+ }
+
+ string errmsg;
+ c = url.connect( errmsg );
+ uassert( 13328 , (string)"dbconnectionpool: connect failed " + url.toString() + " : " + errmsg , c );
+
+ return _finishCreate( url.toString() , c );
+ }
+
DBClientBase* DBConnectionPool::get(const string& host) {
- scoped_lock L(poolMutex);
+ DBClientBase * c = _get( host );
+ if ( c ){
+ onHandedOut( c );
+ return c;
+ }
- PoolForHost *&p = pools[host];
- if ( p == 0 )
- p = new PoolForHost();
- if ( p->pool.empty() ) {
- int numCommas = DBClientBase::countCommas( host );
- DBClientBase *c;
-
- if( numCommas == 0 ) {
- DBClientConnection *cc = new DBClientConnection(true);
- log(2) << "creating new connection for pool to:" << host << endl;
- string errmsg;
- if ( !cc->connect(host.c_str(), errmsg) ) {
- delete cc;
- uassert( 11002 , (string)"dbconnectionpool: connect failed " + host , false);
- return 0;
- }
- c = cc;
- onCreate( c );
- }
- else if ( numCommas == 1 ) {
- DBClientPaired *p = new DBClientPaired();
- if( !p->connect(host) ) {
- delete p;
- uassert( 11003 , (string)"dbconnectionpool: connect failed [2] " + host , false);
- return 0;
- }
- c = p;
- }
- else if ( numCommas == 2 ) {
- c = new SyncClusterConnection( host );
- }
- else {
- uassert( 13071 , (string)"invalid hostname [" + host + "]" , 0 );
+ string errmsg;
+ ConnectionString cs = ConnectionString::parse( host , errmsg );
+ uassert( 13071 , (string)"invalid hostname [" + host + "]" + errmsg , cs.isValid() );
+
+ c = cs.connect( errmsg );
+ uassert( 11002 , (string)"dbconnectionpool: connect failed " + host + " : " + errmsg , c );
+ return _finishCreate( host , c );
+ }
+
+ DBConnectionPool::~DBConnectionPool(){
+ for ( map<string,PoolForHost>::iterator i = _pools.begin(); i != _pools.end(); i++ ){
+ PoolForHost& p = i->second;
+
+ while ( ! p.pool.empty() ){
+ DBClientBase * c = p.pool.top();
+ delete c;
+ p.pool.pop();
}
- return c;
}
- DBClientBase *c = p->pool.top();
- p->pool.pop();
- onHandedOut( c );
- return c;
}
void DBConnectionPool::flush(){
- scoped_lock L(poolMutex);
- for ( map<string,PoolForHost*>::iterator i = pools.begin(); i != pools.end(); i++ ){
- PoolForHost* p = i->second;
+ scoped_lock L(_mutex);
+ for ( map<string,PoolForHost>::iterator i = _pools.begin(); i != _pools.end(); i++ ){
+ PoolForHost& p = i->second;
vector<DBClientBase*> all;
- while ( ! p->pool.empty() ){
- DBClientBase * c = p->pool.top();
- p->pool.pop();
+ while ( ! p.pool.empty() ){
+ DBClientBase * c = p.pool.top();
+ p.pool.pop();
all.push_back( c );
bool res;
c->isMaster( res );
}
for ( vector<DBClientBase*>::iterator i=all.begin(); i != all.end(); i++ ){
- p->pool.push( *i );
+ p.pool.push( *i );
}
}
}
@@ -114,6 +137,26 @@ namespace mongo {
}
}
+ void DBConnectionPool::appendInfo( BSONObjBuilder& b ){
+ scoped_lock lk( _mutex );
+ BSONObjBuilder bb( b.subobjStart( "hosts" ) );
+ for ( map<string,PoolForHost>::iterator i=_pools.begin(); i!=_pools.end(); ++i ){
+ string s = i->first;
+ BSONObjBuilder temp( bb.subobjStart( s.c_str() ) );
+ temp.append( "available" , (int)(i->second.pool.size()) );
+ temp.appendNumber( "created" , i->second.created );
+ temp.done();
+ }
+ bb.done();
+ }
+
+ ScopedDbConnection * ScopedDbConnection::steal(){
+ assert( _conn );
+ ScopedDbConnection * n = new ScopedDbConnection( _host , _conn );
+ _conn = 0;
+ return n;
+ }
+
ScopedDbConnection::~ScopedDbConnection() {
if ( _conn ){
if ( ! _conn->isFailed() ) {
@@ -124,20 +167,44 @@ namespace mongo {
}
}
+ ScopedDbConnection::ScopedDbConnection(const Shard& shard )
+ : _host( shard.getConnString() ) , _conn( pool.get(_host) ){
+ }
+
+ ScopedDbConnection::ScopedDbConnection(const Shard* shard )
+ : _host( shard->getConnString() ) , _conn( pool.get(_host) ){
+ }
+
class PoolFlushCmd : public Command {
public:
- PoolFlushCmd() : Command( "connpoolsync" ){}
- virtual LockType locktype(){ return NONE; }
- virtual bool run(const char*, mongo::BSONObj&, std::string&, mongo::BSONObjBuilder& result, bool){
+ PoolFlushCmd() : Command( "connPoolSync" , false , "connpoolsync" ){}
+ virtual void help( stringstream &help ) const { help<<"internal"; }
+ virtual LockType locktype() const { return NONE; }
+ virtual bool run(const string&, mongo::BSONObj&, std::string&, mongo::BSONObjBuilder& result, bool){
pool.flush();
- result << "ok" << 1;
return true;
}
- virtual bool slaveOk(){
+ virtual bool slaveOk() const {
return true;
}
} poolFlushCmd;
+ class PoolStats : public Command {
+ public:
+ PoolStats() : Command( "connPoolStats" ){}
+ virtual void help( stringstream &help ) const { help<<"stats about connection pool"; }
+ virtual LockType locktype() const { return NONE; }
+ virtual bool run(const string&, mongo::BSONObj&, std::string&, mongo::BSONObjBuilder& result, bool){
+ pool.appendInfo( result );
+ return true;
+ }
+ virtual bool slaveOk() const {
+ return true;
+ }
+
+ } poolStatsCmd;
+
+
} // namespace mongo
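
The connPoolStats command added above simply serializes the per-host
"available"/"created" counters that DBConnectionPool::appendInfo() builds. A
minimal sketch of invoking it through the C++ client; the host string is
illustrative and this snippet is not part of the patch:

    #include <iostream>
    #include "client/dbclient.h"

    int main() {
        mongo::DBClientConnection c;
        std::string errmsg;
        if ( !c.connect( "localhost:27017", errmsg ) ) {   // illustrative host
            std::cout << "connect failed: " << errmsg << std::endl;
            return 1;
        }
        mongo::BSONObj info;
        // runCommand targets the admin db; "hosts" holds one subobject per
        // pooled host: { available : <n in pool> , created : <total created> }
        if ( c.runCommand( "admin", BSON( "connPoolStats" << 1 ), info ) )
            std::cout << info["hosts"].toString() << std::endl;
        return 0;
    }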
diff --git a/client/connpool.h b/client/connpool.h
index b44ff51..00570c5 100644
--- a/client/connpool.h
+++ b/client/connpool.h
@@ -19,20 +19,30 @@
#include <stack>
#include "dbclient.h"
+#include "redef_macros.h"
namespace mongo {
+ class Shard;
+
struct PoolForHost {
+ PoolForHost()
+ : created(0){}
+ PoolForHost( const PoolForHost& other ){
+ assert(other.pool.size() == 0);
+ created = other.created;
+ assert( created == 0 );
+ }
+
std::stack<DBClientBase*> pool;
+ long long created;
};
class DBConnectionHook {
public:
virtual ~DBConnectionHook(){}
-
virtual void onCreate( DBClientBase * conn ){}
virtual void onHandedOut( DBClientBase * conn ){}
-
};
/** Database connection pool.
@@ -51,33 +61,54 @@ namespace mongo {
}
*/
class DBConnectionPool {
- mongo::mutex poolMutex;
- map<string,PoolForHost*> pools; // servername -> pool
+ mongo::mutex _mutex;
+ map<string,PoolForHost> _pools; // servername -> pool
list<DBConnectionHook*> _hooks;
+
+ DBClientBase* _get( const string& ident );
+ DBClientBase* _finishCreate( const string& ident , DBClientBase* conn );
+
+ public:
+ DBConnectionPool() : _mutex("DBConnectionPool") { }
+ ~DBConnectionPool();
+
+
void onCreate( DBClientBase * conn );
void onHandedOut( DBClientBase * conn );
- public:
+
void flush();
+
DBClientBase *get(const string& host);
+ DBClientBase *get(const ConnectionString& host);
+
void release(const string& host, DBClientBase *c) {
if ( c->isFailed() ){
delete c;
return;
}
- scoped_lock L(poolMutex);
- pools[host]->pool.push(c);
+ scoped_lock L(_mutex);
+ _pools[host].pool.push(c);
}
void addHook( DBConnectionHook * hook );
+ void appendInfo( BSONObjBuilder& b );
};
-
+
extern DBConnectionPool pool;
+ class AScopedConnection : boost::noncopyable {
+ public:
+ virtual ~AScopedConnection(){}
+ virtual DBClientBase* get() = 0;
+ virtual void done() = 0;
+ virtual string getHost() const = 0;
+ };
+
/** Use to get a connection from the pool. On exceptions things
clean up nicely.
*/
- class ScopedDbConnection {
- const string host;
+ class ScopedDbConnection : public AScopedConnection {
+ const string _host;
DBClientBase *_conn;
public:
/** get the associated connection object */
@@ -85,19 +116,42 @@ namespace mongo {
uassert( 11004 , "did you call done already" , _conn );
return _conn;
}
-
+
/** get the associated connection object */
DBClientBase& conn() {
uassert( 11005 , "did you call done already" , _conn );
return *_conn;
}
+ /** get the associated connection object */
+ DBClientBase* get() {
+ uassert( 13102 , "did you call done already" , _conn );
+ return _conn;
+ }
+
+ ScopedDbConnection()
+ : _host( "" ) , _conn(0) {
+ }
+
/** throws UserException if can't connect */
- ScopedDbConnection(const string& _host) :
- host(_host), _conn( pool.get(_host) ) {
- //cout << " for: " << _host << " got conn: " << _conn << endl;
+ ScopedDbConnection(const string& host)
+ : _host(host), _conn( pool.get(host) ) {
+ }
+
+ ScopedDbConnection(const string& host, DBClientBase* conn )
+ : _host( host ) , _conn( conn ){
+ }
+
+ ScopedDbConnection(const Shard& shard );
+ ScopedDbConnection(const Shard* shard );
+
+ ScopedDbConnection(const ConnectionString& url )
+ : _host(url.toString()), _conn( pool.get(url) ) {
}
+
+ string getHost() const { return _host; }
+
/** Force closure of the connection. You should call this if you leave it in
a bad state. Destructor will do this too, but it is verbose.
*/
@@ -121,12 +175,16 @@ namespace mongo {
kill();
else
*/
- pool.release(host, _conn);
+ pool.release(_host, _conn);
_conn = 0;
}
+ ScopedDbConnection * steal();
+
~ScopedDbConnection();
};
} // namespace mongo
+
+#include "undef_macros.h"
diff --git a/client/constants.h b/client/constants.h
new file mode 100644
index 0000000..66aa9b1
--- /dev/null
+++ b/client/constants.h
@@ -0,0 +1,26 @@
+// constants.h
+
+#pragma once
+
+namespace mongo {
+
+    /* query results include a 32 bit result flag word consisting of these bits */
+ enum ResultFlagType {
+ /* returned, with zero results, when getMore is called but the cursor id
+ is not valid at the server. */
+ ResultFlag_CursorNotFound = 1,
+
+ /* { $err : ... } is being returned */
+ ResultFlag_ErrSet = 2,
+
+ /* Have to update config from the server, usually $err is also set */
+ ResultFlag_ShardConfigStale = 4,
+
+    /* for backward compatibility: this lets us know the server supports
+ the QueryOption_AwaitData option. if it doesn't, a repl slave client should sleep
+ a little between getMore's.
+ */
+ ResultFlag_AwaitCapable = 8
+ };
+
+}
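
A sketch of how a client interprets this flag word once it has been pulled out
of an OP_REPLY header; the helper name is illustrative:

    #include "client/constants.h"

    // returns true when the reply's documents can be consumed as-is
    bool replyUsable( int resultFlags ) {
        if ( resultFlags & mongo::ResultFlag_CursorNotFound )
            return false;    // cursor id no longer valid at the server
        if ( resultFlags & mongo::ResultFlag_ErrSet )
            return false;    // first document is { $err : ... }
        return true;
    }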
diff --git a/client/dbclient.cpp b/client/dbclient.cpp
index f617f7c..04b6147 100644
--- a/client/dbclient.cpp
+++ b/client/dbclient.cpp
@@ -15,22 +15,89 @@
* limitations under the License.
*/
-#include "stdafx.h"
+#include "pch.h"
#include "../db/pdfile.h"
#include "dbclient.h"
-#include "../util/builder.h"
+#include "../bson/util/builder.h"
#include "../db/jsobj.h"
#include "../db/json.h"
#include "../db/instance.h"
#include "../util/md5.hpp"
#include "../db/dbmessage.h"
#include "../db/cmdline.h"
+#include "connpool.h"
+#include "../s/util.h"
+#include "syncclusterconnection.h"
namespace mongo {
+ DBClientBase* ConnectionString::connect( string& errmsg ) const {
+ switch ( _type ){
+ case MASTER: {
+ DBClientConnection * c = new DBClientConnection(true);
+ log(1) << "creating new connection to:" << _servers[0] << endl;
+ if ( ! c->connect( _servers[0] , errmsg ) ) {
+ delete c;
+ return 0;
+ }
+ return c;
+ }
+
+ case PAIR:
+ case SET: {
+ DBClientReplicaSet * set = new DBClientReplicaSet( _setName , _servers );
+ if( ! set->connect() ){
+ delete set;
+ errmsg = "connect failed to set ";
+ errmsg += toString();
+ return 0;
+ }
+ return set;
+ }
+
+ case SYNC: {
+            // TODO: don't copy
+ list<HostAndPort> l;
+ for ( unsigned i=0; i<_servers.size(); i++ )
+ l.push_back( _servers[i] );
+ return new SyncClusterConnection( l );
+ }
+
+ case INVALID:
+ throw UserException( 13421 , "trying to connect to invalid ConnectionString" );
+ break;
+ }
+
+ assert( 0 );
+ return 0;
+ }
+
+ ConnectionString ConnectionString::parse( const string& host , string& errmsg ){
+
+ string::size_type i = host.find( '/' );
+ if ( i != string::npos ){
+ // replica set
+ return ConnectionString( SET , host.substr( i + 1 ) , host.substr( 0 , i ) );
+ }
+
+ int numCommas = DBClientBase::countCommas( host );
+
+ if( numCommas == 0 )
+ return ConnectionString( HostAndPort( host ) );
+
+ if ( numCommas == 1 )
+ return ConnectionString( PAIR , host );
+
+ if ( numCommas == 2 )
+ return ConnectionString( SYNC , host );
+
+ errmsg = (string)"invalid hostname [" + host + "]";
+ return ConnectionString(); // INVALID
+ }
+
Query& Query::where(const string &jscode, BSONObj scope) {
/* use where() before sort() and hint() and explain(), else this will assert. */
- assert( !obj.hasField("query") );
+ assert( ! isComplex() );
BSONObjBuilder b;
b.appendElements(obj);
b.appendWhere(jscode, scope);
@@ -39,7 +106,7 @@ namespace mongo {
}
void Query::makeComplex() {
- if ( obj.hasElement( "query" ) )
+ if ( isComplex() )
return;
BSONObjBuilder b;
b.append( "query", obj );
@@ -76,19 +143,36 @@ namespace mongo {
return *this;
}
- bool Query::isComplex() const{
- return obj.hasElement( "query" );
+ bool Query::isComplex( bool * hasDollar ) const{
+ if ( obj.hasElement( "query" ) ){
+ if ( hasDollar )
+ hasDollar[0] = false;
+ return true;
+ }
+
+ if ( obj.hasElement( "$query" ) ){
+ if ( hasDollar )
+ hasDollar[0] = true;
+ return true;
+ }
+
+ return false;
}
BSONObj Query::getFilter() const {
- if ( ! isComplex() )
+ bool hasDollar;
+ if ( ! isComplex( &hasDollar ) )
return obj;
- return obj.getObjectField( "query" );
+
+ return obj.getObjectField( hasDollar ? "$query" : "query" );
}
BSONObj Query::getSort() const {
if ( ! isComplex() )
return BSONObj();
- return obj.getObjectField( "orderby" );
+ BSONObj ret = obj.getObjectField( "orderby" );
+ if (ret.isEmpty())
+ ret = obj.getObjectField( "$orderby" );
+ return ret;
}
BSONObj Query::getHint() const {
if ( ! isComplex() )
@@ -109,6 +193,17 @@ namespace mongo {
return o["ok"].trueValue();
}
+ enum QueryOptions DBClientWithCommands::availableOptions() {
+ if ( !_haveCachedAvailableOptions ) {
+ BSONObj ret;
+ if ( runCommand( "admin", BSON( "availablequeryoptions" << 1 ), ret ) ) {
+ _cachedAvailableOptions = ( enum QueryOptions )( ret.getIntField( "options" ) );
+ }
+ _haveCachedAvailableOptions = true;
+ }
+ return _cachedAvailableOptions;
+ }
+
inline bool DBClientWithCommands::runCommand(const string &dbname, const BSONObj& cmd, BSONObj &info, int options) {
string ns = dbname + ".$cmd";
info = findOne(ns, cmd, 0 , options);
@@ -133,7 +228,7 @@ namespace mongo {
BSONObj res;
if( !runCommand(ns.db.c_str(), cmd, res, options) )
uasserted(11010,string("count fails:") + res.toString());
- return res.getIntField("n");
+ return res["n"].numberLong();
}
BSONObj getlasterrorcmdobj = fromjson("{getlasterror:1}");
@@ -146,10 +241,14 @@ namespace mongo {
string DBClientWithCommands::getLastError() {
BSONObj info = getLastErrorDetailed();
+ return getLastErrorString( info );
+ }
+
+ string DBClientWithCommands::getLastErrorString( const BSONObj& info ){
BSONElement e = info["err"];
if( e.eoo() ) return "";
if( e.type() == Object ) return e.toString();
- return e.str();
+ return e.str();
}
BSONObj getpreverrorcmdobj = fromjson("{getpreverror:1}");
@@ -223,13 +322,14 @@ namespace mongo {
bool DBClientWithCommands::isMaster(bool& isMaster, BSONObj *info) {
BSONObj o;
- if ( info == 0 ) info = &o;
+ if ( info == 0 )
+ info = &o;
bool ok = runCommand("admin", ismastercmdobj, *info);
- isMaster = (info->getIntField("ismaster") == 1);
+ isMaster = info->getField("ismaster").trueValue();
return ok;
}
- bool DBClientWithCommands::createCollection(const string &ns, unsigned size, bool capped, int max, BSONObj *info) {
+ bool DBClientWithCommands::createCollection(const string &ns, long long size, bool capped, int max, BSONObj *info) {
BSONObj o;
if ( info == 0 ) info = &o;
BSONObjBuilder b;
@@ -346,64 +446,9 @@ namespace mongo {
string db = nsGetDB( ns ) + ".system.namespaces";
BSONObj q = BSON( "name" << ns );
- return count( db.c_str() , q );
- }
-
-
- void testSort() {
- DBClientConnection c;
- string err;
- if ( !c.connect("localhost", err) ) {
- out() << "can't connect to server " << err << endl;
- return;
- }
-
- cout << "findOne returns:" << endl;
- cout << c.findOne("test.foo", QUERY( "x" << 3 ) ).toString() << endl;
- cout << c.findOne("test.foo", QUERY( "x" << 3 ).sort("name") ).toString() << endl;
-
- }
-
- /* TODO: unit tests should run this? */
- void testDbEval() {
- DBClientConnection c;
- string err;
- if ( !c.connect("localhost", err) ) {
- out() << "can't connect to server " << err << endl;
- return;
- }
-
- if( !c.auth("dwight", "u", "p", err) ) {
- out() << "can't authenticate " << err << endl;
- return;
- }
-
- BSONObj info;
- BSONElement retValue;
- BSONObjBuilder b;
- b.append("0", 99);
- BSONObj args = b.done();
- bool ok = c.eval("dwight", "function() { return args[0]; }", info, retValue, &args);
- out() << "eval ok=" << ok << endl;
- out() << "retvalue=" << retValue.toString() << endl;
- out() << "info=" << info.toString() << endl;
-
- out() << endl;
-
- int x = 3;
- assert( c.eval("dwight", "function() { return 3; }", x) );
-
- out() << "***\n";
-
- BSONObj foo = fromjson("{\"x\":7}");
- out() << foo.toString() << endl;
- int res=0;
- ok = c.eval("dwight", "function(parm1) { return parm1.x; }", foo, res);
- out() << ok << " retval:" << res << endl;
+ return count( db.c_str() , q ) != 0;
}
- void testPaired();
-
/* --- dbclientconnection --- */
bool DBClientConnection::auth(const string &dbname, const string &username, const string &password_text, string& errmsg, bool digestPassword) {
@@ -422,48 +467,42 @@ namespace mongo {
return DBClientBase::auth(dbname, username, password.c_str(), errmsg, false);
}
- BSONObj DBClientInterface::findOne(const string &ns, Query query, const BSONObj *fieldsToReturn, int queryOptions) {
+ BSONObj DBClientInterface::findOne(const string &ns, const Query& query, const BSONObj *fieldsToReturn, int queryOptions) {
auto_ptr<DBClientCursor> c =
this->query(ns, query, 1, 0, fieldsToReturn, queryOptions);
- massert( 10276 , "DBClientBase::findOne: transport error", c.get() );
+ uassert( 10276 , "DBClientBase::findOne: transport error", c.get() );
+
+ if ( c->hasResultFlag( ResultFlag_ShardConfigStale ) )
+ throw StaleConfigException( ns , "findOne has stale config" );
if ( !c->more() )
return BSONObj();
- return c->next().copy();
+ return c->nextSafe().copy();
+ }
+
+ bool DBClientConnection::connect(const HostAndPort& server, string& errmsg){
+ _server = server;
+ _serverString = _server.toString();
+ return _connect( errmsg );
}
- bool DBClientConnection::connect(const string &_serverAddress, string& errmsg) {
- serverAddress = _serverAddress;
+ bool DBClientConnection::_connect( string& errmsg ){
+ _serverString = _server.toString();
+ // we keep around SockAddr for connection life -- maybe MessagingPort
+ // requires that?
+ server.reset(new SockAddr(_server.host().c_str(), _server.port()));
+ p.reset(new MessagingPort( _timeout, _logLevel ));
- string ip;
- int port;
- size_t idx = serverAddress.find( ":" );
- if ( idx != string::npos ) {
- port = strtol( serverAddress.substr( idx + 1 ).c_str(), 0, 10 );
- ip = serverAddress.substr( 0 , idx );
- ip = hostbyname(ip.c_str());
- } else {
- port = CmdLine::DefaultDBPort;
- ip = hostbyname( serverAddress.c_str() );
- }
- if( ip.empty() ) {
- stringstream ss;
- ss << "client connect: couldn't parse/resolve hostname: " << _serverAddress;
- errmsg = ss.str();
+ if (server->getAddr() == "0.0.0.0"){
failed = true;
return false;
}
- // we keep around SockAddr for connection life -- maybe MessagingPort
- // requires that?
- server = auto_ptr<SockAddr>(new SockAddr(ip.c_str(), port));
- p = auto_ptr<MessagingPort>(new MessagingPort());
-
if ( !p->connect(*server) ) {
stringstream ss;
- ss << "couldn't connect to server " << serverAddress << " " << ip << ":" << port;
+ ss << "couldn't connect to server " << _serverString << '}';
errmsg = ss.str();
failed = true;
return false;
@@ -480,22 +519,21 @@ namespace mongo {
return;
lastReconnectTry = time(0);
- log() << "trying reconnect to " << serverAddress << endl;
+ log(_logLevel) << "trying reconnect to " << _serverString << endl;
string errmsg;
- string tmp = serverAddress;
failed = false;
- if ( !connect(tmp.c_str(), errmsg) ) {
- log() << "reconnect " << serverAddress << " failed " << errmsg << endl;
+ if ( ! _connect(errmsg) ) {
+ log(_logLevel) << "reconnect " << _serverString << " failed " << errmsg << endl;
return;
}
- log() << "reconnect " << serverAddress << " ok" << endl;
+ log(_logLevel) << "reconnect " << _serverString << " ok" << endl;
for( map< string, pair<string,string> >::iterator i = authCache.begin(); i != authCache.end(); i++ ) {
const char *dbname = i->first.c_str();
const char *username = i->second.first.c_str();
const char *password = i->second.second.c_str();
if( !DBClientBase::auth(dbname, username, password, errmsg, false) )
- log() << "reconnect: auth failed db:" << dbname << " user:" << username << ' ' << errmsg << '\n';
+ log(_logLevel) << "reconnect: auth failed db:" << dbname << " user:" << username << ' ' << errmsg << '\n';
}
}
@@ -516,13 +554,76 @@ namespace mongo {
return auto_ptr< DBClientCursor >( 0 );
}
+ struct DBClientFunConvertor {
+ void operator()( DBClientCursorBatchIterator &i ) {
+ while( i.moreInCurrentBatch() ) {
+ _f( i.nextSafe() );
+ }
+ }
+ boost::function<void(const BSONObj &)> _f;
+ };
+
+ unsigned long long DBClientConnection::query( boost::function<void(const BSONObj&)> f, const string& ns, Query query, const BSONObj *fieldsToReturn, int queryOptions ) {
+ DBClientFunConvertor fun;
+ fun._f = f;
+ boost::function<void(DBClientCursorBatchIterator &)> ptr( fun );
+ return DBClientConnection::query( ptr, ns, query, fieldsToReturn, queryOptions );
+ }
+
+ unsigned long long DBClientConnection::query( boost::function<void(DBClientCursorBatchIterator &)> f, const string& ns, Query query, const BSONObj *fieldsToReturn, int queryOptions ) {
+ // mask options
+ queryOptions &= (int)( QueryOption_NoCursorTimeout | QueryOption_SlaveOk );
+ unsigned long long n = 0;
+
+ bool doExhaust = ( availableOptions() & QueryOption_Exhaust );
+ if ( doExhaust ) {
+ queryOptions |= (int)QueryOption_Exhaust;
+ }
+ auto_ptr<DBClientCursor> c( this->query(ns, query, 0, 0, fieldsToReturn, queryOptions) );
+ massert( 13386, "socket error for mapping query", c.get() );
+
+ if ( !doExhaust ) {
+ while( c->more() ) {
+ DBClientCursorBatchIterator i( *c );
+ f( i );
+ n += i.n();
+ }
+ return n;
+ }
+
+ try {
+ while( 1 ) {
+ while( c->moreInCurrentBatch() ) {
+ DBClientCursorBatchIterator i( *c );
+ f( i );
+ n += i.n();
+ }
+
+ if( c->getCursorId() == 0 )
+ break;
+
+ c->exhaustReceiveMore();
+ }
+ }
+ catch(std::exception&) {
+ /* connection CANNOT be used anymore as more data may be on the way from the server.
+ we have to reconnect.
+ */
+ failed = true;
+ p->shutdown();
+ throw;
+ }
+
+ return n;
+ }
+
void DBClientBase::insert( const string & ns , BSONObj obj ) {
Message toSend;
BufBuilder b;
int opts = 0;
- b.append( opts );
- b.append( ns );
+ b.appendNum( opts );
+ b.appendStr( ns );
obj.appendSelfToBufBuilder( b );
toSend.setData( dbInsert , b.buf() , b.len() );
@@ -535,8 +636,8 @@ namespace mongo {
BufBuilder b;
int opts = 0;
- b.append( opts );
- b.append( ns );
+ b.appendNum( opts );
+ b.appendStr( ns );
for( vector< BSONObj >::const_iterator i = v.begin(); i != v.end(); ++i )
i->appendSelfToBufBuilder( b );
@@ -550,13 +651,13 @@ namespace mongo {
BufBuilder b;
int opts = 0;
- b.append( opts );
- b.append( ns );
+ b.appendNum( opts );
+ b.appendStr( ns );
int flags = 0;
if ( justOne )
- flags |= 1;
- b.append( flags );
+ flags |= RemoveOption_JustOne;
+ b.appendNum( flags );
obj.obj.appendSelfToBufBuilder( b );
@@ -568,13 +669,13 @@ namespace mongo {
void DBClientBase::update( const string & ns , Query query , BSONObj obj , bool upsert , bool multi ) {
BufBuilder b;
- b.append( (int)0 ); // reserved
- b.append( ns );
+ b.appendNum( (int)0 ); // reserved
+ b.appendStr( ns );
int flags = 0;
if ( upsert ) flags |= UpdateOption_Upsert;
if ( multi ) flags |= UpdateOption_Multi;
- b.append( flags );
+ b.appendNum( flags );
query.obj.appendSelfToBufBuilder( b );
obj.appendSelfToBufBuilder( b );
@@ -599,7 +700,7 @@ namespace mongo {
if ( ! runCommand( nsToDatabase( ns.c_str() ) ,
BSON( "deleteIndexes" << NamespaceString( ns ).coll << "index" << indexName ) ,
info ) ){
- log() << "dropIndex failed: " << info << endl;
+ log(_logLevel) << "dropIndex failed: " << info << endl;
uassert( 10007 , "dropIndex failed" , 0 );
}
resetIndexCache();
@@ -684,15 +785,21 @@ namespace mongo {
/* -- DBClientCursor ---------------------------------------------- */
+#ifdef _DEBUG
+#define CHECK_OBJECT( o , msg ) massert( 10337 , (string)"object not valid" + (msg) , (o).isValid() )
+#else
+#define CHECK_OBJECT( o , msg )
+#endif
+
void assembleRequest( const string &ns, BSONObj query, int nToReturn, int nToSkip, const BSONObj *fieldsToReturn, int queryOptions, Message &toSend ) {
CHECK_OBJECT( query , "assembleRequest query" );
// see query.h for the protocol we are using here.
BufBuilder b;
int opts = queryOptions;
- b.append(opts);
- b.append(ns.c_str());
- b.append(nToSkip);
- b.append(nToReturn);
+ b.appendNum(opts);
+ b.appendStr(ns);
+ b.appendNum(nToSkip);
+ b.appendNum(nToReturn);
query.appendSelfToBufBuilder(b);
if ( fieldsToReturn )
fieldsToReturn->appendSelfToBufBuilder(b);
@@ -713,6 +820,10 @@ namespace mongo {
port().piggyBack( toSend );
}
+ void DBClientConnection::recv( Message &m ) {
+ port().recv(m);
+ }
+
bool DBClientConnection::call( Message &toSend, Message &response, bool assertOk ) {
/* todo: this is very ugly messagingport::call returns an error code AND can throw
an exception. we should make it return void and just throw an exception anytime
@@ -722,7 +833,7 @@ namespace mongo {
if ( !port().call(toSend, response) ) {
failed = true;
if ( assertOk )
- massert( 10278 , "dbclient error communicating with server", false);
+ uassert( 10278 , "dbclient error communicating with server", false);
return false;
}
}
@@ -736,178 +847,128 @@ namespace mongo {
void DBClientConnection::checkResponse( const char *data, int nReturned ) {
/* check for errors. the only one we really care about at
this stage is "not master" */
- if ( clientPaired && nReturned ) {
+ if ( clientSet && nReturned ) {
+ assert(data);
BSONObj o(data);
BSONElement e = o.firstElement();
if ( strcmp(e.fieldName(), "$err") == 0 &&
e.type() == String && strncmp(e.valuestr(), "not master", 10) == 0 ) {
- clientPaired->isntMaster();
+ clientSet->isntMaster();
}
}
}
- int DBClientCursor::nextBatchSize(){
- if ( nToReturn == 0 )
- return batchSize;
- if ( batchSize == 0 )
- return nToReturn;
+ void DBClientConnection::killCursor( long long cursorId ){
+ BufBuilder b;
+ b.appendNum( (int)0 ); // reserved
+ b.appendNum( (int)1 ); // number
+ b.appendNum( cursorId );
- return batchSize < nToReturn ? batchSize : nToReturn;
- }
-
- bool DBClientCursor::init() {
- Message toSend;
- if ( !cursorId ) {
- assembleRequest( ns, query, nextBatchSize() , nToSkip, fieldsToReturn, opts, toSend );
- } else {
- BufBuilder b;
- b.append( opts );
- b.append( ns.c_str() );
- b.append( nToReturn );
- b.append( cursorId );
- toSend.setData( dbGetMore, b.buf(), b.len() );
- }
- if ( !connector->call( toSend, *m, false ) )
- return false;
- if ( ! m->data )
- return false;
- dataReceived();
- return true;
+ Message m;
+ m.setData( dbKillCursors , b.buf() , b.len() );
+
+ sayPiggyBack( m );
}
- void DBClientCursor::requestMore() {
- assert( cursorId && pos == nReturned );
-
- if (haveLimit){
- nToReturn -= nReturned;
- assert(nToReturn > 0);
- }
- BufBuilder b;
- b.append(opts);
- b.append(ns.c_str());
- b.append(nextBatchSize());
- b.append(cursorId);
+ /* --- class dbclientpaired --- */
- Message toSend;
- toSend.setData(dbGetMore, b.buf(), b.len());
- auto_ptr<Message> response(new Message());
- connector->call( toSend, *response );
-
- m = response;
- dataReceived();
- }
-
- void DBClientCursor::dataReceived() {
- QueryResult *qr = (QueryResult *) m->data;
- resultFlags = qr->resultFlags();
- if ( qr->resultFlags() & QueryResult::ResultFlag_CursorNotFound ) {
- // cursor id no longer valid at the server.
- assert( qr->cursorId == 0 );
- cursorId = 0; // 0 indicates no longer valid (dead)
- // TODO: should we throw a UserException here???
- }
- if ( cursorId == 0 || ! ( opts & QueryOption_CursorTailable ) ) {
- // only set initially: we don't want to kill it on end of data
- // if it's a tailable cursor
- cursorId = qr->cursorId;
- }
- nReturned = qr->nReturned;
- pos = 0;
- data = qr->data();
-
- connector->checkResponse( data, nReturned );
- /* this assert would fire the way we currently work:
- assert( nReturned || cursorId == 0 );
- */
+ string DBClientReplicaSet::toString() {
+ return getServerAddress();
}
- /** If true, safe to call next(). Requests more from server if necessary. */
- bool DBClientCursor::more() {
- if ( !_putBack.empty() )
- return true;
+ DBClientReplicaSet::DBClientReplicaSet( const string& name , const vector<HostAndPort>& servers )
+ : _name( name ) , _currentMaster( 0 ), _servers( servers ){
- if (haveLimit && pos >= nToReturn)
- return false;
-
- if ( pos < nReturned )
- return true;
-
- if ( cursorId == 0 )
- return false;
-
- requestMore();
- return pos < nReturned;
+ for ( unsigned i=0; i<_servers.size(); i++ )
+ _conns.push_back( new DBClientConnection( true , this ) );
}
-
- BSONObj DBClientCursor::next() {
- assert( more() );
- if ( !_putBack.empty() ) {
- BSONObj ret = _putBack.top();
- _putBack.pop();
- return ret;
- }
- pos++;
- BSONObj o(data);
- data += o.objsize();
- return o;
- }
-
- DBClientCursor::~DBClientCursor() {
- DESTRUCTOR_GUARD (
- if ( cursorId && _ownCursor ) {
- BufBuilder b;
- b.append( (int)0 ); // reserved
- b.append( (int)1 ); // number
- b.append( cursorId );
-
- Message m;
- m.setData( dbKillCursors , b.buf() , b.len() );
-
- connector->sayPiggyBack( m );
- }
- );
+
+ DBClientReplicaSet::~DBClientReplicaSet(){
+ for ( unsigned i=0; i<_conns.size(); i++ )
+ delete _conns[i];
+ _conns.clear();
}
-
- /* --- class dbclientpaired --- */
-
- string DBClientPaired::toString() {
- stringstream ss;
- ss << "state: " << master << '\n';
- ss << "left: " << left.toStringLong() << '\n';
- ss << "right: " << right.toStringLong() << '\n';
+
+ string DBClientReplicaSet::getServerAddress() const {
+ StringBuilder ss;
+ if ( _name.size() )
+ ss << _name << "/";
+
+ for ( unsigned i=0; i<_servers.size(); i++ ){
+ if ( i > 0 )
+ ss << ",";
+ ss << _servers[i].toString();
+ }
return ss.str();
}
-#pragma warning(disable: 4355)
- DBClientPaired::DBClientPaired() :
- left(true, this), right(true, this)
- {
- master = NotSetL;
- }
-#pragma warning(default: 4355)
-
/* find which server, the left or right, is currently master mode */
- void DBClientPaired::_checkMaster() {
+ void DBClientReplicaSet::_checkMaster() {
+
+ bool triedQuickCheck = false;
+
+ log( _logLevel + 1) << "_checkMaster on: " << toString() << endl;
for ( int retry = 0; retry < 2; retry++ ) {
- int x = master;
- for ( int pass = 0; pass < 2; pass++ ) {
- DBClientConnection& c = x == 0 ? left : right;
+ for ( unsigned i=0; i<_conns.size(); i++ ){
+ DBClientConnection * c = _conns[i];
try {
bool im;
BSONObj o;
- c.isMaster(im, &o);
+ c->isMaster(im, &o);
+
if ( retry )
- log() << "checkmaster: " << c.toString() << ' ' << o.toString() << '\n';
+ log(_logLevel) << "checkmaster: " << c->toString() << ' ' << o << '\n';
+
+ string maybePrimary;
+ if ( o["hosts"].type() == Array ){
+ if ( o["primary"].type() == String )
+ maybePrimary = o["primary"].String();
+
+ BSONObjIterator hi(o["hosts"].Obj());
+ while ( hi.more() ){
+ string toCheck = hi.next().String();
+ int found = -1;
+ for ( unsigned x=0; x<_servers.size(); x++ ){
+ if ( toCheck == _servers[x].toString() ){
+ found = x;
+ break;
+ }
+ }
+
+ if ( found == -1 ){
+ HostAndPort h( toCheck );
+ _servers.push_back( h );
+ _conns.push_back( new DBClientConnection( true, this ) );
+ string temp;
+ _conns[ _conns.size() - 1 ]->connect( h , temp );
+ log( _logLevel ) << "updated set to: " << toString() << endl;
+ }
+
+ }
+ }
+
if ( im ) {
- master = (State) (x + 2);
+ _currentMaster = c;
return;
}
+
+ if ( maybePrimary.size() && ! triedQuickCheck ){
+ for ( unsigned x=0; x<_servers.size(); x++ ){
+                        if ( _servers[x].toString() != maybePrimary )
+ continue;
+ triedQuickCheck = true;
+ _conns[x]->isMaster( im , &o );
+ if ( im ){
+ _currentMaster = _conns[x];
+ return;
+ }
+ }
+ }
}
- catch (AssertionException&) {
+ catch ( std::exception& e ) {
if ( retry )
- log() << "checkmaster: caught exception " << c.toString() << '\n';
+ log(_logLevel) << "checkmaster: caught exception " << c->toString() << ' ' << e.what() << endl;
}
- x = x^1;
}
sleepsecs(1);
}
@@ -915,36 +976,54 @@ namespace mongo {
uassert( 10009 , "checkmaster: no master found", false);
}
- inline DBClientConnection& DBClientPaired::checkMaster() {
- if ( master > NotSetR ) {
+ DBClientConnection * DBClientReplicaSet::checkMaster() {
+ if ( _currentMaster ){
// a master is selected. let's just make sure connection didn't die
- DBClientConnection& c = master == Left ? left : right;
- if ( !c.isFailed() )
- return c;
- // after a failure, on the next checkMaster, start with the other
- // server -- presumably it took over. (not critical which we check first,
- // just will make the failover slightly faster if we guess right)
- master = master == Left ? NotSetR : NotSetL;
+ if ( ! _currentMaster->isFailed() )
+ return _currentMaster;
+ _currentMaster = 0;
}
_checkMaster();
- assert( master > NotSetR );
- return master == Left ? left : right;
+ assert( _currentMaster );
+ return _currentMaster;
}
- DBClientConnection& DBClientPaired::slaveConn(){
- DBClientConnection& m = checkMaster();
- assert( ! m.isFailed() );
- return master == Left ? right : left;
+ DBClientConnection& DBClientReplicaSet::masterConn(){
+ return *checkMaster();
}
- bool DBClientPaired::connect(const string &serverHostname1, const string &serverHostname2) {
+ DBClientConnection& DBClientReplicaSet::slaveConn(){
+ DBClientConnection * m = checkMaster();
+ assert( ! m->isFailed() );
+
+ DBClientConnection * failedSlave = 0;
+
+ for ( unsigned i=0; i<_conns.size(); i++ ){
+ if ( m == _conns[i] )
+ continue;
+ failedSlave = _conns[i];
+ if ( _conns[i]->isFailed() )
+ continue;
+ return *_conns[i];
+ }
+
+ assert(failedSlave);
+ return *failedSlave;
+ }
+
+ bool DBClientReplicaSet::connect(){
string errmsg;
- bool l = left.connect(serverHostname1, errmsg);
- bool r = right.connect(serverHostname2, errmsg);
- master = l ? NotSetL : NotSetR;
- if ( !l && !r ) // it would be ok to fall through, but checkMaster will then try an immediate reconnect which is slow
+
+ bool anyGood = false;
+ for ( unsigned i=0; i<_conns.size(); i++ ){
+ if ( _conns[i]->connect( _servers[i] , errmsg ) )
+ anyGood = true;
+ }
+
+ if ( ! anyGood )
return false;
+
try {
checkMaster();
}
@@ -954,61 +1033,44 @@ namespace mongo {
return true;
}
- bool DBClientPaired::connect(string hostpairstring) {
- size_t comma = hostpairstring.find( "," );
- uassert( 10010 , "bad hostpairstring", comma != string::npos);
- return connect( hostpairstring.substr( 0 , comma ) , hostpairstring.substr( comma + 1 ) );
- }
-
- bool DBClientPaired::auth(const string &dbname, const string &username, const string &pwd, string& errmsg) {
- DBClientConnection& m = checkMaster();
- if( !m.auth(dbname, username, pwd, errmsg) )
+ bool DBClientReplicaSet::auth(const string &dbname, const string &username, const string &pwd, string& errmsg, bool digestPassword ) {
+ DBClientConnection * m = checkMaster();
+ if( !m->auth(dbname, username, pwd, errmsg, digestPassword ) )
return false;
+
        /* we try to authenticate with the other half of the pair -- even if down, that way the authInfo is cached. */
- string e;
- try {
- if( &m == &left )
- right.auth(dbname, username, pwd, e);
- else
- left.auth(dbname, username, pwd, e);
- }
- catch( AssertionException&) {
- }
+ for ( unsigned i=0; i<_conns.size(); i++ ){
+ if ( _conns[i] == m )
+ continue;
+ try {
+ string e;
+ _conns[i]->auth( dbname , username , pwd , e , digestPassword );
+ }
+ catch ( AssertionException& ){
+ }
+ }
+
return true;
}
- auto_ptr<DBClientCursor> DBClientPaired::query(const string &a, Query b, int c, int d,
- const BSONObj *e, int f, int g)
- {
- return checkMaster().query(a,b,c,d,e,f,g);
- }
-
- BSONObj DBClientPaired::findOne(const string &a, Query b, const BSONObj *c, int d) {
- return checkMaster().findOne(a,b,c,d);
- }
-
- void testPaired() {
- DBClientPaired p;
- log() << "connect returns " << p.connect("localhost:27017", "localhost:27018") << endl;
-
- //DBClientConnection p(true);
- string errmsg;
- // log() << "connect " << p.connect("localhost", errmsg) << endl;
- log() << "auth " << p.auth("dwight", "u", "p", errmsg) << endl;
-
- while( 1 ) {
- sleepsecs(3);
- try {
- log() << "findone returns " << p.findOne("dwight.foo", BSONObj()).toString() << endl;
- sleepsecs(3);
- BSONObj info;
- bool im;
- log() << "ismaster returns " << p.isMaster(im,&info) << " info: " << info.toString() << endl;
- }
- catch(...) {
- cout << "caught exception" << endl;
- }
- }
- }
+ auto_ptr<DBClientCursor> DBClientReplicaSet::query(const string &a, Query b, int c, int d,
+ const BSONObj *e, int f, int g){
+ // TODO: if slave ok is set go to a slave
+ return checkMaster()->query(a,b,c,d,e,f,g);
+ }
+ BSONObj DBClientReplicaSet::findOne(const string &a, const Query& b, const BSONObj *c, int d) {
+ return checkMaster()->findOne(a,b,c,d);
+ }
+
+ bool serverAlive( const string &uri ) {
+ DBClientConnection c( false, 0, 20 ); // potentially the connection to server could fail while we're checking if it's alive - so use timeouts
+ string err;
+ if ( !c.connect( uri, err ) )
+ return false;
+ if ( !c.simpleCommand( "admin", 0, "ping" ) )
+ return false;
+ return true;
+ }
+
} // namespace mongo
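
Taken together, parse() and connect() replace the old countCommas branching in
DBConnectionPool::get(). A sketch of the new flow; the helper and host strings
are illustrative:

    #include <iostream>
    #include "client/dbclient.h"

    // "a" -> MASTER, "a,b" -> PAIR, "a,b,c" -> SYNC, "name/a,b" -> SET
    mongo::DBClientBase* open( const std::string& host ) {
        std::string errmsg;
        mongo::ConnectionString cs = mongo::ConnectionString::parse( host, errmsg );
        if ( !cs.isValid() ) {
            std::cout << errmsg << std::endl;   // e.g. invalid hostname
            return 0;
        }
        mongo::DBClientBase* conn = cs.connect( errmsg );  // 0 on failure, errmsg set
        if ( !conn )
            std::cout << errmsg << std::endl;
        return conn;
    }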
diff --git a/client/dbclient.h b/client/dbclient.h
index a2fad8e..639d960 100644
--- a/client/dbclient.h
+++ b/client/dbclient.h
@@ -17,7 +17,7 @@
#pragma once
-#include "../stdafx.h"
+#include "../pch.h"
#include "../util/message.h"
#include "../db/jsobj.h"
#include "../db/json.h"
@@ -51,7 +51,7 @@ namespace mongo {
// an extended period of time.
QueryOption_OplogReplay = 1 << 3,
- /** The server normally times out idle cursors after an inactivy period to prevent excess memory use
+        /** The server normally times out idle cursors after an inactivity period to prevent excess memory use
Set this option to prevent that.
*/
QueryOption_NoCursorTimeout = 1 << 4,
@@ -59,7 +59,18 @@ namespace mongo {
/** Use with QueryOption_CursorTailable. If we are at the end of the data, block for a while rather
than returning no data. After a timeout period, we do return as normal.
*/
- QueryOption_AwaitData = 1 << 5
+ QueryOption_AwaitData = 1 << 5,
+
+ /** Stream the data down full blast in multiple "more" packages, on the assumption that the client
+ will fully read all data queried. Faster when you are pulling a lot of data and know you want to
+            pull it all down. Note: you must read all the data unless you close the connection.
+
+ Use the query( boost::function<void(const BSONObj&)> f, ... ) version of the connection's query()
+ method, and it will take care of all the details for you.
+ */
+ QueryOption_Exhaust = 1 << 6,
+
+ QueryOption_AllSupported = QueryOption_CursorTailable | QueryOption_SlaveOk | QueryOption_OplogReplay | QueryOption_NoCursorTimeout | QueryOption_AwaitData | QueryOption_Exhaust
};
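
A sketch of the exhaust-mode calling pattern described above, using the
boost::function overload of query() added in dbclient.cpp. The namespace is
illustrative, and the trailing parameters are assumed to default as in the
header:

    #include <iostream>
    #include "client/dbclient.h"

    static void printDoc( const mongo::BSONObj& o ) {
        std::cout << o << std::endl;   // invoked once per streamed document
    }

    void dumpAll( mongo::DBClientConnection& c ) {
        // the connection sets QueryOption_Exhaust itself when the server
        // advertises support, then drains every batch before returning
        unsigned long long n = c.query( printDoc, "test.foo", mongo::Query() );
        std::cout << "saw " << n << " documents" << std::endl;
    }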
@@ -69,10 +80,129 @@ namespace mongo {
/** Update multiple documents (if multiple documents match query expression).
(Default is update a single document and stop.) */
- UpdateOption_Multi = 1 << 1
+ UpdateOption_Multi = 1 << 1,
+
+ /** flag from mongo saying this update went everywhere */
+ UpdateOption_Broadcast = 1 << 2
+ };
+
+ enum RemoveOptions {
+        /** only delete one document */
+ RemoveOption_JustOne = 1 << 0,
+
+        /** flag from mongo saying this delete went everywhere */
+ RemoveOption_Broadcast = 1 << 1
+ };
+
+ class DBClientBase;
+
+ class ConnectionString {
+ public:
+ enum ConnectionType { INVALID , MASTER , PAIR , SET , SYNC };
+
+ ConnectionString( const HostAndPort& server ){
+ _type = MASTER;
+ _servers.push_back( server );
+ _finishInit();
+ }
+
+ ConnectionString( ConnectionType type , const vector<HostAndPort>& servers )
+ : _type( type ) , _servers( servers ){
+ _finishInit();
+ }
+
+ ConnectionString( ConnectionType type , const string& s , const string& setName = "" ){
+ _type = type;
+ _setName = setName;
+ _fillServers( s );
+
+ switch ( _type ){
+ case MASTER:
+ assert( _servers.size() == 1 );
+ break;
+ case SET:
+ assert( _setName.size() );
+ assert( _servers.size() >= 1 ); // 1 is ok since we can derive
+ break;
+ case PAIR:
+ assert( _servers.size() == 2 );
+ break;
+ default:
+ assert( _servers.size() > 0 );
+ }
+
+ _finishInit();
+ }
+
+ ConnectionString( const string& s , ConnectionType favoredMultipleType ){
+ _fillServers( s );
+ if ( _servers.size() == 1 ){
+ _type = MASTER;
+ }
+ else {
+ _type = favoredMultipleType;
+ assert( _type != MASTER );
+ }
+ _finishInit();
+ }
+
+ bool isValid() const { return _type != INVALID; }
+
+ string toString() const {
+ return _string;
+ }
+
+ DBClientBase* connect( string& errmsg ) const;
+
+ static ConnectionString parse( const string& url , string& errmsg );
+
+ private:
+
+ ConnectionString(){
+ _type = INVALID;
+ }
+
+ void _fillServers( string s ){
+ string::size_type idx;
+ while ( ( idx = s.find( ',' ) ) != string::npos ){
+ _servers.push_back( s.substr( 0 , idx ) );
+ s = s.substr( idx + 1 );
+ }
+ _servers.push_back( s );
+ }
+
+ void _finishInit(){
+ stringstream ss;
+ if ( _type == SET )
+ ss << _setName << "/";
+ for ( unsigned i=0; i<_servers.size(); i++ ){
+ if ( i > 0 )
+ ss << ",";
+ ss << _servers[i].toString();
+ }
+ _string = ss.str();
+ }
+
+ ConnectionType _type;
+ vector<HostAndPort> _servers;
+ string _string;
+ string _setName;
+ };
+
+ /**
+     * controls how much a client cares about writes
+ * default is NORMAL
+ */
+ enum WriteConcern {
+ W_NONE = 0 , // TODO: not every connection type fully supports this
+ W_NORMAL = 1
+ // TODO SAFE = 2
};
class BSONObj;
+ class ScopedDbConnection;
+ class DBClientCursor;
+ class DBClientCursorBatchIterator;
/** Represents a Mongo query expression. Typically one uses the QUERY(...) macro to construct a Query object.
Examples:
@@ -160,7 +290,7 @@ namespace mongo {
/**
* if this query has an orderby, hint, or some other field
*/
- bool isComplex() const;
+ bool isComplex( bool * hasDollar = 0 ) const;
BSONObj getFilter() const;
BSONObj getSort() const;
@@ -195,146 +325,12 @@ namespace mongo {
virtual bool call( Message &toSend, Message &response, bool assertOk=true ) = 0;
virtual void say( Message &toSend ) = 0;
virtual void sayPiggyBack( Message &toSend ) = 0;
- virtual void checkResponse( const string &data, int nReturned ) {}
- };
-
- /** Queries return a cursor object */
- class DBClientCursor : boost::noncopyable {
- friend class DBClientBase;
- bool init();
- public:
- /** If true, safe to call next(). Requests more from server if necessary. */
- bool more();
+ virtual void checkResponse( const char* data, int nReturned ) {}
- /** If true, there is more in our local buffers to be fetched via next(). Returns
- false when a getMore request back to server would be required. You can use this
- if you want to exhaust whatever data has been fetched to the client already but
- then perhaps stop.
- */
- bool moreInCurrentBatch() { return !_putBack.empty() || pos < nReturned; }
-
- /** next
- @return next object in the result cursor.
- on an error at the remote server, you will get back:
- { $err: <string> }
- if you do not want to handle that yourself, call nextSafe().
- */
- BSONObj next();
-
- /**
- restore an object previously returned by next() to the cursor
- */
- void putBack( const BSONObj &o ) { _putBack.push( o.getOwned() ); }
-
- /** throws AssertionException if get back { $err : ... } */
- BSONObj nextSafe() {
- BSONObj o = next();
- BSONElement e = o.firstElement();
- assert( strcmp(e.fieldName(), "$err") != 0 );
- return o;
- }
-
- /**
- iterate the rest of the cursor and return the number if items
- */
- int itcount(){
- int c = 0;
- while ( more() ){
- next();
- c++;
- }
- return c;
- }
-
- /** cursor no longer valid -- use with tailable cursors.
- note you should only rely on this once more() returns false;
- 'dead' may be preset yet some data still queued and locally
- available from the dbclientcursor.
- */
- bool isDead() const {
- return cursorId == 0;
- }
-
- bool tailable() const {
- return (opts & QueryOption_CursorTailable) != 0;
- }
-
- /** see QueryResult::ResultFlagType (db/dbmessage.h) for flag values
- mostly these flags are for internal purposes -
- ResultFlag_ErrSet is the possible exception to that
- */
- bool hasResultFlag( int flag ){
- return (resultFlags & flag) != 0;
- }
-
- DBClientCursor( DBConnector *_connector, const string &_ns, BSONObj _query, int _nToReturn,
- int _nToSkip, const BSONObj *_fieldsToReturn, int queryOptions , int bs ) :
- connector(_connector),
- ns(_ns),
- query(_query),
- nToReturn(_nToReturn),
- haveLimit( _nToReturn > 0 && !(queryOptions & QueryOption_CursorTailable)),
- nToSkip(_nToSkip),
- fieldsToReturn(_fieldsToReturn),
- opts(queryOptions),
- batchSize(bs),
- m(new Message()),
- cursorId(),
- nReturned(),
- pos(),
- data(),
- _ownCursor( true ) {
- }
-
- DBClientCursor( DBConnector *_connector, const string &_ns, long long _cursorId, int _nToReturn, int options ) :
- connector(_connector),
- ns(_ns),
- nToReturn( _nToReturn ),
- haveLimit( _nToReturn > 0 && !(options & QueryOption_CursorTailable)),
- opts( options ),
- m(new Message()),
- cursorId( _cursorId ),
- nReturned(),
- pos(),
- data(),
- _ownCursor( true ) {
- }
-
- virtual ~DBClientCursor();
-
- long long getCursorId() const { return cursorId; }
-
- /** by default we "own" the cursor and will send the server a KillCursor
- message when ~DBClientCursor() is called. This function overrides that.
- */
- void decouple() { _ownCursor = false; }
-
- private:
-
- int nextBatchSize();
-
- DBConnector *connector;
- string ns;
- BSONObj query;
- int nToReturn;
- bool haveLimit;
- int nToSkip;
- const BSONObj *fieldsToReturn;
- int opts;
- int batchSize;
- auto_ptr<Message> m;
- stack< BSONObj > _putBack;
-
- int resultFlags;
- long long cursorId;
- int nReturned;
- int pos;
- const char *data;
- void dataReceived();
- void requestMore();
- bool _ownCursor; // see decouple()
+ /* used by QueryOption_Exhaust. To use that your subclass must implement this. */
+ virtual void recv( Message& m ) { assert(false); }
};
-
+
/**
The interface that any db connection should implement
*/
@@ -343,6 +339,7 @@ namespace mongo {
virtual auto_ptr<DBClientCursor> query(const string &ns, Query query, int nToReturn = 0, int nToSkip = 0,
const BSONObj *fieldsToReturn = 0, int queryOptions = 0 , int batchSize = 0 ) = 0;
+ /** don't use this - called automatically by DBClientCursor for you */
virtual auto_ptr<DBClientCursor> getMore( const string &ns, long long cursorId, int nToReturn = 0, int options = 0 ) = 0;
virtual void insert( const string &ns, BSONObj obj ) = 0;
@@ -359,7 +356,7 @@ namespace mongo {
@return a single object that matches the query. if none do, then the object is empty
@throws AssertionException
*/
- virtual BSONObj findOne(const string &ns, Query query, const BSONObj *fieldsToReturn = 0, int queryOptions = 0);
+ virtual BSONObj findOne(const string &ns, const Query& query, const BSONObj *fieldsToReturn = 0, int queryOptions = 0);
};
@@ -371,33 +368,38 @@ namespace mongo {
class DBClientWithCommands : public DBClientInterface {
set<string> _seenIndexes;
public:
+ /** controls how chatty the client is about network errors & such. See log.h */
+ int _logLevel;
+
+ DBClientWithCommands() : _logLevel(0), _cachedAvailableOptions( (enum QueryOptions)0 ), _haveCachedAvailableOptions(false) { }
- /** helper function. run a simple command where the command expression is simply
- { command : 1 }
+ /** helper function. run a simple command where the command expression is simply
+ { command : 1 }
@param info -- where to put result object. may be null if caller doesn't need that info
@param command -- command name
- @return true if the command returned "ok".
- */
+ @return true if the command returned "ok".
+ */
bool simpleCommand(const string &dbname, BSONObj *info, const string &command);
/** Run a database command. Database commands are represented as BSON objects. Common database
commands have prebuilt helper functions -- see below. If a helper is not available you can
- directly call runCommand.
+ directly call runCommand.
@param dbname database name. Use "admin" for global administrative commands.
@param cmd the command object to execute. For example, { ismaster : 1 }
@param info the result object the database returns. Typically has { ok : ..., errmsg : ... } fields
set.
- @return true if the command returned "ok".
+ @param options see enum QueryOptions - normally not needed to run a command
+ @return true if the command returned "ok".
*/
virtual bool runCommand(const string &dbname, const BSONObj& cmd, BSONObj &info, int options=0);
/** Authorize access to a particular database.
- Authentication is separate for each database on the server -- you may authenticate for any
- number of databases on a single connection.
- The "admin" database is special and once authenticated provides access to all databases on the
- server.
- @param digestPassword if password is plain text, set this to true. otherwise assumed to be pre-digested
+ Authentication is separate for each database on the server -- you may authenticate for any
+ number of databases on a single connection.
+ The "admin" database is special and once authenticated provides access to all databases on the
+ server.
+ @param digestPassword if password is plain text, set this to true. otherwise assumed to be pre-digested
@return true if successful
*/
virtual bool auth(const string &dbname, const string &username, const string &pwd, string& errmsg, bool digestPassword = true);
@@ -425,17 +427,17 @@ namespace mongo {
If the collection already exists, no action occurs.
- ns: fully qualified collection name
- size: desired initial extent size for the collection.
- Must be <= 1000000000 for normal collections.
- For fixed size (capped) collections, this size is the total/max size of the
- collection.
- capped: if true, this is a fixed size collection (where old data rolls out).
- max: maximum number of objects if capped (optional).
+ @param ns fully qualified collection name
+ @param size desired initial extent size for the collection.
+ Must be <= 1000000000 for normal collections.
+ For fixed size (capped) collections, this size is the total/max size of the
+ collection.
+ @param capped if true, this is a fixed size collection (where old data rolls out).
+ @param max maximum number of objects if capped (optional).
returns true if successful.
*/
- bool createCollection(const string &ns, unsigned size = 0, bool capped = false, int max = 0, BSONObj *info = 0);
+ bool createCollection(const string &ns, long long size = 0, bool capped = false, int max = 0, BSONObj *info = 0);
/** Get error result from the last operation on this connection.
@return error message text, or empty string if no error.
@@ -444,7 +446,9 @@ namespace mongo {
/** Get error result from the last operation on this connection.
@return full error object.
*/
- BSONObj getLastErrorDetailed();
+ virtual BSONObj getLastErrorDetailed();
+
+ static string getLastErrorString( const BSONObj& res );
/** Return the last error which has occurred, even if not the very last operation.
@@ -595,6 +599,8 @@ namespace mongo {
/**
get a list of all the current databases
+ uses the { listDatabases : 1 } command.
+ throws on error
*/
list<string> getDatabaseNames();
@@ -605,7 +611,6 @@ namespace mongo {
bool exists( const string& ns );
-
/** Create an index if it does not already exist.
ensureIndex calls are remembered so it is safe/fast to call this function many
times in your code.
@@ -666,25 +671,39 @@ namespace mongo {
protected:
bool isOk(const BSONObj&);
-
+
+ enum QueryOptions availableOptions();
+
+ private:
+ enum QueryOptions _cachedAvailableOptions;
+ bool _haveCachedAvailableOptions;
};
/**
abstract class that implements the core db operations
*/
class DBClientBase : public DBClientWithCommands, public DBConnector {
+ protected:
+ WriteConcern _writeConcern;
+
public:
+ DBClientBase(){
+ _writeConcern = W_NORMAL;
+ }
+
+ WriteConcern getWriteConcern() const { return _writeConcern; }
+ void setWriteConcern( WriteConcern w ){ _writeConcern = w; }
+
/** send a query to the database.
- ns: namespace to query, format is <dbname>.<collectname>[.<collectname>]*
- query: query to perform on the collection. this is a BSONObj (binary JSON)
+ @param ns namespace to query, format is <dbname>.<collectname>[.<collectname>]*
+ @param query query to perform on the collection. this is a BSONObj (binary JSON)
You may format as
{ query: { ... }, orderby: { ... } }
to specify a sort order.
- nToReturn: n to return. 0 = unlimited
- nToSkip: start with the nth item
- fieldsToReturn:
- optional template of which fields to select. if unspecified, returns all fields
- queryOptions: see options enum at top of this file
+ @param nToReturn n to return. 0 = unlimited
+ @param nToSkip start with the nth item
+ @param fieldsToReturn optional template of which fields to select. if unspecified, returns all fields
+ @param queryOptions see options enum at top of this file
@return cursor. 0 if error (connection failure)
@throws AssertionException
@@ -692,12 +711,13 @@ namespace mongo {
virtual auto_ptr<DBClientCursor> query(const string &ns, Query query, int nToReturn = 0, int nToSkip = 0,
const BSONObj *fieldsToReturn = 0, int queryOptions = 0 , int batchSize = 0 );
- /** @param cursorId id of cursor to retrieve
+ /** don't use this - called automatically by DBClientCursor for you
+ @param cursorId id of cursor to retrieve
@return an handle to a previously allocated cursor
@throws AssertionException
*/
virtual auto_ptr<DBClientCursor> getMore( const string &ns, long long cursorId, int nToReturn = 0, int options = 0 );
-
+
/**
insert an object into the database
*/
@@ -717,11 +737,13 @@ namespace mongo {
/**
updates objects matching query
*/
- virtual void update( const string &ns , Query query , BSONObj obj , bool upsert = 0 , bool multi = 0 );
+ virtual void update( const string &ns , Query query , BSONObj obj , bool upsert = false , bool multi = false );
virtual string getServerAddress() const = 0;
virtual bool isFailed() const = 0;
+
+ virtual void killCursor( long long cursorID ) = 0;
static int countCommas( const string& s ){
int n = 0;
@@ -730,9 +752,15 @@ namespace mongo {
n++;
return n;
}
- };
+
+ virtual bool callRead( Message& toSend , Message& response ) = 0;
+ // virtual bool callWrite( Message& toSend , Message& response ) = 0; // TODO: add this if needed
+ virtual void say( Message& toSend ) = 0;
+
+ virtual ConnectionString::ConnectionType type() const = 0;
+ }; // DBClientBase
- class DBClientPaired;
+ class DBClientReplicaSet;
class ConnectException : public UserException {
public:
@@ -744,24 +772,31 @@ namespace mongo {
This is the main entry point for talking to a simple Mongo setup
*/
class DBClientConnection : public DBClientBase {
- DBClientPaired *clientPaired;
- auto_ptr<MessagingPort> p;
- auto_ptr<SockAddr> server;
+ DBClientReplicaSet *clientSet;
+ boost::scoped_ptr<MessagingPort> p;
+ boost::scoped_ptr<SockAddr> server;
bool failed; // true if some sort of fatal error has ever happened
bool autoReconnect;
time_t lastReconnectTry;
- string serverAddress; // remember for reconnects
+ HostAndPort _server; // remember for reconnects
+ string _serverString;
+ int _port;
void _checkConnection();
void checkConnection() { if( failed ) _checkConnection(); }
map< string, pair<string,string> > authCache;
+ int _timeout;
+
+ bool _connect( string& errmsg );
public:
/**
@param _autoReconnect if true, automatically reconnect on a connection failure
- @param cp used by DBClientPaired. You do not need to specify this parameter
+ @param cp used by DBClientReplicaSet. You do not need to specify this parameter
+ @param timeout tcp timeout in seconds - this is for read/write, not connect.
+ Connect timeout is fixed, but short, at 5 seconds.
*/
- DBClientConnection(bool _autoReconnect=false,DBClientPaired* cp=0) :
- clientPaired(cp), failed(false), autoReconnect(_autoReconnect), lastReconnectTry(0) { }
+ DBClientConnection(bool _autoReconnect=false, DBClientReplicaSet* cp=0, int timeout=0) :
+ clientSet(cp), failed(false), autoReconnect(_autoReconnect), lastReconnectTry(0), _timeout(timeout) { }
/** Connect to a Mongo database server.
@@ -769,10 +804,27 @@ namespace mongo {
false was returned -- it will try to connect again.
@param serverHostname host to connect to. can include port number ( 127.0.0.1 , 127.0.0.1:5555 )
+ If you use IPv6 you must add a port number ( ::1:27017 )
@param errmsg any relevant error message will be appended to the string
+ @deprecated please use HostAndPort
@return false if fails to connect.
*/
- virtual bool connect(const string &serverHostname, string& errmsg);
+ virtual bool connect(const char * hostname, string& errmsg){
+ // TODO: remove this method
+ HostAndPort t( hostname );
+ return connect( t , errmsg );
+ }
+
+ /** Connect to a Mongo database server.
+
+ If autoReconnect is true, you can try to use the DBClientConnection even when
+ false was returned -- it will try to connect again.
+
+ @param server server to connect to.
+ @param errmsg any relevant error message will be appended to the string
+ @return false if fails to connect.
+ */
+ virtual bool connect(const HostAndPort& server, string& errmsg);
/** Connect to a Mongo database server. Exception throwing version.
Throws a UserException if cannot connect.
@@ -782,20 +834,26 @@ namespace mongo {
@param serverHostname host to connect to. can include port number ( 127.0.0.1 , 127.0.0.1:5555 )
*/
- void connect(string serverHostname) {
+ void connect(const string& serverHostname) {
string errmsg;
- if( !connect(serverHostname.c_str(), errmsg) )
+ if( !connect(HostAndPort(serverHostname), errmsg) )
throw ConnectException(string("can't connect ") + errmsg);
}
virtual bool auth(const string &dbname, const string &username, const string &pwd, string& errmsg, bool digestPassword = true);
- virtual auto_ptr<DBClientCursor> query(const string &ns, Query query, int nToReturn = 0, int nToSkip = 0,
+ virtual auto_ptr<DBClientCursor> query(const string &ns, Query query=Query(), int nToReturn = 0, int nToSkip = 0,
const BSONObj *fieldsToReturn = 0, int queryOptions = 0 , int batchSize = 0 ) {
checkConnection();
return DBClientBase::query( ns, query, nToReturn, nToSkip, fieldsToReturn, queryOptions , batchSize );
}
+ /** uses QueryOption_Exhaust
+ use DBClientCursorBatchIterator if you want to do items in large blocks, perhaps to avoid granular locking and such.
+ */
+ unsigned long long query( boost::function<void(const BSONObj&)> f, const string& ns, Query query, const BSONObj *fieldsToReturn = 0, int queryOptions = 0);
+ unsigned long long query( boost::function<void(DBClientCursorBatchIterator&)> f, const string& ns, Query query, const BSONObj *fieldsToReturn = 0, int queryOptions = 0);
+
/**
@return true if this connection is currently in a failed state. When autoreconnect is on,
a connection will transition back to an ok state after reconnecting.
@@ -805,67 +863,75 @@ namespace mongo {
}
MessagingPort& port() {
- return *p.get();
+ return *p;
}
string toStringLong() const {
stringstream ss;
- ss << serverAddress;
+ ss << _serverString;
if ( failed ) ss << " failed";
return ss.str();
}
/** Returns the address of the server */
string toString() {
- return serverAddress;
+ return _serverString;
}
string getServerAddress() const {
- return serverAddress;
+ return _serverString;
+ }
+
+ virtual void killCursor( long long cursorID );
+
+ virtual bool callRead( Message& toSend , Message& response ){
+ return call( toSend , response );
}
- virtual bool call( Message &toSend, Message &response, bool assertOk = true );
virtual void say( Message &toSend );
+ virtual bool call( Message &toSend, Message &response, bool assertOk = true );
+
+ virtual ConnectionString::ConnectionType type() const { return ConnectionString::MASTER; }
+ protected:
+ friend class SyncClusterConnection;
+ virtual void recv( Message& m );
virtual void sayPiggyBack( Message &toSend );
virtual void checkResponse( const char *data, int nReturned );
};
-
- /** Use this class to connect to a replica pair of servers. The class will manage
- checking for which server in a replica pair is master, and do failover automatically.
-
+
+ /** Use this class to connect to a replica set of servers. The class will manage
+ checking for which server in a replica set is master, and do failover automatically.
+
+ This can also be used to connect to replica pairs since pairs are a subset of sets
+
On a failover situation, expect at least one operation to return an error (throw
an exception) before the failover is complete. Operations are not retried.
*/
- class DBClientPaired : public DBClientBase {
- DBClientConnection left,right;
- enum State {
- NotSetL=0,
- NotSetR=1,
- Left, Right
- } master;
+ class DBClientReplicaSet : public DBClientBase {
+ string _name;
+ DBClientConnection * _currentMaster;
+ vector<HostAndPort> _servers;
+ vector<DBClientConnection*> _conns;
+
void _checkMaster();
- DBClientConnection& checkMaster();
+ DBClientConnection * checkMaster();
public:
- /** Call connect() after constructing. autoReconnect is always on for DBClientPaired connections. */
- DBClientPaired();
+ /** Call connect() after constructing. autoReconnect is always on for DBClientReplicaSet connections. */
+ DBClientReplicaSet( const string& name , const vector<HostAndPort>& servers );
+ virtual ~DBClientReplicaSet();
- /** Returns false is neither member of the pair were reachable, or neither is
+ /** Returns false if no member of the set was reachable, or none is
master, although,
when false returned, you can still try to use this connection object, it will
try reconnects.
*/
- bool connect(const string &serverHostname1, const string &serverHostname2);
+ bool connect();
- /** Connect to a server pair using a host pair string of the form
- hostname[:port],hostname[:port]
- */
- bool connect(string hostpairstring);
-
- /** Authorize. Authorizes both sides of the pair as needed.
+ /** Authorize. Authorizes all nodes as needed
*/
- bool auth(const string &dbname, const string &username, const string &pwd, string& errmsg);
+ virtual bool auth(const string &dbname, const string &username, const string &pwd, string& errmsg, bool digestPassword = true );
/** throws userassertion "no master found" */
virtual
@@ -874,56 +940,69 @@ namespace mongo {
/** throws userassertion "no master found" */
virtual
- BSONObj findOne(const string &ns, Query query, const BSONObj *fieldsToReturn = 0, int queryOptions = 0);
+ BSONObj findOne(const string &ns, const Query& query, const BSONObj *fieldsToReturn = 0, int queryOptions = 0);
/** insert */
virtual void insert( const string &ns , BSONObj obj ) {
- checkMaster().insert(ns, obj);
+ checkMaster()->insert(ns, obj);
}
/** insert multiple objects. Note that single object insert is asynchronous, so this version
is only nominally faster and not worth a special effort to try to use. */
virtual void insert( const string &ns, const vector< BSONObj >& v ) {
- checkMaster().insert(ns, v);
+ checkMaster()->insert(ns, v);
}
/** remove */
virtual void remove( const string &ns , Query obj , bool justOne = 0 ) {
- checkMaster().remove(ns, obj, justOne);
+ checkMaster()->remove(ns, obj, justOne);
}
/** update */
virtual void update( const string &ns , Query query , BSONObj obj , bool upsert = 0 , bool multi = 0 ) {
- return checkMaster().update(ns, query, obj, upsert,multi);
+ return checkMaster()->update(ns, query, obj, upsert,multi);
}
+ virtual void killCursor( long long cursorID ){
+ checkMaster()->killCursor( cursorID );
+ }
+
string toString();
/* this is the callback from our underlying connections to notify us that we got a "not master" error.
*/
void isntMaster() {
- master = ( ( master == Left ) ? NotSetR : NotSetL );
+ _currentMaster = 0;
}
- string getServerAddress() const {
- return left.getServerAddress() + "," + right.getServerAddress();
- }
-
+ string getServerAddress() const;
+
+ DBClientConnection& masterConn();
DBClientConnection& slaveConn();
- /* TODO - not yet implemented. mongos may need these. */
- virtual bool call( Message &toSend, Message &response, bool assertOk=true ) { assert(false); return false; }
- virtual void say( Message &toSend ) { assert(false); }
+
+ virtual bool call( Message &toSend, Message &response, bool assertOk=true ) { return checkMaster()->call( toSend , response , assertOk ); }
+ virtual void say( Message &toSend ) { checkMaster()->say( toSend ); }
+ virtual bool callRead( Message& toSend , Message& response ){ return checkMaster()->callRead( toSend , response ); }
+
+ virtual ConnectionString::ConnectionType type() const { return ConnectionString::SET; }
+
+ protected:
virtual void sayPiggyBack( Message &toSend ) { assert(false); }
virtual void checkResponse( const char *data, int nReturned ) { assert(false); }
bool isFailed() const {
- // TODO: this really should check isFailed on current master as well
- return master < Left;
+ return _currentMaster == 0 || _currentMaster->isFailed();
}
};
+ /** pings server to check if it's up
+ */
+ bool serverAlive( const string &uri );
DBClientBase * createDirectClient();
} // namespace mongo
+
+#include "dbclientcursor.h"
+#include "undef_macros.h"
diff --git a/client/dbclientcursor.cpp b/client/dbclientcursor.cpp
new file mode 100644
index 0000000..07771bb
--- /dev/null
+++ b/client/dbclientcursor.cpp
@@ -0,0 +1,232 @@
+// dbclientcursor.cpp - cursor class for the Mongo C++ database client
+
+/* Copyright 2009 10gen Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "pch.h"
+#include "dbclient.h"
+#include "../db/dbmessage.h"
+#include "../db/cmdline.h"
+#include "connpool.h"
+#include "../s/shard.h"
+
+namespace mongo {
+
+ void assembleRequest( const string &ns, BSONObj query, int nToReturn, int nToSkip, const BSONObj *fieldsToReturn, int queryOptions, Message &toSend );
+
+ int DBClientCursor::nextBatchSize(){
+
+ if ( nToReturn == 0 )
+ return batchSize;
+
+ if ( batchSize == 0 )
+ return nToReturn;
+
+ return batchSize < nToReturn ? batchSize : nToReturn;
+ }
+
+ bool DBClientCursor::init() {
+ Message toSend;
+ if ( !cursorId ) {
+ assembleRequest( ns, query, nextBatchSize() , nToSkip, fieldsToReturn, opts, toSend );
+ } else {
+ BufBuilder b;
+ b.appendNum( opts );
+ b.appendStr( ns );
+ b.appendNum( nToReturn );
+ b.appendNum( cursorId );
+ toSend.setData( dbGetMore, b.buf(), b.len() );
+ }
+ if ( !connector->call( toSend, *m, false ) )
+ return false;
+ if ( m->empty() )
+ return false;
+ dataReceived();
+ return true;
+ }
+
+ void DBClientCursor::requestMore() {
+ assert( cursorId && pos == nReturned );
+
+ if (haveLimit){
+ nToReturn -= nReturned;
+ assert(nToReturn > 0);
+ }
+ BufBuilder b;
+ b.appendNum(opts);
+ b.appendStr(ns);
+ b.appendNum(nextBatchSize());
+ b.appendNum(cursorId);
+
+ Message toSend;
+ toSend.setData(dbGetMore, b.buf(), b.len());
+ auto_ptr<Message> response(new Message());
+
+ if ( connector ){
+ connector->call( toSend, *response );
+ m = response;
+ dataReceived();
+ }
+ else {
+ assert( _scopedHost.size() );
+ ScopedDbConnection conn( _scopedHost );
+ conn->call( toSend , *response );
+ connector = conn.get();
+ m = response;
+ dataReceived();
+ connector = 0;
+ conn.done();
+ }
+ }
+
+ /** with QueryOption_Exhaust, the server just blasts data at us (marked at end with cursorid==0). */
+ void DBClientCursor::exhaustReceiveMore() {
+ assert( cursorId && pos == nReturned );
+ assert( !haveLimit );
+ auto_ptr<Message> response(new Message());
+ assert( connector );
+ connector->recv(*response);
+ m = response;
+ dataReceived();
+ }
+
+ void DBClientCursor::dataReceived() {
+ QueryResult *qr = (QueryResult *) m->singleData();
+ resultFlags = qr->resultFlags();
+
+ if ( qr->resultFlags() & ResultFlag_CursorNotFound ) {
+ // cursor id no longer valid at the server.
+ assert( qr->cursorId == 0 );
+ cursorId = 0; // 0 indicates no longer valid (dead)
+ if ( ! ( opts & QueryOption_CursorTailable ) )
+ throw UserException( 13127 , "getMore: cursor didn't exist on server, possible restart or timeout?" );
+ }
+
+ if ( cursorId == 0 || ! ( opts & QueryOption_CursorTailable ) ) {
+ // only set initially: we don't want to kill it on end of data
+ // if it's a tailable cursor
+ cursorId = qr->cursorId;
+ }
+
+ nReturned = qr->nReturned;
+ pos = 0;
+ data = qr->data();
+
+ connector->checkResponse( data, nReturned );
+ /* this assert would fire the way we currently work:
+ assert( nReturned || cursorId == 0 );
+ */
+ }
+
+ /** If true, safe to call next(). Requests more from server if necessary. */
+ bool DBClientCursor::more() {
+ _assertIfNull();
+
+ if ( !_putBack.empty() )
+ return true;
+
+ if (haveLimit && pos >= nToReturn)
+ return false;
+
+ if ( pos < nReturned )
+ return true;
+
+ if ( cursorId == 0 )
+ return false;
+
+ requestMore();
+ return pos < nReturned;
+ }
+
+ BSONObj DBClientCursor::next() {
+ DEV _assertIfNull();
+ if ( !_putBack.empty() ) {
+ BSONObj ret = _putBack.top();
+ _putBack.pop();
+ return ret;
+ }
+
+ uassert(13422, "DBClientCursor next() called but more() is false", pos < nReturned);
+
+ pos++;
+ BSONObj o(data);
+ data += o.objsize();
+ /* todo would be good to make data null at end of batch for safety */
+ return o;
+ }
+
+ void DBClientCursor::peek(vector<BSONObj>& v, int atMost) {
+ int m = atMost;
+
+ /* TODO: also surface objects waiting in _putBack;
+ std::stack offers no iteration, so they are skipped for now.
+ */
+
+ int p = pos;
+ const char *d = data;
+ while( m && p < nReturned ) {
+ BSONObj o(d);
+ d += o.objsize();
+ p++;
+ m--;
+ v.push_back(o);
+ }
+ }
+
+ void DBClientCursor::attach( AScopedConnection * conn ){
+ assert( _scopedHost.size() == 0 );
+ assert( connector == conn->get() );
+ _scopedHost = conn->getHost();
+ conn->done();
+ connector = 0;
+ }
+
+ DBClientCursor::~DBClientCursor() {
+
+ DESTRUCTOR_GUARD (
+
+ if ( cursorId && _ownCursor ) {
+ BufBuilder b;
+ b.appendNum( (int)0 ); // reserved
+ b.appendNum( (int)1 ); // number
+ b.appendNum( cursorId );
+
+ Message m;
+ m.setData( dbKillCursors , b.buf() , b.len() );
+
+ if ( connector ){
+ connector->sayPiggyBack( m );
+ }
+ else {
+ assert( _scopedHost.size() );
+ ScopedDbConnection conn( _scopedHost );
+ conn->sayPiggyBack( m );
+ conn.done();
+ }
+ }
+
+ );
+ }
+
+
+} // namespace mongo
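
A sketch of the exhaust path above, assuming a collection test.big exists; each server batch is handed to a callback via DBClientCursorBatchIterator (declared in dbclient.h):

    void handleBatch( DBClientCursorBatchIterator& i ) {
        while ( i.moreInCurrentBatch() )
            cout << i.nextSafe() << endl;
    }

    void dumpAll( DBClientConnection& conn ) {
        // streams the whole collection using QueryOption_Exhaust under the hood
        unsigned long long n = conn.query(
            boost::function<void(DBClientCursorBatchIterator&)>( handleBatch ),
            "test.big" , Query() );
        cout << "saw " << n << " documents" << endl;
    }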
diff --git a/client/dbclientcursor.h b/client/dbclientcursor.h
new file mode 100644
index 0000000..51cdc13
--- /dev/null
+++ b/client/dbclientcursor.h
@@ -0,0 +1,204 @@
+// file dbclientcursor.h
+
+/* Copyright 2009 10gen Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include "../pch.h"
+#include "../util/message.h"
+#include "../db/jsobj.h"
+#include "../db/json.h"
+#include <stack>
+
+namespace mongo {
+
+ class AScopedConnection;
+
+ /** Queries return a cursor object */
+ class DBClientCursor : boost::noncopyable {
+ public:
+ /** If true, safe to call next(). Requests more from server if necessary. */
+ bool more();
+
+ /** If true, there is more in our local buffers to be fetched via next(). Returns
+ false when a getMore request back to server would be required. You can use this
+ if you want to exhaust whatever data has been fetched to the client already but
+ then perhaps stop.
+ */
+ int objsLeftInBatch() const { _assertIfNull(); return _putBack.size() + nReturned - pos; }
+ bool moreInCurrentBatch() { return objsLeftInBatch() > 0; }
+
+ /** next
+ @return next object in the result cursor.
+ on an error at the remote server, you will get back:
+ { $err: <string> }
+ if you do not want to handle that yourself, call nextSafe().
+ */
+ BSONObj next();
+
+ /**
+ restore an object previously returned by next() to the cursor
+ */
+ void putBack( const BSONObj &o ) { _putBack.push( o.getOwned() ); }
+
+ /** throws AssertionException if get back { $err : ... } */
+ BSONObj nextSafe() {
+ BSONObj o = next();
+ BSONElement e = o.firstElement();
+ if( strcmp(e.fieldName(), "$err") == 0 ) {
+ if( logLevel >= 5 )
+ log() << "nextSafe() error " << o.toString() << endl;
+ uassert(13106, "nextSafe(): " + o.toString(), false);
+ }
+ return o;
+ }
+
+ /** peek ahead at items buffered for future next() calls.
+ never requests new data from the server. so peek only effective
+ with what is already buffered.
+ WARNING: no support for _putBack yet!
+ */
+ void peek(vector<BSONObj>&, int atMost);
+
+ /**
+ iterate the rest of the cursor and return the number of items
+ */
+ int itcount(){
+ int c = 0;
+ while ( more() ){
+ next();
+ c++;
+ }
+ return c;
+ }
+
+ /** cursor no longer valid -- use with tailable cursors.
+ note you should only rely on this once more() returns false;
+ 'dead' may be set even though some data is still queued and locally
+ available from the dbclientcursor.
+ */
+ bool isDead() const {
+ return !this || cursorId == 0;
+ }
+
+ bool tailable() const {
+ return (opts & QueryOption_CursorTailable) != 0;
+ }
+
+ /** see ResultFlagType (constants.h) for flag values
+ mostly these flags are for internal purposes -
+ ResultFlag_ErrSet is the possible exception to that
+ */
+ bool hasResultFlag( int flag ){
+ _assertIfNull();
+ return (resultFlags & flag) != 0;
+ }
+
+ DBClientCursor( DBConnector *_connector, const string &_ns, BSONObj _query, int _nToReturn,
+ int _nToSkip, const BSONObj *_fieldsToReturn, int queryOptions , int bs ) :
+ connector(_connector),
+ ns(_ns),
+ query(_query),
+ nToReturn(_nToReturn),
+ haveLimit( _nToReturn > 0 && !(queryOptions & QueryOption_CursorTailable)),
+ nToSkip(_nToSkip),
+ fieldsToReturn(_fieldsToReturn),
+ opts(queryOptions),
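+ // a batch size of 1 maps to ntoreturn 1, which tells the server to close the cursor after one batch, hence the bump to 2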
+ batchSize(bs==1?2:bs),
+ m(new Message()),
+ cursorId(),
+ nReturned(),
+ pos(),
+ data(),
+ _ownCursor( true ){
+ }
+
+ DBClientCursor( DBConnector *_connector, const string &_ns, long long _cursorId, int _nToReturn, int options ) :
+ connector(_connector),
+ ns(_ns),
+ nToReturn( _nToReturn ),
+ haveLimit( _nToReturn > 0 && !(options & QueryOption_CursorTailable)),
+ opts( options ),
+ m(new Message()),
+ cursorId( _cursorId ),
+ nReturned(),
+ pos(),
+ data(),
+ _ownCursor( true ){
+ }
+
+ virtual ~DBClientCursor();
+
+ long long getCursorId() const { return cursorId; }
+
+ /** by default we "own" the cursor and will send the server a KillCursor
+ message when ~DBClientCursor() is called. This function overrides that.
+ */
+ void decouple() { _ownCursor = false; }
+
+ void attach( AScopedConnection * conn );
+
+ private:
+ friend class DBClientBase;
+ friend class DBClientConnection;
+ bool init();
+ int nextBatchSize();
+ DBConnector *connector;
+ string ns;
+ BSONObj query;
+ int nToReturn;
+ bool haveLimit;
+ int nToSkip;
+ const BSONObj *fieldsToReturn;
+ int opts;
+ int batchSize;
+ auto_ptr<Message> m;
+ stack< BSONObj > _putBack;
+ int resultFlags;
+ long long cursorId;
+ int nReturned;
+ int pos;
+ const char *data;
+ void dataReceived();
+ void requestMore();
+ void exhaustReceiveMore(); // for exhaust
+ bool _ownCursor; // see decouple()
+ string _scopedHost;
+
+ // Don't call from a virtual function
+ void _assertIfNull() const { uassert(13348, "connection died", this); }
+ };
+
+ /** iterate over objects in current batch only - will not cause a network call
+ */
+ class DBClientCursorBatchIterator {
+ public:
+ DBClientCursorBatchIterator( DBClientCursor &c ) : _c( c ), _n() {}
+ bool moreInCurrentBatch() { return _c.moreInCurrentBatch(); }
+ BSONObj nextSafe() {
+ massert( 13383, "BatchIterator empty", moreInCurrentBatch() );
+ ++_n;
+ return _c.nextSafe();
+ }
+ int n() const { return _n; }
+ private:
+ DBClientCursor &_c;
+ int _n;
+ };
+
+} // namespace mongo
+
+#include "undef_macros.h"
diff --git a/client/distlock.cpp b/client/distlock.cpp
new file mode 100644
index 0000000..c264597
--- /dev/null
+++ b/client/distlock.cpp
@@ -0,0 +1,225 @@
+// @file distlock.cpp
+
+/* Copyright 2009 10gen Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "pch.h"
+#include "dbclient.h"
+#include "distlock.h"
+
+namespace mongo {
+
+ string lockPingNS = "config.lockpings";
+
+ ThreadLocalValue<string> distLockIds("");
+
+ string getDistLockProcess(){
+ static string s;
+ if ( s.empty() ){
+ stringstream ss;
+ ss << getHostNameCached() << ":" << time(0) << ":" << rand();
+ s = ss.str();
+ }
+ return s;
+ }
+
+ string getDistLockId(){
+ string s = distLockIds.get();
+ if ( s.empty() ){
+ stringstream ss;
+ ss << getDistLockProcess() << ":" << getThreadName() << ":" << rand();
+ s = ss.str();
+ distLockIds.set( s );
+ }
+ return s;
+ }
+
+ void distLockPingThread( ConnectionString addr ){
+ static int loops = 0;
+ while( ! inShutdown() ){
+ try {
+ ScopedDbConnection conn( addr );
+
+ // do ping
+ conn->update( lockPingNS ,
+ BSON( "_id" << getDistLockProcess() ) ,
+ BSON( "$set" << BSON( "ping" << DATENOW ) ) ,
+ true );
+
+
+ // remove really old entries
+ BSONObjBuilder f;
+ f.appendDate( "$lt" , jsTime() - ( 4 * 86400 * 1000 ) );
+ BSONObj r = BSON( "ping" << f.obj() );
+ conn->remove( lockPingNS , r );
+
+ // create index so remove is fast even with a lot of servers
+ if ( loops++ == 0 ){
+ conn->ensureIndex( lockPingNS , BSON( "ping" << 1 ) );
+ }
+
+ conn.done();
+ }
+ catch ( std::exception& e ){
+ log( LL_WARNING ) << "couldn't ping: " << e.what() << endl;
+ }
+ sleepsecs(30);
+ }
+ }
+
+
+ class DistributedLockPinger {
+ public:
+ DistributedLockPinger()
+ : _mutex( "DistributedLockPinger" ){
+ }
+
+ void got( const ConnectionString& conn ){
+ string s = conn.toString();
+ scoped_lock lk( _mutex );
+ if ( _seen.count( s ) > 0 )
+ return;
+ boost::thread t( boost::bind( &distLockPingThread , conn ) );
+ _seen.insert( s );
+ }
+
+ set<string> _seen;
+ mongo::mutex _mutex;
+
+ } distLockPinger;
+
+ DistributedLock::DistributedLock( const ConnectionString& conn , const string& name , unsigned takeoverMinutes )
+ : _conn(conn),_name(name),_takeoverMinutes(takeoverMinutes){
+ _id = BSON( "_id" << name );
+ _ns = "config.locks";
+ distLockPinger.got( conn );
+ }
+
+
+ bool DistributedLock::lock_try( string why , BSONObj * other ){
+ // check for a recursive lock attempt
+ assert( getState() == 0 );
+
+ ScopedDbConnection conn( _conn );
+
+ BSONObjBuilder queryBuilder;
+ queryBuilder.appendElements( _id );
+ queryBuilder.append( "state" , 0 );
+
+ { // make sure it's there so we can use simple update logic below
+ BSONObj o = conn->findOne( _ns , _id );
+ if ( o.isEmpty() ){
+ try {
+ conn->insert( _ns , BSON( "_id" << _name << "state" << 0 << "who" << "" ) );
+ }
+ catch ( UserException& ){
+ }
+ }
+ else if ( o["state"].numberInt() > 0 ){
+ BSONObj lastPing = conn->findOne( lockPingNS , o["process"].wrap( "_id" ) );
+ if ( lastPing.isEmpty() ){
+ // TODO: maybe this should clear, not sure yet
+ log() << "lastPing is empty! this could be bad: " << o << endl;
+ conn.done();
+ return false;
+ }
+
+ unsigned long long elapsed = jsTime() - lastPing["ping"].Date(); // in ms
+ elapsed = elapsed / ( 1000 * 60 ); // convert to minutes
+
+ if ( elapsed <= _takeoverMinutes ){
+ log(1) << "dist_lock lock failed because taken by: " << o << endl;
+ conn.done();
+ return false;
+ }
+
+ log() << "dist_lock forcefully taking over from: " << o << " elapsed minutes: " << elapsed << endl;
+ conn->update( _ns , _id , BSON( "$set" << BSON( "state" << 0 ) ) );
+ }
+ else if ( o["ts"].type() ){
+ queryBuilder.append( o["ts"] );
+ }
+ }
+
+ OID ts;
+ ts.init();
+
+ bool gotLock = false;
+ BSONObj now;
+
+ BSONObj whatIWant = BSON( "$set" << BSON( "state" << 1 <<
+ "who" << getDistLockId() << "process" << getDistLockProcess() <<
+ "when" << DATENOW << "why" << why << "ts" << ts ) );
+ try {
+ conn->update( _ns , queryBuilder.obj() , whatIWant );
+
+ BSONObj o = conn->getLastErrorDetailed();
+ now = conn->findOne( _ns , _id );
+
+ if ( o["n"].numberInt() == 0 ){
+ if ( other )
+ *other = now;
+ gotLock = false;
+ }
+ else {
+ gotLock = true;
+ }
+
+ }
+ catch ( UpdateNotTheSame& up ){
+ // this means our update got through on some, but not others
+
+ for ( unsigned i=0; i<up.size(); i++ ){
+ ScopedDbConnection temp( up[i].first );
+ BSONObj temp2 = temp->findOne( _ns , _id );
+
+ if ( now.isEmpty() || now["ts"] < temp2["ts"] ){
+ now = temp2.getOwned();
+ }
+
+ temp.done();
+ }
+
+ if ( now["ts"].OID() == ts ){
+ gotLock = true;
+ conn->update( _ns , _id , whatIWant );
+ }
+ else {
+ gotLock = false;
+ }
+ }
+
+ conn.done();
+
+ log(1) << "dist_lock lock gotLock: " << gotLock << " now: " << now << endl;
+
+ if ( ! gotLock )
+ return false;
+
+ _state.set( 1 );
+ return true;
+ }
+
+ void DistributedLock::unlock(){
+ ScopedDbConnection conn( _conn );
+ conn->update( _ns , _id, BSON( "$set" << BSON( "state" << 0 ) ) );
+ log(1) << "dist_lock unlock: " << conn->findOne( _ns , _id ) << endl;
+ conn.done();
+
+ _state.set( 0 );
+ }
+
+
+}
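
A sketch of taking the lock directly, assuming a SyncClusterConnection-style config server string; host names and the lock name are illustrative:

    void tryBalance() {
        DistributedLock lk( ConnectionString( "cfg1:27019,cfg2:27019,cfg3:27019" ,
                                              ConnectionString::SYNC ) ,
                            "balancer" );
        BSONObj other;
        if ( lk.lock_try( "moving chunks" , &other ) ) {
            // ... critical section ...
            lk.unlock();
        }
        else {
            cout << "lock busy, held by: " << other << endl;
        }
    }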
diff --git a/client/distlock.h b/client/distlock.h
new file mode 100644
index 0000000..3a03390
--- /dev/null
+++ b/client/distlock.h
@@ -0,0 +1,91 @@
+// distlock.h
+
+/* Copyright 2009 10gen Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+/**
+ * distributed locking mechanism
+ */
+
+#include "../pch.h"
+#include "dbclient.h"
+#include "connpool.h"
+#include "redef_macros.h"
+#include "syncclusterconnection.h"
+
+namespace mongo {
+
+ class DistributedLock {
+ public:
+
+ /**
+ * @param takeoverMinutes how long before we steal lock in minutes
+ */
+ DistributedLock( const ConnectionString& conn , const string& name , unsigned takeoverMinutes = 10 );
+
+ int getState(){
+ return _state.get();
+ }
+
+ bool isLocked(){
+ return _state.get() != 0;
+ }
+
+ bool lock_try( string why , BSONObj * other = 0 );
+ void unlock();
+
+ private:
+ ConnectionString _conn;
+ string _name;
+ unsigned _takeoverMinutes;
+
+ string _ns;
+ BSONObj _id;
+
+ ThreadLocalValue<int> _state;
+ };
+
+ class dist_lock_try {
+ public:
+
+ dist_lock_try( DistributedLock * lock , string why )
+ : _lock(lock){
+ _got = _lock->lock_try( why , &_other );
+ }
+
+ ~dist_lock_try(){
+ if ( _got ){
+ _lock->unlock();
+ }
+ }
+
+ bool got() const {
+ return _got;
+ }
+
+ BSONObj other() const {
+ return _other;
+ }
+
+ private:
+ DistributedLock * _lock;
+ bool _got;
+ BSONObj _other;
+
+ };
+
+}
+
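The RAII wrapper above can guard the same lock; a sketch, assuming a DistributedLock lk constructed as in distlock.cpp:

    void guarded( DistributedLock& lk ) {
        dist_lock_try dlk( &lk , "rebalancing" );
        if ( ! dlk.got() ) {
            cout << "busy, held by: " << dlk.other() << endl;
            return;
        }
        // ... work while holding the lock; ~dist_lock_try unlocks on scope exit
    }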
diff --git a/client/distlock_test.cpp b/client/distlock_test.cpp
new file mode 100644
index 0000000..0879b6e
--- /dev/null
+++ b/client/distlock_test.cpp
@@ -0,0 +1,80 @@
+// distlock_test.cpp
+
+/* Copyright 2009 10gen Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "../pch.h"
+#include "dbclient.h"
+#include "distlock.h"
+#include "../db/commands.h"
+
+namespace mongo {
+
+ class TestDistLockWithSync : public Command {
+ public:
+ TestDistLockWithSync() : Command( "_testDistLockWithSyncCluster" ){}
+ virtual void help( stringstream& help ) const {
+ help << "should not be calling this directly" << endl;
+ }
+
+ virtual bool slaveOk() const { return false; }
+ virtual bool adminOnly() const { return true; }
+ virtual LockType locktype() const { return NONE; }
+
+ static void runThread(){
+ for ( int i=0; i<1000; i++ ){
+ if ( current->lock_try( "test" ) ){
+ gotit++;
+ for ( int j=0; j<2000; j++ ){
+ count++;
+ }
+ current->unlock();
+ }
+ }
+ }
+
+ bool run(const string& , BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool){
+ DistributedLock lk( ConnectionString( cmdObj["host"].String() , ConnectionString::SYNC ), "testdistlockwithsync" );
+ current = &lk;
+ count = 0;
+ gotit = 0;
+
+ vector<shared_ptr<boost::thread> > l;
+ for ( int i=0; i<4; i++ ){
+ l.push_back( shared_ptr<boost::thread>( new boost::thread( runThread ) ) );
+ }
+
+ for ( unsigned i=0; i<l.size(); i++ )
+ l[i]->join();
+
+ result.append( "count" , count );
+ result.append( "gotit" , gotit );
+ current = 0;
+ return count == gotit * 2000;
+ }
+
+ static DistributedLock * current;
+ static int count;
+ static int gotit;
+
+ } testDistLockWithSyncCmd;
+
+
+ DistributedLock * TestDistLockWithSync::current;
+ int TestDistLockWithSync::count;
+ int TestDistLockWithSync::gotit;
+
+
+}
diff --git a/client/examples/clientTest.cpp b/client/examples/clientTest.cpp
index bbb82f6..83a556a 100644
--- a/client/examples/clientTest.cpp
+++ b/client/examples/clientTest.cpp
@@ -137,10 +137,14 @@ int main( int argc, const char **argv ) {
assert( conn.getLastError() == "" );
// nonexistent index test
- assert( conn.findOne(ns, Query("{name:\"eliot\"}").hint("{foo:1}")).hasElement("$err") );
- assert( conn.getLastError() == "bad hint" );
- conn.resetError();
- assert( conn.getLastError() == "" );
+ bool asserted = false;
+ try {
+ conn.findOne(ns, Query("{name:\"eliot\"}").hint("{foo:1}"));
+ }
+ catch ( ... ){
+ asserted = true;
+ }
+ assert( asserted );
//existing index
assert( conn.findOne(ns, Query("{name:'eliot'}").hint("{name:1}")).hasElement("name") );
@@ -176,8 +180,9 @@ int main( int argc, const char **argv ) {
}
BSONObj found = conn.findOne( tsns , mongo::BSONObj() );
+ cout << "old: " << out << "\nnew: " << found << endl;
assert( ( oldTime < found["ts"].timestampTime() ) ||
- ( oldInc + 1 == found["ts"].timestampInc() ) );
+ ( oldTime == found["ts"].timestampTime() && oldInc < found["ts"].timestampInc() ) );
}
@@ -185,9 +190,9 @@ int main( int argc, const char **argv ) {
assert( conn.getLastError().empty() );
BufBuilder b;
- b.append( (int)0 ); // reserved
- b.append( (int)-1 ); // invalid # of cursors triggers exception
- b.append( (int)-1 ); // bogus cursor id
+ b.appendNum( (int)0 ); // reserved
+ b.appendNum( (int)-1 ); // invalid # of cursors triggers exception
+ b.appendNum( (int)-1 ); // bogus cursor id
Message m;
m.setData( dbKillCursors, b.buf(), b.len() );
diff --git a/client/examples/tail.cpp b/client/examples/tail.cpp
index e844b32..3738b4f 100644
--- a/client/examples/tail.cpp
+++ b/client/examples/tail.cpp
@@ -22,34 +22,25 @@
using namespace mongo;
-void foo() { }
+void tail(DBClientBase& conn, const char *ns) {
+ BSONElement lastId = minKey.firstElement();
+ Query query = Query();
-/* "tail" the specified namespace, outputting elements as they are added.
- _id values must be inserted in increasing order for this to work. (Some other
- field could also be used.)
+ auto_ptr<DBClientCursor> c =
+ conn.query(ns, query, 0, 0, 0, QueryOption_CursorTailable);
- Note: one could use a capped collection and $natural order to do something
- similar, using sort({$natural:1}), and then not need to worry about
- _id's being in order.
-*/
-void tail(DBClientBase& conn, const char *ns) {
- conn.ensureIndex(ns, fromjson("{_id:1}"));
- BSONElement lastId;
- Query query = Query().sort("_id");
while( 1 ) {
- auto_ptr<DBClientCursor> c = conn.query(ns, query, 0, 0, 0, Option_CursorTailable);
- while( 1 ) {
- if( !c->more() ) {
- if( c->isDead() ) {
- // we need to requery
- break;
- }
- sleepsecs(1);
+ if( !c->more() ) {
+ if( c->isDead() ) {
+ break; // we need to requery
+ }
+
+ // all data (so far) exhausted, wait for more
+ sleepsecs(1);
+ continue;
}
BSONObj o = c->next();
lastId = o["_id"];
cout << o.toString() << endl;
- }
- query = QUERY( "_id" << GT << lastId ).sort("_id");
}
}
diff --git a/client/gridfs.cpp b/client/gridfs.cpp
index 892ec6e..b2ae478 100644
--- a/client/gridfs.cpp
+++ b/client/gridfs.cpp
@@ -15,7 +15,7 @@
* limitations under the License.
*/
-#include "../stdafx.h"
+#include "pch.h"
#include <fcntl.h>
#include <utility>
@@ -34,15 +34,15 @@ namespace mongo {
const unsigned DEFAULT_CHUNK_SIZE = 256 * 1024;
- Chunk::Chunk( BSONObj o ){
+ GridFSChunk::GridFSChunk( BSONObj o ){
_data = o;
}
- Chunk::Chunk( BSONObj fileObject , int chunkNumber , const char * data , int len ){
+ GridFSChunk::GridFSChunk( BSONObj fileObject , int chunkNumber , const char * data , int len ){
BSONObjBuilder b;
b.appendAs( fileObject["_id"] , "files_id" );
b.append( "n" , chunkNumber );
- b.appendBinDataArray( "data" , data , len );
+ b.appendBinData( "data" , len, BinDataGeneral, data );
_data = b.obj();
}
@@ -50,7 +50,7 @@ namespace mongo {
GridFS::GridFS( DBClientBase& client , const string& dbName , const string& prefix ) : _client( client ) , _dbName( dbName ) , _prefix( prefix ){
_filesNS = dbName + "." + prefix + ".files";
_chunksNS = dbName + "." + prefix + ".chunks";
-
+ _chunkSize = DEFAULT_CHUNK_SIZE;
client.ensureIndex( _filesNS , BSON( "filename" << 1 ) );
client.ensureIndex( _chunksNS , BSON( "files_id" << 1 << "n" << 1 ) );
@@ -60,6 +60,11 @@ namespace mongo {
}
+ void GridFS::setChunkSize(unsigned int size) {
+ massert( 13296 , "invalid chunk size is specified", (size != 0));
+ _chunkSize = size;
+ }
+
BSONObj GridFS::storeFile( const char* data , size_t length , const string& remoteName , const string& contentType){
massert( 10279 , "large files not yet implemented", length <= 0xffffffff);
char const * const end = data + length;
@@ -70,8 +75,8 @@ namespace mongo {
int chunkNumber = 0;
while (data < end){
- int chunkLen = MIN(DEFAULT_CHUNK_SIZE, (unsigned)(end-data));
- Chunk c(idObj, chunkNumber, data, chunkLen);
+ int chunkLen = MIN(_chunkSize, (unsigned)(end-data));
+ GridFSChunk c(idObj, chunkNumber, data, chunkLen);
_client.insert( _chunksNS.c_str() , c._data );
chunkNumber++;
@@ -99,22 +104,24 @@ namespace mongo {
int chunkNumber = 0;
gridfs_offset length = 0;
while (!feof(fd)){
- boost::scoped_array<char>buf (new char[DEFAULT_CHUNK_SIZE]);
- char* bufPos = buf.get();
+ char * buf = new char[_chunkSize+1];
+ char* bufPos = buf;
unsigned int chunkLen = 0; // how much in the chunk now
- while(chunkLen != DEFAULT_CHUNK_SIZE && !feof(fd)){
- int readLen = fread(bufPos, 1, DEFAULT_CHUNK_SIZE - chunkLen, fd);
+ while(chunkLen != _chunkSize && !feof(fd)){
+ int readLen = fread(bufPos, 1, _chunkSize - chunkLen, fd);
chunkLen += readLen;
bufPos += readLen;
- assert(chunkLen <= DEFAULT_CHUNK_SIZE);
+ assert(chunkLen <= _chunkSize);
}
- Chunk c(idObj, chunkNumber, buf.get(), chunkLen);
+ GridFSChunk c(idObj, chunkNumber, buf, chunkLen);
_client.insert( _chunksNS.c_str() , c._data );
length += chunkLen;
chunkNumber++;
+ delete[] buf;
}
if (fd != stdin)
@@ -125,7 +132,7 @@ namespace mongo {
return insertFile((remoteName.empty() ? fileName : remoteName), id, length, contentType);
}
- BSONObj GridFS::insertFile(const string& name, const OID& id, unsigned length, const string& contentType){
+ BSONObj GridFS::insertFile(const string& name, const OID& id, gridfs_offset length, const string& contentType){
BSONObj res;
if ( ! _client.runCommand( _dbName.c_str() , BSON( "filemd5" << id << "root" << _prefix ) , res ) )
@@ -134,12 +141,17 @@ namespace mongo {
BSONObjBuilder file;
file << "_id" << id
<< "filename" << name
- << "length" << (unsigned) length
- << "chunkSize" << DEFAULT_CHUNK_SIZE
+ << "chunkSize" << _chunkSize
<< "uploadDate" << DATENOW
<< "md5" << res["md5"]
;
+ if (length < 1024*1024*1024){ // 2^30
+ file << "length" << (int) length;
+ }else{
+ file << "length" << (long long) length;
+ }
+
if (!contentType.empty())
file << "contentType" << contentType;
@@ -190,7 +202,7 @@ namespace mongo {
return meta_element.embeddedObject();
}
- Chunk GridFile::getChunk( int n ){
+ GridFSChunk GridFile::getChunk( int n ){
_exists();
BSONObjBuilder b;
b.appendAs( _obj["_id"] , "files_id" );
@@ -198,7 +210,7 @@ namespace mongo {
BSONObj o = _grid->_client.findOne( _grid->_chunksNS.c_str() , b.obj() );
uassert( 10014 , "chunk is empty!" , ! o.isEmpty() );
- return Chunk(o);
+ return GridFSChunk(o);
}
gridfs_offset GridFile::write( ostream & out ){
@@ -207,7 +219,7 @@ namespace mongo {
const int num = getNumChunks();
for ( int i=0; i<num; i++ ){
- Chunk c = getChunk( i );
+ GridFSChunk c = getChunk( i );
int len;
const char * data = c.data( len );
@@ -222,6 +234,7 @@ namespace mongo {
return write( cout );
} else {
ofstream out(where.c_str() , ios::out | ios::binary );
+ uassert(13325, "couldn't open file: " + where, out.is_open() );
return write( out );
}
}
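
A usage sketch for the GridFS API above; database name, file names, and chunk size are illustrative:

    void gridfsDemo( DBClientConnection& conn ) {
        GridFS fs( conn , "test" );              // uses the default "fs" prefix
        fs.setChunkSize( 1024 * 1024 );          // 1MB chunks for new files

        BSONObj meta = fs.storeFile( "/tmp/data.bin" , "data.bin" ,
                                     "application/octet-stream" );
        cout << "stored: " << meta << endl;

        GridFile f = fs.findFile( "data.bin" );
        if ( f.exists() )
            f.write( "/tmp/data.copy" );         // stream back to disk
    }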
diff --git a/client/gridfs.h b/client/gridfs.h
index 3165d5f..1c55f79 100644
--- a/client/gridfs.h
+++ b/client/gridfs.h
@@ -18,6 +18,7 @@
#pragma once
#include "dbclient.h"
+#include "redef_macros.h"
namespace mongo {
@@ -26,26 +27,19 @@ namespace mongo {
class GridFS;
class GridFile;
- class Chunk {
+ class GridFSChunk {
public:
- Chunk( BSONObj data );
- Chunk( BSONObj fileId , int chunkNumber , const char * data , int len );
+ GridFSChunk( BSONObj data );
+ GridFSChunk( BSONObj fileId , int chunkNumber , const char * data , int len );
int len(){
int len;
- const char * data = _data["data"].binData( len );
- int * foo = (int*)data;
- assert( len - 4 == foo[0] );
- return len - 4;
+ _data["data"].binDataClean( len );
+ return len;
}
const char * data( int & len ){
- const char * data = _data["data"].binData( len );
- int * foo = (int*)data;
- assert( len - 4 == foo[0] );
-
- len = len - 4;
- return data + 4;
+ return _data["data"].binDataClean( len );
}
private:
@@ -68,6 +62,11 @@ namespace mongo {
~GridFS();
/**
+ * @param size new chunk size in bytes, applied to files stored after this call
+ */
+ void setChunkSize(unsigned int size);
+
+ /**
* puts the file reference by fileName into the db
* @param fileName local filename relative to process
* @param remoteName optional filename to use for file stored in GridFS
@@ -122,9 +121,10 @@ namespace mongo {
string _prefix;
string _filesNS;
string _chunksNS;
+ unsigned int _chunkSize;
// insert fileobject. All chunks must be in DB.
- BSONObj insertFile(const string& name, const OID& id, unsigned length, const string& contentType);
+ BSONObj insertFile(const string& name, const OID& id, gridfs_offset length, const string& contentType);
friend class GridFile;
};
@@ -176,7 +176,7 @@ namespace mongo {
return (int) ceil( (double)getContentLength() / (double)getChunkSize() );
}
- Chunk getChunk( int n );
+ GridFSChunk getChunk( int n );
/**
write the file to the output stream
@@ -200,4 +200,4 @@ namespace mongo {
};
}
-
+#include "undef_macros.h"
diff --git a/client/model.cpp b/client/model.cpp
index 3978105..7861b91 100644
--- a/client/model.cpp
+++ b/client/model.cpp
@@ -15,7 +15,7 @@
* limitations under the License.
*/
-#include "stdafx.h"
+#include "pch.h"
#include "model.h"
#include "connpool.h"
@@ -57,6 +57,31 @@ namespace mongo {
BSONObjBuilder b;
serialize( b );
+ BSONElement myId;
+ {
+ BSONObjIterator i = b.iterator();
+ while ( i.more() ){
+ BSONElement e = i.next();
+ if ( strcmp( e.fieldName() , "_id" ) == 0 ){
+ myId = e;
+ break;
+ }
+ }
+ }
+
+ if ( myId.type() ){
+ if ( _id.isEmpty() ){
+ _id = myId.wrap();
+ }
+ else if ( myId.woCompare( _id.firstElement() ) ){
+ stringstream ss;
+ ss << "_id from serialize and stored differ: ";
+ ss << '[' << myId << "] != ";
+ ss << '[' << _id.firstElement() << ']';
+ throw UserException( 13121 , ss.str() );
+ }
+ }
+
if ( _id.isEmpty() ){
OID oid;
oid.init();
@@ -69,18 +94,22 @@ namespace mongo {
log(4) << "inserted new model " << getNS() << " " << o << endl;
}
else {
- BSONElement id = _id["_id"];
- b.append( id );
+ if ( myId.eoo() ){
+ myId = _id["_id"];
+ b.append( myId );
+ }
+
+ assert( ! myId.eoo() );
BSONObjBuilder qb;
- qb.append( id );
+ qb.append( myId );
BSONObj q = qb.obj();
BSONObj o = b.obj();
- log(4) << "updated old model" << getNS() << " " << q << " " << o << endl;
+ log(4) << "updated model" << getNS() << " " << q << " " << o << endl;
- conn->update( getNS() , q , o );
+ conn->update( getNS() , q , o , true );
}
@@ -94,4 +123,16 @@ namespace mongo {
throw UserException( 9003 , (string)"error on Model::save: " + errmsg );
}
+ BSONObj Model::toObject(){
+ BSONObjBuilder b;
+ serialize( b );
+ return b.obj();
+ }
+
+ void Model::append( const char * name , BSONObjBuilder& b ){
+ BSONObjBuilder bb( b.subobjStart( name ) );
+ serialize( bb );
+ bb.done();
+ }
+
} // namespace mongo
diff --git a/client/model.h b/client/model.h
index f3a63ad..108efc0 100644
--- a/client/model.h
+++ b/client/model.h
@@ -18,6 +18,7 @@
#pragma once
#include "dbclient.h"
+#include "redef_macros.h"
namespace mongo {
@@ -40,7 +41,9 @@ namespace mongo {
virtual const char * getNS() = 0;
virtual void serialize(BSONObjBuilder& to) = 0;
virtual void unserialize(const BSONObj& from) = 0;
-
+ virtual BSONObj toObject();
+ virtual void append( const char * name , BSONObjBuilder& b );
+
virtual string modelServer() = 0;
/** Load a single object.
@@ -55,3 +58,5 @@ namespace mongo {
};
} // namespace mongo
+
+#include "undef_macros.h"
diff --git a/client/parallel.cpp b/client/parallel.cpp
index bd29013..eeadb89 100644
--- a/client/parallel.cpp
+++ b/client/parallel.cpp
@@ -16,12 +16,13 @@
*/
-#include "stdafx.h"
+#include "pch.h"
#include "parallel.h"
#include "connpool.h"
#include "../db/queryutil.h"
#include "../db/dbmessage.h"
#include "../s/util.h"
+#include "../s/shard.h"
namespace mongo {
@@ -31,8 +32,13 @@ namespace mongo {
_ns = q.ns;
_query = q.query.copy();
_options = q.queryOptions;
- _fields = q.fields;
+ _fields = q.fields.copy();
+ _batchSize = q.ntoreturn;
+ if ( _batchSize == 1 )
+ _batchSize = 2;
+
_done = false;
+ _didInit = false;
}
ClusteredCursor::ClusteredCursor( const string& ns , const BSONObj& q , int options , const BSONObj& fields ){
@@ -40,37 +46,84 @@ namespace mongo {
_query = q.getOwned();
_options = options;
_fields = fields.getOwned();
+ _batchSize = 0;
+
_done = false;
+ _didInit = false;
}
ClusteredCursor::~ClusteredCursor(){
_done = true; // just in case
}
+
+ void ClusteredCursor::init(){
+ if ( _didInit )
+ return;
+ _didInit = true;
+ _init();
+ }
- auto_ptr<DBClientCursor> ClusteredCursor::query( const string& server , int num , BSONObj extra ){
+ auto_ptr<DBClientCursor> ClusteredCursor::query( const string& server , int num , BSONObj extra , int skipLeft ){
uassert( 10017 , "cursor already done" , ! _done );
+ assert( _didInit );
BSONObj q = _query;
if ( ! extra.isEmpty() ){
q = concatQuery( q , extra );
}
- ScopedDbConnection conn( server );
- checkShardVersion( conn.conn() , _ns );
+ ShardConnection conn( server , _ns );
+
+ if ( conn.setVersion() ){
+ conn.done();
+ throw StaleConfigException( _ns , "ClusteredCursor::query ShardConnection had to change" , true );
+ }
+
+ if ( logLevel >= 5 ){
+ log(5) << "ClusteredCursor::query (" << type() << ") server:" << server
+ << " ns:" << _ns << " query:" << q << " num:" << num
+ << " _fields:" << _fields << " options: " << _options << endl;
+ }
+
+ auto_ptr<DBClientCursor> cursor =
+ conn->query( _ns , q , num , 0 , ( _fields.isEmpty() ? 0 : &_fields ) , _options , _batchSize == 0 ? 0 : _batchSize + skipLeft );
- log(5) << "ClusteredCursor::query server:" << server << " ns:" << _ns << " query:" << q << " num:" << num << " _fields:" << _fields << " options: " << _options << endl;
- auto_ptr<DBClientCursor> cursor = conn->query( _ns.c_str() , q , num , 0 , ( _fields.isEmpty() ? 0 : &_fields ) , _options );
- if ( cursor->hasResultFlag( QueryResult::ResultFlag_ShardConfigStale ) )
+ assert( cursor.get() );
+
+ if ( cursor->hasResultFlag( ResultFlag_ShardConfigStale ) ){
+ conn.done();
throw StaleConfigException( _ns , "ClusteredCursor::query" );
+ }
+
+ if ( cursor->hasResultFlag( ResultFlag_ErrSet ) ){
+ conn.done();
+ BSONObj o = cursor->next();
+ throw UserException( o["code"].numberInt() , o["$err"].String() );
+ }
+
+
+ cursor->attach( &conn );
conn.done();
return cursor;
}
+ BSONObj ClusteredCursor::explain( const string& server , BSONObj extra ){
+ BSONObj q = _query;
+ if ( ! extra.isEmpty() ){
+ q = concatQuery( q , extra );
+ }
+
+ ShardConnection conn( server , _ns );
+ BSONObj o = conn->findOne( _ns , Query( q ).explain() );
+ conn.done();
+ return o;
+ }
+
BSONObj ClusteredCursor::concatQuery( const BSONObj& query , const BSONObj& extraFilter ){
if ( ! query.hasField( "query" ) )
return _concatFilter( query , extraFilter );
-
+
BSONObjBuilder b;
BSONObjIterator i( query );
while ( i.more() ){
@@ -94,6 +147,112 @@ namespace mongo {
// TODO: should do some simplification here if possible, ideally
}
+ BSONObj ClusteredCursor::explain(){
+ BSONObjBuilder b;
+ b.append( "clusteredType" , type() );
+
+ long long nscanned = 0;
+ long long nscannedObjects = 0;
+ long long n = 0;
+ long long millis = 0;
+ double numExplains = 0;
+
+ map<string,list<BSONObj> > out;
+ {
+ _explain( out );
+
+ BSONObjBuilder x( b.subobjStart( "shards" ) );
+ for ( map<string,list<BSONObj> >::iterator i=out.begin(); i!=out.end(); ++i ){
+ string shard = i->first;
+ list<BSONObj> l = i->second;
+ BSONArrayBuilder y( x.subarrayStart( shard.c_str() ) );
+ for ( list<BSONObj>::iterator j=l.begin(); j!=l.end(); ++j ){
+ BSONObj temp = *j;
+ y.append( temp );
+
+ nscanned += temp["nscanned"].numberLong();
+ nscannedObjects += temp["nscannedObjects"].numberLong();
+ n += temp["n"].numberLong();
+ millis += temp["millis"].numberLong();
+ numExplains++;
+ }
+ y.done();
+ }
+ x.done();
+ }
+
+ b.appendNumber( "nscanned" , nscanned );
+ b.appendNumber( "nscannedObjects" , nscannedObjects );
+ b.appendNumber( "n" , n );
+ b.appendNumber( "millisTotal" , millis );
+ b.append( "millisAvg" , (int)((double)millis / numExplains ) );
+ b.append( "numQueries" , (int)numExplains );
+ b.append( "numShards" , (int)out.size() );
+
+ return b.obj();
+ }
+
+ // -------- FilteringClientCursor -----------
+ FilteringClientCursor::FilteringClientCursor( const BSONObj filter )
+ : _matcher( filter ) , _done( true ){
+ }
+
+ FilteringClientCursor::FilteringClientCursor( auto_ptr<DBClientCursor> cursor , const BSONObj filter )
+ : _matcher( filter ) , _cursor( cursor ) , _done( cursor.get() == 0 ){
+ }
+
+ FilteringClientCursor::~FilteringClientCursor(){
+ }
+
+ void FilteringClientCursor::reset( auto_ptr<DBClientCursor> cursor ){
+ _cursor = cursor;
+ _next = BSONObj();
+ _done = _cursor.get() == 0;
+ }
+
+ bool FilteringClientCursor::more(){
+ if ( ! _next.isEmpty() )
+ return true;
+
+ if ( _done )
+ return false;
+
+ _advance();
+ return ! _next.isEmpty();
+ }
+
+ BSONObj FilteringClientCursor::next(){
+ assert( ! _next.isEmpty() );
+ assert( ! _done );
+
+ BSONObj ret = _next;
+ _next = BSONObj();
+ _advance();
+ return ret;
+ }
+
+ BSONObj FilteringClientCursor::peek(){
+ if ( _next.isEmpty() )
+ _advance();
+ return _next;
+ }
+
+ void FilteringClientCursor::_advance(){
+ assert( _next.isEmpty() );
+ if ( ! _cursor.get() || _done )
+ return;
+
+ while ( _cursor->more() ){
+ _next = _cursor->next();
+ if ( _matcher.matches( _next ) ){
+ if ( ! _cursor->moreInCurrentBatch() )
+ _next = _next.getOwned();
+ return;
+ }
+ _next = BSONObj();
+ }
+ _done = true;
+ }
// -------- SerialServerClusteredCursor -----------
@@ -107,10 +266,21 @@ namespace mongo {
sort( _servers.rbegin() , _servers.rend() );
_serverIndex = 0;
+
+ _needToSkip = q.ntoskip;
}
bool SerialServerClusteredCursor::more(){
- if ( _current.get() && _current->more() )
+
+ // TODO: optimize by sending the skip with the first query, then back-counting;
+ // tricky when the 1st server has nothing left after the skip,
+ // since it would need to report how many it skipped
+ while ( _needToSkip > 0 && _current.more() ){
+ _current.next();
+ _needToSkip--;
+ }
+
+ if ( _current.more() )
return true;
if ( _serverIndex >= _servers.size() ){
@@ -119,17 +289,21 @@ namespace mongo {
ServerAndQuery& sq = _servers[_serverIndex++];
- _current = query( sq._server , 0 , sq._extra );
- if ( _current->more() )
- return true;
-
- // this sq has nothing, so keep looking
+ _current.reset( query( sq._server , 0 , sq._extra ) );
return more();
}
BSONObj SerialServerClusteredCursor::next(){
uassert( 10018 , "no more items" , more() );
- return _current->next();
+ return _current.next();
+ }
+
+ void SerialServerClusteredCursor::_explain( map< string,list<BSONObj> >& out ){
+ for ( unsigned i=0; i<_servers.size(); i++ ){
+ ServerAndQuery& sq = _servers[i];
+ list<BSONObj> & l = out[sq._server];
+ l.push_back( explain( sq._server , sq._extra ) );
+ }
}
// -------- ParallelSortClusteredCursor -----------
@@ -138,7 +312,8 @@ namespace mongo {
const BSONObj& sortKey )
: ClusteredCursor( q ) , _servers( servers ){
_sortKey = sortKey.getOwned();
- _init();
+ _needToSkip = q.ntoskip;
+ _finishCons();
}
ParallelSortClusteredCursor::ParallelSortClusteredCursor( const set<ServerAndQuery>& servers , const string& ns ,
@@ -146,85 +321,123 @@ namespace mongo {
int options , const BSONObj& fields )
: ClusteredCursor( ns , q.obj , options , fields ) , _servers( servers ){
_sortKey = q.getSort().copy();
- _init();
+ _needToSkip = 0;
+ _finishCons();
}
- void ParallelSortClusteredCursor::_init(){
+ void ParallelSortClusteredCursor::_finishCons(){
_numServers = _servers.size();
- _cursors = new auto_ptr<DBClientCursor>[_numServers];
- _nexts = new BSONObj[_numServers];
+ _cursors = 0;
+
+ if ( ! _sortKey.isEmpty() && ! _fields.isEmpty() ){
+ // we need to make sure the sort key is in the project
+ bool isNegative = false;
+ BSONObjBuilder b;
+ {
+ BSONObjIterator i( _fields );
+ while ( i.more() ){
+ BSONElement e = i.next();
+ b.append( e );
+ if ( ! e.trueValue() )
+ isNegative = true;
+ }
+ }
+
+ {
+ BSONObjIterator i( _sortKey );
+ while ( i.more() ){
+ BSONElement e = i.next();
+ BSONElement f = _fields.getField( e.fieldName() );
+ if ( isNegative ){
+ uassert( 13431 , "sort key can not be excluded from the projection" , f.eoo() );
+ }
+ else if ( f.eoo() ){
+ // add to projection
+ b.append( e );
+ }
+ }
+ }
+
+ _fields = b.obj();
+ }
+ }
+
+ void ParallelSortClusteredCursor::_init(){
+ assert( ! _cursors );
+ _cursors = new FilteringClientCursor[_numServers];
+ // TODO: parallelize
int num = 0;
- for ( set<ServerAndQuery>::iterator i = _servers.begin(); i!=_servers.end(); i++ ){
+ for ( set<ServerAndQuery>::iterator i = _servers.begin(); i!=_servers.end(); ++i ){
const ServerAndQuery& sq = *i;
- _cursors[num++] = query( sq._server , 0 , sq._extra );
+ _cursors[num++].reset( query( sq._server , 0 , sq._extra , _needToSkip ) );
}
}
ParallelSortClusteredCursor::~ParallelSortClusteredCursor(){
delete [] _cursors;
- delete [] _nexts;
+ _cursors = 0;
}
bool ParallelSortClusteredCursor::more(){
- for ( int i=0; i<_numServers; i++ ){
- if ( ! _nexts[i].isEmpty() )
- return true;
- if ( _cursors[i].get() && _cursors[i]->more() )
+ if ( _needToSkip > 0 ){
+ int n = _needToSkip;
+ _needToSkip = 0;
+
+ while ( n > 0 && more() ){
+ BSONObj x = next();
+ n--;
+ }
+
+ _needToSkip = n;
+ }
+
+ for ( int i=0; i<_numServers; i++ ){
+ if ( _cursors[i].more() )
return true;
}
return false;
}
BSONObj ParallelSortClusteredCursor::next(){
- advance();
-
BSONObj best = BSONObj();
int bestFrom = -1;
for ( int i=0; i<_numServers; i++){
- if ( _nexts[i].isEmpty() )
+ if ( ! _cursors[i].more() )
continue;
+
+ BSONObj me = _cursors[i].peek();
if ( best.isEmpty() ){
- best = _nexts[i];
+ best = me;
bestFrom = i;
continue;
}
- int comp = best.woSortOrder( _nexts[i] , _sortKey );
+ int comp = best.woSortOrder( me , _sortKey , true );
if ( comp < 0 )
continue;
- best = _nexts[i];
+ best = me;
bestFrom = i;
}
-
+
uassert( 10019 , "no more elements" , ! best.isEmpty() );
- _nexts[bestFrom] = BSONObj();
+ _cursors[bestFrom].next();
return best;
}
- void ParallelSortClusteredCursor::advance(){
- for ( int i=0; i<_numServers; i++ ){
-
- if ( ! _nexts[i].isEmpty() ){
- // already have a good object there
- continue;
- }
-
- if ( ! _cursors[i]->more() ){
- // cursor is dead, oh well
- continue;
- }
-
- _nexts[i] = _cursors[i]->next();
+ void ParallelSortClusteredCursor::_explain( map< string,list<BSONObj> >& out ){
+ for ( set<ServerAndQuery>::iterator i=_servers.begin(); i!=_servers.end(); ++i ){
+ const ServerAndQuery& sq = *i;
+ list<BSONObj> & l = out[sq._server];
+ l.push_back( explain( sq._server , sq._extra ) );
}
-
+
}
// -----------------
@@ -252,6 +465,7 @@ namespace mongo {
ScopedDbConnection conn( res->_server );
res->_ok = conn->runCommand( res->_db , res->_cmd , res->_res );
res->_done = true;
+ conn.done();
}
shared_ptr<Future::CommandResult> Future::spawnCommand( const string& server , const string& db , const BSONObj& cmd ){
diff --git a/client/parallel.h b/client/parallel.h
index 88864ae..b60190a 100644
--- a/client/parallel.h
+++ b/client/parallel.h
@@ -16,16 +16,53 @@
*/
/**
- tools for wokring in parallel/sharded/clustered environment
+ tools for working in parallel/sharded/clustered environment
*/
-#include "../stdafx.h"
+#include "../pch.h"
#include "dbclient.h"
+#include "redef_macros.h"
#include "../db/dbmessage.h"
+#include "../db/matcher.h"
namespace mongo {
/**
+ * holder for a server address and a query to run
+ */
+ class ServerAndQuery {
+ public:
+ ServerAndQuery( const string& server , BSONObj extra = BSONObj() , BSONObj orderObject = BSONObj() ) :
+ _server( server ) , _extra( extra.getOwned() ) , _orderObject( orderObject.getOwned() ){
+ }
+
+ bool operator<( const ServerAndQuery& other ) const{
+ if ( ! _orderObject.isEmpty() )
+ return _orderObject.woCompare( other._orderObject ) < 0;
+
+ if ( _server < other._server )
+ return true;
+ if ( _server > other._server )
+ return false;
+ return _extra.woCompare( other._extra ) < 0;
+ }
+
+ string toString() const {
+ StringBuilder ss;
+ ss << "server:" << _server << " _extra:" << _extra.toString() << " _orderObject:" << _orderObject.toString();
+ return ss.str();
+ }
+
+ operator string() const {
+ return toString();
+ }
+
+ string _server;
+ BSONObj _extra;
+ BSONObj _orderObject;
+ };
+
+ /**
* this is a cursor that works over a set of servers
* can be used in serial/parallel as controlled by subclasses
*/
@@ -34,7 +71,10 @@ namespace mongo {
ClusteredCursor( QueryMessage& q );
ClusteredCursor( const string& ns , const BSONObj& q , int options=0 , const BSONObj& fields=BSONObj() );
virtual ~ClusteredCursor();
-
+
+ /** call before using */
+ void init();
+
virtual bool more() = 0;
virtual BSONObj next() = 0;
@@ -42,53 +82,105 @@ namespace mongo {
virtual string type() const = 0;
+ virtual BSONObj explain();
+
protected:
- auto_ptr<DBClientCursor> query( const string& server , int num = 0 , BSONObj extraFilter = BSONObj() );
+
+ virtual void _init() = 0;
+ auto_ptr<DBClientCursor> query( const string& server , int num = 0 , BSONObj extraFilter = BSONObj() , int skipLeft = 0 );
+ BSONObj explain( const string& server , BSONObj extraFilter = BSONObj() );
+
static BSONObj _concatFilter( const BSONObj& filter , const BSONObj& extraFilter );
+ virtual void _explain( map< string,list<BSONObj> >& out ) = 0;
+
string _ns;
BSONObj _query;
int _options;
BSONObj _fields;
+ int _batchSize;
+
+ bool _didInit;
bool _done;
};
- /**
- * holder for a server address and a query to run
- */
- class ServerAndQuery {
+ class FilteringClientCursor {
public:
- ServerAndQuery( const string& server , BSONObj extra = BSONObj() , BSONObj orderObject = BSONObj() ) :
- _server( server ) , _extra( extra.getOwned() ) , _orderObject( orderObject.getOwned() ){
+ FilteringClientCursor( const BSONObj filter = BSONObj() );
+ FilteringClientCursor( auto_ptr<DBClientCursor> cursor , const BSONObj filter = BSONObj() );
+ ~FilteringClientCursor();
+
+ void reset( auto_ptr<DBClientCursor> cursor );
+
+ bool more();
+ BSONObj next();
+
+ BSONObj peek();
+ private:
+ void _advance();
+
+ Matcher _matcher;
+ auto_ptr<DBClientCursor> _cursor;
+
+ BSONObj _next;
+ bool _done;
+ };
+
+
+ class Servers {
+ public:
+ Servers(){
+ }
+
+ void add( const ServerAndQuery& s ){
+ add( s._server , s._extra );
+ }
+
+ void add( const string& server , const BSONObj& filter ){
+ vector<BSONObj>& mine = _filters[server];
+ mine.push_back( filter.getOwned() );
}
+
+ // TODO: pick a less horrible name
+ class View {
+ View( const Servers* s ){
+ for ( map<string, vector<BSONObj> >::const_iterator i=s->_filters.begin(); i!=s->_filters.end(); ++i ){
+ _servers.push_back( i->first );
+ _filters.push_back( i->second );
+ }
+ }
+ public:
+ int size() const {
+ return _servers.size();
+ }
- bool operator<( const ServerAndQuery& other ) const{
- if ( ! _orderObject.isEmpty() )
- return _orderObject.woCompare( other._orderObject ) < 0;
+ string getServer( int n ) const {
+ return _servers[n];
+ }
+
+ vector<BSONObj> getFilter( int n ) const {
+ return _filters[ n ];
+ }
- if ( _server < other._server )
- return true;
- if ( other._server > _server )
- return false;
- return _extra.woCompare( other._extra ) < 0;
- }
+ private:
+ vector<string> _servers;
+ vector< vector<BSONObj> > _filters;
- string toString() const {
- StringBuilder ss;
- ss << "server:" << _server << " _extra:" << _extra << " _orderObject:" << _orderObject;
- return ss.str();
- }
+ friend class Servers;
+ };
- operator string() const {
- return toString();
+ View view() const {
+ return View( this );
}
+
- string _server;
- BSONObj _extra;
- BSONObj _orderObject;
+ private:
+ map<string, vector<BSONObj> > _filters;
+
+ friend class View;
};
@@ -102,11 +194,18 @@ namespace mongo {
virtual bool more();
virtual BSONObj next();
virtual string type() const { return "SerialServer"; }
- private:
+
+ protected:
+ virtual void _explain( map< string,list<BSONObj> >& out );
+
+ void _init(){}
+
vector<ServerAndQuery> _servers;
unsigned _serverIndex;
- auto_ptr<DBClientCursor> _current;
+ FilteringClientCursor _current;
+
+ int _needToSkip;
};
@@ -123,17 +222,18 @@ namespace mongo {
virtual bool more();
virtual BSONObj next();
virtual string type() const { return "ParallelSort"; }
- private:
+ protected:
+ void _finishCons();
void _init();
-
- void advance();
+
+ virtual void _explain( map< string,list<BSONObj> >& out );
int _numServers;
set<ServerAndQuery> _servers;
BSONObj _sortKey;
-
- auto_ptr<DBClientCursor> * _cursors;
- BSONObj * _nexts;
+
+ FilteringClientCursor * _cursors;
+ int _needToSkip;
};
/**
@@ -193,3 +293,5 @@ namespace mongo {
}
+
+#include "undef_macros.h"
diff --git a/client/redef_macros.h b/client/redef_macros.h
new file mode 100644
index 0000000..dd2e66f
--- /dev/null
+++ b/client/redef_macros.h
@@ -0,0 +1,55 @@
+/** @file redef_macros.h - redefine macros from undef_macros.h */
+
+/* Copyright 2009 10gen Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// If you define a new global un-prefixed macro, please add it here and in undef_macros
+
+// #pragma once // this file is intended to be processed multiple times
+
+#if defined(MONGO_MACROS_CLEANED)
+
+// util/allocator.h
+#define malloc MONGO_malloc
+#define realloc MONGO_realloc
+
+// util/assert_util.h
+#define assert MONGO_assert
+#define dassert MONGO_dassert
+#define wassert MONGO_wassert
+#define massert MONGO_massert
+#define uassert MONGO_uassert
+#define BOOST_CHECK_EXCEPTION MONGO_BOOST_CHECK_EXCEPTION
+#define DESTRUCTOR_GUARD MONGO_DESTRUCTOR_GUARD
+
+// util/goodies.h
+#define PRINT MONGO_PRINT
+#define PRINTFL MONGO_PRINTFL
+#define asctime MONGO_asctime
+#define gmtime MONGO_gmtime
+#define localtime MONGO_localtime
+#define ctime MONGO_ctime
+
+// util/debug_util.h
+#define DEV MONGO_DEV
+#define DEBUGGING MONGO_DEBUGGING
+#define SOMETIMES MONGO_SOMETIMES
+#define OCCASIONALLY MONGO_OCCASIONALLY
+#define RARELY MONGO_RARELY
+#define ONCE MONGO_ONCE
+
+#undef MONGO_MACROS_CLEANED
+#endif
+
diff --git a/client/syncclusterconnection.cpp b/client/syncclusterconnection.cpp
index 0a8fc79..5324b6c 100644
--- a/client/syncclusterconnection.cpp
+++ b/client/syncclusterconnection.cpp
@@ -16,15 +16,29 @@
*/
-#include "stdafx.h"
+#include "pch.h"
#include "syncclusterconnection.h"
#include "../db/dbmessage.h"
// error codes 8000-8009
namespace mongo {
+
+ SyncClusterConnection::SyncClusterConnection( const list<HostAndPort> & L) : _mutex("SyncClusterConnection") {
+ {
+ stringstream s;
+ int n=0;
+ for( list<HostAndPort>::const_iterator i = L.begin(); i != L.end(); i++ ) {
+ if( ++n > 1 ) s << ',';
+ s << i->toString();
+ }
+ _address = s.str();
+ }
+ for( list<HostAndPort>::const_iterator i = L.begin(); i != L.end(); i++ )
+ _connect( i->toString() );
+ }
- SyncClusterConnection::SyncClusterConnection( string commaSeperated ){
+ SyncClusterConnection::SyncClusterConnection( string commaSeperated ) : _mutex("SyncClusterConnection") {
_address = commaSeperated;
string::size_type idx;
while ( ( idx = commaSeperated.find( ',' ) ) != string::npos ){
@@ -36,7 +50,7 @@ namespace mongo {
uassert( 8004 , "SyncClusterConnection needs 3 servers" , _conns.size() == 3 );
}
- SyncClusterConnection::SyncClusterConnection( string a , string b , string c ){
+ SyncClusterConnection::SyncClusterConnection( string a , string b , string c ) : _mutex("SyncClusterConnection") {
_address = a + "," + b + "," + c;
// connect to all even if not working
_connect( a );
@@ -44,7 +58,7 @@ namespace mongo {
_connect( c );
}
- SyncClusterConnection::SyncClusterConnection( SyncClusterConnection& prev ){
+ SyncClusterConnection::SyncClusterConnection( SyncClusterConnection& prev ) : _mutex("SyncClusterConnection") {
assert(0);
}
@@ -55,6 +69,7 @@ namespace mongo {
}
bool SyncClusterConnection::prepare( string& errmsg ){
+ _lastErrors.clear();
return fsync( errmsg );
}
@@ -79,7 +94,7 @@ namespace mongo {
}
void SyncClusterConnection::_checkLast(){
- vector<BSONObj> all;
+ _lastErrors.clear();
vector<string> errors;
for ( size_t i=0; i<_conns.size(); i++ ){
@@ -95,17 +110,17 @@ namespace mongo {
catch ( ... ){
err += "unknown failure";
}
- all.push_back( res );
+ _lastErrors.push_back( res.getOwned() );
errors.push_back( err );
}
-
- assert( all.size() == errors.size() && all.size() == _conns.size() );
+
+ assert( _lastErrors.size() == errors.size() && _lastErrors.size() == _conns.size() );
stringstream err;
bool ok = true;
for ( size_t i = 0; i<_conns.size(); i++ ){
- BSONObj res = all[i];
+ BSONObj res = _lastErrors[i];
if ( res["ok"].trueValue() && res["fsyncFiles"].numberInt() > 0 )
continue;
ok = false;
@@ -117,35 +132,71 @@ namespace mongo {
throw UserException( 8001 , (string)"SyncClusterConnection write op failed: " + err.str() );
}
+ BSONObj SyncClusterConnection::getLastErrorDetailed(){
+ if ( _lastErrors.size() )
+ return _lastErrors[0];
+ return DBClientBase::getLastErrorDetailed();
+ }
+
void SyncClusterConnection::_connect( string host ){
log() << "SyncClusterConnection connecting to [" << host << "]" << endl;
DBClientConnection * c = new DBClientConnection( true );
string errmsg;
if ( ! c->connect( host , errmsg ) )
log() << "SyncClusterConnection connect fail to: " << host << " errmsg: " << errmsg << endl;
+ _connAddresses.push_back( host );
_conns.push_back( c );
}
- auto_ptr<DBClientCursor> SyncClusterConnection::query(const string &ns, Query query, int nToReturn, int nToSkip,
- const BSONObj *fieldsToReturn, int queryOptions, int batchSize ){
+ bool SyncClusterConnection::callRead( Message& toSend , Message& response ){
+ // TODO: need to save state of which one to go back to somehow...
+ return _conns[0]->callRead( toSend , response );
+ }
+ BSONObj SyncClusterConnection::findOne(const string &ns, const Query& query, const BSONObj *fieldsToReturn, int queryOptions) {
+
if ( ns.find( ".$cmd" ) != string::npos ){
string cmdName = query.obj.firstElement().fieldName();
- int lockType = 0;
-
- map<string,int>::iterator i = _lockTypes.find( cmdName );
- if ( i == _lockTypes.end() ){
- BSONObj info;
- uassert( 13053 , "help failed" , _commandOnActive( "admin" , BSON( cmdName << "1" << "help" << 1 ) , info ) );
- lockType = info["lockType"].numberInt();
- _lockTypes[cmdName] = lockType;
- }
- else {
- lockType = i->second;
+ int lockType = _lockType( cmdName );
+
+ if ( lockType > 0 ){ // write $cmd
+ string errmsg;
+ if ( ! prepare( errmsg ) )
+ throw UserException( 13104 , (string)"SyncClusterConnection::findOne prepare failed: " + errmsg );
+
+ vector<BSONObj> all;
+ for ( size_t i=0; i<_conns.size(); i++ ){
+ all.push_back( _conns[i]->findOne( ns , query , 0 , queryOptions ).getOwned() );
+ }
+
+ _checkLast();
+
+ for ( size_t i=0; i<all.size(); i++ ){
+ BSONObj temp = all[i];
+ if ( isOk( temp ) )
+ continue;
+ stringstream ss;
+ ss << "write $cmd failed on a shard: " << temp.jsonString();
+ ss << " " << _conns[i]->toString();
+ throw UserException( 13105 , ss.str() );
+ }
+
+ return all[0];
}
-
- uassert( 13054 , (string)"write $cmd not supported in SyncClusterConnection: " + cmdName , lockType <= 0 );
+ }
+
+ return DBClientBase::findOne( ns , query , fieldsToReturn , queryOptions );
+ }
+
+
+ auto_ptr<DBClientCursor> SyncClusterConnection::query(const string &ns, Query query, int nToReturn, int nToSkip,
+ const BSONObj *fieldsToReturn, int queryOptions, int batchSize ){
+ _lastErrors.clear();
+ if ( ns.find( ".$cmd" ) != string::npos ){
+ string cmdName = query.obj.firstElement().fieldName();
+ int lockType = _lockType( cmdName );
+ uassert( 13054 , (string)"write $cmd not supported in SyncClusterConnection::query for: " + cmdName , lockType <= 0 );
}
return _queryOnActive( ns , query , nToReturn , nToSkip , fieldsToReturn , queryOptions , batchSize );
@@ -185,6 +236,10 @@ namespace mongo {
}
void SyncClusterConnection::insert( const string &ns, BSONObj obj ){
+
+ uassert( 13119 , (string)"SyncClusterConnection::insert obj has to have an _id: " + obj.jsonString() ,
+ ns.find( ".system.indexes" ) != string::npos || obj["_id"].type() );
+
string errmsg;
if ( ! prepare( errmsg ) )
throw UserException( 8003 , (string)"SyncClusterConnection::insert prepare failed: " + errmsg );
@@ -201,19 +256,52 @@ namespace mongo {
}
void SyncClusterConnection::remove( const string &ns , Query query, bool justOne ){
- assert(0);
+ string errmsg;
+ if ( ! prepare( errmsg ) )
+ throw UserException( 8020 , (string)"SyncClusterConnection::remove prepare failed: " + errmsg );
+
+ for ( size_t i=0; i<_conns.size(); i++ ){
+ _conns[i]->remove( ns , query , justOne );
+ }
+
+ _checkLast();
}
void SyncClusterConnection::update( const string &ns , Query query , BSONObj obj , bool upsert , bool multi ){
- string errmsg;
- if ( ! prepare( errmsg ) )
- throw UserException( 8005 , (string)"SyncClusterConnection::udpate prepare failed: " + errmsg );
+
+ if ( upsert ){
+ uassert( 13120 , "SyncClusterConnection::update upsert query needs _id" , query.obj["_id"].type() );
+ }
+
+ if ( _writeConcern ){
+ string errmsg;
+ if ( ! prepare( errmsg ) )
+ throw UserException( 8005 , (string)"SyncClusterConnection::update prepare failed: " + errmsg );
+ }
for ( size_t i=0; i<_conns.size(); i++ ){
- _conns[i]->update( ns , query , obj , upsert , multi );
+ try {
+ _conns[i]->update( ns , query , obj , upsert , multi );
+ }
+ catch ( std::exception& ){
+ if ( _writeConcern )
+ throw; // rethrow in place to preserve the concrete exception type
+ }
}
- _checkLast();
+ if ( _writeConcern ){
+ _checkLast();
+ assert( _lastErrors.size() > 1 );
+
+ int a = _lastErrors[0]["n"].numberInt();
+ for ( unsigned i=1; i<_lastErrors.size(); i++ ){
+ int b = _lastErrors[i]["n"].numberInt();
+ if ( a == b )
+ continue;
+
+ throw UpdateNotTheSame( 8017 , "update not consistent" , _connAddresses , _lastErrors );
+ }
+ }
}
string SyncClusterConnection::_toString() const {
@@ -244,12 +332,41 @@ namespace mongo {
}
void SyncClusterConnection::say( Message &toSend ){
- assert(0);
+ string errmsg;
+ if ( ! prepare( errmsg ) )
+ throw UserException( 13397 , (string)"SyncClusterConnection::say prepare failed: " + errmsg );
+
+ for ( size_t i=0; i<_conns.size(); i++ ){
+ _conns[i]->say( toSend );
+ }
+
+ _checkLast();
}
void SyncClusterConnection::sayPiggyBack( Message &toSend ){
assert(0);
}
+ int SyncClusterConnection::_lockType( const string& name ){
+ {
+ scoped_lock lk(_mutex);
+ map<string,int>::iterator i = _lockTypes.find( name );
+ if ( i != _lockTypes.end() )
+ return i->second;
+ }
+
+ BSONObj info;
+ uassert( 13053 , "help failed" , _commandOnActive( "admin" , BSON( name << "1" << "help" << 1 ) , info ) );
+
+ int lockType = info["lockType"].numberInt();
+ scoped_lock lk(_mutex);
+ _lockTypes[name] = lockType;
+ return lockType;
+ }
+
+ void SyncClusterConnection::killCursor( long long cursorID ){
+ // should never need to do this
+ assert(0);
+ }
}
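insert(), remove(), update() and say() above all share one two-phase shape: prepare() clears _lastErrors and fsyncs every node, the operation is then sent on all three connections, and _checkLast() collects each node's getLastError into _lastErrors, throwing if any node failed. A condensed, runnable sketch of that control flow, with a hypothetical Node type in place of DBClientConnection:

    // Two-phase write sketch; as the header comment warns, a failure in
    // phase two leaves earlier nodes written -- there is no rollback.
    #include <stdexcept>
    #include <string>
    #include <vector>

    struct Node {                                    // hypothetical stand-in
        std::string name;
        bool healthy = true;
        std::vector<std::string> data;               // this node's "disk"
        bool fsync( std::string& errmsg ) {          // phase 1 probe
            if ( ! healthy ) errmsg = name + " unreachable";
            return healthy;
        }
        void apply( const std::string& op ) { data.push_back( op ); }
        bool lastErrorOk() const { return healthy; } // per-node getLastError
    };

    void twoPhaseWrite( std::vector<Node>& nodes, const std::string& op ) {
        std::string errmsg;
        for ( auto& n : nodes )                      // phase 1: all must fsync
            if ( ! n.fsync( errmsg ) )
                throw std::runtime_error( "prepare failed: " + errmsg );
        for ( auto& n : nodes )                      // phase 2: write everywhere
            n.apply( op );
        for ( auto& n : nodes )                      // then verify everywhere
            if ( ! n.lastErrorOk() )
                throw std::runtime_error( "write op failed on a node" );
    }

The new _lockType() helper supplies the read/write classification that decides whether a $cmd takes this path or goes to a single node, caching each command's lockType in _lockTypes under _mutex so the "help" round trip happens at most once per command name.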
diff --git a/client/syncclusterconnection.h b/client/syncclusterconnection.h
index e3411e1..d1115f7 100644
--- a/client/syncclusterconnection.h
+++ b/client/syncclusterconnection.h
@@ -1,4 +1,5 @@
-// syncclusterconnection.h
+// @file syncclusterconnection.h
+
/*
* Copyright 2010 10gen Inc.
*
@@ -16,25 +17,36 @@
*/
-#include "../stdafx.h"
+#include "../pch.h"
#include "dbclient.h"
+#include "redef_macros.h"
namespace mongo {
/**
- * this is a connection to a cluster of servers that operate as one
- * for super high durability
+ * This is a connection to a cluster of servers that operate as one
+ * for super high durability.
+ *
+ * Write operations are two-phase. First, all nodes are asked to fsync. If successful
+ * everywhere, the write is sent everywhere and then followed by an fsync. There is no
+ * rollback if a problem occurs during the second phase. Naturally, with all these fsyncs,
+ * these operations will be quite slow -- use sparingly.
+ *
+ * Read operations are sent to a single random node.
+ *
+ * The class checks whether a command is read style or write style: read
+ * commands go to a single node, write commands go to all nodes in two phases.
*/
class SyncClusterConnection : public DBClientBase {
public:
/**
- * @param commaSeperated should be 3 hosts comma seperated
+ * @param commaSeparated should be 3 hosts comma separated
*/
- SyncClusterConnection( string commaSeperated );
+ SyncClusterConnection( const list<HostAndPort> & );
+ SyncClusterConnection( string commaSeparated );
SyncClusterConnection( string a , string b , string c );
~SyncClusterConnection();
-
/**
* @return true if all servers are up and ready for writes
*/
@@ -47,6 +59,8 @@ namespace mongo {
// --- from DBClientInterface
+ virtual BSONObj findOne(const string &ns, const Query& query, const BSONObj *fieldsToReturn, int queryOptions);
+
virtual auto_ptr<DBClientCursor> query(const string &ns, Query query, int nToReturn, int nToSkip,
const BSONObj *fieldsToReturn, int queryOptions, int batchSize );
@@ -60,41 +74,65 @@ namespace mongo {
virtual void update( const string &ns , Query query , BSONObj obj , bool upsert , bool multi );
- virtual string toString(){
- return _toString();
- }
-
virtual bool call( Message &toSend, Message &response, bool assertOk );
virtual void say( Message &toSend );
virtual void sayPiggyBack( Message &toSend );
+
+ virtual void killCursor( long long cursorID );
virtual string getServerAddress() const { return _address; }
+ virtual bool isFailed() const { return false; }
+ virtual string toString() { return _toString(); }
- virtual bool isFailed() const {
- return false;
- }
+ virtual BSONObj getLastErrorDetailed();
+
+ virtual bool callRead( Message& toSend , Message& response );
+
+ virtual ConnectionString::ConnectionType type() const { return ConnectionString::SYNC; }
private:
-
SyncClusterConnection( SyncClusterConnection& prev );
-
- string _toString() const;
-
+ string _toString() const;
bool _commandOnActive(const string &dbname, const BSONObj& cmd, BSONObj &info, int options=0);
-
auto_ptr<DBClientCursor> _queryOnActive(const string &ns, Query query, int nToReturn, int nToSkip,
const BSONObj *fieldsToReturn, int queryOptions, int batchSize );
-
- bool _isReadOnly( const string& name );
-
+ int _lockType( const string& name );
void _checkLast();
-
void _connect( string host );
string _address;
+ vector<string> _connAddresses;
vector<DBClientConnection*> _conns;
map<string,int> _lockTypes;
+ mongo::mutex _mutex;
+
+ vector<BSONObj> _lastErrors;
};
+ class UpdateNotTheSame : public UserException {
+ public:
+ UpdateNotTheSame( int code , const string& msg , const vector<string>& addrs , const vector<BSONObj>& lastErrors )
+ : UserException( code , msg ) , _addrs( addrs ) , _lastErrors( lastErrors ){
+ assert( _addrs.size() == _lastErrors.size() );
+ }
+
+ virtual ~UpdateNotTheSame() throw() {
+ }
+
+ unsigned size() const {
+ return _addrs.size();
+ }
+ pair<string,BSONObj> operator[](unsigned i) const {
+ return make_pair( _addrs[i] , _lastErrors[i] );
+ }
+
+ private:
+
+ vector<string> _addrs;
+ vector<BSONObj> _lastErrors;
+ };
+
};
+
+#include "undef_macros.h"
diff --git a/client/undef_macros.h b/client/undef_macros.h
new file mode 100644
index 0000000..cce8692
--- /dev/null
+++ b/client/undef_macros.h
@@ -0,0 +1,58 @@
+/** @file undef_macros.h - remove mongo-specific macros that might cause issues */
+
+/* Copyright 2009 10gen Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// If you define a new global un-prefixed macro, please add it here and in redef_macros
+
+// #pragma once // this file is intended to be processed multiple times
+
+
+/** MONGO_EXPOSE_MACROS - when defined, indicates that you are compiling a mongo program rather
+ than just using the C++ driver.
+*/
+#if !defined(MONGO_EXPOSE_MACROS) && !defined(MONGO_MACROS_CLEANED)
+
+// util/allocator.h
+#undef malloc
+#undef realloc
+
+// util/assert_util.h
+#undef assert
+#undef dassert
+#undef wassert
+#undef massert
+#undef uassert
+#undef BOOST_CHECK_EXCEPTION
+#undef DESTRUCTOR_GUARD
+
+// util/goodies.h
+#undef PRINT
+#undef PRINTFL
+#undef asctime
+#undef gmtime
+#undef localtime
+#undef ctime
+
+// util/debug_util.h
+#undef DEV
+#undef DEBUGGING
+#undef SOMETIMES
+#undef OCCASIONALLY
+#undef RARELY
+#undef ONCE
+
+#define MONGO_MACROS_CLEANED
+#endif
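redef_macros.h and undef_macros.h are two halves of a sandwich (hence the deliberately commented-out #pragma once in each): a driver header restores the short un-prefixed macros for its own body, then scrubs them again so they cannot collide with application code. parallel.h and syncclusterconnection.h both adopt this pattern above; a skeletal example of a new driver header doing the same (the file, class, and error code are hypothetical):

    // my_feature.h -- hypothetical driver header showing the macro sandwich
    #pragma once

    #include "../pch.h"
    #include "dbclient.h"
    #include "redef_macros.h"    // restore assert/uassert/malloc/... shims

    namespace mongo {
        class MyFeature {
        public:
            void check( bool ok ) {
                // short names are usable here thanks to redef_macros.h
                uassert( 99999 , "check failed" , ok );  // hypothetical code
            }
        };
    }

    #include "undef_macros.h"    // scrub them again before user code resumes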
diff --git a/db/btree.cpp b/db/btree.cpp
index 0c8ca28..d646de8 100644
--- a/db/btree.cpp
+++ b/db/btree.cpp
@@ -16,7 +16,7 @@
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
-#include "stdafx.h"
+#include "pch.h"
#include "db.h"
#include "btree.h"
#include "pdfile.h"
@@ -55,8 +55,8 @@ namespace mongo {
}
int BucketBasics::Size() const {
- assert( _Size == BucketSize );
- return _Size;
+ assert( _wasSize == BucketSize );
+ return BucketSize;
}
inline void BucketBasics::setNotPacked() {
flags &= ~Packed;
@@ -84,7 +84,7 @@ namespace mongo {
bt_dmp=0;
}
- int BucketBasics::fullValidate(const DiskLoc& thisLoc, const BSONObj &order) {
+ int BucketBasics::fullValidate(const DiskLoc& thisLoc, const BSONObj &order, int *unusedCount) {
{
bool f = false;
assert( f = true );
@@ -107,18 +107,24 @@ namespace mongo {
for ( int i = 0; i < n; i++ ) {
_KeyNode& kn = k(i);
- if ( kn.isUsed() ) kc++;
+ if ( kn.isUsed() ) {
+ kc++;
+ } else {
+ if ( unusedCount ) {
+ ++( *unusedCount );
+ }
+ }
if ( !kn.prevChildBucket.isNull() ) {
DiskLoc left = kn.prevChildBucket;
BtreeBucket *b = left.btree();
wassert( b->parent == thisLoc );
- kc += b->fullValidate(kn.prevChildBucket, order);
+ kc += b->fullValidate(kn.prevChildBucket, order, unusedCount);
}
}
if ( !nextChild.isNull() ) {
BtreeBucket *b = nextChild.btree();
wassert( b->parent == thisLoc );
- kc += b->fullValidate(nextChild, order);
+ kc += b->fullValidate(nextChild, order, unusedCount);
}
return kc;
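The fullValidate() change above threads an optional int* accumulator through the recursion, so existing callers that do not care about unused keys pass nothing and pay nothing. A self-contained sketch of that pattern on a toy binary tree:

    // Optional-accumulator recursion: nullable pointer, incremented only
    // when the caller actually supplied a destination.
    #include <iostream>

    struct Node {
        bool used;
        Node* left;
        Node* right;
    };

    // returns the count of used nodes; optionally tallies unused ones
    int validate( const Node* n, int* unusedCount = 0 ) {
        if ( ! n )
            return 0;
        int kc = 0;
        if ( n->used )
            kc++;
        else if ( unusedCount )
            ++( *unusedCount );                  // same shape as fullValidate
        kc += validate( n->left, unusedCount );
        kc += validate( n->right, unusedCount );
        return kc;
    }

    int main() {
        Node a = { true, 0, 0 }, b = { false, 0, 0 };
        Node root = { true, &a, &b };
        int unused = 0;
        std::cout << validate( &root, &unused )       // 2
                  << " used, " << unused << " unused\n";   // 1 unused
    }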
@@ -126,7 +132,7 @@ namespace mongo {
int nDumped = 0;
- void BucketBasics::assertValid(const BSONObj &order, bool force) {
+ void BucketBasics::assertValid(const Ordering &order, bool force) {
if