Use of com.mongodb.DB in project jetty.project by eclipse.
The class MongoTest, method main.
public static void main(String... args) throws Exception {
    Mongo m = new Mongo("127.0.0.1", 27017);
    DB db = m.getDB("mydb");
    Set<String> colls = db.getCollectionNames();
    System.err.println("Colls=" + colls);
    DBCollection coll = db.getCollection("testCollection");
    BasicDBObject key = new BasicDBObject("id", "1234");
    BasicDBObject sets = new BasicDBObject("name", "value");
    BasicDBObject upsert = new BasicDBObject("$set", sets);
    WriteResult result = coll.update(key, upsert, true, false);
    System.err.println(result.getLastError());
    while (coll.count() > 0) {
        DBObject docZ = coll.findOne();
        System.err.println("removing " + docZ);
        if (docZ != null) {
            coll.remove(docZ);
        }
    }
}
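For comparison, a minimal sketch of the same upsert-and-drain logic against the current com.mongodb.client API; the host, database, collection, and field names are reused from the snippet above, and the class name MongoTestModern is illustrative.

import com.mongodb.client.MongoClient;
import com.mongodb.client.MongoClients;
import com.mongodb.client.MongoCollection;
import com.mongodb.client.model.Filters;
import com.mongodb.client.model.UpdateOptions;
import com.mongodb.client.model.Updates;
import org.bson.Document;

public class MongoTestModern {
    public static void main(String... args) {
        try (MongoClient client = MongoClients.create("mongodb://127.0.0.1:27017")) {
            MongoCollection<Document> coll = client.getDatabase("mydb").getCollection("testCollection");
            // Upsert: set "name" on the document matching {id: "1234"}, inserting it if absent.
            coll.updateOne(Filters.eq("id", "1234"), Updates.set("name", "value"),
                    new UpdateOptions().upsert(true));
            // Drain the collection one document at a time, mirroring the while loop above.
            Document doc;
            while ((doc = coll.find().first()) != null) {
                System.err.println("removing " + doc);
                coll.deleteOne(Filters.eq("_id", doc.get("_id")));
            }
        }
    }
}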
Use of com.mongodb.DB in project mongo-hadoop by mongodb.
The class ShardChunkMongoSplitter, method calculateSplits.
// Generate one split per chunk.
@Override
public List<InputSplit> calculateSplits() throws SplitFailedException {
    DB configDB = getConfigDB();
    DBCollection chunksCollection = configDB.getCollection("chunks");
    Map<String, List<String>> shardsMap;
    try {
        shardsMap = getShardsMap();
    } catch (Exception e) {
        // Couldn't determine the shard map, so abort the splitting.
        throw new SplitFailedException("Couldn't get shards information from config server", e);
    }
    return calculateSplitsFromChunks(chunksCollection.find().toArray(), shardsMap);
}
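On older server versions each document in config.chunks carries ns, min, max, and shard fields, which is what calculateSplitsFromChunks consumes. A hedged sketch of listing the chunks for a single namespace with the same legacy API; the host and namespace string are illustrative:

import com.mongodb.BasicDBObject;
import com.mongodb.DB;
import com.mongodb.DBCollection;
import com.mongodb.DBObject;
import com.mongodb.MongoClient;
import java.util.List;

public class ChunkDump {
    public static void main(String[] args) {
        MongoClient mongos = new MongoClient("localhost", 27017);
        DB configDB = mongos.getDB("config");
        DBCollection chunksCollection = configDB.getCollection("chunks");
        // Only fetch chunks for one namespace instead of every sharded collection.
        List<DBObject> chunks = chunksCollection.find(new BasicDBObject("ns", "mongo_hadoop.yield_historical.in")).toArray();
        for (DBObject chunk : chunks) {
            System.out.println("chunk " + chunk.get("_id") + " on shard " + chunk.get("shard")
                    + " covers " + chunk.get("min") + " .. " + chunk.get("max"));
        }
        mongos.close();
    }
}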
Use of com.mongodb.DB in project mongo-hadoop by mongodb.
The class TestSharded, method testDirectAccess.
public void testDirectAccess() {
    DBCollection collection = getMongos().getDB("mongo_hadoop").getCollection("yield_historical.out");
    collection.drop();
    // HADOOP61 - simulate a failed migration by having some docs from one chunk
    // also exist on another shard that does not own that chunk (duplicates)
    DB config = getMongos().getDB("config");
    DBObject chunk = config.getCollection("chunks").findOne(new BasicDBObject("shard", "sh01"));
    DBObject query = new BasicDBObject("_id",
        new BasicDBObject("$gte", ((DBObject) chunk.get("min")).get("_id"))
            .append("$lt", ((DBObject) chunk.get("max")).get("_id")));
    List<DBObject> data = toList(getMongos().getDB("mongo_hadoop").getCollection("yield_historical.in").find(query));
    DBCollection destination = getShard().getDB("mongo_hadoop").getCollection("yield_historical.in");
    for (DBObject doc : data) {
        destination.insert(doc, WriteConcern.UNACKNOWLEDGED);
    }
    MapReduceJob job = new MapReduceJob(TreasuryYieldXMLConfig.class.getName())
        .jar(JOBJAR_PATH)
        .param(SPLITS_SLAVE_OK, "true")
        .param(SPLITS_USE_SHARDS, "true")
        .param(SPLITS_USE_CHUNKS, "false")
        .inputUris(new MongoClientURIBuilder(getInputUri()).readPreference(ReadPreference.secondary()).build());
    if (isHadoopV1()) {
        job.outputCommitter(MongoOutputCommitter.class);
    }
    job.execute(isRunTestInVm());
    compareResults(collection, getReference());
    collection.drop();
    MapReduceJob jobWithChunks = new MapReduceJob(TreasuryYieldXMLConfig.class.getName())
        .jar(JOBJAR_PATH)
        .inputUris(new MongoClientURIBuilder(getInputUri()).readPreference(ReadPreference.secondary()).build())
        .param(SPLITS_SLAVE_OK, "true")
        .param(SPLITS_USE_SHARDS, "true")
        .param(SPLITS_USE_CHUNKS, "true");
    if (isHadoopV1()) {
        jobWithChunks.outputCommitter(MongoOutputCommitter.class);
    }
    jobWithChunks.execute(isRunTestInVm());
    compareResults(collection, getReference());
}
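The key step above is turning a chunk's min/max bounds into a range query on the shard key. A minimal standalone sketch of that step, assuming an _id shard key as in the test; the class and method names are illustrative:

import com.mongodb.BasicDBObject;
import com.mongodb.DBObject;

public final class ChunkRangeQuery {
    // Builds a query matching exactly the documents that fall inside the given chunk,
    // assuming the collection is sharded on _id. Chunk ranges are inclusive of min
    // and exclusive of max.
    static DBObject forChunk(final DBObject chunk) {
        Object min = ((DBObject) chunk.get("min")).get("_id");
        Object max = ((DBObject) chunk.get("max")).get("_id");
        return new BasicDBObject("_id", new BasicDBObject("$gte", min).append("$lt", max));
    }
}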
Use of com.mongodb.DB in project mongo-java-driver by mongodb.
The class Decimal128LegacyAPIQuickTour, method main.
/**
* Run this main method to see the output of this quick example.
*
* @param args takes an optional single argument for the connection string
*/
public static void main(final String[] args) {
    MongoClient mongoClient;
    if (args.length == 0) {
        // connect to the local database server
        mongoClient = new MongoClient();
    } else {
        mongoClient = new MongoClient(new MongoClientURI(args[0]));
    }
    // get handle to "mydb" database
    DB database = mongoClient.getDB("mydb");
    // get a handle to the "test" collection
    DBCollection collection = database.getCollection("test");
    // drop all the data in it
    collection.drop();
    // make a document and insert it
    BasicDBObject doc = new BasicDBObject("name", "MongoDB")
        .append("amount1", Decimal128.parse(".10"))
        .append("amount2", new Decimal128(42L))
        .append("amount3", new Decimal128(new BigDecimal(".200")));
    collection.insert(doc);
    DBObject first = collection.findOne(QueryBuilder.start("amount1").is(new Decimal128(new BigDecimal(".10"))).get());
    Decimal128 amount3 = (Decimal128) first.get("amount3");
    BigDecimal amount3AsBigDecimal = amount3.bigDecimalValue();
    System.out.println(amount3.toString());
    System.out.println(amount3AsBigDecimal.toString());
}
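A minimal sketch of the same Decimal128 round trip with the modern com.mongodb.client API, reusing the database, collection, and field names from above; the class name is illustrative:

import static com.mongodb.client.model.Filters.eq;

import com.mongodb.client.MongoClient;
import com.mongodb.client.MongoClients;
import com.mongodb.client.MongoCollection;
import org.bson.Document;
import org.bson.types.Decimal128;
import java.math.BigDecimal;

public class Decimal128ModernSketch {
    public static void main(final String[] args) {
        try (MongoClient mongoClient = MongoClients.create()) {
            MongoCollection<Document> collection = mongoClient.getDatabase("mydb").getCollection("test");
            collection.drop();
            // Decimal128 values round-trip as BSON decimal128, preserving scale.
            collection.insertOne(new Document("name", "MongoDB")
                    .append("amount1", Decimal128.parse(".10"))
                    .append("amount2", new Decimal128(42L))
                    .append("amount3", new Decimal128(new BigDecimal(".200"))));
            Document first = collection.find(eq("amount1", Decimal128.parse(".10"))).first();
            Decimal128 amount3 = first.get("amount3", Decimal128.class);
            System.out.println(amount3);
            System.out.println(amount3.bigDecimalValue());
        }
    }
}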
Use of com.mongodb.DB in project morphia by mongodb.
The class DatastoreImpl, method ensureCaps.
@Override
public void ensureCaps() {
    for (final MappedClass mc : mapper.getMappedClasses()) {
        if (mc.getEntityAnnotation() != null && mc.getEntityAnnotation().cap().value() > 0) {
            final CappedAt cap = mc.getEntityAnnotation().cap();
            final String collName = mapper.getCollectionName(mc.getClazz());
            final BasicDBObjectBuilder dbCapOpts = start("capped", true);
            if (cap.value() > 0) {
                dbCapOpts.add("size", cap.value());
            }
            if (cap.count() > 0) {
                dbCapOpts.add("max", cap.count());
            }
            final DB database = getDB();
            if (database.getCollectionNames().contains(collName)) {
                final DBObject dbResult = database.command(start("collstats", collName).get());
                if (dbResult.containsField("capped")) {
                    LOG.debug("DBCollection already exists and is capped already; doing nothing. " + dbResult);
                } else {
                    LOG.warning("DBCollection already exists with same name(" + collName + ") and is not capped; not creating capped version!");
                }
            } else {
                getDB().createCollection(collName, dbCapOpts.get());
                LOG.debug("Created capped DBCollection (" + collName + ") with opts " + dbCapOpts);
            }
        }
    }
}
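Outside Morphia, the same capped options map directly onto the driver. A minimal sketch with the modern API; the collection name and the size/max limits stand in for CappedAt.value() and CappedAt.count() and are illustrative:

import com.mongodb.client.MongoClient;
import com.mongodb.client.MongoClients;
import com.mongodb.client.MongoDatabase;
import com.mongodb.client.model.CreateCollectionOptions;

public class CreateCappedSketch {
    public static void main(String[] args) {
        try (MongoClient client = MongoClients.create()) {
            MongoDatabase db = client.getDatabase("mydb");
            // Equivalent of the {capped: true, size: ..., max: ...} options built by ensureCaps() above.
            db.createCollection("logEntries", new CreateCollectionOptions()
                    .capped(true)
                    .sizeInBytes(1024 * 1024)   // like CappedAt.value()
                    .maxDocuments(1000));       // like CappedAt.count()
        }
    }
}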