Use of com.mongodb.DBCursor in project mongomvcc by igd-geo.
The class MongoDBVMaintenance, method doFindDanglingCommits.
private long[] doFindDanglingCommits(long expiry, TimeUnit unit) {
    long maxTime = getMaxTime(expiry, unit);

    //load all commits which are older than the expiry time. mark them as dangling
    DBCollection collCommits = _db.getDB().getCollection(MongoDBConstants.COLLECTION_COMMITS);
    //also include commits without a timestamp
    DBCursor commits = collCommits.find(
            new BasicDBObject(MongoDBConstants.TIMESTAMP,
                    new BasicDBObject("$not", new BasicDBObject("$gte", maxTime))),
            new BasicDBObject(MongoDBConstants.ID, 1));
    IdSet danglingCommits = new IdHashSet();
    for (DBObject o : commits) {
        long cid = (Long) o.get(MongoDBConstants.ID);
        danglingCommits.add(cid);
    }

    //walk through all branches and eliminate commits which are not dangling
    DBCollection collBranches = _db.getDB().getCollection(MongoDBConstants.COLLECTION_BRANCHES);
    DBCursor branches = collBranches.find(new BasicDBObject(), new BasicDBObject(MongoDBConstants.CID, 1));
    VHistory history = _db.getHistory();
    IdSet alreadyCheckedCommits = new IdHashSet();
    for (DBObject o : branches) {
        long cid = (Long) o.get(MongoDBConstants.CID);
        while (cid != 0) {
            if (alreadyCheckedCommits.contains(cid)) {
                break;
            }
            alreadyCheckedCommits.add(cid);
            danglingCommits.remove(cid);
            cid = history.getParent(cid);
        }
    }

    //all remaining commits must be dangling
    return danglingCommits.toArray();
}
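Note the query shape: a plain $lt would skip commits that have no timestamp field at all, while $not/$gte also matches documents where the field is missing, which is exactly what the inline comment asks for. A minimal standalone sketch of the same pattern with the legacy driver (database, collection, and field names here are illustrative, not taken from mongomvcc), including the explicit cursor close that the snippet above omits:

import com.mongodb.BasicDBObject;
import com.mongodb.DBCollection;
import com.mongodb.DBCursor;
import com.mongodb.MongoClient;

public class NotGteExample {
    public static void main(String[] args) {
        MongoClient client = new MongoClient(); // assumes a local mongod
        try {
            DBCollection coll = client.getDB("test").getCollection("commits");
            long maxTime = System.currentTimeMillis() - 3600_000L;
            // {timestamp: {$not: {$gte: maxTime}}} matches timestamps below
            // maxTime AND documents that lack the field entirely;
            // {timestamp: {$lt: maxTime}} would miss the latter.
            DBCursor cursor = coll.find(
                    new BasicDBObject("timestamp",
                            new BasicDBObject("$not", new BasicDBObject("$gte", maxTime))),
                    new BasicDBObject("_id", 1));
            try {
                while (cursor.hasNext()) {
                    System.out.println(cursor.next().get("_id"));
                }
            } finally {
                cursor.close(); // a DBCursor holds server resources; close it when done
            }
        } finally {
            client.close();
        }
    }
}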
Use of com.mongodb.DBCursor in project mongomvcc by igd-geo.
The class MongoDBVCollectionTest, method lifetimeDeletedOptimization.
/**
 * Tests if the lifetime optimization takes effect. Objects that have
 * been deleted should not be loaded but filtered out at the
 * database level already.
 */
@Test
public void lifetimeDeletedOptimization() {
    //insert two documents to skip the in-index shortcut
    putPerson("Max", 6);
    putPerson("Pax", 8);
    _master.commit();
    VCollection persons = _master.getCollection("persons");
    VCursor cursor = persons.find();
    DBCursor dbcursor = extractDBCursor(cursor);
    assertEquals(2, cursor.size());
    assertTrue(hasAttachedFilter(cursor));
    assertEquals(2, dbcursor.size());

    putPerson("Elvis", 3);
    _master.commit();
    persons = _master.getCollection("persons");
    cursor = persons.find();
    dbcursor = extractDBCursor(cursor);
    assertEquals(3, cursor.size());
    assertTrue(hasAttachedFilter(cursor));
    assertEquals(3, dbcursor.size());

    persons.delete(_factory.createDocument("name", "Max"));
    _master.commit();
    persons = _master.getCollection("persons");
    cursor = persons.find();
    dbcursor = extractDBCursor(cursor);
    assertEquals(2, cursor.size());
    assertTrue(hasAttachedFilter(cursor));
    assertEquals(2, dbcursor.size());
}
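The helpers putPerson, extractDBCursor, and hasAttachedFilter are defined elsewhere in the test class. The key assertion is that dbcursor.size() equals cursor.size(): a client-side filter would make the underlying DBCursor return more documents than the VCursor yields, so equal sizes mean the deleted "Max" was excluded by the query itself. A minimal sketch of that idea with the raw driver (the _deleted marker field is an assumption for illustration, not mongomvcc's actual scheme):

import com.mongodb.BasicDBObject;
import com.mongodb.DBCollection;
import com.mongodb.MongoClient;

public class DbLevelFilterExample {
    public static void main(String[] args) {
        MongoClient client = new MongoClient(); // assumes a local mongod
        try {
            DBCollection persons = client.getDB("test").getCollection("persons");
            persons.insert(new BasicDBObject("name", "Max").append("_deleted", true));
            persons.insert(new BasicDBObject("name", "Pax"));
            // The deletion criterion is part of the query, so deleted
            // documents never leave the server:
            long visible = persons.count(
                    new BasicDBObject("_deleted", new BasicDBObject("$ne", true)));
            System.out.println(visible); // prints 1
        } finally {
            client.close();
        }
    }
}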
Use of com.mongodb.DBCursor in project mongo-hadoop by mongodb.
The class StandaloneMongoSplitter, method calculateSplits.
@Override
public List<InputSplit> calculateSplits() throws SplitFailedException {
    final DBObject splitKey = MongoConfigUtil.getInputSplitKey(getConfiguration());
    final DBObject splitKeyMax = MongoConfigUtil.getMaxSplitKey(getConfiguration());
    final DBObject splitKeyMin = MongoConfigUtil.getMinSplitKey(getConfiguration());
    final int splitSize = MongoConfigUtil.getSplitSize(getConfiguration());
    final MongoClientURI inputURI;
    DBCollection inputCollection = null;
    final ArrayList<InputSplit> returnVal;
    try {
        inputURI = MongoConfigUtil.getInputURI(getConfiguration());
        MongoClientURI authURI = MongoConfigUtil.getAuthURI(getConfiguration());
        if (authURI != null) {
            inputCollection = MongoConfigUtil.getCollectionWithAuth(inputURI, authURI);
        } else {
            inputCollection = MongoConfigUtil.getCollection(inputURI);
        }
        returnVal = new ArrayList<InputSplit>();
        final String ns = inputCollection.getFullName();
        if (LOG.isDebugEnabled()) {
            LOG.debug(String.format("Running splitVector on namespace: %s.%s; hosts: %s",
                    inputURI.getDatabase(), inputURI.getCollection(), inputURI.getHosts()));
        }
        final DBObject cmd = BasicDBObjectBuilder.start("splitVector", ns)
                .add("keyPattern", splitKey)
                .add("min", splitKeyMin)
                .add("max", splitKeyMax)
                .add("force", false)
                .add("maxChunkSize", splitSize)
                .get();
        CommandResult data;
        boolean ok = true;
        try {
            data = inputCollection.getDB().getSisterDB(inputURI.getDatabase())
                    .command(cmd, ReadPreference.primary());
        } catch (final MongoException e) {
            // 2.0 servers throw exceptions rather than returning info in a CommandResult
            data = null;
            LOG.info(e.getMessage(), e);
            if (e.getMessage().contains("unrecognized command: splitVector")) {
                ok = false;
            } else {
                throw e;
            }
        }
        if (data != null) {
            if (data.containsField("$err")) {
                throw new SplitFailedException("Error calculating splits: " + data);
            } else if (!data.get("ok").equals(1.0)) {
                ok = false;
            }
        }
        if (!ok) {
            final CommandResult stats = inputCollection.getStats();
            if (stats.containsField("primary")) {
                final DBCursor shards = inputCollection.getDB().getSisterDB("config")
                        .getCollection("shards")
                        .find(new BasicDBObject("_id", stats.getString("primary")));
                try {
                    if (shards.hasNext()) {
                        final DBObject shard = shards.next();
                        final String host = ((String) shard.get("host")).replace(shard.get("_id") + "/", "");
                        final MongoClientURI shardHost;
                        if (authURI != null) {
                            shardHost = new MongoClientURIBuilder(authURI).host(host).build();
                        } else {
                            shardHost = new MongoClientURIBuilder(inputURI).host(host).build();
                        }
                        MongoClient shardClient = null;
                        try {
                            shardClient = new MongoClient(shardHost);
                            data = shardClient.getDB(shardHost.getDatabase())
                                    .command(cmd, ReadPreference.primary());
                        } catch (final Exception e) {
                            LOG.error(e.getMessage(), e);
                        } finally {
                            if (shardClient != null) {
                                shardClient.close();
                            }
                        }
                    }
                } finally {
                    shards.close();
                }
            }
            if (data != null && !data.get("ok").equals(1.0)) {
                throw new SplitFailedException("Unable to calculate input splits: " + data.get("errmsg"));
            }
        }
        // The result comes in a format where "min" and "max" are implicit
        // and each entry is just a boundary key, not a range
        final BasicDBList splitData = (BasicDBList) data.get("splitKeys");
        if (splitData.size() == 0) {
            LOG.warn("WARNING: No Input Splits were calculated by the split code. Proceeding with a *single* split. "
                    + "Data may be too small; try lowering 'mongo.input.split_size' if this is undesirable.");
        }
        // Lower boundary of the first split
        BasicDBObject lastKey = null;
        // If splitKeyMin was given, use it as the first boundary.
        if (!splitKeyMin.toMap().isEmpty()) {
            lastKey = new BasicDBObject(splitKeyMin.toMap());
        }
        for (final Object aSplitData : splitData) {
            final BasicDBObject currentKey = (BasicDBObject) aSplitData;
            returnVal.add(createSplitFromBounds(lastKey, currentKey));
            lastKey = currentKey;
        }
        BasicDBObject maxKey = null;
        // If splitKeyMax was given, use it as the last boundary.
        if (!splitKeyMax.toMap().isEmpty()) {
            maxKey = new BasicDBObject(splitKeyMax.toMap());
        }
        // Last split: from the final boundary key up to maxKey (or the end of the collection)
        final MongoInputSplit lastSplit = createSplitFromBounds(lastKey, maxKey);
        returnVal.add(lastSplit);
    } finally {
        if (inputCollection != null) {
            MongoConfigUtil.close(inputCollection.getDB().getMongo());
        }
    }
    if (MongoConfigUtil.isFilterEmptySplitsEnabled(getConfiguration())) {
        return filterEmptySplits(returnVal);
    }
    return returnVal;
}
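The work here is done by the server-side splitVector command, which returns the boundary keys that partition the collection into chunks of at most maxChunkSize megabytes; the surrounding code only turns consecutive boundaries into InputSplits and falls back to asking a shard's primary directly when the first attempt fails. A minimal standalone invocation as a sketch (database and collection names are illustrative; it is issued against the database that holds the collection, as the splitter does):

import com.mongodb.BasicDBObject;
import com.mongodb.BasicDBObjectBuilder;
import com.mongodb.CommandResult;
import com.mongodb.DBObject;
import com.mongodb.MongoClient;

public class SplitVectorExample {
    public static void main(String[] args) {
        MongoClient client = new MongoClient(); // assumes a local mongod
        try {
            // splitVector takes the full namespace, an index key pattern,
            // and a target chunk size in megabytes.
            DBObject cmd = BasicDBObjectBuilder.start("splitVector", "test.lines")
                    .add("keyPattern", new BasicDBObject("_id", 1))
                    .add("maxChunkSize", 8)
                    .get();
            CommandResult result = client.getDB("test").command(cmd);
            // "splitKeys" is the list of boundary keys between chunks.
            System.out.println(result.get("splitKeys"));
        } finally {
            client.close();
        }
    }
}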
Use of com.mongodb.DBCursor in project mongo-java-driver by mongodb.
The class CLI, method main.
// CHECKSTYLE:OFF
public static void main(final String[] args) throws Exception {
    if (args.length < 1) {
        printUsage();
        return;
    }
    for (int i = 0; i < args.length; i++) {
        String s = args[i];
        if (s.equals("--db")) {
            db = args[i + 1];
            i++;
            continue;
        }
        if (s.equals("--host")) {
            host = args[i + 1];
            i++;
            continue;
        }
        if (s.equals("help")) {
            printUsage();
            return;
        }
        if (s.equals("list")) {
            GridFS fs = getGridFS();
            System.out.printf("%-60s %-10s%n", "Filename", "Length");
            DBCursor fileListCursor = fs.getFileList();
            try {
                while (fileListCursor.hasNext()) {
                    DBObject o = fileListCursor.next();
                    System.out.printf("%-60s %-10d%n", o.get("filename"), ((Number) o.get("length")).longValue());
                }
            } finally {
                fileListCursor.close();
            }
            return;
        }
        if (s.equals("get")) {
            GridFS fs = getGridFS();
            String fn = args[i + 1];
            GridFSDBFile f = fs.findOne(fn);
            if (f == null) {
                System.err.println("can't find file: " + fn);
                return;
            }
            f.writeTo(f.getFilename());
            return;
        }
        if (s.equals("put")) {
            GridFS fs = getGridFS();
            String fn = args[i + 1];
            GridFSInputFile f = fs.createFile(new File(fn));
            f.save();
            f.validate();
            return;
        }
        if (s.equals("md5")) {
            GridFS fs = getGridFS();
            String fn = args[i + 1];
            GridFSDBFile f = fs.findOne(fn);
            if (f == null) {
                System.err.println("can't find file: " + fn);
                return;
            }
            MessageDigest md5 = MessageDigest.getInstance("MD5");
            md5.reset();
            int read = 0;
            DigestInputStream is = new DigestInputStream(f.getInputStream(), md5);
            try {
                while (is.read() >= 0) {
                    read++;
                    int r = is.read(new byte[17]);
                    if (r < 0) {
                        break;
                    }
                    read += r;
                }
            } finally {
                is.close();
            }
            byte[] digest = md5.digest();
            System.out.println("length: " + read + " md5: " + Util.toHex(digest));
            return;
        }
        System.err.println("unknown option: " + s);
        return;
    }
}
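printUsage(), the static host and db fields, and getGridFS() are defined elsewhere in the CLI class. A rough sketch of what a getGridFS() helper plausibly looks like; this is an assumption for illustration, not the driver's actual code:

import com.mongodb.MongoClient;
import com.mongodb.gridfs.GridFS;

// Hypothetical sketch of the helper the CLI relies on -- the real class
// also honors the --host/--db flags parsed in main().
class GridFSAccess {
    private static String host = "localhost";
    private static String db = "gridfs";
    private static MongoClient mongo;

    static GridFS getGridFS() {
        if (mongo == null) {
            mongo = new MongoClient(host); // connect lazily, reuse afterwards
        }
        return new GridFS(mongo.getDB(db));
    }
}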
Use of com.mongodb.DBCursor in project morphia by mongodb.
The class AggregationTest, method testOutNamedCollection.
@Test
public void testOutNamedCollection() {
    checkMinServerVersion(2.6);
    getDs().save(asList(
            new Book("The Banquet", "Dante", 2, "Italian", "Sophomore Slump"),
            new Book("Divine Comedy", "Dante", 1, "Not Very Funny", "I mean for a 'comedy'", "Ironic"),
            new Book("Eclogues", "Dante", 2, "Italian", ""),
            new Book("The Odyssey", "Homer", 10, "Classic", "Mythology", "Sequel"),
            new Book("Iliad", "Homer", 10, "Mythology", "Trojan War", "No Sequel")));
    getDs().createAggregation(Book.class)
            .match(getDs().getQueryFactory().createQuery(getDs()).field("author").equal("Homer"))
            .group("author", grouping("copies", sum("copies")))
            .out("testAverage", Author.class);
    DBCursor testAverage = getDb().getCollection("testAverage").find();
    Assert.assertNotNull(testAverage);
    try {
        Assert.assertEquals(20, testAverage.next().get("copies"));
    } finally {
        testAverage.close();
    }
}
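For reference, the Morphia pipeline above corresponds to roughly this raw $match/$group/$out aggregation with the legacy driver (the mapped collection name "Book", the database name, and the emitted document shape are assumptions):

import static java.util.Arrays.asList;

import com.mongodb.AggregationOptions;
import com.mongodb.BasicDBObject;
import com.mongodb.DB;
import com.mongodb.DBObject;
import com.mongodb.MongoClient;

public class OutPipelineExample {
    public static void main(String[] args) {
        MongoClient client = new MongoClient(); // assumes a local mongod
        try {
            DB db = client.getDB("morphia_example");
            // $match -> $group -> $out: sum Homer's copies into "testAverage"
            DBObject match = new BasicDBObject("$match", new BasicDBObject("author", "Homer"));
            DBObject group = new BasicDBObject("$group",
                    new BasicDBObject("_id", "$author")
                            .append("copies", new BasicDBObject("$sum", "$copies")));
            DBObject out = new BasicDBObject("$out", "testAverage");
            db.getCollection("Book").aggregate(asList(match, group, out),
                    AggregationOptions.builder().build());
            // The results land in the named collection rather than the cursor.
            System.out.println(db.getCollection("testAverage").findOne());
        } finally {
            client.close();
        }
    }
}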