Use of com.mongodb.DBCollection in project jackrabbit-oak by apache.
The class MongoBlobStore, method getAllChunkIds.
@Override
public Iterator<String> getAllChunkIds(long maxLastModifiedTime) throws Exception {
    DBCollection collection = getBlobCollection();
    DBObject fields = new BasicDBObject();
    fields.put(MongoBlob.KEY_ID, 1);
    QueryBuilder builder = new QueryBuilder();
    if (maxLastModifiedTime != 0 && maxLastModifiedTime != -1) {
        builder.and(MongoBlob.KEY_LAST_MOD).lessThanEquals(maxLastModifiedTime);
    }
    final DBCursor cur = collection.find(builder.get(), fields)
            .hint(fields).addOption(Bytes.QUERYOPTION_SLAVEOK);
    //TODO The cursor needs to be closed
    return new AbstractIterator<String>() {
        @Override
        protected String computeNext() {
            if (cur.hasNext()) {
                MongoBlob blob = (MongoBlob) cur.next();
                if (blob != null) {
                    return blob.getId();
                }
            }
            return endOfData();
        }
    };
}
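The TODO above flags that the DBCursor is never closed; if a caller abandons the iterator before draining it, the server-side cursor can leak. One way to address it is a closeable wrapper, sketched below; the ClosingIterator class is hypothetical, not part of the Oak API, and assumes callers either exhaust the iterator or close it explicitly.

import java.io.Closeable;
import java.util.Iterator;
import com.google.common.collect.AbstractIterator;
import com.mongodb.DBCursor;

// Hypothetical wrapper: closes the underlying DBCursor once iteration is
// exhausted, and lets callers close it early via Closeable.
final class ClosingIterator<T> extends AbstractIterator<T> implements Closeable {
    private final Iterator<T> delegate;
    private final DBCursor cursor;

    ClosingIterator(Iterator<T> delegate, DBCursor cursor) {
        this.delegate = delegate;
        this.cursor = cursor;
    }

    @Override
    protected T computeNext() {
        if (delegate.hasNext()) {
            return delegate.next();
        }
        close(); // release the server-side cursor when the data is exhausted
        return endOfData();
    }

    @Override
    public void close() {
        cursor.close();
    }
}

getAllChunkIds could then wrap its AbstractIterator in this class and return it, giving callers a way to release the cursor early.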
Use of com.mongodb.DBCollection in project jackrabbit-oak by apache.
The class MongoDocumentStore, method remove (by indexed property range).
@Override
public <T extends Document> int remove(Collection<T> collection, String indexedProperty, long startValue, long endValue) throws DocumentStoreException {
    log("remove", collection, indexedProperty, startValue, endValue);
    int num = 0;
    DBCollection dbCollection = getDBCollection(collection);
    Stopwatch watch = startWatch();
    try {
        QueryBuilder queryBuilder = QueryBuilder.start(indexedProperty);
        queryBuilder.greaterThan(startValue);
        queryBuilder.lessThan(endValue);
        try {
            num = dbCollection.remove(queryBuilder.get()).getN();
        } catch (Exception e) {
            throw DocumentStoreException.convert(e, "Remove failed for " + collection + ": " + indexedProperty + " in (" + startValue + ", " + endValue + ")");
        } finally {
            if (collection == Collection.NODES) {
                // This method is currently used only for the journal collection
                // during GC, but the API allows removing from the nodes
                // collection as well. With this signature there is no targeted
                // way to invalidate individual cache entries, so fall back to
                // invalidating the whole cache.
                invalidateCache();
            }
        }
    } finally {
        stats.doneRemove(watch.elapsed(TimeUnit.NANOSECONDS), collection, num);
    }
    return num;
}
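For context, this range remove is the shape of the call made by journal garbage collection: delete every document whose indexed property falls strictly between the two bounds (greaterThan/lessThan make both endpoints exclusive). A hedged usage sketch; store is assumed to be a MongoDocumentStore, JournalEntry.MODIFIED is assumed to be the indexed property journal GC uses, and the 24-hour cutoff is illustrative.

// Illustrative call: purge journal entries last modified more than
// 24 hours ago. Both bounds are exclusive, matching the query above.
long cutoff = System.currentTimeMillis() - TimeUnit.HOURS.toMillis(24);
int removed = store.remove(Collection.JOURNAL, JournalEntry.MODIFIED, 0, cutoff);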
Use of com.mongodb.DBCollection in project jackrabbit-oak by apache.
The class MongoDocumentStore, method findAndModify.
@SuppressWarnings("unchecked")
@CheckForNull
private <T extends Document> T findAndModify(Collection<T> collection, UpdateOp updateOp, boolean upsert, boolean checkConditions) {
    DBCollection dbCollection = getDBCollection(collection);
    // make sure we don't modify the original updateOp
    updateOp = updateOp.copy();
    DBObject update = createUpdate(updateOp, false);
    Lock lock = null;
    if (collection == Collection.NODES) {
        lock = nodeLocks.acquire(updateOp.getId());
    }
    final Stopwatch watch = startWatch();
    boolean newEntry = false;
    try {
        // get modCount of cached document
        Long modCount = null;
        T cachedDoc = null;
        if (collection == Collection.NODES) {
            cachedDoc = (T) nodesCache.getIfPresent(updateOp.getId());
            if (cachedDoc != null) {
                modCount = cachedDoc.getModCount();
            }
        }
        // if we have a matching modCount
        if (modCount != null) {
            QueryBuilder query = createQueryForUpdate(updateOp.getId(), updateOp.getConditions());
            query.and(Document.MOD_COUNT).is(modCount);
            WriteResult result = dbCollection.update(query.get(), update);
            if (result.getN() > 0) {
                // success, update cached document
                if (collection == Collection.NODES) {
                    NodeDocument newDoc = (NodeDocument) applyChanges(collection, cachedDoc, updateOp);
                    nodesCache.put(newDoc);
                }
                // return previously cached document
                return cachedDoc;
            }
        }
        // conditional update failed or not possible
        // perform operation and get complete document
        QueryBuilder query = createQueryForUpdate(updateOp.getId(), updateOp.getConditions());
        DBObject oldNode = dbCollection.findAndModify(query.get(), null, null /*sort*/,
                false /*remove*/, update, false /*returnNew*/, upsert);
        if (oldNode == null) {
            newEntry = true;
        }
        if (checkConditions && oldNode == null) {
            return null;
        }
        T oldDoc = convertFromDBObject(collection, oldNode);
        if (oldDoc != null) {
            if (collection == Collection.NODES) {
                NodeDocument newDoc = (NodeDocument) applyChanges(collection, oldDoc, updateOp);
                nodesCache.put(newDoc);
                updateLocalChanges(newDoc);
            }
            oldDoc.seal();
        } else if (upsert) {
            if (collection == Collection.NODES) {
                NodeDocument doc = (NodeDocument) collection.newDocument(this);
                UpdateUtils.applyChanges(doc, updateOp);
                nodesCache.putIfAbsent(doc);
                updateLocalChanges(doc);
            }
        } else {
            // updateOp without conditions and not an upsert:
            // the document does not exist
        }
        return oldDoc;
    } catch (Exception e) {
        throw handleException(e, collection, updateOp.getId());
    } finally {
        if (lock != null) {
            lock.unlock();
        }
        stats.doneFindAndModify(watch.elapsed(TimeUnit.NANOSECONDS), collection, updateOp.getId(), newEntry, true, 0);
    }
}
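The fast path of this method is an optimistic update guarded by _modCount: the cached document's modification counter is added to the query, so the update applies only if no other writer changed the document in the meantime. A stripped-down sketch of that pattern against a raw DBCollection follows; the field names "_id" and "_modCount" follow the snippet above, while the method shape, prop, and value are illustrative.

import com.mongodb.BasicDBObject;
import com.mongodb.DBCollection;
import com.mongodb.DBObject;
import com.mongodb.QueryBuilder;
import com.mongodb.WriteResult;

// Minimal sketch of the modCount-guarded (optimistic) update. Assumes every
// update increments _modCount, so a matching _modCount proves the cached
// copy is still current.
static boolean conditionalUpdate(DBCollection dbCollection, String id,
                                 long expectedModCount, String prop, Object value) {
    DBObject query = QueryBuilder.start("_id").is(id)
            .and("_modCount").is(expectedModCount).get();
    DBObject update = new BasicDBObject("$inc", new BasicDBObject("_modCount", 1L))
            .append("$set", new BasicDBObject(prop, value));
    WriteResult result = dbCollection.update(query, update);
    // n == 0 means another writer bumped _modCount first (or the document
    // does not exist); the caller falls back to findAndModify, as above
    return result.getN() > 0;
}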
Use of com.mongodb.DBCollection in project jackrabbit-oak by apache.
The class MongoDocumentStore, method remove (by keys).
@Override
public <T extends Document> void remove(Collection<T> collection, List<String> keys) {
    log("remove", keys);
    DBCollection dbCollection = getDBCollection(collection);
    Stopwatch watch = startWatch();
    try {
        for (List<String> keyBatch : Lists.partition(keys, IN_CLAUSE_BATCH_SIZE)) {
            DBObject query = QueryBuilder.start(Document.ID).in(keyBatch).get();
            try {
                dbCollection.remove(query);
            } catch (Exception e) {
                throw DocumentStoreException.convert(e, "Remove failed for " + keyBatch);
            } finally {
                if (collection == Collection.NODES) {
                    for (String key : keyBatch) {
                        invalidateCache(collection, key);
                    }
                }
            }
        }
    } finally {
        stats.doneRemove(watch.elapsed(TimeUnit.NANOSECONDS), collection, keys.size());
    }
}
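Lists.partition splits the key list into fixed-size chunks so each $in clause stays bounded, and the finally block invalidates the cache batch by batch even when a remove fails. A self-contained sketch of the batching idea on its own; the batch size of 500 is an assumption (Oak defines its own IN_CLAUSE_BATCH_SIZE constant).

import java.util.List;
import com.google.common.collect.Lists;
import com.mongodb.DBCollection;
import com.mongodb.DBObject;
import com.mongodb.QueryBuilder;

// Assumed batch size; Oak's actual IN_CLAUSE_BATCH_SIZE may differ.
static final int IN_CLAUSE_BATCH_SIZE = 500;

static void removeInBatches(DBCollection dbCollection, List<String> keys) {
    for (List<String> keyBatch : Lists.partition(keys, IN_CLAUSE_BATCH_SIZE)) {
        // one remove per batch keeps each $in clause at a bounded size
        DBObject query = QueryBuilder.start("_id").in(keyBatch).get();
        dbCollection.remove(query);
    }
}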
Use of com.mongodb.DBCollection in project jackrabbit-oak by apache.
The class MongoDocumentStoreTest, method doInsert.
private void doInsert(int n, boolean batch) throws Exception {
    dropCollections();
    DBCollection collection = connectionFactory.getConnection().getDB().getCollection("batchInsertTest");
    DBObject index = new BasicDBObject();
    index.put("_path", 1L);
    DBObject options = new BasicDBObject();
    options.put("unique", Boolean.TRUE);
    collection.createIndex(index, options);
    log("Inserting " + n + " batch? " + batch);
    long start = System.currentTimeMillis();
    if (batch) {
        DBObject[] arr = new BasicDBObject[n];
        for (int i = 0; i < n; i++) {
            arr[i] = new BasicDBObject("_path", "/a" + i);
        }
        collection.insert(arr);
    } else {
        for (int i = 0; i < n; i++) {
            collection.insert(new BasicDBObject("_path", "/a" + i));
        }
    }
    long end = System.currentTimeMillis();
    log("Done: " + (end - start) + "ms");
    dropCollections();
}
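A test would call this helper both ways to compare the cost of one bulk insert against n individual inserts. A hypothetical driver (the test name and the count of 10000 are illustrative):

@Test
public void batchVsSingleInsert() throws Exception {
    doInsert(10000, true);   // one bulk insert of 10000 documents
    doInsert(10000, false);  // 10000 individual inserts
}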