Use of com.google.common.base.Stopwatch in project jackrabbit-oak by apache.
The class RDBDocumentStore, method internalCreate.
private <T extends Document> boolean internalCreate(Collection<T> collection, List<UpdateOp> updates) {
    final Stopwatch watch = startWatch();
    List<String> ids = new ArrayList<String>(updates.size());
    boolean success = true;
    try {
        // try up to CHUNKSIZE ops in one transaction
        for (List<UpdateOp> chunk : Lists.partition(updates, CHUNKSIZE)) {
            List<T> docs = new ArrayList<T>();
            for (UpdateOp update : chunk) {
                ids.add(update.getId());
                maintainUpdateStats(collection, update.getId());
                UpdateUtils.assertUnconditional(update);
                T doc = collection.newDocument(this);
                addUpdateCounters(update);
                UpdateUtils.applyChanges(doc, update);
                if (!update.getId().equals(doc.getId())) {
                    throw new DocumentStoreException("ID mismatch - UpdateOp: " + update.getId() + ", ID property: " + doc.getId());
                }
                docs.add(doc);
            }
            boolean done = insertDocuments(collection, docs);
            if (done) {
                if (collection == Collection.NODES) {
                    for (T doc : docs) {
                        nodesCache.putIfAbsent((NodeDocument) doc);
                    }
                }
            } else {
                success = false;
            }
        }
        return success;
    } catch (DocumentStoreException ex) {
        return false;
    } finally {
        stats.doneCreate(watch.elapsed(TimeUnit.NANOSECONDS), collection, ids, success);
    }
}
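All of the snippets on this page share the same timing pattern: a Stopwatch is started on entry and the elapsed nanoseconds are reported to a stats collector in a finally block, so timings are recorded on failure paths too. A minimal self-contained sketch of that pattern, assuming startWatch() is just a wrapper around Guava's Stopwatch.createStarted() (the printf sink is a stand-in for Oak's stats collector):

import java.util.concurrent.TimeUnit;
import com.google.common.base.Stopwatch;

public class TimedCreate {

    // assumed equivalent of RDBDocumentStore.startWatch()
    private static Stopwatch startWatch() {
        return Stopwatch.createStarted();
    }

    public boolean create(String id) {
        final Stopwatch watch = startWatch();
        boolean success = false;
        try {
            // ... perform the actual insert here ...
            success = true;
            return success;
        } finally {
            // read the elapsed time exactly once, on both success and failure paths
            long nanos = watch.elapsed(TimeUnit.NANOSECONDS);
            System.out.printf("create(%s) took %d ns, success=%b%n", id, nanos, success);
        }
    }
}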
Use of com.google.common.base.Stopwatch in project jackrabbit-oak by apache.
The class RDBDocumentStore, method readDocumentUncached.
@CheckForNull
private <T extends Document> T readDocumentUncached(Collection<T> collection, String id, NodeDocument cachedDoc) {
    Connection connection = null;
    RDBTableMetaData tmd = getTable(collection);
    final Stopwatch watch = startWatch();
    boolean docFound = true;
    try {
        long lastmodcount = -1, lastmodified = -1;
        if (cachedDoc != null) {
            lastmodcount = modcountOf(cachedDoc);
            lastmodified = modifiedOf(cachedDoc);
        }
        connection = this.ch.getROConnection();
        RDBRow row = db.read(connection, tmd, id, lastmodcount, lastmodified);
        connection.commit();
        if (row == null) {
            docFound = false;
            return null;
        } else {
            if (lastmodcount == row.getModcount() && lastmodified == row.getModified() && lastmodified >= 1) {
                // we can re-use the cached document
                cachedDoc.markUpToDate(System.currentTimeMillis());
                return castAsT(cachedDoc);
            } else {
                return convertFromDBObject(collection, row);
            }
        }
    } catch (Exception ex) {
        throw new DocumentStoreException(ex);
    } finally {
        this.ch.closeConnection(connection);
        stats.doneFindUncached(watch.elapsed(TimeUnit.NANOSECONDS), collection, id, docFound, false);
    }
}
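The conditional read works because the cached document's _modCount and _modified values are handed to db.read(), and if the row comes back with the same values the cached instance is reused and merely marked fresh. The modcountOf/modifiedOf helpers are not shown on this page; a plausible sketch, assuming they simply read the bookkeeping properties off a Document and default to -1 when absent:

// assumption: these mirror the private helpers in RDBDocumentStore
private static long modcountOf(Document doc) {
    Long modCount = doc.getModCount(); // the "_modCount" property
    return modCount != null ? modCount : -1;
}

private static long modifiedOf(Document doc) {
    Object modified = doc.get(NodeDocument.MODIFIED_IN_SECS); // the "_modified" property
    return modified instanceof Number ? ((Number) modified).longValue() : -1;
}

The -1 default matches the sentinel used above when there is no cached document at all.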
Use of com.google.common.base.Stopwatch in project jackrabbit-oak by apache.
The class MongoDocumentStore, method remove.
@Override
public <T extends Document> int remove(Collection<T> collection, Map<String, Map<Key, Condition>> toRemove) {
    log("remove", toRemove);
    int num = 0;
    DBCollection dbCollection = getDBCollection(collection);
    Stopwatch watch = startWatch();
    try {
        List<String> batchIds = Lists.newArrayList();
        List<DBObject> batch = Lists.newArrayList();
        Iterator<Entry<String, Map<Key, Condition>>> it = toRemove.entrySet().iterator();
        while (it.hasNext()) {
            Entry<String, Map<Key, Condition>> entry = it.next();
            QueryBuilder query = createQueryForUpdate(entry.getKey(), entry.getValue());
            batchIds.add(entry.getKey());
            batch.add(query.get());
            if (!it.hasNext() || batch.size() == IN_CLAUSE_BATCH_SIZE) {
                DBObject q = new BasicDBObject();
                q.put(QueryOperators.OR, batch);
                try {
                    num += dbCollection.remove(q).getN();
                } catch (Exception e) {
                    throw DocumentStoreException.convert(e, "Remove failed for " + batch);
                } finally {
                    if (collection == Collection.NODES) {
                        invalidateCache(batchIds);
                    }
                }
                batchIds.clear();
                batch.clear();
            }
        }
    } finally {
        stats.doneRemove(watch.elapsed(TimeUnit.NANOSECONDS), collection, num);
    }
    return num;
}
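remove() folds the per-id conditions into a single $or query of at most IN_CLAUSE_BATCH_SIZE clauses and flushes whenever the batch is full or the input runs out. The flush-on-full-or-last control flow, extracted into a generic sketch (these names are illustrative, not Oak API):

import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;
import java.util.function.Consumer;

final class Batching {
    // accumulate items and flush when the batch is full or the input is
    // exhausted - the same control flow as MongoDocumentStore.remove()
    static <T> void processInBatches(Iterable<T> items, int batchSize, Consumer<List<T>> flush) {
        List<T> batch = new ArrayList<>();
        Iterator<T> it = items.iterator();
        while (it.hasNext()) {
            batch.add(it.next());
            if (!it.hasNext() || batch.size() == batchSize) {
                flush.accept(batch);
                batch.clear(); // the original likewise reuses the list after each flush
            }
        }
    }
}

Note the contrast with internalCreate above, which can simply use Lists.partition because its input is already a List; here the entries stream out of a map iterator, so the batch boundary has to be checked inline.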
Use of com.google.common.base.Stopwatch in project jackrabbit-oak by apache.
The class MongoDocumentStore, method createOrUpdate.
/**
 * Try to apply all the {@link UpdateOp}s with as few MongoDB requests as
 * possible. The return value is the list of the old documents (before
 * applying changes). The mechanism is as follows:
 *
 * <ol>
 * <li>For each UpdateOp try to read the assigned document from the cache.
 * Add them to {@code oldDocs}.</li>
 * <li>Prepare a list of all UpdateOps that don't have their documents yet
 * and read them in one find() call. Add the results to {@code oldDocs}.</li>
 * <li>Prepare a bulk update. For each remaining UpdateOp add the following
 * operations:
 * <ul>
 * <li>Find the document with the same id and the same mod_count as in
 * {@code oldDocs}.</li>
 * <li>Apply the changes from the UpdateOp.</li>
 * </ul>
 * </li>
 * <li>Execute the bulk update.</li>
 * </ol>
 *
 * If some other process modifies the target documents between points 2 and
 * 3, the mod_count will be increased as well and the bulk update will fail
 * for the concurrently modified docs. The method will then remove the
 * failed documents from {@code oldDocs} and restart the process from
 * point 2. It will stop after the 3rd iteration.
 */
@SuppressWarnings("unchecked")
@CheckForNull
@Override
public <T extends Document> List<T> createOrUpdate(Collection<T> collection, List<UpdateOp> updateOps) {
    log("createOrUpdate", updateOps);
    Map<String, UpdateOp> operationsToCover = new LinkedHashMap<String, UpdateOp>();
    List<UpdateOp> duplicates = new ArrayList<UpdateOp>();
    Map<UpdateOp, T> results = new LinkedHashMap<UpdateOp, T>();
    final Stopwatch watch = startWatch();
    try {
        for (UpdateOp updateOp : updateOps) {
            UpdateUtils.assertUnconditional(updateOp);
            UpdateOp clone = updateOp.copy();
            if (operationsToCover.containsKey(updateOp.getId())) {
                duplicates.add(clone);
            } else {
                operationsToCover.put(updateOp.getId(), clone);
            }
            results.put(clone, null);
        }
        Map<String, T> oldDocs = new HashMap<String, T>();
        if (collection == Collection.NODES) {
            oldDocs.putAll((Map<String, T>) getCachedNodes(operationsToCover.keySet()));
        }
        for (int i = 0; i <= bulkRetries; i++) {
            if (operationsToCover.size() <= 2) {
                // running in bulk mode wouldn't result in any performance gain
                break;
            }
            for (List<UpdateOp> partition : Lists.partition(Lists.newArrayList(operationsToCover.values()), bulkSize)) {
                Map<UpdateOp, T> successfulUpdates = bulkUpdate(collection, partition, oldDocs);
                results.putAll(successfulUpdates);
                operationsToCover.values().removeAll(successfulUpdates.keySet());
            }
        }
        // if there are some changes left, we'll apply them one after another
        Iterator<UpdateOp> it = Iterators.concat(operationsToCover.values().iterator(), duplicates.iterator());
        while (it.hasNext()) {
            UpdateOp op = it.next();
            it.remove();
            T oldDoc = createOrUpdate(collection, op);
            if (oldDoc != null) {
                results.put(op, oldDoc);
            }
        }
    } catch (MongoException e) {
        throw handleException(e, collection, Iterables.transform(updateOps, new Function<UpdateOp, String>() {
            @Override
            public String apply(UpdateOp input) {
                return input.getId();
            }
        }));
    } finally {
        stats.doneCreateOrUpdate(watch.elapsed(TimeUnit.NANOSECONDS), collection, Lists.transform(updateOps, new Function<UpdateOp, String>() {
            @Override
            public String apply(UpdateOp input) {
                return input.getId();
            }
        }));
    }
    List<T> resultList = new ArrayList<T>(results.values());
    log("createOrUpdate returns", resultList);
    return resultList;
}
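The bulk path is optimistic concurrency control: each entry in the bulk write matches on the document id plus the _modCount remembered in oldDocs, so a concurrent writer (which bumps _modCount) simply makes that entry match nothing, and the op stays in operationsToCover for the next retry round. bulkUpdate() itself is not shown on this page; a hypothetical sketch of what one round could look like against the mongo-java-driver 2.x bulk API (changesOf() is an illustrative placeholder, not a real method):

// hypothetical sketch of the optimistic bulk round inside bulkUpdate()
BulkWriteOperation bulk = dbCollection.initializeUnorderedBulkOperation();
for (UpdateOp op : partition) {
    long oldModCount = modcountOf(oldDocs.get(op.getId()));
    DBObject filter = new BasicDBObject("_id", op.getId())
            .append("_modCount", oldModCount);                  // matches only if nobody raced us
    DBObject update = new BasicDBObject("$set", changesOf(op))  // changesOf() is illustrative
            .append("$inc", new BasicDBObject("_modCount", 1L));
    bulk.find(filter).updateOne(update);
}
BulkWriteResult result = bulk.execute();
// entries that matched nothing are the concurrently modified docs; those are retried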
Use of com.google.common.base.Stopwatch in project jackrabbit-oak by apache.
The class RDBDocumentStore, method internalQuery.
private <T extends Document> List<T> internalQuery(Collection<T> collection, String fromKey, String toKey,
        List<String> excludeKeyPatterns, List<QueryCondition> conditions, int limit) {
    Connection connection = null;
    RDBTableMetaData tmd = getTable(collection);
    for (QueryCondition cond : conditions) {
        if (!INDEXEDPROPERTIES.contains(cond.getPropertyName())) {
            String message = "indexed property " + cond.getPropertyName() + " not supported, query was '"
                    + cond.getOperator() + " " + cond.getValue() + "'; supported properties are " + INDEXEDPROPERTIES;
            LOG.info(message);
            throw new DocumentStoreException(message);
        }
    }
    final Stopwatch watch = startWatch();
    int resultSize = 0;
    CacheChangesTracker tracker = null;
    try {
        if (collection == Collection.NODES) {
            tracker = nodesCache.registerTracker(fromKey, toKey);
        }
        long now = System.currentTimeMillis();
        connection = this.ch.getROConnection();
        String from = collection == Collection.NODES && NodeDocument.MIN_ID_VALUE.equals(fromKey) ? null : fromKey;
        String to = collection == Collection.NODES && NodeDocument.MAX_ID_VALUE.equals(toKey) ? null : toKey;
        List<RDBRow> dbresult = db.query(connection, tmd, from, to, excludeKeyPatterns, conditions, limit);
        connection.commit();
        int size = dbresult.size();
        List<T> result = new ArrayList<T>(size);
        for (int i = 0; i < size; i++) {
            // free RDBRow as early as possible
            RDBRow row = dbresult.set(i, null);
            T doc = getIfCached(collection, row.getId(), row.getModcount());
            if (doc == null) {
                // parse DB contents into document if and only if it's not
                // already in the cache
                doc = convertFromDBObject(collection, row);
            } else {
                // we got a document from the cache, thus collection is NODES
                // and a tracker is present
                long lastmodified = modifiedOf(doc);
                if (lastmodified == row.getModified() && lastmodified >= 1) {
                    Lock lock = locks.acquire(row.getId());
                    try {
                        if (!tracker.mightBeenAffected(row.getId())) {
                            // the cached doc was not invalidated in the meantime; mark it as fresh
                            ((NodeDocument) doc).markUpToDate(now);
                        }
                    } finally {
                        lock.unlock();
                    }
                } else {
                    // we need a fresh document instance
                    doc = convertFromDBObject(collection, row);
                }
            }
            result.add(doc);
        }
        if (collection == Collection.NODES) {
            nodesCache.putNonConflictingDocs(tracker, castAsNodeDocumentList(result));
        }
        resultSize = result.size();
        return result;
    } catch (Exception ex) {
        LOG.error("SQL exception on query", ex);
        throw new DocumentStoreException(ex);
    } finally {
        if (tracker != null) {
            tracker.close();
        }
        this.ch.closeConnection(connection);
        stats.doneQuery(watch.elapsed(TimeUnit.NANOSECONDS), collection, fromKey, toKey, !conditions.isEmpty(), resultSize, -1, false);
    }
}
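internalQuery also demonstrates the CacheChangesTracker lifecycle: register a tracker for the key range before touching the database, insert only non-conflicting documents into the cache after the read, and always close the tracker in the finally block. Reduced to its skeleton (readRows() is a placeholder for the db.query/convertFromDBObject work shown above):

CacheChangesTracker tracker = null;
try {
    // start observing invalidations for [fromKey, toKey] before the read
    tracker = nodesCache.registerTracker(fromKey, toKey);
    List<NodeDocument> docs = readRows(); // placeholder for the DB read
    // only documents whose keys the tracker did not flag get cached
    nodesCache.putNonConflictingDocs(tracker, docs);
} finally {
    if (tracker != null) {
        tracker.close();
    }
}

The window between the database read and the cache put is exactly where a concurrent invalidation could otherwise leave a stale document in the cache; the tracker closes that window.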