Use of org.apache.jackrabbit.oak.plugins.document.UpdateOp in project jackrabbit-oak by apache.
From the class RDBDocumentStore, method internalCreate.
@CheckForNull
private <T extends Document> boolean internalCreate(Collection<T> collection, List<UpdateOp> updates) {
    final Stopwatch watch = startWatch();
    List<String> ids = new ArrayList<String>(updates.size());
    boolean success = true;
    try {
        // try up to CHUNKSIZE ops in one transaction
        for (List<UpdateOp> chunks : Lists.partition(updates, CHUNKSIZE)) {
            List<T> docs = new ArrayList<T>();
            for (UpdateOp update : chunks) {
                ids.add(update.getId());
                maintainUpdateStats(collection, update.getId());
                UpdateUtils.assertUnconditional(update);
                T doc = collection.newDocument(this);
                addUpdateCounters(update);
                UpdateUtils.applyChanges(doc, update);
                if (!update.getId().equals(doc.getId())) {
                    throw new DocumentStoreException("ID mismatch - UpdateOp: " + update.getId() + ", ID property: " + doc.getId());
                }
                docs.add(doc);
            }
            boolean done = insertDocuments(collection, docs);
            if (done) {
                if (collection == Collection.NODES) {
                    for (T doc : docs) {
                        nodesCache.putIfAbsent((NodeDocument) doc);
                    }
                }
            } else {
                success = false;
            }
        }
        return success;
    } catch (DocumentStoreException ex) {
        return false;
    } finally {
        stats.doneCreate(watch.elapsed(TimeUnit.NANOSECONDS), collection, ids, success);
    }
}
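For context, callers reach internalCreate through DocumentStore.create(). Below is a minimal caller-side sketch assuming the standard DocumentStore API; the id "1:/foo", the property name "prop" and the helper name createExample are illustrative, not taken from the method above.

// Hedged caller-side sketch (illustrative id, property name and helper name).
// create() builds new documents from unconditional UpdateOps; on RDB it is
// served by internalCreate() shown above.
static boolean createExample(DocumentStore store) {
    List<UpdateOp> newOps = new ArrayList<UpdateOp>();
    UpdateOp op = new UpdateOp("1:/foo", true); // true = this op describes a new document
    op.set("prop", "value");
    newOps.add(op);
    // returns false if any of the documents already existed
    return store.create(Collection.NODES, newOps);
}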
Use of org.apache.jackrabbit.oak.plugins.document.UpdateOp in project jackrabbit-oak by apache.
From the class RDBDocumentStore, method bulkUpdate.
private <T extends Document> Map<UpdateOp, T> bulkUpdate(Collection<T> collection, List<UpdateOp> updates, Map<String, T> oldDocs, boolean upsert) {
    Set<String> missingDocs = new HashSet<String>();
    for (UpdateOp op : updates) {
        if (!oldDocs.containsKey(op.getId())) {
            missingDocs.add(op.getId());
        }
    }
    for (T doc : readDocumentsUncached(collection, missingDocs).values()) {
        oldDocs.put(doc.getId(), doc);
        if (collection == Collection.NODES) {
            nodesCache.putIfAbsent((NodeDocument) doc);
        }
    }
    List<T> docsToUpdate = new ArrayList<T>(updates.size());
    Set<String> keysToUpdate = new HashSet<String>();
    for (UpdateOp update : updates) {
        String id = update.getId();
        T modifiedDoc = collection.newDocument(this);
        if (oldDocs.containsKey(id)) {
            oldDocs.get(id).deepCopy(modifiedDoc);
        }
        UpdateUtils.applyChanges(modifiedDoc, update);
        docsToUpdate.add(modifiedDoc);
        keysToUpdate.add(id);
    }
    Connection connection = null;
    RDBTableMetaData tmd = getTable(collection);
    try {
        connection = this.ch.getRWConnection();
        Set<String> successfulUpdates = db.update(connection, tmd, docsToUpdate, upsert);
        connection.commit();
        Set<String> failedUpdates = Sets.difference(keysToUpdate, successfulUpdates);
        oldDocs.keySet().removeAll(failedUpdates);
        if (collection == Collection.NODES) {
            for (T doc : docsToUpdate) {
                String id = doc.getId();
                if (successfulUpdates.contains(id)) {
                    if (oldDocs.containsKey(id)) {
                        nodesCache.replaceCachedDocument((NodeDocument) oldDocs.get(id), (NodeDocument) doc);
                    } else {
                        nodesCache.putIfAbsent((NodeDocument) doc);
                    }
                }
            }
        }
        Map<UpdateOp, T> result = new HashMap<UpdateOp, T>();
        for (UpdateOp op : updates) {
            if (successfulUpdates.contains(op.getId())) {
                result.put(op, oldDocs.get(op.getId()));
            }
        }
        return result;
    } catch (SQLException ex) {
        this.ch.rollbackConnection(connection);
        throw handleException("update failed for: " + keysToUpdate, ex, collection, keysToUpdate);
    } finally {
        this.ch.closeConnection(connection);
    }
}
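The copy-then-apply step above can be shown in isolation. A minimal sketch, using only the calls that appear in the method; the helper name applyToCopy is made up for illustration.

// Hedged sketch of the deepCopy/applyChanges pattern used in bulkUpdate above.
// The UpdateOp is applied to a fresh copy so the cached old document is left untouched.
static NodeDocument applyToCopy(DocumentStore store, NodeDocument oldDoc, UpdateOp update) {
    NodeDocument modified = Collection.NODES.newDocument(store);
    oldDoc.deepCopy(modified);                  // start from the current persisted state
    UpdateUtils.applyChanges(modified, update); // fold the set/remove/increment operations in
    return modified;                            // candidate row handed to db.update(...)
}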
Use of org.apache.jackrabbit.oak.plugins.document.UpdateOp in project jackrabbit-oak by apache.
From the class MongoDocumentStore, method bulkUpdate.
private <T extends Document> Map<UpdateOp, T> bulkUpdate(Collection<T> collection, List<UpdateOp> updateOperations, Map<String, T> oldDocs) {
    Map<String, UpdateOp> bulkOperations = createMap(updateOperations);
    Set<String> lackingDocs = difference(bulkOperations.keySet(), oldDocs.keySet());
    oldDocs.putAll(findDocuments(collection, lackingDocs));
    CacheChangesTracker tracker = null;
    if (collection == Collection.NODES) {
        tracker = nodesCache.registerTracker(bulkOperations.keySet());
    }
    try {
        BulkUpdateResult bulkResult = sendBulkUpdate(collection, bulkOperations.values(), oldDocs);
        if (collection == Collection.NODES) {
            List<NodeDocument> docsToCache = new ArrayList<NodeDocument>();
            for (UpdateOp op : filterKeys(bulkOperations, in(bulkResult.upserts)).values()) {
                NodeDocument doc = Collection.NODES.newDocument(this);
                UpdateUtils.applyChanges(doc, op);
                docsToCache.add(doc);
            }
            for (String key : difference(bulkOperations.keySet(), bulkResult.failedUpdates)) {
                T oldDoc = oldDocs.get(key);
                if (oldDoc != null && oldDoc != NodeDocument.NULL) {
                    NodeDocument newDoc = (NodeDocument) applyChanges(collection, oldDoc, bulkOperations.get(key));
                    docsToCache.add(newDoc);
                }
            }
            for (NodeDocument doc : docsToCache) {
                updateLocalChanges(doc);
            }
            nodesCache.putNonConflictingDocs(tracker, docsToCache);
        }
        oldDocs.keySet().removeAll(bulkResult.failedUpdates);
        Map<UpdateOp, T> result = new HashMap<UpdateOp, T>();
        for (Entry<String, UpdateOp> entry : bulkOperations.entrySet()) {
            if (bulkResult.failedUpdates.contains(entry.getKey())) {
                continue;
            } else if (bulkResult.upserts.contains(entry.getKey())) {
                result.put(entry.getValue(), null);
            } else {
                result.put(entry.getValue(), oldDocs.get(entry.getKey()));
            }
        }
        return result;
    } finally {
        if (tracker != null) {
            tracker.close();
        }
    }
}
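On the returned map: a null value marks an upsert (the op created the document), while a non-null value is the document as it looked before the change. A small hedged sketch of how a caller might read it; the helper name inspect is illustrative.

// Interpreting the map returned by bulkUpdate (helper and variable names illustrative).
static void inspect(Map<UpdateOp, NodeDocument> updated) {
    for (Map.Entry<UpdateOp, NodeDocument> e : updated.entrySet()) {
        if (e.getValue() == null) {
            // upsert: the document did not exist before this op
        } else {
            NodeDocument before = e.getValue(); // document state prior to the update
        }
    }
}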
Use of org.apache.jackrabbit.oak.plugins.document.UpdateOp in project jackrabbit-oak by apache.
From the class MongoDocumentStore, method createOrUpdate.
/**
 * Try to apply all the {@link UpdateOp}s with as few MongoDB requests as
 * possible. The return value is the list of the old documents (before
 * applying changes). The mechanism is as follows:
 *
 * <ol>
 * <li>For each UpdateOp try to read the assigned document from the cache.
 * Add it to {@code oldDocs}.</li>
 * <li>Prepare a list of all UpdateOps that don't have their documents yet
 * and read them in one find() call. Add the results to {@code oldDocs}.</li>
 * <li>Prepare a bulk update. For each remaining UpdateOp add the following
 * operation:
 * <ul>
 * <li>Find the document with the same id and the same mod_count as in
 * {@code oldDocs}.</li>
 * <li>Apply the changes from the UpdateOp.</li>
 * </ul>
 * </li>
 * <li>Execute the bulk update.</li>
 * </ol>
 *
 * If some other process modifies the target documents between points 2 and
 * 3, the mod_count will be increased as well and the bulk update will fail
 * for the concurrently modified docs. The method will then remove the
 * failed documents from {@code oldDocs} and restart the process from
 * point 2. It will stop after the 3rd iteration.
 */
@SuppressWarnings("unchecked")
@CheckForNull
@Override
public <T extends Document> List<T> createOrUpdate(Collection<T> collection, List<UpdateOp> updateOps) {
    log("createOrUpdate", updateOps);
    Map<String, UpdateOp> operationsToCover = new LinkedHashMap<String, UpdateOp>();
    List<UpdateOp> duplicates = new ArrayList<UpdateOp>();
    Map<UpdateOp, T> results = new LinkedHashMap<UpdateOp, T>();
    final Stopwatch watch = startWatch();
    try {
        for (UpdateOp updateOp : updateOps) {
            UpdateUtils.assertUnconditional(updateOp);
            UpdateOp clone = updateOp.copy();
            if (operationsToCover.containsKey(updateOp.getId())) {
                duplicates.add(clone);
            } else {
                operationsToCover.put(updateOp.getId(), clone);
            }
            results.put(clone, null);
        }
        Map<String, T> oldDocs = new HashMap<String, T>();
        if (collection == Collection.NODES) {
            oldDocs.putAll((Map<String, T>) getCachedNodes(operationsToCover.keySet()));
        }
        for (int i = 0; i <= bulkRetries; i++) {
            if (operationsToCover.size() <= 2) {
                // in bulk mode wouldn't result in any performance gain
                break;
            }
            for (List<UpdateOp> partition : Lists.partition(Lists.newArrayList(operationsToCover.values()), bulkSize)) {
                Map<UpdateOp, T> successfulUpdates = bulkUpdate(collection, partition, oldDocs);
                results.putAll(successfulUpdates);
                operationsToCover.values().removeAll(successfulUpdates.keySet());
            }
        }
        // if there are some changes left, we'll apply them one after another
        Iterator<UpdateOp> it = Iterators.concat(operationsToCover.values().iterator(), duplicates.iterator());
        while (it.hasNext()) {
            UpdateOp op = it.next();
            it.remove();
            T oldDoc = createOrUpdate(collection, op);
            if (oldDoc != null) {
                results.put(op, oldDoc);
            }
        }
    } catch (MongoException e) {
        throw handleException(e, collection, Iterables.transform(updateOps, new Function<UpdateOp, String>() {
            @Override
            public String apply(UpdateOp input) {
                return input.getId();
            }
        }));
    } finally {
        stats.doneCreateOrUpdate(watch.elapsed(TimeUnit.NANOSECONDS), collection, Lists.transform(updateOps, new Function<UpdateOp, String>() {
            @Override
            public String apply(UpdateOp input) {
                return input.getId();
            }
        }));
    }
    List<T> resultList = new ArrayList<T>(results.values());
    log("createOrUpdate returns", resultList);
    return resultList;
}
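A caller-side sketch of this bulk createOrUpdate entry point; the ids, the property "p" and the helper name touchAll are illustrative. The returned list has one entry per op: the previous document state, or null where the document was newly created.

// Hedged caller-side sketch (ids, property name and helper name are illustrative).
static List<NodeDocument> touchAll(DocumentStore store, List<String> ids) {
    List<UpdateOp> ops = new ArrayList<UpdateOp>();
    for (String id : ids) {
        UpdateOp op = new UpdateOp(id, false); // false = the document may already exist
        op.set("p", "v");
        ops.add(op);
    }
    // one entry per op: the document before the change, or null if it was created
    return store.createOrUpdate(Collection.NODES, ops);
}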
Use of org.apache.jackrabbit.oak.plugins.document.UpdateOp in project jackrabbit-oak by apache.
From the class MongoDocumentStore, method sendBulkUpdate.
private <T extends Document> BulkUpdateResult sendBulkUpdate(Collection<T> collection, java.util.Collection<UpdateOp> updateOps, Map<String, T> oldDocs) {
    DBCollection dbCollection = getDBCollection(collection);
    BulkWriteOperation bulk = dbCollection.initializeUnorderedBulkOperation();
    String[] bulkIds = new String[updateOps.size()];
    int i = 0;
    for (UpdateOp updateOp : updateOps) {
        String id = updateOp.getId();
        QueryBuilder query = createQueryForUpdate(id, updateOp.getConditions());
        T oldDoc = oldDocs.get(id);
        DBObject update;
        if (oldDoc == null || oldDoc == NodeDocument.NULL) {
            query.and(Document.MOD_COUNT).exists(false);
            update = createUpdate(updateOp, true);
        } else {
            query.and(Document.MOD_COUNT).is(oldDoc.getModCount());
            update = createUpdate(updateOp, false);
        }
        bulk.find(query.get()).upsert().updateOne(update);
        bulkIds[i++] = id;
    }
    BulkWriteResult bulkResult;
    Set<String> failedUpdates = new HashSet<String>();
    Set<String> upserts = new HashSet<String>();
    try {
        bulkResult = bulk.execute();
    } catch (BulkWriteException e) {
        bulkResult = e.getWriteResult();
        for (BulkWriteError err : e.getWriteErrors()) {
            failedUpdates.add(bulkIds[err.getIndex()]);
        }
    }
    for (BulkWriteUpsert upsert : bulkResult.getUpserts()) {
        upserts.add(bulkIds[upsert.getIndex()]);
    }
    return new BulkUpdateResult(failedUpdates, upserts);
}
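The optimistic-concurrency shape of each queued operation can be illustrated directly. A hedged sketch of roughly what one queued update looks like, with an illustrative id, mod_count and a simplified payload (createQueryForUpdate and createUpdate build richer documents in the real code); "bulk" refers to the BulkWriteOperation from the method above.

// Rough shape of one queued operation (illustrative id, mod_count and $set payload).
// Matching on _modCount means a concurrently modified document will not be updated;
// its id then shows up in the failedUpdates set of BulkUpdateResult.
DBObject query = QueryBuilder.start(Document.ID).is("1:/foo")
        .and(Document.MOD_COUNT).is(7L).get();
DBObject update = new BasicDBObject("$set", new BasicDBObject("p", "v"))
        .append("$inc", new BasicDBObject(Document.MOD_COUNT, 1L));
bulk.find(query).upsert().updateOne(update);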