Use of org.apache.jackrabbit.oak.plugins.document.NodeDocument in project jackrabbit-oak by apache.
The class MongoDocumentStore, method bulkUpdate.
private <T extends Document> Map<UpdateOp, T> bulkUpdate(Collection<T> collection,
                                                         List<UpdateOp> updateOperations,
                                                         Map<String, T> oldDocs) {
    Map<String, UpdateOp> bulkOperations = createMap(updateOperations);
    // fetch the current version of any document not supplied by the caller
    Set<String> lackingDocs = difference(bulkOperations.keySet(), oldDocs.keySet());
    oldDocs.putAll(findDocuments(collection, lackingDocs));
    CacheChangesTracker tracker = null;
    if (collection == Collection.NODES) {
        tracker = nodesCache.registerTracker(bulkOperations.keySet());
    }
    try {
        BulkUpdateResult bulkResult = sendBulkUpdate(collection, bulkOperations.values(), oldDocs);
        if (collection == Collection.NODES) {
            List<NodeDocument> docsToCache = new ArrayList<NodeDocument>();
            // documents created by the bulk operation (upserts)
            for (UpdateOp op : filterKeys(bulkOperations, in(bulkResult.upserts)).values()) {
                NodeDocument doc = Collection.NODES.newDocument(this);
                UpdateUtils.applyChanges(doc, op);
                docsToCache.add(doc);
            }
            // documents that existed before and were updated successfully
            for (String key : difference(bulkOperations.keySet(), bulkResult.failedUpdates)) {
                T oldDoc = oldDocs.get(key);
                if (oldDoc != null && oldDoc != NodeDocument.NULL) {
                    NodeDocument newDoc = (NodeDocument) applyChanges(collection, oldDoc, bulkOperations.get(key));
                    docsToCache.add(newDoc);
                }
            }
            for (NodeDocument doc : docsToCache) {
                updateLocalChanges(doc);
            }
            nodesCache.putNonConflictingDocs(tracker, docsToCache);
        }
        oldDocs.keySet().removeAll(bulkResult.failedUpdates);
        Map<UpdateOp, T> result = new HashMap<UpdateOp, T>();
        for (Entry<String, UpdateOp> entry : bulkOperations.entrySet()) {
            if (bulkResult.failedUpdates.contains(entry.getKey())) {
                continue;
            } else if (bulkResult.upserts.contains(entry.getKey())) {
                result.put(entry.getValue(), null);
            } else {
                result.put(entry.getValue(), oldDocs.get(entry.getKey()));
            }
        }
        return result;
    } finally {
        if (tracker != null) {
            tracker.close();
        }
    }
}
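bulkUpdate is private; callers reach it through the public DocumentStore API. A minimal usage sketch, assuming an initialized MongoDocumentStore bound to the variable store (the variable name, ids and property name are illustrative):
// Batch two document updates into a single MongoDB round trip.
// createOrUpdate(Collection, List<UpdateOp>) is the public entry
// point that delegates to the bulk update path shown above.
List<UpdateOp> ops = new ArrayList<UpdateOp>();
for (String id : Arrays.asList("1:/a", "1:/b")) {
    UpdateOp op = new UpdateOp(id, true); // true: create the document if absent
    op.set("p", 1L);
    ops.add(op);
}
// one entry per op: the document's previous state, or null for an upsert
List<NodeDocument> previous = store.createOrUpdate(Collection.NODES, ops);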
Use of org.apache.jackrabbit.oak.plugins.document.NodeDocument in project jackrabbit-oak by apache.
The class RDBDocumentStore, method internalQuery.
private <T extends Document> List<T> internalQuery(Collection<T> collection, String fromKey, String toKey,
        List<String> excludeKeyPatterns, List<QueryCondition> conditions, int limit) {
    Connection connection = null;
    RDBTableMetaData tmd = getTable(collection);
    for (QueryCondition cond : conditions) {
        if (!INDEXEDPROPERTIES.contains(cond.getPropertyName())) {
            String message = "indexed property " + cond.getPropertyName() + " not supported, query was '"
                    + cond.getOperator() + " " + cond.getValue() + "'; supported properties are " + INDEXEDPROPERTIES;
            LOG.info(message);
            throw new DocumentStoreException(message);
        }
    }
    final Stopwatch watch = startWatch();
    int resultSize = 0;
    CacheChangesTracker tracker = null;
    try {
        if (collection == Collection.NODES) {
            tracker = nodesCache.registerTracker(fromKey, toKey);
        }
        long now = System.currentTimeMillis();
        connection = this.ch.getROConnection();
        String from = collection == Collection.NODES && NodeDocument.MIN_ID_VALUE.equals(fromKey) ? null : fromKey;
        String to = collection == Collection.NODES && NodeDocument.MAX_ID_VALUE.equals(toKey) ? null : toKey;
        List<RDBRow> dbresult = db.query(connection, tmd, from, to, excludeKeyPatterns, conditions, limit);
        connection.commit();
        int size = dbresult.size();
        List<T> result = new ArrayList<T>(size);
        for (int i = 0; i < size; i++) {
            // free RDBRow as early as possible
            RDBRow row = dbresult.set(i, null);
            T doc = getIfCached(collection, row.getId(), row.getModcount());
            if (doc == null) {
                // parse DB contents into document if and only if it's not
                // already in the cache
                doc = convertFromDBObject(collection, row);
            } else {
                // we got a document from the cache, thus collection is NODES
                // and a tracker is present
                long lastmodified = modifiedOf(doc);
                if (lastmodified == row.getModified() && lastmodified >= 1) {
                    Lock lock = locks.acquire(row.getId());
                    try {
                        if (!tracker.mightBeenAffected(row.getId())) {
                            // the cache entry wasn't touched in the meantime,
                            // so mark it as fresh
                            ((NodeDocument) doc).markUpToDate(now);
                        }
                    } finally {
                        lock.unlock();
                    }
                } else {
                    // we need a fresh document instance
                    doc = convertFromDBObject(collection, row);
                }
            }
            result.add(doc);
        }
        if (collection == Collection.NODES) {
            nodesCache.putNonConflictingDocs(tracker, castAsNodeDocumentList(result));
        }
        resultSize = result.size();
        return result;
    } catch (Exception ex) {
        LOG.error("SQL exception on query", ex);
        throw new DocumentStoreException(ex);
    } finally {
        if (tracker != null) {
            tracker.close();
        }
        this.ch.closeConnection(connection);
        stats.doneQuery(watch.elapsed(TimeUnit.NANOSECONDS), collection, fromKey, toKey,
                !conditions.isEmpty(), resultSize, -1, false);
    }
}
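internalQuery backs the store's public query methods. A minimal sketch of a range query over the children of a path, assuming an initialized RDBDocumentStore named store (the path and limit are illustrative):
// Fetch up to 100 child documents of /test. getKeyLowerLimit and
// getKeyUpperLimit compute the id range covering a path's children.
String fromKey = Utils.getKeyLowerLimit("/test");
String toKey = Utils.getKeyUpperLimit("/test");
List<NodeDocument> children = store.query(Collection.NODES, fromKey, toKey, 100);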
Use of org.apache.jackrabbit.oak.plugins.document.NodeDocument in project jackrabbit-oak by apache.
The class CacheConsistencyIT, method runTest.
private void runTest() throws Throwable {
    addNodes(null, "/test", "/test/foo");
    final List<Throwable> exceptions = Collections.synchronizedList(new ArrayList<Throwable>());
    Thread t1 = new Thread(new Runnable() {
        @Override
        public void run() {
            String id = Utils.getIdFromPath("/test/foo");
            List<String> ids = Lists.newArrayList();
            ids.add(id);
            long v = 0;
            while (exceptions.isEmpty()) {
                try {
                    UpdateOp op = new UpdateOp(ids.get(0), false);
                    op.set("p", ++v);
                    store.update(NODES, ids, op);
                    NodeDocument doc = store.find(NODES, id);
                    Object p = doc.get("p");
                    assertEquals(v, ((Long) p).longValue());
                } catch (Throwable e) {
                    exceptions.add(e);
                }
            }
        }
    }, "update");
    t1.start();
    Thread t2 = new Thread(new Runnable() {
        @Override
        public void run() {
            String id = Utils.getIdFromPath("/test/foo");
            long v = 0;
            while (exceptions.isEmpty()) {
                try {
                    UpdateOp op = new UpdateOp(id, false);
                    op.set("q", ++v);
                    NodeDocument old = store.findAndUpdate(NODES, op);
                    Object q = old.get("q");
                    if (q != null) {
                        assertEquals(v - 1, ((Long) q).longValue());
                    }
                } catch (Throwable e) {
                    exceptions.add(e);
                }
            }
        }
    }, "findAndUpdate");
    t2.start();
    Thread t3 = new Thread(new Runnable() {
        @Override
        public void run() {
            String id = Utils.getIdFromPath("/test/foo");
            long p = 0;
            long q = 0;
            while (exceptions.isEmpty()) {
                try {
                    NodeDocument doc = store.find(NODES, id);
                    if (doc != null) {
                        Object value = doc.get("p");
                        if (value != null) {
                            assertTrue((Long) value >= p);
                            p = (Long) value;
                        }
                        value = doc.get("q");
                        if (value != null) {
                            assertTrue("previous: " + q + ", now: " + value, (Long) value >= q);
                            q = (Long) value;
                        }
                    }
                } catch (Throwable e) {
                    exceptions.add(e);
                }
            }
        }
    }, "reader");
    t3.start();
    NodeDocumentCache cache = store.getNodeDocumentCache();
    // run for at most five seconds
    long end = System.currentTimeMillis() + 5000;
    String id = Utils.getIdFromPath("/test/foo");
    while (t1.isAlive() && t2.isAlive() && t3.isAlive() && System.currentTimeMillis() < end) {
        if (cache.getIfPresent(id) != null) {
            Thread.sleep(0, (int) (Math.random() * 100));
            // simulate eviction
            cache.invalidate(id);
        }
    }
    for (Throwable e : exceptions) {
        throw e;
    }
    // signal the worker threads to stop
    exceptions.add(new Exception("end"));
    t1.join();
    t2.join();
    t3.join();
}
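The addNodes helper is not part of the snippet; a plausible sketch, assuming it creates one empty document per path via DocumentStore.create (the helper body and the type of its first, unused parameter are assumptions, not the test's actual code):
// Hypothetical reconstruction of the addNodes helper.
private void addNodes(Revision rev, String... paths) {
    List<UpdateOp> ops = new ArrayList<UpdateOp>();
    for (String path : paths) {
        ops.add(new UpdateOp(Utils.getIdFromPath(path), true)); // true: new document
    }
    assertTrue(store.create(NODES, ops));
}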
Use of org.apache.jackrabbit.oak.plugins.document.NodeDocument in project jackrabbit-oak by apache.
The class CacheChangesTrackerTest, method createDoc.
private NodeDocument createDoc(String id) {
    NodeDocument doc = Collection.NODES.newDocument(ds);
    doc.put("_id", id);
    return doc;
}
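A sketch of how such a document interacts with a CacheChangesTracker in a test, assuming a NodeDocumentCache named cache (the wiring is an assumption; only registerTracker, invalidate, putNonConflictingDocs and getIfPresent from the snippets above are used):
// A document whose key saw an invalidation during the tracker's
// lifetime must not be re-added by putNonConflictingDocs.
NodeDocument doc = createDoc("/foo");
CacheChangesTracker tracker = cache.registerTracker(Collections.singleton("/foo"));
try {
    cache.invalidate("/foo"); // concurrent change observed by the tracker
    cache.putNonConflictingDocs(tracker, Collections.singletonList(doc));
    assertNull(cache.getIfPresent("/foo")); // the stale document was rejected
} finally {
    tracker.close();
}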
Use of org.apache.jackrabbit.oak.plugins.document.NodeDocument in project jackrabbit-oak by apache.
The class RDBExport, method dumpRow.
@Nonnull
private static StringBuilder dumpRow(RDBDocumentSerializer ser, String id, RDBRow row) {
    NodeDocument doc = ser.fromRow(Collection.NODES, row);
    String docjson = ser.asString(doc);
    StringBuilder fulljson = new StringBuilder();
    // prepend the (escaped) id, then splice in the remainder of the
    // serialized document after its opening '{'
    fulljson.append("{\"_id\":\"");
    JsopBuilder.escape(id, fulljson);
    fulljson.append("\",");
    fulljson.append(docjson.substring(1));
    return fulljson;
}
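A hypothetical call site, assuming a serializer ser and rows iterated from the export query (the names and the output stream are illustrative):
// Emit one JSON object per row, e.g. for a mongoexport-style dump.
for (RDBRow row : rows) {
    System.out.println(dumpRow(ser, row.getId(), row));
}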