Use of org.apache.jackrabbit.oak.plugins.document.UpdateOp.Key in the jackrabbit-oak project (Apache).
Class MongoDocumentStore, method createQueryForUpdate:
/**
 * Builds the MongoDB query that selects the document with the given key
 * and additionally matches all of the given update conditions.
 *
 * @param key the document key.
 * @param conditions the conditions to translate into query clauses.
 * @return the assembled query.
 */
@Nonnull
private static QueryBuilder createQueryForUpdate(String key, Map<Key, Condition> conditions) {
    // Start from the by-id query, then AND one clause per condition.
    QueryBuilder query = getByKeyQuery(key);
    for (Entry<Key, Condition> entry : conditions.entrySet()) {
        String field = entry.getKey().toString();
        Condition condition = entry.getValue();
        switch (condition.type) {
            case EXISTS:
                query.and(field).exists(condition.value);
                break;
            case EQUALS:
                query.and(field).is(condition.value);
                break;
            case NOTEQUALS:
                query.and(field).notEquals(condition.value);
                break;
        }
    }
    return query;
}
Use of org.apache.jackrabbit.oak.plugins.document.UpdateOp.Key in the jackrabbit-oak project (Apache).
Class MongoDocumentStore, method create:
/**
 * Creates the given documents with a single bulk insert.
 * <p>
 * For each {@link UpdateOp} the changes are applied twice: once to a fresh
 * in-memory {@link Document} (added to the nodes cache on success) and once
 * to the {@link DBObject} sent to MongoDB.
 *
 * @param collection the collection to create the documents in.
 * @param updateOps the changes describing the new documents; each must be
 *            unconditional (asserted per entry).
 * @return {@code true} if the insert succeeded, {@code false} if a
 *         {@link MongoException} occurred (e.g. a document already exists).
 */
@Override
public <T extends Document> boolean create(Collection<T> collection, List<UpdateOp> updateOps) {
    log("create", updateOps);
    List<T> docs = new ArrayList<T>();
    DBObject[] inserts = new DBObject[updateOps.size()];
    List<String> ids = Lists.newArrayListWithCapacity(updateOps.size());
    for (int i = 0; i < updateOps.size(); i++) {
        inserts[i] = new BasicDBObject();
        UpdateOp update = updateOps.get(i);
        inserts[i].put(Document.ID, update.getId());
        // create() must not be used with conditional update operations
        UpdateUtils.assertUnconditional(update);
        // build the in-memory document that is cached on success below
        T target = collection.newDocument(this);
        UpdateUtils.applyChanges(target, update);
        docs.add(target);
        // reuse the local instead of re-fetching updateOps.get(i)
        ids.add(update.getId());
        for (Entry<Key, Operation> entry : update.getChanges().entrySet()) {
            Key k = entry.getKey();
            Operation op = entry.getValue();
            switch (op.type) {
                case SET:
                case MAX:
                case INCREMENT: {
                    // on a brand-new document these all reduce to a plain set
                    inserts[i].put(k.toString(), op.value);
                    break;
                }
                case SET_MAP_ENTRY: {
                    Revision r = k.getRevision();
                    if (r == null) {
                        throw new IllegalStateException("SET_MAP_ENTRY must not have null revision");
                    }
                    // sub-document holding revision -> value entries
                    DBObject value = (DBObject) inserts[i].get(k.getName());
                    if (value == null) {
                        // first entry: compact single-entry representation
                        value = new RevisionEntry(r, op.value);
                        inserts[i].put(k.getName(), value);
                    } else if (value.keySet().size() == 1) {
                        // second entry: replace the single-entry object with a
                        // BasicDBObject that can hold multiple revisions
                        String key = value.keySet().iterator().next();
                        Object val = value.get(key);
                        value = new BasicDBObject(key, val);
                        value.put(r.toString(), op.value);
                        inserts[i].put(k.getName(), value);
                    } else {
                        value.put(r.toString(), op.value);
                    }
                    break;
                }
                case REMOVE:
                case REMOVE_MAP_ENTRY:
                    // nothing to do for new documents
                    break;
            }
        }
        // a newly created document starts out with _modCount = 1
        if (!inserts[i].containsField(Document.MOD_COUNT)) {
            inserts[i].put(Document.MOD_COUNT, 1L);
            target.put(Document.MOD_COUNT, 1L);
        }
    }
    DBCollection dbCollection = getDBCollection(collection);
    final Stopwatch watch = startWatch();
    boolean insertSuccess = false;
    try {
        try {
            dbCollection.insert(inserts);
            if (collection == Collection.NODES) {
                // make the freshly created node documents visible in the caches
                for (T doc : docs) {
                    nodesCache.putIfAbsent((NodeDocument) doc);
                    updateLocalChanges((NodeDocument) doc);
                }
            }
            insertSuccess = true;
            return true;
        } catch (MongoException e) {
            // insert failure (typically: a document already exists) is part of
            // the create() contract and reported to the caller as 'false'
            return false;
        }
    } finally {
        stats.doneCreate(watch.elapsed(TimeUnit.NANOSECONDS), collection, ids, insertSuccess);
    }
}
Use of org.apache.jackrabbit.oak.plugins.document.UpdateOp.Key in the jackrabbit-oak project (Apache).
Class CommitRootUpdateTest, method exceptionOnUpdate:
@Test
public void exceptionOnUpdate() throws Exception {
// Simulates a communication failure that happens AFTER the final
// commit-root update was already applied by the store: the update goes
// through, but the caller sees an exception.
final AtomicBoolean throwAfterUpdate = new AtomicBoolean(false);
MemoryDocumentStore store = new MemoryDocumentStore() {
@Override
public <T extends Document> T findAndUpdate(Collection<T> collection, UpdateOp update) {
// perform the real update first, then (at most once) fail
T doc = super.findAndUpdate(collection, update);
if (isFinalCommitRootUpdate(update) && throwAfterUpdate.compareAndSet(true, false)) {
throw new RuntimeException("communication failure");
}
return doc;
}
// An update is considered the 'final' commit-root update when it only
// touches revisions entries and _modified (no other changes).
private boolean isFinalCommitRootUpdate(UpdateOp update) {
boolean finalUpdate = true;
for (Map.Entry<Key, Operation> op : update.getChanges().entrySet()) {
String name = op.getKey().getName();
if (NodeDocument.isRevisionsEntry(name) || NodeDocument.MODIFIED_IN_SECS.equals(name)) {
continue;
}
finalUpdate = false;
break;
}
return finalUpdate;
}
};
DocumentNodeStore ns = builderProvider.newBuilder().setDocumentStore(store).setAsyncDelay(0).getNodeStore();
// create the parents the commit below adds children to
NodeBuilder b = ns.getRoot().builder();
b.child("foo");
b.child("bar");
merge(ns, b);
// arm the one-shot failure for the next final commit-root update
throwAfterUpdate.set(true);
boolean success = false;
Commit c = ns.newCommit(ns.getHeadRevision(), null);
try {
c.addNode(new DocumentNodeState(ns, "/foo/node", c.getBaseRevision()));
c.addNode(new DocumentNodeState(ns, "/bar/node", c.getBaseRevision()));
c.apply();
success = true;
} finally {
if (success) {
ns.done(c, false, CommitInfo.EMPTY);
} else {
ns.canceled(c);
}
}
// despite the injected exception, the commit must be visible ...
NodeState root = ns.getRoot();
assertTrue(root.getChildNode("foo").getChildNode("node").exists());
assertTrue(root.getChildNode("bar").getChildNode("node").exists());
// ... and the failure must actually have been triggered (flag reset)
assertFalse(throwAfterUpdate.get());
}
Use of org.apache.jackrabbit.oak.plugins.document.UpdateOp.Key in the jackrabbit-oak project (Apache).
Class DocumentSplitTest, method removeGarbage:
// OAK-3081
@Test
public void removeGarbage() throws Exception {
// Verifies that SplitOperations never removes committed _deleted entries
// while commits happen concurrently (regression test for OAK-3081).
final DocumentStore store = mk.getDocumentStore();
final DocumentNodeStore ns = mk.getNodeStore();
final List<Exception> exceptions = Lists.newArrayList();
final List<RevisionVector> revisions = Lists.newArrayList();
// Writer thread: repeatedly creates and removes a deep node structure,
// recording the head revision after every merge.
// NOTE(review): 'exceptions' is collected but never asserted afterwards
// — confirm whether a failed merge should fail the test.
Thread t = new Thread(new Runnable() {
@Override
public void run() {
try {
for (int i = 0; i < 200; i++) {
NodeBuilder builder = ns.getRoot().builder();
builder.child("foo").child("node").child("node").child("node").child("node");
builder.child("bar").child("node").child("node").child("node").child("node");
merge(ns, builder);
revisions.add(ns.getHeadRevision());
builder = ns.getRoot().builder();
builder.child("foo").child("node").remove();
builder.child("bar").child("node").remove();
merge(ns, builder);
revisions.add(ns.getHeadRevision());
}
} catch (CommitFailedException e) {
exceptions.add(e);
}
}
});
t.start();
// Use a revision context, which wraps the DocumentNodeStore and
// randomly delays calls to get the head revision
RevisionContext rc = new TestRevisionContext(ns);
while (t.isAlive()) {
for (String id : ns.getSplitCandidates()) {
RevisionVector head = ns.getHeadRevision();
NodeDocument doc = store.find(NODES, id);
List<UpdateOp> ops = SplitOperations.forDocument(doc, rc, head, NO_BINARY, NUM_REVS_THRESHOLD);
// collect which _deleted revisions the split would remove vs. move
Set<Revision> removed = Sets.newHashSet();
Set<Revision> added = Sets.newHashSet();
for (UpdateOp op : ops) {
for (Map.Entry<Key, Operation> e : op.getChanges().entrySet()) {
if (!"_deleted".equals(e.getKey().getName())) {
continue;
}
Revision r = e.getKey().getRevision();
if (e.getValue().type == Operation.Type.REMOVE_MAP_ENTRY) {
removed.add(r);
} else if (e.getValue().type == Operation.Type.SET_MAP_ENTRY) {
added.add(r);
}
}
}
// every removed entry must be re-added elsewhere (moved, not lost)
removed.removeAll(added);
assertTrue("SplitOperations must not remove committed changes: " + removed, removed.isEmpty());
}
// perform the actual cleanup
ns.runBackgroundOperations();
}
// the _deleted map must contain all revisions
for (NodeDocument doc : Utils.getAllDocuments(store)) {
// only check the deep test nodes, not split docs or shallow documents
if (doc.isSplitDocument() || Utils.getDepthFromId(doc.getId()) < 2) {
continue;
}
Set<Revision> revs = Sets.newHashSet();
for (RevisionVector rv : revisions) {
Iterables.addAll(revs, rv);
}
revs.removeAll(doc.getValueMap("_deleted").keySet());
assertTrue("Missing _deleted entries on " + doc.getId() + ": " + revs, revs.isEmpty());
}
}
Use of org.apache.jackrabbit.oak.plugins.document.UpdateOp.Key in the jackrabbit-oak project (Apache).
Class DocumentNodeStoreTest, method nonBlockingReset:
// OAK-2620
@Test
public void nonBlockingReset() throws Exception {
// Verifies that resetting a branch after a failed merge does not happen
// while the merge lock is still held (regression test for OAK-2620).
final List<String> failure = Lists.newArrayList();
final AtomicReference<ReentrantReadWriteLock> mergeLock = new AtomicReference<ReentrantReadWriteLock>();
MemoryDocumentStore store = new MemoryDocumentStore() {
@Override
public <T extends Document> T findAndUpdate(Collection<T> collection, UpdateOp update) {
// a _collisions update is taken as the signal that the branch
// reset is in progress; the current thread must not hold the
// merge lock at that point
for (Map.Entry<Key, Operation> entry : update.getChanges().entrySet()) {
if (entry.getKey().getName().equals(NodeDocument.COLLISIONS)) {
ReentrantReadWriteLock rwLock = mergeLock.get();
if (rwLock.getReadHoldCount() > 0 || rwLock.getWriteHoldCount() > 0) {
failure.add("Branch reset still holds merge lock");
break;
}
}
}
return super.findAndUpdate(collection, update);
}
};
DocumentNodeStore ds = builderProvider.newBuilder().setDocumentStore(store).setAsyncDelay(0).getNodeStore();
// do not retry merges
ds.setMaxBackOffMillis(0);
DocumentNodeState root = ds.getRoot();
final DocumentNodeStoreBranch b = ds.createBranch(root);
// branch state is now Unmodified
assertTrue(b.getMergeLock() instanceof ReentrantReadWriteLock);
mergeLock.set((ReentrantReadWriteLock) b.getMergeLock());
NodeBuilder builder = root.builder();
builder.child("foo");
b.setRoot(builder.getNodeState());
// branch state is now InMemory
builder.child("bar");
b.setRoot(builder.getNodeState());
try {
// the hook forces the commit past UPDATE_LIMIT (persisting the
// branch) and then fails it, which triggers a branch reset
b.merge(new CommitHook() {
@Nonnull
@Override
public NodeState processCommit(NodeState before, NodeState after, CommitInfo info) throws CommitFailedException {
NodeBuilder foo = after.builder().child("foo");
for (int i = 0; i <= DocumentMK.UPDATE_LIMIT; i++) {
foo.setProperty("prop", i);
}
throw new CommitFailedException("Fail", 0, "");
}
}, CommitInfo.EMPTY);
} catch (CommitFailedException e) {
// expected
}
// report any violation recorded by the store override above
for (String s : failure) {
fail(s);
}
}
Aggregations