Example usage of org.apache.jackrabbit.oak.plugins.document.memory.MemoryDocumentStore in the Apache jackrabbit-oak project: class DocumentNodeStoreTest, method diffOnce.
// OAK-1782
// Verifies that concurrent diff calls for the same revision range share a
// single computation: no matter how many readers diff at once, the backing
// DocumentStore must see at most two range queries.
@Test
public void diffOnce() throws Exception {
    // counts every range query issued against the DocumentStore
    final AtomicInteger numQueries = new AtomicInteger();
    MemoryDocumentStore store = new MemoryDocumentStore() {
        @Nonnull
        @Override
        public <T extends Document> List<T> query(Collection<T> collection,
                                                  String fromKey,
                                                  String toKey,
                                                  String indexedProperty,
                                                  long startValue,
                                                  int limit) {
            numQueries.getAndIncrement();
            return super.query(collection, fromKey, toKey, indexedProperty, startValue, limit);
        }
    };
    final DocumentMK mk = builderProvider.newBuilder().setDocumentStore(store).open();
    final DocumentNodeStore ns = mk.getNodeStore();
    NodeBuilder builder = ns.getRoot().builder();
    // make sure we have enough children to trigger diffManyChildren
    for (int i = 0; i < DocumentMK.MANY_CHILDREN_THRESHOLD * 2; i++) {
        builder.child("node-" + i);
    }
    merge(ns, builder);
    final RevisionVector head = ns.getHeadRevision();
    Revision localHead = head.getRevision(ns.getClusterId());
    assertNotNull(localHead);
    // a revision one second ahead of the current local head
    final RevisionVector to = new RevisionVector(
            new Revision(localHead.getTimestamp() + 1000, 0, localHead.getClusterId()));
    int numReaders = 10;
    final CountDownLatch ready = new CountDownLatch(numReaders);
    final CountDownLatch go = new CountDownLatch(1);
    List<Thread> readers = Lists.newArrayList();
    for (int i = 0; i < numReaders; i++) {
        Thread t = new Thread(new Runnable() {
            @Override
            public void run() {
                try {
                    ready.countDown();
                    go.await();
                    mk.diff(head.toString(), to.toString(), "/", 0);
                } catch (InterruptedException e) {
                    // restore the interrupt status instead of swallowing it;
                    // silently dropping the interrupt would hide a lost wakeup
                    Thread.currentThread().interrupt();
                }
            }
        });
        readers.add(t);
        t.start();
    }
    // wait until all readers are poised on the latch, then reset the counter
    // so only queries caused by the concurrent diffs are counted
    ready.await();
    numQueries.set(0);
    go.countDown();
    for (Thread t : readers) {
        t.join();
    }
    // must not perform more than two queries
    // 1) query the first 50 children to find out there are many
    // 2) query for the changed children between the two revisions
    assertTrue(numQueries.get() <= 2);
}
Example usage of org.apache.jackrabbit.oak.plugins.document.memory.MemoryDocumentStore in the Apache jackrabbit-oak project: class DocumentSplitTest, method cluster.
// Verifies document splitting with three cluster nodes writing to the same
// node: after background operations each cluster node keeps only its most
// recent revision locally, while the full revision map retains everything
// in descending timestamp order.
@Test
public void cluster() {
    MemoryDocumentStore documentStore = new MemoryDocumentStore();
    MemoryBlobStore blobStore = new MemoryBlobStore();
    DocumentMK mk1 = new DocumentMK.Builder()
            .setDocumentStore(documentStore)
            .setBlobStore(blobStore)
            .setAsyncDelay(0)
            .setClusterId(1)
            .open();
    mk1.commit("/", "+\"test\":{\"prop1\":0}", null, null);
    // make sure the new node is visible to other DocumentMK instances
    mk1.backgroundWrite();
    DocumentMK mk2 = new DocumentMK.Builder()
            .setDocumentStore(documentStore)
            .setBlobStore(blobStore)
            .setAsyncDelay(0)
            .setClusterId(2)
            .open();
    DocumentMK mk3 = new DocumentMK.Builder()
            .setDocumentStore(documentStore)
            .setBlobStore(blobStore)
            .setAsyncDelay(0)
            .setClusterId(3)
            .open();
    // each cluster node commits enough changes to reach the split threshold
    for (int i = 0; i < NodeDocument.NUM_REVS_THRESHOLD; i++) {
        mk1.commit("/", "^\"test/prop1\":" + i, null, null);
        mk2.commit("/", "^\"test/prop2\":" + i, null, null);
        mk3.commit("/", "^\"test/prop3\":" + i, null, null);
    }
    mk1.runBackgroundOperations();
    mk2.runBackgroundOperations();
    mk3.runBackgroundOperations();
    NodeDocument doc = documentStore.find(NODES, Utils.getIdFromPath("/test"));
    assertNotNull(doc);
    // after the split, only one revision per cluster node remains local
    Map<Revision, String> revisions = doc.getLocalRevisions();
    assertEquals(3, revisions.size());
    // the complete value map still exposes every revision ever written
    revisions = doc.getValueMap("_revisions");
    assertEquals(3 * NodeDocument.NUM_REVS_THRESHOLD, revisions.size());
    // revisions must come back ordered newest-first
    Revision previous = null;
    for (Revision current : revisions.keySet()) {
        if (previous != null) {
            assertTrue(previous.compareRevisionTimeThenClusterId(current) > 0);
        }
        previous = current;
    }
    mk1.dispose();
    mk2.dispose();
    mk3.dispose();
}
Example usage of org.apache.jackrabbit.oak.plugins.document.memory.MemoryDocumentStore in the Apache jackrabbit-oak project: class DocumentNodeStoreTest, method parentWithUnseenChildrenMustNotBeDeleted.
// OAK-2929
// A parent node must not be removable while another cluster node has created
// children under it that the remover has not yet seen. Exercises four
// orderings: removal by the creator of node1 (Test 1/3) and by the creator
// of node2 (Test 2/4), before and after the parent itself was commonly known.
@Test
public void parentWithUnseenChildrenMustNotBeDeleted() throws Exception {
    final MemoryDocumentStore docStore = new MemoryDocumentStore();
    final DocumentNodeStore store1 = builderProvider.newBuilder()
            .setDocumentStore(docStore)
            .setAsyncDelay(0)
            .setClusterId(1)
            .getNodeStore();
    store1.setEnableConcurrentAddRemove(true);
    final DocumentNodeStore store2 = builderProvider.newBuilder()
            .setDocumentStore(docStore)
            .setAsyncDelay(0)
            .setClusterId(2)
            .getNodeStore();
    store2.setEnableConcurrentAddRemove(true);
    // establish /:hidden and make it visible to both cluster nodes
    NodeBuilder builder = store1.getRoot().builder();
    builder.child(":hidden");
    merge(store1, builder);
    store1.runBackgroundOperations();
    store2.runBackgroundOperations();
    // each cluster node creates its own child under /:hidden/parent
    // without seeing the other's child
    builder = store1.getRoot().builder();
    builder.child(":hidden").child("parent").child("node1");
    merge(store1, builder);
    builder = store2.getRoot().builder();
    builder.child(":hidden").child("parent").child("node2");
    merge(store2, builder);
    //Test 1 - parent shouldn't be removable if order of operation is:
    //# N1 and N2 know about /:hidden
    //# N1->create(/:hidden/parent/node1)
    //# N2->create(/:hidden/parent/node2)
    //# N1->remove(/:hidden/parent)
    builder = store1.getRoot().builder();
    builder.child(":hidden").child("parent").remove();
    try {
        merge(store1, builder);
        fail("parent node of unseen children must not get deleted");
    } catch (CommitFailedException cfe) {
        //this merge should fail -- but our real check is done by asserting that parent remains intact
    }
    String parentPath = "/:hidden/parent";
    NodeDocument parentDoc = docStore.find(Collection.NODES, Utils.getIdFromPath(parentPath));
    assertFalse("parent node of unseen children must not get deleted",
            isDocDeleted(parentDoc, store1));
    //Test 2 - parent shouldn't be removable if order of operation is:
    //# N1 and N2 know about /:hidden
    //# N1->create(/:hidden/parent/node1)
    //# N2->create(/:hidden/parent/node2)
    //# N2->remove(/:hidden/parent)
    builder = store2.getRoot().builder();
    builder.child(":hidden").child("parent").remove();
    try {
        merge(store2, builder);
        fail("parent node of unseen children must not get deleted");
    } catch (CommitFailedException cfe) {
        //this merge should fail -- but our real check is done by asserting that parent remains intact
    }
    parentDoc = docStore.find(Collection.NODES, Utils.getIdFromPath(parentPath));
    assertFalse("parent node of unseen children must not get deleted",
            isDocDeleted(parentDoc, store1));
    // reset state for tests 3 and 4: sync both cluster nodes (so both see
    // node1 and node2), remove /:hidden/parent -- which now succeeds because
    // all children are visible -- and create /:hidden/parent1
    store1.runBackgroundOperations();
    store2.runBackgroundOperations();
    builder = store1.getRoot().builder();
    builder.child(":hidden").child("parent").remove();
    builder.child(":hidden").child("parent1");
    // FIX: this merge was missing; the builder changes were silently
    // discarded, so tests 3 and 4 did not start from the documented
    // precondition that both cluster nodes know /:hidden/parent1
    merge(store1, builder);
    store1.runBackgroundOperations();
    store2.runBackgroundOperations();
    // each cluster node again creates an unseen child, this time under the
    // commonly known /:hidden/parent1
    builder = store1.getRoot().builder();
    builder.child(":hidden").child("parent1").child("node1");
    merge(store1, builder);
    builder = store2.getRoot().builder();
    builder.child(":hidden").child("parent1").child("node2");
    merge(store2, builder);
    //Test 3 - parent shouldn't be removable if order of operation is:
    //# N1 and N2 know about /:hidden/parent1
    //# N1->create(/:hidden/parent1/node1)
    //# N2->create(/:hidden/parent1/node2)
    //# N1->remove(/:hidden/parent1)
    builder = store1.getRoot().builder();
    builder.child(":hidden").child("parent1").remove();
    try {
        merge(store1, builder);
    } catch (CommitFailedException cfe) {
        //this merge should fail -- but our real check is done by asserting that parent remains intact
    }
    parentPath = "/:hidden/parent1";
    parentDoc = docStore.find(Collection.NODES, Utils.getIdFromPath(parentPath));
    assertFalse("parent node of unseen children must not get deleted",
            isDocDeleted(parentDoc, store1));
    //Test 4 - parent shouldn't be removable if order of operation is:
    //# N1 and N2 know about /:hidden/parent1
    //# N1->create(/:hidden/parent1/node1)
    //# N2->create(/:hidden/parent1/node2)
    //# N2->remove(/:hidden/parent1)
    builder = store2.getRoot().builder();
    builder.child(":hidden").child("parent1").remove();
    try {
        merge(store2, builder);
    } catch (CommitFailedException cfe) {
        //this merge should fail -- but our real check is done by asserting that parent remains intact
    }
    parentDoc = docStore.find(Collection.NODES, Utils.getIdFromPath(parentPath));
    assertFalse("parent node of unseen children must not get deleted",
            isDocDeleted(parentDoc, store1));
}
Example usage of org.apache.jackrabbit.oak.plugins.document.memory.MemoryDocumentStore in the Apache jackrabbit-oak project: class DocumentNodeStoreTest, method concurrentChildOperations.
// OAK-3646
// Two cluster nodes each remove a different child of /foo/bar; after version
// garbage collection purges the deleted documents and the stale cache entry
// is invalidated, the surviving child list must be consistent.
@Test
public void concurrentChildOperations() throws Exception {
    Clock virtualClock = new Clock.Virtual();
    virtualClock.waitUntil(System.currentTimeMillis());
    Revision.setClock(virtualClock);
    MemoryDocumentStore docStore = new MemoryDocumentStore();
    DocumentNodeStore nodeStore1 = builderProvider.newBuilder()
            .setAsyncDelay(0)
            .clock(virtualClock)
            .setDocumentStore(docStore)
            .setClusterId(1)
            .getNodeStore();
    DocumentNodeStore nodeStore2 = builderProvider.newBuilder()
            .setAsyncDelay(0)
            .clock(virtualClock)
            .setDocumentStore(docStore)
            .setClusterId(2)
            .getNodeStore();
    // create some children under /foo/bar
    NodeBuilder builder1 = nodeStore1.getRoot().builder();
    NodeBuilder barBuilder = builder1.child("foo").child("bar");
    barBuilder.child("child-0");
    barBuilder.child("child-1");
    barBuilder.child("child-2");
    merge(nodeStore1, builder1);
    // make changes visible on both cluster nodes
    nodeStore1.runBackgroundOperations();
    nodeStore2.runBackgroundOperations();
    // remove child-0 on cluster node 1
    builder1 = nodeStore1.getRoot().builder();
    builder1.child("foo").child("bar").getChildNode("child-0").remove();
    merge(nodeStore1, builder1);
    // push _lastRev updates to DocumentStore
    nodeStore1.runBackgroundOperations();
    // remove child-1 on cluster node 2
    NodeBuilder builder2 = nodeStore2.getRoot().builder();
    builder2.child("foo").child("bar").getChildNode("child-1").remove();
    merge(nodeStore2, builder2);
    // on cluster node 2, remove of child-0 is not yet visible
    DocumentNodeState bar = asDocumentNodeState(
            nodeStore2.getRoot().getChildNode("foo").getChildNode("bar"));
    List<ChildNodeEntry> children = Lists.newArrayList(bar.getChildNodeEntries());
    assertEquals(2, Iterables.size(children));
    RevisionVector invalidateAt = bar.getLastRevision();
    assertNotNull(invalidateAt);
    // this will make changes from cluster node 1 visible
    nodeStore2.runBackgroundOperations();
    // wait two hours
    virtualClock.waitUntil(virtualClock.getTime() + TimeUnit.HOURS.toMillis(2));
    // collect everything older than one hour;
    // this removes the child-0 and child-1 documents
    nodeStore1.getVersionGarbageCollector().gc(1, TimeUnit.HOURS);
    // forget cache entry for deleted node
    nodeStore2.invalidateNodeCache("/foo/bar/child-0", invalidateAt);
    children = Lists.newArrayList(
            nodeStore2.getRoot().getChildNode("foo").getChildNode("bar").getChildNodeEntries());
    assertEquals(1, Iterables.size(children));
}
Example usage of org.apache.jackrabbit.oak.plugins.document.memory.MemoryDocumentStore in the Apache jackrabbit-oak project: class DocumentNodeStoreTest, method readFromPreviousDoc.
// OAK-1972
// A revisioned read must keep returning the same value even after the
// property history has been moved into a previous (split) document.
@Test
public void readFromPreviousDoc() throws CommitFailedException {
    DocumentStore docStore = new MemoryDocumentStore();
    DocumentNodeStore nodeStore = builderProvider.newBuilder()
            .setDocumentStore(docStore)
            .getNodeStore();
    NodeBuilder nb = nodeStore.getRoot().builder();
    nb.child("test").setProperty("prop", "initial");
    nodeStore.merge(nb, EmptyHook.INSTANCE, CommitInfo.EMPTY);
    nodeStore.dispose();
    // restart against the same document store with a different cluster id
    nodeStore = builderProvider.newBuilder()
            .setClusterId(2)
            .setAsyncDelay(0)
            .setDocumentStore(docStore)
            .getNodeStore();
    nb = nodeStore.getRoot().builder();
    nb.child("test").setProperty("prop", "value");
    nodeStore.merge(nb, EmptyHook.INSTANCE, CommitInfo.EMPTY);
    // remember the head revision at which "prop" was "value"
    RevisionVector readRevision = nodeStore.getHeadRevision();
    NodeDocument doc = docStore.find(NODES, Utils.getIdFromPath("/test"));
    assertNotNull(doc);
    DocumentNodeState nodeState = doc.getNodeAtRevision(nodeStore, readRevision, null);
    assertNotNull(nodeState);
    assertTrue(nodeState.hasProperty("prop"));
    assertEquals("value", nodeState.getProperty("prop").getValue(Type.STRING));
    // overwrite the property often enough to trigger a document split
    for (int i = 0; i < NUM_REVS_THRESHOLD; i++) {
        nb = nodeStore.getRoot().builder();
        nb.child("test").setProperty("prop", "v-" + i);
        nodeStore.merge(nb, EmptyHook.INSTANCE, CommitInfo.EMPTY);
    }
    nodeStore.runBackgroundOperations();
    // must still return the same value as before the split
    doc = docStore.find(NODES, Utils.getIdFromPath("/test"));
    assertNotNull(doc);
    nodeState = doc.getNodeAtRevision(nodeStore, readRevision, null);
    assertNotNull(nodeState);
    assertTrue(nodeState.hasProperty("prop"));
    assertEquals("value", nodeState.getProperty("prop").getValue(Type.STRING));
}
Aggregations