Usage example of org.apache.jackrabbit.oak.plugins.document.memory.MemoryDocumentStore in the Apache jackrabbit-oak project: class DocumentNodeStoreTest, method readChildrenWithDeletedSiblings.
// OAK-1861
@Test
public void readChildrenWithDeletedSiblings() throws Exception {
    // tracks the largest limit ever passed to query() on the NODES collection
    final AtomicInteger largestQueryLimit = new AtomicInteger(0);
    DocumentStore docStore = new MemoryDocumentStore() {
        @Nonnull
        @Override
        public <T extends Document> List<T> query(Collection<T> collection, String fromKey, String toKey, int limit) {
            if (collection == NODES) {
                if (limit > largestQueryLimit.get()) {
                    largestQueryLimit.set(limit);
                }
            }
            return super.query(collection, fromKey, toKey, limit);
        }
    };
    DocumentNodeStore ns = builderProvider.newBuilder().setDocumentStore(docStore).setAsyncDelay(0).getNodeStore();
    // create 1000 children under the root in a single commit
    NodeBuilder nb = ns.getRoot().builder();
    for (int n = 0; n < 1000; n++) {
        nb.child("node-" + n);
    }
    ns.merge(nb, EmptyHook.INSTANCE, CommitInfo.EMPTY);
    // delete every child except the last one, each removal in its own commit
    for (int n = 0; n < 999; n++) {
        nb = ns.getRoot().builder();
        nb.getChildNode("node-" + n).remove();
        ns.merge(nb, EmptyHook.INSTANCE, CommitInfo.EMPTY);
    }
    // iterate the surviving children to trigger child node reads
    for (ChildNodeEntry entry : ns.getRoot().getChildNodeEntries()) {
        entry.getName();
    }
    // must not read more than DocumentNodeState.INITIAL_FETCH_SIZE + 1
    assertTrue(largestQueryLimit.get() + " > " + (DocumentNodeState.INITIAL_FETCH_SIZE + 1),
            largestQueryLimit.get() <= DocumentNodeState.INITIAL_FETCH_SIZE + 1);
}
Usage example of org.apache.jackrabbit.oak.plugins.document.memory.MemoryDocumentStore in the Apache jackrabbit-oak project: class DocumentNodeStoreTest, method backgroundRead.
// OAK-1254
@Test
public void backgroundRead() throws Exception {
    // one permit: while the test thread holds it, invalidateCache() below blocks
    final Semaphore semaphore = new Semaphore(1);
    DocumentStore docStore = new MemoryDocumentStore();
    // wrapper lets the test stall store1's background read right after the
    // cache invalidation has already happened
    DocumentStore testStore = new TimingDocumentStoreWrapper(docStore) {
        @Override
        public CacheInvalidationStats invalidateCache(Iterable<String> keys) {
            super.invalidateCache(keys);
            // parks here while the test thread holds the only permit
            semaphore.acquireUninterruptibly();
            semaphore.release();
            return null;
        }
    };
    // store1 reads through the wrapped store; store2 writes directly to the backing store
    final DocumentNodeStore store1 = builderProvider.newBuilder().setAsyncDelay(0).setDocumentStore(testStore).setClusterId(1).getNodeStore();
    DocumentNodeStore store2 = builderProvider.newBuilder().setAsyncDelay(0).setDocumentStore(docStore).setClusterId(2).getNodeStore();
    NodeBuilder builder = store2.getRoot().builder();
    builder.child("node2");
    store2.merge(builder, EmptyHook.INSTANCE, CommitInfo.EMPTY);
    // force update of _lastRevs
    store2.runBackgroundOperations();
    // node2 must not be visible in store1 yet: its background read has not run
    assertFalse(store1.getRoot().hasChildNode("node2"));
    builder = store1.getRoot().builder();
    builder.child("node1");
    NodeState root = store1.merge(builder, EmptyHook.INSTANCE, CommitInfo.EMPTY);
    // take the permit so the background thread will block in invalidateCache()
    semaphore.acquireUninterruptibly();
    Thread t = new Thread(new Runnable() {
        @Override
        public void run() {
            store1.runBackgroundOperations();
        }
    });
    t.start();
    // wait until the background thread is queued on the semaphore
    while (!semaphore.hasQueuedThreads()) {
        Thread.sleep(10);
    }
    // must still not be visible at this state
    try {
        assertFalse(root.hasChildNode("node2"));
    } finally {
        // unblock the background read
        semaphore.release();
    }
    t.join();
    // background operations completed
    root = store1.getRoot();
    // now node2 is visible
    assertTrue(root.hasChildNode("node2"));
}
Usage example of org.apache.jackrabbit.oak.plugins.document.memory.MemoryDocumentStore in the Apache jackrabbit-oak project: class DocumentNodeStoreTest, method getNewestRevision2.
// OAK-3798
@Test
public void getNewestRevision2() throws Exception {
    DocumentStore store = new MemoryDocumentStore();
    DocumentNodeStore ns1 = builderProvider.newBuilder()
            .setDocumentStore(store).setAsyncDelay(0).setClusterId(1).getNodeStore();
    ns1.getRoot();
    // remember cluster node 1's head revision before any further commits
    Revision r1 = ns1.getHeadRevision().getRevision(ns1.getClusterId());
    ns1.runBackgroundOperations();
    DocumentNodeStore ns2 = builderProvider.newBuilder()
            .setDocumentStore(store).setAsyncDelay(0).setClusterId(2).getNodeStore();
    ns2.getRoot();
    // commit enough property changes on ns1 to reach the revisions threshold
    NodeBuilder builder = ns1.getRoot().builder();
    for (int n = 0; n < NodeDocument.NUM_REVS_THRESHOLD; n++) {
        builder.setProperty("p", String.valueOf(n));
        ns1.merge(builder, EmptyHook.INSTANCE, CommitInfo.EMPTY);
    }
    ns1.runBackgroundOperations();
    NodeDocument rootDoc = store.find(NODES, Utils.getIdFromPath("/"));
    assertNotNull(rootDoc);
    // from ns2's perspective the newest revision must still be r1
    Revision newestRev = rootDoc.getNewestRevision(ns2, ns2.getHeadRevision(),
            Revision.newRevision(ns2.getClusterId()), null, Sets.<Revision>newHashSet());
    assertEquals(r1, newestRev);
}
Usage example of org.apache.jackrabbit.oak.plugins.document.memory.MemoryDocumentStore in the Apache jackrabbit-oak project: class DocumentNodeStoreTest, method recoverBranchCommit.
// OAK-2308
@Test
public void recoverBranchCommit() throws Exception {
    Clock clock = new Clock.Virtual();
    clock.waitUntil(System.currentTimeMillis());
    MemoryDocumentStore store = new MemoryDocumentStore();
    DocumentNodeStore store1 = builderProvider.newBuilder()
            .setDocumentStore(store).setAsyncDelay(0).clock(clock).setClusterId(1).getNodeStore();
    NodeBuilder nb = store1.getRoot().builder();
    nb.child("test");
    merge(store1, nb);
    // make sure all _lastRevs are written back
    store1.runBackgroundOperations();
    nb = store1.getRoot().builder();
    NodeBuilder node = nb.getChildNode("test").child("node");
    String nodeId = Utils.getIdFromPath("/test/node");
    // keep modifying the node to force creation of a branch; the document
    // appears in the store once the branch is persisted
    int value = 0;
    while (store.find(NODES, nodeId) == null) {
        node.setProperty("foo", value++);
    }
    merge(store1, nb);
    // advance the virtual clock past the end of the lease
    clock.waitUntil(clock.getTime() + store1.getClusterInfo().getLeaseTime() + 1000);
    // run recovery for this store
    LastRevRecoveryAgent agent = store1.getLastRevRecoveryAgent();
    assertTrue(agent.isRecoveryNeeded());
    agent.recover(store1.getClusterId());
    // a freshly started second store must see /test/node
    DocumentNodeStore store2 = builderProvider.newBuilder()
            .setDocumentStore(store).setAsyncDelay(0).clock(clock).setClusterId(2).getNodeStore();
    assertTrue(store2.getRoot().getChildNode("test").getChildNode("node").exists());
}
Usage example of org.apache.jackrabbit.oak.plugins.document.memory.MemoryDocumentStore in the Apache jackrabbit-oak project: class DocumentNodeStoreTest, method nonBlockingReset.
// OAK-2620
@Test
public void nonBlockingReset() throws Exception {
    // collects violations observed on the store callback; checked at the end
    final List<String> failure = Lists.newArrayList();
    final AtomicReference<ReentrantReadWriteLock> mergeLock = new AtomicReference<ReentrantReadWriteLock>();
    MemoryDocumentStore store = new MemoryDocumentStore() {
        @Override
        public <T extends Document> T findAndUpdate(Collection<T> collection, UpdateOp update) {
            for (Map.Entry<Key, Operation> entry : update.getChanges().entrySet()) {
                if (entry.getKey().getName().equals(NodeDocument.COLLISIONS)) {
                    ReentrantReadWriteLock rwLock = mergeLock.get();
                    // rwLock is null until the test publishes the branch's merge
                    // lock below; guard against an NPE for earlier calls
                    if (rwLock != null
                            && (rwLock.getReadHoldCount() > 0 || rwLock.getWriteHoldCount() > 0)) {
                        failure.add("Branch reset still holds merge lock");
                        break;
                    }
                }
            }
            return super.findAndUpdate(collection, update);
        }
    };
    DocumentNodeStore ds = builderProvider.newBuilder().setDocumentStore(store).setAsyncDelay(0).getNodeStore();
    // do not retry merges
    ds.setMaxBackOffMillis(0);
    DocumentNodeState root = ds.getRoot();
    final DocumentNodeStoreBranch b = ds.createBranch(root);
    // branch state is now Unmodified
    assertTrue(b.getMergeLock() instanceof ReentrantReadWriteLock);
    mergeLock.set((ReentrantReadWriteLock) b.getMergeLock());
    NodeBuilder builder = root.builder();
    builder.child("foo");
    b.setRoot(builder.getNodeState());
    // branch state is now InMemory
    builder.child("bar");
    b.setRoot(builder.getNodeState());
    try {
        b.merge(new CommitHook() {
            @Nonnull
            @Override
            public NodeState processCommit(NodeState before, NodeState after, CommitInfo info) throws CommitFailedException {
                // push the change set over the update limit so the branch is
                // persisted, then fail the commit to trigger a branch reset
                NodeBuilder foo = after.builder().child("foo");
                for (int i = 0; i <= DocumentMK.UPDATE_LIMIT; i++) {
                    foo.setProperty("prop", i);
                }
                throw new CommitFailedException("Fail", 0, "");
            }
        }, CommitInfo.EMPTY);
    } catch (CommitFailedException e) {
        // expected
    }
    // fail with the first recorded violation, if any
    for (String s : failure) {
        fail(s);
    }
}
Aggregations