Example usage of org.apache.jackrabbit.oak.plugins.document.memory.MemoryDocumentStore in the Apache jackrabbit-oak project:
class ReadOnlyDocumentNodeStoreTest, method readOnlyCompare.
@Test
public void readOnlyCompare() throws Exception {
    DocumentStore store = new MemoryDocumentStore();
    // a writable store and a read-only store sharing the same backend
    DocumentNodeStore writableNs = builderProvider.newBuilder()
            .setDocumentStore(store).setAsyncDelay(0).getNodeStore();
    DocumentNodeStore readOnlyNs = builderProvider.newBuilder()
            .setDocumentStore(store).setAsyncDelay(0).setReadOnlyMode().getNodeStore();
    // first change: add /foo through the writable store
    NodeBuilder nb = writableNs.getRoot().builder();
    nb.child("foo");
    merge(writableNs, nb);
    writableNs.runBackgroundOperations();
    // capture the read-only root before it has seen any change
    DocumentNodeState before = readOnlyNs.getRoot();
    readOnlyNs.runBackgroundOperations();
    // second change: add /bar
    nb = writableNs.getRoot().builder();
    nb.child("bar");
    merge(writableNs, nb);
    writableNs.runBackgroundOperations();
    readOnlyNs.runBackgroundOperations();
    DocumentNodeState after = readOnlyNs.getRoot();
    // diffing the two read-only snapshots must report exactly the two added nodes
    TrackingDiff diff = new TrackingDiff();
    after.compareAgainstBaseState(before, diff);
    assertThat(diff.deleted, is(empty()));
    assertThat(diff.modified, is(empty()));
    assertThat(diff.added, containsInAnyOrder("/foo", "/bar"));
}
Example usage of org.apache.jackrabbit.oak.plugins.document.memory.MemoryDocumentStore in the Apache jackrabbit-oak project:
class VersionGCDeletionTest, method deleteLargeNumber.
@Test
public void deleteLargeNumber() throws Exception {
    int noOfDocsToDelete = 10000;
    DocumentStore ts = new MemoryDocumentStore();
    // BUG FIX: the builder previously received a *second*
    // MemoryDocumentStore instance, so 'ts' was never written to and the
    // assertNull(ts.find(...)) checks below passed vacuously against an
    // empty store. The node store must operate on 'ts' itself.
    store = new DocumentMK.Builder().clock(clock).setDocumentStore(ts)
            .setAsyncDelay(0).getNodeStore();
    // create /x with noOfDocsToDelete subtrees: /x/a<i>/b<i>
    NodeBuilder b1 = store.getRoot().builder();
    NodeBuilder xb = b1.child("x");
    for (int i = 0; i < noOfDocsToDelete; i++) {
        xb.child("a" + i).child("b" + i);
    }
    store.merge(b1, EmptyHook.INSTANCE, CommitInfo.EMPTY);
    // GC max age in hours
    long maxAge = 1;
    long delta = TimeUnit.MINUTES.toMillis(10);
    // remove /x (and transitively all its descendants)
    NodeBuilder b2 = store.getRoot().builder();
    b2.child("x").remove();
    store.merge(b2, EmptyHook.INSTANCE, CommitInfo.EMPTY);
    store.runBackgroundOperations();
    // check that the deleted docs do get collected post maxAge
    clock.waitUntil(clock.getTime() + HOURS.toMillis(maxAge * 2) + delta);
    VersionGarbageCollector gc = store.getVersionGarbageCollector();
    gc.setOptions(gc.getOptions().withOverflowToDiskThreshold(100));
    VersionGCStats stats = gc.gc(maxAge * 2, HOURS);
    // one document for /x plus a parent (a<i>) and a leaf (b<i>) per subtree
    assertEquals(noOfDocsToDelete * 2 + 1, stats.deletedDocGCCount);
    assertEquals(noOfDocsToDelete, stats.deletedLeafDocGCCount);
    // the garbage-collected documents must be gone from the backing store
    assertNull(ts.find(Collection.NODES, "1:/x"));
    for (int i = 0; i < noOfDocsToDelete; i++) {
        assertNull(ts.find(Collection.NODES, "2:/a" + i + "/b" + i));
        assertNull(ts.find(Collection.NODES, "1:/a" + i));
    }
}
Example usage of org.apache.jackrabbit.oak.plugins.document.memory.MemoryDocumentStore in the Apache jackrabbit-oak project:
class VersionGCDeletionTest, method gcWithPathsHavingNewLine.
@Test
public void gcWithPathsHavingNewLine() throws Exception {
    int noOfDocsToDelete = 200;
    DocumentStore ts = new MemoryDocumentStore();
    // BUG FIX: the builder previously received a *second*
    // MemoryDocumentStore instance, leaving 'ts' as an unused, empty
    // store. Build the node store on 'ts' so the test mirrors
    // deleteLargeNumber() and the local is actually used.
    store = new DocumentMK.Builder().clock(clock).setDocumentStore(ts)
            .setAsyncDelay(0).getNodeStore();
    // create /x with noOfDocsToDelete subtrees; the last one contains a
    // carriage-return character in its name to exercise GC path handling
    NodeBuilder b1 = store.getRoot().builder();
    NodeBuilder xb = b1.child("x");
    for (int i = 0; i < noOfDocsToDelete - 1; i++) {
        xb.child("a" + i).child("b" + i);
    }
    xb.child("a-1").child("b\r");
    store.merge(b1, EmptyHook.INSTANCE, CommitInfo.EMPTY);
    // GC max age in hours
    long maxAge = 1;
    long delta = TimeUnit.MINUTES.toMillis(10);
    // remove /x (and transitively all its descendants)
    NodeBuilder b2 = store.getRoot().builder();
    b2.child("x").remove();
    store.merge(b2, EmptyHook.INSTANCE, CommitInfo.EMPTY);
    store.runBackgroundOperations();
    // check that the deleted docs do get collected post maxAge
    clock.waitUntil(clock.getTime() + HOURS.toMillis(maxAge * 2) + delta);
    VersionGarbageCollector gc = store.getVersionGarbageCollector();
    gc.setOptions(gc.getOptions().withOverflowToDiskThreshold(100));
    VersionGCStats stats = gc.gc(maxAge * 2, HOURS);
    // one document for /x plus a parent and a leaf document per subtree
    assertEquals(noOfDocsToDelete * 2 + 1, stats.deletedDocGCCount);
    assertEquals(noOfDocsToDelete, stats.deletedLeafDocGCCount);
}
Example usage of org.apache.jackrabbit.oak.plugins.document.memory.MemoryDocumentStore in the Apache jackrabbit-oak project:
class NodeDocumentTest, method tooManyReadsOnGetVisibleChanges.
// OAK-5207
@Test
public void tooManyReadsOnGetVisibleChanges() throws Exception {
    final int numChanges = 500;
    // records the id of every previous-document read going through find()
    final Set<String> prevDocCalls = newHashSet();
    MemoryDocumentStore store = new MemoryDocumentStore() {
        @Override
        public <T extends Document> T find(Collection<T> collection, String key) {
            if (Utils.getPathFromId(key).startsWith("p")) {
                prevDocCalls.add(key);
            }
            return super.find(collection, key);
        }
    };
    // fixed seed so the generated test data is reproducible
    Random rnd = new Random(42);
    // two cluster nodes operating on the same backend
    DocumentNodeStore ns1 = createTestStore(store, 1, 0);
    DocumentNodeStore ns2 = createTestStore(store, 2, 0);
    List<DocumentNodeStore> stores = Lists.newArrayList(ns1, ns2);
    // newest head revision first
    List<RevisionVector> heads = Lists.reverse(createTestData(stores, rnd, numChanges));
    NodeDocument root = getRootDocument(store);
    for (int i = 0; i < 20; i++) {
        prevDocCalls.clear();
        String v = root.getVisibleChanges("p", heads.get(i)).iterator().next().getValue();
        // the most recent visible change at head i carries value numChanges - (i + 1)
        assertEquals(String.valueOf(numChanges - (i + 1)), v);
        // resolving visible changes must touch at most a few previous documents
        assertTrue("too many calls for previous documents: " + prevDocCalls,
                prevDocCalls.size() <= 3);
    }
    ns1.dispose();
    ns2.dispose();
}
Example usage of org.apache.jackrabbit.oak.plugins.document.memory.MemoryDocumentStore in the Apache jackrabbit-oak project:
class NodeDocumentTest, method tooManyReadsOnGetNewestRevision.
// OAK-4358
// Verifies that getNewestRevision() with a base revision in the past does
// not read every previous (split) document, but only the one it needs.
@Test
public void tooManyReadsOnGetNewestRevision() throws Exception {
// records the id of every previous-document read going through find();
// ids of previous documents start with "p"
final Set<String> prevDocCalls = newHashSet();
MemoryDocumentStore store = new MemoryDocumentStore() {
@Override
public <T extends Document> T find(Collection<T> collection, String key) {
if (Utils.getPathFromId(key).startsWith("p")) {
prevDocCalls.add(key);
}
return super.find(collection, key);
}
};
// two cluster nodes operating on the same backend store
DocumentNodeStore ns1 = createTestStore(store, 1, 0);
DocumentNodeStore ns2 = createTestStore(store, 2, 0);
// create a test node on ns1
NodeBuilder b1 = ns1.getRoot().builder();
b1.child("test");
merge(ns1, b1);
ns1.runBackgroundOperations();
ns2.runBackgroundOperations();
// split off some changes made on ns2
for (int i = 0; i < 3; i++) {
NodeBuilder b2 = ns2.getRoot().builder();
b2.child("test").setProperty("ns2", i);
merge(ns2, b2);
}
String testId = Utils.getIdFromPath("/test");
NodeDocument test = ns2.getDocumentStore().find(NODES, testId);
assertNotNull(test);
// move the ns2 changes into previous documents
List<UpdateOp> ops = SplitOperations.forDocument(test, ns2, ns2.getHeadRevision(), NO_BINARY, 2);
assertEquals(2, ops.size());
for (UpdateOp op : ops) {
ns2.getDocumentStore().createOrUpdate(NODES, op);
}
ns2.runBackgroundOperations();
ns1.runBackgroundOperations();
List<RevisionVector> headRevs = Lists.newArrayList();
// perform many changes on ns1 and split after each one, remembering the
// head revision so a past base revision can be picked later
for (int i = 0; i < 20; i++) {
b1 = ns1.getRoot().builder();
b1.child("test").setProperty("ns1", i);
merge(ns1, b1);
test = ns1.getDocumentStore().find(NODES, testId);
for (UpdateOp op : SplitOperations.forDocument(test, ns1, ns1.getHeadRevision(), NO_BINARY, 3)) {
ns1.getDocumentStore().createOrUpdate(NODES, op);
}
headRevs.add(ns1.getHeadRevision());
}
int numPrevDocs = Iterators.size(test.getPreviousDocLeaves());
assertEquals(10, numPrevDocs);
// getNewestRevision must not read all previous documents
prevDocCalls.clear();
// simulate a call done by a commit with a
// base revision somewhat in the past
test.getNewestRevision(ns1, headRevs.get(16), ns1.newRevision(), null, new HashSet<Revision>());
// must only read one previous document for ns1 changes
assertEquals(1, prevDocCalls.size());
}
Aggregations — end of collected usage examples.