Use of org.apache.jackrabbit.oak.plugins.document.memory.MemoryDocumentStore in project jackrabbit-oak by apache.
From the class NodeDocumentTest, method splitCollisions.
@Test
public void splitCollisions() throws Exception {
    MemoryDocumentStore docStore = new MemoryDocumentStore();
    String id = Utils.getIdFromPath("/");
    NodeDocument doc = new NodeDocument(docStore);
    doc.put(Document.ID, id);
    UpdateOp op = new UpdateOp(id, false);
    // add one revision more than the split threshold, each with a collision marker
    for (int i = 0; i < NodeDocument.NUM_REVS_THRESHOLD + 1; i++) {
        Revision r = Revision.newRevision(1);
        NodeDocument.setRevision(op, r, "c");
        NodeDocument.addCollision(op, r, Revision.newRevision(1));
    }
    UpdateUtils.applyChanges(doc, op);
    RevisionVector head = DummyRevisionContext.INSTANCE.getHeadRevision();
    doc.split(DummyRevisionContext.INSTANCE, head, NO_BINARY);
}
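The test computes the split but discards the result. Assuming split(...) returns the resulting update operations (Iterable<UpdateOp>, as in the Oak 1.x code this snippet appears to come from), a minimal sketch of persisting them back to the store:

// Sketch only: apply the computed split operations to the document store.
// Assumes split(...) returns Iterable<UpdateOp> and NODES is the statically
// imported Collection.NODES used by the other snippets on this page.
for (UpdateOp splitOp : doc.split(DummyRevisionContext.INSTANCE, head, NO_BINARY)) {
    docStore.createOrUpdate(NODES, splitOp);
}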
Use of org.apache.jackrabbit.oak.plugins.document.memory.MemoryDocumentStore in project jackrabbit-oak by apache.
From the class DocumentNodeStoreTest, method mergeInternalDocAcrossCluster.
@Test
public void mergeInternalDocAcrossCluster() throws Exception {
    MemoryDocumentStore docStore = new MemoryDocumentStore();
    final DocumentNodeStore store1 = builderProvider.newBuilder()
            .setDocumentStore(docStore).setAsyncDelay(0).setClusterId(1).getNodeStore();
    store1.setEnableConcurrentAddRemove(true);
    final DocumentNodeStore store2 = builderProvider.newBuilder()
            .setDocumentStore(docStore).setAsyncDelay(0).setClusterId(2).getNodeStore();
    store2.setEnableConcurrentAddRemove(true);
    NodeState root;
    NodeBuilder builder;
    // prepare the repository
    root = store1.getRoot();
    builder = root.builder();
    builder.child(":hidden").child("deleteDeleted");
    builder.child(":hidden").child("deleteChanged");
    builder.child(":hidden").child("changeDeleted");
    merge(store1, builder);
    store1.runBackgroundOperations();
    store2.runBackgroundOperations();
    // changes in store1
    root = store1.getRoot();
    builder = root.builder();
    builder.child("visible");
    builder.child(":hidden").child("b");
    builder.child(":hidden").child("deleteDeleted").remove();
    builder.child(":hidden").child("changeDeleted").remove();
    builder.child(":hidden").child("deleteChanged").setProperty("foo", "bar");
    builder.child(":dynHidden").child("c");
    builder.child(":dynHidden").child("childWithProp").setProperty("foo", "bar");
    merge(store1, builder);
    // changes in store2
    // root still references store2's root state from the initial repository setup
    root = store2.getRoot();
    // hidden nodes and their children must be creatable concurrently across the cluster
    builder = root.builder();
    builder.child(":hidden").child("b");
    builder.child(":dynHidden").child("c");
    merge(store2, builder);
    // a delete-delete conflict on an internal node must merge concurrently across the cluster
    builder = root.builder();
    builder.child(":hidden").child("deleteDeleted").remove();
    merge(store2, builder);
    // avoid repeated merge retries ... fail early
    store2.setMaxBackOffMillis(0);
    boolean commitFailed = false;
    try {
        builder = root.builder();
        builder.child("visible");
        merge(store2, builder);
    } catch (CommitFailedException cfe) {
        commitFailed = true;
    }
    assertTrue("Concurrent creation of visible node across cluster must fail", commitFailed);
    commitFailed = false;
    try {
        builder = root.builder();
        builder.child(":dynHidden").child("childWithProp").setProperty("foo", "bar");
        merge(store2, builder);
    } catch (CommitFailedException cfe) {
        commitFailed = true;
    }
    assertTrue("Concurrent creation of hidden node with properties across cluster must fail", commitFailed);
    commitFailed = false;
    try {
        builder = root.builder();
        builder.child(":hidden").child("deleteChanged").remove();
        merge(store2, builder);
    } catch (CommitFailedException cfe) {
        commitFailed = true;
    }
    assertTrue("Delete-changed merge across cluster must fail even under a hidden tree", commitFailed);
    commitFailed = false;
    try {
        builder = root.builder();
        builder.child(":hidden").child("changeDeleted").setProperty("foo", "bar");
        merge(store2, builder);
    } catch (CommitFailedException cfe) {
        commitFailed = true;
    }
    assertTrue("Change-deleted merge across cluster must fail even under a hidden tree", commitFailed);
}
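The merge(store, builder) helper is defined elsewhere in the test class and not shown here. A minimal sketch of what it presumably does, based on how the other snippets on this page merge directly through the NodeStore API:

// Hypothetical stand-in for the test class's merge helper; the real
// helper may differ in signature or error handling.
private static NodeState merge(NodeStore store, NodeBuilder builder)
        throws CommitFailedException {
    return store.merge(builder, EmptyHook.INSTANCE, CommitInfo.EMPTY);
}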
Use of org.apache.jackrabbit.oak.plugins.document.memory.MemoryDocumentStore in project jackrabbit-oak by apache.
From the class DocumentNodeStoreTest, method modifiedReset.
@Test
public void modifiedReset() throws Exception {
    Clock clock = new Clock.Virtual();
    clock.waitUntil(System.currentTimeMillis());
    Revision.setClock(clock);
    MemoryDocumentStore docStore = new MemoryDocumentStore();
    DocumentNodeStore ns1 = builderProvider.newBuilder().setDocumentStore(docStore)
            .setClusterId(1).setAsyncDelay(0).clock(clock).getNodeStore();
    NodeBuilder builder1 = ns1.getRoot().builder();
    builder1.child("node");
    ns1.merge(builder1, EmptyHook.INSTANCE, CommitInfo.EMPTY);
    ns1.runBackgroundOperations();
    DocumentNodeStore ns2 = builderProvider.newBuilder().setDocumentStore(docStore)
            .setClusterId(2).setAsyncDelay(0).clock(clock).getNodeStore();
    NodeBuilder builder2 = ns2.getRoot().builder();
    builder2.child("node").child("child-2");
    ns2.merge(builder2, EmptyHook.INSTANCE, CommitInfo.EMPTY);
    // wait at least the _modified resolution; in practice the wait may not be
    // necessary, e.g. when the clock passes the resolution boundary exactly now
    clock.waitUntil(System.currentTimeMillis() + SECONDS.toMillis(MODIFIED_IN_SECS_RESOLUTION + 1));
    builder1 = ns1.getRoot().builder();
    builder1.child("node").child("child-1");
    ns1.merge(builder1, EmptyHook.INSTANCE, CommitInfo.EMPTY);
    ns1.runBackgroundOperations();
    // get the current _modified timestamp on /node
    NodeDocument doc = docStore.find(NODES, Utils.getIdFromPath("/node"));
    Long mod1 = (Long) doc.get(MODIFIED_IN_SECS);
    assertNotNull(mod1);
    // the background update of ns2 must not reset _modified to an older value
    ns2.runBackgroundOperations();
    doc = docStore.find(NODES, Utils.getIdFromPath("/node"));
    Long mod2 = (Long) doc.get(MODIFIED_IN_SECS);
    assertTrue(mod2 + " < " + mod1, mod2 >= mod1);
}
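For context: _modified is stored with a coarse resolution, which is why the test advances the virtual clock past a resolution boundary before the second commit. A sketch of that bucketing, assuming the usual Oak definition (NodeDocument.getModifiedInSecs is the real helper; treat the exact formula here as an assumption):

// Hypothetical illustration of _modified bucketing: timestamps are rounded
// down to MODIFIED_IN_SECS_RESOLUTION-sized buckets of seconds.
static long modifiedInSecs(long timeMillis) {
    long seconds = TimeUnit.MILLISECONDS.toSeconds(timeMillis);
    return seconds - seconds % MODIFIED_IN_SECS_RESOLUTION;
}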
Use of org.apache.jackrabbit.oak.plugins.document.memory.MemoryDocumentStore in project jackrabbit-oak by apache.
From the class DocumentSplitTest, method readLocalCommitInfo.
@Test
public void readLocalCommitInfo() throws Exception {
    final Set<String> readSet = Sets.newHashSet();
    DocumentStore store = new MemoryDocumentStore() {

        @Override
        public <T extends Document> T find(Collection<T> collection, String key, int maxCacheAge) {
            readSet.add(key);
            return super.find(collection, key, maxCacheAge);
        }
    };
    DocumentNodeStore ns = new DocumentMK.Builder().setDocumentStore(store).setAsyncDelay(0).getNodeStore();
    NodeBuilder builder = ns.getRoot().builder();
    builder.child("test");
    ns.merge(builder, EmptyHook.INSTANCE, CommitInfo.EMPTY);
    for (int i = 0; i < NUM_REVS_THRESHOLD; i++) {
        builder = ns.getRoot().builder();
        builder.setProperty("p", i);
        builder.child("test").setProperty("p", i);
        builder.child("test").setProperty("q", i);
        ns.merge(builder, EmptyHook.INSTANCE, CommitInfo.EMPTY);
    }
    builder = ns.getRoot().builder();
    builder.child("test").removeProperty("q");
    ns.merge(builder, EmptyHook.INSTANCE, CommitInfo.EMPTY);
    ns.runBackgroundOperations();
    NodeDocument doc = store.find(NODES, Utils.getIdFromPath("/test"));
    assertNotNull(doc);
    readSet.clear();
    // must not access a previous document of /test
    doc.getNodeAtRevision(ns, ns.getHeadRevision(), null);
    for (String id : Sets.newHashSet(readSet)) {
        doc = store.find(NODES, id);
        assertNotNull(doc);
        if (doc.isSplitDocument() && !doc.getMainPath().equals("/")) {
            fail("must not access previous document: " + id);
        }
    }
    ns.dispose();
}
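The anonymous subclass above is a read-spy: every lookup is recorded before delegating to the in-memory implementation. The same pattern can count reads instead of collecting keys; a minimal sketch against the single-key find variant of the DocumentStore API:

// Hypothetical read counter built with the same spy pattern as readSet above.
final AtomicInteger reads = new AtomicInteger();
DocumentStore countingStore = new MemoryDocumentStore() {

    @Override
    public <T extends Document> T find(Collection<T> collection, String key) {
        reads.incrementAndGet();  // record the lookup, then delegate
        return super.find(collection, key);
    }
};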
Use of org.apache.jackrabbit.oak.plugins.document.memory.MemoryDocumentStore in project jackrabbit-oak by apache.
From the class DocumentSplitTest, method manyRevisions.
// OAK-1233
@Test
public void manyRevisions() {
    final int numMKs = 3;
    MemoryDocumentStore ds = new MemoryDocumentStore();
    MemoryBlobStore bs = new MemoryBlobStore();
    List<Set<String>> changes = new ArrayList<Set<String>>();
    List<DocumentMK> mks = new ArrayList<DocumentMK>();
    for (int i = 1; i <= numMKs; i++) {
        DocumentMK.Builder builder = new DocumentMK.Builder();
        builder.setDocumentStore(ds).setBlobStore(bs).setAsyncDelay(0);
        DocumentMK mk = builder.setClusterId(i).open();
        mks.add(mk);
        changes.add(new HashSet<String>());
        if (i == 1) {
            mk.commit("/", "+\"test\":{}", null, null);
            mk.runBackgroundOperations();
        }
    }
    List<String> propNames = Arrays.asList("prop1", "prop2", "prop3");
    Random random = new Random(0);
    for (int i = 0; i < 1000; i++) {
        int mkIdx = random.nextInt(mks.size());
        // pick a DocumentMK instance
        DocumentMK mk = mks.get(mkIdx);
        DocumentNodeStore ns = mk.getNodeStore();
        // pick a property name to update
        String name = propNames.get(random.nextInt(propNames.size()));
        // need to sync?
        for (int j = 0; j < changes.size(); j++) {
            Set<String> c = changes.get(j);
            if (c.contains(name)) {
                syncMKs(mks, j);
                c.clear();
                break;
            }
        }
        // read the current value
        NodeDocument doc = ds.find(NODES, Utils.getIdFromPath("/test"));
        assertNotNull(doc);
        RevisionVector head = ns.getHeadRevision();
        Revision lastRev = ns.getPendingModifications().get("/test");
        DocumentNodeState n = doc.getNodeAtRevision(mk.getNodeStore(), head, lastRev);
        assertNotNull(n);
        String value = n.getPropertyAsString(name);
        // set or increment
        if (value == null) {
            value = String.valueOf(0);
        } else {
            value = String.valueOf(Integer.parseInt(value) + 1);
        }
        mk.commit("/test", "^\"" + name + "\":" + value, null, null);
        changes.get(mkIdx).add(name);
    }
    for (DocumentMK mk : mks) {
        mk.dispose();
    }
}
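The syncMKs helper is private to the test class and not shown. A plausible sketch, assuming its job is to publish the pending changes of instance idx and let the other instances pick them up via their background operations:

// Hypothetical sketch of the sync helper: instance idx writes back its
// pending changes first, then every other instance pulls external changes.
private static void syncMKs(List<DocumentMK> mks, int idx) {
    mks.get(idx).runBackgroundOperations();
    for (int i = 0; i < mks.size(); i++) {
        if (i != idx) {
            mks.get(i).runBackgroundOperations();
        }
    }
}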