Usage of org.apache.jackrabbit.oak.plugins.document.memory.MemoryDocumentStore in the jackrabbit-oak project (Apache): class ClusterInfoTest, method readWriteMode.
@Test
public void readWriteMode() throws InterruptedException {
    MemoryDocumentStore docStore = new MemoryDocumentStore();
    Clock virtualClock = new Clock.Virtual();
    virtualClock.waitUntil(System.currentTimeMillis());
    ClusterNodeInfo.setClock(virtualClock);
    // two cluster nodes sharing the same in-memory document store
    DocumentNodeStore ns1 = new DocumentMK.Builder()
            .setDocumentStore(docStore)
            .setAsyncDelay(0)
            .setLeaseCheck(false)
            .setClusterId(1)
            .getNodeStore();
    DocumentNodeStore ns2 = new DocumentMK.Builder()
            .setDocumentStore(docStore)
            .setAsyncDelay(0)
            .setLeaseCheck(false)
            .setClusterId(2)
            .getNodeStore();
    // Bring the current time forward to after the leaseTime which would have been
    // updated in the DocumentNodeStore initialization.
    virtualClock.waitUntil(virtualClock.getTime() + ns1.getClusterInfo().getLeaseTime());
    ns1.getClusterInfo().setLeaseTime(0);
    ns1.getClusterInfo().setLeaseUpdateInterval(0);
    ns2.getClusterInfo().setLeaseTime(0);
    ns2.getClusterInfo().setLeaseUpdateInterval(0);
    List<ClusterNodeInfoDocument> infoDocs =
            docStore.query(Collection.CLUSTER_NODES, "0", "a", Integer.MAX_VALUE);
    assertEquals(2, infoDocs.size());
    // no mode configured yet
    assertNull(docStore.getReadPreference());
    assertNull(docStore.getWriteConcern());
    docStore.setReadWriteMode("read:primary, write:majority");
    assertEquals(ReadPreference.primary(), docStore.getReadPreference());
    assertEquals(WriteConcern.MAJORITY, docStore.getWriteConcern());
    // unknown modes must be ignored and leave the current settings untouched
    UpdateOp update = new UpdateOp(infoDocs.get(0).getId(), false);
    update.set("readWriteMode", "read:xyz, write:abc");
    docStore.findAndUpdate(Collection.CLUSTER_NODES, update);
    ns1.renewClusterIdLease();
    assertEquals(ReadPreference.primary(), docStore.getReadPreference());
    assertEquals(WriteConcern.MAJORITY, docStore.getWriteConcern());
    // a valid mode change picked up via the cluster node document takes effect
    update = new UpdateOp(infoDocs.get(0).getId(), false);
    update.set("readWriteMode", "read:nearest, write:fsynced");
    docStore.findAndUpdate(Collection.CLUSTER_NODES, update);
    ns1.renewClusterIdLease();
    assertEquals(ReadPreference.nearest(), docStore.getReadPreference());
    assertEquals(WriteConcern.FSYNCED, docStore.getWriteConcern());
    ns1.dispose();
    ns2.dispose();
}
Usage of org.apache.jackrabbit.oak.plugins.document.memory.MemoryDocumentStore in the jackrabbit-oak project (Apache): class ClusterInfoTest, method renewLease.
@Test
public void renewLease() throws InterruptedException {
    MemoryDocumentStore docStore = new MemoryDocumentStore();
    Clock virtualClock = new Clock.Virtual();
    virtualClock.waitUntil(System.currentTimeMillis());
    ClusterNodeInfo.setClock(virtualClock);
    DocumentNodeStore nodeStore = new DocumentMK.Builder()
            .setDocumentStore(docStore)
            .setAsyncDelay(0)
            .setLeaseCheck(false)
            .getNodeStore();
    ClusterNodeInfo clusterInfo = nodeStore.getClusterInfo();
    assertNotNull(clusterInfo);
    // remember the lease end set during initialization
    long initialLeaseEnd = getLeaseEndTime(nodeStore);
    // advance to one second before the lease update interval elapses
    virtualClock.waitUntil(virtualClock.getTime()
            + ClusterNodeInfo.DEFAULT_LEASE_UPDATE_INTERVAL_MILLIS - 1000);
    // too early: the lease must not be renewed yet
    nodeStore.renewClusterIdLease();
    assertEquals(initialLeaseEnd, getLeaseEndTime(nodeStore));
    // cross the update interval
    virtualClock.waitUntil(virtualClock.getTime() + 2000);
    // now renewal must push the lease end forward
    nodeStore.renewClusterIdLease();
    assertTrue(getLeaseEndTime(nodeStore) > initialLeaseEnd);
    nodeStore.dispose();
}
Usage of org.apache.jackrabbit.oak.plugins.document.memory.MemoryDocumentStore in the jackrabbit-oak project (Apache): class ConcurrentConflictTest, method initDocumentMK.
@Before
@Override
public void initDocumentMK() {
    logBuffer.setLength(0);
    this.store = new MemoryDocumentStore();
    // seed the shared store: NUM_NODES nodes, each starting with value 100
    DocumentMK seeder = openDocumentMK(1);
    for (int n = 0; n < NUM_NODES; n++) {
        seeder.commit("/", "+\"node-" + n + "\":{\"value\":100}", null, null);
    }
    seeder.dispose();
    // one DocumentMK per concurrent writer, cluster ids starting at 2
    for (int w = 0; w < NUM_WRITERS; w++) {
        kernels.add(openDocumentMK(w + 2));
    }
}
Usage of org.apache.jackrabbit.oak.plugins.document.memory.MemoryDocumentStore in the jackrabbit-oak project (Apache): class DocumentNodeStoreSweepTest, method lowerSweepLimit.
@Test
public void lowerSweepLimit() throws Exception {
    ns.dispose();
    // restart on a store wrapper that records every indexed query's start value
    final Map<String, Long> observedQueries = Maps.newHashMap();
    store = new FailingDocumentStore(new MemoryDocumentStore() {
        @Nonnull
        @Override
        public <T extends Document> List<T> query(Collection<T> collection,
                                                  String fromKey,
                                                  String toKey,
                                                  String indexedProperty,
                                                  long startValue,
                                                  int limit) {
            observedQueries.put(indexedProperty, startValue);
            return super.query(collection, fromKey, toKey, indexedProperty, startValue, limit);
        }
    });
    ns = createDocumentNodeStore(0);
    createUncommittedChanges();
    // grab the revision of one of the uncommitted changes
    Revision uncommitted = null;
    for (NodeDocument doc : Utils.getAllDocuments(store)) {
        if (doc.getPath().startsWith("/node-")) {
            uncommitted = Iterables.getFirst(doc.getAllChanges(), null);
            break;
        }
    }
    assertNotNull(uncommitted);
    // a new head plus a background sweep must clean up the uncommitted changes
    NodeBuilder builder = ns.getRoot().builder();
    builder.child("foo");
    merge(ns, builder);
    observedQueries.clear();
    ns.runBackgroundOperations();
    assertCleanStore();
    // the sweeper must have queried only the most recently modified documents
    Long modified = observedQueries.get(NodeDocument.MODIFIED_IN_SECS);
    assertNotNull(modified);
    long expectedStart = NodeDocument.getModifiedInSecs(uncommitted.getTimestamp());
    assertEquals(expectedStart, modified.longValue());
}
Usage of org.apache.jackrabbit.oak.plugins.document.memory.MemoryDocumentStore in the jackrabbit-oak project (Apache): class DocumentNodeStoreTest, method localChangesFromCache.
// OAK-4715: diffing local changes must be served from the cache,
// without issuing any NODES queries against the document store.
@Test
public void localChangesFromCache() throws Exception {
    final AtomicInteger nodesQueryCount = new AtomicInteger();
    // count every query that hits the NODES collection
    DocumentStore countingStore = new MemoryDocumentStore() {
        @Nonnull
        @Override
        public <T extends Document> List<T> query(Collection<T> collection,
                                                  String fromKey,
                                                  String toKey,
                                                  int limit) {
            if (collection == Collection.NODES) {
                nodesQueryCount.incrementAndGet();
            }
            return super.query(collection, fromKey, toKey, limit);
        }
    };
    DocumentNodeStore ns1 = builderProvider.newBuilder()
            .setClusterId(1).setAsyncDelay(0).setDocumentStore(countingStore).getNodeStore();
    NodeBuilder nb = ns1.getRoot().builder();
    nb.child("node-1");
    merge(ns1, nb);
    ns1.runBackgroundOperations();
    // a second cluster node contributes a sibling change and both sync up
    DocumentNodeStore ns2 = builderProvider.newBuilder()
            .setClusterId(2).setAsyncDelay(0).setDocumentStore(countingStore).getNodeStore();
    nb = ns2.getRoot().builder();
    nb.child("node-2");
    merge(ns2, nb);
    ns2.runBackgroundOperations();
    ns1.runBackgroundOperations();
    // 1) diff after adding nested children — no store queries expected
    NodeState before = ns1.getRoot();
    nb = before.builder();
    nb.child("node-1").child("foo").child("bar");
    NodeState after = merge(ns1, nb);
    nodesQueryCount.set(0);
    JsopDiff.diffToJsop(before, after);
    assertEquals(0, nodesQueryCount.get());
    // 2) diff after a property change — no store queries expected
    before = after;
    nb = ns1.getRoot().builder();
    nb.child("node-1").child("foo").child("bar").setProperty("p", 1);
    after = merge(ns1, nb);
    nodesQueryCount.set(0);
    JsopDiff.diffToJsop(before, after);
    assertEquals(0, nodesQueryCount.get());
    // 3) diff after removing a subtree — no store queries expected
    before = after;
    nb = ns1.getRoot().builder();
    nb.child("node-1").child("foo").child("bar").remove();
    after = merge(ns1, nb);
    nodesQueryCount.set(0);
    JsopDiff.diffToJsop(before, after);
    assertEquals(0, nodesQueryCount.get());
}
Aggregations