Use of org.apache.jackrabbit.oak.plugins.document.DocumentNodeStore in the Apache jackrabbit-oak project: class ClusterRepositoryInfoTest, method checkChangeId.
/**
 * Verifies that a custom repository id set via {@code setId} is returned
 * as-is by {@link ClusterRepositoryInfo#getOrCreateId} instead of a newly
 * generated one.
 */
@Test
public void checkChangeId() throws Exception {
    MemoryDocumentStore store = new MemoryDocumentStore();
    DocumentNodeStore ds1 = builderProvider.newBuilder()
            .setAsyncDelay(0)
            .setDocumentStore(store)
            .setClusterId(1)
            .getNodeStore();
    // Create the initial repository id, then flush it to the store.
    ClusterRepositoryInfo.getOrCreateId(ds1);
    ds1.runBackgroundOperations();
    // Change with a custom ID
    setId(ds1, "xxxxxxxx");
    String id = ClusterRepositoryInfo.getOrCreateId(ds1);
    Assert.assertNotNull(id);
    // JUnit convention: expected value first, actual second — the original
    // call had the arguments swapped, which produces a misleading failure
    // message ("expected <actual> but was <expected>").
    Assert.assertEquals("xxxxxxxx", id);
}
Use of org.apache.jackrabbit.oak.plugins.document.DocumentNodeStore in the Apache jackrabbit-oak project: class ClusterRepositoryInfoTest, method differentCluster.
/**
 * Verifies that two independent DocumentNodeStores (separate document
 * stores, shared blob store) each receive a distinct repository id.
 */
@Test
public void differentCluster() throws Exception {
    DocumentNodeStore ds1 = builderProvider.newBuilder()
            .setAsyncDelay(0)
            .setDocumentStore(new MemoryDocumentStore())
            .setBlobStore(blobStore)
            .getNodeStore();
    String repoId1 = ClusterRepositoryInfo.getOrCreateId(ds1);
    DocumentNodeStore ds2 = builderProvider.newBuilder()
            .setAsyncDelay(0)
            .setDocumentStore(new MemoryDocumentStore())
            .setBlobStore(blobStore)
            .getNodeStore();
    String repoId2 = ClusterRepositoryInfo.getOrCreateId(ds2);
    // assertNotSame only checks reference identity: two independently
    // created Strings are always different objects even when their content
    // is equal, so the original assertion could never fail. Use content
    // inequality instead.
    Assert.assertNotEquals(repoId1, repoId2);
}
Use of org.apache.jackrabbit.oak.plugins.document.DocumentNodeStore in the Apache jackrabbit-oak project: class JournalIT, method doLargeCleanupTest.
/**
 * Creates {@code size} commits (starting at {@code offset}), flushing each
 * one into the journal, then asserts that the journal garbage collector
 * removes exactly {@code size} entries.
 *
 * @param offset index of the first node name suffix
 * @param size   number of commits to create and expect to be collected
 */
private void doLargeCleanupTest(int offset, int size) throws Exception {
    Clock clock = new Clock.Virtual();
    DocumentMK mk1 = createMK(0, /* clusterId: 0 => uses clusterNodes collection */
            0, new MemoryDocumentStore(), new MemoryBlobStore());
    DocumentNodeStore ns1 = mk1.getNodeStore();
    // make sure we're visible and marked as active
    renewClusterIdLease(ns1);
    JournalGarbageCollector collector = new JournalGarbageCollector(ns1);
    clock.getTimeIncreasing();
    clock.getTimeIncreasing();
    // drop any journal entries left over from earlier activity
    collector.gc(0, TimeUnit.MILLISECONDS);
    // create the parametrized number of entries, one commit each
    int end = offset + size;
    for (int idx = offset; idx < end; idx++) {
        String diff = "+\"regular" + idx + "\": {}";
        mk1.commit("/", diff, null, null);
        // flush the change into the journal via background ops
        ns1.runBackgroundOperations();
    }
    // give the entries a moment to age past the zero-max-age threshold
    Thread.sleep(100);
    // every entry created above must now be collectable
    assertEquals(size, collector.gc(0, TimeUnit.MILLISECONDS));
}
Use of org.apache.jackrabbit.oak.plugins.document.DocumentNodeStore in the Apache jackrabbit-oak project: class ClusterConflictTest, method mergeRetryWhileBackgroundRead.
// OAK-3433: regression test. The scenario below depends on the exact
// interleaving of merges and background read/update operations on the two
// cluster nodes ns1 and ns2 — do not reorder statements.
@Test
public void mergeRetryWhileBackgroundRead() throws Exception {
DocumentNodeStore ns1 = mk.getNodeStore();
// seed /a/b/c/foo on ns1 and propagate it to ns2
NodeBuilder b1 = ns1.getRoot().builder();
b1.child("a").child("b").child("c").child("foo");
merge(ns1, b1);
ns1.runBackgroundOperations();
ns2.runBackgroundOperations();
NodeBuilder b2 = ns2.getRoot().builder();
// force cache fill
assertNodeExists(b2, "/a/b/c/foo");
// remove /a/b/c on ns1
b1 = ns1.getRoot().builder();
b1.child("a").child("b").child("c").remove();
merge(ns1, b1);
// perform some change on ns2
b2.child("z");
merge(ns2, b2);
// push ns2's change without the paired background read on ns1 yet
runBackgroundUpdate(ns2);
// this will pickup changes done by ns2 and update
// the head revision
runBackgroundRead(ns1);
// the next step is where the issue described
// in OAK-3433 occurs.
// the journal entry with changes done on ns1 is pushed
// with the current head revision, which is newer
// than the most recent change in the journal entry
runBackgroundUpdate(ns1);
// perform a background read after the rebase
// the first merge attempt will fail with a conflict
// because /a/b/c is seen as changed in the future
// without the fix for OAK-3433:
// the second merge attempt succeeds because by then the
// /a/b/c change revision is visible and appears to have happened
// before the commit revision (NOTE(review): original comment read
// "before the commit revision but before the base revision", which is
// garbled — presumably "after the base revision but before the commit
// revision"; verify against OAK-3433)
b2 = ns2.getRoot().builder();
b2.child("z").setProperty("q", "v");
try {
// the hook triggers a background read mid-merge to reproduce the race,
// then tries to add a child under /a/b/c, which ns1 already removed
ns2.merge(b2, new CommitHook() {
@Nonnull
@Override
public NodeState processCommit(NodeState before, NodeState after, CommitInfo info) throws CommitFailedException {
runBackgroundRead(ns2);
NodeBuilder builder = after.builder();
if (builder.getChildNode("a").getChildNode("b").hasChildNode("c")) {
builder.child("a").child("b").child("c").child("bar");
} else {
throw new CommitFailedException(CommitFailedException.OAK, 0, "/a/b/c does not exist anymore");
}
return builder.getNodeState();
}
}, CommitInfo.EMPTY);
// with the OAK-3433 fix the removal is visible and the hook must throw
fail("Merge must fail with CommitFailedException");
} catch (CommitFailedException e) {
// expected
}
}
Use of org.apache.jackrabbit.oak.plugins.document.DocumentNodeStore in the Apache jackrabbit-oak project: class JournalIT, method cacheInvalidationTest.
// Verifies that external changes made by one cluster node (ns2) invalidate
// the document cache of another node (ns1) only when ns1 runs its
// background operations — and that re-reading repopulates the cache.
// The assertions depend on the exact order of background ops; do not reorder.
@Test
public void cacheInvalidationTest() throws Exception {
final DocumentNodeStore ns1 = createMK(1, 0).getNodeStore();
final DocumentNodeStore ns2 = createMK(2, 0).getNodeStore();
LOG.info("cache size 1: " + getCacheElementCount(ns1.getDocumentStore()));
// invalidate cache under test first
ns1.getDocumentStore().invalidateCache();
{
DocumentStore s = ns1.getDocumentStore();
LOG.info("m.size=" + getCacheElementCount(s));
}
LOG.info("cache size 2: " + getCacheElementCount(ns1.getDocumentStore()));
// first create child node in instance 1
final List<String> paths = createRandomPaths(1, 5000000, 1000);
int i = 0;
for (String path : paths) {
if (i++ % 100 == 0) {
LOG.info("at " + i);
}
getOrCreate(ns1, path, false);
}
// a second, smaller set of paths used for the modification rounds below
final List<String> paths2 = createRandomPaths(20, 2345, 100);
getOrCreate(ns1, paths2, false);
// flush ns1's changes so ns2 can see them
ns1.runBackgroundOperations();
// creating the nodes must have left them in ns1's document cache
for (String path : paths) {
assertDocCache(ns1, true, path);
}
{
DocumentStore s = ns1.getDocumentStore();
LOG.info("m.size=" + getCacheElementCount(s));
}
LOG.info("cache size 2: " + getCacheElementCount(ns1.getDocumentStore()));
long time = System.currentTimeMillis();
// 100 rounds: ns2 modifies a random subset, then we check ns1's cache
// transitions cached -> invalidated -> re-cached
for (int j = 0; j < 100; j++) {
long now = System.currentTimeMillis();
LOG.info("loop " + j + ", " + (now - time) + "ms");
time = now;
final Set<String> electedPaths = choose(paths2, random.nextInt(30));
{
// choose a random few from above created paths and modify them
final long t1 = System.currentTimeMillis();
// make sure ns2 has the latest from ns1
ns2.runBackgroundOperations();
final long t2 = System.currentTimeMillis();
LOG.info("ns2 background took " + (t2 - t1) + "ms");
for (String electedPath : electedPaths) {
// modify /child in another instance 2
setProperty(ns2, electedPath, "p", "ns2" + System.currentTimeMillis(), false);
}
final long t3 = System.currentTimeMillis();
LOG.info("setting props " + (t3 - t2) + "ms");
// flush ns2's modifications so ns1's next background op sees them
ns2.runBackgroundOperations();
final long t4 = System.currentTimeMillis();
LOG.info("ns2 background took2 " + (t4 - t3) + "ms");
}
// that should not have changed the fact that we have it cached in 'ns1'
for (String electedPath : electedPaths) {
assertDocCache(ns1, true, electedPath);
}
// doing a backgroundOp now should trigger invalidation
// which thx to the external modification will remove the entry from the cache:
ns1.runBackgroundOperations();
for (String electedPath : electedPaths) {
assertDocCache(ns1, false, electedPath);
}
// when I access it again with 'ns1', then it gets cached again:
for (String electedPath : electedPaths) {
getOrCreate(ns1, electedPath, false);
assertDocCache(ns1, true, electedPath);
}
}
}
Aggregations