Use of org.apache.jackrabbit.oak.plugins.document.DocumentMK in project jackrabbit-oak by Apache.
Class JournalIT, method doLargeCleanupTest:
private void doLargeCleanupTest(int offset, int size) throws Exception {
    Clock clock = new Clock.Virtual();
    DocumentMK mk1 = createMK(0 /* clusterId: 0 => uses clusterNodes collection */, 0,
            new MemoryDocumentStore(), new MemoryBlobStore());
    DocumentNodeStore ns1 = mk1.getNodeStore();
    // make sure we're visible and marked as active
    renewClusterIdLease(ns1);
    JournalGarbageCollector gc = new JournalGarbageCollector(ns1);
    clock.getTimeIncreasing();
    clock.getTimeIncreasing();
    // clean up everything that might still be there
    gc.gc(0, TimeUnit.MILLISECONDS);
    // create entries as parametrized:
    for (int i = offset; i < size + offset; i++) {
        mk1.commit("/", "+\"regular" + i + "\": {}", null, null);
        // always run background ops to 'flush' the change
        // into the journal:
        ns1.runBackgroundOperations();
    }
    // sleep 100 millis
    Thread.sleep(100);
    // should now be able to clean up everything
    assertEquals(size, gc.gc(0, TimeUnit.MILLISECONDS));
}
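The pattern generalizes beyond the test's createMK helper: commit, flush the change into the journal with background operations, then collect. A minimal sketch, assuming the same in-memory stores and the two-argument gc(maxRevisionAge, unit) API shown above:

// Minimal sketch (assumed setup, not the test's createMK helper):
DocumentMK mk = new DocumentMK.Builder()
        .setDocumentStore(new MemoryDocumentStore())
        .open();
DocumentNodeStore ns = mk.getNodeStore();
mk.commit("/", "+\"child\": {}", null, null);
ns.runBackgroundOperations(); // flush the change into the journal
JournalGarbageCollector gc = new JournalGarbageCollector(ns);
int removed = gc.gc(0, TimeUnit.MILLISECONDS); // returns the number of entries removed
mk.dispose();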
Use of org.apache.jackrabbit.oak.plugins.document.DocumentMK in project jackrabbit-oak by Apache.
Class CollisionMarkerTest, method newDocumentMK:
private static DocumentMK newDocumentMK(DB db, int clusterId) {
    DocumentMK mk = new DocumentMK.Builder()
            .setAsyncDelay(0)
            .setLeaseCheck(false)
            .setMongoDB(db)
            .setClusterId(clusterId)
            .open();
    // do not retry on conflicts
    mk.getNodeStore().setMaxBackOffMillis(0);
    return mk;
}
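Two such instances pointed at the same MongoDB database simulate a two-node cluster; with the back-off set to zero, conflicting writes surface as collision markers instead of being resolved by a retry. A hypothetical usage (the db handle and cluster IDs are illustrative):

// Hypothetical: two cluster nodes sharing one MongoDB database.
DocumentMK mk1 = newDocumentMK(db, 1);
DocumentMK mk2 = newDocumentMK(db, 2);
// Concurrent conflicting commits through mk1 and mk2 will now mark
// collisions rather than backing off and retrying.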
Use of org.apache.jackrabbit.oak.plugins.document.DocumentMK in project jackrabbit-oak by Apache.
Class OakFixture, method getRDB:
public static OakFixture getRDB(final String name, final String jdbcuri, final String jdbcuser,
        final String jdbcpasswd, final String tablePrefix, final boolean dropDBAfterTest,
        final long cacheSize, final boolean useDataStore, final File base, final int dsCacheInMB) {
    return new OakFixture(name) {

        private DocumentMK[] kernels;

        private BlobStoreFixture blobStoreFixture;
        private RDBOptions getOptions(boolean dropDBAfterTest, String tablePrefix) {
            return new RDBOptions().dropTablesOnClose(dropDBAfterTest).tablePrefix(tablePrefix);
        }
        private BlobStore getBlobStore(StatisticsProvider statsProvider) {
            try {
                if (useDataStore) {
                    initializeBlobStoreFixture(statsProvider);
                    return blobStoreFixture.setUp();
                } else {
                    DataSource ds = RDBDataSourceFactory.forJdbcUrl(jdbcuri, jdbcuser, jdbcpasswd);
                    return new RDBBlobStore(ds, getOptions(dropDBAfterTest, tablePrefix));
                }
            } catch (Exception e) {
                throw new RuntimeException(e);
            }
        }

        @Override
        public Oak getOak(int clusterId) throws Exception {
            DataSource ds = RDBDataSourceFactory.forJdbcUrl(jdbcuri, jdbcuser, jdbcpasswd);
            DocumentMK.Builder mkBuilder = new DocumentMK.Builder()
                    .setRDBConnection(ds, getOptions(dropDBAfterTest, tablePrefix))
                    .memoryCacheSize(cacheSize)
                    .setClusterId(clusterId)
                    .setLogging(false);
            BlobStore blobStore = getBlobStore(StatisticsProvider.NOOP);
            if (blobStore != null) {
                mkBuilder.setBlobStore(blobStore);
            }
            DocumentMK dmk = mkBuilder.open();
            return newOak(dmk.getNodeStore());
        }

        @Override
        public Oak[] setUpCluster(int n, StatisticsProvider statsProvider) throws Exception {
            Oak[] cluster = new Oak[n];
            kernels = new DocumentMK[cluster.length];
            for (int i = 0; i < cluster.length; i++) {
                BlobStore blobStore = getBlobStore(statsProvider);
                DataSource ds = RDBDataSourceFactory.forJdbcUrl(jdbcuri, jdbcuser, jdbcpasswd);
                DocumentMK.Builder mkBuilder = new DocumentMK.Builder()
                        .setStatisticsProvider(statsProvider)
                        .setRDBConnection(ds, getOptions(dropDBAfterTest, tablePrefix))
                        .memoryCacheSize(cacheSize)
                        .setLeaseCheck(false)
                        .setClusterId(i + 1)
                        .setLogging(false);
                if (blobStore != null) {
                    mkBuilder.setBlobStore(blobStore);
                }
                kernels[i] = mkBuilder.open();
                cluster[i] = newOak(kernels[i].getNodeStore());
            }
            return cluster;
        }

        @Override
        public void tearDownCluster() {
            String dropped = "";
            for (DocumentMK kernel : kernels) {
                kernel.dispose();
                if (kernel.getDocumentStore() instanceof RDBDocumentStore) {
                    dropped += ((RDBDocumentStore) kernel.getDocumentStore()).getDroppedTables();
                }
            }
            if (dropDBAfterTest) {
                if (blobStoreFixture != null) {
                    blobStoreFixture.tearDown();
                }
                if (dropped.isEmpty()) {
                    throw new RuntimeException("dropdb was set, but tables have not been dropped");
                }
            }
        }

        private void initializeBlobStoreFixture(StatisticsProvider statsProvider) {
            if (useDataStore && blobStoreFixture == null) {
                blobStoreFixture = BlobStoreFixture.create(base, true, dsCacheInMB, statsProvider);
            }
        }
    };
}
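A hypothetical invocation of this fixture, using the setUpCluster and tearDownCluster overrides defined above; every argument value is illustrative, not taken from the project's benchmark runner:

// Hypothetical: a two-node cluster backed by an embedded H2 database.
OakFixture fixture = OakFixture.getRDB("Oak-RDB", "jdbc:h2:file:./target/oak-rdb-test",
        "sa", "", "" /* tablePrefix */, true /* dropDBAfterTest */,
        32 * 1024 * 1024 /* cacheSize */, false /* useDataStore */,
        new File("target"), 16 /* dsCacheInMB */);
Oak[] cluster = fixture.setUpCluster(2, StatisticsProvider.NOOP);
try {
    // ... exercise the two Oak instances ...
} finally {
    fixture.tearDownCluster(); // drops the tables because dropDBAfterTest is true
}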
Use of org.apache.jackrabbit.oak.plugins.document.DocumentMK in project jackrabbit-oak by Apache.
Class ConcurrentAddNodesClusterIT, method addNodesConcurrent2:
@Ignore("OAK-1807")
@Test
public void addNodesConcurrent2() throws Exception {
final Thread mainThread = Thread.currentThread();
for (int i = 0; i < NUM_CLUSTER_NODES; i++) {
DocumentMK mk = new DocumentMK.Builder().setMongoDB(createConnection().getDB()).setClusterId(i + 1).open();
mks.add(mk);
}
final Map<String, Exception> exceptions = Collections.synchronizedMap(new HashMap<String, Exception>());
final CountDownLatch latch = new CountDownLatch(1);
final AtomicBoolean stop = new AtomicBoolean();
final UncaughtExceptionHandler ueh = new UncaughtExceptionHandler() {
@Override
public void uncaughtException(Thread t, Throwable e) {
RuntimeException r = new RuntimeException("Exception in thread " + t.getName(), e);
r.printStackTrace();
}
};
for (int i = 0; i < mks.size(); i++) {
DocumentMK mk = mks.get(i);
final Repository repo = new Jcr(mk.getNodeStore()).createRepository();
repos.add(repo);
for (int w = 0; w <= WORKER_COUNT; w++) {
final String name = "Worker-" + (i + 1) + "-" + (w + 1);
final Runnable r = new Runnable() {
final Session session = createAdminSession(repo);
int count = 0;
@Override
public void run() {
try {
Uninterruptibles.awaitUninterruptibly(latch);
session.refresh(false);
Node node = session.getRootNode().addNode(name + count++, "oak:Unstructured");
for (int j = 0; j < NODE_COUNT && !stop.get(); j++) {
node.addNode("node" + j);
session.save();
}
} catch (RepositoryException e) {
RuntimeException r = new RuntimeException("Exception in thread " + name, e);
r.printStackTrace();
exceptions.put(Thread.currentThread().getName(), r);
stop.set(true);
mainThread.interrupt();
} finally {
session.logout();
}
}
};
//Last runnable would be a long running one
Runnable runnable = r;
if (w == WORKER_COUNT) {
runnable = new Runnable() {
@Override
public void run() {
while (!stop.get()) {
r.run();
}
}
};
}
Thread t = new Thread(runnable);
t.setName(name);
t.setUncaughtExceptionHandler(ueh);
workers.add(t);
}
}
for (Thread t : workers) {
t.start();
}
latch.countDown();
TimeUnit.MINUTES.sleep(10);
stop.set(true);
for (Thread t : workers) {
t.join();
}
for (Map.Entry<String, Exception> entry : exceptions.entrySet()) {
// System.out.println("exception in thread " + entry.getKey());
throw entry.getValue();
}
}
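The coordination pattern here is worth isolating: a single CountDownLatch releases every worker at the same instant, and a shared AtomicBoolean ends the run. A minimal sketch of just that skeleton, stripped of the repository work (names are hypothetical; Uninterruptibles is Guava's, as used above):

// Minimal sketch of the start/stop coordination used by the test;
// runs inside a method declared to throw InterruptedException.
final CountDownLatch start = new CountDownLatch(1);
final AtomicBoolean stop = new AtomicBoolean();
Thread worker = new Thread(new Runnable() {
    @Override
    public void run() {
        Uninterruptibles.awaitUninterruptibly(start); // block until released
        while (!stop.get()) {
            // one unit of repository work goes here
        }
    }
});
worker.start();
start.countDown();          // release all waiting workers at once
TimeUnit.SECONDS.sleep(5);  // let them run for a while
stop.set(true);             // ask workers to finish
worker.join();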
Use of org.apache.jackrabbit.oak.plugins.document.DocumentMK in project jackrabbit-oak by Apache.
Class ConcurrentAddNodesClusterIT, method addNodes2:
@Test
public void addNodes2() throws Exception {
    for (int i = 0; i < 3; i++) {
        DocumentMK mk = new DocumentMK.Builder()
                .setMongoDB(createConnection().getDB())
                .setAsyncDelay(0)
                .setClusterId(i + 1)
                .open();
        mks.add(mk);
    }
    final DocumentMK mk1 = mks.get(0);
    final DocumentMK mk2 = mks.get(1);
    final DocumentMK mk3 = mks.get(2);
    Repository r1 = new Jcr(mk1.getNodeStore()).createRepository();
    repos.add(r1);
    Repository r2 = new Jcr(mk2.getNodeStore()).createRepository();
    repos.add(r2);
    Repository r3 = new Jcr(mk3.getNodeStore()).createRepository();
    repos.add(r3);
    Session s1 = r1.login(new SimpleCredentials("admin", "admin".toCharArray()));
    Session s2 = r2.login(new SimpleCredentials("admin", "admin".toCharArray()));
    Session s3 = r3.login(new SimpleCredentials("admin", "admin".toCharArray()));
    ensureIndex(s1.getRootNode(), PROP_NAME);
    runBackgroundOps(mk1);
    runBackgroundOps(mk2);
    runBackgroundOps(mk3);
    // begin test
    Node root2 = s2.getRootNode().addNode("testroot-Worker-2", "nt:unstructured");
    createNodes(root2, "testnode0");
    s2.save();
    createNodes(root2, "testnode1");
    runBackgroundOps(mk1);
    runBackgroundOps(mk3);
    // publish 'testroot-Worker-2/testnode0'
    runBackgroundOps(mk2);
    Node root3 = s3.getRootNode().addNode("testroot-Worker-3", "nt:unstructured");
    createNodes(root3, "testnode0");
    s2.save();
    createNodes(root2, "testnode2");
    // sees 'testroot-Worker-2/testnode0'
    runBackgroundOps(mk1);
    // sees 'testroot-Worker-2/testnode0'
    runBackgroundOps(mk3);
    // publish 'testroot-Worker-2/testnode1'
    runBackgroundOps(mk2);
    // subsequent read on mk3 will read already published docs from mk2
    s3.save();
    createNodes(root3, "testnode1");
    Node root1 = s1.getRootNode().addNode("testroot-Worker-1", "nt:unstructured");
    createNodes(root1, "testnode0");
    s2.save();
    createNodes(root2, "testnode3");
    runBackgroundOps(mk1);
    runBackgroundOps(mk3);
    runBackgroundOps(mk2);
    s1.save();
    createNodes(root1, "testnode1");
    s3.save();
    createNodes(root3, "testnode2");
    runBackgroundOps(mk1);
    s1.save();
    s1.logout();
    s2.logout();
    s3.logout();
}
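The interleaving above encodes DocumentMK's propagation model: with the async delay set to 0, a saved change becomes visible on another cluster node only after the writer's background write and the reader's background read have both run. A minimal sketch of one such hop, reusing the sessions and helpers defined above (the node name is hypothetical):

// Hypothetical: propagate one change from cluster node 2 to cluster node 3.
s2.getRootNode().addNode("example", "nt:unstructured");
s2.save();              // persist on cluster node 2
runBackgroundOps(mk2);  // background write: publish the change
runBackgroundOps(mk3);  // background read: pick up the change
s3.refresh(false);      // discard s3's stale session state
assertTrue(s3.getRootNode().hasNode("example"));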