Use of org.apache.jackrabbit.oak.plugins.document.DocumentMK in project jackrabbit-oak by Apache.
From class ConcurrentAddNodesClusterIT, method after().
@After
public void after() throws Exception {
    workers.clear();
    for (Repository repo : repos) {
        dispose(repo);
    }
    repos.clear();
    for (DocumentMK mk : mks) {
        mk.dispose();
    }
    mks.clear();
    dropDB();
}
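The teardown calls a dropDB() helper that is not shown in the snippet. A minimal sketch of what such a helper might look like, assuming createConnection() returns an Oak MongoConnection and the tests use a dedicated MongoDB database (the body is an assumption, not the project's actual code):

    // Assumed sketch: drop the MongoDB test database so every test starts from a clean state.
    private static void dropDB() throws Exception {
        MongoConnection c = createConnection(); // assumed helper, as used in the tests below
        try {
            c.getDB().dropDatabase();           // legacy driver API: DB#dropDatabase()
        } finally {
            c.close();
        }
    }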
Use of org.apache.jackrabbit.oak.plugins.document.DocumentMK in project jackrabbit-oak by Apache.
From class ConcurrentAddNodesClusterIT, method rebaseVisibility().
@Test
public void rebaseVisibility() throws Exception {
    for (int i = 0; i < 2; i++) {
        DocumentMK mk = new DocumentMK.Builder()
                .setMongoDB(createConnection().getDB())
                .setAsyncDelay(0)
                .setClusterId(i + 1)
                .open();
        mks.add(mk);
    }
    final DocumentMK mk1 = mks.get(0);
    final DocumentMK mk2 = mks.get(1);

    Repository r1 = new Jcr(mk1.getNodeStore()).createRepository();
    repos.add(r1);
    Repository r2 = new Jcr(mk2.getNodeStore()).createRepository();
    repos.add(r2);

    Session s1 = r1.login(new SimpleCredentials("admin", "admin".toCharArray()));
    Session s2 = r2.login(new SimpleCredentials("admin", "admin".toCharArray()));

    Node root1 = s1.getRootNode().addNode("session-1");
    s1.save();
    Node root2 = s2.getRootNode().addNode("session-2");
    s2.save();

    runBackgroundOps(mk1);
    runBackgroundOps(mk2);
    runBackgroundOps(mk1);

    createNodes(root1, "nodes");
    createNodes(root2, "nodes");
    s2.save();

    runBackgroundOps(mk2);
    runBackgroundOps(mk1);

    // changes saved on the other cluster node are not visible before a refresh...
    assertFalse(s1.getRootNode().hasNode("session-2/nodes"));
    s1.refresh(true);
    // ...but become visible once the session is refreshed
    assertTrue(s1.getRootNode().hasNode("session-2/nodes"));

    s1.logout();
    s2.logout();
}
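runBackgroundOps() is a small helper of the test class that the snippet above depends on to propagate changes between the two cluster nodes. A plausible sketch, assuming it simply triggers the background read/write operations of the underlying DocumentNodeStore (the name and signature are taken from the calls above; the body is an assumption):

    // Assumed sketch: force the asynchronous background operations of the DocumentNodeStore
    // so that changes written by the other cluster node become visible to this one.
    private static void runBackgroundOps(DocumentMK mk) {
        mk.getNodeStore().runBackgroundOperations();
    }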
Use of org.apache.jackrabbit.oak.plugins.document.DocumentMK in project jackrabbit-oak by Apache.
From class ConcurrentAddNodesClusterIT, method addNodes().
@Test
public void addNodes() throws Exception {
    for (int i = 0; i < 2; i++) {
        DocumentMK mk = new DocumentMK.Builder()
                .setMongoDB(createConnection().getDB())
                .setClusterId(i + 1)
                .open();
        mks.add(mk);
    }
    final DocumentMK mk1 = mks.get(0);
    final DocumentMK mk2 = mks.get(1);

    Repository r1 = new Jcr(mk1.getNodeStore()).createRepository();
    repos.add(r1);
    Repository r2 = new Jcr(mk2.getNodeStore()).createRepository();
    repos.add(r2);

    Session s1 = r1.login(new SimpleCredentials("admin", "admin".toCharArray()));
    Session s2 = r2.login(new SimpleCredentials("admin", "admin".toCharArray()));

    ensureIndex(s1.getRootNode(), PROP_NAME);
    ensureIndex(s2.getRootNode(), PROP_NAME);

    Map<String, Exception> exceptions = Collections.synchronizedMap(
            new HashMap<String, Exception>());
    createNodes(s1, "testroot-1", 1, 1, exceptions);
    createNodes(s2, "testroot-2", 1, 1, exceptions);

    for (Map.Entry<String, Exception> entry : exceptions.entrySet()) {
        throw entry.getValue();
    }

    s1.logout();
    s2.logout();
}
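ensureIndex() is not part of the snippet. A hypothetical sketch of such a helper, assuming the standard Oak property-index definition layout under /oak:index (node type oak:QueryIndexDefinition with type, propertyNames and reindex properties); the real test may define the index differently:

    // Hypothetical sketch: create a property index definition for propName if it is missing.
    private static void ensureIndex(Node root, String propName) throws RepositoryException {
        Node indexHome = root.getNode("oak:index");
        if (indexHome.hasNode(propName)) {
            return;
        }
        Node idx = indexHome.addNode(propName, "oak:QueryIndexDefinition");
        idx.setProperty("type", "property");
        idx.setProperty("propertyNames", new String[] { propName }, PropertyType.NAME);
        idx.setProperty("reindex", true);
        root.getSession().save();
    }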
Use of org.apache.jackrabbit.oak.plugins.document.DocumentMK in project jackrabbit-oak by Apache.
From class ConcurrentAddNodesClusterIT, method addNodesConcurrent().
@Test
public void addNodesConcurrent() throws Exception {
    for (int i = 0; i < NUM_CLUSTER_NODES; i++) {
        DocumentMK mk = new DocumentMK.Builder()
                .setMongoDB(createConnection().getDB())
                .setClusterId(i + 1)
                .open();
        mks.add(mk);
    }
    Map<String, Exception> exceptions = Collections.synchronizedMap(
            new HashMap<String, Exception>());
    for (int i = 0; i < mks.size(); i++) {
        DocumentMK mk = mks.get(i);
        Repository repo = new Jcr(mk.getNodeStore()).createRepository();
        repos.add(repo);
        workers.add(new Thread(new Worker(repo, exceptions), "Worker-" + (i + 1)));
    }
    for (Thread t : workers) {
        t.start();
    }
    for (Thread t : workers) {
        t.join();
    }
    for (Map.Entry<String, Exception> entry : exceptions.entrySet()) {
        // System.out.println("exception in thread " + entry.getKey());
        throw entry.getValue();
    }
}
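The Worker runnable is not shown above. A plausible outline, assuming each worker logs in to its repository, builds its own subtree via the createNodes() helper seen in addNodes(), and records failures in the shared map; the field layout and the loop parameters are assumptions:

    // Assumed outline of the Worker used above: any exception is recorded under the thread
    // name instead of being thrown, so the main test can rethrow it after all threads join.
    private final class Worker implements Runnable {
        private final Repository repo;
        private final Map<String, Exception> exceptions;

        Worker(Repository repo, Map<String, Exception> exceptions) {
            this.repo = repo;
            this.exceptions = exceptions;
        }

        @Override
        public void run() {
            try {
                Session s = repo.login(new SimpleCredentials("admin", "admin".toCharArray()));
                try {
                    ensureIndex(s.getRootNode(), PROP_NAME);
                    createNodes(s, Thread.currentThread().getName(), 1, 1, exceptions);
                } finally {
                    s.logout();
                }
            } catch (Exception e) {
                exceptions.put(Thread.currentThread().getName(), e);
            }
        }
    }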
Use of org.apache.jackrabbit.oak.plugins.document.DocumentMK in project jackrabbit-oak by Apache.
From class DocumentMKConcurrentAddTest, method testConcurrentAdd().
/**
 * Creates NB_THREADS microkernels, each committing two nodes (one parent,
 * one child) in its own thread. The nodes being committed by separate
 * threads do not overlap / conflict.
 *
 * @throws Exception
 */
@Test
public void testConcurrentAdd() throws Exception {
    // create workers
    List<Callable<String>> cs = new LinkedList<Callable<String>>();
    for (int i = 0; i < NB_THREADS; i++) {
        // each callable has its own microkernel
        // (try to assign a cluster id different from all other already existing node stores)
        final DocumentMK mk = createMicroKernel(super.mk.getNodeStore().getClusterId() + 1 + i);
        mks.add(mk);
        // diff for adding one node and one child node
        final List<String> stmts = new LinkedList<String>();
        stmts.add("+\"node" + i + "\":{}");
        stmts.add("+\"node" + i + "/child\":{}");
        // create callable
        Callable<String> c = new Callable<String>() {
            @Override
            public String call() throws Exception {
                // commit all statements, one at a time
                String r = null;
                for (String stmt : stmts) {
                    r = mk.commit("/", stmt, null, "msg");
                }
                return r;
            }
        };
        cs.add(c);
    }
    // run workers concurrently
    ExecutorService executor = Executors.newFixedThreadPool(NB_THREADS);
    List<Future<String>> fs = new LinkedList<Future<String>>();
    for (Callable<String> c : cs) {
        fs.add(executor.submit(c));
    }
    executor.shutdown();
    executor.awaitTermination(10, TimeUnit.SECONDS);
    // collect the results; Future#get() rethrows any exception from a failed commit
    for (Future<String> f : fs) {
        f.get();
    }
}
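createMicroKernel() comes from the test's setup and is not shown here. A hypothetical sketch, assuming it opens an additional MongoDB-backed DocumentMK with the requested cluster id (createConnection() is an assumed helper, mirroring the other tests above):

    // Hypothetical sketch: open another DocumentMK against the same MongoDB database,
    // but with its own cluster id so it behaves as a separate cluster node.
    private static DocumentMK createMicroKernel(int clusterId) throws Exception {
        return new DocumentMK.Builder()
                .setMongoDB(createConnection().getDB())
                .setClusterId(clusterId)
                .open();
    }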