Use of org.apache.jackrabbit.oak.api.CommitFailedException in project jackrabbit-oak by apache.
The class ResetClusterIdCommand, method deleteClusterId:
private static void deleteClusterId(NodeStore store) {
    NodeBuilder builder = store.getRoot().builder();
    NodeBuilder clusterConfigNode = builder.getChildNode(ClusterRepositoryInfo.CLUSTER_CONFIG_NODE);
    if (!clusterConfigNode.exists()) {
        // if the config node doesn't exist, there is nothing to delete
        System.out.println("clusterId was never set or already deleted.");
        return;
    }
    if (!clusterConfigNode.hasProperty(ClusterRepositoryInfo.CLUSTER_ID_PROP)) {
        // the config node exists, but the clusterId property does not,
        // so again there is nothing to delete
        System.out.println("clusterId was never set or already deleted.");
        return;
    }
    String oldClusterId = clusterConfigNode.getProperty(ClusterRepositoryInfo.CLUSTER_ID_PROP).getValue(Type.STRING);
    clusterConfigNode.removeProperty(ClusterRepositoryInfo.CLUSTER_ID_PROP);
    try {
        store.merge(builder, EmptyHook.INSTANCE, CommitInfo.EMPTY);
        System.out.println("clusterId deleted successfully. (old id was " + oldClusterId + ")");
    } catch (CommitFailedException e) {
        System.err.println("Failed to delete clusterId due to exception: " + e.getMessage());
        e.printStackTrace();
    }
}
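The method follows the usual Oak pattern: take a NodeBuilder from the current root, mutate it, and merge it back, with CommitFailedException signalling that the merge did not go through. A minimal, self-contained sketch of that pattern against an in-memory store follows; the class, node and property names are illustrative and not part of the Oak sources.

import org.apache.jackrabbit.oak.api.CommitFailedException;
import org.apache.jackrabbit.oak.plugins.memory.MemoryNodeStore;
import org.apache.jackrabbit.oak.spi.commit.CommitInfo;
import org.apache.jackrabbit.oak.spi.commit.EmptyHook;
import org.apache.jackrabbit.oak.spi.state.NodeBuilder;
import org.apache.jackrabbit.oak.spi.state.NodeStore;

public class RemovePropertySketch {
    public static void main(String[] args) throws CommitFailedException {
        NodeStore store = new MemoryNodeStore();
        // create a config-like child node with a property and persist it
        NodeBuilder builder = store.getRoot().builder();
        builder.child("exampleConfig").setProperty("exampleId", "some-id");
        store.merge(builder, EmptyHook.INSTANCE, CommitInfo.EMPTY);
        // then remove the property again in a second builder and merge
        builder = store.getRoot().builder();
        builder.getChildNode("exampleConfig").removeProperty("exampleId");
        try {
            store.merge(builder, EmptyHook.INSTANCE, CommitInfo.EMPTY);
            System.out.println("exampleId removed");
        } catch (CommitFailedException e) {
            // validation or conflict problems during merge surface here
            System.err.println("merge failed: " + e.getMessage());
        }
    }
}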
Use of org.apache.jackrabbit.oak.api.CommitFailedException in project jackrabbit-oak by apache.
The class PrivilegeDefinitionWriterTest, method testCommitFails:
@Test(expected = RepositoryException.class)
public void testCommitFails() throws Exception {
    Root r = Mockito.spy(root);
    doThrow(new CommitFailedException(CommitFailedException.OAK, 1, "msg")).when(r).commit();
    PrivilegeDefinitionWriter writer = new PrivilegeDefinitionWriter(r);
    writer.writeDefinition(new ImmutablePrivilegeDefinition("tmp", true, asList(JCR_READ_ACCESS_CONTROL, JCR_MODIFY_ACCESS_CONTROL)));
}
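The test expects a RepositoryException even though the stubbed commit() throws a CommitFailedException, because the writer converts the Oak-level exception into the JCR-level one. A small sketch of that conversion, assuming the standard CommitFailedException.asRepositoryException() helper; the CommitHelper class is hypothetical and not part of PrivilegeDefinitionWriter:

import javax.jcr.RepositoryException;
import org.apache.jackrabbit.oak.api.CommitFailedException;
import org.apache.jackrabbit.oak.api.Root;

final class CommitHelper {
    // Illustrative only: commit an Oak Root and rethrow any failure
    // as the JCR-level RepositoryException.
    static void commitOrRethrow(Root root) throws RepositoryException {
        try {
            root.commit();
        } catch (CommitFailedException e) {
            throw e.asRepositoryException();
        }
    }
}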
Use of org.apache.jackrabbit.oak.api.CommitFailedException in project jackrabbit-oak by apache.
The class IndexTrackerTest, method notifyFailedIndexing:
@Test
public void notifyFailedIndexing() throws Exception {
    createIndex("foo");
    // 1. Create and populate the index
    NodeState before = builder.getNodeState();
    builder.setProperty("foo", "bar");
    NodeState after = builder.getNodeState();
    NodeState indexed = hook.processCommit(before, after, CommitInfo.EMPTY);
    tracker.update(indexed);
    builder = indexed.builder();
    // 2. Corrupt the index
    indexed = corruptIndex("/oak:index/foo");
    builder = indexed.builder();
    // 3. Commit another change; indexing against the corrupt index must fail
    builder.setProperty("foo", "bar2");
    after = builder.getNodeState();
    try {
        hook.processCommit(before, after, CommitInfo.EMPTY);
        fail("Indexing should have failed");
    } catch (CommitFailedException ignore) {
    }
    assertTrue(corruptIndexHandler.getFailingIndexData("async").containsKey("/oak:index/foo"));
}
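The hook driven above is an index-update commit hook; failures in an index editor surface as a CommitFailedException from processCommit. A rough sketch of how such a hook can be assembled is shown below, using PropertyIndexEditorProvider as a stand-in (the real test wires its own editor provider and the corrupt-index handler, which are omitted here):

import org.apache.jackrabbit.oak.api.CommitFailedException;
import org.apache.jackrabbit.oak.plugins.index.IndexUpdateProvider;
import org.apache.jackrabbit.oak.plugins.index.property.PropertyIndexEditorProvider;
import org.apache.jackrabbit.oak.spi.commit.CommitInfo;
import org.apache.jackrabbit.oak.spi.commit.EditorHook;
import org.apache.jackrabbit.oak.spi.state.NodeState;

final class IndexHookSketch {
    // Sketch: an EditorHook wrapping an IndexUpdateProvider turns the index
    // editors into a commit hook; index failures fail the commit.
    static NodeState indexCommit(NodeState before, NodeState after) throws CommitFailedException {
        EditorHook hook = new EditorHook(new IndexUpdateProvider(new PropertyIndexEditorProvider()));
        return hook.processCommit(before, after, CommitInfo.EMPTY);
    }
}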
Use of org.apache.jackrabbit.oak.api.CommitFailedException in project jackrabbit-oak by apache.
The class OutOfBandIndexer, method preformIndexUpdate:
private void preformIndexUpdate(NodeState baseState) throws IOException, CommitFailedException {
    NodeBuilder builder = copyOnWriteStore.getRoot().builder();
    IndexUpdate indexUpdate = new IndexUpdate(createIndexEditorProvider(), REINDEX_LANE, copyOnWriteStore.getRoot(), builder, this, this, CommitInfo.EMPTY, CorruptIndexHandler.NOOP);
    configureEstimators(indexUpdate);
    // Do not use EmptyState as 'before', otherwise the IndexUpdate would
    // unnecessarily traverse the whole repository after reindexing. With
    // baseState it only traverses the diff, i.e. those index definition
    // paths whose lane has been changed.
    NodeState before = baseState;
    NodeState after = copyOnWriteStore.getRoot();
    CommitFailedException exception = EditorDiff.process(VisibleEditor.wrap(indexUpdate), before, after);
    if (exception != null) {
        throw exception;
    }
}
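Note that EditorDiff.process returns the CommitFailedException instead of throwing it, which is why the method checks for null and rethrows. A short sketch of a custom editor driven the same way; ForbidPropertyEditor is a hypothetical name and the constraint it enforces is made up for illustration:

import org.apache.jackrabbit.oak.api.CommitFailedException;
import org.apache.jackrabbit.oak.api.PropertyState;
import org.apache.jackrabbit.oak.spi.commit.DefaultEditor;
import org.apache.jackrabbit.oak.spi.commit.EditorDiff;
import org.apache.jackrabbit.oak.spi.state.NodeState;

class ForbidPropertyEditor extends DefaultEditor {
    @Override
    public void propertyAdded(PropertyState after) throws CommitFailedException {
        if ("forbidden".equals(after.getName())) {
            // editors veto a change by throwing CommitFailedException
            throw new CommitFailedException(CommitFailedException.CONSTRAINT, 1, "property 'forbidden' is not allowed");
        }
    }

    // Drive the editor over a diff; the failure comes back as a return value.
    static void check(NodeState before, NodeState after) throws CommitFailedException {
        CommitFailedException e = EditorDiff.process(new ForbidPropertyEditor(), before, after);
        if (e != null) {
            throw e;
        }
    }
}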
Use of org.apache.jackrabbit.oak.api.CommitFailedException in project jackrabbit-oak by apache.
The class CompactionAndCleanupIT, method testMixedSegments:
/**
 * Regression test for OAK-2192, checking for mixed segments. This test does not
 * cover OAK-3348, i.e. it does not assert that the segment graph is free of
 * cross gc generation references.
 */
@Test
public void testMixedSegments() throws Exception {
    FileStore store = fileStoreBuilder(getFileStoreFolder()).withMaxFileSize(2).withMemoryMapping(true).build();
    final SegmentNodeStore nodeStore = SegmentNodeStoreBuilders.builder(store).build();
    final AtomicBoolean compactionSuccess = new AtomicBoolean(true);
    NodeBuilder root = nodeStore.getRoot().builder();
    createNodes(root.setChildNode("test"), 10, 3);
    nodeStore.merge(root, EmptyHook.INSTANCE, CommitInfo.EMPTY);
    final Set<UUID> beforeSegments = new HashSet<UUID>();
    collectSegments(store.getReader(), store.getRevisions(), beforeSegments);
    final AtomicReference<Boolean> run = new AtomicReference<Boolean>(true);
    final List<String> failedCommits = newArrayList();
    Thread[] threads = new Thread[10];
    for (int k = 0; k < threads.length; k++) {
        final int threadId = k;
        threads[k] = new Thread(new Runnable() {

            @Override
            public void run() {
                for (int j = 0; run.get(); j++) {
                    String nodeName = "b-" + threadId + "," + j;
                    try {
                        NodeBuilder root = nodeStore.getRoot().builder();
                        root.setChildNode(nodeName);
                        nodeStore.merge(root, EmptyHook.INSTANCE, CommitInfo.EMPTY);
                        Thread.sleep(5);
                    } catch (CommitFailedException e) {
                        failedCommits.add(nodeName);
                    } catch (InterruptedException e) {
                        Thread.interrupted();
                        break;
                    }
                }
            }
        });
        threads[k].start();
    }
    store.compact();
    run.set(false);
    for (Thread t : threads) {
        t.join();
    }
    store.flush();
    assumeTrue("Failed to acquire compaction lock", compactionSuccess.get());
    assertTrue("Failed commits: " + failedCommits, failedCommits.isEmpty());
    Set<UUID> afterSegments = new HashSet<UUID>();
    collectSegments(store.getReader(), store.getRevisions(), afterSegments);
    try {
        for (UUID u : beforeSegments) {
            assertFalse("Mixed segments found: " + u, afterSegments.contains(u));
        }
    } finally {
        store.close();
    }
}
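When many writers race, as the threads above do while compaction runs, a CommitFailedException is often transient and the commit can simply be retried. A minimal retry sketch built only on the public NodeStore API; the class name, retry count and back-off are illustrative and not taken from the test:

import org.apache.jackrabbit.oak.api.CommitFailedException;
import org.apache.jackrabbit.oak.spi.commit.CommitInfo;
import org.apache.jackrabbit.oak.spi.commit.EmptyHook;
import org.apache.jackrabbit.oak.spi.state.NodeBuilder;
import org.apache.jackrabbit.oak.spi.state.NodeStore;

final class MergeRetrySketch {
    // Sketch only: retry the merge a few times before giving up.
    static void addChildWithRetry(NodeStore nodeStore, String childName) throws CommitFailedException, InterruptedException {
        CommitFailedException last = null;
        for (int attempt = 0; attempt < 3; attempt++) {
            NodeBuilder root = nodeStore.getRoot().builder();
            root.setChildNode(childName);
            try {
                nodeStore.merge(root, EmptyHook.INSTANCE, CommitInfo.EMPTY);
                return;
            } catch (CommitFailedException e) {
                last = e;
                Thread.sleep(10);
            }
        }
        throw last;
    }
}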