Use of org.apache.jackrabbit.oak.segment.file.FileStore in project jackrabbit-oak by apache.
The class CompactionAndCleanupIT, method crossGCDeduplicationTest.
@Test
public void crossGCDeduplicationTest() throws Exception {
    try (FileStore fileStore = fileStoreBuilder(getFileStoreFolder()).build()) {
        SegmentNodeStore nodeStore = SegmentNodeStoreBuilders.builder(fileStore).build();
        // Create some initial content
        NodeBuilder builder = nodeStore.getRoot().builder();
        builder.setChildNode("a").setChildNode("aa");
        builder.setChildNode("b").setChildNode("bb");
        builder.setChildNode("c").setChildNode("cc");
        nodeStore.merge(builder, EmptyHook.INSTANCE, CommitInfo.EMPTY);
        NodeState a = nodeStore.getRoot().getChildNode("a");
        // Stage additional changes on a builder that is only merged after compaction
        builder = nodeStore.getRoot().builder();
        builder.setChildNode("x").setChildNode("xx");
        // Full compaction must not change the content or its stable ids
        SegmentNodeState uncompacted = (SegmentNodeState) nodeStore.getRoot();
        fileStore.compactFull();
        NodeState compacted = nodeStore.getRoot();
        assertEquals(uncompacted, compacted);
        assertStableIds(uncompacted, compacted, "/root");
        // Merging the pre-compaction builder must end up in the new, non-GC generation
        builder.setChildNode("y").setChildNode("yy");
        builder.getChildNode("a").remove();
        NodeState deferCompacted = nodeStore.merge(builder, EmptyHook.INSTANCE, CommitInfo.EMPTY);
        assertEquals(uncompacted.getSegment().getGcGeneration().nextFull().nonGC(),
                ((SegmentNodeState) deferCompacted).getSegment().getGcGeneration());
    }
}
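The assertStableIds helper used above is defined elsewhere in CompactionAndCleanupIT and is not shown on this page. What follows is a minimal sketch only, assuming the helper recursively compares the stable ids of corresponding SegmentNodeStates; the method name and arguments are taken from the call site, while the body is an assumption and may differ from the real implementation.

// Hypothetical sketch: recursively assert that corresponding nodes in the
// uncompacted and compacted trees expose the same stable id after compaction.
private static void assertStableIds(NodeState node1, NodeState node2, String path) {
    assertTrue("Nodes at " + path + " should be SegmentNodeStates",
            node1 instanceof SegmentNodeState && node2 instanceof SegmentNodeState);
    SegmentNodeState sns1 = (SegmentNodeState) node1;
    SegmentNodeState sns2 = (SegmentNodeState) node2;
    assertEquals("Nodes at " + path + " should have the same stable id",
            sns1.getStableId(), sns2.getStableId());
    for (ChildNodeEntry cne : node1.getChildNodeEntries()) {
        assertStableIds(cne.getNodeState(), node2.getChildNode(cne.getName()),
                path + "/" + cne.getName());
    }
}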
Use of org.apache.jackrabbit.oak.segment.file.FileStore in project jackrabbit-oak by apache.
The class CompactionAndCleanupIT, method offlineCompactionTool.
/**
* Test for the Offline compaction tool (OAK-5971)
*/
@Test
public void offlineCompactionTool() throws Exception {
    SegmentGCOptions gcOptions = defaultGCOptions().setOffline();
    ScheduledExecutorService executor = newSingleThreadScheduledExecutor();
    FileStore fileStore = fileStoreBuilder(getFileStoreFolder())
            .withMaxFileSize(1)
            .withGCOptions(gcOptions)
            .withStatisticsProvider(new DefaultStatisticsProvider(executor))
            .build();
    SegmentNodeStore nodeStore = SegmentNodeStoreBuilders.builder(fileStore).build();
    try {
        NodeBuilder root = nodeStore.getRoot().builder();
        root.child("content");
        nodeStore.merge(root, EmptyHook.INSTANCE, CommitInfo.EMPTY);
        fileStore.flush();
    } finally {
        fileStore.close();
    }
    Compact.builder().withPath(getFileStoreFolder()).build().run();
    fileStore = fileStoreBuilder(getFileStoreFolder())
            .withMaxFileSize(1)
            .withGCOptions(gcOptions)
            .withStatisticsProvider(new DefaultStatisticsProvider(executor))
            .build();
    nodeStore = SegmentNodeStoreBuilders.builder(fileStore).build();
    try {
        assertTrue(nodeStore.getRoot().hasChildNode("content"));
    } finally {
        fileStore.close();
    }
}
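Both FileStore instances in this test are opened on the same getFileStoreFolder() path, which is why the store reopened after the offline compaction still sees the "content" node. The folder fixture itself is not reproduced on this page; below is a minimal sketch, assuming a standard JUnit TemporaryFolder rule (the rule field and its parent directory are assumptions, not the actual fixture).

// Hypothetical fixture sketch: the tests on this page only require that
// getFileStoreFolder() returns the same directory for the lifetime of a test.
@Rule
public TemporaryFolder folder = new TemporaryFolder(new File("target"));

private File getFileStoreFolder() {
    return folder.getRoot();
}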
Use of org.apache.jackrabbit.oak.segment.file.FileStore in project jackrabbit-oak by apache.
The class CompactionAndCleanupIT, method equalContentAfterOC.
@Test
public void equalContentAfterOC() throws Exception {
    SegmentGCOptions gcOptions = defaultGCOptions().setOffline();
    ScheduledExecutorService executor = newSingleThreadScheduledExecutor();
    try (FileStore fileStore = fileStoreBuilder(getFileStoreFolder()).withGCOptions(gcOptions).build()) {
        SegmentNodeStore nodeStore = SegmentNodeStoreBuilders.builder(fileStore).build();
        // Add initial content
        NodeBuilder rootBuilder = nodeStore.getRoot().builder();
        addNodes(rootBuilder, 8, "p");
        addProperties(rootBuilder, 3);
        nodeStore.merge(rootBuilder, EmptyHook.INSTANCE, CommitInfo.EMPTY);
        NodeState initialRoot = nodeStore.getRoot();
        assertTrue(fileStore.compactFull());
        NodeState compactedRoot = nodeStore.getRoot();
        assertTrue(initialRoot != compactedRoot);
        assertEquals(initialRoot, compactedRoot);
    }
}
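addNodes and addProperties are private fixtures of CompactionAndCleanupIT that are not shown on this page. The sketch below is an assumption built only from the call sites: addNodes is taken to build a tree of the given depth using the supplied prefix for child names, and addProperties to set a few string properties; the actual helpers may generate different content.

// Hypothetical sketch of the content-generating fixtures referenced above.
private static void addNodes(NodeBuilder builder, int depth, String prefix) {
    if (depth > 0) {
        NodeBuilder child1 = builder.setChildNode(prefix + "1");
        addNodes(child1, depth - 1, prefix);
        NodeBuilder child2 = builder.setChildNode(prefix + "2");
        addNodes(child2, depth - 1, prefix);
    }
}

private static void addProperties(NodeBuilder builder, int count) {
    for (int i = 0; i < count; i++) {
        builder.setProperty("property-" + i, "value-" + i);
    }
}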
Use of org.apache.jackrabbit.oak.segment.file.FileStore in project jackrabbit-oak by apache.
The class CompactionAndCleanupIT, method concurrentWritesCleanupZeroReclaimedSize.
@Test
public void concurrentWritesCleanupZeroReclaimedSize() throws Exception {
    ScheduledExecutorService scheduler = newSingleThreadScheduledExecutor();
    StatisticsProvider statsProvider = new DefaultStatisticsProvider(scheduler);
    final FileStoreGCMonitor fileStoreGCMonitor = new FileStoreGCMonitor(Clock.SIMPLE);
    final FileStore fileStore = fileStoreBuilder(getFileStoreFolder())
            .withGCOptions(defaultGCOptions().setRetainedGenerations(2))
            .withGCMonitor(fileStoreGCMonitor)
            .withStatisticsProvider(statsProvider)
            .withMaxFileSize(1)
            .withMemoryMapping(false)
            .build();
    final SegmentNodeStore nodeStore = SegmentNodeStoreBuilders.builder(fileStore).build();
    ExecutorService executorService = newFixedThreadPool(100);
    final AtomicInteger counter = new AtomicInteger();
    try {
        Callable<Void> concurrentWriteTask = new Callable<Void>() {

            @Override
            public Void call() throws Exception {
                NodeBuilder builder = nodeStore.getRoot().builder();
                builder.setProperty("blob-" + counter.getAndIncrement(), createBlob(nodeStore, 25 * 25));
                nodeStore.merge(builder, EmptyHook.INSTANCE, CommitInfo.EMPTY);
                fileStore.flush();
                return null;
            }
        };
        List<Future<?>> results = newArrayList();
        for (int i = 0; i < 100; i++) {
            results.add(executorService.submit(concurrentWriteTask));
        }
        Thread.sleep(100);
        fileStore.cleanup();
        for (Future<?> result : results) {
            assertNull(result.get());
        }
        long reclaimedSize = fileStoreGCMonitor.getLastReclaimedSize();
        assertEquals("Expected reclaimed size to be 0, but was: " + reclaimedSize, 0, reclaimedSize);
    } finally {
        new ExecutorCloser(executorService).close();
        fileStore.close();
        new ExecutorCloser(scheduler).close();
    }
}
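The createBlob fixture used by the write task is likewise not shown here. A minimal sketch, assuming it streams the requested number of random bytes into the node store; the use of Random and ByteArrayInputStream is an assumption, not the actual helper.

// Hypothetical sketch: create a blob of the given size filled with random bytes.
private static Blob createBlob(NodeStore nodeStore, int size) throws IOException {
    byte[] data = new byte[size];
    new Random().nextBytes(data);
    return nodeStore.createBlob(new ByteArrayInputStream(data));
}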
Use of org.apache.jackrabbit.oak.segment.file.FileStore in project jackrabbit-oak by apache.
The class CompactionAndCleanupIT, method testCancelCompactionSNFE.
/**
* See OAK-5517: SNFE when running compaction after a cancelled gc
*/
@Test
public void testCancelCompactionSNFE() throws Throwable {
    final FileStore fileStore = fileStoreBuilder(getFileStoreFolder())
            .withGCOptions(defaultGCOptions().setRetainedGenerations(2).setEstimationDisabled(true))
            .withMaxFileSize(1)
            .build();
    try {
        SegmentNodeStore nodeStore = SegmentNodeStoreBuilders.builder(fileStore).build();
        final Callable<Void> cancel = new Callable<Void>() {

            @Override
            public Void call() throws Exception {
                // Give the compaction thread a head start
                sleepUninterruptibly(1000, MILLISECONDS);
                fileStore.cancelGC();
                return null;
            }
        };
        for (int k = 0; k < 100; k++) {
            NodeBuilder builder = nodeStore.getRoot().builder();
            addNodes(builder, 10, k + "-");
            nodeStore.merge(builder, EmptyHook.INSTANCE, CommitInfo.EMPTY);
            fileStore.flush();
            // Cancelling gc should not cause a SNFE on subsequent gc runs
            runAsync(cancel);
            fileStore.fullGC();
        }
    } finally {
        fileStore.close();
    }
}
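runAsync is another helper defined elsewhere in the test class. Below is a minimal sketch, assuming it simply executes the Callable on a background thread so that the cancellation races with fileStore.fullGC(); the plain Thread is an assumption, and the real helper may use an executor instead.

// Hypothetical sketch: run the cancellation task concurrently with the gc call.
private static <T> void runAsync(final Callable<T> task) {
    new Thread(new Runnable() {

        @Override
        public void run() {
            try {
                task.call();
            } catch (Exception e) {
                throw new RuntimeException(e);
            }
        }
    }).start();
}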