Use of org.apache.jackrabbit.oak.segment.file.FileStore in the Apache jackrabbit-oak project: class CompactionAndCleanupIT, method cancelOfflineCompaction.
/**
 * Repeatedly cancelling an offline compaction must not corrupt the
 * repository: after {@code compactFull()} returns, the root must still
 * exist and be equal to the pre-compaction root. See OAK-7050.
 */
@Test
public void cancelOfflineCompaction() throws Exception {
    // Flag polled by the cancelling thread; cleared in finally so the
    // thread terminates even if the test body throws.
    final AtomicBoolean cancelCompaction = new AtomicBoolean(true);
    try (FileStore fileStore = fileStoreBuilder(getFileStoreFolder()).withGCOptions(defaultGCOptions().setOffline()).build()) {
        SegmentNodeStore nodeStore = SegmentNodeStoreBuilders.builder(fileStore).build();
        // Create ~2MB of data
        NodeBuilder extra = nodeStore.getRoot().builder();
        NodeBuilder content = extra.child("content");
        for (int i = 0; i < 10000; i++) {
            NodeBuilder c = content.child("c" + i);
            for (int j = 0; j < 1000; j++) {
                // Key on the inner index so each iteration creates a distinct
                // property. Keying on "p" + i would overwrite the same
                // property 1000 times and leave far less than ~2MB of data.
                c.setProperty("p" + j, "v" + j);
            }
        }
        nodeStore.merge(extra, EmptyHook.INSTANCE, CommitInfo.EMPTY);
        fileStore.flush();
        NodeState uncompactedRoot = nodeStore.getRoot();
        // Keep cancelling compaction while compactFull() runs.
        new Thread(() -> {
            while (cancelCompaction.get()) {
                fileStore.cancelGC();
            }
        }).start();
        fileStore.compactFull();
        // Cancelling compaction must not corrupt the repository. See OAK-7050.
        NodeState compactedRoot = nodeStore.getRoot();
        assertTrue(compactedRoot.exists());
        assertEquals(uncompactedRoot, compactedRoot);
    } finally {
        cancelCompaction.set(false);
    }
}
Use of org.apache.jackrabbit.oak.segment.file.FileStore in the Apache jackrabbit-oak project: class CompactionAndCleanupIT, method offlineCompaction.
/**
 * Exercises offline compaction plus cleanup over several GC cycles and
 * asserts the expected growth/shrinkage of the store size: a removed 5MB
 * blob is reclaimed by the first cycle, a live blob survives later cycles,
 * and no data is lost.
 */
@Test
public void offlineCompaction() throws Exception {
    SegmentGCOptions gcOptions = defaultGCOptions().setOffline();
    ScheduledExecutorService executor = newSingleThreadScheduledExecutor();
    FileStore fileStore = fileStoreBuilder(getFileStoreFolder()).withMaxFileSize(1).withGCOptions(gcOptions).withStatisticsProvider(new DefaultStatisticsProvider(executor)).build();
    SegmentNodeStore nodeStore = SegmentNodeStoreBuilders.builder(fileStore).build();
    try {
        // 5MB blob
        int blobSize = 5 * 1024 * 1024;
        // Create ~2MB of data
        NodeBuilder extra = nodeStore.getRoot().builder();
        NodeBuilder content = extra.child("content");
        for (int i = 0; i < 10000; i++) {
            NodeBuilder c = content.child("c" + i);
            for (int j = 0; j < 1000; j++) {
                // Key on the inner index so each iteration creates a distinct
                // property; "p" + i would overwrite one property 1000 times.
                c.setProperty("p" + j, "v" + j);
            }
        }
        nodeStore.merge(extra, EmptyHook.INSTANCE, CommitInfo.EMPTY);
        fileStore.flush();
        long size1 = fileStore.getStats().getApproximateSize();
        log.debug("File store size {}", byteCountToDisplaySize(size1));
        // Create a property with 5 MB blob
        NodeBuilder builder = nodeStore.getRoot().builder();
        builder.setProperty("blob1", createBlob(nodeStore, blobSize));
        nodeStore.merge(builder, EmptyHook.INSTANCE, CommitInfo.EMPTY);
        fileStore.flush();
        long size2 = fileStore.getStats().getApproximateSize();
        assertTrue("the store should grow", size2 > size1);
        assertTrue("the store should grow of at least the size of the blob", size2 - size1 > blobSize);
        // Now remove the property. No gc yet -> size doesn't shrink
        // (the store is append-only, so the removal commit adds data).
        builder = nodeStore.getRoot().builder();
        builder.removeProperty("blob1");
        nodeStore.merge(builder, EmptyHook.INSTANCE, CommitInfo.EMPTY);
        fileStore.flush();
        long size3 = fileStore.getStats().getApproximateSize();
        assertTrue("the size should grow", size3 > size2);
        // 1st gc cycle -> 1st blob should get collected
        fileStore.compactFull();
        fileStore.cleanup();
        long size4 = fileStore.getStats().getApproximateSize();
        assertTrue("the store should shrink", size4 < size3);
        assertTrue("the store should shrink of at least the size of the blob", size3 - size4 >= blobSize);
        // Add another 5MB binary
        builder = nodeStore.getRoot().builder();
        builder.setProperty("blob2", createBlob(nodeStore, blobSize));
        nodeStore.merge(builder, EmptyHook.INSTANCE, CommitInfo.EMPTY);
        fileStore.flush();
        long size5 = fileStore.getStats().getApproximateSize();
        assertTrue("the store should grow", size5 > size4);
        assertTrue("the store should grow of at least the size of the blob", size5 - size4 > blobSize);
        // 2nd gc cycle -> 2nd blob should *not* be collected
        fileStore.compactFull();
        fileStore.cleanup();
        long size6 = fileStore.getStats().getApproximateSize();
        assertTrue("the blob should not be collected", size6 > blobSize);
        // 3rd gc cycle -> no significant change
        fileStore.compactFull();
        fileStore.cleanup();
        long size7 = fileStore.getStats().getApproximateSize();
        assertTrue("the blob should not be collected", size7 > blobSize);
        // No data loss
        byte[] blob = ByteStreams.toByteArray(nodeStore.getRoot().getProperty("blob2").getValue(Type.BINARY).getNewStream());
        assertEquals(blobSize, blob.length);
    } finally {
        fileStore.close();
        // Stop the statistics executor; leaving it running leaks a thread
        // into the rest of the test JVM.
        executor.shutdown();
    }
}
Use of org.apache.jackrabbit.oak.segment.file.FileStore in the Apache jackrabbit-oak project: class CompactionAndCleanupIT, method concurrentCleanup.
/**
 * Test asserting OAK-4700: Concurrent cleanup must not remove segments that are still reachable
 */
@Test
public void concurrentCleanup() throws Exception {
    File fileStoreFolder = getFileStoreFolder();
    final FileStore fileStore = fileStoreBuilder(fileStoreFolder).withMaxFileSize(1).build();
    final SegmentNodeStore nodeStore = SegmentNodeStoreBuilders.builder(fileStore).build();
    ExecutorService executorService = newFixedThreadPool(50);
    final AtomicInteger counter = new AtomicInteger();
    try {
        // Task committing a uniquely named binary and flushing the store.
        Callable<Void> writer = () -> {
            NodeBuilder builder = nodeStore.getRoot().builder();
            builder.setProperty("blob-" + counter.getAndIncrement(), createBlob(nodeStore, 512 * 512));
            nodeStore.merge(builder, EmptyHook.INSTANCE, CommitInfo.EMPTY);
            fileStore.flush();
            return null;
        };
        // Task running cleanup concurrently with the writers.
        Callable<Void> cleaner = () -> {
            fileStore.cleanup();
            return null;
        };
        // Interleave 25 writers with 25 cleaners.
        List<Future<?>> futures = newArrayList();
        for (int i = 0; i < 50; i++) {
            futures.add(executorService.submit(i % 2 == 0 ? writer : cleaner));
        }
        // Propagate any task failure; Callable<Void> yields null on success.
        for (Future<?> future : futures) {
            assertNull(future.get());
        }
        // Every tar file must still carry generation 'a', i.e. cleanup
        // must not have rewritten (and thus removed data from) any file.
        for (String fileName : fileStoreFolder.list()) {
            if (fileName.endsWith(".tar")) {
                char generation = fileName.charAt(fileName.length() - "a.tar".length());
                assertEquals("Expected nothing to be cleaned but generation '" + generation + "' for file " + fileName + " indicates otherwise.", "a", valueOf(generation));
            }
        }
    } finally {
        new ExecutorCloser(executorService).close();
        fileStore.close();
    }
}
Use of org.apache.jackrabbit.oak.segment.file.FileStore in the Apache jackrabbit-oak project: class HeavyWriteIT, method heavyWrite.
/**
 * Stress test: performs 100 create/remove cycles of a sizeable subtree
 * while a background thread periodically runs full GC on the same store.
 */
@Test
public void heavyWrite() throws Exception {
    final FileStore store = fileStoreBuilder(getFileStoreFolder()).withMaxFileSize(128).withMemoryMapping(false).withGCOptions(defaultGCOptions().setRetainedGenerations(42)).build();
    final SegmentNodeStore nodeStore = SegmentNodeStoreBuilders.builder(store).build();
    int writes = 100;
    // Cleared in finally to let the GC thread terminate.
    final AtomicBoolean run = new AtomicBoolean(true);
    Thread gcThread = new Thread(() -> {
        while (run.get()) {
            try {
                store.fullGC();
                Thread.sleep(5000);
            } catch (InterruptedException e) {
                // Restore the interrupt flag and stop the background loop.
                Thread.currentThread().interrupt();
                break;
            } catch (IOException e) {
                // Best effort: GC failures are reported but must not abort
                // the stress loop.
                e.printStackTrace();
            }
        }
    });
    gcThread.start();
    try {
        for (int i = 1; i <= writes; i++) {
            // Create a subtree, commit it, then remove it again so GC has
            // garbage to collect.
            NodeBuilder root = nodeStore.getRoot().builder();
            NodeBuilder test = root.setChildNode("test");
            createNodes(nodeStore, test, 10, 2);
            nodeStore.merge(root, EmptyHook.INSTANCE, CommitInfo.EMPTY);
            root = nodeStore.getRoot().builder();
            root.getChildNode("test").remove();
            nodeStore.merge(root, EmptyHook.INSTANCE, CommitInfo.EMPTY);
        }
    } finally {
        run.set(false);
        gcThread.join();
        store.close();
    }
}
Use of org.apache.jackrabbit.oak.segment.file.FileStore in the Apache jackrabbit-oak project: class ManyChildNodesIT, method manyChildNodesOC.
/**
 * Offline compaction should be able to deal with many child nodes in constant memory.
 */
@Test
public void manyChildNodesOC() throws Exception {
    try (FileStore fileStore = createFileStore()) {
        SegmentNodeStore nodeStore = SegmentNodeStoreBuilders.builder(fileStore).build();
        // Pin the current head via a checkpoint that never expires.
        nodeStore.checkpoint(Long.MAX_VALUE);
        NodeBuilder rootBuilder = nodeStore.getRoot().builder();
        addManyNodes(rootBuilder);
        nodeStore.merge(rootBuilder, EmptyHook.INSTANCE, CommitInfo.EMPTY);
        NodeState before = nodeStore.getRoot();
        // compactFull() must succeed and produce a new (rewritten) root...
        assertTrue(fileStore.compactFull());
        NodeState after = nodeStore.getRoot();
        assertTrue(before != after);
        // ...that is nevertheless logically equal to the old one.
        assertEquals(before, after);
    }
}
Aggregations