Use of org.apache.jackrabbit.oak.segment.file.FileStore in project jackrabbit-oak by apache.
The class CompactionAndCleanupIT, method randomAccessFileConcurrentReadAndLength.
@Test
public void randomAccessFileConcurrentReadAndLength() throws Exception {
    final FileStore fileStore = fileStoreBuilder(getFileStoreFolder())
            .withGCOptions(defaultGCOptions().setRetainedGenerations(2))
            .withMaxFileSize(1)
            .withMemoryMapping(false)
            .build();
    final SegmentNodeStore nodeStore = SegmentNodeStoreBuilders.builder(fileStore).build();
    ExecutorService executorService = newFixedThreadPool(300);
    final AtomicInteger counter = new AtomicInteger();
    try {
        Callable<Void> concurrentWriteTask = new Callable<Void>() {

            @Override
            public Void call() throws Exception {
                NodeBuilder builder = nodeStore.getRoot().builder();
                builder.setProperty("blob-" + counter.getAndIncrement(), createBlob(nodeStore, 25 * 25));
                nodeStore.merge(builder, EmptyHook.INSTANCE, CommitInfo.EMPTY);
                fileStore.flush();
                return null;
            }
        };
        Callable<Void> concurrentCleanupTask = new Callable<Void>() {

            @Override
            public Void call() throws Exception {
                fileStore.cleanup();
                return null;
            }
        };
        Callable<Void> concurrentReferenceCollector = new Callable<Void>() {

            @Override
            public Void call() throws Exception {
                fileStore.collectBlobReferences(s -> {
                    // Do nothing.
                });
                return null;
            }
        };
        List<Future<?>> results = newArrayList();
        for (int i = 0; i < 100; i++) {
            results.add(executorService.submit(concurrentWriteTask));
            results.add(executorService.submit(concurrentCleanupTask));
            results.add(executorService.submit(concurrentReferenceCollector));
        }
        for (Future<?> result : results) {
            assertNull(result.get());
        }
    } finally {
        new ExecutorCloser(executorService).close();
        fileStore.close();
    }
}
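The tests in this class rely on two helpers that are defined elsewhere in CompactionAndCleanupIT and are not part of the snippets shown here: getFileStoreFolder() and createBlob(NodeStore, int). A minimal sketch, assuming the folder comes from a standard JUnit TemporaryFolder rule and the blob is simply a buffer of random bytes, might look like this:

    // Hypothetical sketches of helpers used above; the actual implementations
    // live elsewhere in CompactionAndCleanupIT and may differ.
    @Rule
    public TemporaryFolder folder = new TemporaryFolder(new File("target"));

    private File getFileStoreFolder() {
        // Each test gets a fresh, empty directory for its FileStore.
        return folder.getRoot();
    }

    private static Blob createBlob(NodeStore nodeStore, int size) throws IOException {
        byte[] data = new byte[size];
        // Random content defeats value de-duplication, so the blob really occupies ~size bytes.
        new Random().nextBytes(data);
        return nodeStore.createBlob(new ByteArrayInputStream(data));
    }

With helpers of this shape, each concurrent write task above stores a small random binary under a unique property name while cleanup and blob-reference collection run in parallel against the same store.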
Use of org.apache.jackrabbit.oak.segment.file.FileStore in project jackrabbit-oak by apache.
The class CompactionAndCleanupIT, method compactionNoBinaryClone.
@Test
public void compactionNoBinaryClone() throws Exception {
    ScheduledExecutorService executor = newSingleThreadScheduledExecutor();
    FileStore fileStore = fileStoreBuilder(getFileStoreFolder())
            .withGCOptions(defaultGCOptions().setRetainedGenerations(2))
            .withStatisticsProvider(new DefaultStatisticsProvider(executor))
            .withMaxFileSize(1)
            .build();
    SegmentNodeStore nodeStore = SegmentNodeStoreBuilders.builder(fileStore).build();
    try {
        // 5MB blob
        int blobSize = 5 * 1024 * 1024;
        // Create ~2MB of data
        NodeBuilder extra = nodeStore.getRoot().builder();
        NodeBuilder content = extra.child("content");
        for (int i = 0; i < 10000; i++) {
            NodeBuilder c = content.child("c" + i);
            for (int j = 0; j < 1000; j++) {
                c.setProperty("p" + i, "v" + i);
            }
        }
        nodeStore.merge(extra, EmptyHook.INSTANCE, CommitInfo.EMPTY);
        fileStore.flush();
        long size1 = fileStore.getStats().getApproximateSize();
        log.debug("File store size {}", byteCountToDisplaySize(size1));
        // Create a property with a 5MB blob
        NodeBuilder builder = nodeStore.getRoot().builder();
        builder.setProperty("blob1", createBlob(nodeStore, blobSize));
        nodeStore.merge(builder, EmptyHook.INSTANCE, CommitInfo.EMPTY);
        fileStore.flush();
        long size2 = fileStore.getStats().getApproximateSize();
        assertTrue("the store should grow", size2 > size1);
        assertTrue("the store should grow by at least the size of the blob", size2 - size1 >= blobSize);
        // Now remove the property. No gc yet -> size doesn't shrink
        builder = nodeStore.getRoot().builder();
        builder.removeProperty("blob1");
        nodeStore.merge(builder, EmptyHook.INSTANCE, CommitInfo.EMPTY);
        fileStore.flush();
        long size3 = fileStore.getStats().getApproximateSize();
        assertTrue("the store should grow", size3 > size2);
        // 1st gc cycle -> no reclaimable garbage...
        fileStore.compactFull();
        fileStore.cleanup();
        long size4 = fileStore.getStats().getApproximateSize();
        // Add another 5MB binary, doubling the blob size
        builder = nodeStore.getRoot().builder();
        builder.setProperty("blob2", createBlob(nodeStore, blobSize));
        nodeStore.merge(builder, EmptyHook.INSTANCE, CommitInfo.EMPTY);
        fileStore.flush();
        long size5 = fileStore.getStats().getApproximateSize();
        assertTrue("the store should grow", size5 > size4);
        assertTrue("the store should grow by at least the size of the blob", size5 - size4 >= blobSize);
        // 2nd gc cycle -> 1st blob should get collected
        fileStore.compactFull();
        fileStore.cleanup();
        long size6 = fileStore.getStats().getApproximateSize();
        assertTrue("the store should shrink", size6 < size5);
        assertTrue("the store should shrink by at least the size of the blob", size5 - size6 >= blobSize);
        // 3rd gc cycle -> no significant change
        fileStore.compactFull();
        fileStore.cleanup();
        long size7 = fileStore.getStats().getApproximateSize();
        // No data loss
        byte[] blob = ByteStreams.toByteArray(nodeStore.getRoot()
                .getProperty("blob2").getValue(Type.BINARY).getNewStream());
        assertEquals(blobSize, blob.length);
    } finally {
        fileStore.close();
    }
}
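Why the removed blob1 is only reclaimed on the second gc cycle: with setRetainedGenerations(2), cleanup keeps segments from the two most recent GC generations, so the segments written before the first compactFull() only fall out of the retention window after the second compactFull()/cleanup() pair, which is when the store size finally shrinks. The size logging also depends on declarations the snippet does not show; a minimal sketch, assuming the usual SLF4J logger and the Apache Commons IO byteCountToDisplaySize helper, would be:

    // Assumed declarations; the real class may wire these up differently.
    import static org.apache.commons.io.FileUtils.byteCountToDisplaySize;

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    private static final Logger log = LoggerFactory.getLogger(CompactionAndCleanupIT.class);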
Use of org.apache.jackrabbit.oak.segment.file.FileStore in project jackrabbit-oak by apache.
The class CompactionAndCleanupIT, method testCancelCompaction.
@Test
public void testCancelCompaction() throws Throwable {
    final FileStore fileStore = fileStoreBuilder(getFileStoreFolder())
            .withGCOptions(defaultGCOptions().setRetainedGenerations(2))
            .withMaxFileSize(1)
            .build();
    SegmentNodeStore nodeStore = SegmentNodeStoreBuilders.builder(fileStore).build();
    NodeBuilder builder = nodeStore.getRoot().builder();
    addNodes(builder, 10, "");
    nodeStore.merge(builder, EmptyHook.INSTANCE, CommitInfo.EMPTY);
    fileStore.flush();
    FutureTask<Boolean> async = runAsync(new Callable<Boolean>() {

        @Override
        public Boolean call() throws IOException {
            boolean cancelled = false;
            for (int k = 0; !cancelled && k < 1000; k++) {
                cancelled = !fileStore.compactFull();
            }
            return cancelled;
        }
    });
    // Give the compaction thread a head start
    sleepUninterruptibly(1, SECONDS);
    fileStore.close();
    try {
        assertTrue(async.get());
    } catch (ExecutionException e) {
        // An IllegalStateException caused by the store already being closed is
        // expected here; rethrow anything else.
        if (!(e.getCause() instanceof IllegalStateException)) {
            throw e.getCause();
        }
    }
}
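The test accepts two outcomes as proof that closing the store cancels compaction: either compactFull() returns false (so the async task reports cancellation), or the task fails with an IllegalStateException because the store is already closed. It also uses an addNodes helper and a runAsync helper that are not shown in the snippet; hypothetical sketches, assuming a recursive binary tree of child nodes and a plain background thread, might look like this:

    // Hypothetical sketches; the real helpers live elsewhere in CompactionAndCleanupIT.

    // Recursively add a binary tree of nodes so compaction has enough work to be interrupted.
    private static void addNodes(NodeBuilder builder, int depth, String prefix) {
        if (depth > 0) {
            NodeBuilder child1 = builder.setChildNode(prefix + "1");
            addNodes(child1, depth - 1, prefix + "1");
            NodeBuilder child2 = builder.setChildNode(prefix + "2");
            addNodes(child2, depth - 1, prefix + "2");
        }
    }

    // Wrap a Callable in a FutureTask and start it on a fresh background thread.
    private static <T> FutureTask<T> runAsync(Callable<T> callable) {
        FutureTask<T> task = new FutureTask<T>(callable);
        new Thread(task).start();
        return task;
    }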
Use of org.apache.jackrabbit.oak.segment.file.FileStore in project jackrabbit-oak by apache.
The class CompactionAndCleanupIT, method offlineCompactionBinR1.
/**
 * Create two binary nodes with the same content and the same blob reference, and
 * verify the de-duplication capabilities of compaction.
 */
@Test
public void offlineCompactionBinR1() throws Exception {
    SegmentGCOptions gcOptions = defaultGCOptions().setOffline();
    ScheduledExecutorService executor = newSingleThreadScheduledExecutor();
    FileStore fileStore = fileStoreBuilder(getFileStoreFolder())
            .withMaxFileSize(1)
            .withGCOptions(gcOptions)
            .withStatisticsProvider(new DefaultStatisticsProvider(executor))
            .build();
    SegmentNodeStore nodeStore = SegmentNodeStoreBuilders.builder(fileStore).build();
    try {
        NodeBuilder extra = nodeStore.getRoot().builder();
        NodeBuilder content = extra.child("content");
        int blobSize = 5 * 1024 * 1024;
        byte[] data = new byte[blobSize];
        new Random().nextBytes(data);
        Blob b = nodeStore.createBlob(new ByteArrayInputStream(data));
        NodeBuilder c1 = content.child("c1");
        c1.setProperty("blob1", b);
        NodeBuilder c2 = content.child("c2");
        c2.setProperty("blob2", b);
        nodeStore.merge(extra, EmptyHook.INSTANCE, CommitInfo.EMPTY);
        fileStore.flush();
        int cpNo = 4;
        Set<String> cps = new HashSet<String>();
        for (int i = 0; i < cpNo; i++) {
            cps.add(nodeStore.checkpoint(60000));
        }
        assertEquals(cpNo, cps.size());
        for (String cp : cps) {
            assertTrue(nodeStore.retrieve(cp) != null);
        }
        // 5MB, de-duplicated by the SegmentWriter
        long size1 = fileStore.getStats().getApproximateSize();
        fileStore.compactFull();
        fileStore.cleanup();
        long size2 = fileStore.getStats().getApproximateSize();
        assertSize("with compacted binaries", size2, 0, size1 * 11 / 10);
    } finally {
        fileStore.close();
    }
}
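Because blob1 and blob2 point at the same Blob instance, the SegmentWriter already stores the binary only once; the assertion therefore checks that full compaction does not clone it, i.e. that the store stays within roughly 10% of its pre-compaction size. The assertSize helper is not part of the snippet; a minimal sketch, assuming inclusive bounds and a plain range check, might look like this:

    // Hypothetical sketch of the assertSize helper used above; the message format
    // and inclusive bounds are assumptions, not taken from the snippet.
    private static void assertSize(String info, long size, long lower, long upper) {
        assertTrue("File store size " + info + " expected in [" + lower + ", " + upper + "] but was " + size,
                size >= lower && size <= upper);
    }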
Use of org.apache.jackrabbit.oak.segment.file.FileStore in project jackrabbit-oak by apache.
The class CompactionAndCleanupIT, method offlineCompactionCps.
/**
 * Create a lot of data nodes (no binaries) and a few checkpoints, and verify
 * that compacting the checkpoints does not cause the size to explode.
 */
@Test
public void offlineCompactionCps() throws Exception {
    SegmentGCOptions gcOptions = defaultGCOptions().setOffline();
    ScheduledExecutorService executor = newSingleThreadScheduledExecutor();
    FileStore fileStore = fileStoreBuilder(getFileStoreFolder())
            .withMaxFileSize(1)
            .withGCOptions(gcOptions)
            .withStatisticsProvider(new DefaultStatisticsProvider(executor))
            .build();
    SegmentNodeStore nodeStore = SegmentNodeStoreBuilders.builder(fileStore).build();
    try {
        // Create ~2MB of data
        NodeBuilder extra = nodeStore.getRoot().builder();
        NodeBuilder content = extra.child("content");
        for (int i = 0; i < 10000; i++) {
            NodeBuilder c = content.child("c" + i);
            for (int j = 0; j < 1000; j++) {
                c.setProperty("p" + i, "v" + i);
            }
        }
        nodeStore.merge(extra, EmptyHook.INSTANCE, CommitInfo.EMPTY);
        fileStore.flush();
        fileStore.compactFull();
        fileStore.cleanup();
        // Compacts to ~548KB
        long size0 = fileStore.getStats().getApproximateSize();
        int cpNo = 4;
        Set<String> cps = new HashSet<String>();
        for (int i = 0; i < cpNo; i++) {
            cps.add(nodeStore.checkpoint(60000));
        }
        assertEquals(cpNo, cps.size());
        for (String cp : cps) {
            assertTrue(nodeStore.retrieve(cp) != null);
        }
        long size1 = fileStore.getStats().getApproximateSize();
        assertTrue("the size should grow or stay the same", size1 >= size0);
        // TODO the following assertion doesn't say anything useful. The
        // conveyed message is "the repository can shrink, grow or stay the
        // same, as long as it remains within a 10% margin of the previous size
        // that I took out of thin air". It has to be fixed or removed.
        // fileStore.compact();
        // fileStore.cleanup();
        // long size2 = fileStore.getStats().getApproximateSize();
        // assertSize("with checkpoints compacted", size2, size1 * 9 / 10, size1 * 11 / 10);
    } finally {
        fileStore.close();
    }
}
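Each call to nodeStore.checkpoint(60000) pins the repository state it references for up to 60 seconds; until a checkpoint expires or is explicitly released, compaction and cleanup must keep that state reachable, which is why the size can only grow or stay the same here. A small usage sketch of releasing the checkpoints afterwards, which this test intentionally does not do:

    // Sketch: releasing the checkpoints taken above. NodeStore.release(String)
    // returns true if the checkpoint was successfully removed, after which later
    // cleanup cycles are free to reclaim the state it pinned.
    for (String cp : cps) {
        assertTrue(nodeStore.release(cp));
    }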