Usage of org.apache.jackrabbit.oak.segment.compaction.SegmentGCOptions in project jackrabbit-oak (Apache): class CompactionAndCleanupIT, method offlineCompactionBinR1.
/**
 * Create 2 binary nodes referencing the same 5MB blob and verify the
 * de-duplication capabilities of offline compaction: after compact() and
 * cleanup() the store size must not exceed the pre-compaction size by more
 * than 10%.
 */
@Test
public void offlineCompactionBinR1() throws Exception {
    SegmentGCOptions gcOptions = defaultGCOptions().setOffline();
    ScheduledExecutorService executor = newSingleThreadScheduledExecutor();
    FileStore fileStore = fileStoreBuilder(getFileStoreFolder())
            .withMaxFileSize(1)
            .withGCOptions(gcOptions)
            .withStatisticsProvider(new DefaultStatisticsProvider(executor))
            .build();
    SegmentNodeStore nodeStore = SegmentNodeStoreBuilders.builder(fileStore).build();
    try {
        NodeBuilder extra = nodeStore.getRoot().builder();
        NodeBuilder content = extra.child("content");

        // One 5MB random blob, referenced by two different properties.
        int blobSize = 5 * 1024 * 1024;
        byte[] data = new byte[blobSize];
        new Random().nextBytes(data);
        Blob b = nodeStore.createBlob(new ByteArrayInputStream(data));

        NodeBuilder c1 = content.child("c1");
        c1.setProperty("blob1", b);
        NodeBuilder c2 = content.child("c2");
        c2.setProperty("blob2", b);
        nodeStore.merge(extra, EmptyHook.INSTANCE, CommitInfo.EMPTY);
        fileStore.flush();

        // Pin the content with a few checkpoints so cleanup cannot drop it.
        int cpNo = 4;
        Set<String> cps = new HashSet<String>();
        for (int i = 0; i < cpNo; i++) {
            cps.add(nodeStore.checkpoint(60000));
        }
        assertEquals(cpNo, cps.size());
        for (String cp : cps) {
            assertTrue(nodeStore.retrieve(cp) != null);
        }

        // 5Mb, de-duplication by the SegmentWriter
        long size1 = fileStore.getStats().getApproximateSize();
        fileStore.compact();
        fileStore.cleanup();
        long size2 = fileStore.getStats().getApproximateSize();
        assertSize("with compacted binaries", size2, 0, size1 * 11 / 10);
    } finally {
        fileStore.close();
        // Fix: shut down the statistics executor; previously it was never
        // terminated and leaked its thread after the test finished.
        executor.shutdown();
    }
}
Usage of org.apache.jackrabbit.oak.segment.compaction.SegmentGCOptions in project jackrabbit-oak (Apache): class CompactionAndCleanupIT, method equalContentAfterOC.
/**
 * Offline compaction must yield a root state that is a different object
 * (fully rewritten) yet logically equal to the pre-compaction root.
 */
@Test
public void equalContentAfterOC() throws Exception {
    SegmentGCOptions gcOptions = defaultGCOptions().setOffline();
    // Fix: removed an unused single-thread ScheduledExecutorService that was
    // created here but never passed to the builder and never shut down,
    // leaking its thread on every run.
    try (FileStore fileStore = fileStoreBuilder(getFileStoreFolder()).withGCOptions(gcOptions).build()) {
        SegmentNodeStore nodeStore = SegmentNodeStoreBuilders.builder(fileStore).build();

        // Add initial content
        NodeBuilder rootBuilder = nodeStore.getRoot().builder();
        addNodes(rootBuilder, 8, "p");
        addProperties(rootBuilder, 3);
        nodeStore.merge(rootBuilder, EmptyHook.INSTANCE, CommitInfo.EMPTY);

        NodeState initialRoot = nodeStore.getRoot();
        assertTrue(fileStore.compact());
        NodeState compactedRoot = nodeStore.getRoot();

        // Different instance (content was rewritten) but equal content.
        assertTrue(initialRoot != compactedRoot);
        assertEquals(initialRoot, compactedRoot);
    }
}
Usage of org.apache.jackrabbit.oak.segment.compaction.SegmentGCOptions in project jackrabbit-oak (Apache): class CompactionAndCleanupIT, method offlineCompactionCps.
/**
 * Create a lot of data nodes (no binaries) and a few checkpoints, verify
 * that compacting checkpoints will not cause the size to explode.
 */
@Test
public void offlineCompactionCps() throws Exception {
    SegmentGCOptions gcOptions = defaultGCOptions().setOffline();
    ScheduledExecutorService executor = newSingleThreadScheduledExecutor();
    FileStore fileStore = fileStoreBuilder(getFileStoreFolder())
            .withMaxFileSize(1)
            .withGCOptions(gcOptions)
            .withStatisticsProvider(new DefaultStatisticsProvider(executor))
            .build();
    SegmentNodeStore nodeStore = SegmentNodeStoreBuilders.builder(fileStore).build();
    try {
        // Create ~2MB of data
        NodeBuilder extra = nodeStore.getRoot().builder();
        NodeBuilder content = extra.child("content");
        for (int i = 0; i < 10000; i++) {
            NodeBuilder c = content.child("c" + i);
            for (int j = 0; j < 1000; j++) {
                // NOTE(review): uses "p"+i / "v"+i while iterating j, so the
                // inner loop rewrites the SAME property 1000 times and each
                // child ends up with a single property. The "~2MB"/"548Kb"
                // figures match this behavior, so it may be intentional load
                // generation — confirm whether "p"+j was meant instead.
                c.setProperty("p" + i, "v" + i);
            }
        }
        nodeStore.merge(extra, EmptyHook.INSTANCE, CommitInfo.EMPTY);
        fileStore.flush();
        fileStore.compact();
        fileStore.cleanup();

        // Compacts to 548Kb
        long size0 = fileStore.getStats().getApproximateSize();

        // Pin the head state with a few checkpoints.
        int cpNo = 4;
        Set<String> cps = new HashSet<String>();
        for (int i = 0; i < cpNo; i++) {
            cps.add(nodeStore.checkpoint(60000));
        }
        assertEquals(cpNo, cps.size());
        for (String cp : cps) {
            assertTrue(nodeStore.retrieve(cp) != null);
        }

        long size1 = fileStore.getStats().getApproximateSize();
        assertTrue("the size should grow or stay the same", size1 >= size0);

        // TODO the following assertion doesn't say anything useful. The
        // conveyed message is "the repository can shrink, grow or stay the
        // same, as long as it remains in a 10% margin of the previous size
        // that I took out of thin air". It has to be fixed or removed.
        // fileStore.compact();
        // fileStore.cleanup();
        // long size2 = fileStore.getStats().getApproximateSize();
        // assertSize("with checkpoints compacted", size2, size1 * 9/10, size1 * 11 / 10);
    } finally {
        fileStore.close();
        // Fix: shut down the statistics executor; previously it was never
        // terminated and leaked its thread after the test finished.
        executor.shutdown();
    }
}
Aggregations