Use of org.apache.jackrabbit.oak.segment.file.FileStore in project jackrabbit-oak by Apache.
Class CompactionAndCleanupIT, method latestFullCompactedStateShouldNotBeDeleted.
@Test
public void latestFullCompactedStateShouldNotBeDeleted() throws Exception {
    SegmentGCOptions gcOptions = defaultGCOptions()
            .setEstimationDisabled(true)
            .setRetainedGenerations(2);
    try (FileStore fileStore = fileStoreBuilder(getFileStoreFolder()).withGCOptions(gcOptions).build()) {
        SegmentNodeState previousHead;
        SegmentNodeState head = fileStore.getHead();

        // Create a full, self-consistent head state. This state will be the
        // base for the following tail compactions. This increments the full generation.
        fileStore.fullGC();
        previousHead = head;
        head = fileStore.getHead();

        // retainedGenerations = 2 -> the full compacted head and the previous
        // uncompacted head must still be available.
        traverse(previousHead);
        traverse(head);

        // Create a tail head state on top of the previous full state. This
        // increments the generation, but leaves the full generation untouched.
        fileStore.tailGC();
        previousHead = head;
        head = fileStore.getHead();

        // retainedGenerations = 2 -> the tail compacted head and the previous
        // uncompacted head must still be available.
        traverse(previousHead);
        traverse(head);

        // Create a tail state on top of the previous tail state. This increments
        // the generation, but leaves the full generation untouched. This brings the
        // current generation two generations away from the latest full head state.
        // Still, the full head state will not be deleted, because doing so would
        // produce an invalid repository at risk of SegmentNotFoundException.
        fileStore.tailGC();
        previousHead = head;
        head = fileStore.getHead();

        // retainedGenerations = 2 -> the tail compacted head and the previous
        // uncompacted head must still be available.
        traverse(previousHead);
        traverse(head);

        // Create a full, self-consistent head state replacing the current chain
        // of tail compacted heads.
        fileStore.fullGC();
        previousHead = head;
        head = fileStore.getHead();

        // retainedGenerations = 2 -> the full compacted head and the previous
        // uncompacted head must still be available.
        traverse(previousHead);
        traverse(head);
    }
}
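The traverse helper used above is not part of this snippet. A minimal sketch, assuming its only job is to touch every node and property so that a wrongly cleaned-up segment would surface as a SegmentNotFoundException (the actual helper in CompactionAndCleanupIT may differ):

import org.apache.jackrabbit.oak.api.PropertyState;
import org.apache.jackrabbit.oak.spi.state.ChildNodeEntry;
import org.apache.jackrabbit.oak.spi.state.NodeState;

// Hypothetical sketch: recursively read every property and child node, so that
// any segment deleted by cleanup triggers a SegmentNotFoundException.
private static void traverse(NodeState node) {
    for (PropertyState property : node.getProperties()) {
        property.getValue(property.getType()); // force the value to be read
    }
    for (ChildNodeEntry child : node.getChildNodeEntries()) {
        traverse(child.getNodeState());
    }
}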
Use of org.apache.jackrabbit.oak.segment.file.FileStore in project jackrabbit-oak by Apache.
Class CompactionAndCleanupIT, method preCompactionReferences.
/**
 * Test asserting OAK-3348: Cross gc sessions might introduce references to pre-compacted segments.
 */
@Test
public void preCompactionReferences() throws Exception {
    for (String ref : new String[] { "merge-before-compact", "merge-after-compact" }) {
        File repoDir = new File(getFileStoreFolder(), ref);
        FileStore fileStore = fileStoreBuilder(repoDir).withMaxFileSize(2).withGCOptions(defaultGCOptions()).build();
        final SegmentNodeStore nodeStore = SegmentNodeStoreBuilders.builder(fileStore).build();
        try {
            // Add some content
            NodeBuilder preGCBuilder = nodeStore.getRoot().builder();
            preGCBuilder.setChildNode("test").setProperty("blob", createBlob(nodeStore, 1024 * 1024));
            nodeStore.merge(preGCBuilder, EmptyHook.INSTANCE, CommitInfo.EMPTY);

            // Remove it again so there is something to gc
            preGCBuilder = nodeStore.getRoot().builder();
            preGCBuilder.getChildNode("test").remove();
            nodeStore.merge(preGCBuilder, EmptyHook.INSTANCE, CommitInfo.EMPTY);

            // With a new builder, simulate exceeding the update limit.
            // This causes changes to be pre-written to segments.
            preGCBuilder = nodeStore.getRoot().builder();
            preGCBuilder.setChildNode("test").setChildNode("a").setChildNode("b").setProperty("foo", "bar");
            for (int k = 0; k < getInteger("update.limit", 10000); k += 2) {
                preGCBuilder.setChildNode("dummy").remove();
            }

            // Case 1: merge the above changes before compacting
            if ("merge-before-compact".equals(ref)) {
                NodeBuilder builder = nodeStore.getRoot().builder();
                builder.setChildNode("n");
                nodeStore.merge(builder, EmptyHook.INSTANCE, CommitInfo.EMPTY);
                nodeStore.merge(preGCBuilder, EmptyHook.INSTANCE, CommitInfo.EMPTY);
            }

            // Compact as many times as generations are retained
            for (int k = 0; k < defaultGCOptions().getRetainedGenerations(); k++) {
                fileStore.compactFull();
            }

            // Case 2: merge the above changes after compacting
            if ("merge-after-compact".equals(ref)) {
                NodeBuilder builder = nodeStore.getRoot().builder();
                builder.setChildNode("n");
                nodeStore.merge(builder, EmptyHook.INSTANCE, CommitInfo.EMPTY);
                nodeStore.merge(preGCBuilder, EmptyHook.INSTANCE, CommitInfo.EMPTY);
            }
        } finally {
            fileStore.close();
        }

        // Re-initialise the file store to simulate off-line gc
        fileStore = fileStoreBuilder(repoDir).withMaxFileSize(2).build();
        try {
            // The 1M blob should get gc-ed
            fileStore.cleanup();
            assertTrue(ref + " repository size " + fileStore.getStats().getApproximateSize() + " < " + 1024 * 1024,
                    fileStore.getStats().getApproximateSize() < 1024 * 1024);
        } finally {
            fileStore.close();
        }
    }
}
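The createBlob helper is referenced but not shown here. A minimal sketch, assuming it simply stores a buffer of random bytes of the requested size through the node store:

import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.util.Random;
import org.apache.jackrabbit.oak.api.Blob;
import org.apache.jackrabbit.oak.spi.state.NodeStore;

// Hypothetical sketch: create a blob of the given size filled with random
// bytes, so it cannot be deduplicated against existing content.
private static Blob createBlob(NodeStore nodeStore, int size) throws IOException {
    byte[] data = new byte[size];
    new Random().nextBytes(data);
    return nodeStore.createBlob(new ByteArrayInputStream(data));
}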
Use of org.apache.jackrabbit.oak.segment.file.FileStore in project jackrabbit-oak by Apache.
Class CompactionAndCleanupIT, method checkpointDeduplicationTest.
@Test
public void checkpointDeduplicationTest() throws Exception {
    class CP {
        String id;
        NodeState uncompacted;
        NodeState compacted;
    }
    CP[] cps = { new CP(), new CP(), new CP(), new CP() };
    try (FileStore fileStore = fileStoreBuilder(getFileStoreFolder()).build()) {
        SegmentNodeStore nodeStore = SegmentNodeStoreBuilders.builder(fileStore).build();

        // Initial content and checkpoint
        NodeBuilder builder = nodeStore.getRoot().builder();
        builder.setChildNode("a").setChildNode("aa");
        builder.setChildNode("b").setChildNode("bb");
        builder.setChildNode("c").setChildNode("cc");
        nodeStore.merge(builder, EmptyHook.INSTANCE, CommitInfo.EMPTY);
        cps[0].id = nodeStore.checkpoint(Long.MAX_VALUE);

        // Add content and another checkpoint
        builder = nodeStore.getRoot().builder();
        builder.setChildNode("1").setChildNode("11");
        nodeStore.merge(builder, EmptyHook.INSTANCE, CommitInfo.EMPTY);
        cps[1].id = nodeStore.checkpoint(Long.MAX_VALUE);

        // Modify content and another checkpoint
        builder = nodeStore.getRoot().builder();
        builder.getChildNode("a").getChildNode("aa").setChildNode("aaa");
        nodeStore.merge(builder, EmptyHook.INSTANCE, CommitInfo.EMPTY);
        cps[2].id = nodeStore.checkpoint(Long.MAX_VALUE);

        // Remove content and another checkpoint
        builder = nodeStore.getRoot().builder();
        builder.getChildNode("a").remove();
        nodeStore.merge(builder, EmptyHook.INSTANCE, CommitInfo.EMPTY);
        cps[3].id = nodeStore.checkpoint(Long.MAX_VALUE);

        // A final bit of content
        builder = nodeStore.getRoot().builder();
        builder.setChildNode("d").setChildNode("dd");
        nodeStore.merge(builder, EmptyHook.INSTANCE, CommitInfo.EMPTY);

        NodeState uncompactedSuperRoot = fileStore.getHead();
        NodeState uncompactedRoot = nodeStore.getRoot();
        for (CP cp : cps) {
            cp.uncompacted = nodeStore.retrieve(cp.id);
        }

        fileStore.compactFull();

        NodeState compactedSuperRoot = fileStore.getHead();
        NodeState compactedRoot = nodeStore.getRoot();
        for (CP cp : cps) {
            cp.compacted = nodeStore.retrieve(cp.id);
        }

        assertEquals(uncompactedSuperRoot, compactedSuperRoot);
        assertEquals(uncompactedRoot, compactedRoot);
        assertStableIds(uncompactedRoot, compactedRoot, "/root");
        for (CP cp : cps) {
            assertEquals(cp.uncompacted, cp.compacted);
            assertStableIds(cp.uncompacted, cp.compacted, concat("/root/checkpoints", cp.id));
        }
    }
}
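The assertStableIds check is the core of this test: compaction must copy nodes in a way that preserves their stable identifiers, otherwise checkpoints could not be deduplicated against the root. A hypothetical sketch of such a check (the helper's actual implementation in CompactionAndCleanupIT may differ):

import static org.apache.jackrabbit.oak.commons.PathUtils.concat;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import org.apache.jackrabbit.oak.segment.SegmentNodeState;
import org.apache.jackrabbit.oak.spi.state.ChildNodeEntry;
import org.apache.jackrabbit.oak.spi.state.NodeState;

// Hypothetical sketch: compare the stable ids of two equal trees node by node.
private static void assertStableIds(NodeState uncompacted, NodeState compacted, String path) {
    assertTrue(path + " should be a SegmentNodeState", compacted instanceof SegmentNodeState);
    SegmentNodeState expected = (SegmentNodeState) uncompacted;
    SegmentNodeState actual = (SegmentNodeState) compacted;
    assertEquals("Stable id mismatch at " + path, expected.getStableId(), actual.getStableId());
    for (ChildNodeEntry child : compacted.getChildNodeEntries()) {
        assertStableIds(uncompacted.getChildNode(child.getName()), child.getNodeState(),
                concat(path, child.getName()));
    }
}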
Use of org.apache.jackrabbit.oak.segment.file.FileStore in project jackrabbit-oak by Apache.
Class CompactionAndCleanupIT, method compactPersistsHead.
@Test
public void compactPersistsHead() throws Exception {
    FileStore fileStore = fileStoreBuilder(getFileStoreFolder())
            .withGCOptions(defaultGCOptions().setRetainedGenerations(2))
            .withMaxFileSize(1)
            .build();
    SegmentNodeStore nodeStore = SegmentNodeStoreBuilders.builder(fileStore).build();
    try {
        // Create ~2MB of data
        NodeBuilder extra = nodeStore.getRoot().builder();
        NodeBuilder content = extra.child("content");
        for (int i = 0; i < 10000; i++) {
            NodeBuilder c = content.child("c" + i);
            for (int j = 0; j < 1000; j++) {
                c.setProperty("p" + i, "v" + i);
            }
        }
        nodeStore.merge(extra, EmptyHook.INSTANCE, CommitInfo.EMPTY);
        fileStore.flush();

        fileStore.compactFull();
        assertEquals(fileStore.getRevisions().getHead(), fileStore.getRevisions().getPersistedHead());
    } finally {
        fileStore.close();
    }
}
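Because compactFull is expected to persist the new head, an equivalent way to observe the property under test is to reopen the store from the same folder. A hypothetical follow-up, not part of the original test:

// Hypothetical sketch: a head that survives a close/reopen cycle must have been
// persisted, which is what the assertion above verifies more directly.
RecordId persistedHead = fileStore.getRevisions().getPersistedHead();
fileStore.close();
try (FileStore reopened = fileStoreBuilder(getFileStoreFolder()).build()) {
    assertEquals(persistedHead, reopened.getRevisions().getHead());
}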
Use of org.apache.jackrabbit.oak.segment.file.FileStore in project jackrabbit-oak by Apache.
Class CompactionAndCleanupIT, method binaryRetentionWithDS.
@Test
public void binaryRetentionWithDS() throws IOException, InvalidFileStoreVersionException, CommitFailedException {
    try (FileStore fileStore = fileStoreBuilder(new File(getFileStoreFolder(), "segmentstore"))
            .withBlobStore(newBlobStore(new File(getFileStoreFolder(), "blobstore")))
            .withGCOptions(defaultGCOptions().setGcSizeDeltaEstimation(0))
            .build()) {
        SegmentNodeStore nodeStore = SegmentNodeStoreBuilders.builder(fileStore).build();
        NodeBuilder builder = nodeStore.getRoot().builder();
        builder.setProperty("bin", createBlob(nodeStore, 1000000));
        nodeStore.merge(builder, EmptyHook.INSTANCE, CommitInfo.EMPTY);
        fileStore.flush();

        Set<String> expectedReferences = newHashSet();
        fileStore.collectBlobReferences(expectedReferences::add);

        for (int k = 1; k <= 3; k++) {
            fileStore.fullGC();
            Set<String> actualReferences = newHashSet();
            fileStore.collectBlobReferences(actualReferences::add);
            assertEquals("Binary should be retained after " + k + "-th gc cycle", expectedReferences, actualReferences);
        }
    }
}
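The newBlobStore helper is not shown. A minimal sketch, assuming it wires a file-based data store rooted at the given directory into Oak's DataStoreBlobStore adapter:

import java.io.File;
import org.apache.jackrabbit.oak.plugins.blob.datastore.DataStoreBlobStore;
import org.apache.jackrabbit.oak.plugins.blob.datastore.OakFileDataStore;
import org.apache.jackrabbit.oak.spi.blob.BlobStore;

// Hypothetical sketch: back the segment store's binaries with a FileDataStore
// rooted at the given directory.
private static BlobStore newBlobStore(File directory) {
    OakFileDataStore delegate = new OakFileDataStore();
    delegate.setPath(directory.getAbsolutePath());
    delegate.init(null);
    return new DataStoreBlobStore(delegate);
}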