Use of org.apache.jackrabbit.oak.segment.file.FileStore in the project jackrabbit-oak by Apache.
From the class CompactionAndCleanupIT, method propertyRetention.
/**
 * Verifies that a property's backing segment survives normal writes and flushes,
 * but is reclaimed once enough full compactions have aged its generation past
 * {@link SegmentGCOptions#getRetainedGenerations()} and cleanup runs.
 */
@Test
public void propertyRetention() throws Exception {
    SegmentGCOptions gcOptions = defaultGCOptions();
    // try-with-resources guarantees the store is closed on every exit path
    // (consistent with keepStableIdOnFlush in this class)
    try (FileStore fileStore = fileStoreBuilder(getFileStoreFolder())
            .withMaxFileSize(1)
            .withGCOptions(gcOptions)
            .build()) {
        final SegmentNodeStore nodeStore = SegmentNodeStoreBuilders.builder(fileStore).build();

        // Add a property
        NodeBuilder builder = nodeStore.getRoot().builder();
        builder.setChildNode("test").setProperty("property", "value");
        nodeStore.merge(builder, EmptyHook.INSTANCE, CommitInfo.EMPTY);

        // Segment id of the current segment
        NodeState test = nodeStore.getRoot().getChildNode("test");
        SegmentId id = ((SegmentNodeState) test).getRecordId().getSegmentId();
        fileStore.flush();
        assertTrue(fileStore.containsSegment(id));

        // Add enough content to fill up the current tar file
        builder = nodeStore.getRoot().builder();
        addContent(builder.setChildNode("dump"));
        nodeStore.merge(builder, EmptyHook.INSTANCE, CommitInfo.EMPTY);

        // Segment and property still there
        assertTrue(fileStore.containsSegment(id));
        PropertyState property = test.getProperty("property");
        assertEquals("value", property.getValue(STRING));

        // GC should remove the segment once its generation falls out of the
        // retained window: compact once per retained generation, then clean up
        fileStore.flush();
        for (int k = 0; k < gcOptions.getRetainedGenerations(); k++) {
            fileStore.compactFull();
        }
        fileStore.cleanup();

        try {
            fileStore.readSegment(id);
            fail("Segment " + id + " should be gc'ed");
        } catch (SegmentNotFoundException ignore) {
            // expected: the segment must be gone after cleanup
        }
    }
}
Use of org.apache.jackrabbit.oak.segment.file.FileStore in the project jackrabbit-oak by Apache.
From the class CompactionAndCleanupIT, method concurrentWritesCleanupNoNewGen.
/**
 * Test asserting OAK-4669: cleanup after concurrent writes must not create a new
 * tar-file generation when the segments are unchanged and various indices are created.
 */
@Test
public void concurrentWritesCleanupNoNewGen() throws Exception {
    ScheduledExecutorService scheduler = newSingleThreadScheduledExecutor();
    StatisticsProvider statsProvider = new DefaultStatisticsProvider(scheduler);
    final FileStoreGCMonitor fileStoreGCMonitor = new FileStoreGCMonitor(Clock.SIMPLE);
    File fileStoreFolder = getFileStoreFolder();
    final FileStore fileStore = fileStoreBuilder(fileStoreFolder)
            .withGCOptions(defaultGCOptions().setRetainedGenerations(2))
            .withGCMonitor(fileStoreGCMonitor)
            .withStatisticsProvider(statsProvider)
            .withMaxFileSize(1)
            .withMemoryMapping(false)
            .build();
    final SegmentNodeStore nodeStore = SegmentNodeStoreBuilders.builder(fileStore).build();
    ExecutorService executorService = newFixedThreadPool(5);
    final AtomicInteger counter = new AtomicInteger();
    try {
        // each invocation commits one uniquely-named binary property and flushes
        Callable<Void> concurrentWriteTask = new Callable<Void>() {
            @Override
            public Void call() throws Exception {
                NodeBuilder builder = nodeStore.getRoot().builder();
                builder.setProperty("blob-" + counter.getAndIncrement(), createBlob(nodeStore, 512 * 512));
                nodeStore.merge(builder, EmptyHook.INSTANCE, CommitInfo.EMPTY);
                fileStore.flush();
                return null;
            }
        };

        // run five writers concurrently and wait for all of them to finish
        List<Future<?>> pending = newArrayList();
        int writers = 5;
        while (writers-- > 0) {
            pending.add(executorService.submit(concurrentWriteTask));
        }
        for (Future<?> outcome : pending) {
            assertNull(outcome.get());
        }

        fileStore.cleanup();

        // every tar file must still be at its initial generation 'a'
        for (String fileName : fileStoreFolder.list()) {
            if (fileName.endsWith(".tar")) {
                // the generation character sits right before the ".tar" suffix
                char generation = fileName.charAt(fileName.length() - "a.tar".length());
                assertTrue("Expected generation is 'a', but instead was: '" + generation + "' for file " + fileName, generation == 'a');
            }
        }
    } finally {
        // shut down in the same order as resource acquisition allows:
        // writers first, then the store, then the stats scheduler
        new ExecutorCloser(executorService).close();
        fileStore.close();
        new ExecutorCloser(scheduler).close();
    }
}
Use of org.apache.jackrabbit.oak.segment.file.FileStore in the project jackrabbit-oak by Apache.
From the class CompactionAndCleanupIT, method cleanupCyclicGraph.
/**
 * Set a root node referring to a child node that lives in a different segment. Depending
 * on the order in which the SegmentBufferWriters associated with the threads used to create
 * the nodes are flushed, this will introduce a forward reference between the segments.
 * The current cleanup mechanism cannot handle forward references and removes the referenced
 * segment, causing a SegmentNotFoundException.
 * This is a regression introduced with OAK-1828.
 */
@Test
public void cleanupCyclicGraph() throws Exception {
    FileStore fileStore = fileStoreBuilder(getFileStoreFolder()).build();
    final SegmentReader reader = fileStore.getReader();
    final SegmentWriter writer = fileStore.getWriter();
    final BlobStore blobStore = fileStore.getBlobStore();
    final SegmentNodeState oldHead = fileStore.getHead();

    // Write the child on a separate thread so it ends up in its own
    // SegmentBufferWriter (and thus potentially in a different segment)
    final SegmentNodeState child = run(new Callable<SegmentNodeState>() {
        @Override
        public SegmentNodeState call() throws Exception {
            return new SegmentNodeState(reader, writer, blobStore, writer.writeNode(EMPTY_NODE));
        }
    });

    // Write a new head that references the child, again on its own thread
    SegmentNodeState newHead = run(new Callable<SegmentNodeState>() {
        @Override
        public SegmentNodeState call() throws Exception {
            NodeBuilder builder = oldHead.builder();
            builder.setChildNode("child", child);
            return new SegmentNodeState(reader, writer, blobStore, writer.writeNode(builder.getNodeState()));
        }
    });

    writer.flush();
    fileStore.getRevisions().setHead(oldHead.getRecordId(), newHead.getRecordId());
    fileStore.close();

    // Reopen, clean up, and traverse: a forward reference mishandled by
    // cleanup would surface here as a SegmentNotFoundException
    fileStore = fileStoreBuilder(getFileStoreFolder()).build();
    traverse(fileStore.getHead());
    fileStore.cleanup();
    // Traversal after cleanup might result in an SNFE
    traverse(fileStore.getHead());
    fileStore.close();
}
Use of org.apache.jackrabbit.oak.segment.file.FileStore in the project jackrabbit-oak by Apache.
From the class CompactionAndCleanupIT, method keepStableIdOnFlush.
/**
 * Verifies that full compaction keeps node states equal to their uncompacted
 * counterparts and preserves their stable record identities.
 */
@Test
public void keepStableIdOnFlush() throws Exception {
    try (FileStore fileStore = fileStoreBuilder(getFileStoreFolder()).build()) {
        SegmentNodeStore nodeStore = SegmentNodeStoreBuilders.builder(fileStore).build();

        // Seed the repository with one child and pin the state with a checkpoint
        NodeBuilder changes = nodeStore.getRoot().builder();
        changes.setChildNode("a");
        nodeStore.merge(changes, EmptyHook.INSTANCE, CommitInfo.EMPTY);
        nodeStore.checkpoint(Long.MAX_VALUE);

        // Commit enough children to exceed the update limit in a single merge
        changes = nodeStore.getRoot().builder();
        int k = 0;
        while (k < UPDATE_LIMIT) {
            changes.setChildNode("b-" + k);
            k++;
        }
        nodeStore.merge(changes, EmptyHook.INSTANCE, CommitInfo.EMPTY);

        // Snapshot before and after full compaction
        NodeState superRootBefore = fileStore.getHead();
        NodeState rootBefore = nodeStore.getRoot();
        fileStore.compactFull();
        NodeState superRootAfter = fileStore.getHead();
        NodeState rootAfter = nodeStore.getRoot();

        // Compaction must be content-preserving and keep stable ids
        assertEquals(superRootBefore, superRootAfter);
        assertEquals(rootBefore, rootAfter);
        assertStableIds(rootBefore, rootAfter, "/root");
    }
}
Use of org.apache.jackrabbit.oak.segment.file.FileStore in the project jackrabbit-oak by Apache.
From the class CompactionAndCleanupIT, method testMixedSegments.
/**
 * Regression test for OAK-2192 testing for mixed segments. This test does not
 * cover OAK-3348. I.e. it does not assert the segment graph is free of cross
 * gc generation references.
 */
@Test
public void testMixedSegments() throws Exception {
    // try-with-resources: the original only closed the store around the final
    // assertion loop, leaking it whenever an earlier assertion failed
    try (FileStore store = fileStoreBuilder(getFileStoreFolder()).withMaxFileSize(2).withMemoryMapping(true).build()) {
        final SegmentNodeStore nodeStore = SegmentNodeStoreBuilders.builder(store).build();
        // NOTE(review): never set to false anywhere in this method — the
        // assumeTrue below is vestigial; kept for fidelity with the original
        final AtomicBoolean compactionSuccess = new AtomicBoolean(true);

        // Initial content whose segments must be fully rewritten by compaction
        NodeBuilder root = nodeStore.getRoot().builder();
        createNodes(root.setChildNode("test"), 10, 3);
        nodeStore.merge(root, EmptyHook.INSTANCE, CommitInfo.EMPTY);

        final Set<UUID> beforeSegments = new HashSet<UUID>();
        collectSegments(store.getReader(), store.getRevisions(), beforeSegments);

        // AtomicBoolean instead of AtomicReference<Boolean>: same semantics, no boxing
        final AtomicBoolean run = new AtomicBoolean(true);
        final List<String> failedCommits = newArrayList();
        Thread[] threads = new Thread[10];
        for (int k = 0; k < threads.length; k++) {
            final int threadId = k;
            threads[k] = new Thread(new Runnable() {
                @Override
                public void run() {
                    // keep committing small nodes while compaction runs concurrently
                    for (int j = 0; run.get(); j++) {
                        String nodeName = "b-" + threadId + "," + j;
                        try {
                            NodeBuilder root = nodeStore.getRoot().builder();
                            root.setChildNode(nodeName);
                            nodeStore.merge(root, EmptyHook.INSTANCE, CommitInfo.EMPTY);
                            Thread.sleep(5);
                        } catch (CommitFailedException e) {
                            failedCommits.add(nodeName);
                        } catch (InterruptedException e) {
                            // restore the interrupt flag (Thread.interrupted() would clear it)
                            Thread.currentThread().interrupt();
                            break;
                        }
                    }
                }
            });
            threads[k].start();
        }

        store.compactFull();
        run.set(false);
        for (Thread t : threads) {
            t.join();
        }
        store.flush();

        assumeTrue("Failed to acquire compaction lock", compactionSuccess.get());
        assertTrue("Failed commits: " + failedCommits, failedCommits.isEmpty());

        // No segment written before compaction may survive into the new head graph
        Set<UUID> afterSegments = new HashSet<UUID>();
        collectSegments(store.getReader(), store.getRevisions(), afterSegments);
        for (UUID u : beforeSegments) {
            assertFalse("Mixed segments found: " + u, afterSegments.contains(u));
        }
    }
}
Aggregations