Use of org.apache.jackrabbit.oak.segment.file.FileStore in project jackrabbit-oak by apache.
Class NodeRecordTest, method baseNodeStateShouldBeReusedAcrossGenerations.
@Test
public void baseNodeStateShouldBeReusedAcrossGenerations() throws Exception {
    try (FileStore store = newFileStore()) {
        Generation generation = new Generation();
        // Create a new SegmentWriter. It is necessary not to have a
        // template cache, otherwise the writes of some records (in this
        // case, template records) would be served from the cache and the
        // test could never fail.
        SegmentWriter writer = defaultSegmentWriterBuilder("test")
                .withGeneration(generation)
                .withWriterPool()
                .with(nodesOnlyCache())
                .build(store);
        generation.set(newGCGeneration(1, 0, false));
        // Write a new node with a non-trivial template. This record will
        // belong to generation 1.
        RecordId baseId = writer.writeNode(EmptyNodeState.EMPTY_NODE.builder()
                .setProperty("a", "a")
                .setProperty("k", "v1")
                .getNodeState());
        SegmentNodeState base = new SegmentNodeState(store.getReader(), writer, store.getBlobStore(), baseId);
        writer.flush();
        generation.set(newGCGeneration(2, 0, false));
        // Compact that same record to generation 2.
        SegmentNodeState compacted = new SegmentNodeState(store.getReader(), writer, store.getBlobStore(), writer.writeNode(base));
        writer.flush();
        // Even though the two records have the same stable ID, their
        // physical record IDs and the IDs of their templates differ.
        assertEquals(base.getStableId(), compacted.getStableId());
        assertNotEquals(base.getRecordId(), compacted.getRecordId());
        assertNotEquals(base.getTemplateId(), compacted.getTemplateId());
        // Create a new builder from the base, pre-compaction node state.
        // The base node state is from generation 1, but this builder will
        // be from generation 2, because every builder in the pool is
        // affected by the change of generation. Writing a node state from
        // this builder should perform a partial compaction.
        SegmentNodeState modified = (SegmentNodeState) base.builder().setProperty("k", "v2").getNodeState();
        // The stable ID of this node state differs from the one in the
        // base state. This is expected, since we have modified the value
        // of a property.
        assertNotEquals(modified.getStableId(), base.getStableId());
        assertNotEquals(modified.getStableId(), compacted.getStableId());
        // The node state should have reused the template from the
        // compacted node state, since the template didn't change and the
        // code should have detected that the base state of this builder
        // was compacted to a new generation.
        assertEquals(modified.getTemplateId(), compacted.getTemplateId());
        // Similarly, the node state should have reused the property from
        // the compacted node state, since the property didn't change.
        Record modifiedProperty = (Record) modified.getProperty("a");
        Record compactedProperty = (Record) compacted.getProperty("a");
        assertNotNull(modifiedProperty);
        assertNotNull(compactedProperty);
        assertEquals(modifiedProperty.getRecordId(), compactedProperty.getRecordId());
    }
}
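The test relies on helpers that the aggregated snippet does not show. A minimal sketch of what they could look like, assuming a JUnit TemporaryFolder rule named folder; the generics of the WriterCacheManager callbacks are approximations of the oak-segment-tar SPI, and older Oak versions take a Guava Supplier instead of java.util.function.Supplier:

import java.util.function.Supplier;

import org.apache.jackrabbit.oak.segment.file.FileStore;
import org.apache.jackrabbit.oak.segment.file.tar.GCGeneration;

// Mutable supplier of the current GC generation, letting the test move
// the shared SegmentWriter from generation 1 to generation 2 at will.
private static class Generation implements Supplier<GCGeneration> {

    private volatile GCGeneration generation;

    void set(GCGeneration generation) {
        this.generation = generation;
    }

    @Override
    public GCGeneration get() {
        return generation;
    }
}

// Hypothetical helper: a fresh FileStore on a temporary folder.
private FileStore newFileStore() throws Exception {
    return fileStoreBuilder(folder.newFolder()).build();
}

// Hypothetical helper: a WriterCacheManager that keeps only the node
// deduplication cache. With string and template writes uncached, a
// matching template ID can only come from partial compaction, which is
// exactly what the test wants to observe.
private WriterCacheManager nodesOnlyCache() {
    return new WriterCacheManager() {

        private final WriterCacheManager defaultCache = new WriterCacheManager.Default();

        @Override
        public Cache<String, RecordId> getStringCache(int generation) {
            return Empty.INSTANCE.getStringCache(generation);
        }

        @Override
        public Cache<Template, RecordId> getTemplateCache(int generation) {
            return Empty.INSTANCE.getTemplateCache(generation);
        }

        @Override
        public Cache<String, RecordId> getNodeCache(int generation) {
            return defaultCache.getNodeCache(generation);
        }
    };
}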
Use of org.apache.jackrabbit.oak.segment.file.FileStore in project jackrabbit-oak by apache.
Class NodeRecordTest, method stableIdShouldPersistAcrossGenerations.
@Test
public void stableIdShouldPersistAcrossGenerations() throws Exception {
    try (FileStore store = newFileStore()) {
        SegmentWriter writer;

        writer = defaultSegmentWriterBuilder("1").withGeneration(newGCGeneration(1, 0, false)).build(store);
        SegmentNodeState one = new SegmentNodeState(store.getReader(), writer, store.getBlobStore(), writer.writeNode(EmptyNodeState.EMPTY_NODE));
        writer.flush();

        writer = defaultSegmentWriterBuilder("2").withGeneration(newGCGeneration(2, 0, false)).build(store);
        SegmentNodeState two = new SegmentNodeState(store.getReader(), writer, store.getBlobStore(), writer.writeNode(one));
        writer.flush();

        writer = defaultSegmentWriterBuilder("3").withGeneration(newGCGeneration(3, 0, false)).build(store);
        SegmentNodeState three = new SegmentNodeState(store.getReader(), writer, store.getBlobStore(), writer.writeNode(two));
        writer.flush();

        // The stable ID must survive rewriting the node across generations.
        assertArrayEquals(asByteArray(three.getStableIdBytes()), asByteArray(two.getStableIdBytes()));
        assertArrayEquals(asByteArray(two.getStableIdBytes()), asByteArray(one.getStableIdBytes()));
    }
}
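getStableIdBytes() returns a buffer rather than an array, so asByteArray is presumably a small adapter along these lines (sketched against java.nio.ByteBuffer; recent Oak versions return their own Buffer wrapper with the same read methods):

import java.nio.ByteBuffer;

// Copy the buffer's remaining bytes into a fresh array so the stable
// IDs can be compared with assertArrayEquals.
private static byte[] asByteArray(ByteBuffer id) {
    byte[] bytes = new byte[id.remaining()];
    id.get(bytes);
    return bytes;
}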
Use of org.apache.jackrabbit.oak.segment.file.FileStore in project jackrabbit-oak by apache.
Class SegmentBufferWriterTest, method nonDirtyBuffersShouldNotBeFlushed.
@Test
public void nonDirtyBuffersShouldNotBeFlushed() throws Exception {
    List<SegmentId> before;
    try (FileStore store = openFileStore()) {
        // Open and close the store once to initialize the repository on disk.
    }
    try (ReadOnlyFileStore store = openReadOnlyFileStore()) {
        before = newArrayList(store.getSegmentIds());
    }
    // Flush a writer that never wrote anything. This must not create any
    // new segments.
    try (FileStore store = openFileStore()) {
        defaultSegmentWriterBuilder("t").build(store).flush();
    }
    List<SegmentId> after;
    try (ReadOnlyFileStore store = openReadOnlyFileStore()) {
        after = newArrayList(store.getSegmentIds());
    }
    assertEquals(before, after);
}
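Both helpers presumably open the same directory, once writable and once read-only, via FileStoreBuilder. A sketch, assuming a JUnit TemporaryFolder rule named folder:

import static org.apache.jackrabbit.oak.segment.file.FileStoreBuilder.fileStoreBuilder;

import org.apache.jackrabbit.oak.segment.file.FileStore;
import org.apache.jackrabbit.oak.segment.file.ReadOnlyFileStore;

// Hypothetical helpers: both stores operate on the same folder, so the
// read-only store sees exactly the segments the writable store created.
private FileStore openFileStore() throws Exception {
    return fileStoreBuilder(folder.getRoot()).build();
}

private ReadOnlyFileStore openReadOnlyFileStore() throws Exception {
    return fileStoreBuilder(folder.getRoot()).buildReadOnly();
}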
Use of org.apache.jackrabbit.oak.segment.file.FileStore in project jackrabbit-oak by apache.
Class SegmentOverflowExceptionIT, method run.
@Test
public void run() throws Exception {
    ScheduledExecutorService executor = Executors.newSingleThreadScheduledExecutor();
    try (FileStore fileStore = fileStoreBuilder(getFileStoreFolder())
            .withGCMonitor(gcMonitor)
            .withStatisticsProvider(new DefaultStatisticsProvider(executor))
            .build()) {
        final SegmentNodeStore nodeStore = SegmentNodeStoreBuilders.builder(fileStore).build();
        long start = System.currentTimeMillis();
        int snfeCount = 0;
        while (System.currentTimeMillis() - start < TIMEOUT
                && fileStore.getStats().getApproximateSize() < MAX_REPO_SIZE) {
            try {
                NodeBuilder root = nodeStore.getRoot().builder();
                while (rnd.nextInt(100) != 0) {
                    modify(nodeStore, root);
                }
                nodeStore.merge(root, EmptyHook.INSTANCE, CommitInfo.EMPTY);
                if (compact) {
                    compact = false;
                    fileStore.fullGC();
                }
            } catch (SegmentNotFoundException snfe) {
                // SegmentNotFoundExceptions are somewhat expected here due
                // to the small retention value for segments. Tolerate a
                // bounded number of them before failing the test.
                if (snfeCount++ > 100) {
                    throw snfe;
                }
            }
        }
    } finally {
        executor.shutdown();
    }
}
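The compact flag and gcMonitor are fields of the test class. A sketch of how they could cooperate, using the GCMonitor.Empty base class from org.apache.jackrabbit.oak.spi.gc; which callbacks re-arm the flag is an assumption:

import org.apache.jackrabbit.oak.spi.gc.GCMonitor;

// Request the first full compaction eagerly, then re-arm the flag from
// gc events so the loop keeps interleaving compactions with writes.
private volatile boolean compact = true;

private final GCMonitor gcMonitor = new GCMonitor.Empty() {

    @Override
    public void skipped(String reason, Object... arguments) {
        // Assumption: retry when a compaction was skipped.
        compact = true;
    }

    @Override
    public void cleaned(long reclaimedSize, long currentSize) {
        // Assumption: schedule another compaction after cleanup.
        compact = true;
    }
};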
Use of org.apache.jackrabbit.oak.segment.file.FileStore in project jackrabbit-oak by apache.
Class BigInlinedBinaryIT, method largeBlob.
@Test
public void largeBlob() throws IOException, CommitFailedException, InvalidFileStoreVersionException {
    try (FileStore fileStore = fileStoreBuilder(getFileStoreFolder()).build()) {
        SegmentNodeStore nodeStore = SegmentNodeStoreBuilders.builder(fileStore).build();

        // Write a blob spanning (LEVEL_SIZE^3 + 1) blocks, one block more
        // than three fully populated levels of list records.
        NodeBuilder builder = nodeStore.getRoot().builder();
        builder.setChildNode("node").setProperty("blob", createBlob((LEVEL_SIZE * LEVEL_SIZE * LEVEL_SIZE + 1L) * BLOCK_SIZE));
        nodeStore.merge(builder, EmptyHook.INSTANCE, CommitInfo.EMPTY);

        // Reading the blob back must succeed.
        Blob blob = nodeStore.getRoot().getChildNode("node").getProperty("blob").getValue(BINARY);
        byte[] buffer = new byte[4096];
        try (InputStream stream = blob.getNewStream()) {
            stream.read(buffer, 0, buffer.length);
        }
    }
}
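createBlob has to produce a binary of (LEVEL_SIZE^3 + 1) blocks without materializing it in memory. A stream-backed sketch; the use of commons-io's NullInputStream is an assumption, any cheap InputStream of the right length would do:

import java.io.InputStream;

import org.apache.commons.io.input.NullInputStream;
import org.apache.jackrabbit.oak.api.Blob;

// Hypothetical helper: a Blob of the given length backed by a stream of
// zero bytes, so nothing close to `size` bytes is ever held in memory.
private static Blob createBlob(long size) {
    return new Blob() {

        @Override
        public InputStream getNewStream() {
            return new NullInputStream(size);
        }

        @Override
        public long length() {
            return size;
        }

        @Override
        public String getReference() {
            return null;
        }

        @Override
        public String getContentIdentity() {
            return null;
        }
    };
}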