Use of org.apache.jackrabbit.oak.segment.file.FileStore in project jackrabbit-oak by apache.
The class StandbyStoreService, method activate.
@Activate
private void activate(ComponentContext context) {
    SegmentStore segmentStore = storeProvider.getSegmentStore();

    if (!(segmentStore instanceof FileStore)) {
        throw new IllegalArgumentException("Unexpected SegmentStore implementation");
    }

    FileStore fileStore = (FileStore) segmentStore;

    String mode = valueOf(context.getProperties().get(MODE));

    if (MODE_PRIMARY.equals(mode)) {
        bootstrapMaster(context, fileStore);
        return;
    }

    if (MODE_STANDBY.equals(mode)) {
        bootstrapSlave(context, fileStore);
        return;
    }

    throw new IllegalArgumentException(String.format("Unexpected mode property, got '%s'", mode));
}
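The cast-and-validate step at the top of activate() is small enough to factor out. A minimal sketch of such a helper, assuming the SegmentStore interface from oak-segment-tar (this helper is hypothetical and not part of StandbyStoreService):

import org.apache.jackrabbit.oak.segment.SegmentStore;
import org.apache.jackrabbit.oak.segment.file.FileStore;

final class FileStores {

    // Hypothetical helper mirroring the check in activate(): fail fast if the
    // configured SegmentStore is not backed by a tar FileStore.
    static FileStore requireFileStore(SegmentStore segmentStore) {
        if (!(segmentStore instanceof FileStore)) {
            throw new IllegalArgumentException("Unexpected SegmentStore implementation");
        }
        return (FileStore) segmentStore;
    }

    private FileStores() {
    }
}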
Use of org.apache.jackrabbit.oak.segment.file.FileStore in project jackrabbit-oak by apache.
The class ReferenceBinaryIT, method fixtures.
@Parameterized.Parameters(name = "{0}")
public static Collection<Object[]> fixtures() throws Exception {
    File file = getTestDir("tar");
    FileStore fileStore = FileStoreBuilder.fileStoreBuilder(file)
            .withBlobStore(createBlobStore())
            .withMaxFileSize(256)
            .withMemoryMapping(true)
            .build();
    SegmentNodeStore sns = SegmentNodeStoreBuilders.builder(fileStore).build();

    List<Object[]> fixtures = Lists.newArrayList();
    SegmentTarFixture segmentTarFixture = new SegmentTarFixture(sns);
    if (segmentTarFixture.isAvailable()) {
        fixtures.add(new Object[] { segmentTarFixture });
    }

    FileBlobStore fbs = new FileBlobStore(getTestDir("fbs1").getAbsolutePath());
    fbs.setReferenceKeyPlainText("foobar");
    FileStore fileStoreWithFBS = FileStoreBuilder.fileStoreBuilder(getTestDir("tar2"))
            .withBlobStore(fbs)
            .withMaxFileSize(256)
            .withMemoryMapping(true)
            .build();
    SegmentNodeStore snsWithFBS = SegmentNodeStoreBuilders.builder(fileStoreWithFBS).build();

    SegmentTarFixture segmentTarFixtureFBS = new SegmentTarFixture(snsWithFBS);
    if (segmentTarFixtureFBS.isAvailable()) {
        fixtures.add(new Object[] { segmentTarFixtureFBS });
    }

    DocumentMongoFixture documentFixture = new DocumentMongoFixture(MongoUtils.URL, createBlobStore());
    if (documentFixture.isAvailable()) {
        fixtures.add(new Object[] { documentFixture });
    }

    return fixtures;
}
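For context, a JUnit parameterized test consumes these fixtures through its constructor. A minimal sketch (not the actual ReferenceBinaryIT class; the fixture supertype and test body are assumptions):

import org.junit.Test;
import org.junit.runner.RunWith;
import org.junit.runners.Parameterized;

@RunWith(Parameterized.class)
public class ReferenceBinaryExampleIT {

    // Each Object[] returned by fixtures() becomes one constructor invocation.
    private final NodeStoreFixture fixture;  // assumed common supertype of the fixtures above

    public ReferenceBinaryExampleIT(NodeStoreFixture fixture) {
        this.fixture = fixture;
    }

    @Test
    public void referenceBinary() throws Exception {
        // the real test builds a repository from the fixture and verifies reference binaries
    }
}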
Use of org.apache.jackrabbit.oak.segment.file.FileStore in project jackrabbit-oak by apache.
The class CompactionAndCleanupIT, method offlineCompactionBinC1.
@Test
public void offlineCompactionBinC1() throws Exception {
    SegmentGCOptions gcOptions = defaultGCOptions()
            .setOffline()
            .withBinaryDeduplication();
    ScheduledExecutorService executor = newSingleThreadScheduledExecutor();
    FileStore fileStore = fileStoreBuilder(getFileStoreFolder())
            .withMaxFileSize(1)
            .withGCOptions(gcOptions)
            .withStatisticsProvider(new DefaultStatisticsProvider(executor))
            .build();
    SegmentNodeStore nodeStore = SegmentNodeStoreBuilders.builder(fileStore).build();
    try {
        NodeBuilder extra = nodeStore.getRoot().builder();
        NodeBuilder content = extra.child("content");

        int blobSize = 5 * 1024 * 1024;
        byte[] data = new byte[blobSize];
        new Random().nextBytes(data);

        NodeBuilder c1 = content.child("c1");
        Blob b1 = nodeStore.createBlob(new ByteArrayInputStream(data));
        c1.setProperty("blob1", b1);

        NodeBuilder c2 = content.child("c2");
        Blob b2 = nodeStore.createBlob(new ByteArrayInputStream(data));
        c2.setProperty("blob2", b2);

        nodeStore.merge(extra, EmptyHook.INSTANCE, CommitInfo.EMPTY);
        fileStore.flush();

        int cpNo = 4;
        Set<String> cps = new HashSet<String>();
        for (int i = 0; i < cpNo; i++) {
            cps.add(nodeStore.checkpoint(60000));
        }
        assertEquals(cpNo, cps.size());
        for (String cp : cps) {
            assertTrue(nodeStore.retrieve(cp) != null);
        }

        long size1 = fileStore.getStats().getApproximateSize();
        fileStore.compact();
        fileStore.cleanup();
        long size2 = fileStore.getStats().getApproximateSize();
        assertSize("with compacted binaries", size2, 0, size1 - blobSize);
    } finally {
        fileStore.close();
    }
}
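The assertSize helper used in the final assertion is defined elsewhere in CompactionAndCleanupIT; its assumed shape is a simple range check (illustrative only, not the actual implementation):

// Assumed shape of the assertSize helper: assert that size falls within [lower, upper].
private static void assertSize(String info, long size, long lower, long upper) {
    assertTrue("Expected size " + info + " in [" + lower + ", " + upper + "], but was " + size,
            lower <= size && size <= upper);
}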
Use of org.apache.jackrabbit.oak.segment.file.FileStore in project jackrabbit-oak by apache.
The class CompactionAndCleanupIT, method offlineCompactionBinC2.
/**
 * Create two binary nodes with the same content but different references.
 * Reduce the maximum size of de-duplicated binaries below the binary length.
 * Verify the de-duplication capabilities of compaction.
 */
@Test
public void offlineCompactionBinC2() throws Exception {
    int blobSize = 5 * 1024 * 1024;
    SegmentGCOptions gcOptions = defaultGCOptions()
            .setOffline()
            .withBinaryDeduplication()
            .setBinaryDeduplicationMaxSize(blobSize / 2);
    ScheduledExecutorService executor = newSingleThreadScheduledExecutor();
    FileStore fileStore = fileStoreBuilder(getFileStoreFolder())
            .withMaxFileSize(1)
            .withGCOptions(gcOptions)
            .withStatisticsProvider(new DefaultStatisticsProvider(executor))
            .build();
    SegmentNodeStore nodeStore = SegmentNodeStoreBuilders.builder(fileStore).build();
    try {
        NodeBuilder extra = nodeStore.getRoot().builder();
        NodeBuilder content = extra.child("content");

        byte[] data = new byte[blobSize];
        new Random().nextBytes(data);

        NodeBuilder c1 = content.child("c1");
        Blob b1 = nodeStore.createBlob(new ByteArrayInputStream(data));
        c1.setProperty("blob1", b1);

        NodeBuilder c2 = content.child("c2");
        Blob b2 = nodeStore.createBlob(new ByteArrayInputStream(data));
        c2.setProperty("blob2", b2);

        nodeStore.merge(extra, EmptyHook.INSTANCE, CommitInfo.EMPTY);
        fileStore.flush();

        int cpNo = 4;
        Set<String> cps = new HashSet<String>();
        for (int i = 0; i < cpNo; i++) {
            cps.add(nodeStore.checkpoint(60000));
        }
        assertEquals(cpNo, cps.size());
        for (String cp : cps) {
            assertTrue(nodeStore.retrieve(cp) != null);
        }

        long size1 = fileStore.getStats().getApproximateSize();
        fileStore.compact();
        fileStore.cleanup();
        long size2 = fileStore.getStats().getApproximateSize();

        // not expected to reduce the size too much, as the binaries are above the threshold
        assertSize("with compacted binaries", size2, size1 * 9 / 10, size1 * 11 / 10);
    } finally {
        fileStore.close();
    }
}
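The only difference from offlineCompactionBinC1 is the cap on de-duplicated binary size. The relevant configuration in isolation (method calls as used in the tests above; the 1 MB value is just an illustration):

// Offline compaction with binary de-duplication limited to binaries up to a maximum
// size; larger binaries are skipped, which is why the test above expects a roughly
// unchanged repository size after compaction and cleanup.
SegmentGCOptions gcOptions = SegmentGCOptions.defaultGCOptions()
        .setOffline()
        .withBinaryDeduplication()
        .setBinaryDeduplicationMaxSize(1024 * 1024); // assumed 1 MB cap for illustration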
Use of org.apache.jackrabbit.oak.segment.file.FileStore in project jackrabbit-oak by apache.
The class ManyChildNodesIT, method manyChildNodesOnRoot.
@Test
public void manyChildNodesOnRoot() throws IOException, InvalidFileStoreVersionException, CommitFailedException {
    try (FileStore fileStore = createFileStore()) {
        SegmentNodeStore nodeStore = SegmentNodeStoreBuilders.builder(fileStore).build();
        NodeBuilder builder = nodeStore.getRoot().builder();
        addManyNodes(builder);
        nodeStore.merge(builder, EmptyHook.INSTANCE, CommitInfo.EMPTY);
    }
}
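The createFileStore() and addManyNodes() helpers are defined elsewhere in ManyChildNodesIT; plausible shapes look like this (the TemporaryFolder rule and the node count are assumptions, not the actual test code):

// Assumed helper shapes, for illustration only.
@Rule
public final TemporaryFolder folder = new TemporaryFolder();

private FileStore createFileStore() throws IOException, InvalidFileStoreVersionException {
    // Build a tar FileStore in a temporary directory.
    return FileStoreBuilder.fileStoreBuilder(folder.getRoot()).build();
}

private static void addManyNodes(NodeBuilder builder) {
    // Add a large, flat set of child nodes under the given builder.
    for (int i = 0; i < 100000; i++) {  // assumed count
        builder.child("node-" + i);
    }
}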