Usage of org.apache.jackrabbit.oak.segment.file.FileStoreBuilder in the Apache jackrabbit-oak project.
From class SegmentToExternalMigrationTest, method createNodeStore:
@Override
protected NodeStore createNodeStore(BlobStore blobStore, File repository) throws IOException {
    // Build a segment FileStore under <repository>/segmentstore, optionally
    // backed by the supplied external blob store.
    FileStoreBuilder fsBuilder = fileStoreBuilder(new File(repository, "segmentstore"));
    if (blobStore != null) {
        fsBuilder.withBlobStore(blobStore);
    }
    try {
        store = fsBuilder.build();
    } catch (InvalidFileStoreVersionException e) {
        // An on-disk segment format mismatch is unrecoverable here; surface it
        // as an unchecked failure instead of widening the throws clause.
        throw new IllegalStateException(e);
    }
    return SegmentNodeStoreBuilders.builder(store).build();
}
Usage of org.apache.jackrabbit.oak.segment.file.FileStoreBuilder in the Apache jackrabbit-oak project.
From class TemporaryFileStore, method before:
@Override
protected void before() throws Throwable {
    // Single-threaded scheduler backing the store's statistics provider.
    executor = Executors.newSingleThreadScheduledExecutor();
    // Minimal file size and zeroed/tiny caches keep the temporary store small.
    FileStoreBuilder fsb = fileStoreBuilder(folder.newFolder())
            .withMaxFileSize(1)
            .withMemoryMapping(false)
            .withNodeDeduplicationCacheSize(1)
            .withSegmentCacheSize(0)
            .withStringCacheSize(0)
            .withTemplateCacheSize(0)
            .withStatisticsProvider(new DefaultStatisticsProvider(executor));
    if (standby) {
        // Standby stores ignore SegmentNotFoundExceptions (IGNORE_SNFE listener).
        fsb.withSnfeListener(SegmentNotFoundExceptionListener.IGNORE_SNFE);
    }
    if (blobStore != null) {
        fsb.withBlobStore(blobStore.blobStore());
    }
    store = fsb.build();
}
Usage of org.apache.jackrabbit.oak.segment.file.FileStoreBuilder in the Apache jackrabbit-oak project.
From class SegmentCompactionIT, method setUp:
@Before
public void setUp() throws Exception {
    assumeTrue(ENABLED);
    ScheduledExecutorService scheduler = Executors.newSingleThreadScheduledExecutor();
    MetricStatisticsProvider statsProvider = new MetricStatisticsProvider(mBeanServer, scheduler);
    // Disable estimation and force a long timeout so GC behaves deterministically in the test.
    SegmentGCOptions gcOptions = defaultGCOptions().setEstimationDisabled(true).setForceTimeout(3600);
    FileStoreBuilder builder = fileStoreBuilder(folder.getRoot());
    fileStore = builder
            .withMemoryMapping(true)
            .withGCMonitor(gcMonitor)
            .withGCOptions(gcOptions)
            .withIOMonitor(new MetricsIOMonitor(statsProvider))
            .withStatisticsProvider(statsProvider)
            .build();
    nodeStore = SegmentNodeStoreBuilders.builder(fileStore).withStatisticsProvider(statsProvider).build();
    WriterCacheManager cacheManager = builder.getCacheManager();
    // Hooks handed to the Revision GC MBean below.
    Runnable cancelGC = () -> fileStore.cancelGC();
    Supplier<String> status = () -> fileStoreGCMonitor.getStatus();
    List<Registration> registrations = newArrayList();
    registrations.add(registerMBean(segmentCompactionMBean, new ObjectName("IT:TYPE=Segment Compaction")));
    registrations.add(registerMBean(new SegmentRevisionGCMBean(fileStore, gcOptions, fileStoreGCMonitor), new ObjectName("IT:TYPE=Segment Revision GC")));
    registrations.add(registerMBean(new RevisionGC(fileStore.getGCRunner(), cancelGC, status, scheduler), new ObjectName("IT:TYPE=Revision GC")));
    // Expose the three read caches of the file store via JMX.
    for (CacheStatsMBean readCacheStats : new CacheStatsMBean[] {
            fileStore.getSegmentCacheStats(),
            fileStore.getStringCacheStats(),
            fileStore.getTemplateCacheStats()}) {
        registrations.add(registerMBean(readCacheStats, new ObjectName("IT:TYPE=" + readCacheStats.getName())));
    }
    // Expose the writer's deduplication caches; these must exist for the test to be meaningful.
    for (CacheStatsMBean dedupCacheStats : new CacheStatsMBean[] {
            cacheManager.getStringCacheStats(),
            cacheManager.getTemplateCacheStats(),
            cacheManager.getNodeCacheStats()}) {
        assertNotNull(dedupCacheStats);
        registrations.add(registerMBean(dedupCacheStats, new ObjectName("IT:TYPE=" + dedupCacheStats.getName())));
    }
    registrations.add(registerMBean(nodeStore.getStats(), new ObjectName("IT:TYPE=" + "SegmentNodeStore statistics")));
    mBeanRegistration = new CompositeRegistration(registrations);
}
Usage of org.apache.jackrabbit.oak.segment.file.FileStoreBuilder in the Apache jackrabbit-oak project.
From class SegmentDataStoreBlobGCIT, method getNodeStore:
private SegmentNodeStore getNodeStore(BlobStore blobStore) throws Exception {
    // Lazily build the store on first call; subsequent calls reuse the cached instance.
    if (nodeStore != null) {
        return nodeStore;
    }
    ScheduledExecutorService statsExecutor = Executors.newSingleThreadScheduledExecutor();
    FileStoreBuilder builder = fileStoreBuilder(getWorkDir())
            .withNodeDeduplicationCacheSize(16384)
            .withBlobStore(blobStore)
            .withMaxFileSize(256)
            .withMemoryMapping(false)
            .withStatisticsProvider(new DefaultStatisticsProvider(statsExecutor))
            .withGCOptions(gcOptions);
    store = builder.build();
    nodeStore = SegmentNodeStoreBuilders.builder(store).build();
    return nodeStore;
}
Usage of org.apache.jackrabbit.oak.segment.file.FileStoreBuilder in the Apache jackrabbit-oak project.
From class FileStoreBackupImpl, method backup:
/**
 * Backs up the segment store identified by {@code reader}/{@code revisions}
 * into {@code destination}: the current head is compacted into a FileStore
 * opened at the destination, and the backup store is then reopened to run
 * cleanup on it. The total duration is logged on completion.
 *
 * @param reader      reader for the source segment store
 * @param revisions   revisions of the source store (supplies the current head)
 * @param destination directory of the backup file store
 * @throws IOException                      on I/O failure in either store
 * @throws InvalidFileStoreVersionException if the destination store has an
 *                                          incompatible on-disk format
 */
@Override
public void backup(@Nonnull SegmentReader reader, @Nonnull Revisions revisions, @Nonnull File destination) throws IOException, InvalidFileStoreVersionException {
Stopwatch watch = Stopwatch.createStarted();
// Offline GC options: used both for the compaction below and for the cleanup pass.
SegmentGCOptions gcOptions = SegmentGCOptions.defaultGCOptions().setOffline();
FileStoreBuilder builder = fileStoreBuilder(destination).withDefaultMemoryMapping();
if (USE_FAKE_BLOBSTORE) {
// Read-only stand-in blob store; presumably for testing backups without a
// real blob store — TODO confirm intent of this flag.
builder.withBlobStore(new BasicReadOnlyBlobStore());
}
builder.withGCOptions(gcOptions);
FileStore backup = builder.build();
SegmentNodeState current = reader.readHeadState(revisions);
try {
// Write the compacted state with the same GC generation as the source head,
// so its records belong to the same generation.
int gen = current.getRecordId().getSegmentId().getGcGeneration();
SegmentBufferWriter bufferWriter = new SegmentBufferWriter(backup.getSegmentIdProvider(), backup.getReader(), "b", gen);
SegmentWriter writer = new SegmentWriter(backup, backup.getReader(), backup.getSegmentIdProvider(), backup.getBlobStore(), new WriterCacheManager.Default(), bufferWriter);
// Suppliers.ofInstance(false): compaction is never cancelled.
Compactor compactor = new Compactor(backup.getReader(), writer, backup.getBlobStore(), Suppliers.ofInstance(false), gcOptions);
compactor.setContentEqualityCheck(true);
// Diff the source head against the backup's current head so only changed
// content is rewritten into the backup store.
SegmentNodeState head = backup.getHead();
SegmentNodeState after = compactor.compact(head, current, head);
if (after != null) {
// Only advance the backup head if compaction produced a result
// (setHead is expected-value based: from the old head to the compacted state).
backup.getRevisions().setHead(head.getRecordId(), after.getRecordId());
}
} finally {
backup.close();
}
// Reopen the backup store and run cleanup to remove segments made
// unreachable by the compaction above.
backup = fileStoreBuilder(destination).withDefaultMemoryMapping().withGCOptions(gcOptions).build();
try {
cleanup(backup);
} finally {
backup.close();
}
watch.stop();
log.info("Backup finished in {}.", watch);
}
Aggregations