Use of org.apache.jackrabbit.oak.segment.file.FileStoreGCMonitor in project jackrabbit-oak by apache.
The class CompactionAndCleanupIT, method concurrentWritesCleanupNoNewGen.
/**
* Test asserting OAK-4669: No new generation of tar should be created when the segments are the same
* and when various indices are created.
*/
@Test
public void concurrentWritesCleanupNoNewGen() throws Exception {
    ScheduledExecutorService scheduler = newSingleThreadScheduledExecutor();
    StatisticsProvider statsProvider = new DefaultStatisticsProvider(scheduler);
    final FileStoreGCMonitor fileStoreGCMonitor = new FileStoreGCMonitor(Clock.SIMPLE);
    File fileStoreFolder = getFileStoreFolder();
    final FileStore fileStore = fileStoreBuilder(fileStoreFolder)
            .withGCOptions(defaultGCOptions().setRetainedGenerations(2))
            .withGCMonitor(fileStoreGCMonitor)
            .withStatisticsProvider(statsProvider)
            .withMaxFileSize(1)
            .withMemoryMapping(false)
            .build();
    final SegmentNodeStore nodeStore = SegmentNodeStoreBuilders.builder(fileStore).build();
    ExecutorService executorService = newFixedThreadPool(5);
    final AtomicInteger counter = new AtomicInteger();
    try {
        Callable<Void> concurrentWriteTask = new Callable<Void>() {
            @Override
            public Void call() throws Exception {
                NodeBuilder builder = nodeStore.getRoot().builder();
                builder.setProperty("blob-" + counter.getAndIncrement(), createBlob(nodeStore, 512 * 512));
                nodeStore.merge(builder, EmptyHook.INSTANCE, CommitInfo.EMPTY);
                fileStore.flush();
                return null;
            }
        };
        List<Future<?>> results = newArrayList();
        for (int i = 0; i < 5; i++) {
            results.add(executorService.submit(concurrentWriteTask));
        }
        for (Future<?> result : results) {
            assertNull(result.get());
        }
        fileStore.cleanup();
        for (String fileName : fileStoreFolder.list()) {
            if (fileName.endsWith(".tar")) {
                int pos = fileName.length() - "a.tar".length();
                char generation = fileName.charAt(pos);
                assertTrue("Expected generation is 'a', but instead was: '" + generation + "' for file " + fileName,
                        generation == 'a');
            }
        }
    } finally {
        new ExecutorCloser(executorService).close();
        fileStore.close();
        new ExecutorCloser(scheduler).close();
    }
}
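The assertion above leans on the tar file naming used by oak-segment-tar: data files are named along the lines of data00000a.tar, and cleanup bumps the trailing letter when it rewrites a file into a new tar generation. A minimal sketch of the extraction under that naming assumption (the file names shown are illustrative, not taken from this page):

// Extracts the tar generation letter, assuming the oak-segment-tar naming
// scheme "data00000a.tar" where the character just before ".tar" is the
// generation that cleanup bumps when it rewrites a file.
static char tarGeneration(String fileName) {
    return fileName.charAt(fileName.length() - "a.tar".length());
}

// Example: tarGeneration("data00000a.tar") == 'a'
//          tarGeneration("data00000b.tar") == 'b'   // a new tar generation was written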
Use of org.apache.jackrabbit.oak.segment.file.FileStoreGCMonitor in project jackrabbit-oak by apache.
The class Registrations, method registerSegmentStore.
/**
 * Configures and registers a new SegmentNodeStore instance together with
 * all required components. Anything that must be disposed of (like
 * registered services or MBeans) will be registered via the
 * {@code registration} parameter.
 *
 * @param context            An instance of {@link ComponentContext}.
 * @param blobStore          An instance of {@link BlobStore}. It can be
 *                           {@code null}.
 * @param statisticsProvider An instance of {@link StatisticsProvider}.
 * @param closer             An instance of {@link Closer}. It will be used
 *                           to track every registered service or
 *                           component.
 * @param whiteboard         An instance of {@link Whiteboard}. It will be
 *                           used to register services in the OSGi
 *                           framework.
 * @param role               The role of this component. It can be
 *                           {@code null}.
 * @param descriptors        Determines if repository descriptors related to
 *                           discovery services should be registered.
 * @return A configured {@link SegmentNodeStore}, or {@code null} if the
 *         setup failed.
 * @throws IOException In case an unrecoverable error occurs.
 */
static SegmentNodeStore registerSegmentStore(@Nonnull ComponentContext context, @Nullable BlobStore blobStore,
        @Nonnull StatisticsProvider statisticsProvider, @Nonnull Closer closer, @Nonnull Whiteboard whiteboard,
        @Nullable String role, boolean descriptors) throws IOException {
    Configuration configuration = new Configuration(context, role);
    Closeables closeables = new Closeables(closer);
    Registrations registrations = new Registrations(whiteboard, role);
    // Listen for GCMonitor services
    GCMonitor gcMonitor = GCMonitor.EMPTY;
    if (configuration.isPrimarySegmentStore()) {
        GCMonitorTracker tracker = new GCMonitorTracker();
        tracker.start(whiteboard);
        closeables.add(tracker);
        gcMonitor = tracker;
    }
    // Create the gc options
    if (configuration.getCompactionGainThreshold() != null) {
        log.warn("Detected deprecated flag 'compaction.gainThreshold'. "
                + "Please use 'compaction.sizeDeltaEstimation' instead and "
                + "'compaction.disableEstimation' to disable estimation.");
    }
    SegmentGCOptions gcOptions = new SegmentGCOptions(configuration.getPauseCompaction(),
            configuration.getRetryCount(), configuration.getForceCompactionTimeout())
            .setRetainedGenerations(configuration.getRetainedGenerations())
            .setGcSizeDeltaEstimation(configuration.getSizeDeltaEstimation())
            .setMemoryThreshold(configuration.getMemoryThreshold())
            .setEstimationDisabled(configuration.getDisableEstimation())
            .withGCNodeWriteMonitor(configuration.getGCProcessLog());
    // Build the FileStore
    FileStoreBuilder builder = fileStoreBuilder(configuration.getSegmentDirectory())
            .withSegmentCacheSize(configuration.getSegmentCacheSize())
            .withStringCacheSize(configuration.getStringCacheSize())
            .withTemplateCacheSize(configuration.getTemplateCacheSize())
            .withStringDeduplicationCacheSize(configuration.getStringDeduplicationCacheSize())
            .withTemplateDeduplicationCacheSize(configuration.getTemplateDeduplicationCacheSize())
            .withNodeDeduplicationCacheSize(configuration.getNodeDeduplicationCacheSize())
            .withMaxFileSize(configuration.getMaxFileSize())
            .withMemoryMapping(configuration.getMemoryMapping())
            .withGCMonitor(gcMonitor)
            .withIOMonitor(new MetricsIOMonitor(statisticsProvider))
            .withStatisticsProvider(statisticsProvider)
            .withGCOptions(gcOptions);
    if (configuration.hasCustomBlobStore() && blobStore != null) {
        log.info("Initializing SegmentNodeStore with BlobStore [{}]", blobStore);
        builder.withBlobStore(blobStore);
    }
    if (configuration.isStandbyInstance()) {
        builder.withSnfeListener(IGNORE_SNFE);
    }
    final FileStore store;
    try {
        store = builder.build();
    } catch (InvalidFileStoreVersionException e) {
        log.error("The storage format is not compatible with this version of Oak Segment Tar", e);
        return null;
    }
    // store should be closed last
    closeables.add(store);
    // Listen for Executor services on the whiteboard
    WhiteboardExecutor executor = new WhiteboardExecutor();
    executor.start(whiteboard);
    closeables.add(executor);
    // Expose stats about the segment cache
    CacheStatsMBean segmentCacheStats = store.getSegmentCacheStats();
    closeables.add(registrations.registerMBean(CacheStatsMBean.class, segmentCacheStats,
            CacheStats.TYPE, segmentCacheStats.getName()));
    // Expose stats about the string and template caches
    CacheStatsMBean stringCacheStats = store.getStringCacheStats();
    closeables.add(registrations.registerMBean(CacheStatsMBean.class, stringCacheStats,
            CacheStats.TYPE, stringCacheStats.getName()));
    CacheStatsMBean templateCacheStats = store.getTemplateCacheStats();
    closeables.add(registrations.registerMBean(CacheStatsMBean.class, templateCacheStats,
            CacheStats.TYPE, templateCacheStats.getName()));
    WriterCacheManager cacheManager = builder.getCacheManager();
    CacheStatsMBean stringDeduplicationCacheStats = cacheManager.getStringCacheStats();
    if (stringDeduplicationCacheStats != null) {
        closeables.add(registrations.registerMBean(CacheStatsMBean.class, stringDeduplicationCacheStats,
                CacheStats.TYPE, stringDeduplicationCacheStats.getName()));
    }
    CacheStatsMBean templateDeduplicationCacheStats = cacheManager.getTemplateCacheStats();
    if (templateDeduplicationCacheStats != null) {
        closeables.add(registrations.registerMBean(CacheStatsMBean.class, templateDeduplicationCacheStats,
                CacheStats.TYPE, templateDeduplicationCacheStats.getName()));
    }
    CacheStatsMBean nodeDeduplicationCacheStats = cacheManager.getNodeCacheStats();
    if (nodeDeduplicationCacheStats != null) {
        closeables.add(registrations.registerMBean(CacheStatsMBean.class, nodeDeduplicationCacheStats,
                CacheStats.TYPE, nodeDeduplicationCacheStats.getName()));
    }
    if (configuration.isPrimarySegmentStore()) {
        final FileStoreGCMonitor monitor = new FileStoreGCMonitor(Clock.SIMPLE);
        closeables.add(registrations.register(GCMonitor.class, monitor));
        if (!configuration.isStandbyInstance()) {
            closeables.add(registrations.registerMBean(SegmentRevisionGC.class,
                    new SegmentRevisionGCMBean(store, gcOptions, monitor),
                    SegmentRevisionGC.TYPE, "Segment node store revision garbage collection"));
        }
        Runnable cancelGC = new Runnable() {
            @Override
            public void run() {
                store.cancelGC();
            }
        };
        Supplier<String> statusMessage = new Supplier<String>() {
            @Override
            public String get() {
                return monitor.getStatus();
            }
        };
        closeables.add(registrations.registerMBean(RevisionGCMBean.class,
                new RevisionGC(store.getGCRunner(), cancelGC, statusMessage, executor),
                RevisionGCMBean.TYPE, "Revision garbage collection"));
    }
    // Expose statistics about the FileStore
    closeables.add(registrations.registerMBean(FileStoreStatsMBean.class, store.getStats(),
            FileStoreStatsMBean.TYPE, "FileStore statistics"));
    // register segment node store
    SegmentNodeStore.SegmentNodeStoreBuilder segmentNodeStoreBuilder = SegmentNodeStoreBuilders.builder(store)
            .withStatisticsProvider(statisticsProvider);
    if (configuration.isStandbyInstance() || !configuration.isPrimarySegmentStore()) {
        segmentNodeStoreBuilder.dispatchChanges(false);
    }
    SegmentNodeStore segmentNodeStore = segmentNodeStoreBuilder.build();
    if (configuration.isPrimarySegmentStore()) {
        ObserverTracker observerTracker = new ObserverTracker(segmentNodeStore);
        observerTracker.start(context.getBundleContext());
        closeables.add(observerTracker);
    }
    if (configuration.isPrimarySegmentStore()) {
        closeables.add(registrations.registerMBean(CheckpointMBean.class, new SegmentCheckpointMBean(segmentNodeStore),
                CheckpointMBean.TYPE, "Segment node store checkpoint management"));
    }
    if (descriptors) {
        // ensure a clusterId is initialized
        // and expose it as 'oak.clusterid' repository descriptor
        GenericDescriptors clusterIdDesc = new GenericDescriptors();
        clusterIdDesc.put(ClusterRepositoryInfo.OAK_CLUSTERID_REPOSITORY_DESCRIPTOR_KEY,
                new SimpleValueFactory().createValue(getOrCreateId(segmentNodeStore)), true, false);
        closeables.add(registrations.register(Descriptors.class, clusterIdDesc));
        // Register "discovery lite" descriptors
        closeables.add(registrations.register(Descriptors.class,
                new SegmentDiscoveryLiteDescriptors(segmentNodeStore)));
    }
    // If a shared data store register the repo id in the data store
    if (configuration.isPrimarySegmentStore() && isShared(blobStore)) {
        SharedDataStore sharedDataStore = (SharedDataStore) blobStore;
        try {
            sharedDataStore.addMetadataRecord(new ByteArrayInputStream(new byte[0]),
                    SharedStoreRecordType.REPOSITORY.getNameFromId(getOrCreateId(segmentNodeStore)));
        } catch (Exception e) {
            throw new IOException("Could not register a unique repositoryId", e);
        }
        if (blobStore instanceof BlobTrackingStore) {
            BlobTrackingStore trackingStore = (BlobTrackingStore) blobStore;
            if (trackingStore.getTracker() != null) {
                trackingStore.getTracker().close();
            }
            trackingStore.addTracker(new BlobIdTracker(configuration.getRepositoryHome(),
                    getOrCreateId(segmentNodeStore), configuration.getBlobSnapshotInterval(), sharedDataStore));
        }
    }
    if (configuration.isPrimarySegmentStore() && blobStore instanceof GarbageCollectableBlobStore) {
        BlobGarbageCollector gc = new MarkSweepGarbageCollector(new SegmentBlobReferenceRetriever(store),
                (GarbageCollectableBlobStore) blobStore, executor,
                TimeUnit.SECONDS.toMillis(configuration.getBlobGcMaxAge()), getOrCreateId(segmentNodeStore));
        closeables.add(registrations.registerMBean(BlobGCMBean.class, new BlobGC(gc, executor),
                BlobGCMBean.TYPE, "Segment node store blob garbage collection"));
    }
    // Expose an MBean for backup/restore operations
    closeables.add(registrations.registerMBean(FileStoreBackupRestoreMBean.class,
            new FileStoreBackupRestoreImpl(segmentNodeStore, store.getRevisions(), store.getReader(),
                    configuration.getBackupDirectory(), executor),
            FileStoreBackupRestoreMBean.TYPE, "Segment node store backup/restore"));
    // Expose statistics about the SegmentNodeStore
    closeables.add(registrations.registerMBean(SegmentNodeStoreStatsMBean.class, segmentNodeStore.getStats(),
            SegmentNodeStoreStatsMBean.TYPE, "SegmentNodeStore statistics"));
    if (configuration.isPrimarySegmentStore()) {
        log.info("Primary SegmentNodeStore initialized");
    } else {
        log.info("Secondary SegmentNodeStore initialized, role={}", role);
    }
    // Register a factory service to expose the FileStore
    closeables.add(registrations.register(SegmentStoreProvider.class, new DefaultSegmentStoreProvider(store)));
    if (configuration.isStandbyInstance()) {
        return segmentNodeStore;
    }
    if (configuration.isPrimarySegmentStore()) {
        Map<String, Object> props = new HashMap<String, Object>();
        props.put(Constants.SERVICE_PID, SegmentNodeStore.class.getName());
        props.put("oak.nodestore.description", new String[] { "nodeStoreType=segment" });
        closeables.add(registrations.register(NodeStore.class, segmentNodeStore, props));
    }
    return segmentNodeStore;
}
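For context, the FileStoreGCMonitor wiring performed by registerSegmentStore can be reproduced outside OSGi with the builder calls already shown on this page. The following is a minimal sketch, not the project's own setup code: the directory is a placeholder, error handling is omitted, imports are the same ones the snippets above rely on, and it assumes (as the RevisionGC registration above implies) that getGCRunner() returns a Runnable that performs a revision GC cycle.

// Minimal sketch: wire a FileStoreGCMonitor into a FileStore and read back
// the values that the MBeans registered above expose over JMX.
static void revisionGcWithMonitor() throws Exception {
    FileStoreGCMonitor gcMonitor = new FileStoreGCMonitor(Clock.SIMPLE);
    FileStore store = fileStoreBuilder(new File("segmentstore"))    // placeholder directory
            .withGCOptions(defaultGCOptions().setRetainedGenerations(2))
            .withGCMonitor(gcMonitor)                               // monitor receives GC callbacks
            .withMemoryMapping(false)
            .build();
    try {
        SegmentNodeStore nodeStore = SegmentNodeStoreBuilders.builder(store).build();
        // ... write and merge content through nodeStore ...
        store.getGCRunner().run();                                  // trigger revision GC, as the RevisionGC MBean would
        // Same data points the SegmentRevisionGCMBean/RevisionGC registrations surface:
        System.out.println(gcMonitor.getStatus());
        System.out.println(gcMonitor.getLastReclaimedSize());
    } finally {
        store.close();
    }
}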
Use of org.apache.jackrabbit.oak.segment.file.FileStoreGCMonitor in project jackrabbit-oak by apache.
The class CompactionAndCleanupIT, method concurrentWritesCleanupZeroReclaimedSize.
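/**
 * Test asserting that a cleanup run issued while concurrent writes are still in
 * flight reclaims nothing: every segment written so far belongs to the current
 * generation, so the FileStoreGCMonitor must report a last reclaimed size of 0.
 */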
@Test
public void concurrentWritesCleanupZeroReclaimedSize() throws Exception {
    ScheduledExecutorService scheduler = newSingleThreadScheduledExecutor();
    StatisticsProvider statsProvider = new DefaultStatisticsProvider(scheduler);
    final FileStoreGCMonitor fileStoreGCMonitor = new FileStoreGCMonitor(Clock.SIMPLE);
    final FileStore fileStore = fileStoreBuilder(getFileStoreFolder())
            .withGCOptions(defaultGCOptions().setRetainedGenerations(2))
            .withGCMonitor(fileStoreGCMonitor)
            .withStatisticsProvider(statsProvider)
            .withMaxFileSize(1)
            .withMemoryMapping(false)
            .build();
    final SegmentNodeStore nodeStore = SegmentNodeStoreBuilders.builder(fileStore).build();
    ExecutorService executorService = newFixedThreadPool(100);
    final AtomicInteger counter = new AtomicInteger();
    try {
        Callable<Void> concurrentWriteTask = new Callable<Void>() {
            @Override
            public Void call() throws Exception {
                NodeBuilder builder = nodeStore.getRoot().builder();
                builder.setProperty("blob-" + counter.getAndIncrement(), createBlob(nodeStore, 25 * 25));
                nodeStore.merge(builder, EmptyHook.INSTANCE, CommitInfo.EMPTY);
                fileStore.flush();
                return null;
            }
        };
        List<Future<?>> results = newArrayList();
        for (int i = 0; i < 100; i++) {
            results.add(executorService.submit(concurrentWriteTask));
        }
        Thread.sleep(100);
        fileStore.cleanup();
        for (Future<?> result : results) {
            assertNull(result.get());
        }
        long reclaimedSize = fileStoreGCMonitor.getLastReclaimedSize();
        assertEquals("Reclaimed size expected is 0, but instead was: " + reclaimedSize, 0, reclaimedSize);
    } finally {
        new ExecutorCloser(executorService).close();
        fileStore.close();
        new ExecutorCloser(scheduler).close();
    }
}
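Both tests call a createBlob helper that is not included in the snippets on this page. A minimal sketch of such a helper is shown below; the random content and the exact signature are assumptions of this sketch, grounded only in the standard NodeStore.createBlob(InputStream) API.

// Hypothetical stand-in for the createBlob helper used by the tests above:
// creates a blob of 'size' random bytes through the NodeStore API.
private static Blob createBlob(NodeStore nodeStore, int size) throws IOException {
    byte[] data = new byte[size];
    new Random().nextBytes(data);
    return nodeStore.createBlob(new ByteArrayInputStream(data));
}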