use of org.apache.jackrabbit.oak.plugins.blob.BlobTrackingStore in project jackrabbit-oak by apache.
the class Registrations method registerSegmentStore.
/**
* Configures and registers a new SegmentNodeStore instance together with
* all required components. Anything that must be disposed of (like
* registered services or MBeans) will be registered via the
* {@code closer} parameter.
*
* @param context An instance of {@link ComponentContext}.
* @param blobStore An instance of {@link BlobStore}. It can be
* {@code null}.
* @param segmentStore An instance of {@link SegmentNodeStorePersistence}. It can be
* {@code null}.
* @param statisticsProvider An instance of {@link StatisticsProvider}.
* @param closer An instance of {@link Closer}. It will be used
* to track every registered service or
* component.
* @param whiteboard An instance of {@link Whiteboard}. It will be
* used to register services in the OSGi
* framework.
* @param role The role of this component. It can be {@code
* null}.
* @param descriptors Determines if repository descriptors related to
* discovery services should be registered.
* @return A configured {@link SegmentNodeStore}, or {@code null} if the
* setup failed.
* @throws IOException In case an unrecoverable error occurs.
*/
static SegmentNodeStore registerSegmentStore(@Nonnull ComponentContext context, @Nullable BlobStore blobStore, @Nullable SegmentNodeStorePersistence segmentStore, @Nonnull StatisticsProvider statisticsProvider, @Nonnull Closer closer, @Nonnull Whiteboard whiteboard, @Nullable String role, boolean descriptors) throws IOException {
Configuration configuration = new Configuration(context, role);
Closeables closeables = new Closeables(closer);
Registrations registrations = new Registrations(whiteboard, role);
// Listen for GCMonitor services
GCMonitor gcMonitor = GCMonitor.EMPTY;
if (configuration.isPrimarySegmentStore()) {
GCMonitorTracker tracker = new GCMonitorTracker();
tracker.start(whiteboard);
closeables.add(tracker);
gcMonitor = tracker;
}
// Create the gc options
if (configuration.getCompactionGainThreshold() != null) {
log.warn("Detected deprecated flag 'compaction.gainThreshold'. " + "Please use 'compaction.sizeDeltaEstimation' instead and " + "'compaction.disableEstimation' to disable estimation.");
}
if (configuration.getRetainedGenerations() != RETAINED_GENERATIONS_DEFAULT) {
log.warn("The number of retained generations defaults to {} and can't be " + "changed. This configuration option is considered deprecated " + "and will be removed in the future.", RETAINED_GENERATIONS_DEFAULT);
}
SegmentGCOptions gcOptions = new SegmentGCOptions(configuration.getPauseCompaction(), configuration.getRetryCount(), configuration.getForceCompactionTimeout())
    .setGcSizeDeltaEstimation(configuration.getSizeDeltaEstimation())
    .setMemoryThreshold(configuration.getMemoryThreshold())
    .setEstimationDisabled(configuration.getDisableEstimation())
    .setGCLogInterval(configuration.getGCProcessLog());
if (configuration.isStandbyInstance()) {
gcOptions.setRetainedGenerations(1);
}
// Build the FileStore
FileStoreBuilder builder = fileStoreBuilder(configuration.getSegmentDirectory())
    .withSegmentCacheSize(configuration.getSegmentCacheSize())
    .withStringCacheSize(configuration.getStringCacheSize())
    .withTemplateCacheSize(configuration.getTemplateCacheSize())
    .withStringDeduplicationCacheSize(configuration.getStringDeduplicationCacheSize())
    .withTemplateDeduplicationCacheSize(configuration.getTemplateDeduplicationCacheSize())
    .withNodeDeduplicationCacheSize(configuration.getNodeDeduplicationCacheSize())
    .withMaxFileSize(configuration.getMaxFileSize())
    .withMemoryMapping(configuration.getMemoryMapping())
    .withGCMonitor(gcMonitor)
    .withIOMonitor(new MetricsIOMonitor(statisticsProvider))
    .withStatisticsProvider(statisticsProvider)
    .withGCOptions(gcOptions);
if (configuration.hasCustomBlobStore() && blobStore != null) {
log.info("Initializing SegmentNodeStore with BlobStore [{}]", blobStore);
builder.withBlobStore(blobStore);
}
if (configuration.hasCustomSegmentStore() && segmentStore != null) {
log.info("Initializing SegmentNodeStore with custom persistence [{}]", segmentStore);
builder.withCustomPersistence(segmentStore);
}
if (configuration.isStandbyInstance()) {
builder.withSnfeListener(IGNORE_SNFE);
}
final FileStore store;
try {
store = builder.build();
} catch (InvalidFileStoreVersionException e) {
log.error("The storage format is not compatible with this version of Oak Segment Tar", e);
return null;
}
// store should be closed last
closeables.add(store);
// Listen for Executor services on the whiteboard
WhiteboardExecutor executor = new WhiteboardExecutor();
executor.start(whiteboard);
closeables.add(executor);
// Expose stats about the segment cache
CacheStatsMBean segmentCacheStats = store.getSegmentCacheStats();
closeables.add(registrations.registerMBean(CacheStatsMBean.class, segmentCacheStats, CacheStats.TYPE, segmentCacheStats.getName()));
// Expose stats about the string and template caches
CacheStatsMBean stringCacheStats = store.getStringCacheStats();
closeables.add(registrations.registerMBean(CacheStatsMBean.class, stringCacheStats, CacheStats.TYPE, stringCacheStats.getName()));
CacheStatsMBean templateCacheStats = store.getTemplateCacheStats();
closeables.add(registrations.registerMBean(CacheStatsMBean.class, templateCacheStats, CacheStats.TYPE, templateCacheStats.getName()));
WriterCacheManager cacheManager = builder.getCacheManager();
CacheStatsMBean stringDeduplicationCacheStats = cacheManager.getStringCacheStats();
if (stringDeduplicationCacheStats != null) {
closeables.add(registrations.registerMBean(CacheStatsMBean.class, stringDeduplicationCacheStats, CacheStats.TYPE, stringDeduplicationCacheStats.getName()));
}
CacheStatsMBean templateDeduplicationCacheStats = cacheManager.getTemplateCacheStats();
if (templateDeduplicationCacheStats != null) {
closeables.add(registrations.registerMBean(CacheStatsMBean.class, templateDeduplicationCacheStats, CacheStats.TYPE, templateDeduplicationCacheStats.getName()));
}
CacheStatsMBean nodeDeduplicationCacheStats = cacheManager.getNodeCacheStats();
if (nodeDeduplicationCacheStats != null) {
closeables.add(registrations.registerMBean(CacheStatsMBean.class, nodeDeduplicationCacheStats, CacheStats.TYPE, nodeDeduplicationCacheStats.getName()));
}
// Expose an MBean for managing and monitoring garbage collection
final FileStoreGCMonitor monitor = new FileStoreGCMonitor(Clock.SIMPLE);
closeables.add(registrations.register(GCMonitor.class, monitor));
if (!configuration.isStandbyInstance()) {
closeables.add(registrations.registerMBean(SegmentRevisionGC.class, new SegmentRevisionGCMBean(store, gcOptions, monitor), SegmentRevisionGC.TYPE, "Segment node store revision garbage collection"));
}
Runnable cancelGC = new Runnable() {
@Override
public void run() {
store.cancelGC();
}
};
Supplier<String> statusMessage = new Supplier<String>() {
@Override
public String get() {
return monitor.getStatus();
}
};
closeables.add(registrations.registerMBean(RevisionGCMBean.class, new RevisionGC(store.getGCRunner(), cancelGC, statusMessage, executor), RevisionGCMBean.TYPE, "Revision garbage collection"));
// Expose statistics about the FileStore
closeables.add(registrations.registerMBean(FileStoreStatsMBean.class, store.getStats(), FileStoreStatsMBean.TYPE, "FileStore statistics"));
// register segment node store
SegmentNodeStore.SegmentNodeStoreBuilder segmentNodeStoreBuilder = SegmentNodeStoreBuilders.builder(store).withStatisticsProvider(statisticsProvider);
if (configuration.isStandbyInstance() || !configuration.isPrimarySegmentStore()) {
segmentNodeStoreBuilder.dispatchChanges(false);
}
SegmentNodeStore segmentNodeStore = segmentNodeStoreBuilder.build();
if (configuration.isPrimarySegmentStore()) {
ObserverTracker observerTracker = new ObserverTracker(segmentNodeStore);
observerTracker.start(context.getBundleContext());
closeables.add(observerTracker);
}
if (configuration.isPrimarySegmentStore()) {
closeables.add(registrations.registerMBean(CheckpointMBean.class, new SegmentCheckpointMBean(segmentNodeStore), CheckpointMBean.TYPE, "Segment node store checkpoint management"));
}
if (descriptors) {
// ensure a clusterId is initialized
// and expose it as 'oak.clusterid' repository descriptor
GenericDescriptors clusterIdDesc = new GenericDescriptors();
clusterIdDesc.put(ClusterRepositoryInfo.OAK_CLUSTERID_REPOSITORY_DESCRIPTOR_KEY, new SimpleValueFactory().createValue(getOrCreateId(segmentNodeStore)), true, false);
closeables.add(registrations.register(Descriptors.class, clusterIdDesc));
// Register "discovery lite" descriptors
closeables.add(registrations.register(Descriptors.class, new SegmentDiscoveryLiteDescriptors(segmentNodeStore)));
}
// If the data store is shared, register the repo id in the data store
if (configuration.isPrimarySegmentStore() && isShared(blobStore)) {
SharedDataStore sharedDataStore = (SharedDataStore) blobStore;
try {
sharedDataStore.addMetadataRecord(new ByteArrayInputStream(new byte[0]), SharedStoreRecordType.REPOSITORY.getNameFromId(getOrCreateId(segmentNodeStore)));
} catch (Exception e) {
throw new IOException("Could not register a unique repositoryId", e);
}
if (blobStore instanceof BlobTrackingStore) {
BlobTrackingStore trackingStore = (BlobTrackingStore) blobStore;
if (trackingStore.getTracker() != null) {
trackingStore.getTracker().close();
}
trackingStore.addTracker(new BlobIdTracker(configuration.getRepositoryHome(), getOrCreateId(segmentNodeStore), configuration.getBlobSnapshotInterval(), sharedDataStore));
}
}
if (configuration.isPrimarySegmentStore() && blobStore instanceof GarbageCollectableBlobStore) {
BlobGarbageCollector gc = new MarkSweepGarbageCollector(new SegmentBlobReferenceRetriever(store), (GarbageCollectableBlobStore) blobStore, executor, TimeUnit.SECONDS.toMillis(configuration.getBlobGcMaxAge()), getOrCreateId(segmentNodeStore), whiteboard);
closeables.add(registrations.registerMBean(BlobGCMBean.class, new BlobGC(gc, executor), BlobGCMBean.TYPE, "Segment node store blob garbage collection"));
}
// Expose an MBean for backup/restore operations
closeables.add(registrations.registerMBean(FileStoreBackupRestoreMBean.class, new FileStoreBackupRestoreImpl(segmentNodeStore, store.getRevisions(), store.getReader(), configuration.getBackupDirectory(), executor), FileStoreBackupRestoreMBean.TYPE, "Segment node store backup/restore"));
// Expose statistics about the SegmentNodeStore
closeables.add(registrations.registerMBean(SegmentNodeStoreStatsMBean.class, segmentNodeStore.getStats(), SegmentNodeStoreStatsMBean.TYPE, "SegmentNodeStore statistics"));
if (configuration.isPrimarySegmentStore()) {
log.info("Primary SegmentNodeStore initialized");
} else {
log.info("Secondary SegmentNodeStore initialized, role={}", role);
}
// Register a factory service to expose the FileStore
closeables.add(registrations.register(SegmentStoreProvider.class, new DefaultSegmentStoreProvider(store)));
if (configuration.isStandbyInstance()) {
return segmentNodeStore;
}
if (configuration.isPrimarySegmentStore()) {
Map<String, Object> props = new HashMap<String, Object>();
props.put(Constants.SERVICE_PID, SegmentNodeStore.class.getName());
props.put("oak.nodestore.description", new String[] { "nodeStoreType=segment" });
closeables.add(registrations.register(NodeStore.class, segmentNodeStore, props));
}
return segmentNodeStore;
}
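A minimal helper, sketched below, isolates the BlobTrackingStore pattern this page collects: if the blob store supports tracking, close any previously attached tracker before attaching a fresh BlobIdTracker for the repository id. The helper class and its parameters are hypothetical, the package names in the imports are assumed from the Oak codebase, and the BlobIdTracker constructor mirrors the calls above; like those calls (which are guarded by isShared(blobStore)), it assumes a tracking blob store is also a SharedDataStore.
import java.io.IOException;
import org.apache.jackrabbit.oak.plugins.blob.BlobTrackingStore;
import org.apache.jackrabbit.oak.plugins.blob.SharedDataStore;
import org.apache.jackrabbit.oak.plugins.blob.datastore.BlobIdTracker;
import org.apache.jackrabbit.oak.spi.blob.BlobStore;
final class BlobTrackerWiring {
    private BlobTrackerWiring() {
    }
    // Attaches a BlobIdTracker to the given blob store if it supports tracking.
    // Any previously attached tracker is closed first so its resources are
    // released before the new one takes over. Callers are expected to have
    // checked that the data store is shared, as in the examples above.
    static void attachTracker(BlobStore blobStore, String repositoryHome, String repositoryId, long snapshotIntervalSecs) throws IOException {
        if (!(blobStore instanceof BlobTrackingStore)) {
            // Plain blob stores do not record blob ids; nothing to wire up.
            return;
        }
        BlobTrackingStore trackingStore = (BlobTrackingStore) blobStore;
        if (trackingStore.getTracker() != null) {
            trackingStore.getTracker().close();
        }
        trackingStore.addTracker(new BlobIdTracker(repositoryHome, repositoryId, snapshotIntervalSecs, (SharedDataStore) blobStore));
    }
}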
use of org.apache.jackrabbit.oak.plugins.blob.BlobTrackingStore in project jackrabbit-oak by apache.
the class ActiveDeletedBlobSyncTrackerTest method createRepository.
@Override
protected ContentRepository createRepository() {
try {
File blobCollectorDeleted = new File(blobCollectionRoot.getRoot(), "deleted-blobs");
blobCollectorDeleted.mkdirs();
adbc = new ActiveDeletedBlobCollectorImpl(clock, blobCollectorDeleted, executorService);
IndexCopier copier = createIndexCopier();
editorProvider = new LuceneIndexEditorProvider(copier, null, new ExtractedTextCache(10 * FileUtils.ONE_MB, 100), null, Mounts.defaultMountInfoProvider(), adbc);
provider = new LuceneIndexProvider(copier);
OakFileDataStore ds = new OakFileDataStore();
ds.setMinRecordLength(10);
ds.init(fileDataStoreRoot.getRoot().getAbsolutePath());
DataStoreBlobStore dsbs = new DataStoreBlobStore(ds);
this.blobStore = new AbstractActiveDeletedBlobTest.CountingBlobStore(dsbs);
FileStore store = FileStoreBuilder.fileStoreBuilder(temporaryFolder.getRoot()).withMemoryMapping(false).withBlobStore(blobStore).build();
nodeStore = SegmentNodeStoreBuilders.builder(store).build();
BlobTrackingStore trackingStore = (BlobTrackingStore) blobStore;
trackingStore.addTracker(new BlobIdTracker(blobTrackerRoot.getRoot().getAbsolutePath(), getOrCreateId(nodeStore), 600, dsbs));
// set the blob store to skip writing blobs through the node store
editorProvider.setBlobStore(blobStore);
asyncIndexUpdate = new AsyncIndexUpdate("async", nodeStore, editorProvider);
return new Oak(nodeStore).with(new InitialContent()).with(new OpenSecurityProvider()).with((QueryIndexProvider) provider).with((Observer) provider).with(editorProvider).createContentRepository();
} catch (Exception e) {
throw new RuntimeException(e);
}
}
use of org.apache.jackrabbit.oak.plugins.blob.BlobTrackingStore in project jackrabbit-oak by apache.
the class DocumentNodeStoreService method registerNodeStore.
private void registerNodeStore() throws IOException {
DocumentNodeStoreBuilder<?> mkBuilder;
if (documentStoreType == DocumentStoreType.RDB) {
RDBDocumentNodeStoreBuilder builder = newRDBDocumentNodeStoreBuilder();
configureBuilder(builder);
checkNotNull(dataSource, "DataStore type set [%s] but DataSource reference not initialized", PROP_DS_TYPE);
if (!customBlobStore) {
checkNotNull(blobDataSource, "DataStore type set [%s] but BlobDataSource reference not initialized", PROP_DS_TYPE);
builder.setRDBConnection(dataSource, blobDataSource);
log.info("Connected to datasources {} {}", dataSource, blobDataSource);
} else {
if (blobDataSource != null && blobDataSource != dataSource) {
log.info("Ignoring blobDataSource {} as custom blob store takes precedence.", blobDataSource);
}
builder.setRDBConnection(dataSource);
log.info("Connected to datasource {}", dataSource);
}
mkBuilder = builder;
} else {
String uri = config.mongouri();
String db = config.db();
boolean soKeepAlive = config.socketKeepAlive();
MongoClientURI mongoURI = new MongoClientURI(uri);
String persistentCache = resolvePath(config.persistentCache(), DEFAULT_PERSISTENT_CACHE);
String journalCache = resolvePath(config.journalCache(), DEFAULT_JOURNAL_CACHE);
if (log.isInfoEnabled()) {
// Take care not to log the uri directly as it might contain passwords
log.info("Starting DocumentNodeStore with host={}, db={}, cache size (MB)={}, persistentCache={}, " + "journalCache={}, blobCacheSize (MB)={}, maxReplicationLagInSecs={}", mongoURI.getHosts(), db, config.cache(), persistentCache, journalCache, config.blobCacheSize(), config.maxReplicationLagInSecs());
log.info("Mongo Connection details {}", MongoConnection.toString(mongoURI.getOptions()));
}
MongoDocumentNodeStoreBuilder builder = newMongoDocumentNodeStoreBuilder();
configureBuilder(builder);
builder.setMaxReplicationLag(config.maxReplicationLagInSecs(), TimeUnit.SECONDS);
builder.setSocketKeepAlive(soKeepAlive);
builder.setMongoDB(uri, db, config.blobCacheSize());
mkBuilder = builder;
log.info("Connected to database '{}'", db);
}
if (!customBlobStore) {
defaultBlobStore = mkBuilder.getBlobStore();
log.info("Registering the BlobStore with ServiceRegistry");
blobStoreReg = context.getBundleContext().registerService(BlobStore.class.getName(), defaultBlobStore, null);
}
// Set wrapping blob store after setting the DB
if (isWrappingCustomBlobStore()) {
((BlobStoreWrapper) blobStore).setBlobStore(mkBuilder.getBlobStore());
mkBuilder.setBlobStore(blobStore);
}
// attach GCMonitor
final GCMonitorTracker gcMonitor = new GCMonitorTracker();
gcMonitor.start(whiteboard);
closer.register(asCloseable(gcMonitor));
Logger vgcLogger = LoggerFactory.getLogger(VersionGarbageCollector.class);
GCMonitor loggingGCMonitor;
if (isContinuousRevisionGC()) {
// log less chatty with continuous RevisionGC
loggingGCMonitor = new QuietGCMonitor(vgcLogger);
} else {
loggingGCMonitor = new LoggingGCMonitor(vgcLogger);
}
mkBuilder.setGCMonitor(new DelegatingGCMonitor(newArrayList(gcMonitor, loggingGCMonitor)));
nodeStore = mkBuilder.build();
// ensure a clusterId is initialized
// and expose it as 'oak.clusterid' repository descriptor
GenericDescriptors clusterIdDesc = new GenericDescriptors();
clusterIdDesc.put(ClusterRepositoryInfo.OAK_CLUSTERID_REPOSITORY_DESCRIPTOR_KEY, new SimpleValueFactory().createValue(ClusterRepositoryInfo.getOrCreateId(nodeStore)), true, false);
whiteboard.register(Descriptors.class, clusterIdDesc, Collections.emptyMap());
// If the data store is shared, register the repo id in the data store
if (SharedDataStoreUtils.isShared(blobStore)) {
String repoId = null;
try {
repoId = ClusterRepositoryInfo.getOrCreateId(nodeStore);
((SharedDataStore) blobStore).addMetadataRecord(new ByteArrayInputStream(new byte[0]), SharedDataStoreUtils.SharedStoreRecordType.REPOSITORY.getNameFromId(repoId));
} catch (Exception e) {
throw new IOException("Could not register a unique repositoryId", e);
}
if (blobStore instanceof BlobTrackingStore) {
BlobTrackingStore trackingStore = (BlobTrackingStore) blobStore;
if (trackingStore.getTracker() != null) {
trackingStore.getTracker().close();
}
trackingStore.addTracker(new BlobIdTracker(getRepositoryHome(), repoId, config.blobTrackSnapshotIntervalInSecs(), (SharedDataStore) blobStore));
}
}
registerJMXBeans(nodeStore, mkBuilder);
registerLastRevRecoveryJob(nodeStore);
registerJournalGC(nodeStore);
registerVersionGCJob(nodeStore);
registerDocumentStoreMetrics(mkBuilder.getDocumentStore());
if (!isNodeStoreProvider()) {
observerTracker = new ObserverTracker(nodeStore);
observerTracker.start(context.getBundleContext());
}
journalPropertyHandlerFactory.start(whiteboard);
DocumentStore ds = nodeStore.getDocumentStore();
// OAK-2682: time difference detection applied at startup with a default
// max time diff of 2000 millis (2sec)
final long maxDiff = Long.parseLong(System.getProperty("oak.documentMK.maxServerTimeDiffMillis", "2000"));
try {
if (maxDiff >= 0) {
final long timeDiff = ds.determineServerTimeDifferenceMillis();
log.info("registerNodeStore: server time difference: {}ms (max allowed: {}ms)", timeDiff, maxDiff);
if (Math.abs(timeDiff) > maxDiff) {
throw new AssertionError("Server clock seems off (" + timeDiff + "ms) by more than configured amount (" + maxDiff + "ms)");
}
}
} catch (RuntimeException e) {
// no checked exception
// in case of a RuntimeException, just log but continue
log.warn("registerNodeStore: got RuntimeException while trying to determine time difference to server: " + e, e);
}
String[] serviceClasses;
if (isNodeStoreProvider()) {
registerNodeStoreProvider(nodeStore);
serviceClasses = new String[] { DocumentNodeStore.class.getName(), Clusterable.class.getName() };
} else {
serviceClasses = new String[] { NodeStore.class.getName(), DocumentNodeStore.class.getName(), Clusterable.class.getName() };
}
Dictionary<String, Object> props = new Hashtable<String, Object>();
props.put(Constants.SERVICE_PID, DocumentNodeStore.class.getName());
props.put(DESCRIPTION, getMetadata(ds));
// OAK-2844: in order to allow DocumentDiscoveryLiteService to directly
// require a service DocumentNodeStore (instead of having to do an 'instanceof')
// the registration is now done for both NodeStore and DocumentNodeStore here.
nodeStoreReg = context.getBundleContext().registerService(serviceClasses, nodeStore, props);
}
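The service registration at the end of this method is what lets other bundles consume the node store. A minimal consumer-side sketch, assuming a plain OSGi ServiceTracker, follows; the class below is illustrative and not part of Oak, and the imports assume the usual Oak and OSGi package locations.
import org.apache.jackrabbit.oak.spi.state.NodeStore;
import org.osgi.framework.BundleContext;
import org.osgi.util.tracker.ServiceTracker;
// Illustrative consumer: looks up the NodeStore registered by
// DocumentNodeStoreService (or SegmentNodeStoreService) in the OSGi service registry.
final class NodeStoreConsumer {
    private final ServiceTracker<NodeStore, NodeStore> tracker;
    NodeStoreConsumer(BundleContext bundleContext) {
        tracker = new ServiceTracker<NodeStore, NodeStore>(bundleContext, NodeStore.class, null);
        tracker.open();
    }
    NodeStore nodeStore() {
        // May return null until the node store service has been registered.
        return tracker.getService();
    }
    void close() {
        tracker.close();
    }
}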
use of org.apache.jackrabbit.oak.plugins.blob.BlobTrackingStore in project jackrabbit-oak by apache.
the class Registrations method registerSegmentStore.
/**
* Configures and registers a new SegmentNodeStore instance together with
* all required components. Anything that must be disposed of (like
* registered services or MBeans) will be registered via the
* {@code closer} parameter.
*
* @param context An instance of {@link ComponentContext}.
* @param blobStore An instance of {@link BlobStore}. It can be
* {@code null}.
* @param statisticsProvider An instance of {@link StatisticsProvider}.
* @param closer An instance of {@link Closer}. It will be used
* to track every registered service or
* component.
* @param whiteboard An instance of {@link Whiteboard}. It will be
* used to register services in the OSGi
* framework.
* @param role The role of this component. It can be {@code
* null}.
* @param descriptors Determines if repository descriptors related to
* discovery services should be registered.
* @return A configured {@link SegmentNodeStore}, or {@code null} if the
* setup failed.
* @throws IOException In case an unrecoverable error occurs.
*/
static SegmentNodeStore registerSegmentStore(@Nonnull ComponentContext context, @Nullable BlobStore blobStore, @Nonnull StatisticsProvider statisticsProvider, @Nonnull Closer closer, @Nonnull Whiteboard whiteboard, @Nullable String role, boolean descriptors) throws IOException {
Configuration configuration = new Configuration(context, role);
Closeables closeables = new Closeables(closer);
Registrations registrations = new Registrations(whiteboard, role);
// Listen for GCMonitor services
GCMonitor gcMonitor = GCMonitor.EMPTY;
if (configuration.isPrimarySegmentStore()) {
GCMonitorTracker tracker = new GCMonitorTracker();
tracker.start(whiteboard);
closeables.add(tracker);
gcMonitor = tracker;
}
// Create the gc options
if (configuration.getCompactionGainThreshold() != null) {
log.warn("Detected deprecated flag 'compaction.gainThreshold'. " + "Please use 'compaction.sizeDeltaEstimation' instead and " + "'compaction.disableEstimation' to disable estimation.");
}
SegmentGCOptions gcOptions = new SegmentGCOptions(configuration.getPauseCompaction(), configuration.getRetryCount(), configuration.getForceCompactionTimeout())
    .setRetainedGenerations(configuration.getRetainedGenerations())
    .setGcSizeDeltaEstimation(configuration.getSizeDeltaEstimation())
    .setMemoryThreshold(configuration.getMemoryThreshold())
    .setEstimationDisabled(configuration.getDisableEstimation())
    .withGCNodeWriteMonitor(configuration.getGCProcessLog());
// Build the FileStore
FileStoreBuilder builder = fileStoreBuilder(configuration.getSegmentDirectory())
    .withSegmentCacheSize(configuration.getSegmentCacheSize())
    .withStringCacheSize(configuration.getStringCacheSize())
    .withTemplateCacheSize(configuration.getTemplateCacheSize())
    .withStringDeduplicationCacheSize(configuration.getStringDeduplicationCacheSize())
    .withTemplateDeduplicationCacheSize(configuration.getTemplateDeduplicationCacheSize())
    .withNodeDeduplicationCacheSize(configuration.getNodeDeduplicationCacheSize())
    .withMaxFileSize(configuration.getMaxFileSize())
    .withMemoryMapping(configuration.getMemoryMapping())
    .withGCMonitor(gcMonitor)
    .withIOMonitor(new MetricsIOMonitor(statisticsProvider))
    .withStatisticsProvider(statisticsProvider)
    .withGCOptions(gcOptions);
if (configuration.hasCustomBlobStore() && blobStore != null) {
log.info("Initializing SegmentNodeStore with BlobStore [{}]", blobStore);
builder.withBlobStore(blobStore);
}
if (configuration.isStandbyInstance()) {
builder.withSnfeListener(IGNORE_SNFE);
}
final FileStore store;
try {
store = builder.build();
} catch (InvalidFileStoreVersionException e) {
log.error("The storage format is not compatible with this version of Oak Segment Tar", e);
return null;
}
// store should be closed last
closeables.add(store);
// Listen for Executor services on the whiteboard
WhiteboardExecutor executor = new WhiteboardExecutor();
executor.start(whiteboard);
closeables.add(executor);
// Expose stats about the segment cache
CacheStatsMBean segmentCacheStats = store.getSegmentCacheStats();
closeables.add(registrations.registerMBean(CacheStatsMBean.class, segmentCacheStats, CacheStats.TYPE, segmentCacheStats.getName()));
// Expose stats about the string and template caches
CacheStatsMBean stringCacheStats = store.getStringCacheStats();
closeables.add(registrations.registerMBean(CacheStatsMBean.class, stringCacheStats, CacheStats.TYPE, stringCacheStats.getName()));
CacheStatsMBean templateCacheStats = store.getTemplateCacheStats();
closeables.add(registrations.registerMBean(CacheStatsMBean.class, templateCacheStats, CacheStats.TYPE, templateCacheStats.getName()));
WriterCacheManager cacheManager = builder.getCacheManager();
CacheStatsMBean stringDeduplicationCacheStats = cacheManager.getStringCacheStats();
if (stringDeduplicationCacheStats != null) {
closeables.add(registrations.registerMBean(CacheStatsMBean.class, stringDeduplicationCacheStats, CacheStats.TYPE, stringDeduplicationCacheStats.getName()));
}
CacheStatsMBean templateDeduplicationCacheStats = cacheManager.getTemplateCacheStats();
if (templateDeduplicationCacheStats != null) {
closeables.add(registrations.registerMBean(CacheStatsMBean.class, templateDeduplicationCacheStats, CacheStats.TYPE, templateDeduplicationCacheStats.getName()));
}
CacheStatsMBean nodeDeduplicationCacheStats = cacheManager.getNodeCacheStats();
if (nodeDeduplicationCacheStats != null) {
closeables.add(registrations.registerMBean(CacheStatsMBean.class, nodeDeduplicationCacheStats, CacheStats.TYPE, nodeDeduplicationCacheStats.getName()));
}
if (configuration.isPrimarySegmentStore()) {
final FileStoreGCMonitor monitor = new FileStoreGCMonitor(Clock.SIMPLE);
closeables.add(registrations.register(GCMonitor.class, monitor));
if (!configuration.isStandbyInstance()) {
closeables.add(registrations.registerMBean(SegmentRevisionGC.class, new SegmentRevisionGCMBean(store, gcOptions, monitor), SegmentRevisionGC.TYPE, "Segment node store revision garbage collection"));
}
Runnable cancelGC = new Runnable() {
@Override
public void run() {
store.cancelGC();
}
};
Supplier<String> statusMessage = new Supplier<String>() {
@Override
public String get() {
return monitor.getStatus();
}
};
closeables.add(registrations.registerMBean(RevisionGCMBean.class, new RevisionGC(store.getGCRunner(), cancelGC, statusMessage, executor), RevisionGCMBean.TYPE, "Revision garbage collection"));
}
// Expose statistics about the FileStore
closeables.add(registrations.registerMBean(FileStoreStatsMBean.class, store.getStats(), FileStoreStatsMBean.TYPE, "FileStore statistics"));
// register segment node store
SegmentNodeStore.SegmentNodeStoreBuilder segmentNodeStoreBuilder = SegmentNodeStoreBuilders.builder(store).withStatisticsProvider(statisticsProvider);
if (configuration.isStandbyInstance() || !configuration.isPrimarySegmentStore()) {
segmentNodeStoreBuilder.dispatchChanges(false);
}
SegmentNodeStore segmentNodeStore = segmentNodeStoreBuilder.build();
if (configuration.isPrimarySegmentStore()) {
ObserverTracker observerTracker = new ObserverTracker(segmentNodeStore);
observerTracker.start(context.getBundleContext());
closeables.add(observerTracker);
}
if (configuration.isPrimarySegmentStore()) {
closeables.add(registrations.registerMBean(CheckpointMBean.class, new SegmentCheckpointMBean(segmentNodeStore), CheckpointMBean.TYPE, "Segment node store checkpoint management"));
}
if (descriptors) {
// ensure a clusterId is initialized
// and expose it as 'oak.clusterid' repository descriptor
GenericDescriptors clusterIdDesc = new GenericDescriptors();
clusterIdDesc.put(ClusterRepositoryInfo.OAK_CLUSTERID_REPOSITORY_DESCRIPTOR_KEY, new SimpleValueFactory().createValue(getOrCreateId(segmentNodeStore)), true, false);
closeables.add(registrations.register(Descriptors.class, clusterIdDesc));
// Register "discovery lite" descriptors
closeables.add(registrations.register(Descriptors.class, new SegmentDiscoveryLiteDescriptors(segmentNodeStore)));
}
// If the data store is shared, register the repo id in the data store
if (configuration.isPrimarySegmentStore() && isShared(blobStore)) {
SharedDataStore sharedDataStore = (SharedDataStore) blobStore;
try {
sharedDataStore.addMetadataRecord(new ByteArrayInputStream(new byte[0]), SharedStoreRecordType.REPOSITORY.getNameFromId(getOrCreateId(segmentNodeStore)));
} catch (Exception e) {
throw new IOException("Could not register a unique repositoryId", e);
}
if (blobStore instanceof BlobTrackingStore) {
BlobTrackingStore trackingStore = (BlobTrackingStore) blobStore;
if (trackingStore.getTracker() != null) {
trackingStore.getTracker().close();
}
trackingStore.addTracker(new BlobIdTracker(configuration.getRepositoryHome(), getOrCreateId(segmentNodeStore), configuration.getBlobSnapshotInterval(), sharedDataStore));
}
}
if (configuration.isPrimarySegmentStore() && blobStore instanceof GarbageCollectableBlobStore) {
BlobGarbageCollector gc = new MarkSweepGarbageCollector(new SegmentBlobReferenceRetriever(store), (GarbageCollectableBlobStore) blobStore, executor, TimeUnit.SECONDS.toMillis(configuration.getBlobGcMaxAge()), getOrCreateId(segmentNodeStore));
closeables.add(registrations.registerMBean(BlobGCMBean.class, new BlobGC(gc, executor), BlobGCMBean.TYPE, "Segment node store blob garbage collection"));
}
// Expose an MBean for backup/restore operations
closeables.add(registrations.registerMBean(FileStoreBackupRestoreMBean.class, new FileStoreBackupRestoreImpl(segmentNodeStore, store.getRevisions(), store.getReader(), configuration.getBackupDirectory(), executor), FileStoreBackupRestoreMBean.TYPE, "Segment node store backup/restore"));
// Expose statistics about the SegmentNodeStore
closeables.add(registrations.registerMBean(SegmentNodeStoreStatsMBean.class, segmentNodeStore.getStats(), SegmentNodeStoreStatsMBean.TYPE, "SegmentNodeStore statistics"));
if (configuration.isPrimarySegmentStore()) {
log.info("Primary SegmentNodeStore initialized");
} else {
log.info("Secondary SegmentNodeStore initialized, role={}", role);
}
// Register a factory service to expose the FileStore
closeables.add(registrations.register(SegmentStoreProvider.class, new DefaultSegmentStoreProvider(store)));
if (configuration.isStandbyInstance()) {
return segmentNodeStore;
}
if (configuration.isPrimarySegmentStore()) {
Map<String, Object> props = new HashMap<String, Object>();
props.put(Constants.SERVICE_PID, SegmentNodeStore.class.getName());
props.put("oak.nodestore.description", new String[] { "nodeStoreType=segment" });
closeables.add(registrations.register(NodeStore.class, segmentNodeStore, props));
}
return segmentNodeStore;
}
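As a usage illustration of the contract in the Javadoc above (everything that needs disposal is tracked through the Closer), a hypothetical caller could look like the sketch below. The surrounding component, its fields and the activate/deactivate flow are assumptions; only the registerSegmentStore signature comes from the code above, and because the method is package-private the caller would have to live in the same package. Imports assume typical Oak and Guava package locations.
import java.io.IOException;
import com.google.common.io.Closer;
import org.apache.jackrabbit.oak.segment.SegmentNodeStore;
import org.apache.jackrabbit.oak.spi.blob.BlobStore;
import org.apache.jackrabbit.oak.spi.whiteboard.Whiteboard;
import org.apache.jackrabbit.oak.stats.StatisticsProvider;
import org.osgi.service.component.ComponentContext;
// Hypothetical caller; how the fields get injected is left out for brevity.
class SegmentStoreActivation {
    private final Closer closer = Closer.create();
    private Whiteboard whiteboard;
    private StatisticsProvider statisticsProvider;
    private BlobStore blobStore; // may be null
    void activate(ComponentContext context) throws IOException {
        SegmentNodeStore store = Registrations.registerSegmentStore(context, blobStore, statisticsProvider, closer, whiteboard, null /* role: assumed to mean the primary store */, true /* register descriptors */);
        if (store == null) {
            // Setup failed (e.g. incompatible storage format); release whatever
            // had already been registered before the failure.
            closer.close();
        }
    }
    void deactivate() throws IOException {
        // Disposes everything registerSegmentStore registered, including the FileStore.
        closer.close();
    }
}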