Use of org.neo4j.configuration.helpers.DatabaseReadOnlyChecker in project neo4j by neo4j.
The class Recovery, method instantiateRecoveryExtensions.
private static DatabaseExtensions instantiateRecoveryExtensions(DatabaseLayout databaseLayout,
        FileSystemAbstraction fileSystem, Config config, LogService logService, PageCache pageCache,
        JobScheduler jobScheduler, DbmsInfo dbmsInfo, Monitors monitors, TokenHolders tokenHolders,
        RecoveryCleanupWorkCollector recoveryCleanupCollector, DatabaseReadOnlyChecker readOnlyChecker,
        Iterable<ExtensionFactory<?>> extensionFactories, PageCacheTracer pageCacheTracer) {
    List<ExtensionFactory<?>> recoveryExtensions = stream(extensionFactories)
            .filter(extension -> extension.getClass().isAnnotationPresent(RecoveryExtension.class))
            .collect(toList());
    Dependencies deps = new Dependencies();
    NonListenableMonitors nonListenableMonitors = new NonListenableMonitors(monitors, logService.getInternalLogProvider());
    deps.satisfyDependencies(fileSystem, config, logService, pageCache, nonListenableMonitors, jobScheduler,
            tokenHolders, recoveryCleanupCollector, pageCacheTracer, databaseLayout, readOnlyChecker);
    DatabaseExtensionContext extensionContext = new DatabaseExtensionContext(databaseLayout, dbmsInfo, deps);
    return new DatabaseExtensions(extensionContext, recoveryExtensions, deps, ExtensionFailureStrategies.fail());
}
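Everything passed to satisfyDependencies, including the DatabaseReadOnlyChecker, becomes resolvable by the recovery extensions, and only factories annotated with @RecoveryExtension survive the filter. A hedged sketch of such a factory follows; the factory name, its Dependencies interface, the constructor shape, and the exact import paths are assumptions rather than confirmed Neo4j API:

import org.neo4j.configuration.Config;
import org.neo4j.configuration.helpers.DatabaseReadOnlyChecker;
import org.neo4j.kernel.extension.ExtensionFactory;
import org.neo4j.kernel.extension.context.ExtensionContext;
import org.neo4j.kernel.lifecycle.Lifecycle;
import org.neo4j.kernel.lifecycle.LifecycleAdapter;
import org.neo4j.kernel.recovery.RecoveryExtension;

// Hypothetical factory; the @RecoveryExtension annotation is what the stream filter
// in instantiateRecoveryExtensions looks for.
@RecoveryExtension
public class MyRecoveryExtensionFactory extends ExtensionFactory<MyRecoveryExtensionFactory.Dependencies> {
    // Resolved against the Dependencies object satisfied above.
    public interface Dependencies {
        DatabaseReadOnlyChecker readOnlyChecker();
        Config config();
    }

    public MyRecoveryExtensionFactory() {
        super("my-recovery-extension"); // assumed constructor shape
    }

    @Override
    public Lifecycle newInstance(ExtensionContext context, Dependencies dependencies) {
        DatabaseReadOnlyChecker checker = dependencies.readOnlyChecker();
        // A real extension would key behaviour off the checker; this sketch does nothing.
        return new LifecycleAdapter();
    }
}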
Use of org.neo4j.configuration.helpers.DatabaseReadOnlyChecker in project neo4j by neo4j.
The class IdGeneratorMigrator, method migrateIdFiles.
private void migrateIdFiles(DatabaseLayout directoryLayout, DatabaseLayout migrationLayout, RecordFormats oldFormat,
        RecordFormats newFormat, ProgressReporter progress, CursorContext cursorContext) throws IOException {
    // The store .id files need to be migrated. At this point some of them have been sort-of-migrated, i.e. merely
    // ported to the new format, but only got the highId and nothing else. Regardless, we want to do a proper
    // migration here, which means not only creating an empty id file with only the highId, but also scanning the
    // stores and properly updating their freelists so that from this point on no ids will ever be lost.
    // For every store type: if the store is in migrationLayout use that, else use the one from the dbLayout,
    // because it will already be of the current format. Then scan it and create the .id file in the migration layout.
    List<StoreType> storesInDbDirectory = new ArrayList<>();
    List<StoreType> storesInMigrationDirectory = new ArrayList<>();
    for (StoreType storeType : StoreType.values()) {
        // See if it exists in the migration directory, otherwise it must be in the db directory
        List<StoreType> list = fileSystem.fileExists(migrationLayout.file(storeType.getDatabaseFile()))
                ? storesInMigrationDirectory
                : storesInDbDirectory;
        list.add(storeType);
    }
    progress.start(storesInDbDirectory.size() + storesInMigrationDirectory.size());
    // Rebuild the .id files from the legacy stores that haven't been upgraded, i.e. those that remained unchanged.
    // Make them end up in upgrade/<store>.id so that they won't overwrite the original .id files before the
    // upgrade is completed.
    IdGeneratorFactory rebuiltIdGeneratorsFromOldStore = new DefaultIdGeneratorFactory(fileSystem, immediate(), directoryLayout.getDatabaseName()) {
        @Override
        public IdGenerator open(PageCache pageCache, Path filename, IdType idType, LongSupplier highIdScanner,
                long maxId, DatabaseReadOnlyChecker readOnlyChecker, Config config, CursorContext cursorContext,
                ImmutableSet<OpenOption> openOptions) throws IOException {
            Path redirectedFilename = migrationLayout.databaseDirectory().resolve(filename.getFileName().toString());
            return super.open(pageCache, redirectedFilename, idType, highIdScanner, maxId, readOnlyChecker, config,
                    cursorContext, openOptions);
        }

        @Override
        public IdGenerator create(PageCache pageCache, Path fileName, IdType idType, long highId,
                boolean throwIfFileExists, long maxId, DatabaseReadOnlyChecker readOnlyChecker, Config config,
                CursorContext cursorContext, ImmutableSet<OpenOption> openOptions) {
            throw new IllegalStateException("The store file should exist and therefore all calls should be to open, not create");
        }
    };
    startAndTriggerRebuild(directoryLayout, oldFormat, rebuiltIdGeneratorsFromOldStore, storesInDbDirectory, progress, cursorContext);
    // Build the id files from the migration directory, i.e. for those stores that have been migrated.
    // Before doing this we have to create empty stores for the ones that are missing, otherwise some of the stores
    // we need to open will complain because some of their "sub-stores" don't exist. That's fine: they will be
    // empty, we will never read from them, and they just sit there allowing their parent store to be opened.
    // We'll remove them after the id files have been built.
    IdGeneratorFactory rebuiltIdGeneratorsFromNewStore = new DefaultIdGeneratorFactory(fileSystem, immediate(), migrationLayout.getDatabaseName());
    Set<Path> placeHolderStoreFiles = createEmptyPlaceHolderStoreFiles(migrationLayout, newFormat);
    startAndTriggerRebuild(migrationLayout, newFormat, rebuiltIdGeneratorsFromNewStore, storesInMigrationDirectory, progress, cursorContext);
    for (Path emptyPlaceHolderStoreFile : placeHolderStoreFiles) {
        fileSystem.deleteFile(emptyPlaceHolderStoreFile);
    }
}
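The overridden open above redirects each id file into the migration directory, while create is forbidden outright since every store file must already exist at this point. A self-contained sketch of the same decorate-and-redirect idea, with all names illustrative rather than Neo4j API:

import java.nio.file.Path;

public class RedirectSketch {
    static class Opener {
        Path open(Path file) {
            return file; // stand-in for the real open logic
        }
    }

    static class RedirectingOpener extends Opener {
        private final Path redirectDir;

        RedirectingOpener(Path redirectDir) {
            this.redirectDir = redirectDir;
        }

        @Override
        Path open(Path file) {
            // Same file name, different directory - the same trick the migrating
            // factory plays with migrationLayout.databaseDirectory() above.
            return super.open(redirectDir.resolve(file.getFileName().toString()));
        }
    }

    public static void main(String[] args) {
        Path redirected = new RedirectingOpener(Path.of("upgrade")).open(Path.of("data", "node.id"));
        System.out.println(redirected); // prints: upgrade/node.id
    }
}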
Use of org.neo4j.configuration.helpers.DatabaseReadOnlyChecker in project neo4j by neo4j.
The class Recovery, method performRecovery.
/**
 * Performs recovery of the database described by the provided layout.
 *
 * @param fs database filesystem
 * @param pageCache page cache used to perform database recovery.
 * @param tracers underlying operation tracers
 * @param config custom configuration
 * @param databaseLayout layout of the database to recover.
 * @param storageEngineFactory {@link StorageEngineFactory} for the storage to recover.
 * @param forceRunRecovery forces recovery to run even if the usual checks indicate that it's not required.
 * In specific cases, like after a store copy, there's always a need for doing recovery, or at least for starting the db, checkpointing and shutting down,
 * even if the normal "is recovery required" checks say that recovery isn't required.
 * @param logProvider log provider
 * @param globalMonitors global server monitors
 * @param extensionFactories extension factories for extensions that should participate in recovery
 * @param providedLogFiles log files from the database
 * @param startupChecker checker that can request recovery to be aborted during startup
 * @param memoryTracker memory tracker used by recovery operations
 * @param clock clock passed to the checkpointer
 * @throws IOException on any unexpected I/O exception encountered during recovery.
 */
public static void performRecovery(FileSystemAbstraction fs, PageCache pageCache, DatabaseTracers tracers,
        Config config, DatabaseLayout databaseLayout, StorageEngineFactory storageEngineFactory,
        boolean forceRunRecovery, LogProvider logProvider, Monitors globalMonitors,
        Iterable<ExtensionFactory<?>> extensionFactories, Optional<LogFiles> providedLogFiles,
        RecoveryStartupChecker startupChecker, MemoryTracker memoryTracker, Clock clock) throws IOException {
    Log recoveryLog = logProvider.getLog(Recovery.class);
    if (!forceRunRecovery && !isRecoveryRequired(fs, pageCache, databaseLayout, storageEngineFactory, config,
            providedLogFiles, memoryTracker)) {
        return;
    }
    checkAllFilesPresence(databaseLayout, fs, pageCache, storageEngineFactory);
    LifeSupport recoveryLife = new LifeSupport();
    Monitors monitors = new Monitors(globalMonitors, logProvider);
    DatabasePageCache databasePageCache = new DatabasePageCache(pageCache, IOController.DISABLED);
    SimpleLogService logService = new SimpleLogService(logProvider);
    VersionAwareLogEntryReader logEntryReader = new VersionAwareLogEntryReader(storageEngineFactory.commandReaderFactory());
    DatabaseReadOnlyChecker readOnlyChecker = writable();
    DatabaseSchemaState schemaState = new DatabaseSchemaState(logProvider);
    JobScheduler scheduler = JobSchedulerFactory.createInitialisedScheduler();
    VersionContextSupplier versionContextSupplier = EmptyVersionContextSupplier.EMPTY;
    DatabaseHealth databaseHealth = new DatabaseHealth(PanicEventGenerator.NO_OP, recoveryLog);
    TokenHolders tokenHolders = new TokenHolders(
            new DelegatingTokenHolder(new ReadOnlyTokenCreator(), TYPE_PROPERTY_KEY),
            new DelegatingTokenHolder(new ReadOnlyTokenCreator(), TYPE_LABEL),
            new DelegatingTokenHolder(new ReadOnlyTokenCreator(), TYPE_RELATIONSHIP_TYPE));
    RecoveryCleanupWorkCollector recoveryCleanupCollector = new GroupingRecoveryCleanupWorkCollector(
            scheduler, INDEX_CLEANUP, INDEX_CLEANUP_WORK, databaseLayout.getDatabaseName());
    DatabaseExtensions extensions = instantiateRecoveryExtensions(databaseLayout, fs, config, logService,
            databasePageCache, scheduler, DbmsInfo.TOOL, monitors, tokenHolders, recoveryCleanupCollector,
            readOnlyChecker, extensionFactories, tracers.getPageCacheTracer());
    DefaultIndexProviderMap indexProviderMap = new DefaultIndexProviderMap(extensions, config);
    StorageEngine storageEngine = storageEngineFactory.instantiate(fs, databaseLayout, config, databasePageCache,
            tokenHolders, schemaState, getConstraintSemantics(), indexProviderMap, NO_LOCK_SERVICE,
            new DefaultIdGeneratorFactory(fs, recoveryCleanupCollector, databaseLayout.getDatabaseName()),
            new DefaultIdController(), databaseHealth, logService.getInternalLogProvider(), recoveryCleanupCollector,
            tracers.getPageCacheTracer(), true, readOnlyChecker, memoryTracker);
    // Schema indexes
    FullScanStoreView fullScanStoreView = new FullScanStoreView(NO_LOCK_SERVICE, storageEngine::newReader, config, scheduler);
    IndexStoreViewFactory indexStoreViewFactory = new IndexStoreViewFactory(config, storageEngine::newReader,
            NO_LOCKS, fullScanStoreView, NO_LOCK_SERVICE, logProvider);
    IndexStatisticsStore indexStatisticsStore = new IndexStatisticsStore(databasePageCache, databaseLayout,
            recoveryCleanupCollector, readOnlyChecker, tracers.getPageCacheTracer());
    IndexingService indexingService = Database.buildIndexingService(storageEngine, schemaState, indexStoreViewFactory,
            indexStatisticsStore, config, scheduler, indexProviderMap, tokenHolders, logProvider, logProvider,
            monitors.newMonitor(IndexingService.Monitor.class), tracers.getPageCacheTracer(), memoryTracker,
            databaseLayout.getDatabaseName(), readOnlyChecker);
    MetadataProvider metadataProvider = storageEngine.metadataProvider();
    Dependencies dependencies = new Dependencies();
    dependencies.satisfyDependencies(databaseLayout, config, databasePageCache, fs, logProvider, tokenHolders,
            schemaState, getConstraintSemantics(), NO_LOCK_SERVICE, databaseHealth,
            new DefaultIdGeneratorFactory(fs, recoveryCleanupCollector, databaseLayout.getDatabaseName()),
            new DefaultIdController(), readOnlyChecker, versionContextSupplier, logService, metadataProvider);
    LogFiles logFiles = LogFilesBuilder.builder(databaseLayout, fs)
            .withLogEntryReader(logEntryReader)
            .withConfig(config)
            .withDependencies(dependencies)
            .withMemoryTracker(memoryTracker)
            .build();
    boolean failOnCorruptedLogFiles = config.get(GraphDatabaseInternalSettings.fail_on_corrupted_log_files);
    validateStoreId(logFiles, storageEngine.getStoreId(), config);
    TransactionMetadataCache metadataCache = new TransactionMetadataCache();
    PhysicalLogicalTransactionStore transactionStore = new PhysicalLogicalTransactionStore(logFiles, metadataCache,
            logEntryReader, monitors, failOnCorruptedLogFiles);
    BatchingTransactionAppender transactionAppender = new BatchingTransactionAppender(logFiles,
            LogRotation.NO_ROTATION, metadataCache, metadataProvider, databaseHealth);
    LifeSupport schemaLife = new LifeSupport();
    schemaLife.add(storageEngine.schemaAndTokensLifecycle());
    schemaLife.add(indexingService);
    var doParallelRecovery = config.get(GraphDatabaseInternalSettings.do_parallel_recovery);
    TransactionLogsRecovery transactionLogsRecovery = transactionLogRecovery(fs, metadataProvider,
            monitors.newMonitor(RecoveryMonitor.class),
            monitors.newMonitor(RecoveryStartInformationProvider.Monitor.class), logFiles, storageEngine,
            transactionStore, metadataProvider, schemaLife, databaseLayout, failOnCorruptedLogFiles, recoveryLog,
            startupChecker, tracers.getPageCacheTracer(), memoryTracker, doParallelRecovery);
    CheckPointerImpl.ForceOperation forceOperation = new DefaultForceOperation(indexingService, storageEngine);
    var checkpointAppender = logFiles.getCheckpointFile().getCheckpointAppender();
    CheckPointerImpl checkPointer = new CheckPointerImpl(metadataProvider, RecoveryThreshold.INSTANCE, forceOperation,
            LogPruning.NO_PRUNING, checkpointAppender, databaseHealth, logProvider, tracers, IOController.DISABLED,
            new StoreCopyCheckPointMutex(), versionContextSupplier, clock);
    recoveryLife.add(scheduler);
    recoveryLife.add(recoveryCleanupCollector);
    recoveryLife.add(extensions);
    recoveryLife.add(indexProviderMap);
    recoveryLife.add(storageEngine);
    recoveryLife.add(new MissingTransactionLogsCheck(databaseLayout, config, fs, logFiles, recoveryLog));
    recoveryLife.add(logFiles);
    recoveryLife.add(transactionLogsRecovery);
    recoveryLife.add(transactionAppender);
    recoveryLife.add(checkPointer);
    try {
        recoveryLife.start();
        if (databaseHealth.isHealthy()) {
            checkPointer.forceCheckPoint(new SimpleTriggerInfo("Recovery completed."));
        }
    } finally {
        recoveryLife.shutdown();
    }
}
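The order in which components are added to recoveryLife matters: LifeSupport starts them in registration order and stops them in reverse, so the scheduler comes up first and outlives the checkpointer. A minimal sketch of that contract, assuming only the LifeSupport and LifecycleAdapter classes from org.neo4j.kernel.lifecycle; the print statements and component names are illustrative:

import org.neo4j.kernel.lifecycle.LifeSupport;
import org.neo4j.kernel.lifecycle.LifecycleAdapter;

public class StartStopOrderSketch {
    public static void main(String[] args) {
        LifeSupport life = new LifeSupport();
        life.add(new LifecycleAdapter() {
            @Override
            public void start() { System.out.println("scheduler-like: started first"); }
            @Override
            public void stop() { System.out.println("scheduler-like: stopped last"); }
        });
        life.add(new LifecycleAdapter() {
            @Override
            public void start() { System.out.println("checkpointer-like: started last"); }
            @Override
            public void stop() { System.out.println("checkpointer-like: stopped first"); }
        });
        life.start();    // starts components top-down
        life.shutdown(); // stops and shuts them down bottom-up
    }
}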
Use of org.neo4j.configuration.helpers.DatabaseReadOnlyChecker in project neo4j by neo4j.
The class ConsistencyCheckService, method runFullConsistencyCheck.
public Result runFullConsistencyCheck(DatabaseLayout databaseLayout, Config config,
        ProgressMonitorFactory progressFactory, final LogProvider logProvider,
        final FileSystemAbstraction fileSystem, final PageCache pageCache, DebugContext debugContext,
        Path reportDir, ConsistencyFlags consistencyFlags, PageCacheTracer pageCacheTracer,
        MemoryTracker memoryTracker) throws ConsistencyCheckIncompleteException {
    assertRecovered(databaseLayout, config, fileSystem, memoryTracker);
    Log outLog = logProvider.getLog(getClass());
    config.set(GraphDatabaseSettings.pagecache_warmup_enabled, false);
    LifeSupport life = new LifeSupport();
    final DefaultIdGeneratorFactory idGeneratorFactory = new DefaultIdGeneratorFactory(fileSystem, immediate(), databaseLayout.getDatabaseName());
    DatabaseReadOnlyChecker readOnlyChecker = readOnly();
    StoreFactory factory = new StoreFactory(databaseLayout, config, idGeneratorFactory, pageCache, fileSystem,
            logProvider, pageCacheTracer, readOnlyChecker);
    // Don't start the counts stores here as part of life; only shut them down. It's better to let FullCheck
    // start them and add its missing/broken detection where it can report to the user.
    ConsistencySummaryStatistics summary;
    final Path reportFile = chooseReportPath(reportDir);
    Log4jLogProvider reportLogProvider = new Log4jLogProvider(
            LogConfig.createBuilder(fileSystem, reportFile, Level.INFO).createOnDemand().withCategory(false).build());
    Log reportLog = reportLogProvider.getLog(getClass());
    Log log = new DuplicatingLog(outLog, reportLog);
    // Bootstrap kernel extensions
    Monitors monitors = new Monitors();
    JobScheduler jobScheduler = life.add(JobSchedulerFactory.createInitialisedScheduler());
    TokenHolders tokenHolders = new TokenHolders(
            new DelegatingTokenHolder(new ReadOnlyTokenCreator(), TokenHolder.TYPE_PROPERTY_KEY),
            new DelegatingTokenHolder(new ReadOnlyTokenCreator(), TokenHolder.TYPE_LABEL),
            new DelegatingTokenHolder(new ReadOnlyTokenCreator(), TokenHolder.TYPE_RELATIONSHIP_TYPE));
    final RecoveryCleanupWorkCollector workCollector = RecoveryCleanupWorkCollector.ignore();
    // We use the TOOL context because it's true, and also because it uses the 'single' operational mode,
    // which is important.
    DatabaseExtensions extensions = life.add(instantiateExtensions(databaseLayout, fileSystem, config,
            new SimpleLogService(logProvider), pageCache, jobScheduler, workCollector, TOOL, monitors, tokenHolders,
            pageCacheTracer, readOnlyChecker));
    DefaultIndexProviderMap indexes = life.add(new DefaultIndexProviderMap(extensions, config));
    try (NeoStores neoStores = factory.openAllNeoStores()) {
        long lastCommittedTransactionId = neoStores.getMetaDataStore().getLastCommittedTransactionId();
        CountsStoreManager countsStoreManager = life.add(new CountsStoreManager(pageCache, fileSystem,
                databaseLayout, pageCacheTracer, memoryTracker, lastCommittedTransactionId));
        RelationshipGroupDegreesStoreManager groupDegreesStoreManager = life.add(new RelationshipGroupDegreesStoreManager(
                pageCache, fileSystem, databaseLayout, pageCacheTracer, memoryTracker, lastCommittedTransactionId));
        // Load tokens before starting extensions, etc.
        try (var cursorContext = new CursorContext(pageCacheTracer.createPageCursorTracer(CONSISTENCY_TOKEN_READER_TAG))) {
            tokenHolders.setInitialTokens(StoreTokens.allReadableTokens(neoStores), cursorContext);
        }
        life.start();
        IndexStatisticsStore indexStatisticsStore = new IndexStatisticsStore(pageCache, databaseLayout,
                workCollector, readOnlyChecker, pageCacheTracer);
        life.add(indexStatisticsStore);
        int numberOfThreads = defaultConsistencyCheckThreadsNumber();
        DirectStoreAccess stores = new DirectStoreAccess(neoStores, indexes, tokenHolders, indexStatisticsStore, idGeneratorFactory);
        double memoryLimitLeewayFactor = config.get(GraphDatabaseInternalSettings.consistency_check_memory_limit_factor);
        FullCheck check = new FullCheck(progressFactory, numberOfThreads, consistencyFlags, config, debugContext,
                NodeBasedMemoryLimiter.defaultWithLeeway(memoryLimitLeewayFactor));
        summary = check.execute(pageCache, stores, countsStoreManager, groupDegreesStoreManager, null,
                pageCacheTracer, memoryTracker, log);
    } finally {
        life.shutdown();
        reportLogProvider.close();
    }
    if (!summary.isConsistent()) {
        log.warn("See '%s' for a detailed consistency report.", reportFile);
        return Result.failure(reportFile, summary);
    }
    return Result.success(reportFile, summary);
}
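Note the contrast with recovery above: the consistency checker opens everything with readOnly(), while recovery uses writable(). A minimal sketch of the two flavours; the static factories are the ones statically imported in the methods on this page, but the isReadOnly() accessor is an assumption about the checker's API:

import static org.neo4j.configuration.helpers.DatabaseReadOnlyChecker.readOnly;
import static org.neo4j.configuration.helpers.DatabaseReadOnlyChecker.writable;

import org.neo4j.configuration.helpers.DatabaseReadOnlyChecker;

public class CheckerFlavoursSketch {
    public static void main(String[] args) {
        DatabaseReadOnlyChecker recoveryChecker = writable();     // recovery must mutate stores
        DatabaseReadOnlyChecker consistencyChecker = readOnly();  // consistency check must not

        System.out.println("recovery read-only? " + recoveryChecker.isReadOnly());       // false (assumed accessor)
        System.out.println("consistency read-only? " + consistencyChecker.isReadOnly()); // true (assumed accessor)
    }
}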
Use of org.neo4j.configuration.helpers.DatabaseReadOnlyChecker in project neo4j by neo4j.
The class NodeStoreTest, method newNodeStore.
private NodeStore newNodeStore(FileSystemAbstraction fs, PageCache pageCache) {
    IdGeneratorFactory idGeneratorFactory = spy(new DefaultIdGeneratorFactory(fs, immediate(), databaseLayout.getDatabaseName()) {
        @Override
        protected IndexedIdGenerator instantiate(FileSystemAbstraction fs, PageCache pageCache,
                RecoveryCleanupWorkCollector recoveryCleanupWorkCollector, Path fileName, LongSupplier highIdSupplier,
                long maxValue, IdType idType, DatabaseReadOnlyChecker readOnlyChecker, Config config,
                CursorContext cursorContext, String databaseName, ImmutableSet<OpenOption> openOptions) {
            return spy(super.instantiate(fs, pageCache, recoveryCleanupWorkCollector, fileName, highIdSupplier,
                    maxValue, idType, readOnlyChecker, config, cursorContext, databaseName, openOptions));
        }
    });
    StoreFactory factory = new StoreFactory(databaseLayout, Config.defaults(), idGeneratorFactory, pageCache, fs,
            NullLogProvider.getInstance(), PageCacheTracer.NULL, writable());
    neoStores = factory.openAllNeoStores(true);
    nodeStore = neoStores.getNodeStore();
    return nodeStore;
}
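The factory and each IndexedIdGenerator it instantiates are wrapped in spy(...) so that the real store behaviour still runs while the test can verify interactions afterwards. A generic, self-contained sketch of that Mockito pattern, not the actual assertions from NodeStoreTest:

import static org.mockito.Mockito.spy;
import static org.mockito.Mockito.verify;

import java.util.ArrayList;
import java.util.List;

public class SpyPatternSketch {
    public static void main(String[] args) {
        List<String> spied = spy(new ArrayList<String>());
        spied.add("node-record");         // the real add() executes
        verify(spied).add("node-record"); // and the interaction is recorded and verifiable
        System.out.println(spied.size()); // 1 - state changed for real, unlike with mock()
    }
}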