Use of org.neo4j.kernel.impl.transaction.log.files.LogFiles in project neo4j by neo4j.
The class VersionAwareLogEntryReaderIT, method readTillTheEndOfNotPreallocatedFile:
@Test
@DisabledOnOs(OS.LINUX)
void readTillTheEndOfNotPreallocatedFile() throws IOException {
    LogFiles logFiles = LogFilesBuilder.builder(databaseLayout, fs)
            .withLogEntryReader(entryReader)
            .withLogVersionRepository(new SimpleLogVersionRepository())
            .withTransactionIdStore(new SimpleTransactionIdStore())
            .withStoreId(StoreId.UNKNOWN)
            .build();
    try (Lifespan lifespan = new Lifespan(logFiles)) {
        LogPosition logPosition = entryReader.lastPosition();
        assertEquals(0L, logPosition.getLogVersion());
        assertEquals(Files.size(logFiles.getLogFile().getHighestLogFile()), logPosition.getByteOffset());
    }
}
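For orientation, the builder calls above condense into a minimal standalone sketch. It assumes the same fixtures as the test (databaseLayout, fs, entryReader), uses only API calls that already appear in this snippet, and omits imports as elsewhere on this page; treat it as illustrative rather than canonical.

// Minimal sketch, assuming the test fixtures above. Lifespan starts the
// LogFiles component on construction and shuts it down when the
// try-with-resources block exits.
LogFiles logFiles = LogFilesBuilder.builder(databaseLayout, fs)
        .withLogEntryReader(entryReader)
        .withLogVersionRepository(new SimpleLogVersionRepository())
        .withTransactionIdStore(new SimpleTransactionIdStore())
        .withStoreId(StoreId.UNKNOWN)
        .build();
try (Lifespan ignored = new Lifespan(logFiles)) {
    // Path of the most recent transaction log file on disk:
    Path highest = logFiles.getLogFile().getHighestLogFile();
    System.out.println(highest + ": " + Files.size(highest) + " bytes");
}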
Use of org.neo4j.kernel.impl.transaction.log.files.LogFiles in project neo4j by neo4j.
The class TransactionRangeDiagnostics, method dump:
@Override
public void dump(DiagnosticsLogger logger) {
    LogFiles logFiles = database.getDependencyResolver().resolveDependency(LogFiles.class);
    try {
        logger.log("Transaction log files stored on file store: "
                + FileUtils.getFileStoreType(logFiles.logFilesDirectory()));
        dumpTransactionLogInformation(logger, logFiles.getLogFile());
        dumpCheckpointLogInformation(logger, logFiles.getCheckpointFile());
    } catch (Exception e) {
        logger.log("Error trying to dump transaction log files info.");
        logger.log(Exceptions.stringify(e));
    }
}
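As a usage note, dump only needs something that can accept log lines. Below is a small sketch assuming DiagnosticsLogger is satisfiable by a single log(String) method (an assumption about the interface, not confirmed by this snippet), with 'diagnostics' standing in for an already-constructed TransactionRangeDiagnostics instance.

// Hypothetical harness: route the diagnostics output to stdout.
// Assumes DiagnosticsLogger exposes exactly one method, log(String).
DiagnosticsLogger stdout = message -> System.out.println(message);
diagnostics.dump(stdout);  // 'diagnostics' is a placeholder instance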
Use of org.neo4j.kernel.impl.transaction.log.files.LogFiles in project neo4j by neo4j.
The class Database, method start:
/**
* Start the database and make it ready for transaction processing.
* A database will automatically recover itself, if necessary, when started.
* If the store files are obsolete (older than the oldest supported version), then start will throw an exception.
*/
@Override
public synchronized void start() {
    if (started) {
        return;
    }
    // Ensure we're initialized
    init();
    try {
        // Upgrade the store before we begin
        upgradeStore(databaseConfig, databasePageCache, otherDatabaseMemoryTracker);

        // Check the tail of transaction logs and validate version
        LogEntryReader logEntryReader = new VersionAwareLogEntryReader(storageEngineFactory.commandReaderFactory());
        LogFiles logFiles = getLogFiles(logEntryReader);

        databaseMonitors.addMonitorListener(new LoggingLogFileMonitor(msgLog));
        databaseMonitors.addMonitorListener(
                new LoggingLogTailScannerMonitor(internalLogProvider.getLog(AbstractLogTailScanner.class)));
        databaseMonitors.addMonitorListener(
                new ReverseTransactionCursorLoggingMonitor(internalLogProvider.getLog(ReversedSingleFileTransactionCursor.class)));

        var pageCacheTracer = tracers.getPageCacheTracer();
        boolean storageExists = storageEngineFactory.storageExists(fs, databaseLayout, databasePageCache);
        validateStoreAndTxLogs(logFiles, pageCacheTracer, storageExists);

        performRecovery(fs, databasePageCache, tracers, databaseConfig, databaseLayout, storageEngineFactory, false,
                internalLogProvider, databaseMonitors, extensionFactories, Optional.of(logFiles),
                new RecoveryStartupChecker(startupController, namedDatabaseId), otherDatabaseMemoryTracker, clock);

        // Build all modules and their services
        DatabaseSchemaState databaseSchemaState = new DatabaseSchemaState(internalLogProvider);
        idController.initialize(() -> kernelModule.kernelTransactions().get());
        storageEngine = storageEngineFactory.instantiate(fs, databaseLayout, databaseConfig, databasePageCache,
                tokenHolders, databaseSchemaState, constraintSemantics, indexProviderMap, lockService,
                idGeneratorFactory, idController, databaseHealth, internalLogProvider, recoveryCleanupWorkCollector,
                pageCacheTracer, !storageExists, readOnlyDatabaseChecker, otherDatabaseMemoryTracker);
        MetadataProvider metadataProvider = storageEngine.metadataProvider();
        databaseDependencies.satisfyDependency(metadataProvider);

        // Recreate the logFiles after the storage engine is instantiated, to get access to its dependencies
        logFiles = getLogFiles(logEntryReader);

        life.add(storageEngine);
        life.add(storageEngine.schemaAndTokensLifecycle());
        life.add(logFiles);

        // Token indexes
        FullScanStoreView fullScanStoreView =
                new FullScanStoreView(lockService, storageEngine::newReader, databaseConfig, scheduler);
        IndexStoreViewFactory indexStoreViewFactory = new IndexStoreViewFactory(databaseConfig,
                storageEngine::newReader, locks, fullScanStoreView, lockService, internalLogProvider);

        // Schema indexes
        IndexStatisticsStore indexStatisticsStore = new IndexStatisticsStore(databasePageCache, databaseLayout,
                recoveryCleanupWorkCollector, readOnlyDatabaseChecker, pageCacheTracer);
        IndexingService indexingService = buildIndexingService(storageEngine, databaseSchemaState,
                indexStoreViewFactory, indexStatisticsStore, pageCacheTracer, otherDatabaseMemoryTracker);

        databaseDependencies.satisfyDependency(storageEngine.countsAccessor());
        versionContextSupplier.init(metadataProvider::getLastClosedTransactionId);

        CheckPointerImpl.ForceOperation forceOperation = new DefaultForceOperation(indexingService, storageEngine);
        DatabaseTransactionLogModule transactionLogModule = buildTransactionLogs(logFiles, databaseConfig,
                internalLogProvider, scheduler, forceOperation, logEntryReader, metadataProvider, databaseMonitors,
                databaseDependencies);

        databaseTransactionEventListeners =
                new DatabaseTransactionEventListeners(databaseFacade, transactionEventListeners, namedDatabaseId);
        life.add(databaseTransactionEventListeners);

        final DatabaseKernelModule kernelModule = buildKernel(logFiles, transactionLogModule.transactionAppender(),
                indexingService, databaseSchemaState, storageEngine, metadataProvider, metadataProvider,
                databaseAvailabilityGuard, clock, indexStatisticsStore, leaseService);
        kernelModule.satisfyDependencies(databaseDependencies);

        // Do these assignments last so that we can ensure no cyclical dependencies exist
        this.kernelModule = kernelModule;
        databaseDependencies.satisfyDependency(databaseSchemaState);
        databaseDependencies.satisfyDependency(logEntryReader);
        databaseDependencies.satisfyDependency(storageEngine);
        databaseDependencies.satisfyDependency(indexingService);
        databaseDependencies.satisfyDependency(indexStoreViewFactory);
        databaseDependencies.satisfyDependency(indexStatisticsStore);
        databaseDependencies.satisfyDependency(indexProviderMap);
        databaseDependencies.satisfyDependency(forceOperation);
        databaseDependencies.satisfyDependency(new DatabaseEntityCounters(this.idGeneratorFactory,
                databaseDependencies.resolveDependency(CountsAccessor.class)));

        var providerSpi = QueryEngineProvider.spi(internalLogProvider, databaseMonitors, scheduler, life,
                getKernel(), databaseConfig);
        this.executionEngine = QueryEngineProvider.initialize(databaseDependencies, databaseFacade, engineProvider,
                isSystem(), providerSpi);
        this.checkpointerLifecycle =
                new CheckpointerLifecycle(transactionLogModule.checkPointer(), databaseHealth, ioController);

        life.add(databaseHealth);
        life.add(databaseAvailabilityGuard);
        life.add(databaseAvailability);
        life.setLast(checkpointerLifecycle);

        databaseDependencies.resolveDependency(DbmsDiagnosticsManager.class).dumpDatabaseDiagnostics(this);
        life.start();
        registerUpgradeListener();
        eventListeners.databaseStart(namedDatabaseId);

        /*
         * At this point recovery has completed and the database is ready for use. Whatever panic might have
         * happened before has been healed. So we can safely set the kernel health to ok.
         * Right now this has a real effect only in the case of internal restarts (for example, after a store copy).
         * Standalone instances will have to be restarted by the user, as is proper for all database panics.
         */
        databaseHealth.healed();
        started = true;
        postStartupInit(storageExists);
    } catch (Throwable e) {
        handleStartupFailure(e);
    }
}
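Most of the wiring above funnels into the life (LifeSupport) container: components start in the order they were added, setLast forces the checkpointer to start after everything else, and shutdown runs in reverse order. Below is a minimal sketch of that idiom, assuming LifecycleAdapter is available as a no-op Lifecycle base class; the two components are hypothetical stand-ins, not the real modules.

// Sketch of the LifeSupport idiom used by Database.start().
LifeSupport life = new LifeSupport();
life.add(new LifecycleAdapter() {
    @Override
    public void start() {
        System.out.println("storage-engine-like component starts first");
    }
});
life.setLast(new LifecycleAdapter() {
    @Override
    public void start() {
        System.out.println("checkpointer-like component starts last");
    }
});
life.start();     // starts components in registration order
life.shutdown();  // stops them in reverse order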
Use of org.neo4j.kernel.impl.transaction.log.files.LogFiles in project neo4j by neo4j.
The class Recovery, method performRecovery:
/**
 * Performs recovery of the database described by the provided layout.
 *
 * @param fs database filesystem
 * @param pageCache page cache used to perform database recovery.
 * @param tracers underlying operation tracers
 * @param config custom configuration
 * @param databaseLayout layout of the database to recover.
 * @param storageEngineFactory {@link StorageEngineFactory} for the storage to recover.
 * @param logProvider log provider
 * @param globalMonitors global server monitors
 * @param extensionFactories extension factories for extensions that should participate in recovery
 * @param providedLogFiles log files from the database
 * @param forceRunRecovery forces recovery to run even if the usual checks indicate that it's not required.
 * In specific cases, such as after a store copy, there is always a need to run recovery, or at least to start
 * the database, checkpoint and shut down, even if the normal "is recovery required" check says that recovery
 * isn't required.
 * @throws IOException on any unexpected I/O exception encountered during recovery.
 */
public static void performRecovery(FileSystemAbstraction fs, PageCache pageCache, DatabaseTracers tracers,
        Config config, DatabaseLayout databaseLayout, StorageEngineFactory storageEngineFactory,
        boolean forceRunRecovery, LogProvider logProvider, Monitors globalMonitors,
        Iterable<ExtensionFactory<?>> extensionFactories, Optional<LogFiles> providedLogFiles,
        RecoveryStartupChecker startupChecker, MemoryTracker memoryTracker, Clock clock) throws IOException {
    Log recoveryLog = logProvider.getLog(Recovery.class);
    if (!forceRunRecovery
            && !isRecoveryRequired(fs, pageCache, databaseLayout, storageEngineFactory, config, providedLogFiles, memoryTracker)) {
        return;
    }
    checkAllFilesPresence(databaseLayout, fs, pageCache, storageEngineFactory);
    LifeSupport recoveryLife = new LifeSupport();
    Monitors monitors = new Monitors(globalMonitors, logProvider);
    DatabasePageCache databasePageCache = new DatabasePageCache(pageCache, IOController.DISABLED);
    SimpleLogService logService = new SimpleLogService(logProvider);
    VersionAwareLogEntryReader logEntryReader = new VersionAwareLogEntryReader(storageEngineFactory.commandReaderFactory());
    DatabaseReadOnlyChecker readOnlyChecker = writable();
    DatabaseSchemaState schemaState = new DatabaseSchemaState(logProvider);
    JobScheduler scheduler = JobSchedulerFactory.createInitialisedScheduler();
    VersionContextSupplier versionContextSupplier = EmptyVersionContextSupplier.EMPTY;
    DatabaseHealth databaseHealth = new DatabaseHealth(PanicEventGenerator.NO_OP, recoveryLog);
    TokenHolders tokenHolders = new TokenHolders(
            new DelegatingTokenHolder(new ReadOnlyTokenCreator(), TYPE_PROPERTY_KEY),
            new DelegatingTokenHolder(new ReadOnlyTokenCreator(), TYPE_LABEL),
            new DelegatingTokenHolder(new ReadOnlyTokenCreator(), TYPE_RELATIONSHIP_TYPE));
    RecoveryCleanupWorkCollector recoveryCleanupCollector =
            new GroupingRecoveryCleanupWorkCollector(scheduler, INDEX_CLEANUP, INDEX_CLEANUP_WORK, databaseLayout.getDatabaseName());
    DatabaseExtensions extensions = instantiateRecoveryExtensions(databaseLayout, fs, config, logService,
            databasePageCache, scheduler, DbmsInfo.TOOL, monitors, tokenHolders, recoveryCleanupCollector,
            readOnlyChecker, extensionFactories, tracers.getPageCacheTracer());
    DefaultIndexProviderMap indexProviderMap = new DefaultIndexProviderMap(extensions, config);
    StorageEngine storageEngine = storageEngineFactory.instantiate(fs, databaseLayout, config, databasePageCache,
            tokenHolders, schemaState, getConstraintSemantics(), indexProviderMap, NO_LOCK_SERVICE,
            new DefaultIdGeneratorFactory(fs, recoveryCleanupCollector, databaseLayout.getDatabaseName()),
            new DefaultIdController(), databaseHealth, logService.getInternalLogProvider(), recoveryCleanupCollector,
            tracers.getPageCacheTracer(), true, readOnlyChecker, memoryTracker);

    // Schema indexes
    FullScanStoreView fullScanStoreView = new FullScanStoreView(NO_LOCK_SERVICE, storageEngine::newReader, config, scheduler);
    IndexStoreViewFactory indexStoreViewFactory = new IndexStoreViewFactory(config, storageEngine::newReader,
            NO_LOCKS, fullScanStoreView, NO_LOCK_SERVICE, logProvider);
    IndexStatisticsStore indexStatisticsStore = new IndexStatisticsStore(databasePageCache, databaseLayout,
            recoveryCleanupCollector, readOnlyChecker, tracers.getPageCacheTracer());
    IndexingService indexingService = Database.buildIndexingService(storageEngine, schemaState, indexStoreViewFactory,
            indexStatisticsStore, config, scheduler, indexProviderMap, tokenHolders, logProvider, logProvider,
            monitors.newMonitor(IndexingService.Monitor.class), tracers.getPageCacheTracer(), memoryTracker,
            databaseLayout.getDatabaseName(), readOnlyChecker);

    MetadataProvider metadataProvider = storageEngine.metadataProvider();
    Dependencies dependencies = new Dependencies();
    dependencies.satisfyDependencies(databaseLayout, config, databasePageCache, fs, logProvider, tokenHolders,
            schemaState, getConstraintSemantics(), NO_LOCK_SERVICE, databaseHealth,
            new DefaultIdGeneratorFactory(fs, recoveryCleanupCollector, databaseLayout.getDatabaseName()),
            new DefaultIdController(), readOnlyChecker, versionContextSupplier, logService, metadataProvider);

    LogFiles logFiles = LogFilesBuilder.builder(databaseLayout, fs)
            .withLogEntryReader(logEntryReader)
            .withConfig(config)
            .withDependencies(dependencies)
            .withMemoryTracker(memoryTracker)
            .build();

    boolean failOnCorruptedLogFiles = config.get(GraphDatabaseInternalSettings.fail_on_corrupted_log_files);
    validateStoreId(logFiles, storageEngine.getStoreId(), config);

    TransactionMetadataCache metadataCache = new TransactionMetadataCache();
    PhysicalLogicalTransactionStore transactionStore =
            new PhysicalLogicalTransactionStore(logFiles, metadataCache, logEntryReader, monitors, failOnCorruptedLogFiles);
    BatchingTransactionAppender transactionAppender =
            new BatchingTransactionAppender(logFiles, LogRotation.NO_ROTATION, metadataCache, metadataProvider, databaseHealth);

    LifeSupport schemaLife = new LifeSupport();
    schemaLife.add(storageEngine.schemaAndTokensLifecycle());
    schemaLife.add(indexingService);

    var doParallelRecovery = config.get(GraphDatabaseInternalSettings.do_parallel_recovery);
    TransactionLogsRecovery transactionLogsRecovery = transactionLogRecovery(fs, metadataProvider,
            monitors.newMonitor(RecoveryMonitor.class), monitors.newMonitor(RecoveryStartInformationProvider.Monitor.class),
            logFiles, storageEngine, transactionStore, metadataProvider, schemaLife, databaseLayout,
            failOnCorruptedLogFiles, recoveryLog, startupChecker, tracers.getPageCacheTracer(), memoryTracker,
            doParallelRecovery);

    CheckPointerImpl.ForceOperation forceOperation = new DefaultForceOperation(indexingService, storageEngine);
    var checkpointAppender = logFiles.getCheckpointFile().getCheckpointAppender();
    CheckPointerImpl checkPointer = new CheckPointerImpl(metadataProvider, RecoveryThreshold.INSTANCE, forceOperation,
            LogPruning.NO_PRUNING, checkpointAppender, databaseHealth, logProvider, tracers, IOController.DISABLED,
            new StoreCopyCheckPointMutex(), versionContextSupplier, clock);

    recoveryLife.add(scheduler);
    recoveryLife.add(recoveryCleanupCollector);
    recoveryLife.add(extensions);
    recoveryLife.add(indexProviderMap);
    recoveryLife.add(storageEngine);
    recoveryLife.add(new MissingTransactionLogsCheck(databaseLayout, config, fs, logFiles, recoveryLog));
    recoveryLife.add(logFiles);
    recoveryLife.add(transactionLogsRecovery);
    recoveryLife.add(transactionAppender);
    recoveryLife.add(checkPointer);
    try {
        recoveryLife.start();
        if (databaseHealth.isHealthy()) {
            checkPointer.forceCheckPoint(new SimpleTriggerInfo("Recovery completed."));
        }
    } finally {
        recoveryLife.shutdown();
    }
}
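For scale, here is a hedged sketch of what a call site could look like. Everything beyond the signature above is an assumption: the *.EMPTY constants, Config.defaults(), StorageEngineFactory.selectStorageEngine(), NullLogProvider and EmptyMemoryTracker are guesses at convenient defaults in the surrounding neo4j API, and fs, pageCache and databaseLayout must be pre-built by the caller.

// Hypothetical call site; argument defaults are assumptions, not
// confirmed entry points of the neo4j API.
Recovery.performRecovery(
        fs,                                   // FileSystemAbstraction, pre-built
        pageCache,                            // PageCache, pre-built
        DatabaseTracers.EMPTY,                // assumed no-op tracers
        Config.defaults(),                    // assumed default configuration
        databaseLayout,                       // layout of the database to recover
        StorageEngineFactory.selectStorageEngine(),  // assumed default factory
        false,                                // do not force recovery
        NullLogProvider.getInstance(),        // assumed silent log provider
        new Monitors(),
        Collections.emptyList(),              // no recovery extensions
        Optional.empty(),                     // let recovery build its own LogFiles
        RecoveryStartupChecker.EMPTY_CHECKER, // assumed no-op startup checker
        EmptyMemoryTracker.INSTANCE,          // assumed no-op memory tracker
        Clock.systemUTC());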
Use of org.neo4j.kernel.impl.transaction.log.files.LogFiles in project neo4j by neo4j.
The class TransactionRangeDiagnosticsTest, method logs:
private static LogFiles logs(ThrowingConsumer<LogFile, IOException> transactionLogs,
        ThrowingConsumer<CheckpointFile, IOException> checkpointLogs) throws IOException {
    LogFiles files = mock(TransactionLogFiles.class);
    when(files.logFilesDirectory()).thenReturn(Path.of("."));
    LogFile transactionFiles = mock(LogFile.class);
    when(files.getLogFile()).thenReturn(transactionFiles);
    transactionLogs.accept(transactionFiles);
    CheckpointFile checkpointFiles = mock(CheckpointFile.class);
    when(files.getCheckpointFile()).thenReturn(checkpointFiles);
    checkpointLogs.accept(checkpointFiles);
    return files;
}
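A sketch of how a test could consume this helper; the stubbing inside the consumers is left empty because only the mock wiring above is confirmed, and any LogFile or CheckpointFile methods a real test would stub are up to that test.

// Hypothetical usage: no extra stubbing, just the wiring from logs().
LogFiles logFiles = logs(
        transactionLogFile -> { /* stub LogFile methods here as needed */ },
        checkpointFile -> { /* stub CheckpointFile methods here as needed */ });
assertEquals(Path.of("."), logFiles.logFilesDirectory());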