Use of org.neo4j.kernel.impl.transaction.log.entry.VersionAwareLogEntryReader in the neo4j project.
From the class MigrationTestUtils, method removeCheckPointFromTxLog.
/**
 * Truncates the most recent transaction log so that its latest check point entry is removed.
 * If transactions were committed after the latest check point there is nothing to remove,
 * and the log files are left untouched.
 *
 * @param fileSystem file system abstraction used to locate and truncate the log file
 * @param workingDirectory directory containing the transaction log files
 * @throws IOException if reading or truncating the transaction logs fails
 */
public static void removeCheckPointFromTxLog(FileSystemAbstraction fileSystem, File workingDirectory) throws IOException {
    PhysicalLogFiles physicalLogFiles = new PhysicalLogFiles(workingDirectory, fileSystem);
    LogEntryReader<ReadableClosablePositionAwareChannel> entryReader = new VersionAwareLogEntryReader<>();
    LatestCheckPointFinder checkPointFinder = new LatestCheckPointFinder(physicalLogFiles, fileSystem, entryReader);
    LatestCheckPointFinder.LatestCheckPoint latest = checkPointFinder.find(physicalLogFiles.getHighestLogVersion());
    if (!latest.commitsAfterCheckPoint) {
        // We expect at least one check point to exist; fail loudly otherwise.
        assertNotNull(latest.checkPoint);
        // Cut the log file right where the check point entry begins.
        LogPosition position = latest.checkPoint.getLogPosition();
        File versionedLogFile = physicalLogFiles.getLogFileForVersion(position.getLogVersion());
        fileSystem.truncate(versionedLogFile, position.getByteOffset());
    }
    // Commits after the check point mean the work is already done.
}
Use of org.neo4j.kernel.impl.transaction.log.entry.VersionAwareLogEntryReader in the neo4j project.
From the class RecoveryCorruptedTransactionLogIT, method getLastReadablePosition.
/**
 * Scans the given transaction log file from its header onwards and returns the position
 * immediately after the last entry that could be read without error.
 * If even the log header is incomplete, position 0 of that log version is returned.
 *
 * @param logFile path of the transaction log file to scan
 * @return position just past the last readable log entry
 * @throws IOException if the log file cannot be opened or read
 */
private LogPosition getLastReadablePosition(Path logFile) throws IOException {
    VersionAwareLogEntryReader reader = new VersionAwareLogEntryReader(storageEngineFactory.commandReaderFactory());
    LogFile transactionLogFile = logFiles.getLogFile();
    long version = transactionLogFile.getLogVersion(logFile);
    LogPosition start = transactionLogFile.extractHeader(version).getStartPosition();
    try (ReadableLogChannel channel = openTransactionFileChannel(version, start)) {
        // Consume entries until the reader reports no more readable content.
        while (reader.readLogEntry(channel) != null) {
            // intentionally empty: we only need the reader's final position
        }
    } catch (IncompleteLogHeaderException e) {
        // Header itself is truncated: treat the entire file as unreadable.
        return new LogPosition(version, 0);
    }
    return reader.lastPosition();
}
Use of org.neo4j.kernel.impl.transaction.log.entry.VersionAwareLogEntryReader in the neo4j project.
From the class RecoveryCorruptedTransactionLogIT, method getLastReadablePosition (LogFile overload).
/**
 * Scans the highest-version transaction log of the given {@link LogFile} from its header
 * onwards and returns the position immediately after the last readable entry.
 *
 * @param logFile log file abstraction to read from
 * @return position just past the last readable log entry
 * @throws IOException if the log cannot be opened or read
 */
private LogPosition getLastReadablePosition(LogFile logFile) throws IOException {
    VersionAwareLogEntryReader reader = new VersionAwareLogEntryReader(storageEngineFactory.commandReaderFactory());
    long highestVersion = logFiles.getLogFile().getHighestLogVersion();
    LogPosition start = logFile.extractHeader(highestVersion).getStartPosition();
    try (ReadableLogChannel channel = logFile.getReader(start)) {
        // Consume entries until the reader reports no more readable content.
        while (reader.readLogEntry(channel) != null) {
            // intentionally empty: we only need the reader's final position
        }
    }
    return reader.lastPosition();
}
Use of org.neo4j.kernel.impl.transaction.log.entry.VersionAwareLogEntryReader in the neo4j project.
From the class LogVersionUpgradeCheckerIT, method appendCheckpoint.
/**
 * Writes a fake legacy check point record with the requested log entry version directly
 * into the active transaction log, optionally deleting any separate checkpoint files first.
 *
 * @param logEntryVersion log entry version byte to stamp on the fake record
 * @param removeCheckpointFile whether to delete existing checkpoint files beforehand
 * @throws IOException if log files cannot be built, listed, deleted, or written
 */
private void appendCheckpoint(byte logEntryVersion, boolean removeCheckpointFile) throws IOException {
    VersionAwareLogEntryReader entryReader = new VersionAwareLogEntryReader(StorageEngineFactory.defaultStorageEngine().commandReaderFactory());
    LogFiles txLogFiles = LogFilesBuilder.activeFilesBuilder(databaseLayout, fileSystem, pageCache)
            .withLogEntryReader(entryReader)
            .withStoreId(StoreId.UNKNOWN)
            .build();
    if (removeCheckpointFile) {
        // Drop any dedicated checkpoint files so only the fake record remains.
        var checkpointFiles = fileSystem.listFiles(txLogFiles.logFilesDirectory(),
                path -> path.getFileName().toString().startsWith(CHECKPOINT_FILE_PREFIX));
        for (Path checkpointFile : checkpointFiles) {
            fileSystem.deleteFile(checkpointFile);
        }
    }
    try (Lifespan ignored = new Lifespan(txLogFiles)) {
        LogFile logFile = txLogFiles.getLogFile();
        TransactionLogWriter writer = logFile.getTransactionLogWriter();
        var logChannel = writer.getChannel();
        LogPosition currentPosition = writer.getCurrentPosition();
        // Fake record: entry version byte, legacy check point opcode, then the position it points at.
        logChannel.put(logEntryVersion)
                .put(LEGACY_CHECK_POINT)
                .putLong(currentPosition.getLogVersion())
                .putLong(currentPosition.getByteOffset());
        logFile.flush();
    }
}
Use of org.neo4j.kernel.impl.transaction.log.entry.VersionAwareLogEntryReader in the neo4j project.
From the class Database, method start.
/**
 * Start the database and make it ready for transaction processing.
 * A database will automatically recover itself, if necessary, when started.
 * If the store files are obsolete (older than oldest supported version), then start will throw an exception.
 */
@Override
public synchronized void start() {
// Idempotent: a second start() call is a no-op.
if (started) {
return;
}
// Ensure we're initialized
init();
try {
// Upgrade the store before we begin
upgradeStore(databaseConfig, databasePageCache, otherDatabaseMemoryTracker);
// Check the tail of transaction logs and validate version
LogEntryReader logEntryReader = new VersionAwareLogEntryReader(storageEngineFactory.commandReaderFactory());
LogFiles logFiles = getLogFiles(logEntryReader);
// Register monitors up front so log-file, tail-scanner and reverse-cursor events
// occurring during recovery are reported to the internal logs.
databaseMonitors.addMonitorListener(new LoggingLogFileMonitor(msgLog));
databaseMonitors.addMonitorListener(new LoggingLogTailScannerMonitor(internalLogProvider.getLog(AbstractLogTailScanner.class)));
databaseMonitors.addMonitorListener(new ReverseTransactionCursorLoggingMonitor(internalLogProvider.getLog(ReversedSingleFileTransactionCursor.class)));
var pageCacheTracer = tracers.getPageCacheTracer();
boolean storageExists = storageEngineFactory.storageExists(fs, databaseLayout, databasePageCache);
validateStoreAndTxLogs(logFiles, pageCacheTracer, storageExists);
// Run recovery (if needed) before the storage engine and kernel modules are built.
performRecovery(fs, databasePageCache, tracers, databaseConfig, databaseLayout, storageEngineFactory, false, internalLogProvider, databaseMonitors, extensionFactories, Optional.of(logFiles), new RecoveryStartupChecker(startupController, namedDatabaseId), otherDatabaseMemoryTracker, clock);
// Build all modules and their services
DatabaseSchemaState databaseSchemaState = new DatabaseSchemaState(internalLogProvider);
idController.initialize(() -> kernelModule.kernelTransactions().get());
// !storageExists tells the engine to create the store from scratch.
storageEngine = storageEngineFactory.instantiate(fs, databaseLayout, databaseConfig, databasePageCache, tokenHolders, databaseSchemaState, constraintSemantics, indexProviderMap, lockService, idGeneratorFactory, idController, databaseHealth, internalLogProvider, recoveryCleanupWorkCollector, pageCacheTracer, !storageExists, readOnlyDatabaseChecker, otherDatabaseMemoryTracker);
MetadataProvider metadataProvider = storageEngine.metadataProvider();
databaseDependencies.satisfyDependency(metadataProvider);
// Recreate the logFiles after storage engine to get access to dependencies
logFiles = getLogFiles(logEntryReader);
// Lifecycle order matters: storage engine first, then its schema/token lifecycle, then log files.
life.add(storageEngine);
life.add(storageEngine.schemaAndTokensLifecycle());
life.add(logFiles);
// Token indexes
FullScanStoreView fullScanStoreView = new FullScanStoreView(lockService, storageEngine::newReader, databaseConfig, scheduler);
IndexStoreViewFactory indexStoreViewFactory = new IndexStoreViewFactory(databaseConfig, storageEngine::newReader, locks, fullScanStoreView, lockService, internalLogProvider);
// Schema indexes
IndexStatisticsStore indexStatisticsStore = new IndexStatisticsStore(databasePageCache, databaseLayout, recoveryCleanupWorkCollector, readOnlyDatabaseChecker, pageCacheTracer);
IndexingService indexingService = buildIndexingService(storageEngine, databaseSchemaState, indexStoreViewFactory, indexStatisticsStore, pageCacheTracer, otherDatabaseMemoryTracker);
databaseDependencies.satisfyDependency(storageEngine.countsAccessor());
versionContextSupplier.init(metadataProvider::getLastClosedTransactionId);
CheckPointerImpl.ForceOperation forceOperation = new DefaultForceOperation(indexingService, storageEngine);
// Transaction log module (appender, check pointer) built on top of the recreated log files.
DatabaseTransactionLogModule transactionLogModule = buildTransactionLogs(logFiles, databaseConfig, internalLogProvider, scheduler, forceOperation, logEntryReader, metadataProvider, databaseMonitors, databaseDependencies);
databaseTransactionEventListeners = new DatabaseTransactionEventListeners(databaseFacade, transactionEventListeners, namedDatabaseId);
life.add(databaseTransactionEventListeners);
final DatabaseKernelModule kernelModule = buildKernel(logFiles, transactionLogModule.transactionAppender(), indexingService, databaseSchemaState, storageEngine, metadataProvider, metadataProvider, databaseAvailabilityGuard, clock, indexStatisticsStore, leaseService);
kernelModule.satisfyDependencies(databaseDependencies);
// Do these assignments last so that we can ensure no cyclical dependencies exist
this.kernelModule = kernelModule;
databaseDependencies.satisfyDependency(databaseSchemaState);
databaseDependencies.satisfyDependency(logEntryReader);
databaseDependencies.satisfyDependency(storageEngine);
databaseDependencies.satisfyDependency(indexingService);
databaseDependencies.satisfyDependency(indexStoreViewFactory);
databaseDependencies.satisfyDependency(indexStatisticsStore);
databaseDependencies.satisfyDependency(indexProviderMap);
databaseDependencies.satisfyDependency(forceOperation);
databaseDependencies.satisfyDependency(new DatabaseEntityCounters(this.idGeneratorFactory, databaseDependencies.resolveDependency(CountsAccessor.class)));
// Query execution engine is initialized once the kernel is available.
var providerSpi = QueryEngineProvider.spi(internalLogProvider, databaseMonitors, scheduler, life, getKernel(), databaseConfig);
this.executionEngine = QueryEngineProvider.initialize(databaseDependencies, databaseFacade, engineProvider, isSystem(), providerSpi);
this.checkpointerLifecycle = new CheckpointerLifecycle(transactionLogModule.checkPointer(), databaseHealth, ioController);
life.add(databaseHealth);
life.add(databaseAvailabilityGuard);
life.add(databaseAvailability);
// Check pointer must be the last component started (and the first stopped).
life.setLast(checkpointerLifecycle);
databaseDependencies.resolveDependency(DbmsDiagnosticsManager.class).dumpDatabaseDiagnostics(this);
life.start();
registerUpgradeListener();
eventListeners.databaseStart(namedDatabaseId);
/*
 * At this point recovery has completed and the database is ready for use. Whatever panic might have
 * happened before has been healed. So we can safely set the kernel health to ok.
 * This right now has any real effect only in the case of internal restarts (for example, after a store copy).
 * Standalone instances will have to be restarted by the user, as is proper for all database panics.
 */
databaseHealth.healed();
started = true;
postStartupInit(storageExists);
} catch (Throwable e) {
// Delegates cleanup/rethrow policy for any startup failure to a single place.
handleStartupFailure(e);
}
}
Aggregations