Example use of org.neo4j.monitoring.Monitors in project neo4j (by neo4j):
class DatabaseRecoveryIT, method shouldSeeTheSameRecordsAtCheckpointAsAfterReverseRecovery.
@Test
void shouldSeeTheSameRecordsAtCheckpointAsAfterReverseRecovery() throws Exception {
// given: an impermanent db on an ephemeral file system, with some random data checkpointed
EphemeralFileSystemAbstraction fs = new EphemeralFileSystemAbstraction();
managementService = new TestDatabaseManagementServiceBuilder(directory.homePath()).setFileSystem(fs).impermanent().build();
GraphDatabaseService db = managementService.database(DEFAULT_DATABASE_NAME);
produceRandomGraphUpdates(db, 100);
checkPoint(db);
// Snapshot the file system as it was right after the checkpoint — this is the expected store state.
EphemeralFileSystemAbstraction checkPointFs = fs.snapshot();
// when: more updates are applied and flushed, but NOT checkpointed, and then the db "crashes"
// (we snapshot the dirty file system and shut down, so the snapshot needs recovery).
produceRandomGraphUpdates(db, 100);
flush(db);
EphemeralFileSystemAbstraction crashedFs = fs.snapshot();
managementService.shutdown();
fs.close();
Dependencies dependencies = new Dependencies();
Monitors monitors;
AtomicReference<EphemeralFileSystemAbstraction> reversedFs;
try (PageCache pageCache = pageCacheExtension.getPageCache(crashedFs)) {
// Satisfy the page cache as an external dependency so the db (and recovery) reuses OUR
// page cache instance — the monitor below relies on flushing this exact instance.
dependencies.satisfyDependencies(pageCache);
monitors = new Monitors();
reversedFs = new AtomicReference<>();
// Capture the store state at the moment reverse recovery completes, before forward recovery runs.
monitors.addMonitorListener(new RecoveryMonitor() {
@Override
public void reverseStoreRecoveryCompleted(long checkpointTxId) {
try {
// Flush the page cache, which will be fished out of the GlobalModule at the point of constructing the database
pageCache.flushAndForce();
} catch (IOException e) {
throw new UncheckedIOException(e);
}
// The stores should now be equal in content to the db as it was right after the checkpoint.
// Grab a snapshot so that we can compare later.
reversedFs.set(crashedFs.snapshot());
}
});
// Starting the db on the crashed file system triggers recovery (and our monitor); we only
// need the side effect, so shut down immediately afterwards.
DatabaseManagementService managementService = new TestDatabaseManagementServiceBuilder(directory.homePath()).setFileSystem(crashedFs).setExternalDependencies(dependencies).setMonitors(monitors).impermanent().build();
managementService.shutdown();
}
// then
// NOTE(review): fs was already closed above before recovery; this second close is redundant
// but harmless since EphemeralFileSystemAbstraction tolerates repeated close — confirm.
fs.close();
try {
// Here we verify that the neostore contents, record by record are exactly the same when comparing
// the store as it was right after the checkpoint with the store as it was right after reverse recovery completed.
assertSameStoreContents(checkPointFs, reversedFs.get(), databaseLayout);
} finally {
IOUtils.closeAll(checkPointFs, reversedFs.get());
}
}
Example use of org.neo4j.monitoring.Monitors in project neo4j (by neo4j):
class KernelTransactionsTest, method newKernelTransactions.
/**
 * Builds and starts a {@link KernelTransactions} instance wired to the given (mostly mocked)
 * collaborators, for use in tests.
 *
 * @param locks lock manager handed to the transactions
 * @param storageEngine storage engine (typically a mock) backing the transactions
 * @param commitProcess commit process used when transactions commit
 * @param testKernelTransactions when {@code true}, builds the test subclass via
 *        {@code createTestTransactions}; otherwise the production-like {@code createTransactions}
 * @param config database configuration, also used to construct the tracers
 * @return a started {@link KernelTransactions}; callers are responsible for stopping it
 */
private KernelTransactions newKernelTransactions(Locks locks, StorageEngine storageEngine, TransactionCommitProcess commitProcess, boolean testKernelTransactions, Config config) {
LifeSupport life = new LifeSupport();
life.start();
// Stub the tx-id store so the "last committed transaction" is a fixed baseline (id 0).
TransactionIdStore transactionIdStore = mock(TransactionIdStore.class);
when(transactionIdStore.getLastCommittedTransaction()).thenReturn(new TransactionId(0, 0, 0));
// "null" tracer name yields no-op tracing; a fresh Monitors keeps listeners isolated per test.
Tracers tracers = new Tracers("null", NullLog.getInstance(), new Monitors(), mock(JobScheduler.class), clock, config);
final DatabaseTracers databaseTracers = new DatabaseTracers(tracers);
KernelTransactions transactions;
if (testKernelTransactions) {
transactions = createTestTransactions(storageEngine, commitProcess, transactionIdStore, databaseTracers, locks, clock, databaseAvailabilityGuard);
} else {
transactions = createTransactions(storageEngine, commitProcess, transactionIdStore, databaseTracers, locks, clock, databaseAvailabilityGuard, config);
}
// Start it here so tests receive a ready-to-use instance.
transactions.start();
return transactions;
}
Example use of org.neo4j.monitoring.Monitors in project neo4j (by neo4j):
class LuceneIndexProviderTest, method createIndexProvider.
/**
 * Creates a writable {@link LuceneIndexProvider} for tests, backed by in-memory Lucene
 * directories laid out under the test home directory, with a fresh {@link Monitors}.
 *
 * @param config database configuration passed through to the provider
 * @return a new provider instance
 */
private LuceneIndexProvider createIndexProvider(Config config) {
return new LuceneIndexProvider(
fileSystem,
new DirectoryFactory.InMemoryDirectoryFactory(),
directoriesByProvider(testDir.homePath()),
new Monitors(),
config,
writable());
}
Example use of org.neo4j.monitoring.Monitors in project neo4j (by neo4j):
class Recovery, method performRecovery (convenience overload).
/**
 * Performs recovery of database described by provided layout.
 * <b>Transaction logs should be located in their default location and any provided custom location is ignored.</b>
 * If recovery is not required nothing will be done to the database or logs.
 *
 * @param fs database filesystem
 * @param pageCache page cache used to perform database recovery.
 * @param tracers underlying operations tracer
 * @param config custom configuration
 * @param databaseLayout database to recover layout.
 * @param storageEngineFactory storage engine factory for the storage to recover
 * @param forceRunRecovery to force recovery to run even if the usual checks indicates that it's not required.
 * In specific cases, like after store copy there's always a need for doing a recovery or at least to start the db, checkpoint and shut down,
 * even if the normal "is recovery required" checks says that recovery isn't required.
 * @param memoryTracker memory tracker for allocations made during recovery
 * @throws IOException on any unexpected I/O exception encountered during recovery.
 */
public static void performRecovery(FileSystemAbstraction fs, PageCache pageCache, DatabaseTracers tracers, Config config, DatabaseLayout databaseLayout, StorageEngineFactory storageEngineFactory, boolean forceRunRecovery, MemoryTracker memoryTracker) throws IOException {
requireNonNull(fs);
requireNonNull(pageCache);
requireNonNull(config);
requireNonNull(databaseLayout);
requireNonNull(storageEngineFactory);
// remove any custom logical logs location so logs are resolved from their default location
Config recoveryConfig = Config.newBuilder().fromConfig(config).set(GraphDatabaseSettings.transaction_logs_root_path, null).build();
// BUG FIX: the caller-supplied storageEngineFactory was previously null-checked but then
// ignored in favour of StorageEngineFactory.defaultStorageEngine(); pass it through instead.
performRecovery(fs, pageCache, tracers, recoveryConfig, databaseLayout, storageEngineFactory, forceRunRecovery, NullLogProvider.getInstance(), new Monitors(), loadExtensions(), Optional.empty(), EMPTY_CHECKER, memoryTracker, systemClock());
}
Example use of org.neo4j.monitoring.Monitors in project neo4j (by neo4j):
class Recovery, method performRecovery (full overload).
/**
 * Performs recovery of database described by provided layout.
 *
 * @param fs database filesystem
 * @param pageCache page cache used to perform database recovery.
 * @param tracers underlying operation tracers
 * @param config custom configuration
 * @param databaseLayout database to recover layout.
 * @param storageEngineFactory {@link StorageEngineFactory} for the storage to recover.
 * @param logProvider log provider
 * @param globalMonitors global server monitors
 * @param extensionFactories extension factories for extensions that should participate in recovery
 * @param providedLogFiles log files from database
 * @param forceRunRecovery to force recovery to run even if the usual checks indicates that it's not required.
 * In specific cases, like after store copy there's always a need for doing a recovery or at least to start the db, checkpoint and shut down,
 * even if the normal "is recovery required" checks says that recovery isn't required.
 * @param startupChecker checker consulted during recovery startup (e.g. to abort on shutdown)
 * @param memoryTracker memory tracker for allocations made during recovery
 * @param clock clock used for checkpointing timestamps
 * @throws IOException on any unexpected I/O exception encountered during recovery.
 */
public static void performRecovery(FileSystemAbstraction fs, PageCache pageCache, DatabaseTracers tracers, Config config, DatabaseLayout databaseLayout, StorageEngineFactory storageEngineFactory, boolean forceRunRecovery, LogProvider logProvider, Monitors globalMonitors, Iterable<ExtensionFactory<?>> extensionFactories, Optional<LogFiles> providedLogFiles, RecoveryStartupChecker startupChecker, MemoryTracker memoryTracker, Clock clock) throws IOException {
Log recoveryLog = logProvider.getLog(Recovery.class);
// Fast exit: nothing to do unless recovery is forced or actually required.
if (!forceRunRecovery && !isRecoveryRequired(fs, pageCache, databaseLayout, storageEngineFactory, config, providedLogFiles, memoryTracker)) {
return;
}
checkAllFilesPresence(databaseLayout, fs, pageCache, storageEngineFactory);
// --- Core infrastructure: lifecycle, monitors, page cache, logging, scheduling ---
LifeSupport recoveryLife = new LifeSupport();
Monitors monitors = new Monitors(globalMonitors, logProvider);
DatabasePageCache databasePageCache = new DatabasePageCache(pageCache, IOController.DISABLED);
SimpleLogService logService = new SimpleLogService(logProvider);
VersionAwareLogEntryReader logEntryReader = new VersionAwareLogEntryReader(storageEngineFactory.commandReaderFactory());
// Recovery must be able to write, regardless of the db's configured read-only state.
DatabaseReadOnlyChecker readOnlyChecker = writable();
DatabaseSchemaState schemaState = new DatabaseSchemaState(logProvider);
JobScheduler scheduler = JobSchedulerFactory.createInitialisedScheduler();
VersionContextSupplier versionContextSupplier = EmptyVersionContextSupplier.EMPTY;
DatabaseHealth databaseHealth = new DatabaseHealth(PanicEventGenerator.NO_OP, recoveryLog);
// Tokens are read-only during recovery — recovery replays commands, it never creates tokens.
TokenHolders tokenHolders = new TokenHolders(new DelegatingTokenHolder(new ReadOnlyTokenCreator(), TYPE_PROPERTY_KEY), new DelegatingTokenHolder(new ReadOnlyTokenCreator(), TYPE_LABEL), new DelegatingTokenHolder(new ReadOnlyTokenCreator(), TYPE_RELATIONSHIP_TYPE));
RecoveryCleanupWorkCollector recoveryCleanupCollector = new GroupingRecoveryCleanupWorkCollector(scheduler, INDEX_CLEANUP, INDEX_CLEANUP_WORK, databaseLayout.getDatabaseName());
// --- Extensions and storage engine ---
DatabaseExtensions extensions = instantiateRecoveryExtensions(databaseLayout, fs, config, logService, databasePageCache, scheduler, DbmsInfo.TOOL, monitors, tokenHolders, recoveryCleanupCollector, readOnlyChecker, extensionFactories, tracers.getPageCacheTracer());
DefaultIndexProviderMap indexProviderMap = new DefaultIndexProviderMap(extensions, config);
StorageEngine storageEngine = storageEngineFactory.instantiate(fs, databaseLayout, config, databasePageCache, tokenHolders, schemaState, getConstraintSemantics(), indexProviderMap, NO_LOCK_SERVICE, new DefaultIdGeneratorFactory(fs, recoveryCleanupCollector, databaseLayout.getDatabaseName()), new DefaultIdController(), databaseHealth, logService.getInternalLogProvider(), recoveryCleanupCollector, tracers.getPageCacheTracer(), true, readOnlyChecker, memoryTracker);
// Schema indexes
FullScanStoreView fullScanStoreView = new FullScanStoreView(NO_LOCK_SERVICE, storageEngine::newReader, config, scheduler);
IndexStoreViewFactory indexStoreViewFactory = new IndexStoreViewFactory(config, storageEngine::newReader, NO_LOCKS, fullScanStoreView, NO_LOCK_SERVICE, logProvider);
IndexStatisticsStore indexStatisticsStore = new IndexStatisticsStore(databasePageCache, databaseLayout, recoveryCleanupCollector, readOnlyChecker, tracers.getPageCacheTracer());
IndexingService indexingService = Database.buildIndexingService(storageEngine, schemaState, indexStoreViewFactory, indexStatisticsStore, config, scheduler, indexProviderMap, tokenHolders, logProvider, logProvider, monitors.newMonitor(IndexingService.Monitor.class), tracers.getPageCacheTracer(), memoryTracker, databaseLayout.getDatabaseName(), readOnlyChecker);
MetadataProvider metadataProvider = storageEngine.metadataProvider();
// --- Transaction log files and stores ---
Dependencies dependencies = new Dependencies();
dependencies.satisfyDependencies(databaseLayout, config, databasePageCache, fs, logProvider, tokenHolders, schemaState, getConstraintSemantics(), NO_LOCK_SERVICE, databaseHealth, new DefaultIdGeneratorFactory(fs, recoveryCleanupCollector, databaseLayout.getDatabaseName()), new DefaultIdController(), readOnlyChecker, versionContextSupplier, logService, metadataProvider);
LogFiles logFiles = LogFilesBuilder.builder(databaseLayout, fs).withLogEntryReader(logEntryReader).withConfig(config).withDependencies(dependencies).withMemoryTracker(memoryTracker).build();
boolean failOnCorruptedLogFiles = config.get(GraphDatabaseInternalSettings.fail_on_corrupted_log_files);
// Refuse to replay logs that belong to a different store.
validateStoreId(logFiles, storageEngine.getStoreId(), config);
TransactionMetadataCache metadataCache = new TransactionMetadataCache();
PhysicalLogicalTransactionStore transactionStore = new PhysicalLogicalTransactionStore(logFiles, metadataCache, logEntryReader, monitors, failOnCorruptedLogFiles);
BatchingTransactionAppender transactionAppender = new BatchingTransactionAppender(logFiles, LogRotation.NO_ROTATION, metadataCache, metadataProvider, databaseHealth);
// Schema/token lifecycles run in a separate LifeSupport managed by the recovery process itself.
LifeSupport schemaLife = new LifeSupport();
schemaLife.add(storageEngine.schemaAndTokensLifecycle());
schemaLife.add(indexingService);
var doParallelRecovery = config.get(GraphDatabaseInternalSettings.do_parallel_recovery);
TransactionLogsRecovery transactionLogsRecovery = transactionLogRecovery(fs, metadataProvider, monitors.newMonitor(RecoveryMonitor.class), monitors.newMonitor(RecoveryStartInformationProvider.Monitor.class), logFiles, storageEngine, transactionStore, metadataProvider, schemaLife, databaseLayout, failOnCorruptedLogFiles, recoveryLog, startupChecker, tracers.getPageCacheTracer(), memoryTracker, doParallelRecovery);
// --- Checkpointer for the final "recovery completed" checkpoint ---
CheckPointerImpl.ForceOperation forceOperation = new DefaultForceOperation(indexingService, storageEngine);
var checkpointAppender = logFiles.getCheckpointFile().getCheckpointAppender();
CheckPointerImpl checkPointer = new CheckPointerImpl(metadataProvider, RecoveryThreshold.INSTANCE, forceOperation, LogPruning.NO_PRUNING, checkpointAppender, databaseHealth, logProvider, tracers, IOController.DISABLED, new StoreCopyCheckPointMutex(), versionContextSupplier, clock);
// --- Lifecycle assembly: registration order is the start order (and reverse shutdown order) ---
recoveryLife.add(scheduler);
recoveryLife.add(recoveryCleanupCollector);
recoveryLife.add(extensions);
recoveryLife.add(indexProviderMap);
recoveryLife.add(storageEngine);
recoveryLife.add(new MissingTransactionLogsCheck(databaseLayout, config, fs, logFiles, recoveryLog));
recoveryLife.add(logFiles);
recoveryLife.add(transactionLogsRecovery);
recoveryLife.add(transactionAppender);
recoveryLife.add(checkPointer);
try {
// Starting the lifecycle runs the actual recovery (transactionLogsRecovery.start()).
recoveryLife.start();
if (databaseHealth.isHealthy()) {
// Seal the recovered state with a checkpoint so recovery won't be needed again.
checkPointer.forceCheckPoint(new SimpleTriggerInfo("Recovery completed."));
}
} finally {
recoveryLife.shutdown();
}
}
End of aggregated usage examples.