
Example 1 with MetadataProvider

Use of org.neo4j.storageengine.api.MetadataProvider in the neo4j/neo4j project.

From the class LogsUpgrader, the upgrade method:

public void upgrade(DatabaseLayout layout) {
    CommandReaderFactory commandReaderFactory = storageEngineFactory.commandReaderFactory();
    try (MetadataProvider store = getMetaDataStore()) {
        TransactionLogInitializer logInitializer = new TransactionLogInitializer(fs, store, commandReaderFactory, tracer);
        Path transactionLogsDirectory = layout.getTransactionLogsDirectory();
        Path legacyLogsDirectory = legacyLogsLocator.getTransactionLogsDirectory();
        boolean filesNeedsToMove = !transactionLogsDirectory.equals(legacyLogsDirectory);
        LogFiles logFiles = LogFilesBuilder.logFilesBasedOnlyBuilder(legacyLogsDirectory, fs).withCommandReaderFactory(commandReaderFactory).build();
        // Move log files to their intended directory, if they are not there already.
        Path[] legacyFiles = logFiles.logFiles();
        if (legacyFiles != null && legacyFiles.length > 0) {
            if (filesNeedsToMove) {
                for (Path legacyFile : legacyFiles) {
                    fs.copyFile(legacyFile, transactionLogsDirectory.resolve(legacyFile.getFileName()), EMPTY_COPY_OPTIONS);
                }
            }
            logInitializer.initializeExistingLogFiles(layout, transactionLogsDirectory, UPGRADE_CHECKPOINT);
            if (filesNeedsToMove) {
                for (Path legacyFile : legacyFiles) {
                    fs.deleteFile(legacyFile);
                }
            }
        } else {
            // We didn't find any files in the legacy location.
            // If the legacy location is the same as the intended location, then the log files are missing entirely.
            // Otherwise, we will have to check if the log files are already present in the intended location and try to initialize them there.
            logFiles = LogFilesBuilder.logFilesBasedOnlyBuilder(transactionLogsDirectory, fs).build();
            legacyFiles = logFiles.logFiles();
            if (legacyFiles != null && legacyFiles.length > 0) {
                // The log files are already at their intended location, so initialize them there.
                logInitializer.initializeExistingLogFiles(layout, transactionLogsDirectory, UPGRADE_CHECKPOINT);
            } else if (config.get(fail_on_missing_files)) {
                // The log files are missing entirely, and without them we cannot tell whether the store is in a
                // recovered state or not, so refuse to upgrade.
                throw new UpgradeNotAllowedException();
            } else {
                // The log files are missing entirely, but we were told to not think of this as an error condition,
                // so we instead initialize an empty log file.
                logInitializer.initializeEmptyLogFile(layout, transactionLogsDirectory, UPGRADE_CHECKPOINT);
            }
        }
    } catch (Exception exception) {
        throw new StoreUpgrader.TransactionLogsRelocationException("Failure on attempt to move transaction logs into new location.", exception);
    }
}
Also used: Path (java.nio.file.Path), TransactionLogInitializer (org.neo4j.kernel.impl.transaction.log.files.TransactionLogInitializer), MetadataProvider (org.neo4j.storageengine.api.MetadataProvider), CommandReaderFactory (org.neo4j.storageengine.api.CommandReaderFactory), LogFiles (org.neo4j.kernel.impl.transaction.log.files.LogFiles), UpgradeNotAllowedException (org.neo4j.storageengine.migration.UpgradeNotAllowedException), IOException (java.io.IOException)
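
The relocation above follows a copy-then-delete ordering: legacy files are first copied into the new transaction-log directory, the copies are initialized with an upgrade checkpoint, and only then are the originals deleted, so an interrupted upgrade never leaves the database without a complete set of logs. Below is a minimal, self-contained sketch of that ordering using plain java.nio.file instead of Neo4j's FileSystemAbstraction; the class and directory names are hypothetical and only illustrate the pattern.

import java.io.IOException;
import java.nio.file.DirectoryStream;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.ArrayList;
import java.util.List;

public class CopyThenDeleteRelocation {
    // Copies every regular file from legacyDir into targetDir and deletes the
    // originals only after all copies (and any work on the copies) have succeeded,
    // mirroring the ordering used by LogsUpgrader.upgrade above.
    static void relocate(Path legacyDir, Path targetDir) throws IOException {
        if (legacyDir.equals(targetDir)) {
            // Nothing to move; the logs are already in their intended directory.
            return;
        }
        Files.createDirectories(targetDir);
        List<Path> copied = new ArrayList<>();
        try (DirectoryStream<Path> files = Files.newDirectoryStream(legacyDir)) {
            for (Path legacyFile : files) {
                if (Files.isRegularFile(legacyFile)) {
                    Files.copy(legacyFile, targetDir.resolve(legacyFile.getFileName()));
                    copied.add(legacyFile);
                }
            }
        }
        // At this point the real upgrader appends its UPGRADE_CHECKPOINT to the copies.
        for (Path legacyFile : copied) {
            Files.delete(legacyFile);
        }
    }
}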

Example 2 with MetadataProvider

Use of org.neo4j.storageengine.api.MetadataProvider in the neo4j/neo4j project.

From the class Database, the start method:

/**
 * Start the database and make it ready for transaction processing.
 * A database will automatically recover itself, if necessary, when started.
 * If the store files are obsolete (older than oldest supported version), then start will throw an exception.
 */
@Override
public synchronized void start() {
    if (started) {
        return;
    }
    // Ensure we're initialized
    init();
    try {
        // Upgrade the store before we begin
        upgradeStore(databaseConfig, databasePageCache, otherDatabaseMemoryTracker);
        // Check the tail of transaction logs and validate version
        LogEntryReader logEntryReader = new VersionAwareLogEntryReader(storageEngineFactory.commandReaderFactory());
        LogFiles logFiles = getLogFiles(logEntryReader);
        databaseMonitors.addMonitorListener(new LoggingLogFileMonitor(msgLog));
        databaseMonitors.addMonitorListener(new LoggingLogTailScannerMonitor(internalLogProvider.getLog(AbstractLogTailScanner.class)));
        databaseMonitors.addMonitorListener(new ReverseTransactionCursorLoggingMonitor(internalLogProvider.getLog(ReversedSingleFileTransactionCursor.class)));
        var pageCacheTracer = tracers.getPageCacheTracer();
        boolean storageExists = storageEngineFactory.storageExists(fs, databaseLayout, databasePageCache);
        validateStoreAndTxLogs(logFiles, pageCacheTracer, storageExists);
        performRecovery(fs, databasePageCache, tracers, databaseConfig, databaseLayout, storageEngineFactory, false, internalLogProvider, databaseMonitors, extensionFactories, Optional.of(logFiles), new RecoveryStartupChecker(startupController, namedDatabaseId), otherDatabaseMemoryTracker, clock);
        // Build all modules and their services
        DatabaseSchemaState databaseSchemaState = new DatabaseSchemaState(internalLogProvider);
        idController.initialize(() -> kernelModule.kernelTransactions().get());
        storageEngine = storageEngineFactory.instantiate(fs, databaseLayout, databaseConfig, databasePageCache, tokenHolders, databaseSchemaState, constraintSemantics, indexProviderMap, lockService, idGeneratorFactory, idController, databaseHealth, internalLogProvider, recoveryCleanupWorkCollector, pageCacheTracer, !storageExists, readOnlyDatabaseChecker, otherDatabaseMemoryTracker);
        MetadataProvider metadataProvider = storageEngine.metadataProvider();
        databaseDependencies.satisfyDependency(metadataProvider);
        // Recreate the logFiles after storage engine to get access to dependencies
        logFiles = getLogFiles(logEntryReader);
        life.add(storageEngine);
        life.add(storageEngine.schemaAndTokensLifecycle());
        life.add(logFiles);
        // Token indexes
        FullScanStoreView fullScanStoreView = new FullScanStoreView(lockService, storageEngine::newReader, databaseConfig, scheduler);
        IndexStoreViewFactory indexStoreViewFactory = new IndexStoreViewFactory(databaseConfig, storageEngine::newReader, locks, fullScanStoreView, lockService, internalLogProvider);
        // Schema indexes
        IndexStatisticsStore indexStatisticsStore = new IndexStatisticsStore(databasePageCache, databaseLayout, recoveryCleanupWorkCollector, readOnlyDatabaseChecker, pageCacheTracer);
        IndexingService indexingService = buildIndexingService(storageEngine, databaseSchemaState, indexStoreViewFactory, indexStatisticsStore, pageCacheTracer, otherDatabaseMemoryTracker);
        databaseDependencies.satisfyDependency(storageEngine.countsAccessor());
        versionContextSupplier.init(metadataProvider::getLastClosedTransactionId);
        CheckPointerImpl.ForceOperation forceOperation = new DefaultForceOperation(indexingService, storageEngine);
        DatabaseTransactionLogModule transactionLogModule = buildTransactionLogs(logFiles, databaseConfig, internalLogProvider, scheduler, forceOperation, logEntryReader, metadataProvider, databaseMonitors, databaseDependencies);
        databaseTransactionEventListeners = new DatabaseTransactionEventListeners(databaseFacade, transactionEventListeners, namedDatabaseId);
        life.add(databaseTransactionEventListeners);
        final DatabaseKernelModule kernelModule = buildKernel(logFiles, transactionLogModule.transactionAppender(), indexingService, databaseSchemaState, storageEngine, metadataProvider, metadataProvider, databaseAvailabilityGuard, clock, indexStatisticsStore, leaseService);
        kernelModule.satisfyDependencies(databaseDependencies);
        // Do these assignments last so that we can ensure no cyclical dependencies exist
        this.kernelModule = kernelModule;
        databaseDependencies.satisfyDependency(databaseSchemaState);
        databaseDependencies.satisfyDependency(logEntryReader);
        databaseDependencies.satisfyDependency(storageEngine);
        databaseDependencies.satisfyDependency(indexingService);
        databaseDependencies.satisfyDependency(indexStoreViewFactory);
        databaseDependencies.satisfyDependency(indexStatisticsStore);
        databaseDependencies.satisfyDependency(indexProviderMap);
        databaseDependencies.satisfyDependency(forceOperation);
        databaseDependencies.satisfyDependency(new DatabaseEntityCounters(this.idGeneratorFactory, databaseDependencies.resolveDependency(CountsAccessor.class)));
        var providerSpi = QueryEngineProvider.spi(internalLogProvider, databaseMonitors, scheduler, life, getKernel(), databaseConfig);
        this.executionEngine = QueryEngineProvider.initialize(databaseDependencies, databaseFacade, engineProvider, isSystem(), providerSpi);
        this.checkpointerLifecycle = new CheckpointerLifecycle(transactionLogModule.checkPointer(), databaseHealth, ioController);
        life.add(databaseHealth);
        life.add(databaseAvailabilityGuard);
        life.add(databaseAvailability);
        life.setLast(checkpointerLifecycle);
        databaseDependencies.resolveDependency(DbmsDiagnosticsManager.class).dumpDatabaseDiagnostics(this);
        life.start();
        registerUpgradeListener();
        eventListeners.databaseStart(namedDatabaseId);
        /*
             * At this point recovery has completed and the database is ready for use. Whatever panic might have
             * happened before has been healed. So we can safely set the kernel health to ok.
             * This right now has any real effect only in the case of internal restarts (for example, after a store copy).
             * Standalone instances will have to be restarted by the user, as is proper for all database panics.
             */
        databaseHealth.healed();
        started = true;
        postStartupInit(storageExists);
    } catch (Throwable e) {
        handleStartupFailure(e);
    }
}
Also used: CheckPointerImpl (org.neo4j.kernel.impl.transaction.log.checkpoint.CheckPointerImpl), RecoveryStartupChecker (org.neo4j.kernel.recovery.RecoveryStartupChecker), LogFiles (org.neo4j.kernel.impl.transaction.log.files.LogFiles), VersionAwareLogEntryReader (org.neo4j.kernel.impl.transaction.log.entry.VersionAwareLogEntryReader), LogEntryReader (org.neo4j.kernel.impl.transaction.log.entry.LogEntryReader), DbmsDiagnosticsManager (org.neo4j.kernel.diagnostics.providers.DbmsDiagnosticsManager), ReverseTransactionCursorLoggingMonitor (org.neo4j.kernel.impl.transaction.log.reverse.ReverseTransactionCursorLoggingMonitor), DatabaseSchemaState (org.neo4j.kernel.impl.api.DatabaseSchemaState), FullScanStoreView (org.neo4j.kernel.impl.transaction.state.storeview.FullScanStoreView), IndexingService (org.neo4j.kernel.impl.api.index.IndexingService), IndexStatisticsStore (org.neo4j.kernel.impl.api.index.stats.IndexStatisticsStore), LoggingLogFileMonitor (org.neo4j.kernel.impl.transaction.log.LoggingLogFileMonitor), CheckpointerLifecycle (org.neo4j.kernel.impl.transaction.log.checkpoint.CheckpointerLifecycle), LoggingLogTailScannerMonitor (org.neo4j.kernel.recovery.LoggingLogTailScannerMonitor), DatabaseEntityCounters (org.neo4j.kernel.impl.store.stats.DatabaseEntityCounters), IndexStoreViewFactory (org.neo4j.kernel.impl.transaction.state.storeview.IndexStoreViewFactory), MetadataProvider (org.neo4j.storageengine.api.MetadataProvider), DatabaseTransactionEventListeners (org.neo4j.kernel.internal.event.DatabaseTransactionEventListeners)
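
Most of the components created in start() are registered with `life` and only brought up by the final life.start() call, which starts them in registration order and tears them down in reverse order if anything fails. The following is a minimal, self-contained sketch of that lifecycle pattern; the Lifecycle interface and SimpleLifeSupport class here are hypothetical stand-ins, not Neo4j's org.neo4j.kernel.lifecycle API.

import java.util.ArrayList;
import java.util.List;

interface Lifecycle {
    void start() throws Exception;
    void stop() throws Exception;
}

// Starts components in the order they were added; on any failure, stops the
// already-started ones in reverse order so shutdown mirrors startup.
final class SimpleLifeSupport {
    private final List<Lifecycle> components = new ArrayList<>();

    <T extends Lifecycle> T add(T component) {
        components.add(component);
        return component;
    }

    void start() throws Exception {
        List<Lifecycle> started = new ArrayList<>();
        try {
            for (Lifecycle component : components) {
                component.start();
                started.add(component);
            }
        } catch (Exception e) {
            for (int i = started.size() - 1; i >= 0; i--) {
                try {
                    started.get(i).stop();
                } catch (Exception suppressed) {
                    e.addSuppressed(suppressed);
                }
            }
            throw e;
        }
    }

    void shutdown() throws Exception {
        Exception failure = null;
        for (int i = components.size() - 1; i >= 0; i--) {
            try {
                components.get(i).stop();
            } catch (Exception e) {
                if (failure == null) {
                    failure = e;
                } else {
                    failure.addSuppressed(e);
                }
            }
        }
        if (failure != null) {
            throw failure;
        }
    }
}

The reverse-order shutdown is what makes it safe for start() above to add the storage engine, log files, and other components in dependency order.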

Example 3 with MetadataProvider

Use of org.neo4j.storageengine.api.MetadataProvider in the neo4j/neo4j project.

From the class Recovery, the performRecovery method:

/**
 * Performs recovery of database described by provided layout.
 *
 * @param fs database filesystem
 * @param pageCache page cache used to perform database recovery.
 * @param tracers underlying operation tracers
 * @param config custom configuration
 * @param databaseLayout layout of the database to recover.
 * @param storageEngineFactory {@link StorageEngineFactory} for the storage to recover.
 * @param logProvider log provider
 * @param globalMonitors global server monitors
 * @param extensionFactories extension factories for extensions that should participate in recovery
 * @param providedLogFiles log files from database
 * @param forceRunRecovery forces recovery to run even if the usual checks indicate that it is not required.
 * In specific cases, such as after a store copy, there is always a need to run recovery, or at least to start the db, checkpoint and shut down,
 * even if the normal "is recovery required" check says that recovery isn't required.
 * @throws IOException on any unexpected I/O exception encountered during recovery.
 */
public static void performRecovery(FileSystemAbstraction fs, PageCache pageCache, DatabaseTracers tracers, Config config, DatabaseLayout databaseLayout, StorageEngineFactory storageEngineFactory, boolean forceRunRecovery, LogProvider logProvider, Monitors globalMonitors, Iterable<ExtensionFactory<?>> extensionFactories, Optional<LogFiles> providedLogFiles, RecoveryStartupChecker startupChecker, MemoryTracker memoryTracker, Clock clock) throws IOException {
    Log recoveryLog = logProvider.getLog(Recovery.class);
    if (!forceRunRecovery && !isRecoveryRequired(fs, pageCache, databaseLayout, storageEngineFactory, config, providedLogFiles, memoryTracker)) {
        return;
    }
    checkAllFilesPresence(databaseLayout, fs, pageCache, storageEngineFactory);
    LifeSupport recoveryLife = new LifeSupport();
    Monitors monitors = new Monitors(globalMonitors, logProvider);
    DatabasePageCache databasePageCache = new DatabasePageCache(pageCache, IOController.DISABLED);
    SimpleLogService logService = new SimpleLogService(logProvider);
    VersionAwareLogEntryReader logEntryReader = new VersionAwareLogEntryReader(storageEngineFactory.commandReaderFactory());
    DatabaseReadOnlyChecker readOnlyChecker = writable();
    DatabaseSchemaState schemaState = new DatabaseSchemaState(logProvider);
    JobScheduler scheduler = JobSchedulerFactory.createInitialisedScheduler();
    VersionContextSupplier versionContextSupplier = EmptyVersionContextSupplier.EMPTY;
    DatabaseHealth databaseHealth = new DatabaseHealth(PanicEventGenerator.NO_OP, recoveryLog);
    TokenHolders tokenHolders = new TokenHolders(new DelegatingTokenHolder(new ReadOnlyTokenCreator(), TYPE_PROPERTY_KEY), new DelegatingTokenHolder(new ReadOnlyTokenCreator(), TYPE_LABEL), new DelegatingTokenHolder(new ReadOnlyTokenCreator(), TYPE_RELATIONSHIP_TYPE));
    RecoveryCleanupWorkCollector recoveryCleanupCollector = new GroupingRecoveryCleanupWorkCollector(scheduler, INDEX_CLEANUP, INDEX_CLEANUP_WORK, databaseLayout.getDatabaseName());
    DatabaseExtensions extensions = instantiateRecoveryExtensions(databaseLayout, fs, config, logService, databasePageCache, scheduler, DbmsInfo.TOOL, monitors, tokenHolders, recoveryCleanupCollector, readOnlyChecker, extensionFactories, tracers.getPageCacheTracer());
    DefaultIndexProviderMap indexProviderMap = new DefaultIndexProviderMap(extensions, config);
    StorageEngine storageEngine = storageEngineFactory.instantiate(fs, databaseLayout, config, databasePageCache, tokenHolders, schemaState, getConstraintSemantics(), indexProviderMap, NO_LOCK_SERVICE, new DefaultIdGeneratorFactory(fs, recoveryCleanupCollector, databaseLayout.getDatabaseName()), new DefaultIdController(), databaseHealth, logService.getInternalLogProvider(), recoveryCleanupCollector, tracers.getPageCacheTracer(), true, readOnlyChecker, memoryTracker);
    // Schema indexes
    FullScanStoreView fullScanStoreView = new FullScanStoreView(NO_LOCK_SERVICE, storageEngine::newReader, config, scheduler);
    IndexStoreViewFactory indexStoreViewFactory = new IndexStoreViewFactory(config, storageEngine::newReader, NO_LOCKS, fullScanStoreView, NO_LOCK_SERVICE, logProvider);
    IndexStatisticsStore indexStatisticsStore = new IndexStatisticsStore(databasePageCache, databaseLayout, recoveryCleanupCollector, readOnlyChecker, tracers.getPageCacheTracer());
    IndexingService indexingService = Database.buildIndexingService(storageEngine, schemaState, indexStoreViewFactory, indexStatisticsStore, config, scheduler, indexProviderMap, tokenHolders, logProvider, logProvider, monitors.newMonitor(IndexingService.Monitor.class), tracers.getPageCacheTracer(), memoryTracker, databaseLayout.getDatabaseName(), readOnlyChecker);
    MetadataProvider metadataProvider = storageEngine.metadataProvider();
    Dependencies dependencies = new Dependencies();
    dependencies.satisfyDependencies(databaseLayout, config, databasePageCache, fs, logProvider, tokenHolders, schemaState, getConstraintSemantics(), NO_LOCK_SERVICE, databaseHealth, new DefaultIdGeneratorFactory(fs, recoveryCleanupCollector, databaseLayout.getDatabaseName()), new DefaultIdController(), readOnlyChecker, versionContextSupplier, logService, metadataProvider);
    LogFiles logFiles = LogFilesBuilder.builder(databaseLayout, fs).withLogEntryReader(logEntryReader).withConfig(config).withDependencies(dependencies).withMemoryTracker(memoryTracker).build();
    boolean failOnCorruptedLogFiles = config.get(GraphDatabaseInternalSettings.fail_on_corrupted_log_files);
    validateStoreId(logFiles, storageEngine.getStoreId(), config);
    TransactionMetadataCache metadataCache = new TransactionMetadataCache();
    PhysicalLogicalTransactionStore transactionStore = new PhysicalLogicalTransactionStore(logFiles, metadataCache, logEntryReader, monitors, failOnCorruptedLogFiles);
    BatchingTransactionAppender transactionAppender = new BatchingTransactionAppender(logFiles, LogRotation.NO_ROTATION, metadataCache, metadataProvider, databaseHealth);
    LifeSupport schemaLife = new LifeSupport();
    schemaLife.add(storageEngine.schemaAndTokensLifecycle());
    schemaLife.add(indexingService);
    var doParallelRecovery = config.get(GraphDatabaseInternalSettings.do_parallel_recovery);
    TransactionLogsRecovery transactionLogsRecovery = transactionLogRecovery(fs, metadataProvider, monitors.newMonitor(RecoveryMonitor.class), monitors.newMonitor(RecoveryStartInformationProvider.Monitor.class), logFiles, storageEngine, transactionStore, metadataProvider, schemaLife, databaseLayout, failOnCorruptedLogFiles, recoveryLog, startupChecker, tracers.getPageCacheTracer(), memoryTracker, doParallelRecovery);
    CheckPointerImpl.ForceOperation forceOperation = new DefaultForceOperation(indexingService, storageEngine);
    var checkpointAppender = logFiles.getCheckpointFile().getCheckpointAppender();
    CheckPointerImpl checkPointer = new CheckPointerImpl(metadataProvider, RecoveryThreshold.INSTANCE, forceOperation, LogPruning.NO_PRUNING, checkpointAppender, databaseHealth, logProvider, tracers, IOController.DISABLED, new StoreCopyCheckPointMutex(), versionContextSupplier, clock);
    recoveryLife.add(scheduler);
    recoveryLife.add(recoveryCleanupCollector);
    recoveryLife.add(extensions);
    recoveryLife.add(indexProviderMap);
    recoveryLife.add(storageEngine);
    recoveryLife.add(new MissingTransactionLogsCheck(databaseLayout, config, fs, logFiles, recoveryLog));
    recoveryLife.add(logFiles);
    recoveryLife.add(transactionLogsRecovery);
    recoveryLife.add(transactionAppender);
    recoveryLife.add(checkPointer);
    try {
        recoveryLife.start();
        if (databaseHealth.isHealthy()) {
            checkPointer.forceCheckPoint(new SimpleTriggerInfo("Recovery completed."));
        }
    } finally {
        recoveryLife.shutdown();
    }
}
Also used: DatabaseHealth (org.neo4j.monitoring.DatabaseHealth), CheckPointerImpl (org.neo4j.kernel.impl.transaction.log.checkpoint.CheckPointerImpl), SimpleLogService (org.neo4j.logging.internal.SimpleLogService), LogFiles (org.neo4j.kernel.impl.transaction.log.files.LogFiles), BatchingTransactionAppender (org.neo4j.kernel.impl.transaction.log.BatchingTransactionAppender), RecoveryCleanupWorkCollector (org.neo4j.index.internal.gbptree.RecoveryCleanupWorkCollector), GroupingRecoveryCleanupWorkCollector (org.neo4j.index.internal.gbptree.GroupingRecoveryCleanupWorkCollector), StorageEngine (org.neo4j.storageengine.api.StorageEngine), StorageEngineFactory.selectStorageEngine (org.neo4j.storageengine.api.StorageEngineFactory.selectStorageEngine), DatabasePageCache (org.neo4j.dbms.database.DatabasePageCache), VersionContextSupplier (org.neo4j.io.pagecache.context.VersionContextSupplier), EmptyVersionContextSupplier (org.neo4j.io.pagecache.context.EmptyVersionContextSupplier), DefaultIdController (org.neo4j.internal.id.DefaultIdController), SimpleTriggerInfo (org.neo4j.kernel.impl.transaction.log.checkpoint.SimpleTriggerInfo), DatabaseSchemaState (org.neo4j.kernel.impl.api.DatabaseSchemaState), FullScanStoreView (org.neo4j.kernel.impl.transaction.state.storeview.FullScanStoreView), DatabaseReadOnlyChecker (org.neo4j.configuration.helpers.DatabaseReadOnlyChecker), IndexingService (org.neo4j.kernel.impl.api.index.IndexingService), IndexStatisticsStore (org.neo4j.kernel.impl.api.index.stats.IndexStatisticsStore), StoreCopyCheckPointMutex (org.neo4j.kernel.impl.transaction.log.checkpoint.StoreCopyCheckPointMutex), LifeSupport (org.neo4j.kernel.lifecycle.LifeSupport), VersionAwareLogEntryReader (org.neo4j.kernel.impl.transaction.log.entry.VersionAwareLogEntryReader), Dependencies (org.neo4j.collection.Dependencies), TokenHolders (org.neo4j.token.TokenHolders), DelegatingTokenHolder (org.neo4j.token.DelegatingTokenHolder), JobScheduler (org.neo4j.scheduler.JobScheduler), PhysicalLogicalTransactionStore (org.neo4j.kernel.impl.transaction.log.PhysicalLogicalTransactionStore), NullLog (org.neo4j.logging.NullLog), Log (org.neo4j.logging.Log), DefaultIdGeneratorFactory (org.neo4j.internal.id.DefaultIdGeneratorFactory), DatabaseExtensions (org.neo4j.kernel.extension.DatabaseExtensions), TransactionMetadataCache (org.neo4j.kernel.impl.transaction.log.TransactionMetadataCache), IndexStoreViewFactory (org.neo4j.kernel.impl.transaction.state.storeview.IndexStoreViewFactory), DefaultIndexProviderMap (org.neo4j.kernel.impl.transaction.state.DefaultIndexProviderMap), MetadataProvider (org.neo4j.storageengine.api.MetadataProvider), Monitors (org.neo4j.monitoring.Monitors), DefaultForceOperation (org.neo4j.kernel.database.DefaultForceOperation), ReadOnlyTokenCreator (org.neo4j.token.ReadOnlyTokenCreator)
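
performRecovery publishes shared components such as the metadata provider and page cache through a Dependencies container (dependencies.satisfyDependencies(...)) so that LogFilesBuilder can later resolve them by type. A minimal, self-contained sketch of that type-keyed registry pattern follows; the SimpleDependencies class is a hypothetical illustration, not Neo4j's org.neo4j.collection.Dependencies.

import java.util.LinkedHashMap;
import java.util.Map;

// A tiny type-keyed service registry: components are registered by instance and
// later looked up by any type they are assignable to.
final class SimpleDependencies {
    private final Map<Class<?>, Object> byType = new LinkedHashMap<>();

    <T> T satisfyDependency(T instance) {
        byType.put(instance.getClass(), instance);
        return instance;
    }

    void satisfyDependencies(Object... instances) {
        for (Object instance : instances) {
            satisfyDependency(instance);
        }
    }

    <T> T resolveDependency(Class<T> type) {
        for (Object candidate : byType.values()) {
            if (type.isInstance(candidate)) {
                return type.cast(candidate);
            }
        }
        throw new IllegalStateException("No dependency registered for " + type.getName());
    }
}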

Aggregations (number of examples using each type):

LogFiles (org.neo4j.kernel.impl.transaction.log.files.LogFiles): 3
MetadataProvider (org.neo4j.storageengine.api.MetadataProvider): 3
DatabaseSchemaState (org.neo4j.kernel.impl.api.DatabaseSchemaState): 2
IndexingService (org.neo4j.kernel.impl.api.index.IndexingService): 2
IndexStatisticsStore (org.neo4j.kernel.impl.api.index.stats.IndexStatisticsStore): 2
CheckPointerImpl (org.neo4j.kernel.impl.transaction.log.checkpoint.CheckPointerImpl): 2
VersionAwareLogEntryReader (org.neo4j.kernel.impl.transaction.log.entry.VersionAwareLogEntryReader): 2
FullScanStoreView (org.neo4j.kernel.impl.transaction.state.storeview.FullScanStoreView): 2
IndexStoreViewFactory (org.neo4j.kernel.impl.transaction.state.storeview.IndexStoreViewFactory): 2
IOException (java.io.IOException): 1
Path (java.nio.file.Path): 1
Dependencies (org.neo4j.collection.Dependencies): 1
DatabaseReadOnlyChecker (org.neo4j.configuration.helpers.DatabaseReadOnlyChecker): 1
DatabasePageCache (org.neo4j.dbms.database.DatabasePageCache): 1
GroupingRecoveryCleanupWorkCollector (org.neo4j.index.internal.gbptree.GroupingRecoveryCleanupWorkCollector): 1
RecoveryCleanupWorkCollector (org.neo4j.index.internal.gbptree.RecoveryCleanupWorkCollector): 1
DefaultIdController (org.neo4j.internal.id.DefaultIdController): 1
DefaultIdGeneratorFactory (org.neo4j.internal.id.DefaultIdGeneratorFactory): 1
EmptyVersionContextSupplier (org.neo4j.io.pagecache.context.EmptyVersionContextSupplier): 1
VersionContextSupplier (org.neo4j.io.pagecache.context.VersionContextSupplier): 1