Use of org.neo4j.kernel.impl.transaction.state.storeview.IndexStoreViewFactory in project neo4j by neo4j.
The class MultiIndexPopulationConcurrentUpdatesIT, method launchCustomIndexPopulation:
private void launchCustomIndexPopulation(GraphDatabaseSettings.SchemaIndex schemaIndex,
        Map<String, Integer> labelNameIdMap, int propertyId, Runnable customAction) throws Throwable {
    RecordStorageEngine storageEngine = getStorageEngine();
    try (Transaction transaction = db.beginTx()) {
        Config config = Config.defaults();
        KernelTransaction ktx = ((InternalTransaction) transaction).kernelTransaction();
        JobScheduler scheduler = getJobScheduler();
        NullLogProvider nullLogProvider = NullLogProvider.getInstance();
        // Stub the factory so every token index store view it hands out is wrapped
        // with a hook that runs customAction during population.
        IndexStoreViewFactory indexStoreViewFactory = mock(IndexStoreViewFactory.class);
        when(indexStoreViewFactory.createTokenIndexStoreView(any())).thenAnswer(invocation ->
                dynamicIndexStoreViewWrapper(customAction, storageEngine::newReader,
                        invocation.getArgument(0), config, scheduler));
        IndexProviderMap providerMap = getIndexProviderMap();
        indexService = IndexingServiceFactory.createIndexingService(config, scheduler, providerMap,
                indexStoreViewFactory, ktx.tokenRead(), initialSchemaRulesLoader(storageEngine),
                nullLogProvider, nullLogProvider, IndexingService.NO_MONITOR, getSchemaState(),
                mock(IndexStatisticsStore.class), PageCacheTracer.NULL, INSTANCE, "", writable());
        indexService.start();
        // Create the index rules, load them into the schema cache and kick off population.
        rules = createIndexRules(schemaIndex, labelNameIdMap, propertyId);
        schemaCache = new SchemaCache(new StandardConstraintSemantics(), providerMap);
        schemaCache.load(iterable(rules));
        indexService.createIndexes(AUTH_DISABLED, rules);
        transaction.commit();
    }
}
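The stubbing trick above, answering createTokenIndexStoreView with a wrapper so that every store view runs a test hook, is plain Mockito and works for any factory. A minimal, self-contained sketch of the same pattern, using a hypothetical StoreViewFactory/StoreView pair instead of the real Neo4j types:

import static org.mockito.ArgumentMatchers.any;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;

public class FactoryStubSketch {
    // Hypothetical stand-ins for IndexStoreViewFactory and the store view it creates.
    interface StoreView { void scan(); }
    interface StoreViewFactory { StoreView createView(String token); }

    public static void main(String[] args) {
        StoreViewFactory factory = mock(StoreViewFactory.class);
        Runnable customAction = () -> System.out.println("hook runs before the scan");
        // Like launchCustomIndexPopulation: answer with a decorated view that
        // triggers the custom action and then performs the (stubbed) scan.
        when(factory.createView(any())).thenAnswer(invocation -> (StoreView) () -> {
            customAction.run();
            System.out.println("scanning for token " + invocation.getArgument(0));
        });
        factory.createView("Person").scan();
    }
}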
Use of org.neo4j.kernel.impl.transaction.state.storeview.IndexStoreViewFactory in project neo4j by neo4j.
The class IndexingServiceTest, method shouldNotHaveToWaitForOrphanedUniquenessIndexInRecovery:
@Test
void shouldNotHaveToWaitForOrphanedUniquenessIndexInRecovery() throws Exception {
    // given that we have a uniqueness index that needs to be recovered and that doesn't have a constraint attached to it
    IndexDescriptor descriptor = uniqueIndex.materialise(10);
    Iterable<IndexDescriptor> schemaRules = Collections.singletonList(descriptor);
    IndexProvider indexProvider = mock(IndexProvider.class);
    when(indexProvider.getInitialState(any(), any())).thenReturn(POPULATING);
    IndexProviderMap indexProviderMap = mock(IndexProviderMap.class);
    when(indexProviderMap.lookup(anyString())).thenReturn(indexProvider);
    when(indexProviderMap.lookup(any(IndexProviderDescriptor.class))).thenReturn(indexProvider);
    when(indexProviderMap.getDefaultProvider()).thenReturn(indexProvider);
    NullLogProvider logProvider = NullLogProvider.getInstance();
    IndexMapReference indexMapReference = new IndexMapReference();
    IndexProxyCreator indexProxyCreator = mock(IndexProxyCreator.class);
    IndexProxy indexProxy = mock(IndexProxy.class);
    when(indexProxy.getDescriptor()).thenReturn(descriptor);
    // Eventually return ONLINE so that this test won't hang if the product code changes in this regard.
    // This test should still fail below when verifying interactions with the proxy and monitor, though.
    when(indexProxy.getState()).thenReturn(POPULATING, POPULATING, POPULATING, POPULATING, ONLINE);
    when(indexProxyCreator.createRecoveringIndexProxy(any())).thenReturn(indexProxy);
    when(indexProxyCreator.createFailedIndexProxy(any(), any())).thenReturn(indexProxy);
    when(indexProxyCreator.createPopulatingIndexProxy(any(), anyBoolean(), any(), any())).thenReturn(indexProxy);
    JobScheduler scheduler = mock(JobScheduler.class);
    IndexSamplingController samplingController = mock(IndexSamplingController.class);
    IndexingService.Monitor monitor = mock(IndexingService.Monitor.class);
    IndexStoreViewFactory storeViewFactory = mock(IndexStoreViewFactory.class);
    when(storeViewFactory.createTokenIndexStoreView(any())).thenReturn(storeView);
    when(storeView.newPropertyAccessor(any(), any())).thenReturn(propertyAccessor);
    IndexingService indexingService = new IndexingService(indexProxyCreator, indexProviderMap, indexMapReference,
            storeViewFactory, schemaRules, samplingController, nameLookup, scheduler, null, logProvider, logProvider,
            monitor, mock(IndexStatisticsStore.class), PageCacheTracer.NULL, INSTANCE, "", writable(), Config.defaults());
    // and where index population starts
    indexingService.init();
    // when starting the indexing service
    indexingService.start();
    // then it should be able to start without awaiting the completion of the population of the index
    verify(indexProxy, never()).awaitStoreScanCompleted(anyLong(), any());
    verify(monitor, never()).awaitingPopulationOfRecoveredIndex(any());
}
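The getState() stubbing above leans on Mockito's consecutive stubbing: each extra argument to thenReturn is served on the next call, and the last one repeats for every call after that. A minimal sketch of just that behaviour:

import java.util.Iterator;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;

public class ConsecutiveStubbingSketch {
    @SuppressWarnings("unchecked")
    public static void main(String[] args) {
        Iterator<String> state = mock(Iterator.class);
        // First two calls return "POPULATING"; every call after that returns "ONLINE".
        when(state.next()).thenReturn("POPULATING", "POPULATING", "ONLINE");
        System.out.println(state.next()); // POPULATING
        System.out.println(state.next()); // POPULATING
        System.out.println(state.next()); // ONLINE
        System.out.println(state.next()); // ONLINE (the last stubbed value repeats)
    }
}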
Use of org.neo4j.kernel.impl.transaction.state.storeview.IndexStoreViewFactory in project neo4j by neo4j.
The class Database, method start:
/**
 * Start the database and make it ready for transaction processing.
 * A database will automatically recover itself, if necessary, when started.
 * If the store files are obsolete (older than the oldest supported version), start will throw an exception.
 */
@Override
public synchronized void start() {
    if (started) {
        return;
    }
    // Ensure we're initialized
    init();
    try {
        // Upgrade the store before we begin
        upgradeStore(databaseConfig, databasePageCache, otherDatabaseMemoryTracker);
        // Check the tail of transaction logs and validate version
        LogEntryReader logEntryReader = new VersionAwareLogEntryReader(storageEngineFactory.commandReaderFactory());
        LogFiles logFiles = getLogFiles(logEntryReader);
        databaseMonitors.addMonitorListener(new LoggingLogFileMonitor(msgLog));
        databaseMonitors.addMonitorListener(new LoggingLogTailScannerMonitor(
                internalLogProvider.getLog(AbstractLogTailScanner.class)));
        databaseMonitors.addMonitorListener(new ReverseTransactionCursorLoggingMonitor(
                internalLogProvider.getLog(ReversedSingleFileTransactionCursor.class)));
        var pageCacheTracer = tracers.getPageCacheTracer();
        boolean storageExists = storageEngineFactory.storageExists(fs, databaseLayout, databasePageCache);
        validateStoreAndTxLogs(logFiles, pageCacheTracer, storageExists);
        performRecovery(fs, databasePageCache, tracers, databaseConfig, databaseLayout, storageEngineFactory, false,
                internalLogProvider, databaseMonitors, extensionFactories, Optional.of(logFiles),
                new RecoveryStartupChecker(startupController, namedDatabaseId), otherDatabaseMemoryTracker, clock);
        // Build all modules and their services
        DatabaseSchemaState databaseSchemaState = new DatabaseSchemaState(internalLogProvider);
        idController.initialize(() -> kernelModule.kernelTransactions().get());
        storageEngine = storageEngineFactory.instantiate(fs, databaseLayout, databaseConfig, databasePageCache,
                tokenHolders, databaseSchemaState, constraintSemantics, indexProviderMap, lockService,
                idGeneratorFactory, idController, databaseHealth, internalLogProvider, recoveryCleanupWorkCollector,
                pageCacheTracer, !storageExists, readOnlyDatabaseChecker, otherDatabaseMemoryTracker);
        MetadataProvider metadataProvider = storageEngine.metadataProvider();
        databaseDependencies.satisfyDependency(metadataProvider);
        // Recreate the logFiles after the storage engine to get access to dependencies
        logFiles = getLogFiles(logEntryReader);
        life.add(storageEngine);
        life.add(storageEngine.schemaAndTokensLifecycle());
        life.add(logFiles);
        // Token indexes
        FullScanStoreView fullScanStoreView = new FullScanStoreView(lockService, storageEngine::newReader,
                databaseConfig, scheduler);
        IndexStoreViewFactory indexStoreViewFactory = new IndexStoreViewFactory(databaseConfig,
                storageEngine::newReader, locks, fullScanStoreView, lockService, internalLogProvider);
        // Schema indexes
        IndexStatisticsStore indexStatisticsStore = new IndexStatisticsStore(databasePageCache, databaseLayout,
                recoveryCleanupWorkCollector, readOnlyDatabaseChecker, pageCacheTracer);
        IndexingService indexingService = buildIndexingService(storageEngine, databaseSchemaState,
                indexStoreViewFactory, indexStatisticsStore, pageCacheTracer, otherDatabaseMemoryTracker);
        databaseDependencies.satisfyDependency(storageEngine.countsAccessor());
        versionContextSupplier.init(metadataProvider::getLastClosedTransactionId);
        CheckPointerImpl.ForceOperation forceOperation = new DefaultForceOperation(indexingService, storageEngine);
        DatabaseTransactionLogModule transactionLogModule = buildTransactionLogs(logFiles, databaseConfig,
                internalLogProvider, scheduler, forceOperation, logEntryReader, metadataProvider, databaseMonitors,
                databaseDependencies);
        databaseTransactionEventListeners = new DatabaseTransactionEventListeners(databaseFacade,
                transactionEventListeners, namedDatabaseId);
        life.add(databaseTransactionEventListeners);
        final DatabaseKernelModule kernelModule = buildKernel(logFiles, transactionLogModule.transactionAppender(),
                indexingService, databaseSchemaState, storageEngine, metadataProvider, metadataProvider,
                databaseAvailabilityGuard, clock, indexStatisticsStore, leaseService);
        kernelModule.satisfyDependencies(databaseDependencies);
        // Do these assignments last so that we can ensure no cyclical dependencies exist
        this.kernelModule = kernelModule;
        databaseDependencies.satisfyDependency(databaseSchemaState);
        databaseDependencies.satisfyDependency(logEntryReader);
        databaseDependencies.satisfyDependency(storageEngine);
        databaseDependencies.satisfyDependency(indexingService);
        databaseDependencies.satisfyDependency(indexStoreViewFactory);
        databaseDependencies.satisfyDependency(indexStatisticsStore);
        databaseDependencies.satisfyDependency(indexProviderMap);
        databaseDependencies.satisfyDependency(forceOperation);
        databaseDependencies.satisfyDependency(new DatabaseEntityCounters(this.idGeneratorFactory,
                databaseDependencies.resolveDependency(CountsAccessor.class)));
        var providerSpi = QueryEngineProvider.spi(internalLogProvider, databaseMonitors, scheduler, life,
                getKernel(), databaseConfig);
        this.executionEngine = QueryEngineProvider.initialize(databaseDependencies, databaseFacade, engineProvider,
                isSystem(), providerSpi);
        this.checkpointerLifecycle = new CheckpointerLifecycle(transactionLogModule.checkPointer(), databaseHealth,
                ioController);
        life.add(databaseHealth);
        life.add(databaseAvailabilityGuard);
        life.add(databaseAvailability);
        life.setLast(checkpointerLifecycle);
        databaseDependencies.resolveDependency(DbmsDiagnosticsManager.class).dumpDatabaseDiagnostics(this);
        life.start();
        registerUpgradeListener();
        eventListeners.databaseStart(namedDatabaseId);
        /*
         * At this point recovery has completed and the database is ready for use. Whatever panic might have
         * happened before has been healed. So we can safely set the kernel health to ok.
         * Right now this has a real effect only in the case of internal restarts (for example, after a store copy).
         * Standalone instances will have to be restarted by the user, as is proper for all database panics.
         */
        databaseHealth.healed();
        started = true;
        postStartupInit(storageExists);
    } catch (Throwable e) {
        handleStartupFailure(e);
    }
}
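Note the construction order around the store view: the storage engine is instantiated first (it supplies the reader), then the FullScanStoreView, then the IndexStoreViewFactory, and only then the IndexingService that consumes it. A deliberately condensed sketch of just that dependency chain, with hypothetical simplified constructors standing in for the real signatures:

import java.util.function.Supplier;

// Hypothetical, condensed wiring; the real constructors take many more collaborators.
final class StartupWiringSketch {
    interface Reader {}
    static class StorageEngine { Reader newReader() { return new Reader() {}; } }
    static class FullScanStoreView { FullScanStoreView(Supplier<Reader> reader) {} }
    static class IndexStoreViewFactory { IndexStoreViewFactory(Supplier<Reader> reader, FullScanStoreView fullScan) {} }
    static class IndexingService { IndexingService(IndexStoreViewFactory factory) {} }

    static IndexingService wire() {
        StorageEngine engine = new StorageEngine();                            // 1. storage engine first
        FullScanStoreView fullScan = new FullScanStoreView(engine::newReader); // 2. full-scan store view
        IndexStoreViewFactory factory = new IndexStoreViewFactory(engine::newReader, fullScan); // 3. view factory
        return new IndexingService(factory);                                   // 4. indexing service last
    }
}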
Use of org.neo4j.kernel.impl.transaction.state.storeview.IndexStoreViewFactory in project neo4j by neo4j.
The class Recovery, method performRecovery:
/**
 * Performs recovery of the database described by the provided layout.
 *
 * @param fs database filesystem
 * @param pageCache page cache used to perform database recovery
 * @param tracers underlying operation tracers
 * @param config custom configuration
 * @param databaseLayout layout of the database to recover
 * @param storageEngineFactory {@link StorageEngineFactory} for the storage to recover
 * @param logProvider log provider
 * @param globalMonitors global server monitors
 * @param extensionFactories extension factories for extensions that should participate in recovery
 * @param providedLogFiles log files from the database
 * @param forceRunRecovery forces recovery to run even if the usual checks indicate that it's not required.
 * In specific cases, like after a store copy, there's always a need to do recovery, or at least to start the db,
 * checkpoint, and shut down, even if the normal "is recovery required" check says that recovery isn't required.
 * @throws IOException on any unexpected I/O exception encountered during recovery
 */
public static void performRecovery(FileSystemAbstraction fs, PageCache pageCache, DatabaseTracers tracers,
        Config config, DatabaseLayout databaseLayout, StorageEngineFactory storageEngineFactory,
        boolean forceRunRecovery, LogProvider logProvider, Monitors globalMonitors,
        Iterable<ExtensionFactory<?>> extensionFactories, Optional<LogFiles> providedLogFiles,
        RecoveryStartupChecker startupChecker, MemoryTracker memoryTracker, Clock clock) throws IOException {
    Log recoveryLog = logProvider.getLog(Recovery.class);
    if (!forceRunRecovery && !isRecoveryRequired(fs, pageCache, databaseLayout, storageEngineFactory, config,
            providedLogFiles, memoryTracker)) {
        return;
    }
    checkAllFilesPresence(databaseLayout, fs, pageCache, storageEngineFactory);
    LifeSupport recoveryLife = new LifeSupport();
    Monitors monitors = new Monitors(globalMonitors, logProvider);
    DatabasePageCache databasePageCache = new DatabasePageCache(pageCache, IOController.DISABLED);
    SimpleLogService logService = new SimpleLogService(logProvider);
    VersionAwareLogEntryReader logEntryReader = new VersionAwareLogEntryReader(storageEngineFactory.commandReaderFactory());
    DatabaseReadOnlyChecker readOnlyChecker = writable();
    DatabaseSchemaState schemaState = new DatabaseSchemaState(logProvider);
    JobScheduler scheduler = JobSchedulerFactory.createInitialisedScheduler();
    VersionContextSupplier versionContextSupplier = EmptyVersionContextSupplier.EMPTY;
    DatabaseHealth databaseHealth = new DatabaseHealth(PanicEventGenerator.NO_OP, recoveryLog);
    TokenHolders tokenHolders = new TokenHolders(
            new DelegatingTokenHolder(new ReadOnlyTokenCreator(), TYPE_PROPERTY_KEY),
            new DelegatingTokenHolder(new ReadOnlyTokenCreator(), TYPE_LABEL),
            new DelegatingTokenHolder(new ReadOnlyTokenCreator(), TYPE_RELATIONSHIP_TYPE));
    RecoveryCleanupWorkCollector recoveryCleanupCollector = new GroupingRecoveryCleanupWorkCollector(
            scheduler, INDEX_CLEANUP, INDEX_CLEANUP_WORK, databaseLayout.getDatabaseName());
    DatabaseExtensions extensions = instantiateRecoveryExtensions(databaseLayout, fs, config, logService,
            databasePageCache, scheduler, DbmsInfo.TOOL, monitors, tokenHolders, recoveryCleanupCollector,
            readOnlyChecker, extensionFactories, tracers.getPageCacheTracer());
    DefaultIndexProviderMap indexProviderMap = new DefaultIndexProviderMap(extensions, config);
    StorageEngine storageEngine = storageEngineFactory.instantiate(fs, databaseLayout, config, databasePageCache,
            tokenHolders, schemaState, getConstraintSemantics(), indexProviderMap, NO_LOCK_SERVICE,
            new DefaultIdGeneratorFactory(fs, recoveryCleanupCollector, databaseLayout.getDatabaseName()),
            new DefaultIdController(), databaseHealth, logService.getInternalLogProvider(), recoveryCleanupCollector,
            tracers.getPageCacheTracer(), true, readOnlyChecker, memoryTracker);
    // Schema indexes
    FullScanStoreView fullScanStoreView = new FullScanStoreView(NO_LOCK_SERVICE, storageEngine::newReader, config, scheduler);
    IndexStoreViewFactory indexStoreViewFactory = new IndexStoreViewFactory(config, storageEngine::newReader,
            NO_LOCKS, fullScanStoreView, NO_LOCK_SERVICE, logProvider);
    IndexStatisticsStore indexStatisticsStore = new IndexStatisticsStore(databasePageCache, databaseLayout,
            recoveryCleanupCollector, readOnlyChecker, tracers.getPageCacheTracer());
    IndexingService indexingService = Database.buildIndexingService(storageEngine, schemaState, indexStoreViewFactory,
            indexStatisticsStore, config, scheduler, indexProviderMap, tokenHolders, logProvider, logProvider,
            monitors.newMonitor(IndexingService.Monitor.class), tracers.getPageCacheTracer(), memoryTracker,
            databaseLayout.getDatabaseName(), readOnlyChecker);
    MetadataProvider metadataProvider = storageEngine.metadataProvider();
    Dependencies dependencies = new Dependencies();
    dependencies.satisfyDependencies(databaseLayout, config, databasePageCache, fs, logProvider, tokenHolders,
            schemaState, getConstraintSemantics(), NO_LOCK_SERVICE, databaseHealth,
            new DefaultIdGeneratorFactory(fs, recoveryCleanupCollector, databaseLayout.getDatabaseName()),
            new DefaultIdController(), readOnlyChecker, versionContextSupplier, logService, metadataProvider);
    LogFiles logFiles = LogFilesBuilder.builder(databaseLayout, fs)
            .withLogEntryReader(logEntryReader)
            .withConfig(config)
            .withDependencies(dependencies)
            .withMemoryTracker(memoryTracker)
            .build();
    boolean failOnCorruptedLogFiles = config.get(GraphDatabaseInternalSettings.fail_on_corrupted_log_files);
    validateStoreId(logFiles, storageEngine.getStoreId(), config);
    TransactionMetadataCache metadataCache = new TransactionMetadataCache();
    PhysicalLogicalTransactionStore transactionStore = new PhysicalLogicalTransactionStore(logFiles, metadataCache,
            logEntryReader, monitors, failOnCorruptedLogFiles);
    BatchingTransactionAppender transactionAppender = new BatchingTransactionAppender(logFiles,
            LogRotation.NO_ROTATION, metadataCache, metadataProvider, databaseHealth);
    LifeSupport schemaLife = new LifeSupport();
    schemaLife.add(storageEngine.schemaAndTokensLifecycle());
    schemaLife.add(indexingService);
    var doParallelRecovery = config.get(GraphDatabaseInternalSettings.do_parallel_recovery);
    TransactionLogsRecovery transactionLogsRecovery = transactionLogRecovery(fs, metadataProvider,
            monitors.newMonitor(RecoveryMonitor.class), monitors.newMonitor(RecoveryStartInformationProvider.Monitor.class),
            logFiles, storageEngine, transactionStore, metadataProvider, schemaLife, databaseLayout,
            failOnCorruptedLogFiles, recoveryLog, startupChecker, tracers.getPageCacheTracer(), memoryTracker,
            doParallelRecovery);
    CheckPointerImpl.ForceOperation forceOperation = new DefaultForceOperation(indexingService, storageEngine);
    var checkpointAppender = logFiles.getCheckpointFile().getCheckpointAppender();
    CheckPointerImpl checkPointer = new CheckPointerImpl(metadataProvider, RecoveryThreshold.INSTANCE, forceOperation,
            LogPruning.NO_PRUNING, checkpointAppender, databaseHealth, logProvider, tracers, IOController.DISABLED,
            new StoreCopyCheckPointMutex(), versionContextSupplier, clock);
    recoveryLife.add(scheduler);
    recoveryLife.add(recoveryCleanupCollector);
    recoveryLife.add(extensions);
    recoveryLife.add(indexProviderMap);
    recoveryLife.add(storageEngine);
    recoveryLife.add(new MissingTransactionLogsCheck(databaseLayout, config, fs, logFiles, recoveryLog));
    recoveryLife.add(logFiles);
    recoveryLife.add(transactionLogsRecovery);
    recoveryLife.add(transactionAppender);
    recoveryLife.add(checkPointer);
    try {
        recoveryLife.start();
        if (databaseHealth.isHealthy()) {
            checkPointer.forceCheckPoint(new SimpleTriggerInfo("Recovery completed."));
        }
    } finally {
        recoveryLife.shutdown();
    }
}
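The recoveryLife usage above is the standard LifeSupport idiom: register components in dependency order, start them all, and shut down in a finally block so that everything stops in reverse order even when recovery fails. A minimal sketch, assuming Neo4j's org.neo4j.kernel.lifecycle API (LifeSupport, LifecycleAdapter) is on the classpath:

import org.neo4j.kernel.lifecycle.LifeSupport;
import org.neo4j.kernel.lifecycle.LifecycleAdapter;

public class LifeSupportSketch {
    public static void main(String[] args) {
        LifeSupport life = new LifeSupport();
        // Components start in the order they were added...
        life.add(new LifecycleAdapter() {
            @Override public void start() { System.out.println("scheduler up"); }
            @Override public void shutdown() { System.out.println("scheduler down"); }
        });
        life.add(new LifecycleAdapter() {
            @Override public void start() { System.out.println("storage up"); }
            @Override public void shutdown() { System.out.println("storage down"); }
        });
        try {
            life.start();    // scheduler up, storage up
        } finally {
            life.shutdown(); // ...and stop in reverse: storage down, scheduler down
        }
    }
}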