Use of org.neo4j.kernel.impl.store.format.RecordFormats in project neo4j by neo4j.
From the class VersionCommandTest, method readsLatestStoreVersionCorrectly:
@Test
public void readsLatestStoreVersionCorrectly() throws Exception {
    // Stamp the neo store file with the current (default) record format's store version.
    RecordFormats defaultFormat = RecordFormatSelector.defaultFormat();
    prepareNeoStoreFile(defaultFormat.storeVersion());

    execute(databaseDirectory.toString());

    // The command is expected to print exactly three lines, in this order.
    verify(out, times(3)).accept(outCaptor.capture());
    String[] expectedLines = {
            String.format("Store format version: %s", defaultFormat.storeVersion()),
            String.format("Introduced in version: %s", defaultFormat.introductionVersion()),
            String.format("Current version: %s", Version.getNeo4jVersion())
    };
    assertEquals(Arrays.asList(expectedLines), outCaptor.getAllValues());
}
Use of org.neo4j.kernel.impl.store.format.RecordFormats in project neo4j by neo4j.
From the class NeoStoreDataSource, method start:
@Override
public void start() throws IOException {
// Fresh dependency container and lifecycle for this (re)start; previous ones are discarded.
dependencies = new Dependencies();
life = new LifeSupport();
// Resolve the index/label-scan providers by their configured selection strategies.
schemaIndexProvider = dependencyResolver.resolveDependency(SchemaIndexProvider.class, HighestSelectionStrategy.getInstance());
labelScanStoreProvider = dependencyResolver.resolveDependency(LabelScanStoreProvider.class, new NamedLabelScanStoreSelectionStrategy(config));
// Side effect: the strategy deletes stores belonging to label scan store providers other than the selected one.
dependencyResolver.resolveDependency(LabelScanStoreProvider.class, new DeleteStoresFromOtherLabelScanStoreProviders(labelScanStoreProvider));
IndexConfigStore indexConfigStore = new IndexConfigStore(storeDir, fs);
dependencies.satisfyDependency(lockService);
dependencies.satisfyDependency(indexConfigStore);
life.add(indexConfigStore);
// Monitor listeners
LoggingLogFileMonitor loggingLogMonitor = new LoggingLogFileMonitor(msgLog);
monitors.addMonitorListener(loggingLogMonitor);
life.add(new Delegate(Lifecycles.multiple(indexProviders.values())));
// Upgrade the store before we begin
RecordFormats formats = selectStoreFormats(config, storeDir, fs, pageCache, logService);
upgradeStore(formats);
// Build all modules and their services
StorageEngine storageEngine = null;
try {
UpdateableSchemaState updateableSchemaState = new KernelSchemaStateStore(logProvider);
SynchronizedArrayIdOrderingQueue legacyIndexTransactionOrdering = new SynchronizedArrayIdOrderingQueue(20);
// The storage engine must exist before the transaction log / kernel modules, which read from it.
storageEngine = buildStorageEngine(propertyKeyTokenHolder, labelTokens, relationshipTypeTokens, legacyIndexProviderLookup, indexConfigStore, updateableSchemaState::clear, legacyIndexTransactionOrdering);
LogEntryReader<ReadableClosablePositionAwareChannel> logEntryReader = new VersionAwareLogEntryReader<>(storageEngine.commandReaderFactory());
// These are satisfied by buildStorageEngine above — resolved here for the log/recovery modules below.
TransactionIdStore transactionIdStore = dependencies.resolveDependency(TransactionIdStore.class);
LogVersionRepository logVersionRepository = dependencies.resolveDependency(LogVersionRepository.class);
NeoStoreTransactionLogModule transactionLogModule = buildTransactionLogs(storeDir, config, logProvider, scheduler, fs, storageEngine, logEntryReader, legacyIndexTransactionOrdering, transactionIdStore, logVersionRepository);
transactionLogModule.satisfyDependencies(dependencies);
// Recovery is wired before the kernel module so it runs as part of life.start() below.
buildRecovery(fs, transactionIdStore, logVersionRepository, monitors.newMonitor(Recovery.Monitor.class), monitors.newMonitor(PositionToRecoverFrom.Monitor.class), transactionLogModule.logFiles(), startupStatistics, storageEngine, logEntryReader, transactionLogModule.logicalTransactionStore());
// At the time of writing this comes from the storage engine (IndexStoreView)
PropertyAccessor propertyAccessor = dependencies.resolveDependency(PropertyAccessor.class);
final NeoStoreKernelModule kernelModule = buildKernel(transactionLogModule.transactionAppender(), dependencies.resolveDependency(IndexingService.class), storageEngine.storeReadLayer(), updateableSchemaState, dependencies.resolveDependency(LabelScanStore.class), storageEngine, indexConfigStore, transactionIdStore, availabilityGuard, clock, propertyAccessor);
kernelModule.satisfyDependencies(dependencies);
// Do these assignments last so that we can ensure no cyclical dependencies exist
this.storageEngine = storageEngine;
this.transactionLogModule = transactionLogModule;
this.kernelModule = kernelModule;
dependencies.satisfyDependency(this);
dependencies.satisfyDependency(updateableSchemaState);
dependencies.satisfyDependency(storageEngine.storeReadLayer());
dependencies.satisfyDependency(logEntryReader);
dependencies.satisfyDependency(storageEngine);
} catch (Throwable e) {
// Something unexpected happened during startup
msgLog.warn("Exception occurred while setting up store modules. Attempting to close things down.", e);
try {
// Close the neostore, so that locks are released properly
if (storageEngine != null) {
storageEngine.forceClose();
}
} catch (Exception closeException) {
msgLog.error("Couldn't close neostore after startup failure", closeException);
}
throw Exceptions.launderedException(e);
}
// NOTE: please make sure this is performed after having added everything to the life, in fact we would like
// to perform the checkpointing as first step when the life is shutdown.
life.add(lifecycleToTriggerCheckPointOnShutdown());
try {
// Starts every component registered above, including recovery.
life.start();
} catch (Throwable e) {
// Something unexpected happened during startup
msgLog.warn("Exception occurred while starting the datasource. Attempting to close things down.", e);
try {
life.shutdown();
// Close the neostore, so that locks are released properly
storageEngine.forceClose();
} catch (Exception closeException) {
msgLog.error("Couldn't close neostore after startup failure", closeException);
}
throw Exceptions.launderedException(e);
}
/*
* At this point recovery has completed and the datasource is ready for use. Whatever panic might have
* happened before has been healed. So we can safely set the kernel health to ok.
* This right now has any real effect only in the case of internal restarts (for example, after a store copy
* in the case of HA). Standalone instances will have to be restarted by the user, as is proper for all
* kernel panics.
*/
databaseHealth.healed();
}
Use of org.neo4j.kernel.impl.store.format.RecordFormats in project neo4j by neo4j.
From the class InputEntityCacherTokenCreationTest, method mockRecordFormats:
/**
 * Creates a mocked {@code RecordFormats} whose token and relationship-group record formats
 * are themselves mocks configured with the given maximum id capacities.
 */
private RecordFormats mockRecordFormats(long maxPropertyKeyId, long maxLabelId, long maxRelationshipTypeId, long maxRelationshipGroupId) {
    RecordFormats formats = mock(RecordFormats.class);
    // Stub each accessor with a record-format mock capped at the corresponding max id.
    when(formats.propertyKeyToken()).thenReturn(getRecordFormatMock(maxPropertyKeyId));
    when(formats.labelToken()).thenReturn(getRecordFormatMock(maxLabelId));
    when(formats.relationshipTypeToken()).thenReturn(getRecordFormatMock(maxRelationshipTypeId));
    when(formats.relationshipGroup()).thenReturn(getRecordFormatMock(maxRelationshipGroupId));
    return formats;
}
Use of org.neo4j.kernel.impl.store.format.RecordFormats in project neo4j by neo4j.
From the class BatchingNeoStoresTest, method shouldRespectDbConfig:
@Test
public void shouldRespectDbConfig() throws Exception {
    // GIVEN a config where both dynamic stores use a custom block size
    final int blockSize = 10;
    Config config = Config.embeddedDefaults(stringMap(
            GraphDatabaseSettings.array_block_size.name(), String.valueOf(blockSize),
            GraphDatabaseSettings.string_block_size.name(), String.valueOf(blockSize)));

    // WHEN opening batching neo stores with that config
    RecordFormats formats = Standard.LATEST_RECORD_FORMATS;
    int headerSize = formats.dynamic().getRecordHeaderSize();
    try (BatchingNeoStores store = BatchingNeoStores.batchingNeoStores(fsr.get(), storeDir, formats, DEFAULT, NullLogService.getInstance(), EMPTY, config)) {
        // THEN each dynamic record is the configured block size plus the record header
        assertEquals(blockSize + headerSize, store.getPropertyStore().getArrayStore().getRecordSize());
        assertEquals(blockSize + headerSize, store.getPropertyStore().getStringStore().getRecordSize());
    }
}
Use of org.neo4j.kernel.impl.store.format.RecordFormats in project neo4j by neo4j.
From the class MultipleIndexPopulationStressIT, method createRandomData:
/**
 * Batch-imports {@code count} randomly generated entities into the test database directory.
 *
 * @param count number of random input entities to import
 * @throws IOException if the import fails
 */
private void createRandomData(int count) throws IOException {
    Config importConfig = Config.empty();
    // Pick the record format the same way the database would for this (empty) config.
    RecordFormats formats = RecordFormatSelector.selectForConfig(importConfig, NullLogProvider.getInstance());
    BatchImporter batchImporter = new ParallelBatchImporter(
            directory.graphDbDir(),
            fileSystemRule.get(),
            DEFAULT,
            NullLogService.getInstance(),
            ExecutionMonitors.invisible(),
            EMPTY,
            importConfig,
            formats);
    batchImporter.doImport(new RandomDataInput(count));
}
Aggregations