Use of org.neo4j.logging.internal.LogService in project neo4j by neo4j.
The class StoreMigratorTest, method shouldGenerateTransactionInformationWhenLogsAreEmpty.
@Test
void shouldGenerateTransactionInformationWhenLogsAreEmpty() throws Exception {
    // given: a freshly created, empty metadata store file and no transaction logs
    long expectedTxId = 1;
    var metadataStore = databaseLayout.metadataStore();
    Files.createFile(metadataStore);
    var config = mock(Config.class);
    var logService = new SimpleLogService(NullLogProvider.getInstance(), NullLogProvider.getInstance());

    // when: none of the transaction fields are present in the metadata store
    var databaseName = databaseLayout.getDatabaseName();
    assertEquals(FIELD_NOT_PRESENT, getRecord(pageCache, metadataStore, LAST_TRANSACTION_ID, databaseName, NULL));
    assertEquals(FIELD_NOT_PRESENT, getRecord(pageCache, metadataStore, LAST_TRANSACTION_CHECKSUM, databaseName, NULL));
    assertEquals(FIELD_NOT_PRESENT, getRecord(pageCache, metadataStore, LAST_TRANSACTION_COMMIT_TIMESTAMP, databaseName, NULL));

    // ... and the migrator is asked to extract transaction id information
    var migrator = new RecordStorageMigrator(fileSystem, pageCache, config, logService, jobScheduler, PageCacheTracer.NULL, batchImporterFactory, INSTANCE);
    var extracted = migrator.extractTransactionIdInformation(metadataStore, expectedTxId, databaseLayout, NULL);

    // then: default checksum/timestamp values are generated for the missing fields
    assertEquals(expectedTxId, extracted.transactionId());
    assertEquals(TransactionIdStore.BASE_TX_CHECKSUM, extracted.checksum());
    assertEquals(TransactionIdStore.BASE_TX_COMMIT_TIMESTAMP, extracted.commitTimestamp());
}
Use of org.neo4j.logging.internal.LogService in project neo4j by neo4j.
The class StoreMigratorTest, method extractTransactionalInformationFromLogs.
private void extractTransactionalInformationFromLogs(Path customLogsLocation) throws IOException {
    var config = Config.defaults(transaction_logs_root_path, customLogsLocation);
    var logService = new SimpleLogService(NullLogProvider.getInstance(), NullLogProvider.getInstance());

    // Start a database whose transaction logs live in the custom location.
    var managementService = new TestDatabaseManagementServiceBuilder(databaseLayout)
            .setConfig(transaction_logs_root_path, customLogsLocation)
            .build();
    var database = (GraphDatabaseAPI) managementService.database(DEFAULT_DATABASE_NAME);
    var storageEngineFactory = database.getDependencyResolver().resolveDependency(StorageEngineFactory.class);

    // Commit a handful of transactions so the logs are non-empty.
    for (int committed = 0; committed < 10; committed++) {
        try (var tx = database.beginTx()) {
            tx.createNode();
            tx.commit();
        }
    }
    var dbLayout = database.databaseLayout();
    var neoStore = dbLayout.metadataStore();
    managementService.shutdown();

    // Erase the recorded closed-log version so the migrator must derive the
    // position from the transaction logs themselves.
    MetaDataStore.setRecord(pageCache, neoStore, MetaDataStore.Position.LAST_CLOSED_TRANSACTION_LOG_VERSION, MetaDataRecordFormat.FIELD_NOT_PRESENT, dbLayout.getDatabaseName(), NULL);

    var migrator = new RecordStorageMigrator(fileSystem, pageCache, config, logService, jobScheduler, PageCacheTracer.NULL, batchImporterFactory, INSTANCE);
    var logPosition = migrator.extractTransactionLogPosition(neoStore, dbLayout, 100, NULL);

    var logFiles = LogFilesBuilder.activeFilesBuilder(dbLayout, fileSystem, pageCache)
            .withConfig(config)
            .withCommandReaderFactory(storageEngineFactory.commandReaderFactory())
            .build();
    // The extracted position must point at the very end of the highest log file.
    assertEquals(0, logPosition.getLogVersion());
    assertEquals(Files.size(logFiles.getLogFile().getHighestLogFile()), logPosition.getByteOffset());
}
Use of org.neo4j.logging.internal.LogService in project neo4j by neo4j.
The class RecordStorageMigratorIT, method mustMigrateSchemaStoreToNewFormat.
// Verifies that migrating a legacy (3.4-format) schema store full of randomly
// generated schema rules to the current format preserves an exact representation
// of every rule. Rule names are deliberately excluded from the comparison, since
// migration may change them.
@ParameterizedTest
@MethodSource("versions")
void mustMigrateSchemaStoreToNewFormat(String version, LogPosition expectedLogPosition, Function<TransactionId, Boolean> txIdComparator) throws Exception {
// Given we have an old store full of random schema rules.
Path prepare = testDirectory.directory("prepare");
var fs = testDirectory.getFileSystem();
MigrationTestUtils.prepareSampleLegacyDatabase(version, fs, databaseLayout.databaseDirectory(), prepare);
// and a state of the migration saying that it has done the actual migration
LogService logService = NullLogService.getInstance();
// Uses this special scan-on-open IGF because when the new IndexedIdGenerator was introduced this test would break
// when trying to open an older store, before doing migration.
IdGeneratorFactory igf = new ScanOnOpenOverwritingIdGeneratorFactory(fs, databaseLayout.getDatabaseName());
LogProvider logProvider = logService.getInternalLogProvider();
// Prepare all the tokens we'll need.
StoreFactory legacyStoreFactory = new StoreFactory(databaseLayout, CONFIG, igf, pageCache, fs, StandardV3_4.RECORD_FORMATS, logProvider, PageCacheTracer.NULL, writable(), immutable.empty());
NeoStores stores = legacyStoreFactory.openNeoStores(false, StoreType.LABEL_TOKEN, StoreType.LABEL_TOKEN_NAME, StoreType.RELATIONSHIP_TYPE_TOKEN, StoreType.RELATIONSHIP_TYPE_TOKEN_NAME, StoreType.PROPERTY_KEY_TOKEN, StoreType.PROPERTY_KEY_TOKEN_NAME);
createTokens(stores.getLabelTokenStore(), MAX_LABEL_ID);
createTokens(stores.getRelationshipTypeTokenStore(), MAX_RELATIONSHIP_TYPE_ID);
createTokens(stores.getPropertyKeyTokenStore(), MAX_PROPERTY_KEY_ID);
stores.close();
// Prepare the legacy schema store we'll migrate.
Path storeFile = databaseLayout.schemaStore();
Path idFile = databaseLayout.idSchemaStore();
SchemaStore35 schemaStore35 = new SchemaStore35(storeFile, idFile, CONFIG, IdType.SCHEMA, igf, pageCache, logProvider, StandardV3_4.RECORD_FORMATS, writable(), DEFAULT_DATABASE_NAME, immutable.empty());
schemaStore35.initialise(false, NULL);
// Reserve a batch of ids up front, randomly split between index rules and constraint rules.
SplittableRandom rng = new SplittableRandom(randomRule.seed());
LongHashSet indexes = new LongHashSet();
LongHashSet constraints = new LongHashSet();
for (int i = 0; i < 10; i++) {
long id = schemaStore35.nextId(NULL);
MutableLongSet target = rng.nextInt(3) < 2 ? indexes : constraints;
target.add(id);
}
// Generate random schema rules for the reserved ids and persist them in the legacy store.
List<SchemaRule> generatedRules = new ArrayList<>();
RealIdsRandomSchema randomSchema = new RealIdsRandomSchema(rng, indexes, constraints);
while (randomSchema.hasMoreIds()) {
try {
SchemaRule schemaRule = randomSchema.nextSchemaRule();
if (schemaRule instanceof ConstraintDescriptor) {
ConstraintDescriptor constraint = (ConstraintDescriptor) schemaRule;
if (constraint.isIndexBackedConstraint() && !constraint.asIndexBackedConstraint().hasOwnedIndexId()) {
// Filter out constraints that are supposed to own indexes, but don't, because those are illegal to persist.
randomSchema.rollback();
continue;
}
}
randomSchema.commit();
generatedRules.add(schemaRule);
List<DynamicRecord> dynamicRecords = allocateFrom(schemaStore35, schemaRule, NULL);
for (DynamicRecord dynamicRecord : dynamicRecords) {
schemaStore35.updateRecord(dynamicRecord, NULL);
}
} catch (NoSuchElementException ignore) {
// We're starting to run low on ids, but just ignore this and loop as long as there are still some left.
}
}
schemaStore35.flush(NULL);
schemaStore35.close();
RecordStoreVersionCheck check = getVersionCheck(pageCache, databaseLayout);
String versionToMigrateFrom = getVersionToMigrateFrom(check);
MigrationProgressMonitor progressMonitor = SILENT;
RecordStorageMigrator migrator = new RecordStorageMigrator(fs, pageCache, CONFIG, logService, jobScheduler, PageCacheTracer.NULL, batchImporterFactory, INSTANCE);
// When we migrate it to the new store format.
String versionToMigrateTo = getVersionToMigrateTo(check);
ProgressReporter reporter = progressMonitor.startSection("section");
migrator.migrate(databaseLayout, migrationLayout, reporter, versionToMigrateFrom, versionToMigrateTo, EMPTY);
migrator.moveMigratedFiles(migrationLayout, databaseLayout, versionToMigrateFrom, versionToMigrateTo);
// Sort by rule id so the comparison below is order-independent.
generatedRules.sort(Comparator.comparingLong(SchemaRule::getId));
// Then the new store should retain an exact representation of the old-format schema rules.
StoreFactory storeFactory = new StoreFactory(databaseLayout, CONFIG, igf, pageCache, fs, logProvider, PageCacheTracer.NULL, writable());
try (NeoStores neoStores = storeFactory.openAllNeoStores()) {
SchemaStore schemaStore = neoStores.getSchemaStore();
TokenHolders tokenHolders = StoreTokens.readOnlyTokenHolders(neoStores, NULL);
SchemaStorage storage = new SchemaStorage(schemaStore, tokenHolders, () -> KernelVersion.LATEST);
List<SchemaRule> migratedRules = new ArrayList<>();
storage.getAll(NULL).iterator().forEachRemaining(migratedRules::add);
// Nerf the rule names, since migration may change those around.
migratedRules = migratedRules.stream().map(r -> r.withName("a")).collect(Collectors.toList());
generatedRules = generatedRules.stream().map(r -> r.withName("a")).collect(Collectors.toList());
assertEquals(migratedRules).isEqualTo(generatedRules);
}
}
Use of org.neo4j.logging.internal.LogService in project neo4j by neo4j.
The class RecordStorageMigratorIT, method shouldComputeTheLastTxLogPositionCorrectly.
@ParameterizedTest
@MethodSource("versions")
void shouldComputeTheLastTxLogPositionCorrectly(String version, LogPosition expectedLogPosition, Function<TransactionId, Boolean> txIdComparator) throws Throwable {
    // GIVEN a sample legacy database copied into place
    var prepareDirectory = testDirectory.directory("prepare");
    var fs = testDirectory.getFileSystem();
    MigrationTestUtils.prepareSampleLegacyDatabase(version, fs, databaseLayout.databaseDirectory(), prepareDirectory);

    // and a migrator configured with null logging and the detected source version
    var logService = NullLogService.getInstance();
    var versionCheck = getVersionCheck(pageCache, databaseLayout);
    var versionToMigrateFrom = getVersionToMigrateFrom(versionCheck);
    var migrator = new RecordStorageMigrator(fs, pageCache, CONFIG, logService, jobScheduler, PageCacheTracer.NULL, batchImporterFactory, INSTANCE);

    // WHEN the actual migration is performed (SILENT progress monitor, single section)
    migrator.migrate(databaseLayout, migrationLayout, SILENT.startSection("section"), versionToMigrateFrom, getVersionToMigrateTo(versionCheck), EMPTY);

    // THEN the migrator should have recorded the expected last tx log position
    assertEquals(expectedLogPosition, migrator.readLastTxLogPosition(migrationLayout));
}
Use of org.neo4j.logging.internal.LogService in project neo4j by neo4j.
The class RecordStorageMigratorIT, method shouldBeAbleToMigrateWithoutErrors.
// Verifies that a full migrate + move-migrated-files cycle on a legacy database
// completes without logging any errors, produces a relationship-group degrees
// store that needs no rebuild, and leaves a store that can be reopened.
@ParameterizedTest
@MethodSource("versions")
void shouldBeAbleToMigrateWithoutErrors(String version, LogPosition expectedLogPosition, Function<TransactionId, Boolean> txIdComparator) throws Exception {
// GIVEN a legacy database
Path prepare = testDirectory.directory("prepare");
var fs = testDirectory.getFileSystem();
MigrationTestUtils.prepareSampleLegacyDatabase(version, fs, databaseLayout.databaseDirectory(), prepare);
// Capture all log output so we can assert on the absence of errors at the end.
AssertableLogProvider logProvider = new AssertableLogProvider(true);
LogService logService = new SimpleLogService(logProvider, logProvider);
RecordStoreVersionCheck check = getVersionCheck(pageCache, databaseLayout);
String versionToMigrateFrom = getVersionToMigrateFrom(check);
RecordStorageMigrator migrator = new RecordStorageMigrator(fs, pageCache, CONFIG, logService, jobScheduler, PageCacheTracer.NULL, batchImporterFactory, INSTANCE);
// WHEN migrating
migrator.migrate(databaseLayout, migrationLayout, progressMonitor.startSection("section"), versionToMigrateFrom, getVersionToMigrateTo(check), EMPTY);
migrator.moveMigratedFiles(migrationLayout, databaseLayout, versionToMigrateFrom, getVersionToMigrateTo(check));
// THEN starting the new store should be successful
assertThat(testDirectory.getFileSystem().fileExists(databaseLayout.relationshipGroupDegreesStore())).isTrue();
// Rebuilder that fails the test if a rebuild is ever attempted — migration is
// expected to have produced a complete degrees store already.
GBPTreeRelationshipGroupDegreesStore.DegreesRebuilder noRebuildAssertion = new GBPTreeRelationshipGroupDegreesStore.DegreesRebuilder() {
@Override
public void rebuild(RelationshipGroupDegreesStore.Updater updater, CursorContext cursorContext, MemoryTracker memoryTracker) {
throw new IllegalStateException("Rebuild should not be required");
}
@Override
public long lastCommittedTxId() {
try {
return new RecordStorageEngineFactory().readOnlyTransactionIdStore(fs, databaseLayout, pageCache, NULL).getLastCommittedTransactionId();
} catch (IOException e) {
throw new UncheckedIOException(e);
}
}
};
try (GBPTreeRelationshipGroupDegreesStore groupDegreesStore = new GBPTreeRelationshipGroupDegreesStore(pageCache, databaseLayout.relationshipGroupDegreesStore(), testDirectory.getFileSystem(), immediate(), noRebuildAssertion, writable(), PageCacheTracer.NULL, GBPTreeGenericCountsStore.NO_MONITOR, databaseLayout.getDatabaseName(), counts_store_max_cached_entries.defaultValue())) {
// The rebuild would happen here in start and will throw exception (above) if invoked
groupDegreesStore.start(NULL, INSTANCE);
// The store keeps track of committed transactions.
// It is essential that it starts with the transaction
// that is the last committed one at the upgrade time.
assertEquals(TX_ID, groupDegreesStore.txId());
}
// Reopening all stores must succeed on the migrated database.
StoreFactory storeFactory = new StoreFactory(databaseLayout, CONFIG, new ScanOnOpenOverwritingIdGeneratorFactory(fs, databaseLayout.getDatabaseName()), pageCache, fs, logService.getInternalLogProvider(), PageCacheTracer.NULL, writable());
storeFactory.openAllNeoStores().close();
// No ERROR-level messages may have been logged during the whole migration.
assertThat(logProvider).forLevel(ERROR).doesNotHaveAnyLogs();
}
Aggregations