Use of org.eclipse.collections.api.set.primitive.MutableLongSet in the neo4j project (by neo4j).
From class RecordStorageMigratorIT, method mustMigrateSchemaStoreToNewFormat:
@ParameterizedTest
@MethodSource("versions")
void mustMigrateSchemaStoreToNewFormat(String version, LogPosition expectedLogPosition, Function<TransactionId, Boolean> txIdComparator) throws Exception {
// Given we have an old store full of random schema rules.
Path prepare = testDirectory.directory("prepare");
var fs = testDirectory.getFileSystem();
MigrationTestUtils.prepareSampleLegacyDatabase(version, fs, databaseLayout.databaseDirectory(), prepare);
// and a state of the migration saying that it has done the actual migration
LogService logService = NullLogService.getInstance();
// Uses this special scan-on-open IGF because when the new IndexedIdGenerator was introduced this test would break
// when trying to open an older store, before doing migration.
IdGeneratorFactory igf = new ScanOnOpenOverwritingIdGeneratorFactory(fs, databaseLayout.getDatabaseName());
LogProvider logProvider = logService.getInternalLogProvider();
// Prepare all the tokens we'll need.
StoreFactory legacyStoreFactory = new StoreFactory(databaseLayout, CONFIG, igf, pageCache, fs, StandardV3_4.RECORD_FORMATS, logProvider, PageCacheTracer.NULL, writable(), immutable.empty());
NeoStores stores = legacyStoreFactory.openNeoStores(false, StoreType.LABEL_TOKEN, StoreType.LABEL_TOKEN_NAME, StoreType.RELATIONSHIP_TYPE_TOKEN, StoreType.RELATIONSHIP_TYPE_TOKEN_NAME, StoreType.PROPERTY_KEY_TOKEN, StoreType.PROPERTY_KEY_TOKEN_NAME);
createTokens(stores.getLabelTokenStore(), MAX_LABEL_ID);
createTokens(stores.getRelationshipTypeTokenStore(), MAX_RELATIONSHIP_TYPE_ID);
createTokens(stores.getPropertyKeyTokenStore(), MAX_PROPERTY_KEY_ID);
stores.close();
// Prepare the legacy schema store we'll migrate.
Path storeFile = databaseLayout.schemaStore();
Path idFile = databaseLayout.idSchemaStore();
SchemaStore35 schemaStore35 = new SchemaStore35(storeFile, idFile, CONFIG, IdType.SCHEMA, igf, pageCache, logProvider, StandardV3_4.RECORD_FORMATS, writable(), DEFAULT_DATABASE_NAME, immutable.empty());
schemaStore35.initialise(false, NULL);
SplittableRandom rng = new SplittableRandom(randomRule.seed());
LongHashSet indexes = new LongHashSet();
LongHashSet constraints = new LongHashSet();
for (int i = 0; i < 10; i++) {
long id = schemaStore35.nextId(NULL);
MutableLongSet target = rng.nextInt(3) < 2 ? indexes : constraints;
target.add(id);
}
List<SchemaRule> generatedRules = new ArrayList<>();
RealIdsRandomSchema randomSchema = new RealIdsRandomSchema(rng, indexes, constraints);
while (randomSchema.hasMoreIds()) {
try {
SchemaRule schemaRule = randomSchema.nextSchemaRule();
if (schemaRule instanceof ConstraintDescriptor) {
ConstraintDescriptor constraint = (ConstraintDescriptor) schemaRule;
if (constraint.isIndexBackedConstraint() && !constraint.asIndexBackedConstraint().hasOwnedIndexId()) {
// Filter out constraints that are supposed to own indexes, but don't, because those are illegal to persist.
randomSchema.rollback();
continue;
}
}
randomSchema.commit();
generatedRules.add(schemaRule);
List<DynamicRecord> dynamicRecords = allocateFrom(schemaStore35, schemaRule, NULL);
for (DynamicRecord dynamicRecord : dynamicRecords) {
schemaStore35.updateRecord(dynamicRecord, NULL);
}
} catch (NoSuchElementException ignore) {
// We're starting to run low on ids, but just ignore this and loop as along as there are still some left.
}
}
schemaStore35.flush(NULL);
schemaStore35.close();
RecordStoreVersionCheck check = getVersionCheck(pageCache, databaseLayout);
String versionToMigrateFrom = getVersionToMigrateFrom(check);
MigrationProgressMonitor progressMonitor = SILENT;
RecordStorageMigrator migrator = new RecordStorageMigrator(fs, pageCache, CONFIG, logService, jobScheduler, PageCacheTracer.NULL, batchImporterFactory, INSTANCE);
// When we migrate it to the new store format.
String versionToMigrateTo = getVersionToMigrateTo(check);
ProgressReporter reporter = progressMonitor.startSection("section");
migrator.migrate(databaseLayout, migrationLayout, reporter, versionToMigrateFrom, versionToMigrateTo, EMPTY);
migrator.moveMigratedFiles(migrationLayout, databaseLayout, versionToMigrateFrom, versionToMigrateTo);
generatedRules.sort(Comparator.comparingLong(SchemaRule::getId));
// Then the new store should retain an exact representation of the old-format schema rules.
StoreFactory storeFactory = new StoreFactory(databaseLayout, CONFIG, igf, pageCache, fs, logProvider, PageCacheTracer.NULL, writable());
try (NeoStores neoStores = storeFactory.openAllNeoStores()) {
SchemaStore schemaStore = neoStores.getSchemaStore();
TokenHolders tokenHolders = StoreTokens.readOnlyTokenHolders(neoStores, NULL);
SchemaStorage storage = new SchemaStorage(schemaStore, tokenHolders, () -> KernelVersion.LATEST);
List<SchemaRule> migratedRules = new ArrayList<>();
storage.getAll(NULL).iterator().forEachRemaining(migratedRules::add);
// Nerf the rule names, since migration may change those around.
migratedRules = migratedRules.stream().map(r -> r.withName("a")).collect(Collectors.toList());
generatedRules = generatedRules.stream().map(r -> r.withName("a")).collect(Collectors.toList());
assertThat(migratedRules).isEqualTo(generatedRules);
}
}
Use of org.eclipse.collections.api.set.primitive.MutableLongSet in the neo4j project (by neo4j).
From class SimpleIdProvider, method unacquiredIds:
LongIterator unacquiredIds() {
final MutableLongSet unacquiredIds = new LongHashSet();
releasedIds.forEach(pair -> unacquiredIds.add(pair.getValue()));
return unacquiredIds.longIterator();
}
Use of org.eclipse.collections.api.set.primitive.MutableLongSet in the neo4j project (by neo4j).
From class CommonAbstractStoreBehaviourTest, method shouldProvideFreeIdsToMissingIdGenerator:
@Test
void shouldProvideFreeIdsToMissingIdGenerator() throws IOException {
// given
createStore();
store.start(NULL);
MutableLongSet holes = LongSets.mutable.empty();
holes.add(store.nextId(NULL));
holes.add(store.nextId(NULL));
store.updateRecord(new IntRecord(store.nextId(NULL), 1), NULL);
holes.add(store.nextId(NULL));
store.updateRecord(new IntRecord(store.nextId(NULL), 1), NULL);
// when
store.close();
fs.deleteFile(Path.of(MyStore.ID_FILENAME));
createStore();
store.start(NULL);
// then
int numberOfHoles = holes.size();
for (int i = 0; i < numberOfHoles; i++) {
assertTrue(holes.remove(store.nextId(NULL)));
}
assertTrue(holes.isEmpty());
}
Use of org.eclipse.collections.api.set.primitive.MutableLongSet in the neo4j project (by neo4j).
From class RelationshipChainCheckerTest, method shouldReportReferencesOtherNodesForward:
@Test
void shouldReportReferencesOtherNodesForward() throws Exception {
shouldReportReferencesOtherNodes(true, relationship -> {
MutableLongSet set = LongSets.mutable.of(nodeId1, nodeId2, nodeId3);
set.remove(relationship.getFirstNode());
set.remove(relationship.getSecondNode());
relationship.setFirstNode(set.longIterator().next());
}, report -> {
report.sourceNextDoesNotReferenceBack(any());
report.sourceNextReferencesOtherNodes(any());
report.sourcePrevReferencesOtherNodes(any());
report.targetPrevReferencesOtherNodes(any());
});
}
Use of org.eclipse.collections.api.set.primitive.MutableLongSet in the neo4j project (by neo4j).
From class LuceneIndexAccessorIT, method shouldIterateAllDocumentsEvenWhenContainingDeletionsInOnlySomeLeaves:
@Test
void shouldIterateAllDocumentsEvenWhenContainingDeletionsInOnlySomeLeaves() throws Exception {
// given
int nodes = 300_000;
MutableLongSet expectedNodes = LongSets.mutable.empty();
IndexDescriptor indexDescriptor = IndexPrototype.forSchema(SchemaDescriptor.forLabel(1, 2, 3, 4, 5)).withName("TestIndex").materialise(99);
populateWithInitialNodes(indexDescriptor, nodes, expectedNodes);
try (IndexAccessor accessor = indexProvider.getOnlineAccessor(indexDescriptor, samplingConfig, mock(TokenNameLookup.class))) {
// when
removeSomeNodes(indexDescriptor, 2, accessor, expectedNodes);
// then
try (BoundedIterable<Long> reader = accessor.newAllEntriesValueReader(NULL)) {
MutableLongSet readIds = LongSets.mutable.empty();
reader.forEach(readIds::add);
assertThat(readIds).isEqualTo(expectedNodes);
}
}
}
Aggregations