Use of org.neo4j.token.TokenHolders in project neo4j by neo4j.
From class BatchingNeoStoresTest, method someDataInTheDatabase.
private void someDataInTheDatabase(Config config) throws Exception {
    NullLog nullLog = NullLog.getInstance();
    try (JobScheduler scheduler = JobSchedulerFactory.createInitialisedScheduler();
            PageCache pageCache = new ConfiguringPageCacheFactory(fileSystem, Config.defaults(), PageCacheTracer.NULL,
                    nullLog, scheduler, Clocks.nanoClock(), new MemoryPools()).getOrCreatePageCache();
            Lifespan life = new Lifespan()) {
        // TODO this little dance with TokenHolders is really annoying and must be solved with a better abstraction
        DeferredInitializedTokenCreator propertyKeyTokenCreator = new DeferredInitializedTokenCreator() {
            @Override
            void create(String name, boolean internal, int id) {
                txState.propertyKeyDoCreateForName(name, internal, id);
            }
        };
        DeferredInitializedTokenCreator labelTokenCreator = new DeferredInitializedTokenCreator() {
            @Override
            void create(String name, boolean internal, int id) {
                txState.labelDoCreateForName(name, internal, id);
            }
        };
        DeferredInitializedTokenCreator relationshipTypeTokenCreator = new DeferredInitializedTokenCreator() {
            @Override
            void create(String name, boolean internal, int id) {
                txState.relationshipTypeDoCreateForName(name, internal, id);
            }
        };
        TokenHolders tokenHolders = new TokenHolders(
                new DelegatingTokenHolder(propertyKeyTokenCreator, TokenHolder.TYPE_PROPERTY_KEY),
                new DelegatingTokenHolder(labelTokenCreator, TokenHolder.TYPE_LABEL),
                new DelegatingTokenHolder(relationshipTypeTokenCreator, TokenHolder.TYPE_RELATIONSHIP_TYPE));
        IndexConfigCompleter indexConfigCompleter = index -> index;
        RecoveryCleanupWorkCollector recoveryCleanupWorkCollector = immediate();
        RecordStorageEngine storageEngine = life.add(new RecordStorageEngine(databaseLayout, Config.defaults(),
                pageCache, fileSystem, NullLogProvider.getInstance(), tokenHolders,
                new DatabaseSchemaState(NullLogProvider.getInstance()), new StandardConstraintSemantics(),
                indexConfigCompleter, LockService.NO_LOCK_SERVICE,
                new DatabaseHealth(PanicEventGenerator.NO_OP, nullLog),
                new DefaultIdGeneratorFactory(fileSystem, immediate(), DEFAULT_DATABASE_NAME),
                new DefaultIdController(), recoveryCleanupWorkCollector, PageCacheTracer.NULL, true, INSTANCE,
                writable(), CommandLockVerification.Factory.IGNORE, LockVerificationMonitor.Factory.IGNORE));
        // Create the relationship type token
        TxState txState = new TxState();
        NeoStores neoStores = storageEngine.testAccessNeoStores();
        CommandCreationContext commandCreationContext = storageEngine.newCommandCreationContext(INSTANCE);
        commandCreationContext.initialize(NULL);
        propertyKeyTokenCreator.initialize(neoStores.getPropertyKeyTokenStore(), txState);
        labelTokenCreator.initialize(neoStores.getLabelTokenStore(), txState);
        relationshipTypeTokenCreator.initialize(neoStores.getRelationshipTypeTokenStore(), txState);
        int relTypeId = tokenHolders.relationshipTypeTokens().getOrCreateId(RELTYPE.name());
        apply(txState, commandCreationContext, storageEngine);
        // Finally, we're initialized and ready to create two nodes and a relationship
        txState = new TxState();
        long node1 = commandCreationContext.reserveNode();
        long node2 = commandCreationContext.reserveNode();
        txState.nodeDoCreate(node1);
        txState.nodeDoCreate(node2);
        txState.relationshipDoCreate(commandCreationContext.reserveRelationship(), relTypeId, node1, node2);
        apply(txState, commandCreationContext, storageEngine);
        neoStores.flush(NULL);
    }
}
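For orientation, here is a minimal sketch (not part of the test above) of the lookup side of the same TokenHolders API: getOrCreateId funnels through the token creator wired in above, and getTokenById resolves the id back to a NamedToken, throwing TokenNotFoundException for unknown ids. The literal "RELTYPE" stands in for the test's RELTYPE.name().

// Sketch only: round-trip a relationship type name through the holders built above.
int relTypeId = tokenHolders.relationshipTypeTokens().getOrCreateId("RELTYPE");
NamedToken token = tokenHolders.relationshipTypeTokens().getTokenById(relTypeId);
assert token.name().equals("RELTYPE"); // id-to-name resolution matches the name we created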
Use of org.neo4j.token.TokenHolders in project neo4j by neo4j.
From class RecordStorageMigratorIT, method mustMigrateSchemaStoreToNewFormat.
@ParameterizedTest
@MethodSource("versions")
void mustMigrateSchemaStoreToNewFormat(String version, LogPosition expectedLogPosition,
        Function<TransactionId, Boolean> txIdComparator) throws Exception {
    // Given we have an old store full of random schema rules.
    Path prepare = testDirectory.directory("prepare");
    var fs = testDirectory.getFileSystem();
    MigrationTestUtils.prepareSampleLegacyDatabase(version, fs, databaseLayout.databaseDirectory(), prepare);
    // and a state of the migration saying that it has done the actual migration
    LogService logService = NullLogService.getInstance();
    // Uses this special scan-on-open IGF because, when the new IndexedIdGenerator was introduced, this test would
    // break when trying to open an older store before doing the migration.
    IdGeneratorFactory igf = new ScanOnOpenOverwritingIdGeneratorFactory(fs, databaseLayout.getDatabaseName());
    LogProvider logProvider = logService.getInternalLogProvider();
    // Prepare all the tokens we'll need.
    StoreFactory legacyStoreFactory = new StoreFactory(databaseLayout, CONFIG, igf, pageCache, fs,
            StandardV3_4.RECORD_FORMATS, logProvider, PageCacheTracer.NULL, writable(), immutable.empty());
    NeoStores stores = legacyStoreFactory.openNeoStores(false,
            StoreType.LABEL_TOKEN, StoreType.LABEL_TOKEN_NAME,
            StoreType.RELATIONSHIP_TYPE_TOKEN, StoreType.RELATIONSHIP_TYPE_TOKEN_NAME,
            StoreType.PROPERTY_KEY_TOKEN, StoreType.PROPERTY_KEY_TOKEN_NAME);
    createTokens(stores.getLabelTokenStore(), MAX_LABEL_ID);
    createTokens(stores.getRelationshipTypeTokenStore(), MAX_RELATIONSHIP_TYPE_ID);
    createTokens(stores.getPropertyKeyTokenStore(), MAX_PROPERTY_KEY_ID);
    stores.close();
    // Prepare the legacy schema store we'll migrate.
    Path storeFile = databaseLayout.schemaStore();
    Path idFile = databaseLayout.idSchemaStore();
    SchemaStore35 schemaStore35 = new SchemaStore35(storeFile, idFile, CONFIG, IdType.SCHEMA, igf, pageCache,
            logProvider, StandardV3_4.RECORD_FORMATS, writable(), DEFAULT_DATABASE_NAME, immutable.empty());
    schemaStore35.initialise(false, NULL);
    SplittableRandom rng = new SplittableRandom(randomRule.seed());
    LongHashSet indexes = new LongHashSet();
    LongHashSet constraints = new LongHashSet();
    for (int i = 0; i < 10; i++) {
        long id = schemaStore35.nextId(NULL);
        MutableLongSet target = rng.nextInt(3) < 2 ? indexes : constraints;
        target.add(id);
    }
    List<SchemaRule> generatedRules = new ArrayList<>();
    RealIdsRandomSchema randomSchema = new RealIdsRandomSchema(rng, indexes, constraints);
    while (randomSchema.hasMoreIds()) {
        try {
            SchemaRule schemaRule = randomSchema.nextSchemaRule();
            if (schemaRule instanceof ConstraintDescriptor) {
                ConstraintDescriptor constraint = (ConstraintDescriptor) schemaRule;
                if (constraint.isIndexBackedConstraint() && !constraint.asIndexBackedConstraint().hasOwnedIndexId()) {
                    // Filter out constraints that are supposed to own indexes, but don't, because those are illegal to persist.
                    randomSchema.rollback();
                    continue;
                }
            }
            randomSchema.commit();
            generatedRules.add(schemaRule);
            List<DynamicRecord> dynamicRecords = allocateFrom(schemaStore35, schemaRule, NULL);
            for (DynamicRecord dynamicRecord : dynamicRecords) {
                schemaStore35.updateRecord(dynamicRecord, NULL);
            }
        } catch (NoSuchElementException ignore) {
            // We're starting to run low on ids, but just ignore this and loop as long as there are still some left.
        }
    }
    schemaStore35.flush(NULL);
    schemaStore35.close();
    RecordStoreVersionCheck check = getVersionCheck(pageCache, databaseLayout);
    String versionToMigrateFrom = getVersionToMigrateFrom(check);
    MigrationProgressMonitor progressMonitor = SILENT;
    RecordStorageMigrator migrator = new RecordStorageMigrator(fs, pageCache, CONFIG, logService, jobScheduler,
            PageCacheTracer.NULL, batchImporterFactory, INSTANCE);
    // When we migrate it to the new store format.
    String versionToMigrateTo = getVersionToMigrateTo(check);
    ProgressReporter reporter = progressMonitor.startSection("section");
    migrator.migrate(databaseLayout, migrationLayout, reporter, versionToMigrateFrom, versionToMigrateTo, EMPTY);
    migrator.moveMigratedFiles(migrationLayout, databaseLayout, versionToMigrateFrom, versionToMigrateTo);
    generatedRules.sort(Comparator.comparingLong(SchemaRule::getId));
    // Then the new store should retain an exact representation of the old-format schema rules.
    StoreFactory storeFactory = new StoreFactory(databaseLayout, CONFIG, igf, pageCache, fs, logProvider,
            PageCacheTracer.NULL, writable());
    try (NeoStores neoStores = storeFactory.openAllNeoStores()) {
        SchemaStore schemaStore = neoStores.getSchemaStore();
        TokenHolders tokenHolders = StoreTokens.readOnlyTokenHolders(neoStores, NULL);
        SchemaStorage storage = new SchemaStorage(schemaStore, tokenHolders, () -> KernelVersion.LATEST);
        List<SchemaRule> migratedRules = new ArrayList<>();
        storage.getAll(NULL).iterator().forEachRemaining(migratedRules::add);
        // Nerf the rule names, since migration may change those around.
        migratedRules = migratedRules.stream().map(r -> r.withName("a")).collect(Collectors.toList());
        generatedRules = generatedRules.stream().map(r -> r.withName("a")).collect(Collectors.toList());
        assertThat(migratedRules).isEqualTo(generatedRules);
    }
}
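As an aside, the holders returned by StoreTokens.readOnlyTokenHolders above are populated straight from the open token stores and only support lookups; creation of new tokens is not possible through them. A short sketch of reading them back out, assuming neoStores is still open and that getAllTokens (a TokenHolder method) is available in this version:

// Sketch only: dump the id-to-name pairs the migrated label token store produced.
TokenHolders holders = StoreTokens.readOnlyTokenHolders(neoStores, NULL);
for (NamedToken label : holders.labelTokens().getAllTokens()) {
    System.out.println(label.id() + " -> " + label.name());
}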
Use of org.neo4j.token.TokenHolders in project neo4j by neo4j.
From class SchemaRuleMigrationTest, method setUp.
@BeforeEach
void setUp() {
    srcTokenHolders = new TokenHolders(
            StoreTokens.createReadOnlyTokenHolder(TokenHolder.TYPE_PROPERTY_KEY),
            StoreTokens.createReadOnlyTokenHolder(TokenHolder.TYPE_LABEL),
            StoreTokens.createReadOnlyTokenHolder(TokenHolder.TYPE_RELATIONSHIP_TYPE));
    src = mock(SchemaStorage35.class);
    writtenRules = new ArrayList<>();
    dst = new SchemaRuleMigrationAccess() {
        @Override
        public void writeSchemaRule(SchemaRule rule) {
            writtenRules.add(rule);
        }

        @Override
        public Iterable<SchemaRule> getAll() {
            return List.of();
        }

        @Override
        public void close() {
        }
    };
}
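The read-only holders created above start out empty. A minimal sketch of how such a holder can be seeded and queried, assuming the tests call setInitialTokens (a TokenHolder method) before any lookups; the token name "prop" and id 1 are illustrative:

// Sketch only: seed a read-only holder, then resolve by name.
srcTokenHolders.propertyKeyTokens().setInitialTokens(List.of(new NamedToken("prop", 1)));
int propId = srcTokenHolders.propertyKeyTokens().getIdByName("prop"); // 1; creating a new token would throw, since the holder is read-only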
Use of org.neo4j.token.TokenHolders in project neo4j by neo4j.
From class BatchInsertTest, method shouldCreateConsistentUniquenessConstraint.
@ParameterizedTest
@MethodSource("params")
void shouldCreateConsistentUniquenessConstraint(int denseNodeThreshold) throws Exception {
    // given
    BatchInserter inserter = newBatchInserter(denseNodeThreshold);
    // when
    inserter.createDeferredConstraint(label("Hacker")).assertPropertyIsUnique("handle").create();
    // then
    GraphDatabaseAPI graphdb = switchToEmbeddedGraphDatabaseService(inserter, denseNodeThreshold);
    try {
        NeoStores neoStores = graphdb.getDependencyResolver()
                .resolveDependency(RecordStorageEngine.class).testAccessNeoStores();
        SchemaStore store = neoStores.getSchemaStore();
        TokenHolders tokenHolders = graphdb.getDependencyResolver().resolveDependency(TokenHolders.class);
        SchemaRuleAccess schemaRuleAccess =
                SchemaRuleAccess.getSchemaRuleAccess(store, tokenHolders, () -> KernelVersion.LATEST);
        List<Long> inUse = new ArrayList<>();
        SchemaRecord record = store.newRecord();
        for (long i = 1, high = store.getHighestPossibleIdInUse(NULL); i <= high; i++) {
            store.getRecord(i, record, RecordLoad.FORCE, NULL);
            if (record.inUse()) {
                inUse.add(i);
            }
        }
        assertEquals(2, inUse.size(), "records in use");
        SchemaRule rule0 = schemaRuleAccess.loadSingleSchemaRule(inUse.get(0), NULL);
        SchemaRule rule1 = schemaRuleAccess.loadSingleSchemaRule(inUse.get(1), NULL);
        IndexDescriptor indexRule;
        ConstraintDescriptor constraint;
        if (rule0 instanceof IndexDescriptor) {
            indexRule = (IndexDescriptor) rule0;
            constraint = (ConstraintDescriptor) rule1;
        } else {
            constraint = (ConstraintDescriptor) rule0;
            indexRule = (IndexDescriptor) rule1;
        }
        OptionalLong owningConstraintId = indexRule.getOwningConstraintId();
        assertTrue(owningConstraintId.isPresent(), "index should have owning constraint");
        assertEquals(constraint.getId(), owningConstraintId.getAsLong(), "index should reference constraint");
        assertEquals(indexRule.getId(), constraint.asIndexBackedConstraint().ownedIndexId(), "constraint should reference index");
    } finally {
        managementService.shutdown();
    }
}
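A hedged aside: since TokenHolders implements TokenNameLookup, the holders resolved from the dependency resolver above can also render the two schema rules back into human-readable form, for example:

// Illustrative only; userDescription takes any TokenNameLookup, which TokenHolders satisfies.
System.out.println(indexRule.userDescription(tokenHolders));
System.out.println(constraint.userDescription(tokenHolders));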
Use of org.neo4j.token.TokenHolders in project neo4j by neo4j.
From class Recovery, method instantiateRecoveryExtensions.
private static DatabaseExtensions instantiateRecoveryExtensions(DatabaseLayout databaseLayout,
        FileSystemAbstraction fileSystem, Config config, LogService logService, PageCache pageCache,
        JobScheduler jobScheduler, DbmsInfo dbmsInfo, Monitors monitors, TokenHolders tokenHolders,
        RecoveryCleanupWorkCollector recoveryCleanupCollector, DatabaseReadOnlyChecker readOnlyChecker,
        Iterable<ExtensionFactory<?>> extensionFactories, PageCacheTracer pageCacheTracer) {
    List<ExtensionFactory<?>> recoveryExtensions = stream(extensionFactories)
            .filter(extension -> extension.getClass().isAnnotationPresent(RecoveryExtension.class))
            .collect(toList());
    Dependencies deps = new Dependencies();
    NonListenableMonitors nonListenableMonitors = new NonListenableMonitors(monitors, logService.getInternalLogProvider());
    deps.satisfyDependencies(fileSystem, config, logService, pageCache, nonListenableMonitors, jobScheduler,
            tokenHolders, recoveryCleanupCollector, pageCacheTracer, databaseLayout, readOnlyChecker);
    DatabaseExtensionContext extensionContext = new DatabaseExtensionContext(databaseLayout, dbmsInfo, deps);
    return new DatabaseExtensions(extensionContext, recoveryExtensions, deps, ExtensionFailureStrategies.fail());
}
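For context, a hedged sketch of the consuming side: an extension factory annotated with @RecoveryExtension declares a dependencies interface, and the TokenHolders satisfied into deps above is resolved through it when DatabaseExtensions instantiates the extension. The names MyRecoveryExtensionFactory and Deps are illustrative, not from neo4j.

@RecoveryExtension
class MyRecoveryExtensionFactory extends ExtensionFactory<MyRecoveryExtensionFactory.Deps> {
    interface Deps {
        TokenHolders tokenHolders();
        PageCache pageCache();
    }

    MyRecoveryExtensionFactory() {
        super("my-recovery-extension"); // hypothetical extension key
    }

    @Override
    public Lifecycle newInstance(ExtensionContext context, Deps deps) {
        TokenHolders tokens = deps.tokenHolders(); // resolved from the Dependencies satisfied above
        return new LifecycleAdapter();
    }
}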