Use of org.neo4j.internal.schema.SchemaRule in project neo4j by neo4j:
class LogCommandSerializationV4_0, method writeSchemaRuleCommand.
/**
 * Serializes a {@link Command.SchemaRuleCommand} to the channel in the 4.0 log layout:
 * the command-type byte, the before-record id, a marker byte saying whether a schema
 * rule is attached, the before and after schema records, and finally the rule itself
 * when one is present. The write order is part of the wire format and must not change.
 */
@Override
public void writeSchemaRuleCommand(WritableChannel channel, Command.SchemaRuleCommand command) throws IOException {
    channel.put(NeoCommandType.SCHEMA_RULE_COMMAND);
    channel.putLong(command.getBefore().getId());
    SchemaRule rule = command.getSchemaRule();
    boolean ruleAttached = rule != null;
    if (ruleAttached) {
        channel.put(SchemaRecord.COMMAND_HAS_SCHEMA_RULE);
    } else {
        channel.put(SchemaRecord.COMMAND_HAS_NO_SCHEMA_RULE);
    }
    writeSchemaRecord(channel, command.getBefore());
    writeSchemaRecord(channel, command.getAfter());
    if (ruleAttached) {
        writeSchemaRule(channel, rule);
    }
}
Use of org.neo4j.internal.schema.SchemaRule in project neo4j by neo4j:
class NeoStoreTransactionApplier, method visitSchemaRuleCommand.
/**
 * Applies a schema-rule command to the schema store and notifies listeners of the change.
 * Always returns {@code false} so that visiting continues.
 */
@Override
public boolean visitSchemaRuleCommand(Command.SchemaRuleCommand command) {
    // Schema commands are executed after the property updates have been generated. If they
    // ran first and a transaction both created an index and set properties/labels, we could
    // hit this corner case:
    // 1) the index rule is created and an index population job starts
    // 2) the population job processes some nodes but has not yet completed
    // 3) the gathered property updates are sent to the indexes, so the freshly created
    //    population job may receive them as updates
    // 4) the job then applies those updates as added properties and can end up with
    //    duplicate entries for the same property
    updateStore(neoStores.getSchemaStore(), command);
    SchemaRule rule = command.getSchemaRule();
    boolean constraintChange = command.getAfter().isConstraint();
    onSchemaRuleChange(command.getMode(), command.getKey(), rule, constraintChange);
    return false;
}
Use of org.neo4j.internal.schema.SchemaRule in project neo4j by neo4j:
class LogCommandSerializationV4_0Test, method shouldReadSchemaCommand.
/**
 * Round-trip test: a schema-rule command written with random before/after records must
 * read back with records equal to the originals. Repeated to cover random record shapes.
 */
@RepeatedTest(100)
void shouldReadSchemaCommand() throws Exception {
    // given: a command with random before/after records and an index rule named after its id
    InMemoryClosableChannel channel = new InMemoryClosableChannel();
    SchemaRecord before = createRandomSchemaRecord();
    SchemaRecord after = createRandomSchemaRecord();
    markAfterRecordAsCreatedIfCommandLooksCreated(before, after);
    long ruleId = after.getId();
    SchemaRule rule = IndexPrototype.forSchema(SchemaDescriptor.forLabel(1, 2, 3))
            .withName("index_" + ruleId)
            .materialise(ruleId);
    writer().writeSchemaRuleCommand(channel, new Command.SchemaRuleCommand(writer(), before, after, rule));

    // when: the serialized command is read back
    CommandReader reader = createReader();
    Command.SchemaRuleCommand command = (Command.SchemaRuleCommand) reader.read(channel);

    // then: the round trip preserves both records
    assertBeforeAndAfterEquals(command, before, after);
}
Use of org.neo4j.internal.schema.SchemaRule in project neo4j by neo4j:
class Commands, method createIndexRule.
/**
 * Builds a {@link SchemaRuleCommand} representing the creation of an index rule: the
 * before-record is un-initialized ("not in use") and the after-record is in use.
 *
 * @param providerDescriptor index provider to materialise the rule with
 * @param id                 id used for the rule, both records, and the generated name
 * @param descriptor         label schema the index covers
 */
public static SchemaRuleCommand createIndexRule(IndexProviderDescriptor providerDescriptor, long id, LabelSchemaDescriptor descriptor) {
    SchemaRule indexRule = IndexPrototype.forSchema(descriptor, providerDescriptor)
            .withName("index_" + id)
            .materialise(id);
    SchemaRecord beforeRecord = new SchemaRecord(id).initialize(false, Record.NO_NEXT_PROPERTY.longValue());
    // 33 is an arbitrary non-NULL next-property pointer for the created record.
    SchemaRecord afterRecord = new SchemaRecord(id).initialize(true, 33);
    return new SchemaRuleCommand(beforeRecord, afterRecord, indexRule);
}
Use of org.neo4j.internal.schema.SchemaRule in project neo4j by neo4j:
class RecordStorageMigratorIT, method mustMigrateSchemaStoreToNewFormat.
/**
 * Integration test: fills a legacy (3.4-format) schema store with randomly generated
 * schema rules, runs the record-storage migrator, and verifies the migrated store
 * contains an equal set of rules (names excepted, since migration may rename rules).
 * The setup order matters: token stores are created and closed before the legacy
 * schema store is populated, and that store is flushed and closed before migration.
 */
@ParameterizedTest
@MethodSource("versions")
void mustMigrateSchemaStoreToNewFormat(String version, LogPosition expectedLogPosition, Function<TransactionId, Boolean> txIdComparator) throws Exception {
// Given we have an old store full of random schema rules.
Path prepare = testDirectory.directory("prepare");
var fs = testDirectory.getFileSystem();
MigrationTestUtils.prepareSampleLegacyDatabase(version, fs, databaseLayout.databaseDirectory(), prepare);
// and a state of the migration saying that it has done the actual migration
LogService logService = NullLogService.getInstance();
// Uses this special scan-on-open IGF because when the new IndexedIdGenerator was introduced this test would break
// when trying to open an older store, before doing migration.
IdGeneratorFactory igf = new ScanOnOpenOverwritingIdGeneratorFactory(fs, databaseLayout.getDatabaseName());
LogProvider logProvider = logService.getInternalLogProvider();
// Prepare all the tokens we'll need, so the random rules can reference valid token ids.
StoreFactory legacyStoreFactory = new StoreFactory(databaseLayout, CONFIG, igf, pageCache, fs, StandardV3_4.RECORD_FORMATS, logProvider, PageCacheTracer.NULL, writable(), immutable.empty());
NeoStores stores = legacyStoreFactory.openNeoStores(false, StoreType.LABEL_TOKEN, StoreType.LABEL_TOKEN_NAME, StoreType.RELATIONSHIP_TYPE_TOKEN, StoreType.RELATIONSHIP_TYPE_TOKEN_NAME, StoreType.PROPERTY_KEY_TOKEN, StoreType.PROPERTY_KEY_TOKEN_NAME);
createTokens(stores.getLabelTokenStore(), MAX_LABEL_ID);
createTokens(stores.getRelationshipTypeTokenStore(), MAX_RELATIONSHIP_TYPE_ID);
createTokens(stores.getPropertyKeyTokenStore(), MAX_PROPERTY_KEY_ID);
stores.close();
// Prepare the legacy schema store we'll migrate.
Path storeFile = databaseLayout.schemaStore();
Path idFile = databaseLayout.idSchemaStore();
SchemaStore35 schemaStore35 = new SchemaStore35(storeFile, idFile, CONFIG, IdType.SCHEMA, igf, pageCache, logProvider, StandardV3_4.RECORD_FORMATS, writable(), DEFAULT_DATABASE_NAME, immutable.empty());
schemaStore35.initialise(false, NULL);
SplittableRandom rng = new SplittableRandom(randomRule.seed());
// Pre-allocate ids and split them roughly 2:1 between indexes and constraints.
LongHashSet indexes = new LongHashSet();
LongHashSet constraints = new LongHashSet();
for (int i = 0; i < 10; i++) {
long id = schemaStore35.nextId(NULL);
MutableLongSet target = rng.nextInt(3) < 2 ? indexes : constraints;
target.add(id);
}
// Generate random rules until the pre-allocated ids run out, persisting each one.
List<SchemaRule> generatedRules = new ArrayList<>();
RealIdsRandomSchema randomSchema = new RealIdsRandomSchema(rng, indexes, constraints);
while (randomSchema.hasMoreIds()) {
try {
SchemaRule schemaRule = randomSchema.nextSchemaRule();
if (schemaRule instanceof ConstraintDescriptor) {
ConstraintDescriptor constraint = (ConstraintDescriptor) schemaRule;
if (constraint.isIndexBackedConstraint() && !constraint.asIndexBackedConstraint().hasOwnedIndexId()) {
// Filter out constraints that are supposed to own indexes, but don't, because those are illegal to persist.
randomSchema.rollback();
continue;
}
}
randomSchema.commit();
generatedRules.add(schemaRule);
List<DynamicRecord> dynamicRecords = allocateFrom(schemaStore35, schemaRule, NULL);
for (DynamicRecord dynamicRecord : dynamicRecords) {
schemaStore35.updateRecord(dynamicRecord, NULL);
}
} catch (NoSuchElementException ignore) {
// We're starting to run low on ids, but just ignore this and loop as long as there are still some left.
}
}
// Everything must be durable on disk before the migrator reads the store.
schemaStore35.flush(NULL);
schemaStore35.close();
RecordStoreVersionCheck check = getVersionCheck(pageCache, databaseLayout);
String versionToMigrateFrom = getVersionToMigrateFrom(check);
MigrationProgressMonitor progressMonitor = SILENT;
RecordStorageMigrator migrator = new RecordStorageMigrator(fs, pageCache, CONFIG, logService, jobScheduler, PageCacheTracer.NULL, batchImporterFactory, INSTANCE);
// When we migrate it to the new store format.
String versionToMigrateTo = getVersionToMigrateTo(check);
ProgressReporter reporter = progressMonitor.startSection("section");
migrator.migrate(databaseLayout, migrationLayout, reporter, versionToMigrateFrom, versionToMigrateTo, EMPTY);
migrator.moveMigratedFiles(migrationLayout, databaseLayout, versionToMigrateFrom, versionToMigrateTo);
// Sort by id so the comparison below is order-independent.
generatedRules.sort(Comparator.comparingLong(SchemaRule::getId));
// Then the new store should retain an exact representation of the old-format schema rules.
StoreFactory storeFactory = new StoreFactory(databaseLayout, CONFIG, igf, pageCache, fs, logProvider, PageCacheTracer.NULL, writable());
try (NeoStores neoStores = storeFactory.openAllNeoStores()) {
SchemaStore schemaStore = neoStores.getSchemaStore();
TokenHolders tokenHolders = StoreTokens.readOnlyTokenHolders(neoStores, NULL);
SchemaStorage storage = new SchemaStorage(schemaStore, tokenHolders, () -> KernelVersion.LATEST);
List<SchemaRule> migratedRules = new ArrayList<>();
storage.getAll(NULL).iterator().forEachRemaining(migratedRules::add);
// Nerf the rule names, since migration may change those around.
migratedRules = migratedRules.stream().map(r -> r.withName("a")).collect(Collectors.toList());
generatedRules = generatedRules.stream().map(r -> r.withName("a")).collect(Collectors.toList());
assertThat(migratedRules).isEqualTo(generatedRules);
}
}
Aggregations