Usage of org.neo4j.internal.schema.ConstraintDescriptor in the neo4j/neo4j project:
class ConstraintTestBase, method shouldFindConstraintsBySchema.
@Test
void shouldFindConstraintsBySchema() throws Exception {
    // GIVEN: a single constraint on (:FOO {prop})
    addConstraints("FOO", "prop");

    try (KernelTransaction tx = beginTransaction()) {
        int labelId = tx.tokenWrite().labelGetOrCreateForName("FOO");
        int propertyId = tx.tokenWrite().propertyKeyGetOrCreateForName("prop");
        LabelSchemaDescriptor schema = labelSchemaDescriptor(labelId, propertyId);

        // WHEN: looking up constraints for exactly that schema
        List<ConstraintDescriptor> found = asList(tx.schemaRead().constraintsGetForSchema(schema));

        // THEN: exactly the constraint we created is returned, on the right property key
        assertThat(found).hasSize(1);
        assertThat(found.get(0).schema().getPropertyId()).isEqualTo(propertyId);
    }
}
Usage of org.neo4j.internal.schema.ConstraintDescriptor in the neo4j/neo4j project:
class SchemaStatementProcedure, method createSchemaStatementResults.
/**
 * Collects CREATE/DROP statement pairs for every included index and constraint,
 * keyed by schema-rule name. Unique indexes are assumed to back constraints and
 * are skipped on the index side (see {@code includeIndex}).
 */
static Collection<BuiltInProcedures.SchemaStatementResult> createSchemaStatementResults(SchemaReadCore schemaRead, TokenRead tokenRead) throws ProcedureException {
    Map<String, BuiltInProcedures.SchemaStatementResult> results = new HashMap<>();

    // Indexes.
    // If an index is unique the assumption is that it is backing a constraint
    // and it will not be included here; the constraint loop below covers it.
    Iterator<IndexDescriptor> indexIterator = schemaRead.indexesGetAll();
    while (indexIterator.hasNext()) {
        IndexDescriptor index = indexIterator.next();
        if (!includeIndex(schemaRead, index)) {
            continue;
        }
        String indexName = index.getName();
        results.put(indexName, new BuiltInProcedures.SchemaStatementResult(
                indexName,
                SchemaRuleType.INDEX.name(),
                createStatement(tokenRead, index),
                dropStatement(index)));
    }

    // Constraints.
    Iterator<ConstraintDescriptor> constraintIterator = schemaRead.constraintsGetAll();
    while (constraintIterator.hasNext()) {
        ConstraintDescriptor constraint = constraintIterator.next();
        if (!includeConstraint(schemaRead, constraint)) {
            continue;
        }
        String constraintName = constraint.getName();
        results.put(constraintName, new BuiltInProcedures.SchemaStatementResult(
                constraintName,
                SchemaRuleType.CONSTRAINT.name(),
                createStatement(schemaRead::indexGetForName, tokenRead, constraint),
                dropStatement(constraint)));
    }
    return results.values();
}
Usage of org.neo4j.internal.schema.ConstraintDescriptor in the neo4j/neo4j project:
class SchemaProcedure, method buildSchemaGraph.
/**
 * Builds a virtual meta-graph of the schema: one virtual node per label in
 * use (annotated with its indexes and constraints), and a virtual
 * relationship for each (start label, relationship type, end label)
 * combination that the counts store reports as occurring, filtered down to
 * what the calling user's access mode permits.
 */
public GraphResult buildSchemaGraph() {
    final Map<String, VirtualNodeHack> nodes = new HashMap<>();
    final Map<String, Set<VirtualRelationshipHack>> relationships = new HashMap<>();
    final KernelTransaction kernelTransaction = internalTransaction.kernelTransaction();
    // Capture the caller's original access mode BEFORE elevating privileges;
    // all visibility filtering below is done against this mode.
    AccessMode mode = kernelTransaction.securityContext().mode();
    // Temporarily run with auth disabled so we can enumerate everything,
    // then filter manually using `mode`. Reverted when the try block exits.
    try (KernelTransaction.Revertable ignore = kernelTransaction.overrideWith(SecurityContext.AUTH_DISABLED)) {
        Read dataRead = kernelTransaction.dataRead();
        TokenRead tokenRead = kernelTransaction.tokenRead();
        SchemaRead schemaRead = kernelTransaction.schemaRead();
        List<Pair<String, Integer>> labelNamesAndIds = new ArrayList<>();

        // Get all labels that are in use as seen by a super user
        List<Label> labelsInUse = stream(LABELS.inUse(kernelTransaction)).collect(Collectors.toList());

        for (Label label : labelsInUse) {
            String labelName = label.name();
            int labelId = tokenRead.nodeLabel(labelName);

            // Filter out labels that are denied or aren't explicitly allowed
            if (mode.allowsTraverseNode(labelId)) {
                labelNamesAndIds.add(Pair.of(labelName, labelId));

                Map<String, Object> properties = new HashMap<>();

                // Collect non-unique indexes on this label as comma-joined property
                // name lists. Unique indexes are skipped — presumably because they
                // back constraints and are reported via the constraints list below.
                Iterator<IndexDescriptor> indexReferences = schemaRead.indexesGetForLabel(labelId);
                List<String> indexes = new ArrayList<>();
                while (indexReferences.hasNext()) {
                    IndexDescriptor index = indexReferences.next();
                    if (!index.isUnique()) {
                        String[] propertyNames = PropertyNameUtils.getPropertyKeys(tokenRead, index.schema().getPropertyIds());
                        indexes.add(String.join(",", propertyNames));
                    }
                }
                properties.put("indexes", indexes);

                // Collect user-readable descriptions of all constraints on this label.
                Iterator<ConstraintDescriptor> nodePropertyConstraintIterator = schemaRead.constraintsGetForLabel(labelId);
                List<String> constraints = new ArrayList<>();
                while (nodePropertyConstraintIterator.hasNext()) {
                    ConstraintDescriptor constraint = nodePropertyConstraintIterator.next();
                    constraints.add(constraint.userDescription(tokenRead));
                }
                properties.put("constraints", constraints);

                getOrCreateLabel(label.name(), properties, nodes);
            }
        }

        // Get all relTypes that are in use as seen by a super user
        List<RelationshipType> relTypesInUse = stream(RELATIONSHIP_TYPES.inUse(kernelTransaction)).collect(Collectors.toList());

        for (RelationshipType relationshipType : relTypesInUse) {
            String relationshipTypeGetName = relationshipType.name();
            int relId = tokenRead.relationshipType(relationshipTypeGetName);

            // Filter out relTypes that are denied or aren't explicitly allowed
            if (mode.allowsTraverseRelType(relId)) {
                List<VirtualNodeHack> startNodes = new LinkedList<>();
                List<VirtualNodeHack> endNodes = new LinkedList<>();

                // Use the counts store to determine which of the allowed labels
                // actually occur at the start or end of this relationship type.
                for (Pair<String, Integer> labelNameAndId : labelNamesAndIds) {
                    String labelName = labelNameAndId.first();
                    int labelId = labelNameAndId.other();
                    Map<String, Object> properties = new HashMap<>();
                    VirtualNodeHack node = getOrCreateLabel(labelName, properties, nodes);
                    // (labelId, relId, ANY) > 0: label occurs on the start side.
                    if (dataRead.countsForRelationship(labelId, relId, TokenRead.ANY_LABEL) > 0) {
                        startNodes.add(node);
                    }
                    // (ANY, relId, labelId) > 0: label occurs on the end side.
                    if (dataRead.countsForRelationship(TokenRead.ANY_LABEL, relId, labelId) > 0) {
                        endNodes.add(node);
                    }
                }

                // Add a virtual relationship for every observed start/end label pair.
                // NOTE(review): this is the cross product, so a start/end pairing may
                // be shown even if no single relationship connects that exact pair.
                for (VirtualNodeHack startNode : startNodes) {
                    for (VirtualNodeHack endNode : endNodes) {
                        addRelationship(startNode, endNode, relationshipTypeGetName, relationships);
                    }
                }
            }
        }
    }
    return getGraphResult(nodes, relationships);
}
Usage of org.neo4j.internal.schema.ConstraintDescriptor in the neo4j/neo4j project:
class RecordStorageMigratorIT, method mustMigrateSchemaStoreToNewFormat.
/**
 * Fills a legacy (3.4-format) schema store with random schema rules, runs the
 * record-storage migration, and asserts that every generated rule is retained
 * exactly (modulo rule names, which migration may regenerate).
 */
@ParameterizedTest
@MethodSource("versions")
void mustMigrateSchemaStoreToNewFormat(String version, LogPosition expectedLogPosition, Function<TransactionId, Boolean> txIdComparator) throws Exception {
    // Given we have an old store full of random schema rules.
    Path prepare = testDirectory.directory("prepare");
    var fs = testDirectory.getFileSystem();
    MigrationTestUtils.prepareSampleLegacyDatabase(version, fs, databaseLayout.databaseDirectory(), prepare);
    // and a state of the migration saying that it has done the actual migration
    LogService logService = NullLogService.getInstance();

    // Uses this special scan-on-open IGF because when the new IndexedIdGenerator was introduced this test would break
    // when trying to open an older store, before doing migration.
    IdGeneratorFactory igf = new ScanOnOpenOverwritingIdGeneratorFactory(fs, databaseLayout.getDatabaseName());
    LogProvider logProvider = logService.getInternalLogProvider();

    // Prepare all the tokens we'll need.
    StoreFactory legacyStoreFactory = new StoreFactory(databaseLayout, CONFIG, igf, pageCache, fs, StandardV3_4.RECORD_FORMATS, logProvider, PageCacheTracer.NULL, writable(), immutable.empty());
    NeoStores stores = legacyStoreFactory.openNeoStores(false, StoreType.LABEL_TOKEN, StoreType.LABEL_TOKEN_NAME, StoreType.RELATIONSHIP_TYPE_TOKEN, StoreType.RELATIONSHIP_TYPE_TOKEN_NAME, StoreType.PROPERTY_KEY_TOKEN, StoreType.PROPERTY_KEY_TOKEN_NAME);
    createTokens(stores.getLabelTokenStore(), MAX_LABEL_ID);
    createTokens(stores.getRelationshipTypeTokenStore(), MAX_RELATIONSHIP_TYPE_ID);
    createTokens(stores.getPropertyKeyTokenStore(), MAX_PROPERTY_KEY_ID);
    stores.close();

    // Prepare the legacy schema store we'll migrate.
    Path storeFile = databaseLayout.schemaStore();
    Path idFile = databaseLayout.idSchemaStore();
    SchemaStore35 schemaStore35 = new SchemaStore35(storeFile, idFile, CONFIG, IdType.SCHEMA, igf, pageCache, logProvider, StandardV3_4.RECORD_FORMATS, writable(), DEFAULT_DATABASE_NAME, immutable.empty());
    schemaStore35.initialise(false, NULL);
    SplittableRandom rng = new SplittableRandom(randomRule.seed());
    LongHashSet indexes = new LongHashSet();
    LongHashSet constraints = new LongHashSet();
    // Reserve 10 real store ids, split ~2:1 between index ids and constraint ids.
    for (int i = 0; i < 10; i++) {
        long id = schemaStore35.nextId(NULL);
        MutableLongSet target = rng.nextInt(3) < 2 ? indexes : constraints;
        target.add(id);
    }
    List<SchemaRule> generatedRules = new ArrayList<>();
    RealIdsRandomSchema randomSchema = new RealIdsRandomSchema(rng, indexes, constraints);
    // Generate random rules until the reserved ids are exhausted, persisting each
    // accepted rule into the legacy store.
    while (randomSchema.hasMoreIds()) {
        try {
            SchemaRule schemaRule = randomSchema.nextSchemaRule();
            if (schemaRule instanceof ConstraintDescriptor) {
                ConstraintDescriptor constraint = (ConstraintDescriptor) schemaRule;
                if (constraint.isIndexBackedConstraint() && !constraint.asIndexBackedConstraint().hasOwnedIndexId()) {
                    // Filter out constraints that are supposed to own indexes, but don't, because those are illegal to persist.
                    randomSchema.rollback();
                    continue;
                }
            }
            randomSchema.commit();
            generatedRules.add(schemaRule);
            // Serialize the rule into dynamic records and write them to the legacy store.
            List<DynamicRecord> dynamicRecords = allocateFrom(schemaStore35, schemaRule, NULL);
            for (DynamicRecord dynamicRecord : dynamicRecords) {
                schemaStore35.updateRecord(dynamicRecord, NULL);
            }
        } catch (NoSuchElementException ignore) {
            // We're starting to run low on ids, but just ignore this and loop as long as there are still some left.
        }
    }
    schemaStore35.flush(NULL);
    schemaStore35.close();

    RecordStoreVersionCheck check = getVersionCheck(pageCache, databaseLayout);
    String versionToMigrateFrom = getVersionToMigrateFrom(check);
    MigrationProgressMonitor progressMonitor = SILENT;
    RecordStorageMigrator migrator = new RecordStorageMigrator(fs, pageCache, CONFIG, logService, jobScheduler, PageCacheTracer.NULL, batchImporterFactory, INSTANCE);

    // When we migrate it to the new store format.
    String versionToMigrateTo = getVersionToMigrateTo(check);
    ProgressReporter reporter = progressMonitor.startSection("section");
    migrator.migrate(databaseLayout, migrationLayout, reporter, versionToMigrateFrom, versionToMigrateTo, EMPTY);
    migrator.moveMigratedFiles(migrationLayout, databaseLayout, versionToMigrateFrom, versionToMigrateTo);

    // Sort by id so the comparison order matches the migrated store's iteration
    // order — presumably id-ordered; the equality assert below depends on this.
    generatedRules.sort(Comparator.comparingLong(SchemaRule::getId));

    // Then the new store should retain an exact representation of the old-format schema rules.
    StoreFactory storeFactory = new StoreFactory(databaseLayout, CONFIG, igf, pageCache, fs, logProvider, PageCacheTracer.NULL, writable());
    try (NeoStores neoStores = storeFactory.openAllNeoStores()) {
        SchemaStore schemaStore = neoStores.getSchemaStore();
        TokenHolders tokenHolders = StoreTokens.readOnlyTokenHolders(neoStores, NULL);
        SchemaStorage storage = new SchemaStorage(schemaStore, tokenHolders, () -> KernelVersion.LATEST);
        List<SchemaRule> migratedRules = new ArrayList<>();
        storage.getAll(NULL).iterator().forEachRemaining(migratedRules::add);

        // Nerf the rule names, since migration may change those around.
        migratedRules = migratedRules.stream().map(r -> r.withName("a")).collect(Collectors.toList());
        generatedRules = generatedRules.stream().map(r -> r.withName("a")).collect(Collectors.toList());

        assertThat(migratedRules).isEqualTo(generatedRules);
    }
}
Usage of org.neo4j.internal.schema.ConstraintDescriptor in the neo4j/neo4j project:
class SchemaRuleSerialization35Test, method assertParseRelationshipPropertyExistsRule.
/**
 * Deserializes a base64-encoded legacy (3.5) relationship-property-existence
 * constraint rule and verifies that id, equality, schema, and name all
 * round-trip, and that it is not index-backed.
 */
private static void assertParseRelationshipPropertyExistsRule(String serialized, String name) throws Exception {
    // GIVEN: the ids baked into the serialized fixture
    final long ruleId = 51;
    final int propertyKey = 6119;
    final int relTypeId = 8512;
    ConstraintDescriptor expected = existsForRelType(relTypeId, propertyKey);

    // WHEN: decoding and deserializing the legacy record
    byte[] record = decodeBase64(serialized);
    ConstraintDescriptor parsed = assertConstraintRule(SchemaRuleSerialization35.deserialize(ruleId, ByteBuffer.wrap(record)));

    // THEN: all fields round-trip; an existence constraint is not index-backed,
    // so asking for its owned index must fail with IllegalStateException.
    assertThat(parsed.getId()).isEqualTo(ruleId);
    assertThat(parsed).isEqualTo(expected);
    assertThat(parsed.schema()).isEqualTo(expected.schema());
    assertThatThrownBy(() -> parsed.asIndexBackedConstraint().ownedIndexId()).isInstanceOf(IllegalStateException.class);
    assertThat(parsed.getName()).isEqualTo(name);
}
Aggregations