Usage of org.neo4j.internal.recordstorage.RecordStorageEngine in the neo4j/neo4j project.
Example from class RecordFormatMigrationIT, method skipMigrationIfFormatSpecifiedInConfig.
@Test
void skipMigrationIfFormatSpecifiedInConfig() {
    // Create a store on the StandardV4_0 record format and write one node property.
    DatabaseManagementService dbms = startManagementService(StandardV4_0.NAME);
    GraphDatabaseAPI database = getDefaultDatabase(dbms);
    try (Transaction tx = database.beginTx()) {
        Node node = tx.createNode();
        node.setProperty("a", "b");
        tx.commit();
    }
    dbms.shutdown();

    // Restart with the same format explicitly configured: no migration should run,
    // so the store must still report the StandardV4_0 format name.
    dbms = startManagementService(StandardV4_0.NAME);
    GraphDatabaseAPI nonUpgradedStore = getDefaultDatabase(dbms);
    RecordStorageEngine engine =
            nonUpgradedStore.getDependencyResolver().resolveDependency(RecordStorageEngine.class);
    assertEquals(StandardV4_0.NAME, engine.testAccessNeoStores().getRecordFormats().name());
    dbms.shutdown();
}
Usage of org.neo4j.internal.recordstorage.RecordStorageEngine in the neo4j/neo4j project.
Example from class DegreesRebuildFromStoreTest, method skipNotUsedRecordsOnDegreeStoreRebuild.
@Test
void skipNotUsedRecordsOnDegreeStoreRebuild() throws Exception {
// Verifies that rebuilding the relationship-group degrees store from the record
// stores skips relationship-group records that are flagged as not in use.
// given a dataset containing mixed sparse and dense nodes with relationships in random directions,
// where some chains have been marked as having external degrees
int denseThreshold = dense_node_threshold.defaultValue();
DatabaseLayout layout = DatabaseLayout.ofFlat(directory.homePath());
int[] relationshipTypes;
MutableLongLongMap expectedDegrees = LongLongMaps.mutable.empty();
try (Lifespan life = new Lifespan()) {
RecordStorageEngine storageEngine = openStorageEngine(layout, denseThreshold);
relationshipTypes = createRelationshipTypes(storageEngine);
life.add(storageEngine);
generateData(storageEngine, denseThreshold, relationshipTypes);
// Snapshot every (group, direction) -> degree entry the live store currently holds.
storageEngine.relationshipGroupDegreesStore().accept((groupId, direction, degree) -> expectedDegrees.put(combinedKeyOnGroupAndDirection(groupId, direction), degree), NULL);
assertThat(expectedDegrees.isEmpty()).isFalse();
RelationshipGroupStore groupStore = storageEngine.testAccessNeoStores().getRelationshipGroupStore();
long highId = groupStore.getHighId();
assertThat(highId).isGreaterThan(1);
// Mark every group record with id >= 10 as not in use, so the rebuild must skip them.
for (int i = 10; i < highId; i++) {
RelationshipGroupRecord record = groupStore.getRecord(i, new RelationshipGroupRecord(i), RecordLoad.ALWAYS, NULL);
record.setInUse(false);
groupStore.updateRecord(record, NULL);
}
// Persist the in-use flag changes before the engine is shut down by the Lifespan.
storageEngine.flushAndForce(NULL);
}
// when: delete the degrees store file so reopening the engine forces a rebuild
directory.getFileSystem().deleteFile(layout.relationshipGroupDegreesStore());
try (Lifespan life = new Lifespan()) {
RecordStorageEngine storageEngine = assertDoesNotThrow(() -> life.add(openStorageEngine(layout, denseThreshold)));
// then: every rebuilt degree must match the snapshot taken before the deletion
storageEngine.relationshipGroupDegreesStore().accept((groupId, direction, degree) -> {
long key = combinedKeyOnGroupAndDirection(groupId, direction);
assertThat(expectedDegrees.containsKey(key)).isTrue();
long expectedDegree = expectedDegrees.get(key);
expectedDegrees.remove(key);
assertThat(degree).isEqualTo(expectedDegree);
}, NULL);
// Entries left over belong to the records marked unused above — the rebuild
// must NOT have visited them, so the map must not have been fully drained.
assertThat(expectedDegrees.size()).isGreaterThan(0);
}
}
Usage of org.neo4j.internal.recordstorage.RecordStorageEngine in the neo4j/neo4j project.
Example from class DetectAllRelationshipInconsistenciesIT, method shouldDetectSabotagedRelationshipWhereEverItIs.
@Test
void shouldDetectSabotagedRelationshipWhereEverItIs() throws Exception {
// Corrupts one random relationship record on disk, then asserts that the full
// consistency checker reports at least one RELATIONSHIP inconsistency for it.
// GIVEN a database with lots of relationships
GraphDatabaseAPI db = getGraphDatabaseAPI();
Sabotage sabotage;
try {
Node[] nodes = new Node[1_000];
Relationship[] relationships = new Relationship[10_000];
long additionalNodeId;
try (Transaction tx = db.beginTx()) {
for (int i = 0; i < nodes.length; i++) {
nodes[i] = tx.createNode(label("Foo"));
}
additionalNodeId = tx.createNode().getId();
// Create 10_000 relationships between randomly chosen node pairs.
for (int i = 0; i < 10_000; i++) {
relationships[i] = random.among(nodes).createRelationshipTo(random.among(nodes), MyRelTypes.TEST);
}
tx.commit();
}
// WHEN sabotaging a random relationship directly in the relationship store
DependencyResolver resolver = db.getDependencyResolver();
NeoStores neoStores = resolver.resolveDependency(RecordStorageEngine.class).testAccessNeoStores();
RelationshipStore relationshipStore = neoStores.getRelationshipStore();
Relationship sabotagedRelationships = random.among(relationships);
sabotage = sabotage(relationshipStore, sabotagedRelationships.getId(), additionalNodeId);
} finally {
// Shut down so the checker below runs against a cleanly closed store.
managementService.shutdown();
}
// THEN the checker should find it, wherever it is in the store
db = getGraphDatabaseAPI();
try {
DependencyResolver resolver = db.getDependencyResolver();
RecordStorageEngine storageEngine = resolver.resolveDependency(RecordStorageEngine.class);
NeoStores neoStores = storageEngine.testAccessNeoStores();
CountsStore counts = (CountsStore) storageEngine.countsAccessor();
RelationshipGroupDegreesStore groupDegreesStore = storageEngine.relationshipGroupDegreesStore();
DirectStoreAccess directStoreAccess = new DirectStoreAccess(neoStores, db.getDependencyResolver().resolveDependency(IndexProviderMap.class), db.getDependencyResolver().resolveDependency(TokenHolders.class), db.getDependencyResolver().resolveDependency(IndexStatisticsStore.class), db.getDependencyResolver().resolveDependency(IdGeneratorFactory.class));
// Randomized thread count exercises the checker's parallel code paths.
int threads = random.intBetween(2, 10);
FullCheck checker = new FullCheck(ProgressMonitorFactory.NONE, threads, ConsistencyFlags.DEFAULT, getTuningConfiguration(), DebugContext.NO_DEBUG, NodeBasedMemoryLimiter.DEFAULT);
AssertableLogProvider logProvider = new AssertableLogProvider(true);
ConsistencySummaryStatistics summary = checker.execute(resolver.resolveDependency(PageCache.class), directStoreAccess, () -> counts, () -> groupDegreesStore, null, PageCacheTracer.NULL, INSTANCE, logProvider.getLog(FullCheck.class));
int relationshipInconsistencies = summary.getInconsistencyCountForRecordType(RecordType.RELATIONSHIP);
assertTrue(relationshipInconsistencies > 0, "Couldn't detect sabotaged relationship " + sabotage);
// The checker's log must mention the sabotaged record's post-corruption state.
assertThat(logProvider).containsMessages(sabotage.after.toString());
} finally {
managementService.shutdown();
}
}
Usage of org.neo4j.internal.recordstorage.RecordStorageEngine in the neo4j/neo4j project.
Example from class FullCheckTokenIndexIT, method check.
/**
 * Runs a full consistency check against the given database's record stores and
 * indexes, returning the summary statistics produced by the checker.
 */
private ConsistencySummaryStatistics check(GraphDatabaseAPI database, Config config) throws ConsistencyCheckIncompleteException {
    DependencyResolver resolver = database.getDependencyResolver();
    RecordStorageEngine engine = resolver.resolveDependency(RecordStorageEngine.class);

    // Gather everything the checker needs from the running database.
    NeoStores stores = engine.testAccessNeoStores();
    CountsAccessor counts = engine.countsAccessor();
    RelationshipGroupDegreesStore degrees = engine.relationshipGroupDegreesStore();
    PageCache pageCache = resolver.resolveDependency(PageCache.class);
    IndexingService indexing = resolver.resolveDependency(IndexingService.class);
    IndexAccessors.IndexAccessorLookup accessorLookup = new LookupAccessorsFromRunningDb(indexing);
    DirectStoreAccess storeAccess = new DirectStoreAccess(
            stores,
            resolver.resolveDependency(IndexProviderMap.class),
            resolver.resolveDependency(TokenHolders.class),
            resolver.resolveDependency(IndexStatisticsStore.class),
            resolver.resolveDependency(IdGeneratorFactory.class));

    FullCheck checker = new FullCheck(
            ProgressMonitorFactory.NONE,
            defaultConsistencyCheckThreadsNumber(),
            ConsistencyFlags.DEFAULT,
            config,
            DebugContext.NO_DEBUG,
            NodeBasedMemoryLimiter.DEFAULT);
    return checker.execute(pageCache, storeAccess, () -> (CountsStore) counts, () -> degrees, accessorLookup, PageCacheTracer.NULL, INSTANCE, logProvider.getLog("test"));
}
Usage of org.neo4j.internal.recordstorage.RecordStorageEngine in the neo4j/neo4j project.
Example from class SchemaRecoveryIT, method inconsistentlyFlushedTokensShouldBeRecovered.
@Test
void inconsistentlyFlushedTokensShouldBeRecovered() {
// Verifies that recovery restores a consistent schema view after a crash where
// the property-key token store was flushed but its dynamic name store was not.
// given
Label label = label("User");
String property = "email";
startDb();
long initialConstraintCount;
long initialIndexCount;
// Record the schema baseline so the assertions below tolerate pre-existing entries.
try (Transaction tx = db.beginTx()) {
initialConstraintCount = Streams.stream(tx.schema().getConstraints()).count();
initialIndexCount = Streams.stream(tx.schema().getIndexes()).count();
}
try (Transaction tx = db.beginTx()) {
tx.schema().constraintFor(label).assertPropertyIsUnique(property).create();
tx.commit();
}
// Flush the property token store, but NOT the property token *name* store. This means tokens will refer to unused dynamic records for their names.
RecordStorageEngine storageEngine = db.getDependencyResolver().resolveDependency(RecordStorageEngine.class);
storageEngine.testAccessNeoStores().getPropertyKeyTokenStore().flush(NULL);
// Simulate a crash so the next start must run recovery.
killDb();
// when
startDb();
// then assert that we can still read the schema correctly.
assertEquals(initialConstraintCount + 1, constraints(db).size());
assertEquals(initialIndexCount + 1, indexes(db).size());
}
Aggregations