Use of org.neo4j.io.pagecache.tracing.DefaultPageCacheTracer in project neo4j by neo4j.
The class MetaDataStoreTest, method tracePageCacheAssessOnGetStoreId.
@Test
void tracePageCacheAssessOnGetStoreId() throws IOException {
    var cacheTracer = new DefaultPageCacheTracer();
    var cursorContext = new CursorContext(cacheTracer.createPageCursorTracer("tracePageCacheAssessOnGetStoreId"));
    try (var metaDataStore = newMetaDataStore()) {
        MetaDataStore.getStoreId(pageCache, metaDataStore.getStorageFile(), databaseLayout.getDatabaseName(), cursorContext);

        PageCursorTracer cursorTracer = cursorContext.getCursorTracer();
        assertThat(cursorTracer.pins()).isEqualTo(5);
        assertThat(cursorTracer.unpins()).isEqualTo(5);
        assertThat(cursorTracer.hits()).isEqualTo(5);
    }
}
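The test above illustrates the cursor-level tracing pattern: create a DefaultPageCacheTracer, derive a per-operation PageCursorTracer from it, wrap that in a CursorContext, pass the context into the traced store call, and read the counters back afterwards. The following is a minimal sketch of that recipe, not code from the neo4j test suite; tracedStoreRead(...) is a hypothetical stand-in for whatever operation is being traced, and the import paths reflect the neo4j 4.4 packages and may need adjusting for other versions.

    import org.neo4j.io.pagecache.context.CursorContext;
    import org.neo4j.io.pagecache.tracing.DefaultPageCacheTracer;
    import org.neo4j.io.pagecache.tracing.cursor.PageCursorTracer;
    import static org.assertj.core.api.Assertions.assertThat;

    // Sketch of the cursor-level tracing pattern, assuming a hypothetical
    // tracedStoreRead(CursorContext) stand-in for the store call under test.
    var cacheTracer = new DefaultPageCacheTracer();
    var cursorContext = new CursorContext(cacheTracer.createPageCursorTracer("myTracedOperation"));

    tracedStoreRead(cursorContext); // hypothetical operation that pins and unpins pages via this context

    PageCursorTracer cursorTracer = cursorContext.getCursorTracer();
    assertThat(cursorTracer.pins()).isEqualTo(cursorTracer.unpins()); // every pin should be matched by an unpin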
Use of org.neo4j.io.pagecache.tracing.DefaultPageCacheTracer in project neo4j by neo4j.
The class ParallelBatchImporterTest, method shouldImportCsvData.
@ParameterizedTest
@MethodSource("params")
void shouldImportCsvData(InputIdGenerator inputIdGenerator, IdType idType) throws Exception {
    this.inputIdGenerator = inputIdGenerator;

    // GIVEN
    ExecutionMonitor processorAssigner = ProcessorAssignmentStrategies.eagerRandomSaturation(config.maxNumberOfProcessors());
    CapturingMonitor monitor = new CapturingMonitor(processorAssigner);
    boolean successful = false;
    Groups groups = new Groups();
    IdGroupDistribution groupDistribution = new IdGroupDistribution(NODE_COUNT, NUMBER_OF_ID_GROUPS, random.random(), groups);
    long nodeRandomSeed = random.nextLong();
    long relationshipRandomSeed = random.nextLong();
    var pageCacheTracer = new DefaultPageCacheTracer();
    JobScheduler jobScheduler = new ThreadPoolJobScheduler();
    // This will have statistically half the nodes be considered dense
    Config dbConfig = Config.defaults(GraphDatabaseSettings.dense_node_threshold, RELATIONSHIPS_PER_NODE * 2);
    IndexImporterFactoryImpl indexImporterFactory = new IndexImporterFactoryImpl(dbConfig);
    final BatchImporter inserter = new ParallelBatchImporter(
            databaseLayout, fs, pageCacheTracer, config, NullLogService.getInstance(), monitor, EMPTY, dbConfig,
            getFormat(), ImportLogic.NO_MONITOR, jobScheduler, Collector.EMPTY,
            TransactionLogInitializer.getLogFilesInitializer(), indexImporterFactory, INSTANCE);
    LongAdder propertyCount = new LongAdder();
    LongAdder relationshipCount = new LongAdder();
    try {
        // WHEN
        inserter.doImport(Input.input(
                nodes(nodeRandomSeed, NODE_COUNT, config.batchSize(), inputIdGenerator, groupDistribution, propertyCount),
                relationships(relationshipRandomSeed, RELATIONSHIP_COUNT, config.batchSize(), inputIdGenerator,
                        groupDistribution, propertyCount, relationshipCount),
                idType,
                knownEstimates(NODE_COUNT, RELATIONSHIP_COUNT, NODE_COUNT * TOKENS.length / 2,
                        RELATIONSHIP_COUNT * TOKENS.length / 2, NODE_COUNT * TOKENS.length / 2 * Long.BYTES,
                        RELATIONSHIP_COUNT * TOKENS.length / 2 * Long.BYTES, NODE_COUNT * TOKENS.length / 2),
                groups));

        assertThat(pageCacheTracer.pins()).isGreaterThan(0);
        assertThat(pageCacheTracer.pins()).isEqualTo(pageCacheTracer.unpins());
        assertThat(pageCacheTracer.pins()).isEqualTo(Math.addExact(pageCacheTracer.faults(), pageCacheTracer.hits()));

        // THEN
        DatabaseManagementService managementService = getDBMSBuilder(databaseLayout).build();
        GraphDatabaseService db = managementService.database(DEFAULT_DATABASE_NAME);
        try (Transaction tx = db.beginTx()) {
            inputIdGenerator.reset();
            verifyData(NODE_COUNT, RELATIONSHIP_COUNT, db, tx, groupDistribution, nodeRandomSeed, relationshipRandomSeed);
            tx.commit();
        } finally {
            managementService.shutdown();
        }
        assertConsistent(databaseLayout);
        successful = true;
    } finally {
        jobScheduler.close();
        if (!successful) {
            Path failureFile = databaseLayout.databaseDirectory().resolve("input");
            try (PrintStream out = new PrintStream(Files.newOutputStream(failureFile))) {
                out.println("Seed used in this failing run: " + random.seed());
                out.println(inputIdGenerator);
                inputIdGenerator.reset();
                out.println();
                out.println("Processor assignments");
                out.println(processorAssigner.toString());
            }
            System.err.println("Additional debug information stored in " + failureFile);
        }
    }
}
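The three assertions in the middle of this test encode a general invariant of the cache-level counters: every pin is matched by an unpin, and every pin is satisfied either by a page fault or by a hit. A small helper along these lines (a sketch, not part of the neo4j test suite, using only DefaultPageCacheTracer getters already shown above) makes that check reusable across tests:

    // Sketch of a reusable assertion over cache-level DefaultPageCacheTracer counters.
    static void assertBalancedPageCacheCounters(DefaultPageCacheTracer tracer) {
        assertThat(tracer.pins()).isGreaterThan(0);                                            // something was actually traced
        assertThat(tracer.pins()).isEqualTo(tracer.unpins());                                  // pins and unpins balance out
        assertThat(tracer.pins()).isEqualTo(Math.addExact(tracer.faults(), tracer.hits()));    // each pin was either a fault or a hit
    }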
Use of org.neo4j.io.pagecache.tracing.DefaultPageCacheTracer in project neo4j by neo4j.
The class IndexPopulatorTests, method dropShouldNotFlushContent.
@Test
void dropShouldNotFlushContent() throws IOException {
    // given
    DefaultPageCacheTracer tracer = new DefaultPageCacheTracer();
    try (PageCache pageCache = pageCacheExtension.getPageCache(fs, PageCacheConfig.config().withTracer(tracer))) {
        populator = createPopulator(pageCache);
        populator.create();
        long preDrop = tracer.flushes();

        // when
        populator.drop();

        // then
        long postDrop = tracer.flushes();
        assertEquals(preDrop, postDrop);
    }
}
Use of org.neo4j.io.pagecache.tracing.DefaultPageCacheTracer in project neo4j by neo4j.
The class PropertyCreatorTest, method startStore.
@BeforeEach
void startStore() {
    neoStores = new StoreFactory(databaseLayout, Config.defaults(),
            new DefaultIdGeneratorFactory(fileSystem, immediate(), databaseLayout.getDatabaseName()),
            pageCache, fileSystem, NullLogProvider.getInstance(), PageCacheTracer.NULL, writable())
            .openNeoStores(true, StoreType.PROPERTY, StoreType.PROPERTY_STRING, StoreType.PROPERTY_ARRAY);
    propertyStore = neoStores.getPropertyStore();
    records = new DirectRecordAccess<>(propertyStore, Loaders.propertyLoader(propertyStore, NULL));
    var pageCacheTracer = new DefaultPageCacheTracer();
    cursorContext = new CursorContext(pageCacheTracer.createPageCursorTracer("propertyStore"));
    creator = new PropertyCreator(propertyStore, new PropertyTraverser(NULL), cursorContext, INSTANCE);
}
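With this setup, a test method can later read back the counters accumulated on the cursorContext created in startStore(). The following is a minimal sketch of such a test, not code from PropertyCreatorTest; createProperty(...) is a hypothetical stand-in for whatever property-writing call the test exercises through creator and records.

    // Sketch: inspecting the counters gathered by the CursorContext built in startStore().
    @Test
    void tracesPropertyStoreAccess() {
        createProperty(creator, records); // hypothetical traced operation using the cursorContext
        PageCursorTracer tracer = cursorContext.getCursorTracer();
        assertThat(tracer.pins()).isEqualTo(tracer.unpins()); // pins and unpins should balance
    }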