Use of org.neo4j.internal.batchimport.store.BatchingNeoStores in the neo4j project (neo4j/neo4j).
From class ImportLogicTest, method closeImporterWithoutDiagnosticState:
@Test
void closeImporterWithoutDiagnosticState() throws IOException {
    // Mocked so we can verify exactly what the importer reports when it finishes.
    var monitor = mock(ExecutionMonitor.class);
    var factory = mock(IndexImporterFactory.class);
    try (BatchingNeoStores stores = batchingNeoStoresWithExternalPageCache(
            fileSystem, pageCache, NULL, databaseLayout, defaultFormat(), DEFAULT,
            getInstance(), AdditionalInitialIds.EMPTY, defaults(), INSTANCE)) {
        // noinspection EmptyTryBlock
        try (ImportLogic logic = new ImportLogic(
                databaseLayout, stores, DEFAULT, defaults(), getInstance(), monitor,
                defaultFormat(), Collector.EMPTY, NO_MONITOR, NULL, factory,
                EmptyMemoryTracker.INSTANCE)) {
            // No stages are run: mark the empty import as successful and let close() happen.
            logic.success();
        }
    }
    // No DataStatistics state was put into the logic, so the completion summary
    // must fall back to this placeholder message.
    verify(monitor).done(eq(true), anyLong(), contains("Data statistics is not available."));
}
Use of org.neo4j.internal.batchimport.store.BatchingNeoStores in the neo4j project (neo4j/neo4j).
From class NodeImporterTest, method shouldHandleLargeAmountsOfLabels:
@Test
void shouldHandleLargeAmountsOfLabels() throws IOException {
    // given
    var idMapper = mock(IdMapper.class);
    var scheduler = new ThreadPoolJobScheduler();
    try (Lifespan life = new Lifespan(scheduler);
            BatchingNeoStores stores = BatchingNeoStores.batchingNeoStoresWithExternalPageCache(
                    fs, pageCache, NULL, layout, Standard.LATEST_RECORD_FORMATS, Configuration.DEFAULT,
                    NullLogService.getInstance(), AdditionalInitialIds.EMPTY, Config.defaults(), INSTANCE)) {
        stores.createNew();

        // when: import a single node carrying many distinct labels
        int labelCount = 50;
        long nodeId = 0;
        try (NodeImporter importer = new NodeImporter(stores, idMapper, new DataImporter.Monitor(), NULL, INSTANCE)) {
            importer.id(nodeId);
            String[] labelNames = new String[labelCount];
            for (int index = 0; index < labelNames.length; index++) {
                labelNames[index] = "Label" + index;
            }
            importer.labels(labelNames);
            importer.endOfEntity();
        }

        // then: every label must survive the round-trip through the node store
        NodeStore nodeStore = stores.getNodeStore();
        NodeRecord record = nodeStore.getRecord(nodeId, nodeStore.newRecord(), RecordLoad.NORMAL, CursorContext.NULL);
        long[] storedLabels = NodeLabelsField.parseLabelsField(record).get(nodeStore, CursorContext.NULL);
        assertEquals(labelCount, storedLabels.length);
    }
}
Use of org.neo4j.internal.batchimport.store.BatchingNeoStores in the neo4j project (neo4j/neo4j).
From class HumanUnderstandableExecutionMonitor, method initialize:
@Override
public void initialize(DependencyResolver dependencyResolver) {
    this.dependencyResolver = dependencyResolver;
    // Pull everything the start-of-import summary needs out of the resolver up front.
    Input.Estimates estimates = dependencyResolver.resolveDependency(Input.Estimates.class);
    BatchingNeoStores neoStores = dependencyResolver.resolveDependency(BatchingNeoStores.class);
    IdMapper idMapper = dependencyResolver.resolveDependency(IdMapper.class);
    pageCacheArrayFactoryMonitor = dependencyResolver.resolveDependency(PageCacheArrayFactoryMonitor.class);

    long estimatedNodes = estimates.numberOfNodes();
    // The largest single cache used during import dominates the memory requirement.
    long biggestCacheMemory = estimatedCacheSize(
            neoStores,
            NodeRelationshipCache.memoryEstimation(estimatedNodes),
            idMapper.memoryEstimation(estimatedNodes));
    // Estimated on-disk footprint: node + relationship records plus both property stores.
    long estimatedDiskUsage = nodesDiskUsage(estimates, neoStores)
            + relationshipsDiskUsage(estimates, neoStores)
            + estimates.sizeOfNodeProperties()
            + estimates.sizeOfRelationshipProperties();

    System.out.println();
    printStageHeader("Import starting",
            ESTIMATED_NUMBER_OF_NODES, count(estimatedNodes),
            ESTIMATED_NUMBER_OF_NODE_PROPERTIES, count(estimates.numberOfNodeProperties()),
            ESTIMATED_NUMBER_OF_RELATIONSHIPS, count(estimates.numberOfRelationships()),
            ESTIMATED_NUMBER_OF_RELATIONSHIP_PROPERTIES, count(estimates.numberOfRelationshipProperties()),
            ESTIMATED_DISK_SPACE_USAGE, bytesToString(estimatedDiskUsage),
            ESTIMATED_REQUIRED_MEMORY_USAGE, bytesToString(biggestCacheMemory));
    System.out.println();
}
Use of org.neo4j.internal.batchimport.store.BatchingNeoStores in the neo4j project (neo4j/neo4j).
From class ImportLogicTest, method shouldUseDataStatisticsCountsForPrintingFinalStats:
@Test
void shouldUseDataStatisticsCountsForPrintingFinalStats() throws IOException {
    // given
    var monitor = mock(ExecutionMonitor.class);
    var factory = mock(IndexImporterFactory.class);
    try (BatchingNeoStores stores = batchingNeoStoresWithExternalPageCache(
            fileSystem, pageCache, NULL, databaseLayout, defaultFormat(), DEFAULT,
            getInstance(), AdditionalInitialIds.EMPTY, defaults(), INSTANCE)) {
        // when: feed the logic a pre-built DataStatistics state instead of running stages
        var typeCounts = new DataStatistics.RelationshipTypeCount[] {
                new DataStatistics.RelationshipTypeCount(0, 33),
                new DataStatistics.RelationshipTypeCount(1, 66)
        };
        var dataStatistics = new DataStatistics(100123, 100456, typeCounts);
        try (ImportLogic logic = new ImportLogic(
                databaseLayout, stores, DEFAULT, defaults(), getInstance(), monitor,
                defaultFormat(), Collector.EMPTY, NO_MONITOR, NULL, factory,
                EmptyMemoryTracker.INSTANCE)) {
            logic.putState(dataStatistics);
            logic.success();
        }
        // then: the completion summary must render the injected statistics
        verify(monitor).done(eq(true), anyLong(), contains(dataStatistics.toString()));
    }
}
Use of org.neo4j.internal.batchimport.store.BatchingNeoStores in the neo4j project (neo4j/neo4j).
From class NodeImporterTest, method tracePageCacheAccessOnNodeImport:
@Test
void tracePageCacheAccessOnNodeImport() throws IOException {
    var scheduler = new ThreadPoolJobScheduler();
    try (Lifespan life = new Lifespan(scheduler);
            BatchingNeoStores stores = BatchingNeoStores.batchingNeoStoresWithExternalPageCache(
                    fs, pageCache, NULL, layout, Standard.LATEST_RECORD_FORMATS, Configuration.DEFAULT,
                    NullLogService.getInstance(), AdditionalInitialIds.EMPTY, Config.defaults(), INSTANCE)) {
        stores.createNew();
        int labelCount = 50;
        long nodeId = 0;
        // Real tracer (not NULL) so page-cache activity for the import is recorded.
        var cacheTracer = new DefaultPageCacheTracer();
        try (NodeImporter importer = new NodeImporter(
                stores, IdMappers.actual(), new DataImporter.Monitor(), cacheTracer, INSTANCE)) {
            importer.id(nodeId);
            String[] labelNames = new String[labelCount];
            for (int index = 0; index < labelNames.length; index++) {
                labelNames[index] = "Label" + index;
            }
            importer.labels(labelNames);
            importer.property("a", randomAscii(10));
            importer.property("b", randomAscii(100));
            importer.property("c", randomAscii(1000));
            importer.endOfEntity();
        }
        // Sanity-check the imported node before inspecting tracer counters.
        NodeStore nodeStore = stores.getNodeStore();
        NodeRecord record = nodeStore.getRecord(nodeId, nodeStore.newRecord(), RecordLoad.NORMAL, CursorContext.NULL);
        long[] storedLabels = NodeLabelsField.parseLabelsField(record).get(nodeStore, CursorContext.NULL);
        assertEquals(labelCount, storedLabels.length);
        // Exact counters deliberately pin the paging behaviour of importing one node
        // with many labels and three string properties.
        assertThat(cacheTracer.faults()).isEqualTo(2);
        assertThat(cacheTracer.pins()).isEqualTo(13);
        assertThat(cacheTracer.unpins()).isEqualTo(13);
        assertThat(cacheTracer.hits()).isEqualTo(11);
    }
}
Aggregations