Example usage of org.neo4j.test.scheduler.ThreadPoolJobScheduler in the neo4j project, taken from the class CsvInputEstimateCalculationIT, method shouldCalculateCorrectEstimates.
@Test
void shouldCalculateCorrectEstimates() throws Exception {
    // given a couple of input files of various layouts
    Input input = generateData();
    RecordFormats format = LATEST_RECORD_FORMATS;
    // Size calculator configured with the default string/array block sizes, so the
    // estimate uses the same record layout the store below will be written with.
    PropertyValueRecordSizeCalculator sizeCalculator = new PropertyValueRecordSizeCalculator(
            format.property().getRecordSize(NO_STORE_HEADER),
            GraphDatabaseInternalSettings.string_block_size.defaultValue(), 0,
            GraphDatabaseInternalSettings.array_block_size.defaultValue(), 0);
    Input.Estimates estimates = input.calculateEstimates(sizeCalculator);

    // when: actually run the import so there is a real store to compare against
    Config config = Config.defaults();
    FileSystemAbstraction fs = new DefaultFileSystemAbstraction();
    try (JobScheduler jobScheduler = new ThreadPoolJobScheduler()) {
        ParallelBatchImporter importer = new ParallelBatchImporter(
                databaseLayout, fs, PageCacheTracer.NULL, PBI_CONFIG, NullLogService.getInstance(),
                INVISIBLE, EMPTY, config, format, ImportLogic.NO_MONITOR, jobScheduler,
                Collector.EMPTY, LogFilesInitializer.NULL, IndexImporterFactory.EMPTY,
                EmptyMemoryTracker.INSTANCE);
        importer.doImport(input);

        // then: compare the estimates with the id/property counts of the imported store
        SingleFilePageSwapperFactory swapperFactory = new SingleFilePageSwapperFactory(fs);
        try (PageCache pageCache = new MuninnPageCache(swapperFactory, jobScheduler, MuninnPageCache.config(1000));
                NeoStores stores = new StoreFactory(databaseLayout, config,
                        new DefaultIdGeneratorFactory(fs, immediate(), databaseLayout.getDatabaseName()),
                        pageCache, fs, NullLogProvider.getInstance(), PageCacheTracer.NULL, writable())
                        .openAllNeoStores()) {
            assertRoughlyEqual(estimates.numberOfNodes(), stores.getNodeStore().getNumberOfIdsInUse());
            assertRoughlyEqual(estimates.numberOfRelationships(), stores.getRelationshipStore().getNumberOfIdsInUse());
            assertRoughlyEqual(
                    estimates.numberOfNodeProperties() + estimates.numberOfRelationshipProperties(),
                    calculateNumberOfProperties(stores));
        }

        // Property storage size is fuzzier than counts, so only require it within 10%.
        long measuredPropertyStorage = propertyStorageSize();
        long estimatedPropertyStorage = estimates.sizeOfNodeProperties() + estimates.sizeOfRelationshipProperties();
        assertThat(estimatedPropertyStorage)
                .as("Estimated property storage size of %s must be within 10%% of the measured size of %s.",
                        bytesToString(estimatedPropertyStorage), bytesToString(measuredPropertyStorage))
                .isCloseTo(measuredPropertyStorage, withPercentage(10.0));
    }
}
Example usage of org.neo4j.test.scheduler.ThreadPoolJobScheduler in the neo4j project, taken from the class ConfigurableStandalonePageCacheFactoryTest, method mustAutomaticallyStartEvictionThread.
@Test
void mustAutomaticallyStartEvictionThread() throws Exception {
    try (FileSystemAbstraction fs = new DefaultFileSystemAbstraction();
            JobScheduler jobScheduler = new ThreadPoolJobScheduler()) {
        Path file = testDirectory.homePath().resolve("a").normalize();
        // Create an empty file so that the page cache has something to map.
        fs.write(file).close();
        try (PageCache cache = ConfigurableStandalonePageCacheFactory.createPageCache(fs, jobScheduler, PageCacheTracer.NULL);
                PagedFile pf = cache.map(file, 4096, DEFAULT_DATABASE_NAME);
                PageCursor cursor = pf.io(0, PagedFile.PF_SHARED_WRITE_LOCK, CursorContext.NULL)) {
            // Dirty far more pages than the cache can hold. If the eviction thread was
            // not started, cursor.next() will block forever waiting for a free page —
            // that hang is the failure mode this test guards against.
            int remainingPages = 10_000;
            while (remainingPages-- > 0) {
                assertTrue(cursor.next());
                cursor.putInt(42);
            }
        }
    }
}
Example usage of org.neo4j.test.scheduler.ThreadPoolJobScheduler in the neo4j project, taken from the class GBPTreePartialCreateFuzzIT, method main.
static void main(String[] args) throws Exception {
    // Entry point for a fuzzing subprocess: start building a GBPTree and close it
    // right away. The parent process is expected to kill this subprocess at an
    // arbitrary point in the middle of all this.
    Path file = Path.of(args[0]);
    try (FileSystemAbstraction fs = new DefaultFileSystemAbstraction();
            JobScheduler jobScheduler = new ThreadPoolJobScheduler()) {
        SingleFilePageSwapperFactory swapperFactory = new SingleFilePageSwapperFactory(fs);
        try (PageCache pageCache = new MuninnPageCache(swapperFactory, jobScheduler, config(10))) {
            // Remove any leftover file so the tree is created from scratch.
            fs.deleteFile(file);
            var tree = new GBPTreeBuilder<>(pageCache, file, longLayout().build()).build();
            tree.close();
        }
    }
}
Example usage of org.neo4j.test.scheduler.ThreadPoolJobScheduler in the neo4j project, taken from the class StoreUpgraderInterruptionTestIT, method setUpLabelScanStore.
@Before
public void setUpLabelScanStore() {
// Per-test fixture setup: fresh scheduler, database layout, page cache and the
// record-format pair (baseline -> latest in family) that the upgrade tests exercise.
// NOTE(review): the scheduler is created here each test; presumably an @After
// method closes it — confirm, since ThreadPoolJobScheduler owns threads.
jobScheduler = new ThreadPoolJobScheduler();
neo4jLayout = Neo4jLayout.of(directory.homePath());
workingDatabaseLayout = neo4jLayout.databaseLayout(DEFAULT_DATABASE_NAME);
prepareDirectory = directory.directory("prepare");
legacyTransactionLogsLocator = new LegacyTransactionLogsLocator(Config.defaults(), workingDatabaseLayout);
pageCache = pageCacheRule.getPageCache(fs);
// Baseline format comes from the (parameterized) "version" field; the successor is
// the newest format in the same family, falling back to the baseline itself when
// no newer format exists.
baselineFormat = RecordFormatSelector.selectForVersion(version);
successorFormat = RecordFormatSelector.findLatestFormatInFamily(baselineFormat).orElse(baselineFormat);
}
Example usage of org.neo4j.test.scheduler.ThreadPoolJobScheduler in the neo4j project, taken from the class NodeImporterTest, method shouldHandleLargeAmountsOfLabels.
@Test
void shouldHandleLargeAmountsOfLabels() throws IOException {
    // given a batching store and a mocked id mapper
    IdMapper idMapper = mock(IdMapper.class);
    JobScheduler scheduler = new ThreadPoolJobScheduler();
    try (Lifespan life = new Lifespan(scheduler);
            BatchingNeoStores stores = BatchingNeoStores.batchingNeoStoresWithExternalPageCache(
                    fs, pageCache, NULL, layout, Standard.LATEST_RECORD_FORMATS, Configuration.DEFAULT,
                    NullLogService.getInstance(), AdditionalInitialIds.EMPTY, Config.defaults(), INSTANCE)) {
        stores.createNew();

        // when: import a single node carrying many distinct labels
        int numberOfLabels = 50;
        long nodeId = 0;
        String[] labelNames = new String[numberOfLabels];
        for (int i = 0; i < numberOfLabels; i++) {
            labelNames[i] = "Label" + i;
        }
        try (NodeImporter importer = new NodeImporter(stores, idMapper, new DataImporter.Monitor(), NULL, INSTANCE)) {
            importer.id(nodeId);
            importer.labels(labelNames);
            importer.endOfEntity();
        }

        // then: every label must be readable back from the stored node record
        NodeStore nodeStore = stores.getNodeStore();
        NodeRecord record = nodeStore.getRecord(nodeId, nodeStore.newRecord(), RecordLoad.NORMAL, CursorContext.NULL);
        long[] labelIds = NodeLabelsField.parseLabelsField(record).get(nodeStore, CursorContext.NULL);
        assertEquals(numberOfLabels, labelIds.length);
    }
}
Aggregations