Use of org.neo4j.kernel.lifecycle.Lifespan in project neo4j by neo4j.
The class VersionAwareLogEntryReaderIT, method readOnlyLogFilesWhileCommandsAreAvailable.
@Test
@EnabledOnOs(OS.LINUX)
void readOnlyLogFilesWhileCommandsAreAvailable() throws IOException {
    LogFiles logFiles = LogFilesBuilder.builder(databaseLayout, fs)
            .withLogEntryReader(entryReader)
            .withLogVersionRepository(new SimpleLogVersionRepository())
            .withTransactionIdStore(new SimpleTransactionIdStore())
            .withStoreId(StoreId.UNKNOWN)
            .build();
    try (Lifespan lifespan = new Lifespan(logFiles)) {
        assertEquals(kibiBytes(128), Files.size(logFiles.getLogFile().getHighestLogFile()));
        LogPosition logPosition = entryReader.lastPosition();
        assertEquals(0L, logPosition.getLogVersion());
        // the last read position sits in the log file right before the preallocated zeros start
        assertEquals(END_OF_DATA_OFFSET, logPosition.getByteOffset());
    }
}
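All of these examples lean on the same pattern: Lifespan is an AutoCloseable wrapper that brings up the Lifecycle components handed to its constructor (or added via add) and shuts them down when the try-with-resources block ends. Below is a minimal sketch of that pattern using a hypothetical PrintingComponent built on LifecycleAdapter; the class and component names are illustrative, not taken from the neo4j sources.

import org.neo4j.kernel.lifecycle.Lifespan;
import org.neo4j.kernel.lifecycle.LifecycleAdapter;

class LifespanUsageSketch {
    // Hypothetical component; LifecycleAdapter supplies no-op lifecycle methods to override.
    static class PrintingComponent extends LifecycleAdapter {
        @Override
        public void start() {
            System.out.println("started");
        }

        @Override
        public void shutdown() {
            System.out.println("shut down");
        }
    }

    public static void main(String[] args) {
        // Components passed to the constructor are brought up; close() at the end of the try block shuts them down.
        try (Lifespan life = new Lifespan(new PrintingComponent())) {
            // the component is running inside this block
        }
    }
}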
Use of org.neo4j.kernel.lifecycle.Lifespan in project neo4j by neo4j.
The class VersionAwareLogEntryReaderIT, method readTillTheEndOfNotPreallocatedFile.
@Test
@DisabledOnOs(OS.LINUX)
void readTillTheEndOfNotPreallocatedFile() throws IOException {
    LogFiles logFiles = LogFilesBuilder.builder(databaseLayout, fs)
            .withLogEntryReader(entryReader)
            .withLogVersionRepository(new SimpleLogVersionRepository())
            .withTransactionIdStore(new SimpleTransactionIdStore())
            .withStoreId(StoreId.UNKNOWN)
            .build();
    try (Lifespan lifespan = new Lifespan(logFiles)) {
        LogPosition logPosition = entryReader.lastPosition();
        assertEquals(0L, logPosition.getLogVersion());
        // without preallocation the last read position is simply the end of the file
        assertEquals(Files.size(logFiles.getLogFile().getHighestLogFile()), logPosition.getByteOffset());
    }
}
Use of org.neo4j.kernel.lifecycle.Lifespan in project neo4j by neo4j.
The class QuickImport, method main.
public static void main(String[] arguments) throws IOException {
    Args args = Args.parse(arguments);
    long nodeCount = parseLongWithUnit(args.get("nodes", null));
    long relationshipCount = parseLongWithUnit(args.get("relationships", null));
    int labelCount = args.getNumber("labels", 4).intValue();
    int relationshipTypeCount = args.getNumber("relationship-types", 4).intValue();
    Path dir = Path.of(args.get("into"));
    long randomSeed = args.getNumber("random-seed", currentTimeMillis()).longValue();
    Configuration config = Configuration.COMMAS;
    Extractors extractors = new Extractors(config.arrayDelimiter());
    IdType idType = IdType.valueOf(args.get("id-type", IdType.INTEGER.name()));
    Groups groups = new Groups();
    Header nodeHeader = parseNodeHeader(args, idType, extractors, groups);
    Header relationshipHeader = parseRelationshipHeader(args, idType, extractors, groups);
    Config dbConfig;
    String dbConfigFileName = args.get("db-config", null);
    if (dbConfigFileName != null) {
        dbConfig = Config.newBuilder().fromFile(Path.of(dbConfigFileName)).build();
    } else {
        dbConfig = Config.defaults();
    }
    Boolean highIo = args.has("high-io") ? args.getBoolean("high-io") : null;
    LogProvider logging = NullLogProvider.getInstance();
    long pageCacheMemory = args.getNumber("pagecache-memory",
            org.neo4j.internal.batchimport.Configuration.MAX_PAGE_CACHE_MEMORY).longValue();
    // Import configuration: defaults for the target directory, overridden by the command-line arguments.
    org.neo4j.internal.batchimport.Configuration importConfig =
            new org.neo4j.internal.batchimport.Configuration.Overridden(defaultConfiguration(dir)) {
        @Override
        public int maxNumberOfProcessors() {
            return args.getNumber("processors", super.maxNumberOfProcessors()).intValue();
        }

        @Override
        public boolean highIO() {
            return highIo != null ? highIo : super.highIO();
        }

        @Override
        public long pageCacheMemory() {
            return pageCacheMemory;
        }

        @Override
        public long maxMemoryUsage() {
            String custom = args.get("max-memory", null);
            return custom != null ? parseMaxMemory(custom) : super.maxMemoryUsage();
        }

        @Override
        public IndexConfig indexConfig() {
            return IndexConfig.create().withLabelIndex().withRelationshipTypeIndex();
        }
    };
    float factorBadNodeData = args.getNumber("factor-bad-node-data", 0).floatValue();
    float factorBadRelationshipData = args.getNumber("factor-bad-relationship-data", 0).floatValue();
    Input input = new DataGeneratorInput(nodeCount, relationshipCount, idType, randomSeed, 0,
            nodeHeader, relationshipHeader, labelCount, relationshipTypeCount,
            factorBadNodeData, factorBadRelationshipData);
    try (FileSystemAbstraction fileSystem = new DefaultFileSystemAbstraction();
            Lifespan life = new Lifespan()) {
        BatchImporter consumer;
        if (args.getBoolean("to-csv")) {
            consumer = new CsvOutput(dir, nodeHeader, relationshipHeader, config);
        } else {
            System.out.println("Seed " + randomSeed);
            // The scheduler is added to the Lifespan so it is shut down when the try block exits.
            final JobScheduler jobScheduler = life.add(createScheduler());
            boolean verbose = args.getBoolean("v");
            ExecutionMonitor monitor = verbose
                    ? new SpectrumExecutionMonitor(2, TimeUnit.SECONDS, System.out, 100)
                    : defaultVisible();
            consumer = BatchImporterFactory.withHighestPriority().instantiate(
                    DatabaseLayout.ofFlat(dir), fileSystem, PageCacheTracer.NULL, importConfig,
                    new SimpleLogService(logging, logging), monitor, EMPTY, dbConfig,
                    RecordFormatSelector.selectForConfig(dbConfig, logging), NO_MONITOR, jobScheduler,
                    Collector.EMPTY, TransactionLogInitializer.getLogFilesInitializer(),
                    new IndexImporterFactoryImpl(dbConfig), INSTANCE);
        }
        consumer.doImport(input);
    }
}
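QuickImport's command line is consumed through the Args helper used above. The sketch below shows how those flag lookups would be exercised in isolation, assuming the org.neo4j.internal.helpers.Args import; the flag syntax, argument values, and class name are illustrative assumptions, not taken from the neo4j sources.

import org.neo4j.internal.helpers.Args;

class QuickImportArgsSketch {
    public static void main(String[] ignored) {
        // Hypothetical command line mirroring the flags QuickImport reads above.
        String[] arguments = {"-nodes", "1M", "-relationships", "10M", "-into", "/tmp/quick-import", "-to-csv", "true"};
        Args args = Args.parse(arguments);
        String nodes = args.get("nodes", null);     // raw value; QuickImport feeds this to parseLongWithUnit
        String into = args.get("into");             // target directory for the generated store or CSV output
        boolean toCsv = args.getBoolean("to-csv");  // switches between CsvOutput and the batch importer
        System.out.println(nodes + " " + into + " " + toCsv);
    }
}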
Use of org.neo4j.kernel.lifecycle.Lifespan in project neo4j by neo4j.
The class ExtensionContextTest, method shouldConsultUnsatisfiedDependencyHandlerOnFailingDependencyClasses.
@Test
void shouldConsultUnsatisfiedDependencyHandlerOnFailingDependencyClasses() {
    GlobalExtensionContext context = mock(GlobalExtensionContext.class);
    ExtensionFailureStrategy handler = mock(ExtensionFailureStrategy.class);
    // an empty Dependencies instance with nothing registered in it
    Dependencies dependencies = new Dependencies();
    UninitializableExtensionFactory extensionFactory = new UninitializableExtensionFactory();
    GlobalExtensions extensions = new GlobalExtensions(context, iterable(extensionFactory), dependencies, handler);
    try (Lifespan ignored = new Lifespan(extensions)) {
        verify(handler).handle(eq(extensionFactory), any(IllegalArgumentException.class));
    }
}
Use of org.neo4j.kernel.lifecycle.Lifespan in project neo4j by neo4j.
The class NodeImporterTest, method tracePageCacheAccessOnNodeImport.
@Test
void tracePageCacheAccessOnNodeImport() throws IOException {
    JobScheduler scheduler = new ThreadPoolJobScheduler();
    try (Lifespan life = new Lifespan(scheduler);
            BatchingNeoStores stores = BatchingNeoStores.batchingNeoStoresWithExternalPageCache(
                    fs, pageCache, NULL, layout, Standard.LATEST_RECORD_FORMATS, Configuration.DEFAULT,
                    NullLogService.getInstance(), AdditionalInitialIds.EMPTY, Config.defaults(), INSTANCE)) {
        stores.createNew();
        int numberOfLabels = 50;
        long nodeId = 0;
        var cacheTracer = new DefaultPageCacheTracer();
        try (NodeImporter importer = new NodeImporter(stores, IdMappers.actual(), new DataImporter.Monitor(), cacheTracer, INSTANCE)) {
            importer.id(nodeId);
            String[] labels = new String[numberOfLabels];
            for (int i = 0; i < labels.length; i++) {
                labels[i] = "Label" + i;
            }
            importer.labels(labels);
            importer.property("a", randomAscii(10));
            importer.property("b", randomAscii(100));
            importer.property("c", randomAscii(1000));
            importer.endOfEntity();
        }
        NodeStore nodeStore = stores.getNodeStore();
        NodeRecord record = nodeStore.getRecord(nodeId, nodeStore.newRecord(), RecordLoad.NORMAL, CursorContext.NULL);
        long[] labels = NodeLabelsField.parseLabelsField(record).get(nodeStore, CursorContext.NULL);
        assertEquals(numberOfLabels, labels.length);
        assertThat(cacheTracer.faults()).isEqualTo(2);
        assertThat(cacheTracer.pins()).isEqualTo(13);
        assertThat(cacheTracer.unpins()).isEqualTo(13);
        assertThat(cacheTracer.hits()).isEqualTo(11);
    }
}