Use of org.neo4j.logging.LogProvider in project neo4j (by neo4j).
Example from the class QuickImport, method main.
// Entry point for a quick data-generation/import tool: synthesizes a graph of the
// requested size and either writes it out as CSV or imports it directly into a store.
public static void main(String[] arguments) throws IOException {
// Parse command-line arguments: graph size, label/type counts, target directory.
Args args = Args.parse(arguments);
// "nodes"/"relationships" accept unit suffixes (e.g. 10k, 1M) via parseLongWithUnit.
long nodeCount = parseLongWithUnit(args.get("nodes", null));
long relationshipCount = parseLongWithUnit(args.get("relationships", null));
int labelCount = args.getNumber("labels", 4).intValue();
int relationshipTypeCount = args.getNumber("relationship-types", 4).intValue();
Path dir = Path.of(args.get("into"));
// Seed defaults to the current time; it is printed later so runs can be reproduced.
long randomSeed = args.getNumber("random-seed", currentTimeMillis()).longValue();
Configuration config = Configuration.COMMAS;
Extractors extractors = new Extractors(config.arrayDelimiter());
IdType idType = IdType.valueOf(args.get("id-type", IdType.INTEGER.name()));
Groups groups = new Groups();
// Headers describe the generated node/relationship columns for the data generator.
Header nodeHeader = parseNodeHeader(args, idType, extractors, groups);
Header relationshipHeader = parseRelationshipHeader(args, idType, extractors, groups);
// Database config: read from the optional "db-config" file, else use defaults.
Config dbConfig;
String dbConfigFileName = args.get("db-config", null);
if (dbConfigFileName != null) {
dbConfig = Config.newBuilder().fromFile(Path.of(dbConfigFileName)).build();
} else {
dbConfig = Config.defaults();
}
// null means "not specified on the command line" — fall back to the configuration default.
Boolean highIo = args.has("high-io") ? args.getBoolean("high-io") : null;
LogProvider logging = NullLogProvider.getInstance();
long pageCacheMemory = args.getNumber("pagecache-memory", org.neo4j.internal.batchimport.Configuration.MAX_PAGE_CACHE_MEMORY).longValue();
// Importer configuration: start from the defaults for the target directory and
// override the knobs that were supplied as command-line arguments.
org.neo4j.internal.batchimport.Configuration importConfig = new org.neo4j.internal.batchimport.Configuration.Overridden(defaultConfiguration(dir)) {
@Override
public int maxNumberOfProcessors() {
return args.getNumber("processors", super.maxNumberOfProcessors()).intValue();
}
@Override
public boolean highIO() {
// Only override when "high-io" was explicitly given (see highIo above).
return highIo != null ? highIo : super.highIO();
}
@Override
public long pageCacheMemory() {
return pageCacheMemory;
}
@Override
public long maxMemoryUsage() {
String custom = args.get("max-memory", null);
return custom != null ? parseMaxMemory(custom) : super.maxMemoryUsage();
}
@Override
public IndexConfig indexConfig() {
return IndexConfig.create().withLabelIndex().withRelationshipTypeIndex();
}
};
// Fractions of deliberately-bad input rows, for exercising bad-data handling.
float factorBadNodeData = args.getNumber("factor-bad-node-data", 0).floatValue();
float factorBadRelationshipData = args.getNumber("factor-bad-relationship-data", 0).floatValue();
Input input = new DataGeneratorInput(nodeCount, relationshipCount, idType, randomSeed, 0, nodeHeader, relationshipHeader, labelCount, relationshipTypeCount, factorBadNodeData, factorBadRelationshipData);
// Lifespan manages start/stop of lifecycle components (the job scheduler below).
try (FileSystemAbstraction fileSystem = new DefaultFileSystemAbstraction();
Lifespan life = new Lifespan()) {
BatchImporter consumer;
if (args.getBoolean("to-csv")) {
// CSV mode: dump the generated data as CSV files instead of importing.
consumer = new CsvOutput(dir, nodeHeader, relationshipHeader, config);
} else {
// Print the seed so an identical dataset can be regenerated later.
System.out.println("Seed " + randomSeed);
final JobScheduler jobScheduler = life.add(createScheduler());
boolean verbose = args.getBoolean("v");
// Verbose mode uses the detailed spectrum monitor; otherwise the default progress view.
ExecutionMonitor monitor = verbose ? new SpectrumExecutionMonitor(2, TimeUnit.SECONDS, System.out, 100) : defaultVisible();
consumer = BatchImporterFactory.withHighestPriority().instantiate(DatabaseLayout.ofFlat(dir), fileSystem, PageCacheTracer.NULL, importConfig, new SimpleLogService(logging, logging), monitor, EMPTY, dbConfig, RecordFormatSelector.selectForConfig(dbConfig, logging), NO_MONITOR, jobScheduler, Collector.EMPTY, TransactionLogInitializer.getLogFilesInitializer(), new IndexImporterFactoryImpl(dbConfig), INSTANCE);
}
consumer.doImport(input);
}
}
Use of org.neo4j.logging.LogProvider in project neo4j (by neo4j).
Example from the class AbstractEditionModule, method serverSideRoutingTableProvider.
/**
 * Builds the provider for server-side routing tables: a single-address provider
 * that routes both reads and writes, configured from the global module's
 * connector port register, config and internal log provider.
 */
protected ServerSideRoutingTableProvider serverSideRoutingTableProvider(GlobalModule globalModule) {
    Config globalConfig = globalModule.getGlobalConfig();
    return new SingleAddressRoutingTableProvider(
            globalModule.getConnectorPortRegister(),
            RoutingOption.ROUTE_WRITE_AND_READ,
            globalConfig,
            globalModule.getLogService().getInternalLogProvider(),
            RoutingTableTTLProvider.ttlFromConfig(globalConfig));
}
Use of org.neo4j.logging.LogProvider in project neo4j (by neo4j).
Example from the class ConcurrentChangesOnEntitiesTest, method assertDatabaseConsistent.
/**
 * Runs a full consistency check over the database layout and asserts both that
 * the check completes without throwing and that it reports a successful result.
 * Check output goes to stdout (log) and stderr (progress).
 */
private void assertDatabaseConsistent() {
    LogProvider stdOutLogProvider = new Log4jLogProvider(System.out);
    assertDoesNotThrow(() -> {
        ConsistencyCheckService.Result checkResult = new ConsistencyCheckService()
                .runFullConsistencyCheck(
                        databaseLayout,
                        Config.defaults(),
                        ProgressMonitorFactory.textual(System.err),
                        stdOutLogProvider,
                        false);
        Assertions.assertTrue(checkResult.isSuccessful());
    });
}
Use of org.neo4j.logging.LogProvider in project neo4j (by neo4j).
Example from the class IndexPopulationJobTest, method shouldCloseAndFailOnFailure.
@Test
void shouldCloseAndFailOnFailure() throws Exception {
    // Given: one node to index, and a populator whose create() always throws
    createNode(map(name, "irrelephant"), FIRST);
    String failureMessage = "not successful";
    IndexPopulator failingPopulator = spy(indexPopulator(false));
    doThrow(new IllegalStateException(failureMessage)).when(failingPopulator).create();
    FlippableIndexProxy flipper = mock(FlippableIndexProxy.class);
    IndexPopulationJob job = newIndexPopulationJob(
            failingPopulator,
            flipper,
            indexStoreView,
            NullLogProvider.getInstance(),
            EntityType.NODE,
            indexPrototype(FIRST, name, false));
    // When
    job.run();
    // Then: the job must record the failure on the populator rather than leak it
    verify(failingPopulator).markAsFailed(contains(failureMessage));
}
Use of org.neo4j.logging.LogProvider in project neo4j (by neo4j).
Example from the class DatabaseMigrator, method migrate.
/**
 * Performs construction of {@link StoreUpgrader} and all of the necessary participants and performs store
 * migration if that is required.
 *
 * @param forceUpgrade Ignore the value of the {@link GraphDatabaseSettings#allow_upgrade} setting.
 * @throws IOException if the migration fails on an I/O error.
 */
public void migrate(boolean forceUpgrade) throws IOException {
StoreVersionCheck versionCheck = storageEngineFactory.versionCheck(fs, databaseLayout, config, pageCache, logService, pageCacheTracer);
LogsUpgrader logsUpgrader = new LogsUpgrader(fs, storageEngineFactory, databaseLayout, pageCache, legacyLogsLocator, config, dependencyResolver, pageCacheTracer, memoryTracker, databaseHealth, forceUpgrade);
Log userLog = logService.getUserLog(DatabaseMigrator.class);
VisibleMigrationProgressMonitor progress = new VisibleMigrationProgressMonitor(userLog);
LogProvider logProvider = logService.getInternalLogProvider();
StoreUpgrader storeUpgrader = new StoreUpgrader(versionCheck, progress, config, fs, logProvider, logsUpgrader, pageCacheTracer);
// Get all the participants from the storage engine and add them where they want to be
// (registration order determines migration order, so keep these additions in sequence).
var storeParticipants = storageEngineFactory.migrationParticipants(fs, config, pageCache, jobScheduler, logService, pageCacheTracer, memoryTracker);
storeParticipants.forEach(storeUpgrader::addParticipant);
IndexProviderMap indexProviderMap = dependencyResolver.resolveDependency(IndexProviderMap.class);
IndexConfigMigrator indexConfigMigrator = new IndexConfigMigrator(fs, config, pageCache, logService, storageEngineFactory, indexProviderMap, logService.getUserLog(IndexConfigMigrator.class), pageCacheTracer, memoryTracker);
storeUpgrader.addParticipant(indexConfigMigrator);
IndexProviderMigrator indexProviderMigrator = new IndexProviderMigrator(fs, config, pageCache, logService, storageEngineFactory, pageCacheTracer, memoryTracker);
storeUpgrader.addParticipant(indexProviderMigrator);
// Do individual index provider migration last because they may delete files that we need in earlier steps.
indexProviderMap.accept(provider -> storeUpgrader.addParticipant(provider.storeMigrationParticipant(fs, pageCache, storageEngineFactory)));
try {
storeUpgrader.migrateIfNeeded(databaseLayout, forceUpgrade);
} catch (Exception e) {
// Pass the exception to the logger as well, so the full stack trace is retained
// in the user log instead of only e.toString()'s class name and message.
userLog.error("Error upgrading database. Database left intact and will likely not be able to start: " + e.toString(), e);
throw e;
}
}
Aggregations