Example usage of org.neo4j.logging.log4j.Log4jLogProvider in the neo4j project (by neo4j),
taken from the class ConfigurableStandalonePageCacheFactory, method createPageCache.
/**
 * Create a standalone page cache backed by the given file system.
 * @param fileSystem file system that page cache will be based on
 * @param pageCacheTracer global page cache tracer
 * @param config page cache configuration; {@code pagecache_memory} defaults to "8M" if unset
 * @param jobScheduler page cache job scheduler
 * @param memoryPools memory pools used to account the page cache's memory allocation
 * @return created page cache instance
 */
public static PageCache createPageCache(FileSystemAbstraction fileSystem, PageCacheTracer pageCacheTracer, Config config, JobScheduler jobScheduler, MemoryPools memoryPools) {
    config.setIfNotSet(GraphDatabaseSettings.pagecache_memory, "8M");
    Neo4jLoggerContext loggerContext = LogConfig.createBuilder(System.err, Level.INFO)
            .withTimezone(config.get(GraphDatabaseSettings.db_timezone))
            .build();
    // The provider owns the logger context; closing it releases the underlying log4j resources.
    try (Log4jLogProvider logProvider = new Log4jLogProvider(loggerContext)) {
        ConfiguringPageCacheFactory pageCacheFactory = new ConfiguringPageCacheFactory(
                fileSystem, config, pageCacheTracer, logProvider.getLog(PageCache.class),
                jobScheduler, Clocks.nanoClock(), memoryPools);
        return pageCacheFactory.getOrCreatePageCache();
    }
}
Example usage of org.neo4j.logging.log4j.Log4jLogProvider in the neo4j project (by neo4j),
taken from the class ConcurrentUpdateIT, method populateDbWithConcurrentUpdates.
@Test
void populateDbWithConcurrentUpdates() throws Exception {
    DatabaseManagementService managementService = new TestDatabaseManagementServiceBuilder(databaseLayout).build();
    GraphDatabaseService database = managementService.database(DEFAULT_DATABASE_NAME);
    try {
        // Seed the store: 100 transactions, each creating 5 nodes carrying a random property,
        // with the label spread over "label1".."label100".
        RandomValues randomValues = RandomValues.create();
        int counter = 1;
        for (int j = 0; j < 100; j++) {
            try (Transaction transaction = database.beginTx()) {
                for (int i = 0; i < 5; i++) {
                    Node node = transaction.createNode(Label.label("label" + counter));
                    node.setProperty("property", randomValues.nextValue().asObject());
                }
                transaction.commit();
            }
            counter++;
        }
        int populatorCount = 5;
        ExecutorService executor = Executors.newFixedThreadPool(populatorCount);
        CountDownLatch startSignal = new CountDownLatch(1);
        AtomicBoolean endSignal = new AtomicBoolean();
        for (int i = 0; i < populatorCount; i++) {
            executor.submit(new Populator(database, counter, startSignal, endSignal));
        }
        try {
            // Create an index, release the populators, then wait for the index to come online
            // while the populators keep mutating the store concurrently.
            try (Transaction transaction = database.beginTx()) {
                transaction.schema().indexFor(Label.label("label10")).on("property").create();
                transaction.commit();
            }
            startSignal.countDown();
            try (Transaction transaction = database.beginTx()) {
                transaction.schema().awaitIndexesOnline(populatorCount, TimeUnit.MINUTES);
                transaction.commit();
            }
        } finally {
            endSignal.set(true);
            executor.shutdown();
            // Basically we don't care to await their completion because they've done their job
        }
    } finally {
        managementService.shutdown();
        ConsistencyCheckService consistencyCheckService = new ConsistencyCheckService();
        Config config = Config.defaults(GraphDatabaseSettings.pagecache_memory, "8m");
        // Log4jLogProvider is AutoCloseable — close it after the check instead of leaking it.
        try (Log4jLogProvider logProvider = new Log4jLogProvider(System.out)) {
            consistencyCheckService.runFullConsistencyCheck(databaseLayout, config, ProgressMonitorFactory.NONE, logProvider, false);
        }
    }
}
Example usage of org.neo4j.logging.log4j.Log4jLogProvider in the neo4j project (by neo4j),
taken from the class LabelScanStoreTxApplyRaceIT, method shouldStressIt.
/**
 * The test case is basically loads of concurrent CREATE/DELETE NODE or sometimes just CREATE, keeping the created node in an array
 * for dedicated deleter threads to pick up and delete as fast as they can see them. This concurrently with large creation transactions.
 */
@Test
void shouldStressIt() throws Throwable {
    // given
    Race race = new Race().withMaxDuration(5, TimeUnit.SECONDS);
    AtomicReferenceArray<Node> nodeHeads = new AtomicReferenceArray<>(NUMBER_OF_CREATORS);
    for (int i = 0; i < NUMBER_OF_CREATORS; i++) {
        race.addContestant(creator(nodeHeads, i));
    }
    race.addContestants(NUMBER_OF_DELETORS, deleter(nodeHeads));
    // when
    race.go();
    // then
    DatabaseLayout dbLayout = db.databaseLayout();
    managementService.shutdown();
    // Log4jLogProvider is AutoCloseable — close it deterministically instead of leaking it.
    try (Log4jLogProvider logProvider = new Log4jLogProvider(System.out)) {
        assertTrue(new ConsistencyCheckService()
                .runFullConsistencyCheck(dbLayout, defaults(GraphDatabaseSettings.neo4j_home, testDirectory.homePath()),
                        NONE, logProvider, false, new ConsistencyFlags(true, true, true))
                .isSuccessful());
    }
}
Example usage of org.neo4j.logging.log4j.Log4jLogProvider in the neo4j project (by neo4j),
taken from the class DatabaseLogServiceTest, method setUp.
@BeforeEach
void setUp() {
    // Route everything at DEBUG and above into outContent so tests can inspect the output.
    Log4jLogProvider provider = new Log4jLogProvider(outContent, Level.DEBUG);
    logProvider = provider;
    logService = new DatabaseLogService(namedDatabaseId, new SimpleLogService(provider));
}
Example usage of org.neo4j.logging.log4j.Log4jLogProvider in the neo4j project (by neo4j),
taken from the class CsvImporter, method doImport.
/**
 * Runs the batch importer over the parsed {@code input}, writing internal logging to the store's
 * internal log file. On failure the partially written store files are left on disk and a warning
 * is printed to stderr.
 *
 * @param input parsed import input (nodes and relationships)
 * @param badCollector collector accumulating bad/skipped entries; its count is reported afterwards
 */
private void doImport(Input input, Collector badCollector) {
boolean success = false;
Path internalLogFile = databaseConfig.get(store_internal_log_path);
// Scheduler, log output stream and log provider are all closed automatically, in reverse order.
try (JobScheduler jobScheduler = createInitialisedScheduler();
OutputStream outputStream = FileSystemUtils.createOrOpenAsOutputStream(fileSystem, internalLogFile, true);
Log4jLogProvider logProvider = Util.configuredLogProvider(databaseConfig, outputStream)) {
// Verbose mode gets a detailed spectrum progress monitor; otherwise the default visible one.
ExecutionMonitor executionMonitor = verbose ? new SpectrumExecutionMonitor(2, TimeUnit.SECONDS, stdOut, SpectrumExecutionMonitor.DEFAULT_WIDTH) : ExecutionMonitors.defaultVisible();
// User-level logging is discarded (NullLogProvider); internal logging goes to the file-backed provider.
BatchImporter importer = BatchImporterFactory.withHighestPriority().instantiate(databaseLayout, fileSystem, pageCacheTracer, importConfig, new SimpleLogService(NullLogProvider.getInstance(), logProvider), executionMonitor, EMPTY, databaseConfig, RecordFormatSelector.selectForConfig(databaseConfig, logProvider), new PrintingImportLogicMonitor(stdOut, stdErr), jobScheduler, badCollector, TransactionLogInitializer.getLogFilesInitializer(), new IndexImporterFactoryImpl(databaseConfig), memoryTracker);
printOverview(databaseLayout.databaseDirectory(), nodeFiles, relationshipFiles, importConfig, stdOut);
importer.doImport(input);
success = true;
} catch (Exception e) {
throw andPrintError("Import error", e, verbose, stdErr);
} finally {
// Always report the bad-entry count, and on failure warn that the store is likely unusable.
long numberOfBadEntries = badCollector.badEntries();
if (reportFile != null) {
if (numberOfBadEntries > 0) {
stdOut.println("There were bad entries which were skipped and logged into " + reportFile.toAbsolutePath());
}
}
if (!success) {
stdErr.println("WARNING Import failed. The store files in " + databaseLayout.databaseDirectory().toAbsolutePath() + " are left as they are, although they are likely in an unusable state. " + "Starting a database on these store files will likely fail or observe inconsistent records so " + "start at your own risk or delete the store manually");
}
}
}
Aggregations