Example use of org.neo4j.logging.internal.SimpleLogService in the neo4j project:
the runFullConsistencyCheck method of the ConsistencyCheckService class.
/**
 * Runs a full consistency check against the store identified by {@code databaseLayout}.
 * <p>
 * The database must already be recovered (asserted up front); the store is opened strictly
 * read-only. A detailed report is written to a file under {@code reportDir}, and the returned
 * {@link Result} is a success or failure carrying the report path and summary statistics.
 *
 * @param databaseLayout    layout of the database to check
 * @param config            configuration; pagecache warmup is forcibly disabled for the check
 * @param progressFactory   factory for progress reporting during the check
 * @param logProvider       provider for user-facing log output
 * @param fileSystem        file system abstraction used for store and report-file access
 * @param pageCache         page cache backing store access
 * @param debugContext      debug options forwarded to {@link FullCheck}
 * @param reportDir         directory in which the consistency report file is created
 * @param consistencyFlags  which families of checks to run
 * @param pageCacheTracer   tracer for page cache access
 * @param memoryTracker     memory tracker for allocations made during the check
 * @return success if the store is consistent, otherwise failure pointing at the report file
 * @throws ConsistencyCheckIncompleteException if the check could not be completed
 */
public Result runFullConsistencyCheck(DatabaseLayout databaseLayout, Config config, ProgressMonitorFactory progressFactory, final LogProvider logProvider, final FileSystemAbstraction fileSystem, final PageCache pageCache, DebugContext debugContext, Path reportDir, ConsistencyFlags consistencyFlags, PageCacheTracer pageCacheTracer, MemoryTracker memoryTracker) throws ConsistencyCheckIncompleteException {
// Refuse to check a store that still requires recovery — results would be meaningless.
assertRecovered(databaseLayout, config, fileSystem, memoryTracker);
Log outLog = logProvider.getLog(getClass());
// Warmup would only add noise/IO for a one-shot read-only pass.
config.set(GraphDatabaseSettings.pagecache_warmup_enabled, false);
LifeSupport life = new LifeSupport();
final DefaultIdGeneratorFactory idGeneratorFactory = new DefaultIdGeneratorFactory(fileSystem, immediate(), databaseLayout.getDatabaseName());
// The whole check operates on a read-only view of the store.
DatabaseReadOnlyChecker readOnlyChecker = readOnly();
StoreFactory factory = new StoreFactory(databaseLayout, config, idGeneratorFactory, pageCache, fileSystem, logProvider, pageCacheTracer, readOnlyChecker);
// Don't start the counts stores here as part of life, instead only shut down. This is because it's better to let FullCheck
// start it and add its missing/broken detection where it can report to user.
ConsistencySummaryStatistics summary;
final Path reportFile = chooseReportPath(reportDir);
// The report file is created lazily (createOnDemand) so a clean run leaves no empty file behind.
Log4jLogProvider reportLogProvider = new Log4jLogProvider(LogConfig.createBuilder(fileSystem, reportFile, Level.INFO).createOnDemand().withCategory(false).build());
Log reportLog = reportLogProvider.getLog(getClass());
// Everything logged during the check goes both to the user log and the report file.
Log log = new DuplicatingLog(outLog, reportLog);
// Bootstrap kernel extensions
Monitors monitors = new Monitors();
JobScheduler jobScheduler = life.add(JobSchedulerFactory.createInitialisedScheduler());
// Read-only token holders: the check must never create new tokens.
TokenHolders tokenHolders = new TokenHolders(new DelegatingTokenHolder(new ReadOnlyTokenCreator(), TokenHolder.TYPE_PROPERTY_KEY), new DelegatingTokenHolder(new ReadOnlyTokenCreator(), TokenHolder.TYPE_LABEL), new DelegatingTokenHolder(new ReadOnlyTokenCreator(), TokenHolder.TYPE_RELATIONSHIP_TYPE));
// Ignore recovery cleanup work — nothing is being written, so there is nothing to clean up.
final RecoveryCleanupWorkCollector workCollector = RecoveryCleanupWorkCollector.ignore();
DatabaseExtensions extensions = life.add(instantiateExtensions(databaseLayout, fileSystem, config, new SimpleLogService(logProvider), pageCache, jobScheduler, workCollector, // We use TOOL context because it's true, and also because it uses the 'single' operational mode, which is important.
TOOL, monitors, tokenHolders, pageCacheTracer, readOnlyChecker));
DefaultIndexProviderMap indexes = life.add(new DefaultIndexProviderMap(extensions, config));
try (NeoStores neoStores = factory.openAllNeoStores()) {
long lastCommittedTransactionId = neoStores.getMetaDataStore().getLastCommittedTransactionId();
// Managers are added to life only so they get shut down in the finally block;
// FullCheck is responsible for starting them (see comment above).
CountsStoreManager countsStoreManager = life.add(new CountsStoreManager(pageCache, fileSystem, databaseLayout, pageCacheTracer, memoryTracker, lastCommittedTransactionId));
RelationshipGroupDegreesStoreManager groupDegreesStoreManager = life.add(new RelationshipGroupDegreesStoreManager(pageCache, fileSystem, databaseLayout, pageCacheTracer, memoryTracker, lastCommittedTransactionId));
// Load tokens before starting extensions, etc.
try (var cursorContext = new CursorContext(pageCacheTracer.createPageCursorTracer(CONSISTENCY_TOKEN_READER_TAG))) {
tokenHolders.setInitialTokens(StoreTokens.allReadableTokens(neoStores), cursorContext);
}
life.start();
// Added after life.start(): LifeSupport starts components added to an already-started life immediately.
IndexStatisticsStore indexStatisticsStore = new IndexStatisticsStore(pageCache, databaseLayout, workCollector, readOnlyChecker, pageCacheTracer);
life.add(indexStatisticsStore);
int numberOfThreads = defaultConsistencyCheckThreadsNumber();
DirectStoreAccess stores = new DirectStoreAccess(neoStores, indexes, tokenHolders, indexStatisticsStore, idGeneratorFactory);
// Leeway factor lets the memory limiter exceed its node-based estimate by a configurable margin.
double memoryLimitLeewayFactor = config.get(GraphDatabaseInternalSettings.consistency_check_memory_limit_factor);
FullCheck check = new FullCheck(progressFactory, numberOfThreads, consistencyFlags, config, debugContext, NodeBasedMemoryLimiter.defaultWithLeeway(memoryLimitLeewayFactor));
summary = check.execute(pageCache, stores, countsStoreManager, groupDegreesStoreManager, null, pageCacheTracer, memoryTracker, log);
} finally {
// Shut down all lifecycle components (scheduler, extensions, stores) and release the report log.
life.shutdown();
reportLogProvider.close();
}
if (!summary.isConsistent()) {
log.warn("See '%s' for a detailed consistency report.", reportFile);
return Result.failure(reportFile, summary);
}
return Result.success(reportFile, summary);
}
Example use of org.neo4j.logging.internal.SimpleLogService in the neo4j project:
the shouldLimitLogOutputToSensibleSizes method of the BoltResponseMessageWriterV3Test class.
/**
 * Asserts that large values aren't passed directly to the log provider as this may lead to overflows when flushing the message.
 */
@Test
void shouldLimitLogOutputToSensibleSizes() throws IOException {
    var output = mock(PackOutput.class);
    var packer = mock(Neo4jPack.Packer.class);

    // Make every pack attempt fail so the writer is forced down its error-logging path.
    var flushFailure = new IOException("Unable to flush");
    doThrow(flushFailure).when(packer).pack(ArgumentMatchers.any(AnyValue.class));

    var logProvider = new AssertableLogProvider();
    var messageWriter = new BoltResponseMessageWriterV3(ignored -> packer, output, new SimpleLogService(logProvider));

    // Build a 1000-level nested list whose string representation would be enormous.
    var nested = VirtualValues.list();
    for (var depth = 0; depth < 1000; depth++) {
        nested = VirtualValues.list(nested);
    }
    var hugeValue = nested;

    var thrown = assertThrows(IOException.class, () -> messageWriter.consumeField(hugeValue));
    assertSame(flushFailure, thrown);

    // The failure must be logged, but without interpolating the huge value itself.
    assertThat(logProvider).forClass(BoltResponseMessageWriterV3.class).containsMessagesOnce("Failed to write value");
    assertThat(logProvider)
            .forClass(BoltResponseMessageWriterV3.class)
            .doesNotContainMessageWithArguments("Failed to write value %s because: %s", nested, thrown.getMessage());
}
Aggregations