Use of org.neo4j.monitoring.DatabaseHealth in project neo4j by neo4j.
From the class TransactionLogFileIT, method doNotScanDirectoryOnRotate.
@Test
@EnabledOnOs(OS.LINUX)
void doNotScanDirectoryOnRotate() throws IOException {
    LogFiles logFiles = LogFilesBuilder.builder(databaseLayout, fileSystem)
            .withTransactionIdStore(transactionIdStore)
            .withLogVersionRepository(logVersionRepository)
            .withStoreId(StoreId.UNKNOWN)
            .build();
    life.add(logFiles);
    life.start();

    MutableLong rotationObservedVersion = new MutableLong();
    LogRotation logRotation = FileLogRotation.transactionLogRotation(
            logFiles, Clock.systemUTC(), new DatabaseHealth(NO_OP, NullLog.getInstance()),
            new LogRotationMonitorAdapter() {
                @Override
                public void startRotation(long currentLogVersion) {
                    rotationObservedVersion.setValue(currentLogVersion);
                }
            });

    for (int i = 0; i < 6; i++) {
        for (Path path : logFiles.logFiles()) {
            FileUtils.deleteFile(path);
        }
        logRotation.rotateLogFile(LogAppendEvent.NULL);
    }

    assertEquals(5, rotationObservedVersion.getValue());
    assertEquals(6, logFiles.getLogFile().getCurrentLogVersion());
}
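Note that the DatabaseHealth passed to FileLogRotation.transactionLogRotation here is just a fresh health gate; none of the assertions touch it. A minimal sketch of what such an instance provides, assuming NO_OP is the statically imported no-op panic event generator used throughout these snippets and that panic/isHealthy behave as described:

DatabaseHealth health = new DatabaseHealth(NO_OP, NullLog.getInstance());
assertTrue(health.isHealthy());                       // a fresh instance reports healthy
health.panic(new IOException("simulated failure"));   // nothing in this test does this
assertFalse(health.isHealthy());                      // but after a panic it reports unhealthy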
Use of org.neo4j.monitoring.DatabaseHealth in project neo4j by neo4j.
From the class StoreUpgraderTest, method newUpgrader.
private StoreUpgrader newUpgrader(StoreVersionCheck storeVersionCheck, PageCache pageCache, Config config,
        MigrationProgressMonitor progressMonitor, PageCacheTracer pageCacheTracer, boolean forceUpgrade) {
    NullLogService instance = NullLogService.getInstance();
    BatchImporterFactory batchImporterFactory = BatchImporterFactory.withHighestPriority();
    RecordStorageMigrator defaultMigrator = new RecordStorageMigrator(fileSystem, pageCache, getTuningConfig(),
            instance, jobScheduler, pageCacheTracer, batchImporterFactory, INSTANCE);
    StorageEngineFactory storageEngineFactory = StorageEngineFactory.defaultStorageEngine();
    SchemaIndexMigrator indexMigrator = new SchemaIndexMigrator("Indexes", fileSystem, pageCache,
            IndexProvider.EMPTY.directoryStructure(), storageEngineFactory, true);
    LegacyTransactionLogsLocator logsLocator = new LegacyTransactionLogsLocator(config, databaseLayout);
    DatabaseHealth databaseHealth = new DatabaseHealth(NO_OP, NullLog.getInstance());

    Dependencies dependencies = new Dependencies();
    dependencies.satisfyDependencies(new Monitors());

    LogsUpgrader logsUpgrader = new LogsUpgrader(fileSystem, storageEngineFactory, databaseLayout, pageCache,
            logsLocator, config, dependencies, pageCacheTracer, INSTANCE, databaseHealth, forceUpgrade);
    StoreUpgrader upgrader = new StoreUpgrader(storeVersionCheck, progressMonitor, config, fileSystem,
            NullLogProvider.getInstance(), logsUpgrader, pageCacheTracer);
    upgrader.addParticipant(indexMigrator);
    upgrader.addParticipant(NOT_PARTICIPATING);
    upgrader.addParticipant(NOT_PARTICIPATING);
    upgrader.addParticipant(NOT_PARTICIPATING);
    upgrader.addParticipant(NOT_PARTICIPATING);
    upgrader.addParticipant(defaultMigrator);
    return upgrader;
}
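The Dependencies instance above is only seeded with a Monitors object, but the same mechanism can also expose the health object to any collaborator that resolves it by type. A small sketch, assuming the usual varargs satisfyDependencies and resolveDependency API (this is not part of the method above):

Dependencies deps = new Dependencies();
deps.satisfyDependencies(new Monitors(), databaseHealth);   // both become resolvable by their types
DatabaseHealth resolved = deps.resolveDependency(DatabaseHealth.class);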
Use of org.neo4j.monitoring.DatabaseHealth in project neo4j by neo4j.
From the class BatchingTransactionAppenderConcurrencyTest, method databasePanicShouldHandleOutOfMemoryErrors.
@Test
void databasePanicShouldHandleOutOfMemoryErrors() throws IOException, InterruptedException {
    final CountDownLatch panicLatch = new CountDownLatch(1);
    final CountDownLatch adversaryLatch = new CountDownLatch(1);
    OutOfMemoryAwareFileSystem fs = new OutOfMemoryAwareFileSystem();
    life.add(new FileSystemLifecycleAdapter(fs));
    DatabaseHealth slowPanicDatabaseHealth = new SlowPanickingDatabaseHealth(panicLatch, adversaryLatch);
    LogFiles logFiles = LogFilesBuilder.builder(databaseLayout, fs)
            .withLogVersionRepository(logVersionRepository)
            .withTransactionIdStore(transactionIdStore)
            .withDatabaseHealth(slowPanicDatabaseHealth)
            .withLogEntryReader(new VersionAwareLogEntryReader(new TestCommandReaderFactory()))
            .withStoreId(StoreId.UNKNOWN)
            .build();
    life.add(logFiles);
    final BatchingTransactionAppender appender = life.add(new BatchingTransactionAppender(
            logFiles, logRotation, transactionMetadataCache, transactionIdStore, slowPanicDatabaseHealth));
    life.start();

    // Commit initial transaction
    appender.append(tx(), LogAppendEvent.NULL);

    // Try to commit one transaction, will fail during flush with OOM, but not actually panic
    fs.shouldOOM = true;
    Future<Long> failingTransaction = executor.submit(() -> appender.append(tx(), LogAppendEvent.NULL));
    panicLatch.await();

    // Try to commit one additional transaction, should fail since database has already panicked
    fs.shouldOOM = false;
    var e = assertThrows(IOException.class, () -> appender.append(tx(), new LogAppendEvent.Empty() {
        @Override
        public LogForceWaitEvent beginLogForceWait() {
            adversaryLatch.countDown();
            return super.beginLogForceWait();
        }
    }));
    assertThat(e).hasMessageContaining("The database has encountered a critical error");

    // Check that we actually got an OutOfMemoryError
    var executionException = assertThrows(ExecutionException.class, failingTransaction::get);
    assertThat(executionException).hasCauseInstanceOf(OutOfMemoryError.class);

    // Check number of transactions, should only have one
    LogEntryReader logEntryReader = new VersionAwareLogEntryReader(new TestCommandReaderFactory());
    LogFile logFile = logFiles.getLogFile();
    assertThat(logFile.getLowestLogVersion()).isEqualTo(logFile.getHighestLogVersion());
    long version = logFile.getHighestLogVersion();

    try (LogVersionedStoreChannel channel = logFile.openForVersion(version);
            ReadAheadLogChannel readAheadLogChannel = new ReadAheadLogChannel(channel, INSTANCE);
            LogEntryCursor cursor = new LogEntryCursor(logEntryReader, readAheadLogChannel)) {
        LogEntry entry;
        long numberOfTransactions = 0;
        while (cursor.next()) {
            entry = cursor.get();
            if (entry instanceof LogEntryCommit) {
                numberOfTransactions++;
            }
        }
        assertThat(numberOfTransactions).isEqualTo(1L);
    }
}
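SlowPanickingDatabaseHealth and OutOfMemoryAwareFileSystem are test-local helpers that are not part of this snippet. A hypothetical sketch of the health subclass, assuming DatabaseHealth.panic is overridable in this version; it stalls the panic so the second append can be issued while the first one is still panicking:

private static class SlowPanickingDatabaseHealth extends DatabaseHealth {
    private final CountDownLatch panicLatch;
    private final CountDownLatch adversaryLatch;

    SlowPanickingDatabaseHealth(CountDownLatch panicLatch, CountDownLatch adversaryLatch) {
        super(NO_OP, NullLog.getInstance());
        this.panicLatch = panicLatch;
        this.adversaryLatch = adversaryLatch;
    }

    @Override
    public void panic(Throwable cause) {
        panicLatch.countDown();        // signal the test that the panic has started
        try {
            adversaryLatch.await();    // hold the panic until the test releases it
        } catch (InterruptedException e) {
            throw new RuntimeException(e);
        }
        super.panic(cause);
    }
}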
Use of org.neo4j.monitoring.DatabaseHealth in project neo4j by neo4j.
From the class BatchingTransactionAppenderConcurrencyTest, method shouldHaveAllConcurrentAppendersSeePanic.
/*
 * There was an issue where, if multiple concurrent appending threads appended and then moved on
 * to await a force, and the force failed so that the thread performing it raised a panic,
 * the other threads might not notice the panic, move on to mark their transactions as committed,
 * and only notice the panic later (which would be too late).
 */
@Test
void shouldHaveAllConcurrentAppendersSeePanic() throws Throwable {
    // GIVEN
    Adversary adversary = new ClassGuardedAdversary(new CountingAdversary(1, true),
            failMethod(TransactionLogFile.class, "force"));
    EphemeralFileSystemAbstraction efs = new EphemeralFileSystemAbstraction();
    FileSystemAbstraction fs = new AdversarialFileSystemAbstraction(adversary, efs);
    life.add(new FileSystemLifecycleAdapter(fs));
    DatabaseHealth databaseHealth = new DatabaseHealth(mock(DatabasePanicEventGenerator.class), NullLog.getInstance());
    LogFiles logFiles = LogFilesBuilder.builder(databaseLayout, fs)
            .withLogVersionRepository(logVersionRepository)
            .withTransactionIdStore(transactionIdStore)
            .withDatabaseHealth(databaseHealth)
            .withLogEntryReader(new VersionAwareLogEntryReader(new TestCommandReaderFactory()))
            .withStoreId(StoreId.UNKNOWN)
            .build();
    life.add(logFiles);
    final BatchingTransactionAppender appender = life.add(new BatchingTransactionAppender(
            logFiles, logRotation, transactionMetadataCache, transactionIdStore, databaseHealth));
    life.start();

    // WHEN
    int numberOfAppenders = 10;
    final CountDownLatch trap = new CountDownLatch(numberOfAppenders);
    final LogAppendEvent beforeForceTrappingEvent = new LogAppendEvent.Empty() {
        @Override
        public LogForceWaitEvent beginLogForceWait() {
            trap.countDown();
            awaitLatch(trap);
            return super.beginLogForceWait();
        }
    };
    Race race = new Race();
    for (int i = 0; i < numberOfAppenders; i++) {
        race.addContestant(() -> {
            // This test uses an adversarial file system that throws an exception in LogFile#force.
            // Since all of these transactions append and are forced in the same batch, and that force
            // fails, every one of them should fail. Any transaction that does not fail simply did not
            // notice the panic, which would be potentially hazardous.
            assertThrows(IOException.class, () -> appender.append(tx(), beforeForceTrappingEvent));
        });
    }

    // THEN perform the race. The relevant assertions are made inside the contestants.
    race.go();
}
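The invariant the contestants check is that a panic raised by the one failing force is visible to every appender. Distilled to the health object alone (a sketch; the real check happens inside the appender, whose internals are not shown), assuming assertHealthy rethrows the panic cause wrapped in the requested exception type:

DatabaseHealth health = new DatabaseHealth(mock(DatabasePanicEventGenerator.class), NullLog.getInstance());
health.panic(new IOException("force failed"));   // one thread raises the panic
assertThrows(IOException.class, () -> health.assertHealthy(IOException.class));   // every other thread must see it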
Use of org.neo4j.monitoring.DatabaseHealth in project neo4j by neo4j.
From the class BatchingTransactionAppenderRotationIT, method correctLastAppliedToPreviousLogTransactionInHeaderOnLogFileRotation.
@Test
void correctLastAppliedToPreviousLogTransactionInHeaderOnLogFileRotation() throws IOException {
    LogFiles logFiles = getLogFiles(logVersionRepository, transactionIdStore);
    life.add(logFiles);
    Health databaseHealth = getDatabaseHealth();
    LogRotation logRotation = FileLogRotation.transactionLogRotation(
            logFiles, Clock.systemUTC(), databaseHealth, monitors.newMonitor(LogRotationMonitor.class));
    TransactionMetadataCache transactionMetadataCache = new TransactionMetadataCache();
    BatchingTransactionAppender transactionAppender = new BatchingTransactionAppender(
            logFiles, logRotation, transactionMetadataCache, transactionIdStore, databaseHealth);
    life.add(transactionAppender);

    LogAppendEvent logAppendEvent = new RotationLogAppendEvent(logRotation);
    TransactionToApply transactionToApply = prepareTransaction();
    transactionAppender.append(transactionToApply, logAppendEvent);

    LogFile logFile = logFiles.getLogFile();
    assertEquals(1, logFile.getHighestLogVersion());
    Path highestLogFile = logFile.getHighestLogFile();
    LogHeader logHeader = LogHeaderReader.readLogHeader(fileSystem, highestLogFile, INSTANCE);
    assertEquals(2, logHeader.getLastCommittedTxId());
}
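The getLogFiles(...) and getDatabaseHealth() helpers are not included in the snippet. A plausible sketch of the health helper, consistent with how the other examples on this page construct a no-op health object (an assumption, not the verified source):

private static Health getDatabaseHealth() {
    return new DatabaseHealth(NO_OP, NullLog.getInstance());
}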