Usage example of org.neo4j.kernel.impl.transaction.log.files.LogFiles in the neo4j project:
class LogsUpgrader, method upgrade.
/**
 * Relocates transaction log files from their legacy directory into the transaction-logs
 * directory of the given database layout, and (re)initializes them with an upgrade
 * checkpoint. If no log files exist in either location, this either fails (when the
 * {@code fail_on_missing_files} setting is enabled) or initializes an empty log file.
 *
 * @param layout the layout of the database whose transaction logs are being upgraded.
 * @throws StoreUpgrader.TransactionLogsRelocationException if any step of the relocation
 *         or initialization fails; the original failure is preserved as the cause.
 */
public void upgrade(DatabaseLayout layout) {
    CommandReaderFactory commandReaderFactory = storageEngineFactory.commandReaderFactory();
    try (MetadataProvider store = getMetaDataStore()) {
        TransactionLogInitializer logInitializer =
                new TransactionLogInitializer(fs, store, commandReaderFactory, tracer);
        Path transactionLogsDirectory = layout.getTransactionLogsDirectory();
        Path legacyLogsDirectory = legacyLogsLocator.getTransactionLogsDirectory();
        boolean filesNeedsToMove = !transactionLogsDirectory.equals(legacyLogsDirectory);
        LogFiles logFiles = LogFilesBuilder.logFilesBasedOnlyBuilder(legacyLogsDirectory, fs)
                .withCommandReaderFactory(commandReaderFactory)
                .build();
        // Move log files to their intended directory, if they are not there already.
        Path[] legacyFiles = logFiles.logFiles();
        if (legacyFiles != null && legacyFiles.length > 0) {
            if (filesNeedsToMove) {
                for (Path legacyFile : legacyFiles) {
                    fs.copyFile(legacyFile, transactionLogsDirectory.resolve(legacyFile.getFileName()), EMPTY_COPY_OPTIONS);
                }
            }
            logInitializer.initializeExistingLogFiles(layout, transactionLogsDirectory, UPGRADE_CHECKPOINT);
            // Delete the originals only after the copies have been initialized successfully,
            // so a failure above leaves the legacy files intact.
            if (filesNeedsToMove) {
                for (Path legacyFile : legacyFiles) {
                    fs.deleteFile(legacyFile);
                }
            }
        } else {
            // We didn't find any files in the legacy location.
            // If the legacy location is the same as the intended location, then the log files are missing entirely.
            // Otherwise, we will have to check if the log files are already present in the intended location and try to initialize them there.
            // Consistency fix: configure this builder with the same command reader factory as
            // the legacy-location builder above.
            logFiles = LogFilesBuilder.logFilesBasedOnlyBuilder(transactionLogsDirectory, fs)
                    .withCommandReaderFactory(commandReaderFactory)
                    .build();
            Path[] existingFiles = logFiles.logFiles();
            if (existingFiles != null && existingFiles.length > 0) {
                // The log files are already at their intended location, so initialize them there.
                logInitializer.initializeExistingLogFiles(layout, transactionLogsDirectory, UPGRADE_CHECKPOINT);
            } else if (config.get(fail_on_missing_files)) {
                // Log files are required by configuration, so their absence is an error
                // regardless of recovered state or not.
                throw new UpgradeNotAllowedException();
            } else {
                // The log files are missing entirely, but we were told to not think of this as an error condition,
                // so we instead initialize an empty log file.
                logInitializer.initializeEmptyLogFile(layout, transactionLogsDirectory, UPGRADE_CHECKPOINT);
            }
        }
    } catch (Exception exception) {
        throw new StoreUpgrader.TransactionLogsRelocationException("Failure on attempt to move transaction logs into new location.", exception);
    }
}
Usage example of org.neo4j.kernel.impl.transaction.log.files.LogFiles in the neo4j project:
class LogsUpgrader, method buildLogFiles.
/**
 * Creates the {@link LogFiles} for the given database layout, configured with the storage
 * engine's log entry reader, the database configuration, memory tracker, health monitor
 * and dependency resolver.
 *
 * @param layout layout of the database whose transaction log files are requested.
 * @return the fully configured {@link LogFiles} instance.
 */
private LogFiles buildLogFiles(DatabaseLayout layout) {
    final LogEntryReader entryReader =
            new VersionAwareLogEntryReader(storageEngineFactory.commandReaderFactory());
    try {
        return LogFilesBuilder.builder(layout, fs)
                .withLogEntryReader(entryReader)
                .withConfig(config)
                .withMemoryTracker(memoryTracker)
                .withDatabaseHealth(databaseHealth)
                .withDependencies(dependencyResolver)
                .build();
    } catch (IOException e) {
        // Wrap so callers are not forced to handle a checked exception; cause is preserved.
        throw new RuntimeException(e);
    }
}
Usage example of org.neo4j.kernel.impl.transaction.log.files.LogFiles in the neo4j project:
class BatchingTransactionAppenderConcurrencyTest, method databasePanicShouldHandleOutOfMemoryErrors.
// Verifies the panic path for an OutOfMemoryError during a log flush: the append that hits
// the OOM must panic the database, a subsequent append must fail fast with the panic
// message, and only the single successfully committed transaction may appear in the log.
@Test
void databasePanicShouldHandleOutOfMemoryErrors() throws IOException, InterruptedException {
// panicLatch/adversaryLatch are handed to SlowPanickingDatabaseHealth; presumably the
// first is released when the panic begins and the second stalls its completion so the
// second append can race in — TODO confirm against SlowPanickingDatabaseHealth.
final CountDownLatch panicLatch = new CountDownLatch(1);
final CountDownLatch adversaryLatch = new CountDownLatch(1);
OutOfMemoryAwareFileSystem fs = new OutOfMemoryAwareFileSystem();
life.add(new FileSystemLifecycleAdapter(fs));
DatabaseHealth slowPanicDatabaseHealth = new SlowPanickingDatabaseHealth(panicLatch, adversaryLatch);
LogFiles logFiles = LogFilesBuilder.builder(databaseLayout, fs).withLogVersionRepository(logVersionRepository).withTransactionIdStore(transactionIdStore).withDatabaseHealth(slowPanicDatabaseHealth).withLogEntryReader(new VersionAwareLogEntryReader(new TestCommandReaderFactory())).withStoreId(StoreId.UNKNOWN).build();
life.add(logFiles);
final BatchingTransactionAppender appender = life.add(new BatchingTransactionAppender(logFiles, logRotation, transactionMetadataCache, transactionIdStore, slowPanicDatabaseHealth));
life.start();
// Commit initial transaction
appender.append(tx(), LogAppendEvent.NULL);
// Try to commit one transaction, will fail during flush with OOM, but not actually panic
fs.shouldOOM = true;
Future<Long> failingTransaction = executor.submit(() -> appender.append(tx(), LogAppendEvent.NULL));
// Wait until the panic has started before issuing the next append.
panicLatch.await();
// Try to commit one additional transaction, should fail since database has already panicked
fs.shouldOOM = false;
var e = assertThrows(IOException.class, () -> appender.append(tx(), new LogAppendEvent.Empty() {
@Override
public LogForceWaitEvent beginLogForceWait() {
// Release the stalled panic once this append has reached the log-force wait.
adversaryLatch.countDown();
return super.beginLogForceWait();
}
}));
assertThat(e).hasMessageContaining("The database has encountered a critical error");
// Check that we actually got an OutOfMemoryError
var executionException = assertThrows(ExecutionException.class, failingTransaction::get);
assertThat(executionException).hasCauseInstanceOf(OutOfMemoryError.class);
// Check number of transactions, should only have one
LogEntryReader logEntryReader = new VersionAwareLogEntryReader(new TestCommandReaderFactory());
LogFile logFile = logFiles.getLogFile();
// Lowest == highest: no rotation happened, so there is exactly one log file to scan.
assertThat(logFile.getLowestLogVersion()).isEqualTo(logFile.getHighestLogVersion());
long version = logFile.getHighestLogVersion();
// Scan that single log file and count commit entries.
try (LogVersionedStoreChannel channel = logFile.openForVersion(version);
ReadAheadLogChannel readAheadLogChannel = new ReadAheadLogChannel(channel, INSTANCE);
LogEntryCursor cursor = new LogEntryCursor(logEntryReader, readAheadLogChannel)) {
LogEntry entry;
long numberOfTransactions = 0;
while (cursor.next()) {
entry = cursor.get();
if (entry instanceof LogEntryCommit) {
numberOfTransactions++;
}
}
assertThat(numberOfTransactions).isEqualTo(1L);
}
}
Usage example of org.neo4j.kernel.impl.transaction.log.files.LogFiles in the neo4j project:
class BatchingTransactionAppenderConcurrencyTest, method shouldHaveAllConcurrentAppendersSeePanic.
/*
 * Regression test: when several threads append concurrently and then await the same force,
 * a failing force raises a panic in the thread performing it. Every waiting appender must
 * observe that panic before marking its transaction as committed — noticing the panic only
 * afterwards would be too late.
 */
@Test
void shouldHaveAllConcurrentAppendersSeePanic() throws Throwable {
    // GIVEN a file system whose first TransactionLogFile#force call throws.
    Adversary failOnForce = new ClassGuardedAdversary(new CountingAdversary(1, true), failMethod(TransactionLogFile.class, "force"));
    EphemeralFileSystemAbstraction ephemeralFs = new EphemeralFileSystemAbstraction();
    FileSystemAbstraction adversarialFs = new AdversarialFileSystemAbstraction(failOnForce, ephemeralFs);
    life.add(new FileSystemLifecycleAdapter(adversarialFs));
    DatabaseHealth health = new DatabaseHealth(mock(DatabasePanicEventGenerator.class), NullLog.getInstance());
    LogFiles logFiles = LogFilesBuilder.builder(databaseLayout, adversarialFs).withLogVersionRepository(logVersionRepository).withTransactionIdStore(transactionIdStore).withDatabaseHealth(health).withLogEntryReader(new VersionAwareLogEntryReader(new TestCommandReaderFactory())).withStoreId(StoreId.UNKNOWN).build();
    life.add(logFiles);
    final BatchingTransactionAppender appender = life.add(new BatchingTransactionAppender(logFiles, logRotation, transactionMetadataCache, transactionIdStore, health));
    life.start();

    // WHEN every appender rendezvouses right before the log-force wait, so that all of
    // their transactions end up in the same force batch.
    final int numberOfAppenders = 10;
    final CountDownLatch rendezvous = new CountDownLatch(numberOfAppenders);
    final LogAppendEvent waitForAllBeforeForce = new LogAppendEvent.Empty() {
        @Override
        public LogForceWaitEvent beginLogForceWait() {
            rendezvous.countDown();
            awaitLatch(rendezvous);
            return super.beginLogForceWait();
        }
    };
    Race race = new Race();
    for (int contestant = 0; contestant < numberOfAppenders; contestant++) {
        race.addContestant(() -> {
            // The adversarial file system makes LogFile#force throw, and all of these
            // transactions are appended and forced in the same batch — so every append
            // must fail. An append that does not fail has missed the panic, which is
            // exactly the hazard this test guards against.
            assertThrows(IOException.class, () -> appender.append(tx(), waitForAllBeforeForce));
        });
    }

    // THEN perform the race. The relevant assertions are made inside the contestants.
    race.go();
}
Usage example of org.neo4j.kernel.impl.transaction.log.files.LogFiles in the neo4j project:
class BatchingTransactionAppenderRotationIT, method correctLastAppliedToPreviousLogTransactionInHeaderOnLogFileRotation.
/**
 * After a log rotation, the header of the newly created log file must record the id of the
 * last transaction that was applied to the previous log file.
 */
@Test
void correctLastAppliedToPreviousLogTransactionInHeaderOnLogFileRotation() throws IOException {
    LogFiles logFiles = getLogFiles(logVersionRepository, transactionIdStore);
    life.add(logFiles);
    Health health = getDatabaseHealth();
    LogRotation rotation = FileLogRotation.transactionLogRotation(logFiles, Clock.systemUTC(), health, monitors.newMonitor(LogRotationMonitor.class));
    TransactionMetadataCache metadataCache = new TransactionMetadataCache();
    BatchingTransactionAppender appender = new BatchingTransactionAppender(logFiles, rotation, metadataCache, transactionIdStore, health);
    life.add(appender);

    // Appending via a rotation-triggering event pushes the log over to version 1.
    TransactionToApply transaction = prepareTransaction();
    appender.append(transaction, new RotationLogAppendEvent(rotation));

    LogFile logFile = logFiles.getLogFile();
    assertEquals(1, logFile.getHighestLogVersion());
    // The fresh file's header must point at the last tx committed to the previous file.
    LogHeader header = LogHeaderReader.readLogHeader(fileSystem, logFile.getHighestLogFile(), INSTANCE);
    assertEquals(2, header.getLastCommittedTxId());
}
Aggregations