Example usage of org.neo4j.kernel.impl.transaction.log.entry.LogEntry in the neo4j project, taken from the databasePanicShouldHandleOutOfMemoryErrors method of the BatchingTransactionAppenderConcurrencyTest class.
// Scenario under test: an append hits an OutOfMemoryError during flush, which must escalate
// to a database panic; once panicked, further appends must be rejected, and the log files must
// end up containing exactly one committed transaction (the initial one).
@Test
void databasePanicShouldHandleOutOfMemoryErrors() throws IOException, InterruptedException {
// Latches coordinating with SlowPanickingDatabaseHealth — presumably panicLatch is released
// when the panic starts and adversaryLatch gates its completion; TODO confirm against that class.
final CountDownLatch panicLatch = new CountDownLatch(1);
final CountDownLatch adversaryLatch = new CountDownLatch(1);
// File system that can be switched (via shouldOOM) to throw OutOfMemoryError on writes.
OutOfMemoryAwareFileSystem fs = new OutOfMemoryAwareFileSystem();
life.add(new FileSystemLifecycleAdapter(fs));
DatabaseHealth slowPanicDatabaseHealth = new SlowPanickingDatabaseHealth(panicLatch, adversaryLatch);
LogFiles logFiles = LogFilesBuilder.builder(databaseLayout, fs).withLogVersionRepository(logVersionRepository).withTransactionIdStore(transactionIdStore).withDatabaseHealth(slowPanicDatabaseHealth).withLogEntryReader(new VersionAwareLogEntryReader(new TestCommandReaderFactory())).withStoreId(StoreId.UNKNOWN).build();
life.add(logFiles);
final BatchingTransactionAppender appender = life.add(new BatchingTransactionAppender(logFiles, logRotation, transactionMetadataCache, transactionIdStore, slowPanicDatabaseHealth));
life.start();
// Commit initial transaction
appender.append(tx(), LogAppendEvent.NULL);
// Try to commit one transaction, will fail during flush with OOM, but not actually panic
fs.shouldOOM = true;
Future<Long> failingTransaction = executor.submit(() -> appender.append(tx(), LogAppendEvent.NULL));
// Block until the panic has started on the background thread.
panicLatch.await();
// Try to commit one additional transaction, should fail since database has already panicked
fs.shouldOOM = false;
var e = assertThrows(IOException.class, () -> appender.append(tx(), new LogAppendEvent.Empty() {
@Override
public LogForceWaitEvent beginLogForceWait() {
// Let the in-flight panic proceed once this append reaches the force-wait phase.
adversaryLatch.countDown();
return super.beginLogForceWait();
}
}));
assertThat(e).hasMessageContaining("The database has encountered a critical error");
// Check that we actually got an OutOfMemoryError
var executionException = assertThrows(ExecutionException.class, failingTransaction::get);
assertThat(executionException).hasCauseInstanceOf(OutOfMemoryError.class);
// Check number of transactions, should only have one
LogEntryReader logEntryReader = new VersionAwareLogEntryReader(new TestCommandReaderFactory());
LogFile logFile = logFiles.getLogFile();
// No rotation should have happened: lowest and highest log versions coincide.
assertThat(logFile.getLowestLogVersion()).isEqualTo(logFile.getHighestLogVersion());
long version = logFile.getHighestLogVersion();
// Read the single log file back and count commit entries.
try (LogVersionedStoreChannel channel = logFile.openForVersion(version);
ReadAheadLogChannel readAheadLogChannel = new ReadAheadLogChannel(channel, INSTANCE);
LogEntryCursor cursor = new LogEntryCursor(logEntryReader, readAheadLogChannel)) {
LogEntry entry;
long numberOfTransactions = 0;
while (cursor.next()) {
entry = cursor.get();
if (entry instanceof LogEntryCommit) {
numberOfTransactions++;
}
}
assertThat(numberOfTransactions).isEqualTo(1L);
}
}
Example usage of org.neo4j.kernel.impl.transaction.log.entry.LogEntry in the neo4j project, taken from the initializeExistingLogFiles method of the TransactionLogInitializer class.
/**
 * Ensures that any existing log files in the given transaction logs directory are initialised,
 * appending a checkpoint — and, when no committed transaction is present yet, an empty
 * transaction as well. This is done when we migrate 3.x stores into a 4.x world.
 */
public void initializeExistingLogFiles(DatabaseLayout layout, Path transactionLogsDirectory, String checkpointReason) throws Exception {
    try (LogFilesSpan span = buildLogFiles(layout, transactionLogsDirectory)) {
        var logFiles = span.getLogFiles();
        var logFile = logFiles.getLogFile();
        // Scan from the very beginning of the oldest log file we have.
        var header = logFile.extractHeader(logFile.getLowestLogVersion());
        var entryReader = new VersionAwareLogEntryReader(commandReaderFactory, false);
        try (var channel = logFile.getReader(header.getStartPosition());
                var entryCursor = new LogEntryCursor(entryReader, channel)) {
            while (entryCursor.next()) {
                if (entryCursor.get().getType() == LogEntryTypeCodes.TX_COMMIT) {
                    // A committed transaction already exists, so a checkpoint at the current
                    // end of the log files is all that is needed.
                    appendCheckpoint(logFiles, checkpointReason, logFile.getTransactionLogWriter().getCurrentPosition());
                    return;
                }
            }
        }
        // No commit entry found anywhere: write an empty transaction plus a checkpoint.
        appendEmptyTransactionAndCheckPoint(logFiles, checkpointReason);
    }
}
Example usage of org.neo4j.kernel.impl.transaction.log.entry.LogEntry in the neo4j project, taken from the reachableCheckpoints method of the LegacyCheckpointLogFile class.
/**
 * Scans every transaction log file, from the newest version down to the oldest, and collects
 * all inline checkpoint entries found in them.
 *
 * @return the discovered checkpoints; empty when no log files exist.
 */
public List<CheckpointInfo> reachableCheckpoints() throws IOException {
    var logFile = logFiles.getLogFile();
    long highestVersion = logFile.getHighestLogVersion();
    if (highestVersion < 0) {
        // No log files at all.
        return emptyList();
    }
    long lowestVersion = logFile.getLowestLogVersion();
    var foundCheckpoints = new ArrayList<CheckpointInfo>();
    for (long version = highestVersion; version >= lowestVersion; version--) {
        try (var channel = logFile.openForVersion(version);
                var readChannel = new ReadAheadLogChannel(channel, NO_MORE_CHANNELS, context.getMemoryTracker());
                var entryCursor = new LogEntryCursor(context.getLogEntryReader(), readChannel)) {
            var storeId = logFile.extractHeader(version).getStoreId();
            // Position where the entry about to be read starts; updated after each read.
            LogPosition entryStart = readChannel.getCurrentPosition();
            while (entryCursor.next()) {
                LogEntry logEntry = entryCursor.get();
                // Collect data about each inline checkpoint we encounter.
                if (logEntry instanceof LogEntryInlinedCheckPoint) {
                    foundCheckpoints.add(new CheckpointInfo((LogEntryInlinedCheckPoint) logEntry, storeId, entryStart));
                }
                entryStart = readChannel.getCurrentPosition();
            }
        }
    }
    return foundCheckpoints;
}
Example usage of org.neo4j.kernel.impl.transaction.log.entry.LogEntry in the neo4j project, taken from the filterTransactionLogFile method of the LogTestUtils class.
// Reads every log entry from the given transaction log file and feeds each one to the
// supplied filter hook. The hook is notified of the file first (filter.file), then once per
// entry (filter.test); despite the name, nothing is written — this method only reads.
private static void filterTransactionLogFile(FileSystemAbstraction fileSystem, Path file, final LogHook<LogEntry> filter, ChannelNativeAccessor channelNativeAccessor) throws IOException {
filter.file(file);
try (StoreChannel in = fileSystem.read(file)) {
// Parse the log header to learn the file's version and format before reading entries.
LogHeader logHeader = readLogHeader(ByteBuffers.allocate(CURRENT_FORMAT_LOG_HEADER_SIZE, INSTANCE), in, true, file);
assert logHeader != null : "Looks like we tried to read a log header of an empty pre-allocated file.";
PhysicalLogVersionedStoreChannel inChannel = new PhysicalLogVersionedStoreChannel(in, logHeader.getLogVersion(), logHeader.getLogFormatVersion(), file, channelNativeAccessor);
// NOTE(review): inBuffer (and inChannel) are not in the try-with-resources; the underlying
// StoreChannel 'in' is closed by the outer try, but confirm ReadAheadLogChannel holds no
// other resources (e.g. tracked buffers) that would need an explicit close here.
ReadableLogChannel inBuffer = new ReadAheadLogChannel(inChannel, INSTANCE);
LogEntryReader entryReader = new VersionAwareLogEntryReader(new TestCommandReaderFactory());
LogEntry entry;
// readLogEntry returns null at end-of-log, terminating the loop.
while ((entry = entryReader.readLogEntry(inBuffer)) != null) {
filter.test(entry);
}
}
}
Example usage of org.neo4j.kernel.impl.transaction.log.entry.LogEntry in the neo4j project, taken from the main method of the DumpLogicalLog class.
/**
 * Usage: [--txfilter "regex"] [--ccfilter cc-report-file] [--tofile] storeDirOrFile1 storeDirOrFile2 ...
 * <p>
 * {@code --txfilter}: a regex matched against each {@link LogEntry}; any transaction containing a
 * matching entry is included in the dump. Matching is performed with {@link Pattern}.
 * <p>
 * {@code --ccfilter}: path to an inconsistency report file from the consistency checker; only
 * transactions relevant to the reported inconsistencies are included.
 * <p>
 * {@code --tofile}: redirects output to dump-logical-log.txt in the store directory.
 */
public static void main(String[] args) throws IOException {
    var arguments = Args.withFlags(TO_FILE).parse(args);
    var timeZone = parseTimeZoneConfig(arguments);
    // The serializer formats each entry; the filter decides which transactions appear at all.
    var filter = parseFilter(arguments, timeZone);
    var serializer = parseSerializer(filter, timeZone);
    try (var fileSystem = new DefaultFileSystemAbstraction();
            var printer = getPrinter(arguments)) {
        // Every non-flag argument is a store directory or log file to dump.
        for (var storeDirOrFile : arguments.orphans()) {
            new DumpLogicalLog(fileSystem).dump(storeDirOrFile, printer.getFor(storeDirOrFile), filter, serializer);
        }
    }
}
Aggregations