use of org.neo4j.kernel.impl.transaction.log.files.LogFile in project neo4j by neo4j.
In the class VersionAwareLogEntryReaderIT, the method correctlyResetPositionOfObservedZeroWhenChannelSwitchOnExactlyCheckedByte:
@Test
void correctlyResetPositionOfObservedZeroWhenChannelSwitchOnExactlyCheckedByte() throws IOException {
    LogFiles logFiles = LogFilesBuilder.builder(databaseLayout, fs)
            .withLogEntryReader(entryReader)
            .withLogVersionRepository(new SimpleLogVersionRepository())
            .withTransactionIdStore(new SimpleTransactionIdStore())
            .withStoreId(StoreId.UNKNOWN)
            .withKernelVersionProvider(() -> KernelVersion.V4_0)
            .build();
    try (Lifespan lifespan = new Lifespan(logFiles)) {
        LogPositionMarker positionMarker = new LogPositionMarker();
        LogFile logFile = logFiles.getLogFile();
        long initialPosition = getLastReadablePosition(logFiles);
        long checkpointsEndDataOffset = DEFAULT_READ_AHEAD_SIZE + initialPosition;
        TransactionLogWriter logWriter = logFile.getTransactionLogWriter();
        do {
            logWriter.legacyCheckPoint(new LogPosition(4, 5));
            logWriter.getCurrentPosition(positionMarker);
        } while (positionMarker.getByteOffset() <= checkpointsEndDataOffset);
        logFile.flush();
        logFiles.getLogFile().rotate();
        fs.truncate(logFiles.getLogFile().getLogFileForVersion(0), checkpointsEndDataOffset);
        try (StoreChannel storeChannel = fs.write(logFiles.getLogFile().getLogFileForVersion(1))) {
            storeChannel.position(CURRENT_FORMAT_LOG_HEADER_SIZE);
            storeChannel.writeAll(ByteBuffer.wrap(new byte[] { 0 }));
        }
        try (ReadableLogChannel logChannel = logFiles.getLogFile().getReader(new LogPosition(0, initialPosition))) {
            LogEntry logEntry = entryReader.readLogEntry(logChannel);
            // we should read the expected checkpoint record first
            assertThat(logEntry).isInstanceOf(LogEntryInlinedCheckPoint.class);
            LogEntryInlinedCheckPoint checkPoint = (LogEntryInlinedCheckPoint) logEntry;
            LogPosition logPosition = checkPoint.getLogPosition();
            assertEquals(4, logPosition.getLogVersion());
            assertEquals(5, logPosition.getByteOffset());
            // set position to the end of the buffer to cause channel switch on next byte read
            ((PositionableChannel) logChannel).setCurrentPosition(checkpointsEndDataOffset);
            while (entryReader.readLogEntry(logChannel) != null) {
                // read to the end
            }
            // channel should be switched now and position should be just after the header
            LogPosition position = entryReader.lastPosition();
            assertEquals(CURRENT_FORMAT_LOG_HEADER_SIZE, position.getByteOffset());
            assertEquals(1, position.getLogVersion());
        }
    }
}
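What the test is really checking is that VersionAwareLogEntryReader keeps lastPosition() consistent even when the underlying read-ahead channel silently switches to the next log file mid-read. A minimal sketch of that read loop, reusing the fixtures from the test above (a started logFiles instance and the test's entryReader, inside a test method that declares IOException); the printouts are illustrative only:

// Drain all entries from a starting position and observe where the reader ends up.
try (ReadableLogChannel channel = logFiles.getLogFile().getReader(new LogPosition(0, CURRENT_FORMAT_LOG_HEADER_SIZE))) {
    LogEntry entry;
    while ((entry = entryReader.readLogEntry(channel)) != null) {
        System.out.println(entry); // each decoded entry, e.g. a LogEntryInlinedCheckPoint
    }
    // After the loop, lastPosition() may already point into the next log version
    // if the channel switched files while reading ahead.
    System.out.println(entryReader.lastPosition());
}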
use of org.neo4j.kernel.impl.transaction.log.files.LogFile in project neo4j by neo4j.
In the class BatchingTransactionAppenderConcurrencyTest, the method databasePanicShouldHandleOutOfMemoryErrors:
@Test
void databasePanicShouldHandleOutOfMemoryErrors() throws IOException, InterruptedException {
    final CountDownLatch panicLatch = new CountDownLatch(1);
    final CountDownLatch adversaryLatch = new CountDownLatch(1);
    OutOfMemoryAwareFileSystem fs = new OutOfMemoryAwareFileSystem();
    life.add(new FileSystemLifecycleAdapter(fs));
    DatabaseHealth slowPanicDatabaseHealth = new SlowPanickingDatabaseHealth(panicLatch, adversaryLatch);
    LogFiles logFiles = LogFilesBuilder.builder(databaseLayout, fs)
            .withLogVersionRepository(logVersionRepository)
            .withTransactionIdStore(transactionIdStore)
            .withDatabaseHealth(slowPanicDatabaseHealth)
            .withLogEntryReader(new VersionAwareLogEntryReader(new TestCommandReaderFactory()))
            .withStoreId(StoreId.UNKNOWN)
            .build();
    life.add(logFiles);
    final BatchingTransactionAppender appender = life.add(
            new BatchingTransactionAppender(logFiles, logRotation, transactionMetadataCache, transactionIdStore, slowPanicDatabaseHealth));
    life.start();
    // Commit an initial transaction
    appender.append(tx(), LogAppendEvent.NULL);
    // Try to commit one transaction; it will fail during flush with an OOM but will not actually panic yet
    fs.shouldOOM = true;
    Future<Long> failingTransaction = executor.submit(() -> appender.append(tx(), LogAppendEvent.NULL));
    panicLatch.await();
    // Try to commit one additional transaction; it should fail since the database has already panicked
    fs.shouldOOM = false;
    var e = assertThrows(IOException.class, () -> appender.append(tx(), new LogAppendEvent.Empty() {
        @Override
        public LogForceWaitEvent beginLogForceWait() {
            adversaryLatch.countDown();
            return super.beginLogForceWait();
        }
    }));
    assertThat(e).hasMessageContaining("The database has encountered a critical error");
    // Check that we actually got an OutOfMemoryError
    var executionException = assertThrows(ExecutionException.class, failingTransaction::get);
    assertThat(executionException).hasCauseInstanceOf(OutOfMemoryError.class);
    // Check the number of committed transactions; there should be exactly one
    LogEntryReader logEntryReader = new VersionAwareLogEntryReader(new TestCommandReaderFactory());
    LogFile logFile = logFiles.getLogFile();
    assertThat(logFile.getLowestLogVersion()).isEqualTo(logFile.getHighestLogVersion());
    long version = logFile.getHighestLogVersion();
    try (LogVersionedStoreChannel channel = logFile.openForVersion(version);
            ReadAheadLogChannel readAheadLogChannel = new ReadAheadLogChannel(channel, INSTANCE);
            LogEntryCursor cursor = new LogEntryCursor(logEntryReader, readAheadLogChannel)) {
        LogEntry entry;
        long numberOfTransactions = 0;
        while (cursor.next()) {
            entry = cursor.get();
            if (entry instanceof LogEntryCommit) {
                numberOfTransactions++;
            }
        }
        assertThat(numberOfTransactions).isEqualTo(1L);
    }
}
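The tail of this test is a generally useful pattern: scan a single log version and count LogEntryCommit records. Factored into a helper (a sketch only, assuming the same imports and the statically imported INSTANCE memory tracker used by the test above), it could look like this:

// Sketch: count committed transactions in one transaction log version.
private static long countCommittedTransactions(LogFile logFile, long version) throws IOException {
    LogEntryReader reader = new VersionAwareLogEntryReader(new TestCommandReaderFactory());
    long commits = 0;
    try (LogVersionedStoreChannel channel = logFile.openForVersion(version);
            ReadAheadLogChannel readAhead = new ReadAheadLogChannel(channel, INSTANCE);
            LogEntryCursor cursor = new LogEntryCursor(reader, readAhead)) {
        while (cursor.next()) {
            if (cursor.get() instanceof LogEntryCommit) {
                commits++;
            }
        }
    }
    return commits;
}

With that helper, the final assertion collapses to assertThat(countCommittedTransactions(logFile, logFile.getHighestLogVersion())).isEqualTo(1L).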
use of org.neo4j.kernel.impl.transaction.log.files.LogFile in project neo4j by neo4j.
In the class BatchingTransactionAppenderRotationIT, the method correctLastAppliedToPreviousLogTransactionInHeaderOnLogFileRotation:
@Test
void correctLastAppliedToPreviousLogTransactionInHeaderOnLogFileRotation() throws IOException {
    LogFiles logFiles = getLogFiles(logVersionRepository, transactionIdStore);
    life.add(logFiles);
    Health databaseHealth = getDatabaseHealth();
    LogRotation logRotation = FileLogRotation.transactionLogRotation(
            logFiles, Clock.systemUTC(), databaseHealth, monitors.newMonitor(LogRotationMonitor.class));
    TransactionMetadataCache transactionMetadataCache = new TransactionMetadataCache();
    BatchingTransactionAppender transactionAppender = new BatchingTransactionAppender(
            logFiles, logRotation, transactionMetadataCache, transactionIdStore, databaseHealth);
    life.add(transactionAppender);
    LogAppendEvent logAppendEvent = new RotationLogAppendEvent(logRotation);
    TransactionToApply transactionToApply = prepareTransaction();
    transactionAppender.append(transactionToApply, logAppendEvent);
    LogFile logFile = logFiles.getLogFile();
    assertEquals(1, logFile.getHighestLogVersion());
    Path highestLogFile = logFile.getHighestLogFile();
    LogHeader logHeader = LogHeaderReader.readLogHeader(fileSystem, highestLogFile, INSTANCE);
    assertEquals(2, logHeader.getLastCommittedTxId());
}
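The same header bookkeeping can be observed with an explicit rotation rather than a RotationLogAppendEvent. A sketch, assuming the fixtures from this test class (fileSystem, the statically imported INSTANCE memory tracker) and at least one already-appended transaction; the printout is illustrative only:

LogFile logFile = logFiles.getLogFile();
logFile.rotate(); // force a rotation by hand
Path newest = logFile.getHighestLogFile();
LogHeader header = LogHeaderReader.readLogHeader(fileSystem, newest, INSTANCE);
// The header of the newly created file records the last transaction id
// that was appended to the previous log file.
System.out.println(header.getLastCommittedTxId());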
use of org.neo4j.kernel.impl.transaction.log.files.LogFile in project neo4j by neo4j.
In the class TransactionLogFileTest, the method shouldVisitLogFile:
@Test
void shouldVisitLogFile() throws Exception {
    // GIVEN
    LogFiles logFiles = buildLogFiles();
    life.start();
    life.add(logFiles);
    LogFile logFile = logFiles.getLogFile();
    var transactionLogWriter = logFile.getTransactionLogWriter();
    var writer = transactionLogWriter.getChannel();
    LogPosition position = transactionLogWriter.getCurrentPosition();
    for (int i = 0; i < 5; i++) {
        writer.put((byte) i);
    }
    logFile.flush();
    // WHEN/THEN
    final AtomicBoolean called = new AtomicBoolean();
    logFile.accept(channel -> {
        for (int i = 0; i < 5; i++) {
            assertEquals((byte) i, channel.get());
        }
        called.set(true);
        return true;
    }, position);
    assertTrue(called.get());
}
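The visitor passed to LogFile.accept is only one way to read those bytes back; the same captured position can also be handed to LogFile.getReader, as the checkpoint-reading test earlier on this page does. A sketch, reusing logFile and position from the test above:

// Read the five bytes back through a ReadableLogChannel instead of a visitor.
try (ReadableLogChannel channel = logFile.getReader(position)) {
    for (int i = 0; i < 5; i++) {
        assertEquals((byte) i, channel.get());
    }
}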
use of org.neo4j.kernel.impl.transaction.log.files.LogFile in project neo4j by neo4j.
In the class TransactionLogFileTest, the method shouldForceLogChannel:
@Test
void shouldForceLogChannel() throws Throwable {
    LogFiles logFiles = buildLogFiles();
    life.start();
    life.add(logFiles);
    LogFile logFile = logFiles.getLogFile();
    var capturingChannel = wrappingFileSystem.getCapturingChannel();
    var flushesBefore = capturingChannel.getFlushCounter().get();
    var writesBefore = capturingChannel.getWriteAllCounter().get();
    logFile.forceAfterAppend(LogAppendEvent.NULL);
    assertEquals(1, capturingChannel.getFlushCounter().get() - flushesBefore);
    assertEquals(1, capturingChannel.getWriteAllCounter().get() - writesBefore);
}
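The before/after counter bookkeeping is easy to reuse. A sketch of a small assertion helper built on the same CapturingChannel fixture; ThrowingAction is assumed to be org.neo4j.function.ThrowingAction, and the exactly-once expectation simply mirrors the test above:

// Sketch: assert that an action flushes and writes through the channel exactly once.
static void assertForcedExactlyOnce(CapturingChannel channel, ThrowingAction<IOException> action) throws IOException {
    var flushesBefore = channel.getFlushCounter().get();
    var writesBefore = channel.getWriteAllCounter().get();
    action.apply();
    assertEquals(1, channel.getFlushCounter().get() - flushesBefore);
    assertEquals(1, channel.getWriteAllCounter().get() - writesBefore);
}

Usage: assertForcedExactlyOnce(capturingChannel, () -> logFile.forceAfterAppend(LogAppendEvent.NULL));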