Use of org.neo4j.kernel.impl.transaction.log.entry.VersionAwareLogEntryReader in project neo4j by neo4j.
The class UpgradableDatabase, method checkCleanShutDownByCheckPoint:
private Result checkCleanShutDownByCheckPoint(File storeDirectory) {
    // Find the latest checkpoint in the highest transaction log: a cleanly
    // shut-down store has no commits after its last checkpoint.
    PhysicalLogFiles logFiles = new PhysicalLogFiles(storeDirectory, fs);
    LogEntryReader<ReadableClosablePositionAwareChannel> logEntryReader = new VersionAwareLogEntryReader<>();
    LatestCheckPointFinder latestCheckPointFinder = new LatestCheckPointFinder(logFiles, fs, logEntryReader);
    try {
        LatestCheckPoint latestCheckPoint = latestCheckPointFinder.find(logFiles.getHighestLogVersion());
        if (!latestCheckPoint.commitsAfterCheckPoint) {
            return new Result(Result.Outcome.ok, null, null);
        }
    } catch (IOException e) {
        // Ignore the exception and report the store as not cleanly shut down.
    }
    return new Result(Result.Outcome.storeNotCleanlyShutDown, null, null);
}
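The reader itself does no checkpoint logic here; it only decodes log entries so that LatestCheckPointFinder can locate the newest checkpoint. A caller of this check might look like the sketch below. The `result.outcome` access mirrors the Result(Outcome, ...) constructor in the snippet above, but the surrounding logic is a hypothetical illustration, not Neo4j source:

    // Illustrative sketch only: consuming the clean-shutdown check above.
    Result result = checkCleanShutDownByCheckPoint(storeDirectory);
    if (result.outcome != Result.Outcome.ok) {
        // There are commits not covered by a checkpoint, so recovery is still
        // pending; refuse to proceed until the database has been started and
        // cleanly stopped once.
        throw new IllegalStateException("Store was not cleanly shut down");
    }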
Use of org.neo4j.kernel.impl.transaction.log.entry.VersionAwareLogEntryReader in project neo4j by neo4j.
The class ProtocolTest, method shouldSerializeAndDeserializeTransactionRepresentation:
@Test
public void shouldSerializeAndDeserializeTransactionRepresentation() throws Exception {
    // GIVEN
    PhysicalTransactionRepresentation transaction = new PhysicalTransactionRepresentation(justOneNode());
    byte[] additionalHeader = "extra".getBytes();
    int masterId = 1, authorId = 2;
    long timeStarted = 12345, lastTxWhenStarted = 12, timeCommitted = timeStarted + 10;
    transaction.setHeader(additionalHeader, masterId, authorId, timeStarted, lastTxWhenStarted, timeCommitted, -1);
    Protocol.TransactionSerializer serializer = new Protocol.TransactionSerializer(transaction);
    ChannelBuffer buffer = new ChannelBufferWrapper(new InMemoryClosableChannel());

    // WHEN serializing the transaction
    serializer.write(buffer);

    // THEN deserializing the same transaction should yield the same data.
    // Remember that this deserializer doesn't read the data source name string, so read it manually here.
    assertEquals(NeoStoreDataSource.DEFAULT_DATA_SOURCE_NAME, Protocol.readString(buffer));
    VersionAwareLogEntryReader<ReadableClosablePositionAwareChannel> reader = new VersionAwareLogEntryReader<>();
    TransactionRepresentation readTransaction =
            new Protocol.TransactionRepresentationDeserializer(reader).read(buffer, ByteBuffer.allocate(1000));
    assertArrayEquals(additionalHeader, readTransaction.additionalHeader());
    assertEquals(masterId, readTransaction.getMasterId());
    assertEquals(authorId, readTransaction.getAuthorId());
    assertEquals(timeStarted, readTransaction.getTimeStarted());
    assertEquals(lastTxWhenStarted, readTransaction.getLatestCommittedTxWhenStarted());
    assertEquals(timeCommitted, readTransaction.getTimeCommitted());
}
Use of org.neo4j.kernel.impl.transaction.log.entry.VersionAwareLogEntryReader in project neo4j by neo4j.
The class LogsUpgrader, method buildLogFiles:
private LogFiles buildLogFiles(DatabaseLayout layout) {
    final LogEntryReader logEntryReader = new VersionAwareLogEntryReader(storageEngineFactory.commandReaderFactory());
    try {
        return LogFilesBuilder.builder(layout, fs)
                .withLogEntryReader(logEntryReader)
                .withConfig(config)
                .withMemoryTracker(memoryTracker)
                .withDatabaseHealth(databaseHealth)
                .withDependencies(dependencyResolver)
                .build();
    } catch (IOException e) {
        throw new RuntimeException(e);
    }
}
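Worth noting across these snippets is the constructor change: the 3.x-era examples above instantiate the reader with no arguments and a channel type parameter, while this 4.x-era code passes the storage engine's CommandReaderFactory so the reader can decode engine-specific commands inside each entry. A side-by-side sketch, using only types that already appear on this page:

    // Neo4j 3.x style (as in UpgradableDatabase and ProtocolTest above):
    // generic in the channel type, no constructor arguments.
    LogEntryReader<ReadableClosablePositionAwareChannel> oldStyle = new VersionAwareLogEntryReader<>();

    // Neo4j 4.x style (as in LogsUpgrader here): the command reader factory
    // tells the log entry reader how to decode storage-engine-specific commands.
    LogEntryReader newStyle = new VersionAwareLogEntryReader(storageEngineFactory.commandReaderFactory());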
Use of org.neo4j.kernel.impl.transaction.log.entry.VersionAwareLogEntryReader in project neo4j by neo4j.
The class BatchingTransactionAppenderConcurrencyTest, method databasePanicShouldHandleOutOfMemoryErrors:
@Test
void databasePanicShouldHandleOutOfMemoryErrors() throws IOException, InterruptedException {
    final CountDownLatch panicLatch = new CountDownLatch(1);
    final CountDownLatch adversaryLatch = new CountDownLatch(1);
    OutOfMemoryAwareFileSystem fs = new OutOfMemoryAwareFileSystem();
    life.add(new FileSystemLifecycleAdapter(fs));
    DatabaseHealth slowPanicDatabaseHealth = new SlowPanickingDatabaseHealth(panicLatch, adversaryLatch);
    LogFiles logFiles = LogFilesBuilder.builder(databaseLayout, fs)
            .withLogVersionRepository(logVersionRepository)
            .withTransactionIdStore(transactionIdStore)
            .withDatabaseHealth(slowPanicDatabaseHealth)
            .withLogEntryReader(new VersionAwareLogEntryReader(new TestCommandReaderFactory()))
            .withStoreId(StoreId.UNKNOWN)
            .build();
    life.add(logFiles);
    final BatchingTransactionAppender appender = life.add(new BatchingTransactionAppender(
            logFiles, logRotation, transactionMetadataCache, transactionIdStore, slowPanicDatabaseHealth));
    life.start();

    // Commit an initial transaction
    appender.append(tx(), LogAppendEvent.NULL);

    // Try to commit one transaction; it will fail during flush with OOM, but not actually panic yet
    fs.shouldOOM = true;
    Future<Long> failingTransaction = executor.submit(() -> appender.append(tx(), LogAppendEvent.NULL));
    panicLatch.await();

    // Try to commit one additional transaction; it should fail since the database has already panicked
    fs.shouldOOM = false;
    var e = assertThrows(IOException.class, () -> appender.append(tx(), new LogAppendEvent.Empty() {
        @Override
        public LogForceWaitEvent beginLogForceWait() {
            adversaryLatch.countDown();
            return super.beginLogForceWait();
        }
    }));
    assertThat(e).hasMessageContaining("The database has encountered a critical error");

    // Check that we actually got an OutOfMemoryError
    var executionException = assertThrows(ExecutionException.class, failingTransaction::get);
    assertThat(executionException).hasCauseInstanceOf(OutOfMemoryError.class);

    // Check the number of transactions; only the initial one should have made it into the log
    LogEntryReader logEntryReader = new VersionAwareLogEntryReader(new TestCommandReaderFactory());
    LogFile logFile = logFiles.getLogFile();
    assertThat(logFile.getLowestLogVersion()).isEqualTo(logFile.getHighestLogVersion());
    long version = logFile.getHighestLogVersion();
    try (LogVersionedStoreChannel channel = logFile.openForVersion(version);
            ReadAheadLogChannel readAheadLogChannel = new ReadAheadLogChannel(channel, INSTANCE);
            LogEntryCursor cursor = new LogEntryCursor(logEntryReader, readAheadLogChannel)) {
        long numberOfTransactions = 0;
        while (cursor.next()) {
            LogEntry entry = cursor.get();
            if (entry instanceof LogEntryCommit) {
                numberOfTransactions++;
            }
        }
        assertThat(numberOfTransactions).isEqualTo(1L);
    }
}
Use of org.neo4j.kernel.impl.transaction.log.entry.VersionAwareLogEntryReader in project neo4j by neo4j.
The class BatchingTransactionAppenderConcurrencyTest, method shouldHaveAllConcurrentAppendersSeePanic:
/*
 * There was an issue where multiple concurrent appending threads would append and then move on
 * to await a force. If the force failed and the thread doing the force raised a panic, the
 * other threads might not notice the panic, move on to mark those transactions as committed,
 * and only notice the panic later (which would be too late).
 */
@Test
void shouldHaveAllConcurrentAppendersSeePanic() throws Throwable {
    // GIVEN
    Adversary adversary = new ClassGuardedAdversary(new CountingAdversary(1, true),
            failMethod(TransactionLogFile.class, "force"));
    EphemeralFileSystemAbstraction efs = new EphemeralFileSystemAbstraction();
    FileSystemAbstraction fs = new AdversarialFileSystemAbstraction(adversary, efs);
    life.add(new FileSystemLifecycleAdapter(fs));
    DatabaseHealth databaseHealth = new DatabaseHealth(mock(DatabasePanicEventGenerator.class), NullLog.getInstance());
    LogFiles logFiles = LogFilesBuilder.builder(databaseLayout, fs)
            .withLogVersionRepository(logVersionRepository)
            .withTransactionIdStore(transactionIdStore)
            .withDatabaseHealth(databaseHealth)
            .withLogEntryReader(new VersionAwareLogEntryReader(new TestCommandReaderFactory()))
            .withStoreId(StoreId.UNKNOWN)
            .build();
    life.add(logFiles);
    final BatchingTransactionAppender appender = life.add(new BatchingTransactionAppender(
            logFiles, logRotation, transactionMetadataCache, transactionIdStore, databaseHealth));
    life.start();

    // WHEN
    int numberOfAppenders = 10;
    final CountDownLatch trap = new CountDownLatch(numberOfAppenders);
    final LogAppendEvent beforeForceTrappingEvent = new LogAppendEvent.Empty() {
        @Override
        public LogForceWaitEvent beginLogForceWait() {
            trap.countDown();
            awaitLatch(trap);
            return super.beginLogForceWait();
        }
    };
    Race race = new Race();
    for (int i = 0; i < numberOfAppenders; i++) {
        race.addContestant(() -> {
            // This test uses an adversarial file system which will throw an exception
            // in LogFile#force. Since all these transactions will append and be forced
            // in the same batch, and that force will fail, all of them should fail.
            // Any transaction that doesn't fail simply didn't notice the panic, which
            // would be potentially hazardous.
            assertThrows(IOException.class, () -> appender.append(tx(), beforeForceTrappingEvent));
        });
    }

    // THEN perform the race. The relevant assertions are made inside the contestants.
    race.go();
}
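Taken together, the tests above share one consumption pattern for VersionAwareLogEntryReader: open a log version, wrap the channel in a ReadAheadLogChannel, and drive a LogEntryCursor with the reader. A condensed sketch of that loop, assuming the same logFiles, TestCommandReaderFactory, and memory-tracker INSTANCE as in databasePanicShouldHandleOutOfMemoryErrors above:

    // Condensed sketch of the log-scanning pattern used by the tests above.
    LogEntryReader reader = new VersionAwareLogEntryReader(new TestCommandReaderFactory());
    LogFile logFile = logFiles.getLogFile();
    try (LogVersionedStoreChannel channel = logFile.openForVersion(logFile.getHighestLogVersion());
            ReadAheadLogChannel readChannel = new ReadAheadLogChannel(channel, INSTANCE);
            LogEntryCursor cursor = new LogEntryCursor(reader, readChannel)) {
        while (cursor.next()) {
            LogEntry entry = cursor.get();
            // Each LogEntryCommit marks the end of one transaction in the log.
            if (entry instanceof LogEntryCommit) {
                System.out.println("commit entry: " + entry);
            }
        }
    }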