Use of org.neo4j.kernel.impl.transaction.DeadSimpleLogVersionRepository in project neo4j by neo4j.
From the class BatchingTransactionAppenderConcurrencyTest, method shouldHaveAllConcurrentAppendersSeePanic.
/*
* There was an issue where if multiple concurrent appending threads did append and they moved on
* to await a force, where the force would fail and the one doing the force would raise a panic...
* the other threads may not notice the panic and move on to mark those transactions as committed
* and notice the panic later (which would be too late).
*/
@Test
public void shouldHaveAllConcurrentAppendersSeePanic() throws Throwable {
// GIVEN
// An adversary that makes exactly the first invocation of BatchingTransactionAppender#force throw.
Adversary adversary = new ClassGuardedAdversary(new CountingAdversary(1, true), failMethod(BatchingTransactionAppender.class, "force"));
EphemeralFileSystemAbstraction efs = new EphemeralFileSystemAbstraction();
File directory = new File("dir").getCanonicalFile();
efs.mkdirs(directory);
// Wrap the in-memory file system so the adversary can inject the force failure.
FileSystemAbstraction fs = new AdversarialFileSystemAbstraction(adversary, efs);
life.add(new FileSystemLifecycleAdapter(fs));
DatabaseHealth databaseHealth = new DatabaseHealth(mock(DatabasePanicEventGenerator.class), NullLog.getInstance());
PhysicalLogFiles logFiles = new PhysicalLogFiles(directory, fs);
LogFile logFile = life.add(new PhysicalLogFile(fs, logFiles, kibiBytes(10), transactionIdStore::getLastCommittedTransactionId, new DeadSimpleLogVersionRepository(0), new PhysicalLogFile.Monitor.Adapter(), logHeaderCache));
final BatchingTransactionAppender appender = life.add(new BatchingTransactionAppender(logFile, logRotation, transactionMetadataCache, transactionIdStore, legacyIndexTransactionOrdering, databaseHealth));
life.start();
// WHEN
int numberOfAppenders = 10;
final CountDownLatch trap = new CountDownLatch(numberOfAppenders);
// Event hook that blocks every appender right before the force, so that all of them
// end up awaiting the same force batch before the (failing) force is attempted.
final LogAppendEvent beforeForceTrappingEvent = new LogAppendEvent.Empty() {
@Override
public LogForceWaitEvent beginLogForceWait() {
trap.countDown();
awaitLatch(trap);
return super.beginLogForceWait();
}
};
Race race = new Race();
for (int i = 0; i < numberOfAppenders; i++) {
race.addContestant(() -> {
try {
// Append to the log, the LogAppenderEvent will have all of the appending threads
// do wait for all of the other threads to start the force thing
appender.append(tx(), beforeForceTrappingEvent);
fail("No transaction should be considered appended");
} catch (IOException e) {
// Good, we know that this test uses an adversarial file system which will throw
// an exception in BatchingTransactionAppender#force, and since all these transactions
// will append and be forced in the same batch, where the force will fail then
// all these transactions should fail. If there's any transaction not failing then
// it just didn't notice the panic, which would be potentially hazardous.
}
});
}
// THEN perform the race. The relevant assertions are made inside the contestants.
race.go();
}
Use of org.neo4j.kernel.impl.transaction.DeadSimpleLogVersionRepository in project neo4j by neo4j.
From the class PhysicalLogFileRotateAndReadRaceIT, method shouldNotSeeEmptyLogFileWhenReadingTransactionStream.
// Races a writer thread (which appends and rotates the log) against a reader thread that
// repeatedly opens the log at the start of the newest file; a reader must never observe a
// freshly rotated, still-empty log file. Passing without an exception is the assertion.
@Test
public void shouldNotSeeEmptyLogFileWhenReadingTransactionStream() throws Exception {
// GIVEN
PhysicalLogFiles logFiles = new PhysicalLogFiles(directory.directory(), fileSystemRule.get());
LogVersionRepository logVersionRepository = new DeadSimpleLogVersionRepository(0);
PhysicalLogFile.Monitor monitor = mock(PhysicalLogFile.Monitor.class);
LogHeaderCache headerCache = new LogHeaderCache(10);
// Tiny (1 KiB) rotation threshold so rotations happen frequently during the race.
PhysicalLogFile logFile = life.add(new PhysicalLogFile(fileSystemRule.get(), logFiles, kibiBytes(1), () -> 2L, logVersionRepository, monitor, headerCache));
FlushablePositionAwareChannel writer = logFile.getWriter();
LogPositionMarker startPosition = new LogPositionMarker();
writer.getCurrentPosition(startPosition);
// WHEN
AtomicBoolean end = new AtomicBoolean();
byte[] dataChunk = new byte[100];
// one thread constantly writing to and rotating the channel
AtomicInteger rotations = new AtomicInteger();
CountDownLatch startSignal = new CountDownLatch(1);
Future<Void> writeFuture = t2.execute(ignored -> {
ThreadLocalRandom random = ThreadLocalRandom.current();
startSignal.countDown();
while (!end.get()) {
// Write a random-sized chunk; rotate as soon as the file grows past the threshold.
writer.put(dataChunk, random.nextInt(1, dataChunk.length));
if (logFile.rotationNeeded()) {
logFile.rotate();
// Record where the new (post-rotation) log file begins, so the reader below
// always opens the most recently rotated file.
writer.getCurrentPosition(startPosition);
rotations.incrementAndGet();
}
}
return null;
});
assertTrue(startSignal.await(10, SECONDS));
// one thread reading through the channel
long maxEndTime = currentTimeMillis() + LIMIT_TIME;
int reads = 0;
try {
// Keep reading until any of the three limits (wall clock, read count, rotation count) trips.
for (; currentTimeMillis() < maxEndTime && reads < LIMIT_READS && rotations.get() < LIMIT_ROTATIONS; reads++) {
try (ReadableLogChannel reader = logFile.getReader(startPosition.newPosition())) {
deplete(reader);
}
}
} finally {
end.set(true);
writeFuture.get();
}
// THEN simply getting here means this was successful
}
Use of org.neo4j.kernel.impl.transaction.DeadSimpleLogVersionRepository in project neo4j by neo4j.
From the class TransactionLogAppendAndRotateIT, method shouldKeepTransactionsIntactWhenConcurrentlyRotationAndAppending.
// Hammers a BatchingTransactionAppender from ten threads while the log rotates underneath
// them, verifying via AllTheMonitoring that at least one rotation actually happened and
// that no append ever fails.
@Test
public void shouldKeepTransactionsIntactWhenConcurrentlyRotationAndAppending() throws Throwable {
    // GIVEN a physical log file rotating at 1 MiB, with an appender wired on top of it
    PhysicalLogFiles files = new PhysicalLogFiles(directory.directory().getAbsoluteFile(), fileSystemRule.get());
    long rotateAtSize = mebiBytes(1);
    LogVersionRepository versionRepository = new DeadSimpleLogVersionRepository(0);
    final AtomicBoolean end = new AtomicBoolean();
    AllTheMonitoring monitor = new AllTheMonitoring(end, 100);
    TransactionIdStore idStore = new DeadSimpleTransactionIdStore();
    TransactionMetadataCache txMetadataCache = new TransactionMetadataCache(100);
    LogHeaderCache headerCache = new LogHeaderCache(10);
    LogFile logFile = life.add(new PhysicalLogFile(fileSystemRule.get(), files, rotateAtSize, idStore::getLastCommittedTransactionId, versionRepository, monitor, headerCache));
    monitor.setLogFile(logFile);
    DatabaseHealth databaseHealth = new DatabaseHealth(mock(DatabasePanicEventGenerator.class), NullLog.getInstance());
    LogRotation logRotation = new LogRotationImpl(monitor, logFile, databaseHealth);
    final TransactionAppender appender = life.add(new BatchingTransactionAppender(logFile, logRotation, txMetadataCache, idStore, BYPASS, databaseHealth));
    // WHEN ten contestants append transactions until told to stop
    Race race = new Race();
    for (int i = 0; i < 10; i++) {
        race.addContestant(() -> {
            while (!end.get()) {
                try {
                    appender.append(new TransactionToApply(sillyTransaction(1_000)), NULL);
                } catch (Exception e) {
                    // Abort the whole race on the first failure and fail the test.
                    e.printStackTrace(System.out);
                    end.set(true);
                    fail(e.getMessage());
                }
            }
        });
    }
    // One extra contestant flips the end flag after at most 10 seconds.
    race.addContestant(endAfterMax(10, SECONDS, end));
    race.go();
    // THEN the log must actually have rotated while appends were in flight
    assertTrue(monitoring.numberOfRotations() > 0);
}
Use of org.neo4j.kernel.impl.transaction.DeadSimpleLogVersionRepository in project neo4j by neo4j.
From the class Runner, method createPhysicalLogFile.
/**
 * Creates a {@link PhysicalLogFile} over {@code workingDirectory}, rotating at the product
 * default logical-log rotation threshold and resolving the last committed transaction id
 * from the given store.
 *
 * @param transactionIdStore source of the last committed transaction id.
 * @param logHeaderCache cache handed to the log file for header lookups.
 * @param fileSystemAbstraction file system the log file lives on.
 * @return a new, not yet started, physical log file.
 */
private PhysicalLogFile createPhysicalLogFile(TransactionIdStore transactionIdStore, LogHeaderCache logHeaderCache, FileSystemAbstraction fileSystemAbstraction) {
    // Default rotation threshold, parsed from the setting's default value string.
    long rotationThreshold = Settings.BYTES.apply(GraphDatabaseSettings.logical_log_rotation_threshold.getDefaultValue());
    return new PhysicalLogFile(fileSystemAbstraction, new PhysicalLogFiles(workingDirectory, fileSystemAbstraction), rotationThreshold, transactionIdStore::getLastCommittedTransactionId, new DeadSimpleLogVersionRepository(0), PhysicalLogFile.NO_MONITOR, logHeaderCache);
}
Use of org.neo4j.kernel.impl.transaction.DeadSimpleLogVersionRepository in project neo4j by neo4j.
From the class LatestCheckPointFinderTest, method logFile.
/**
 * Returns a {@link LogCreator} that materializes the given logical entries into a physical
 * log file of the requested version, recording the log position of every entry in the
 * supplied positions map so later entries (check points) can refer back to them.
 *
 * @param entries entries to write, in order.
 */
private LogCreator logFile(Entry... entries) {
    return (logVersion, positions) -> {
        try {
            AtomicLong highestTxId = new AtomicLong();
            Supplier<Long> lastCommittedTxId = highestTxId::get;
            LogVersionRepository versionRepository = new DeadSimpleLogVersionRepository(logVersion);
            LifeSupport life = new LifeSupport();
            // Start the life first so the log file below starts as soon as it is added.
            life.start();
            PhysicalLogFile logFile = life.add(new PhysicalLogFile(fsRule.get(), logFiles, mebiBytes(1), lastCommittedTxId, versionRepository, NO_MONITOR, new LogHeaderCache(10)));
            try {
                FlushablePositionAwareChannel channel = logFile.getWriter();
                LogPositionMarker marker = new LogPositionMarker();
                LogEntryWriter entryWriter = new LogEntryWriter(channel);
                for (Entry entry : entries) {
                    // Register where this entry begins before writing it, so later
                    // entries can point back at it.
                    LogPosition entryPosition = channel.getCurrentPosition(marker).newPosition();
                    positions.put(entry, entryPosition);
                    if (entry instanceof StartEntry) {
                        entryWriter.writeStartEntry(0, 0, 0, 0, new byte[0]);
                    } else if (entry instanceof CommitEntry) {
                        long txId = ((CommitEntry) entry).txId;
                        entryWriter.writeCommitEntry(txId, 0);
                        highestTxId.set(txId);
                    } else if (entry instanceof CheckPointEntry) {
                        Entry referenced = ((CheckPointEntry) entry).withPositionOfEntry;
                        // A check point either refers back to a previous entry, or to itself.
                        LogPosition checkPointedPosition = referenced == null ? entryPosition : positions.get(referenced);
                        assert checkPointedPosition != null : "No registered log position for " + referenced;
                        entryWriter.writeCheckPointEntry(checkPointedPosition);
                    } else if (entry instanceof PositionEntry) {
                        // Nothing is written; a PositionEntry exists only so that a later
                        // CheckPointEntry can refer to its position.
                    } else {
                        throw new IllegalArgumentException("Unknown entry " + entry);
                    }
                }
            } finally {
                life.shutdown();
            }
        } catch (IOException e) {
            throw new UncheckedIOException(e);
        }
    };
}
Aggregations