Use of org.neo4j.causalclustering.core.state.machines.tx.ReplicatedTransaction in project neo4j by neo4j.
The class SegmentedRaftLogPartialEntryRecoveryTest, method incompleteEntriesAtTheEndShouldNotCauseFailures.
@Test
public void incompleteEntriesAtTheEndShouldNotCauseFailures() throws Throwable {
    // Given
    // we use a RaftLog to create a raft log file and then we will start chopping bits off from the end
    SegmentedRaftLog raftLog = createRaftLog(100_000);
    raftLog.start();

    // Add a bunch of entries, preferably one of each available kind.
    raftLog.append(new RaftLogEntry(4, new NewLeaderBarrier()));
    raftLog.append(new RaftLogEntry(4, new ReplicatedIdAllocationRequest(new MemberId(UUID.randomUUID()), IdType.RELATIONSHIP, 1, 1024)));
    raftLog.append(new RaftLogEntry(4, new ReplicatedIdAllocationRequest(new MemberId(UUID.randomUUID()), IdType.RELATIONSHIP, 1025, 1024)));
    raftLog.append(new RaftLogEntry(4, new ReplicatedLockTokenRequest(new MemberId(UUID.randomUUID()), 1)));
    raftLog.append(new RaftLogEntry(4, new NewLeaderBarrier()));
    raftLog.append(new RaftLogEntry(5, new ReplicatedTokenRequest(TokenType.LABEL, "labelToken", new byte[] { 1, 2, 3 })));
    raftLog.append(new RaftLogEntry(5, new ReplicatedTransaction(new byte[] { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10 })));
    raftLog.stop();

    // We use a temporary RecoveryProtocol to get the file to chop
    RecoveryProtocol recovery = createRecoveryProtocol();
    State recoveryState = recovery.run();
    String logFilename = recoveryState.segments.last().getFilename();
    File logFile = new File(logDirectory, logFilename);

    // When
    // We remove any number of bytes from the end (up to but not including the header) and try to recover
    // Then
    // No exceptions should be thrown
    truncateAndRecover(logFile, SegmentHeader.SIZE);
}
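The truncateAndRecover helper is not part of the excerpt above. Below is a minimal sketch of what such a helper might do, assuming plain java.io.RandomAccessFile for the truncation; the real test would go through its FileSystemAbstraction, and createRecoveryProtocol() and SegmentHeader.SIZE are the test's own members.

// Hypothetical sketch: repeatedly chop the last byte off the segment file and
// re-run recovery, expecting it to tolerate the partially written tail entry.
private void truncateAndRecover(File logFile, long keepAtLeast) throws Throwable {
    for (long newSize = logFile.length() - 1; newSize >= keepAtLeast; newSize--) {
        try (RandomAccessFile file = new RandomAccessFile(logFile, "rw")) {
            file.setLength(newSize);        // drop one more trailing byte
        }
        createRecoveryProtocol().run();     // must not throw on the truncated file
    }
}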
Use of org.neo4j.causalclustering.core.state.machines.tx.ReplicatedTransaction in project neo4j by neo4j.
The class RaftContentByteBufferMarshalTest, method shouldSerializeTransactionRepresentation.
@Test
public void shouldSerializeTransactionRepresentation() throws Exception {
    // given
    CoreReplicatedContentMarshal serializer = new CoreReplicatedContentMarshal();
    Collection<StorageCommand> commands = new ArrayList<>();
    IndexCommand.AddNodeCommand addNodeCommand = new IndexCommand.AddNodeCommand();
    addNodeCommand.init(0, 0, 0, 0);
    commands.add(addNodeCommand);
    byte[] extraHeader = new byte[0];
    PhysicalTransactionRepresentation txIn = new PhysicalTransactionRepresentation(commands);
    txIn.setHeader(extraHeader, -1, -1, 0, 0, 0, 0);
    ReplicatedTransaction in = ReplicatedTransactionFactory.createImmutableReplicatedTransaction(txIn);

    // when
    ByteBuf buf = Unpooled.buffer();
    serializer.marshal(in, new NetworkFlushableByteBuf(buf));
    ReplicatedTransaction out = (ReplicatedTransaction) serializer.unmarshal(new NetworkReadableClosableChannelNetty4(buf));
    TransactionRepresentation txOut = ReplicatedTransactionFactory.extractTransactionRepresentation(out, extraHeader);

    // then
    assertEquals(in, out);
    assertEquals(txIn, txOut);
}
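For comparison, the same marshal/unmarshal calls also round-trip a ReplicatedTransaction built directly from raw bytes, as the recovery test above constructs them. A minimal sketch, assuming it sits inside a test method declared throws Exception and that ReplicatedTransaction equality compares the underlying transaction bytes (which the assertion above relies on):

// Round-trip a ReplicatedTransaction created straight from a byte array.
CoreReplicatedContentMarshal marshal = new CoreReplicatedContentMarshal();
ReplicatedTransaction original = new ReplicatedTransaction(new byte[] { 1, 2, 3, 4 });

ByteBuf buffer = Unpooled.buffer();
marshal.marshal(original, new NetworkFlushableByteBuf(buffer));    // write the wire format
ReplicatedTransaction restored = (ReplicatedTransaction) marshal.unmarshal(new NetworkReadableClosableChannelNetty4(buffer));

assertEquals(original, restored);                                  // content-based equality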
Use of org.neo4j.causalclustering.core.state.machines.tx.ReplicatedTransaction in project neo4j by neo4j.
The class RaftContentByteBufferMarshalTest, method txSerializationShouldNotResultInExcessZeroes.
@Test
public void txSerializationShouldNotResultInExcessZeroes() throws Exception {
    /*
     * This test ensures that the buffer used to serialize a transaction, and then to extract the byte array
     * for sending over the wire, is trimmed properly. Not doing so results in sending too much trailing
     * garbage (zeroes), which the other side ignores, since zeroes are interpreted as null entries by the
     * LogEntryReader and stop the deserialization process.
     * The test creates a single transaction which has just a header, no commands. That should amount to
     * 40 bytes, as ReplicatedTransactionFactory.TransactionSerializer.write() lays it out at the time of
     * this writing. If that code changes, this test will break.
     */
    byte[] extraHeader = new byte[0];
    PhysicalTransactionRepresentation txIn = new PhysicalTransactionRepresentation(new ArrayList<>());
    txIn.setHeader(extraHeader, -1, -1, 0, 0, 0, 0);

    // when
    ReplicatedTransaction in = ReplicatedTransactionFactory.createImmutableReplicatedTransaction(txIn);

    // then
    assertEquals(40, in.getTxBytes().length);
}
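The trimming the comment talks about can be illustrated with a plain Netty buffer, independent of the Neo4j classes. This is a standalone sketch, not the actual ReplicatedTransactionFactory code:

import io.netty.buffer.ByteBuf;
import io.netty.buffer.Unpooled;
import java.util.Arrays;

public class TrimIllustration {
    public static void main(String[] args) {
        ByteBuf buffer = Unpooled.buffer(64);          // heap buffer with a 64-byte backing array
        for (int i = 0; i < 40; i++) {
            buffer.writeByte(i);                       // only 40 bytes are actually written
        }
        byte[] untrimmed = buffer.array();                                     // 64 bytes, 24 trailing zeroes
        byte[] trimmed = Arrays.copyOf(buffer.array(), buffer.writerIndex());  // exactly the 40 written bytes
        System.out.println(untrimmed.length + " untrimmed vs " + trimmed.length + " trimmed");
    }
}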
Use of org.neo4j.causalclustering.core.state.machines.tx.ReplicatedTransaction in project neo4j by neo4j.
The class ReplayRaftLog, method main.
public static void main(String[] args) throws IOException {
    Args arg = Args.parse(args);
    String from = arg.get("from");
    System.out.println("From is " + from);
    String to = arg.get("to");
    System.out.println("to is " + to);
    File logDirectory = new File(from);
    System.out.println("logDirectory = " + logDirectory);
    Config config = Config.embeddedDefaults(stringMap());
    try (DefaultFileSystemAbstraction fileSystem = new DefaultFileSystemAbstraction()) {
        LogProvider logProvider = getInstance();
        CoreLogPruningStrategy pruningStrategy = new CoreLogPruningStrategyFactory(config.get(raft_log_pruning_strategy), logProvider).newInstance();
        SegmentedRaftLog log = new SegmentedRaftLog(fileSystem, logDirectory, config.get(raft_log_rotation_size), new CoreReplicatedContentMarshal(), logProvider, config.get(raft_log_reader_pool_size), Clocks.systemClock(), new OnDemandJobScheduler(), pruningStrategy);
        // Not really the commit index, but there is no way to pass the real one in, so use the append index as an upper bound
        long totalCommittedEntries = log.appendIndex();
        for (int i = 0; i <= totalCommittedEntries; i++) {
            ReplicatedContent content = readLogEntry(log, i).content();
            if (content instanceof ReplicatedTransaction) {
                ReplicatedTransaction tx = (ReplicatedTransaction) content;
                ReplicatedTransactionFactory.extractTransactionRepresentation(tx, new byte[0]).accept(element -> {
                    System.out.println(element);
                    return false;
                });
            }
        }
    }
}
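A hypothetical variation of the loop above, printing every entry's content type and expanding only transactions into their storage commands; the names are the same ones used in the snippet, and returning false from the visitor keeps it iterating over the remaining commands, as the original does:

// Print an index and content type for every raft log entry, and dump the
// individual commands of any replicated transaction.
for (int i = 0; i <= totalCommittedEntries; i++) {
    ReplicatedContent content = readLogEntry(log, i).content();
    System.out.println(i + ": " + content.getClass().getSimpleName());
    if (content instanceof ReplicatedTransaction) {
        ReplicatedTransactionFactory
                .extractTransactionRepresentation((ReplicatedTransaction) content, new byte[0])
                .accept(command -> {
                    System.out.println("    " + command);
                    return false;   // false: keep visiting the remaining commands
                });
    }
}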