Usage example of org.apache.kafka.raft.OffsetAndEpoch in the Apache Kafka project: class SnapshotWriterReaderTest, method testWritingSnapshot.
@Test
public void testWritingSnapshot() throws Exception {
    int recordsPerBatch = 3;
    int batches = 3;
    // Two control batches delimit the snapshot (header + footer) — presumably
    // what validateDelimiters counts; confirm against its implementation.
    int delimiterCount = 2;
    // NOTE(review): 0xDEADBEEF is an int literal, so this widens to the
    // negative long -559038737L. The test only requires the same value to
    // round-trip through the snapshot, so the sign does not matter here.
    long magicTimestamp = 0xDEADBEEF;
    OffsetAndEpoch id = new OffsetAndEpoch(recordsPerBatch * batches, 3);
    List<List<String>> expected = buildRecords(recordsPerBatch, batches);

    // Seed the log with the records the snapshot is expected to cover.
    RaftClientTestContext.Builder contextBuilder = new RaftClientTestContext.Builder(localId, voters);
    for (List<String> batch : expected) {
        contextBuilder.appendToLog(id.epoch, batch);
    }
    RaftClientTestContext context = contextBuilder.build();

    // Wait until the local node becomes leader, then advance the high
    // watermark so the snapshot offset is covered.
    context.pollUntil(() -> context.currentLeader().equals(OptionalInt.of(localId)));
    context.advanceLocalLeaderHighWatermarkToLogEndOffset();

    // Write every batch and freeze; freezing makes the snapshot readable.
    try (SnapshotWriter<String> snapshot = context.client.createSnapshot(id.offset - 1, id.epoch, magicTimestamp).get()) {
        assertEquals(id, snapshot.snapshotId());
        expected.forEach(batch -> assertDoesNotThrow(() -> snapshot.append(batch)));
        snapshot.freeze();
    }

    // Read the snapshot back: the raw record count is the data records plus
    // both delimiter records, and the decoded contents match what was written.
    try (SnapshotReader<String> reader = readSnapshot(context, id, Integer.MAX_VALUE)) {
        RawSnapshotReader snapshot = context.log.readSnapshot(id).get();
        int recordCount = validateDelimiters(snapshot, magicTimestamp);
        assertEquals((recordsPerBatch * batches) + delimiterCount, recordCount);
        assertSnapshot(expected, reader);
    }
}
Usage example of org.apache.kafka.raft.OffsetAndEpoch in the Apache Kafka project: class SnapshotWriterReaderTest, method testAbortedSnapshot.
@Test
public void testAbortedSnapshot() throws Exception {
    int recordsPerBatch = 3;
    int batches = 3;
    OffsetAndEpoch id = new OffsetAndEpoch(recordsPerBatch * batches, 3);
    List<List<String>> expected = buildRecords(recordsPerBatch, batches);

    // Seed the log with the records the snapshot would cover.
    RaftClientTestContext.Builder contextBuilder = new RaftClientTestContext.Builder(localId, voters);
    for (List<String> batch : expected) {
        contextBuilder.appendToLog(id.epoch, batch);
    }
    RaftClientTestContext context = contextBuilder.build();

    // Wait until the local node becomes leader, then advance the high
    // watermark so the snapshot offset is covered.
    context.pollUntil(() -> context.currentLeader().equals(OptionalInt.of(localId)));
    context.advanceLocalLeaderHighWatermarkToLogEndOffset();

    // Append all batches but deliberately do NOT call freeze(); closing the
    // writer without freezing aborts the snapshot.
    try (SnapshotWriter<String> snapshot = context.client.createSnapshot(id.offset - 1, id.epoch, 0).get()) {
        assertEquals(id, snapshot.snapshotId());
        expected.forEach(batch -> {
            assertDoesNotThrow(() -> snapshot.append(batch));
        });
    }

    // An aborted snapshot must leave no readable snapshot in the log.
    assertEquals(Optional.empty(), context.log.readSnapshot(id));
}
Usage example of org.apache.kafka.raft.OffsetAndEpoch in the Apache Kafka project: class KafkaRaftMetricsTest, method shouldRecordLogEnd.
@Test
public void shouldRecordLogEnd() throws IOException {
    // Build a single-voter quorum whose log starts at offset 0, epoch 0.
    QuorumState state = buildQuorumState(Collections.singleton(localId));
    state.initialize(new OffsetAndEpoch(0L, 0));
    raftMetrics = new KafkaRaftMetrics(metrics, "raft", state);

    // The gauges report the initial log end before any update.
    assertEquals((double) 0L, getMetric(metrics, "log-end-offset").metricValue());
    assertEquals((double) 0, getMetric(metrics, "log-end-epoch").metricValue());

    // After an explicit update, both gauges reflect the new log end.
    OffsetAndEpoch newLogEnd = new OffsetAndEpoch(5L, 1);
    raftMetrics.updateLogEnd(newLogEnd);

    assertEquals((double) 5L, getMetric(metrics, "log-end-offset").metricValue());
    assertEquals((double) 1, getMetric(metrics, "log-end-epoch").metricValue());
}
Usage example of org.apache.kafka.raft.OffsetAndEpoch in the Apache Kafka project: class FileRawSnapshotTest, method testAppendToFrozenSnapshot.
@Test
public void testAppendToFrozenSnapshot() throws IOException {
    OffsetAndEpoch offsetAndEpoch = new OffsetAndEpoch(10L, 3);
    int bufferSize = 256;
    int numberOfBatches = 10;

    try (FileRawSnapshotWriter snapshot = createSnapshotWriter(tempDir, offsetAndEpoch)) {
        UnalignedMemoryRecords records = buildRecords(ByteBuffer.wrap(randomBytes(bufferSize)));
        for (int batch = 0; batch < numberOfBatches; batch++) {
            snapshot.append(records);
        }
        snapshot.freeze();
        // Once frozen, the writer must reject any further append.
        assertThrows(RuntimeException.class, () -> snapshot.append(records));
    }

    // The snapshot file exists on disk and is strictly larger than the raw
    // payload bytes — presumably due to per-batch framing overhead.
    Path snapshotPath = Snapshots.snapshotPath(tempDir, offsetAndEpoch);
    assertTrue(Files.exists(snapshotPath));
    assertTrue(Files.size(snapshotPath) > bufferSize * numberOfBatches);
}
Usage example of org.apache.kafka.raft.OffsetAndEpoch in the Apache Kafka project: class FileRawSnapshotTest, method testPartialWriteReadSnapshot.
@Test
public void testPartialWriteReadSnapshot() throws IOException {
    Path tempDir = TestUtils.tempDirectory().toPath();
    OffsetAndEpoch offsetAndEpoch = new OffsetAndEpoch(10L, 3);

    // Serialize one batch of records and keep a wrapped view of the full bytes.
    ByteBuffer records = buildRecords(ByteBuffer.wrap(Utils.utf8("foo"))).buffer();
    ByteBuffer expectedBuffer = ByteBuffer.wrap(records.array());

    // Split the serialized bytes into two halves around the midpoint.
    int midpoint = expectedBuffer.limit() / 2;
    ByteBuffer firstHalf = expectedBuffer.duplicate();
    firstHalf.position(0);
    firstHalf.limit(midpoint);
    ByteBuffer secondHalf = expectedBuffer.duplicate();
    secondHalf.position(midpoint);
    secondHalf.limit(expectedBuffer.limit());

    // Write the snapshot via two partial appends, then freeze it.
    try (FileRawSnapshotWriter snapshot = createSnapshotWriter(tempDir, offsetAndEpoch)) {
        snapshot.append(new UnalignedMemoryRecords(firstHalf));
        snapshot.append(new UnalignedMemoryRecords(secondHalf));
        snapshot.freeze();
    }

    // Read the snapshot back as two slices: each slice matches its half, and
    // their concatenation reproduces the original serialized bytes.
    try (FileRawSnapshotReader snapshot = FileRawSnapshotReader.open(tempDir, offsetAndEpoch)) {
        int totalSize = Math.toIntExact(snapshot.sizeInBytes());
        assertEquals(expectedBuffer.remaining(), totalSize);

        int sliceSize = totalSize / 2;
        UnalignedFileRecords firstSlice = (UnalignedFileRecords) snapshot.slice(0, sliceSize);
        UnalignedFileRecords secondSlice = (UnalignedFileRecords) snapshot.slice(sliceSize, totalSize - sliceSize);
        assertEquals(firstHalf, TestUtils.toBuffer(firstSlice));
        assertEquals(secondHalf, TestUtils.toBuffer(secondSlice));

        ByteBuffer readBuffer = ByteBuffer.allocate(firstSlice.sizeInBytes() + secondSlice.sizeInBytes());
        readBuffer.put(TestUtils.toBuffer(firstSlice));
        readBuffer.put(TestUtils.toBuffer(secondSlice));
        readBuffer.flip();
        assertEquals(expectedBuffer, readBuffer);
    }
}
Aggregations — end of collected usage examples for org.apache.kafka.raft.OffsetAndEpoch.