Example 6 with OffsetAndEpoch

Use of org.apache.kafka.raft.OffsetAndEpoch in the apache/kafka project: class SnapshotWriterReaderTest, method testWritingSnapshot.

@Test
public void testWritingSnapshot() throws Exception {
    int recordsPerBatch = 3;
    int batches = 3;
    int delimiterCount = 2; // the snapshot's header and footer control records
    long magicTimestamp = 0xDEADBEEF;
    OffsetAndEpoch id = new OffsetAndEpoch(recordsPerBatch * batches, 3);
    List<List<String>> expected = buildRecords(recordsPerBatch, batches);
    RaftClientTestContext.Builder contextBuilder = new RaftClientTestContext.Builder(localId, voters);
    for (List<String> batch : expected) {
        contextBuilder.appendToLog(id.epoch, batch);
    }
    RaftClientTestContext context = contextBuilder.build();
    context.pollUntil(() -> context.currentLeader().equals(OptionalInt.of(localId)));
    int epoch = context.currentEpoch();
    context.advanceLocalLeaderHighWatermarkToLogEndOffset();
    try (SnapshotWriter<String> snapshot = context.client.createSnapshot(id.offset - 1, id.epoch, magicTimestamp).get()) {
        assertEquals(id, snapshot.snapshotId());
        expected.forEach(batch -> assertDoesNotThrow(() -> snapshot.append(batch)));
        snapshot.freeze();
    }
    try (SnapshotReader<String> reader = readSnapshot(context, id, Integer.MAX_VALUE)) {
        RawSnapshotReader snapshot = context.log.readSnapshot(id).get();
        int recordCount = validateDelimiters(snapshot, magicTimestamp);
        assertEquals((recordsPerBatch * batches) + delimiterCount, recordCount);
        assertSnapshot(expected, reader);
    }
}
Also used: OffsetAndEpoch (org.apache.kafka.raft.OffsetAndEpoch), RaftClientTestContext (org.apache.kafka.raft.RaftClientTestContext), ArrayList (java.util.ArrayList), List (java.util.List), Test (org.junit.jupiter.api.Test)
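
The buildRecords, readSnapshot, validateDelimiters, and assertSnapshot helpers are not reproduced on this page. As a rough sketch, assuming the records are plain strings derived from the batch and record indices (the payload format is an assumption, not the project's actual helper):

private static List<List<String>> buildRecords(int recordsPerBatch, int batches) {
    // Hypothetical helper: build `batches` batches of `recordsPerBatch` distinct strings
    List<List<String>> result = new ArrayList<>();
    for (int batch = 0; batch < batches; batch++) {
        List<String> records = new ArrayList<>();
        for (int record = 0; record < recordsPerBatch; record++) {
            records.add(batch + "-" + record);
        }
        result.add(records);
    }
    return result;
}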

Example 7 with OffsetAndEpoch

Use of org.apache.kafka.raft.OffsetAndEpoch in the apache/kafka project: class SnapshotWriterReaderTest, method testAbortedSnapshot.

@Test
public void testAbortedSnapshot() throws Exception {
    int recordsPerBatch = 3;
    int batches = 3;
    OffsetAndEpoch id = new OffsetAndEpoch(recordsPerBatch * batches, 3);
    List<List<String>> expected = buildRecords(recordsPerBatch, batches);
    RaftClientTestContext.Builder contextBuilder = new RaftClientTestContext.Builder(localId, voters);
    for (List<String> batch : expected) {
        contextBuilder.appendToLog(id.epoch, batch);
    }
    RaftClientTestContext context = contextBuilder.build();
    context.pollUntil(() -> context.currentLeader().equals(OptionalInt.of(localId)));
    int epoch = context.currentEpoch();
    context.advanceLocalLeaderHighWatermarkToLogEndOffset();
    try (SnapshotWriter<String> snapshot = context.client.createSnapshot(id.offset - 1, id.epoch, 0).get()) {
        assertEquals(id, snapshot.snapshotId());
        expected.forEach(batch -> assertDoesNotThrow(() -> snapshot.append(batch)));
        // freeze() is intentionally never called; closing the writer aborts the snapshot
    }
    assertEquals(Optional.empty(), context.log.readSnapshot(id));
}
Also used: OffsetAndEpoch (org.apache.kafka.raft.OffsetAndEpoch), RaftClientTestContext (org.apache.kafka.raft.RaftClientTestContext), ArrayList (java.util.ArrayList), List (java.util.List), Test (org.junit.jupiter.api.Test)

Example 8 with OffsetAndEpoch

Use of org.apache.kafka.raft.OffsetAndEpoch in the apache/kafka project: class KafkaRaftMetricsTest, method shouldRecordLogEnd.

@Test
public void shouldRecordLogEnd() throws IOException {
    QuorumState state = buildQuorumState(Collections.singleton(localId));
    state.initialize(new OffsetAndEpoch(0L, 0));
    raftMetrics = new KafkaRaftMetrics(metrics, "raft", state);
    assertEquals((double) 0L, getMetric(metrics, "log-end-offset").metricValue());
    assertEquals((double) 0, getMetric(metrics, "log-end-epoch").metricValue());
    raftMetrics.updateLogEnd(new OffsetAndEpoch(5L, 1));
    assertEquals((double) 5L, getMetric(metrics, "log-end-offset").metricValue());
    assertEquals((double) 1, getMetric(metrics, "log-end-epoch").metricValue());
}
Also used: OffsetAndEpoch (org.apache.kafka.raft.OffsetAndEpoch), QuorumState (org.apache.kafka.raft.QuorumState), Test (org.junit.jupiter.api.Test)
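
The getMetric helper is not shown on this page; a minimal sketch of what it might look like, assuming it simply searches the Metrics registry by metric name (the lookup strategy and the exception are assumptions):

private static KafkaMetric getMetric(Metrics metrics, String name) {
    // Hypothetical lookup: return the first registered metric whose name matches
    return metrics.metrics().values().stream()
        .filter(metric -> metric.metricName().name().equals(name))
        .findFirst()
        .orElseThrow(() -> new IllegalStateException("Metric not found: " + name));
}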

Example 9 with OffsetAndEpoch

Use of org.apache.kafka.raft.OffsetAndEpoch in the apache/kafka project: class FileRawSnapshotTest, method testAppendToFrozenSnapshot.

@Test
public void testAppendToFrozenSnapshot() throws IOException {
    OffsetAndEpoch offsetAndEpoch = new OffsetAndEpoch(10L, 3);
    int bufferSize = 256;
    int numberOfBatches = 10;
    try (FileRawSnapshotWriter snapshot = createSnapshotWriter(tempDir, offsetAndEpoch)) {
        UnalignedMemoryRecords records = buildRecords(ByteBuffer.wrap(randomBytes(bufferSize)));
        for (int i = 0; i < numberOfBatches; i++) {
            snapshot.append(records);
        }
        snapshot.freeze();
        assertThrows(RuntimeException.class, () -> snapshot.append(records));
    }
    // File should exist and the size should be greater than the sum of all the buffers
    assertTrue(Files.exists(Snapshots.snapshotPath(tempDir, offsetAndEpoch)));
    assertTrue(Files.size(Snapshots.snapshotPath(tempDir, offsetAndEpoch)) > bufferSize * numberOfBatches);
}
Also used: OffsetAndEpoch (org.apache.kafka.raft.OffsetAndEpoch), UnalignedMemoryRecords (org.apache.kafka.common.record.UnalignedMemoryRecords), Test (org.junit.jupiter.api.Test)
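
The randomBytes and buildRecords helpers used here (and in the next example) are not shown on this page. A plausible sketch, assuming each payload buffer is wrapped as a SimpleRecord in a single uncompressed MemoryRecords batch (the exact record layout is an assumption):

private static byte[] randomBytes(int size) {
    // Hypothetical helper: fill an array of the requested size with random payload bytes
    byte[] bytes = new byte[size];
    new java.util.Random().nextBytes(bytes);
    return bytes;
}

private static UnalignedMemoryRecords buildRecords(ByteBuffer... buffers) {
    // Hypothetical helper: serialize the buffers as one uncompressed batch and expose
    // the raw bytes as unaligned records, matching what FileRawSnapshotWriter.append accepts
    MemoryRecords records = MemoryRecords.withRecords(
        CompressionType.NONE,
        Arrays.stream(buffers).map(SimpleRecord::new).toArray(SimpleRecord[]::new));
    return new UnalignedMemoryRecords(records.buffer());
}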

Example 10 with OffsetAndEpoch

Use of org.apache.kafka.raft.OffsetAndEpoch in the apache/kafka project: class FileRawSnapshotTest, method testPartialWriteReadSnapshot.

@Test
public void testPartialWriteReadSnapshot() throws IOException {
    Path tempDir = TestUtils.tempDirectory().toPath();
    OffsetAndEpoch offsetAndEpoch = new OffsetAndEpoch(10L, 3);
    ByteBuffer records = buildRecords(ByteBuffer.wrap(Utils.utf8("foo"))).buffer();
    ByteBuffer expectedBuffer = ByteBuffer.wrap(records.array());
    // Split the serialized records in half so each append sees only part of a batch
    ByteBuffer buffer1 = expectedBuffer.duplicate();
    buffer1.position(0);
    buffer1.limit(expectedBuffer.limit() / 2);
    ByteBuffer buffer2 = expectedBuffer.duplicate();
    buffer2.position(expectedBuffer.limit() / 2);
    buffer2.limit(expectedBuffer.limit());
    try (FileRawSnapshotWriter snapshot = createSnapshotWriter(tempDir, offsetAndEpoch)) {
        snapshot.append(new UnalignedMemoryRecords(buffer1));
        snapshot.append(new UnalignedMemoryRecords(buffer2));
        snapshot.freeze();
    }
    try (FileRawSnapshotReader snapshot = FileRawSnapshotReader.open(tempDir, offsetAndEpoch)) {
        int totalSize = Math.toIntExact(snapshot.sizeInBytes());
        assertEquals(expectedBuffer.remaining(), totalSize);
        UnalignedFileRecords record1 = (UnalignedFileRecords) snapshot.slice(0, totalSize / 2);
        UnalignedFileRecords record2 = (UnalignedFileRecords) snapshot.slice(totalSize / 2, totalSize - totalSize / 2);
        assertEquals(buffer1, TestUtils.toBuffer(record1));
        assertEquals(buffer2, TestUtils.toBuffer(record2));
        ByteBuffer readBuffer = ByteBuffer.allocate(record1.sizeInBytes() + record2.sizeInBytes());
        readBuffer.put(TestUtils.toBuffer(record1));
        readBuffer.put(TestUtils.toBuffer(record2));
        readBuffer.flip();
        assertEquals(expectedBuffer, readBuffer);
    }
}
Also used: Path (java.nio.file.Path), OffsetAndEpoch (org.apache.kafka.raft.OffsetAndEpoch), UnalignedFileRecords (org.apache.kafka.common.record.UnalignedFileRecords), UnalignedMemoryRecords (org.apache.kafka.common.record.UnalignedMemoryRecords), ByteBuffer (java.nio.ByteBuffer), Test (org.junit.jupiter.api.Test)
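
Note that slice operates on raw byte positions rather than batch boundaries, so the two halves read here can split a record batch mid-way (presumably why the Unaligned* record types exist); concatenating them reproduces the frozen snapshot byte for byte, which the final readBuffer comparison asserts.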

Aggregations

OffsetAndEpoch (org.apache.kafka.raft.OffsetAndEpoch): 25
Test (org.junit.jupiter.api.Test): 22
Path (java.nio.file.Path): 8
UnalignedMemoryRecords (org.apache.kafka.common.record.UnalignedMemoryRecords): 8
QuorumState (org.apache.kafka.raft.QuorumState): 7
ByteBuffer (java.nio.ByteBuffer): 4
RaftClientTestContext (org.apache.kafka.raft.RaftClientTestContext): 4
ParameterizedTest (org.junit.jupiter.params.ParameterizedTest): 4
ArrayList (java.util.ArrayList): 3
List (java.util.List): 3
Record (org.apache.kafka.common.record.Record): 3
RecordBatch (org.apache.kafka.common.record.RecordBatch): 3
SimpleRecord (org.apache.kafka.common.record.SimpleRecord): 3
UnalignedFileRecords (org.apache.kafka.common.record.UnalignedFileRecords): 3
GrowableBufferSupplier (org.apache.kafka.common.utils.BufferSupplier.GrowableBufferSupplier): 3
IOException (java.io.IOException): 2
Files (java.nio.file.Files): 2
Arrays (java.util.Arrays): 2
Iterator (java.util.Iterator): 2
Optional (java.util.Optional): 2