
Example 1 with UnalignedMemoryRecords

Use of org.apache.kafka.common.record.UnalignedMemoryRecords in the Apache Kafka project.

From the class FileRawSnapshotTest, method testAppendToFrozenSnapshot.

@Test
public void testAppendToFrozenSnapshot() throws IOException {
    OffsetAndEpoch offsetAndEpoch = new OffsetAndEpoch(10L, 3);
    int bufferSize = 256;
    int numberOfBatches = 10;
    try (FileRawSnapshotWriter snapshot = createSnapshotWriter(tempDir, offsetAndEpoch)) {
        UnalignedMemoryRecords records = buildRecords(ByteBuffer.wrap(randomBytes(bufferSize)));
        for (int i = 0; i < numberOfBatches; i++) {
            snapshot.append(records);
        }
        snapshot.freeze();
        // Once frozen, the snapshot is immutable; any further append must fail
        assertThrows(RuntimeException.class, () -> snapshot.append(records));
    }
    // The file should exist, and its size should exceed the sum of the raw payloads
    // because each appended batch carries record-batch overhead
    assertTrue(Files.exists(Snapshots.snapshotPath(tempDir, offsetAndEpoch)));
    assertTrue(Files.size(Snapshots.snapshotPath(tempDir, offsetAndEpoch)) > bufferSize * numberOfBatches);
}
Also used: OffsetAndEpoch (org.apache.kafka.raft.OffsetAndEpoch), UnalignedMemoryRecords (org.apache.kafka.common.record.UnalignedMemoryRecords), Test (org.junit.jupiter.api.Test)
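
The buildRecords and randomBytes helpers are local to FileRawSnapshotTest and are not shown on this page. Below is a minimal sketch of what they might look like, assuming MemoryRecords.withRecords with a single SimpleRecord payload and the ByteBuffer-based UnalignedMemoryRecords constructor seen in Example 2; the class name SnapshotTestHelpers is hypothetical and the real helpers may differ.

import java.nio.ByteBuffer;
import java.util.Random;
import org.apache.kafka.common.record.CompressionType;
import org.apache.kafka.common.record.MemoryRecords;
import org.apache.kafka.common.record.SimpleRecord;
import org.apache.kafka.common.record.UnalignedMemoryRecords;

// Hypothetical stand-ins for the test-local helpers referenced above
final class SnapshotTestHelpers {
    private static final Random RANDOM = new Random();

    // Fill a byte array of the requested size with random payload data
    static byte[] randomBytes(int size) {
        byte[] bytes = new byte[size];
        RANDOM.nextBytes(bytes);
        return bytes;
    }

    // Wrap the payload in a record batch and expose its backing bytes as
    // UnalignedMemoryRecords, i.e. raw bytes with no batch-alignment guarantee
    static UnalignedMemoryRecords buildRecords(ByteBuffer payload) {
        MemoryRecords records = MemoryRecords.withRecords(
            CompressionType.NONE,
            new SimpleRecord(payload)
        );
        return new UnalignedMemoryRecords(records.buffer());
    }
}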

Example 2 with UnalignedMemoryRecords

Use of org.apache.kafka.common.record.UnalignedMemoryRecords in the Apache Kafka project.

From the class FileRawSnapshotTest, method testPartialWriteReadSnapshot.

@Test
public void testPartialWriteReadSnapshot() throws IOException {
    Path tempDir = TestUtils.tempDirectory().toPath();
    OffsetAndEpoch offsetAndEpoch = new OffsetAndEpoch(10L, 3);
    ByteBuffer records = buildRecords(ByteBuffer.wrap(Utils.utf8("foo"))).buffer();
    ByteBuffer expectedBuffer = ByteBuffer.wrap(records.array());
    ByteBuffer buffer1 = expectedBuffer.duplicate();
    buffer1.position(0);
    buffer1.limit(expectedBuffer.limit() / 2);
    ByteBuffer buffer2 = expectedBuffer.duplicate();
    buffer2.position(expectedBuffer.limit() / 2);
    buffer2.limit(expectedBuffer.limit());
    try (FileRawSnapshotWriter snapshot = createSnapshotWriter(tempDir, offsetAndEpoch)) {
        snapshot.append(new UnalignedMemoryRecords(buffer1));
        snapshot.append(new UnalignedMemoryRecords(buffer2));
        snapshot.freeze();
    }
    try (FileRawSnapshotReader snapshot = FileRawSnapshotReader.open(tempDir, offsetAndEpoch)) {
        int totalSize = Math.toIntExact(snapshot.sizeInBytes());
        assertEquals(expectedBuffer.remaining(), totalSize);
        // Read the snapshot back as two unaligned slices covering the first and second halves
        UnalignedFileRecords record1 = (UnalignedFileRecords) snapshot.slice(0, totalSize / 2);
        UnalignedFileRecords record2 = (UnalignedFileRecords) snapshot.slice(totalSize / 2, totalSize - totalSize / 2);
        assertEquals(buffer1, TestUtils.toBuffer(record1));
        assertEquals(buffer2, TestUtils.toBuffer(record2));
        ByteBuffer readBuffer = ByteBuffer.allocate(record1.sizeInBytes() + record2.sizeInBytes());
        readBuffer.put(TestUtils.toBuffer(record1));
        readBuffer.put(TestUtils.toBuffer(record2));
        readBuffer.flip();
        assertEquals(expectedBuffer, readBuffer);
    }
}
Also used: Path (java.nio.file.Path), OffsetAndEpoch (org.apache.kafka.raft.OffsetAndEpoch), UnalignedFileRecords (org.apache.kafka.common.record.UnalignedFileRecords), UnalignedMemoryRecords (org.apache.kafka.common.record.UnalignedMemoryRecords), ByteBuffer (java.nio.ByteBuffer), Test (org.junit.jupiter.api.Test)
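
The split into buffer1 and buffer2 above works because ByteBuffer.duplicate() shares the underlying bytes while keeping an independent position and limit. A self-contained illustration of that splitting pattern (class and variable names here are illustrative only):

import java.nio.ByteBuffer;

public class BufferSplitExample {
    public static void main(String[] args) {
        ByteBuffer whole = ByteBuffer.wrap(new byte[] {1, 2, 3, 4, 5, 6});

        // First view covers bytes [0, limit / 2)
        ByteBuffer firstHalf = whole.duplicate();
        firstHalf.position(0);
        firstHalf.limit(whole.limit() / 2);

        // Second view covers bytes [limit / 2, limit)
        ByteBuffer secondHalf = whole.duplicate();
        secondHalf.position(whole.limit() / 2);
        secondHalf.limit(whole.limit());

        // The two views cover the original buffer without copying any bytes
        System.out.println(firstHalf.remaining() + secondHalf.remaining() == whole.remaining()); // prints true
    }
}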

Example 3 with UnalignedMemoryRecords

Use of org.apache.kafka.common.record.UnalignedMemoryRecords in the Apache Kafka project.

From the class FileRawSnapshotTest, method testWritingSnapshot.

@Test
public void testWritingSnapshot() throws IOException {
    OffsetAndEpoch offsetAndEpoch = new OffsetAndEpoch(10L, 3);
    int bufferSize = 256;
    int numberOfBatches = 10;
    int expectedSize = 0;
    try (FileRawSnapshotWriter snapshot = createSnapshotWriter(tempDir, offsetAndEpoch)) {
        assertEquals(0, snapshot.sizeInBytes());
        UnalignedMemoryRecords records = buildRecords(ByteBuffer.wrap(randomBytes(bufferSize)));
        for (int i = 0; i < numberOfBatches; i++) {
            snapshot.append(records);
            expectedSize += records.sizeInBytes();
        }
        assertEquals(expectedSize, snapshot.sizeInBytes());
        snapshot.freeze();
    }
    // The file should exist, and its size should equal the accumulated size of the appended batches
    assertTrue(Files.exists(Snapshots.snapshotPath(tempDir, offsetAndEpoch)));
    assertEquals(expectedSize, Files.size(Snapshots.snapshotPath(tempDir, offsetAndEpoch)));
}
Also used: OffsetAndEpoch (org.apache.kafka.raft.OffsetAndEpoch), UnalignedMemoryRecords (org.apache.kafka.common.record.UnalignedMemoryRecords), Test (org.junit.jupiter.api.Test)
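
As a possible follow-up check, the frozen snapshot could also be read back through FileRawSnapshotReader.open (as in Example 2) and the reader's reported size compared with the accumulated expectedSize. This fragment is only a sketch that would slot into the same test after the existing assertions:

// Re-open the frozen snapshot and confirm the reader reports the size accumulated while writing
try (FileRawSnapshotReader reader = FileRawSnapshotReader.open(tempDir, offsetAndEpoch)) {
    assertEquals(expectedSize, Math.toIntExact(reader.sizeInBytes()));
}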

Example 4 with UnalignedMemoryRecords

Use of org.apache.kafka.common.record.UnalignedMemoryRecords in the Apache Kafka project.

From the class MockRawSnapshotReader, method slice.

@Override
public UnalignedRecords slice(long position, int size) {
    ByteBuffer buffer = data.buffer();
    buffer.position(Math.toIntExact(position));
    buffer.limit(Math.min(buffer.limit(), Math.toIntExact(position + size)));
    return new UnalignedMemoryRecords(buffer.slice());
}
Also used: UnalignedMemoryRecords (org.apache.kafka.common.record.UnalignedMemoryRecords), ByteBuffer (java.nio.ByteBuffer)
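
MockRawSnapshotReader.slice bounds the buffer to [position, position + size), clamped to the end of the data, and then calls slice() so the returned records start at offset zero. A small standalone illustration of those ByteBuffer mechanics (class and variable names are illustrative only):

import java.nio.ByteBuffer;

public class SliceExample {
    public static void main(String[] args) {
        ByteBuffer data = ByteBuffer.wrap(new byte[] {10, 20, 30, 40, 50});
        long position = 1;
        int size = 3;

        // Same steps as the slice method above: bound the view, then slice it
        // so the result is zero-based and spans at most size bytes
        ByteBuffer view = data.duplicate();
        view.position(Math.toIntExact(position));
        view.limit(Math.min(view.limit(), Math.toIntExact(position + size)));
        ByteBuffer slice = view.slice();

        System.out.println(slice.remaining()); // prints 3
        System.out.println(slice.get(0));      // prints 20
    }
}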

Example 5 with UnalignedMemoryRecords

Use of org.apache.kafka.common.record.UnalignedMemoryRecords in the Apache Kafka project.

From the class KafkaRaftClientSnapshotTest, method testPartialFetchSnapshotRequestAsLeader.

@Test
public void testPartialFetchSnapshotRequestAsLeader() throws Exception {
    int localId = 0;
    Set<Integer> voters = Utils.mkSet(localId, localId + 1);
    OffsetAndEpoch snapshotId = new OffsetAndEpoch(2, 1);
    List<String> records = Arrays.asList("foo", "bar");
    RaftClientTestContext context = new RaftClientTestContext.Builder(localId, voters).appendToLog(snapshotId.epoch, records).build();
    context.becomeLeader();
    int epoch = context.currentEpoch();
    context.advanceLocalLeaderHighWatermarkToLogEndOffset();
    try (SnapshotWriter<String> snapshot = context.client.createSnapshot(snapshotId.offset - 1, snapshotId.epoch, 0).get()) {
        assertEquals(snapshotId, snapshot.snapshotId());
        snapshot.append(records);
        snapshot.freeze();
    }
    RawSnapshotReader snapshot = context.log.readSnapshot(snapshotId).get();
    // Fetch half of the snapshot
    context.deliverRequest(fetchSnapshotRequest(context.metadataPartition, epoch, snapshotId, Math.toIntExact(snapshot.sizeInBytes() / 2), 0));
    context.client.poll();
    FetchSnapshotResponseData.PartitionSnapshot response = context.assertSentFetchSnapshotResponse(context.metadataPartition).get();
    assertEquals(Errors.NONE, Errors.forCode(response.errorCode()));
    assertEquals(snapshot.sizeInBytes(), response.size());
    assertEquals(0, response.position());
    assertEquals(snapshot.sizeInBytes() / 2, response.unalignedRecords().sizeInBytes());
    UnalignedMemoryRecords memoryRecords = (UnalignedMemoryRecords) snapshot.slice(0, Math.toIntExact(snapshot.sizeInBytes()));
    ByteBuffer snapshotBuffer = memoryRecords.buffer();
    ByteBuffer responseBuffer = ByteBuffer.allocate(Math.toIntExact(snapshot.sizeInBytes()));
    responseBuffer.put(((UnalignedMemoryRecords) response.unalignedRecords()).buffer());
    ByteBuffer expectedBytes = snapshotBuffer.duplicate();
    expectedBytes.limit(Math.toIntExact(snapshot.sizeInBytes() / 2));
    assertEquals(expectedBytes, responseBuffer.duplicate().flip());
    // Fetch the remainder of the snapshot
    context.deliverRequest(fetchSnapshotRequest(context.metadataPartition, epoch, snapshotId, Integer.MAX_VALUE, responseBuffer.position()));
    context.client.poll();
    response = context.assertSentFetchSnapshotResponse(context.metadataPartition).get();
    assertEquals(Errors.NONE, Errors.forCode(response.errorCode()));
    assertEquals(snapshot.sizeInBytes(), response.size());
    assertEquals(responseBuffer.position(), response.position());
    assertEquals(snapshot.sizeInBytes() - (snapshot.sizeInBytes() / 2), response.unalignedRecords().sizeInBytes());
    responseBuffer.put(((UnalignedMemoryRecords) response.unalignedRecords()).buffer());
    assertEquals(snapshotBuffer, responseBuffer.flip());
}
Also used: FetchSnapshotResponseData (org.apache.kafka.common.message.FetchSnapshotResponseData), RawSnapshotReader (org.apache.kafka.snapshot.RawSnapshotReader), ByteBuffer (java.nio.ByteBuffer), UnalignedMemoryRecords (org.apache.kafka.common.record.UnalignedMemoryRecords), SnapshotWriterReaderTest (org.apache.kafka.snapshot.SnapshotWriterReaderTest), Test (org.junit.jupiter.api.Test)
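
The two fetches in this test reassemble the snapshot by advancing the fetch position by the number of bytes already copied into responseBuffer. A framework-agnostic sketch of that chunked-fetch loop follows; ChunkSource, fetchChunk, and fetchWholeSnapshot are hypothetical placeholders, not Kafka APIs.

import java.nio.ByteBuffer;

public class ChunkedFetchSketch {
    // Hypothetical source that returns the snapshot bytes starting at position,
    // up to maxBytes per call (assumed to return at least one byte while data remains)
    interface ChunkSource {
        ByteBuffer fetchChunk(long position, int maxBytes);
    }

    // Keep fetching from the current position until the whole snapshot has been copied
    static ByteBuffer fetchWholeSnapshot(ChunkSource source, int snapshotSize, int maxBytesPerFetch) {
        ByteBuffer assembled = ByteBuffer.allocate(snapshotSize);
        while (assembled.position() < snapshotSize) {
            assembled.put(source.fetchChunk(assembled.position(), maxBytesPerFetch));
        }
        assembled.flip();
        return assembled;
    }
}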

Aggregations

UnalignedMemoryRecords (org.apache.kafka.common.record.UnalignedMemoryRecords): 12
Test (org.junit.jupiter.api.Test): 10
OffsetAndEpoch (org.apache.kafka.raft.OffsetAndEpoch): 7
ByteBuffer (java.nio.ByteBuffer): 6
FetchSnapshotResponseData (org.apache.kafka.common.message.FetchSnapshotResponseData): 3
MemoryRecords (org.apache.kafka.common.record.MemoryRecords): 3
Path (java.nio.file.Path): 2
Record (org.apache.kafka.common.record.Record): 2
RecordBatch (org.apache.kafka.common.record.RecordBatch): 2
SimpleRecord (org.apache.kafka.common.record.SimpleRecord): 2
UnalignedFileRecords (org.apache.kafka.common.record.UnalignedFileRecords): 2
GrowableBufferSupplier (org.apache.kafka.common.utils.BufferSupplier.GrowableBufferSupplier): 2
RawSnapshotReader (org.apache.kafka.snapshot.RawSnapshotReader): 2
SnapshotWriterReaderTest (org.apache.kafka.snapshot.SnapshotWriterReaderTest): 2
IOException (java.io.IOException): 1
Files (java.nio.file.Files): 1
Arrays (java.util.Arrays): 1
Iterator (java.util.Iterator): 1
Optional (java.util.Optional): 1
OptionalInt (java.util.OptionalInt): 1