
Example 1 with FileChannelRecordBatch

Use of org.apache.kafka.common.record.FileLogInputStream.FileChannelRecordBatch in project apache-kafka-on-k8s by banzaicloud.

From the class FileLogInputStreamTest, method testWriteTo:

@Test
public void testWriteTo() throws IOException {
    try (FileRecords fileRecords = FileRecords.open(tempFile())) {
        // Append a single record and force it to disk.
        fileRecords.append(MemoryRecords.withRecords(magic, compression, new SimpleRecord("foo".getBytes())));
        fileRecords.flush();
        FileLogInputStream logInputStream = new FileLogInputStream(fileRecords.channel(), 0, fileRecords.sizeInBytes());
        FileChannelRecordBatch batch = logInputStream.nextBatch();
        assertNotNull(batch);
        assertEquals(magic, batch.magic());
        // Copy the on-disk batch into a buffer and read it back as MemoryRecords.
        ByteBuffer buffer = ByteBuffer.allocate(128);
        batch.writeTo(buffer);
        buffer.flip();
        MemoryRecords memRecords = MemoryRecords.readableRecords(buffer);
        List<Record> records = Utils.toList(memRecords.records().iterator());
        assertEquals(1, records.size());
        Record record0 = records.get(0);
        assertTrue(record0.hasMagic(magic));
        assertEquals("foo", Utils.utf8(record0.value(), record0.valueSize()));
    }
}
Also used : FileChannelRecordBatch(org.apache.kafka.common.record.FileLogInputStream.FileChannelRecordBatch) ByteBuffer(java.nio.ByteBuffer) Test(org.junit.Test)
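
These snippets rely on members of the surrounding test class that the excerpts do not show: the magic and compression fields and the tempFile() helper (in the real test, likely a static import of org.apache.kafka.test.TestUtils.tempFile). A minimal sketch of hypothetical stand-ins so the examples compile on their own:

// Hypothetical stand-ins for the test-class members the excerpts assume;
// the real FileLogInputStreamTest is parameterized over these values.
private final byte magic = RecordBatch.MAGIC_VALUE_V2;            // assumed format version
private final CompressionType compression = CompressionType.NONE; // assumed codec

// Assumed replacement for TestUtils.tempFile(): a throwaway log file.
private static File tempFile() throws IOException {
    File file = File.createTempFile("kafka-records-", ".log");
    file.deleteOnExit();
    return file;
}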

Example 2 with FileChannelRecordBatch

Use of org.apache.kafka.common.record.FileLogInputStream.FileChannelRecordBatch in project apache-kafka-on-k8s by banzaicloud.

From the class FileLogInputStreamTest, method testSimpleBatchIteration:

@Test
public void testSimpleBatchIteration() throws IOException {
    try (FileRecords fileRecords = FileRecords.open(tempFile())) {
        SimpleRecord firstBatchRecord = new SimpleRecord(3241324L, "a".getBytes(), "foo".getBytes());
        SimpleRecord secondBatchRecord = new SimpleRecord(234280L, "b".getBytes(), "bar".getBytes());
        // Write two single-record batches with base offsets 0 and 1.
        fileRecords.append(MemoryRecords.withRecords(magic, 0L, compression, CREATE_TIME, firstBatchRecord));
        fileRecords.append(MemoryRecords.withRecords(magic, 1L, compression, CREATE_TIME, secondBatchRecord));
        fileRecords.flush();
        FileLogInputStream logInputStream = new FileLogInputStream(fileRecords.channel(), 0, fileRecords.sizeInBytes());
        // The stream yields the batches in order, then signals exhaustion with null.
        FileChannelRecordBatch firstBatch = logInputStream.nextBatch();
        assertGenericRecordBatchData(firstBatch, 0L, 3241324L, firstBatchRecord);
        assertNoProducerData(firstBatch);
        FileChannelRecordBatch secondBatch = logInputStream.nextBatch();
        assertGenericRecordBatchData(secondBatch, 1L, 234280L, secondBatchRecord);
        assertNoProducerData(secondBatch);
        assertNull(logInputStream.nextBatch());
    }
}
Also used : FileChannelRecordBatch(org.apache.kafka.common.record.FileLogInputStream.FileChannelRecordBatch) Test(org.junit.Test)
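
assertNoProducerData and assertGenericRecordBatchData are private helpers of the test class, not part of the public API. The producer-data check is likely along these lines; a sketch assuming the sentinel constants on RecordBatch, not the verbatim helper:

// Hedged sketch: the batch should carry no idempotent/transactional
// producer metadata, i.e. only the RecordBatch sentinel values.
private void assertNoProducerData(RecordBatch batch) {
    assertEquals(RecordBatch.NO_PRODUCER_ID, batch.producerId());
    assertEquals(RecordBatch.NO_PRODUCER_EPOCH, batch.producerEpoch());
    assertEquals(RecordBatch.NO_SEQUENCE, batch.baseSequence());
    assertFalse(batch.isTransactional());
}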

Example 3 with FileChannelRecordBatch

Use of org.apache.kafka.common.record.FileLogInputStream.FileChannelRecordBatch in project apache-kafka-on-k8s by banzaicloud.

From the class FileLogInputStreamTest, method testBatchIterationWithMultipleRecordsPerBatch:

@Test
public void testBatchIterationWithMultipleRecordsPerBatch() throws IOException {
    // Uncompressed pre-v2 formats write each record as its own shallow
    // message, so multi-record batches are impossible; skip that combination.
    if (magic < MAGIC_VALUE_V2 && compression == CompressionType.NONE)
        return;
    try (FileRecords fileRecords = FileRecords.open(tempFile())) {
        SimpleRecord[] firstBatchRecords = new SimpleRecord[] {
            new SimpleRecord(3241324L, "a".getBytes(), "1".getBytes()),
            new SimpleRecord(234280L, "b".getBytes(), "2".getBytes())
        };
        // The second batch includes a null key and a null value.
        SimpleRecord[] secondBatchRecords = new SimpleRecord[] {
            new SimpleRecord(238423489L, "c".getBytes(), "3".getBytes()),
            new SimpleRecord(897839L, null, "4".getBytes()),
            new SimpleRecord(8234020L, "e".getBytes(), null)
        };
        fileRecords.append(MemoryRecords.withRecords(magic, 0L, compression, CREATE_TIME, firstBatchRecords));
        fileRecords.append(MemoryRecords.withRecords(magic, 1L, compression, CREATE_TIME, secondBatchRecords));
        fileRecords.flush();
        FileLogInputStream logInputStream = new FileLogInputStream(fileRecords.channel(), 0, fileRecords.sizeInBytes());
        FileChannelRecordBatch firstBatch = logInputStream.nextBatch();
        assertNoProducerData(firstBatch);
        assertGenericRecordBatchData(firstBatch, 0L, 3241324L, firstBatchRecords);
        FileChannelRecordBatch secondBatch = logInputStream.nextBatch();
        assertNoProducerData(secondBatch);
        assertGenericRecordBatchData(secondBatch, 1L, 238423489L, secondBatchRecords);
        assertNull(logInputStream.nextBatch());
    }
}
Also used : FileChannelRecordBatch(org.apache.kafka.common.record.FileLogInputStream.FileChannelRecordBatch) Test(org.junit.Test)
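
The per-record checks that assertGenericRecordBatchData performs can be approximated by iterating the batch directly, since FileChannelRecordBatch is Iterable<Record>. A sketch under that assumption (the timestamp comparison only holds for v1+ formats, where record timestamps are stored):

// Hedged approximation of the per-record assertions: compare each record
// in the batch against the SimpleRecord it was written from.
int i = 0;
for (Record record : firstBatch) {
    SimpleRecord expected = firstBatchRecords[i++];
    assertEquals(expected.timestamp(), record.timestamp());
    assertEquals(expected.key(), record.key());     // ByteBuffer equality
    assertEquals(expected.value(), record.value());
}
assertEquals(firstBatchRecords.length, i);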

Example 4 with FileChannelRecordBatch

Use of org.apache.kafka.common.record.FileLogInputStream.FileChannelRecordBatch in project kafka by apache.

From the class FileLogInputStreamTest, method testBatchIterationIncompleteBatch:

@ParameterizedTest
@ArgumentsSource(FileLogInputStreamArgumentsProvider.class)
public void testBatchIterationIncompleteBatch(Args args) throws IOException {
    CompressionType compression = args.compression;
    byte magic = args.magic;
    // ZSTD is only supported by the v2 message format.
    if (compression == CompressionType.ZSTD && magic < MAGIC_VALUE_V2)
        return;
    try (FileRecords fileRecords = FileRecords.open(tempFile())) {
        SimpleRecord firstBatchRecord = new SimpleRecord(100L, "foo".getBytes());
        SimpleRecord secondBatchRecord = new SimpleRecord(200L, "bar".getBytes());
        fileRecords.append(MemoryRecords.withRecords(magic, 0L, compression, CREATE_TIME, firstBatchRecord));
        fileRecords.append(MemoryRecords.withRecords(magic, 1L, compression, CREATE_TIME, secondBatchRecord));
        fileRecords.flush();
        // Chop bytes off the end of the file so the second batch is incomplete.
        fileRecords.truncateTo(fileRecords.sizeInBytes() - 13);
        FileLogInputStream logInputStream = new FileLogInputStream(fileRecords, 0, fileRecords.sizeInBytes());
        // Only the intact first batch is returned; the truncated batch is dropped.
        FileChannelRecordBatch firstBatch = logInputStream.nextBatch();
        assertNoProducerData(firstBatch);
        assertGenericRecordBatchData(args, firstBatch, 0L, 100L, firstBatchRecord);
        assertNull(logInputStream.nextBatch());
    }
}
Also used : FileChannelRecordBatch(org.apache.kafka.common.record.FileLogInputStream.FileChannelRecordBatch) ParameterizedTest(org.junit.jupiter.params.ParameterizedTest) ArgumentsSource(org.junit.jupiter.params.provider.ArgumentsSource)
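
Args and FileLogInputStreamArgumentsProvider belong to the JUnit 5 version of the test class and are not shown in this excerpt. A plausible reconstruction of the parameter holder, limited to the fields the snippet actually reads:

// Hedged reconstruction: the provider presumably enumerates valid
// magic/compression combinations and wraps each pair in an Args object.
private static class Args {
    final byte magic;
    final CompressionType compression;

    Args(byte magic, CompressionType compression) {
        this.magic = magic;
        this.compression = compression;
    }
}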

Example 5 with FileChannelRecordBatch

Use of org.apache.kafka.common.record.FileLogInputStream.FileChannelRecordBatch in project kafka by apache.

From the class SnapshotFileReader, method handleMetadataBatch:

private void handleMetadataBatch(FileChannelRecordBatch batch) {
    List<ApiMessageAndVersion> messages = new ArrayList<>();
    for (Record record : batch) {
        ByteBufferAccessor accessor = new ByteBufferAccessor(record.value());
        try {
            // Deserialize each metadata record; a bad record is logged
            // and skipped rather than aborting the whole batch.
            ApiMessageAndVersion messageAndVersion = serde.read(accessor, record.valueSize());
            messages.add(messageAndVersion);
        } catch (Throwable e) {
            log.error("unable to read metadata record at offset {}", record.offset(), e);
        }
    }
    // Repackage the decoded messages as a single in-memory batch and hand
    // it to the listener; the no-op lambda is the reader's close callback.
    listener.handleCommit(MemoryBatchReader.of(
        Collections.singletonList(Batch.data(batch.baseOffset(), batch.partitionLeaderEpoch(),
            batch.maxTimestamp(), batch.sizeInBytes(), messages)),
        reader -> { }));
}
Also used : MemoryBatchReader(org.apache.kafka.raft.internals.MemoryBatchReader) ByteBufferAccessor(org.apache.kafka.common.protocol.ByteBufferAccessor) LoggerFactory(org.slf4j.LoggerFactory) CompletableFuture(java.util.concurrent.CompletableFuture) ControlRecordType(org.apache.kafka.common.record.ControlRecordType) OptionalInt(java.util.OptionalInt) Record(org.apache.kafka.common.record.Record) ArrayList(java.util.ArrayList) KafkaEventQueue(org.apache.kafka.queue.KafkaEventQueue) LogContext(org.apache.kafka.common.utils.LogContext) ApiMessageAndVersion(org.apache.kafka.server.common.ApiMessageAndVersion) FileRecords(org.apache.kafka.common.record.FileRecords) LeaderChangeMessage(org.apache.kafka.common.message.LeaderChangeMessage) MetadataRecordSerde(org.apache.kafka.metadata.MetadataRecordSerde) Logger(org.slf4j.Logger) Time(org.apache.kafka.common.utils.Time) Iterator(java.util.Iterator) FileChannelRecordBatch(org.apache.kafka.common.record.FileLogInputStream.FileChannelRecordBatch) File(java.io.File) Batch(org.apache.kafka.raft.Batch) List(java.util.List) EventQueue(org.apache.kafka.queue.EventQueue) LeaderAndEpoch(org.apache.kafka.raft.LeaderAndEpoch) RaftClient(org.apache.kafka.raft.RaftClient) Collections(java.util.Collections)
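
For context, handleMetadataBatch is one arm of SnapshotFileReader's dispatch loop. A hedged sketch of how the surrounding reader might walk a snapshot file; handleControlBatch and snapshotPath are assumptions standing in for code outside this excerpt:

// Hypothetical walk over a snapshot file: FileRecords.batches() yields
// FileChannelRecordBatch instances lazily, and control batches (e.g.
// leader changes) are routed separately from metadata batches.
try (FileRecords fileRecords = FileRecords.open(new File(snapshotPath))) {
    for (FileChannelRecordBatch batch : fileRecords.batches()) {
        if (batch.isControlBatch())
            handleControlBatch(batch);   // assumed companion handler
        else
            handleMetadataBatch(batch);
    }
}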

Aggregations

FileChannelRecordBatch (org.apache.kafka.common.record.FileLogInputStream.FileChannelRecordBatch) 12
Test (org.junit.Test) 5
ParameterizedTest (org.junit.jupiter.params.ParameterizedTest) 5
ArgumentsSource (org.junit.jupiter.params.provider.ArgumentsSource) 5
ByteBuffer (java.nio.ByteBuffer) 2
File (java.io.File) 1
ArrayList (java.util.ArrayList) 1
Collections (java.util.Collections) 1
Iterator (java.util.Iterator) 1
List (java.util.List) 1
OptionalInt (java.util.OptionalInt) 1
CompletableFuture (java.util.concurrent.CompletableFuture) 1
LeaderChangeMessage (org.apache.kafka.common.message.LeaderChangeMessage) 1
ByteBufferAccessor (org.apache.kafka.common.protocol.ByteBufferAccessor) 1
ControlRecordType (org.apache.kafka.common.record.ControlRecordType) 1
FileRecords (org.apache.kafka.common.record.FileRecords) 1
Record (org.apache.kafka.common.record.Record) 1
LogContext (org.apache.kafka.common.utils.LogContext) 1
Time (org.apache.kafka.common.utils.Time) 1
MetadataRecordSerde (org.apache.kafka.metadata.MetadataRecordSerde) 1