Example 6 with FileChannelRecordBatch

Use of org.apache.kafka.common.record.FileLogInputStream.FileChannelRecordBatch in project kafka by apache.

From the class SnapshotFileReader, the method handleNextBatch:

private void handleNextBatch() {
    if (!batchIterator.hasNext()) {
        beginShutdown("done");
        return;
    }
    FileChannelRecordBatch batch = batchIterator.next();
    if (batch.isControlBatch()) {
        handleControlBatch(batch);
    } else {
        handleMetadataBatch(batch);
    }
    scheduleHandleNextBatch();
}
Also used: FileChannelRecordBatch (org.apache.kafka.common.record.FileLogInputStream.FileChannelRecordBatch)
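
For context, the batchIterator consumed by handleNextBatch is built from a FileRecords view of the snapshot file. A minimal sketch of obtaining such an iterator, assuming a hypothetical file path; FileRecords.open and FileRecords.batches() are the same public record APIs used in the test examples below:

import java.io.File;
import java.io.IOException;
import java.util.Iterator;
import org.apache.kafka.common.record.FileRecords;
import org.apache.kafka.common.record.FileLogInputStream.FileChannelRecordBatch;

public class SnapshotBatchIteratorSketch {
    public static void main(String[] args) throws IOException {
        // "snapshot.log" is a placeholder path, not a real Kafka file name.
        try (FileRecords fileRecords = FileRecords.open(new File("snapshot.log"))) {
            // batches() lazily yields FileChannelRecordBatch entries from the channel;
            // this iterator plays the role of batchIterator in handleNextBatch above.
            Iterator<FileChannelRecordBatch> batchIterator = fileRecords.batches().iterator();
            while (batchIterator.hasNext()) {
                FileChannelRecordBatch batch = batchIterator.next();
                System.out.println("offset=" + batch.baseOffset() + " control=" + batch.isControlBatch());
            }
        }
    }
}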

Example 7 with FileChannelRecordBatch

Use of org.apache.kafka.common.record.FileLogInputStream.FileChannelRecordBatch in project apache-kafka-on-k8s by banzaicloud.

From the class FileLogInputStreamTest, the method testBatchIterationV2:

@Test
public void testBatchIterationV2() throws IOException {
    if (magic != MAGIC_VALUE_V2)
        return;
    try (FileRecords fileRecords = FileRecords.open(tempFile())) {
        long producerId = 83843L;
        short producerEpoch = 15;
        int baseSequence = 234;
        int partitionLeaderEpoch = 9832;
        SimpleRecord[] firstBatchRecords = new SimpleRecord[] {
            new SimpleRecord(3241324L, "a".getBytes(), "1".getBytes()),
            new SimpleRecord(234280L, "b".getBytes(), "2".getBytes())
        };
        SimpleRecord[] secondBatchRecords = new SimpleRecord[] {
            new SimpleRecord(238423489L, "c".getBytes(), "3".getBytes()),
            new SimpleRecord(897839L, null, "4".getBytes()),
            new SimpleRecord(8234020L, "e".getBytes(), null)
        };
        fileRecords.append(MemoryRecords.withIdempotentRecords(magic, 15L, compression,
            producerId, producerEpoch, baseSequence, partitionLeaderEpoch, firstBatchRecords));
        fileRecords.append(MemoryRecords.withTransactionalRecords(magic, 27L, compression,
            producerId, producerEpoch, baseSequence + firstBatchRecords.length,
            partitionLeaderEpoch, secondBatchRecords));
        fileRecords.flush();
        FileLogInputStream logInputStream = new FileLogInputStream(fileRecords.channel(), 0, fileRecords.sizeInBytes());
        FileChannelRecordBatch firstBatch = logInputStream.nextBatch();
        assertProducerData(firstBatch, producerId, producerEpoch, baseSequence, false, firstBatchRecords);
        assertGenericRecordBatchData(firstBatch, 15L, 3241324L, firstBatchRecords);
        assertEquals(partitionLeaderEpoch, firstBatch.partitionLeaderEpoch());
        FileChannelRecordBatch secondBatch = logInputStream.nextBatch();
        assertProducerData(secondBatch, producerId, producerEpoch, baseSequence + firstBatchRecords.length, true, secondBatchRecords);
        assertGenericRecordBatchData(secondBatch, 27L, 238423489L, secondBatchRecords);
        assertEquals(partitionLeaderEpoch, secondBatch.partitionLeaderEpoch());
        assertNull(logInputStream.nextBatch());
    }
}
Also used: FileChannelRecordBatch (org.apache.kafka.common.record.FileLogInputStream.FileChannelRecordBatch), Test (org.junit.Test)
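
The producer fields asserted by assertProducerData can be read straight off the batch. A minimal sketch of the RecordBatch accessors involved (the helper name printProducerData is mine):

// Hypothetical helper showing the accessors behind assertProducerData.
static void printProducerData(FileChannelRecordBatch batch) {
    System.out.println("producerId=" + batch.producerId());            // 83843L above
    System.out.println("producerEpoch=" + batch.producerEpoch());      // 15 above
    System.out.println("baseSequence=" + batch.baseSequence());        // 234 for the first batch
    System.out.println("transactional=" + batch.isTransactional());    // false for the idempotent batch, true for the transactional one
    System.out.println("leaderEpoch=" + batch.partitionLeaderEpoch()); // 9832 above
}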

Example 8 with FileChannelRecordBatch

Use of org.apache.kafka.common.record.FileLogInputStream.FileChannelRecordBatch in project apache-kafka-on-k8s by banzaicloud.

From the class FileLogInputStreamTest, the method testBatchIterationIncompleteBatch:

@Test
public void testBatchIterationIncompleteBatch() throws IOException {
    try (FileRecords fileRecords = FileRecords.open(tempFile())) {
        SimpleRecord firstBatchRecord = new SimpleRecord(100L, "foo".getBytes());
        SimpleRecord secondBatchRecord = new SimpleRecord(200L, "bar".getBytes());
        fileRecords.append(MemoryRecords.withRecords(magic, 0L, compression, CREATE_TIME, firstBatchRecord));
        fileRecords.append(MemoryRecords.withRecords(magic, 1L, compression, CREATE_TIME, secondBatchRecord));
        fileRecords.flush();
        fileRecords.truncateTo(fileRecords.sizeInBytes() - 13);
        FileLogInputStream logInputStream = new FileLogInputStream(fileRecords.channel(), 0, fileRecords.sizeInBytes());
        FileChannelRecordBatch firstBatch = logInputStream.nextBatch();
        assertNoProducerData(firstBatch);
        assertGenericRecordBatchData(firstBatch, 0L, 100L, firstBatchRecord);
        assertNull(logInputStream.nextBatch());
    }
}
Also used: FileChannelRecordBatch (org.apache.kafka.common.record.FileLogInputStream.FileChannelRecordBatch), Test (org.junit.Test)
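
The 13 bytes removed by truncateTo cut into the second batch, so the log input stream treats it as incomplete and nextBatch() returns null rather than a partial batch. A standalone sketch of the same effect via the public FileRecords.batches() view; the temp-file setup here is mine:

import java.io.File;
import java.io.IOException;
import org.apache.kafka.common.record.CompressionType;
import org.apache.kafka.common.record.FileRecords;
import org.apache.kafka.common.record.MemoryRecords;
import org.apache.kafka.common.record.RecordBatch;
import org.apache.kafka.common.record.SimpleRecord;
import org.apache.kafka.common.record.TimestampType;

public class IncompleteBatchSketch {
    public static void main(String[] args) throws IOException {
        File file = File.createTempFile("kafka-batches", ".log");
        file.deleteOnExit();
        try (FileRecords fileRecords = FileRecords.open(file)) {
            fileRecords.append(MemoryRecords.withRecords(RecordBatch.MAGIC_VALUE_V2, 0L,
                    CompressionType.NONE, TimestampType.CREATE_TIME,
                    new SimpleRecord(100L, "foo".getBytes())));
            fileRecords.append(MemoryRecords.withRecords(RecordBatch.MAGIC_VALUE_V2, 1L,
                    CompressionType.NONE, TimestampType.CREATE_TIME,
                    new SimpleRecord(200L, "bar".getBytes())));
            fileRecords.flush();
            // Chop 13 bytes off the tail: the second batch is now torn.
            fileRecords.truncateTo(fileRecords.sizeInBytes() - 13);
            // Only the first, complete batch is surfaced; the torn one is skipped.
            for (RecordBatch batch : fileRecords.batches())
                System.out.println("batch at offset " + batch.baseOffset());
        }
    }
}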

Example 9 with FileChannelRecordBatch

Use of org.apache.kafka.common.record.FileLogInputStream.FileChannelRecordBatch in project kafka by apache.

From the class FileLogInputStreamTest, the method testBatchIterationWithMultipleRecordsPerBatch:

@ParameterizedTest
@ArgumentsSource(FileLogInputStreamArgumentsProvider.class)
public void testBatchIterationWithMultipleRecordsPerBatch(Args args) throws IOException {
    CompressionType compression = args.compression;
    byte magic = args.magic;
    if (magic < MAGIC_VALUE_V2 && compression == CompressionType.NONE)
        return;
    if (compression == CompressionType.ZSTD && magic < MAGIC_VALUE_V2)
        return;
    try (FileRecords fileRecords = FileRecords.open(tempFile())) {
        SimpleRecord[] firstBatchRecords = new SimpleRecord[] {
            new SimpleRecord(3241324L, "a".getBytes(), "1".getBytes()),
            new SimpleRecord(234280L, "b".getBytes(), "2".getBytes())
        };
        SimpleRecord[] secondBatchRecords = new SimpleRecord[] {
            new SimpleRecord(238423489L, "c".getBytes(), "3".getBytes()),
            new SimpleRecord(897839L, null, "4".getBytes()),
            new SimpleRecord(8234020L, "e".getBytes(), null)
        };
        fileRecords.append(MemoryRecords.withRecords(magic, 0L, compression, CREATE_TIME, firstBatchRecords));
        fileRecords.append(MemoryRecords.withRecords(magic, 1L, compression, CREATE_TIME, secondBatchRecords));
        fileRecords.flush();
        FileLogInputStream logInputStream = new FileLogInputStream(fileRecords, 0, fileRecords.sizeInBytes());
        FileChannelRecordBatch firstBatch = logInputStream.nextBatch();
        assertNoProducerData(firstBatch);
        assertGenericRecordBatchData(args, firstBatch, 0L, 3241324L, firstBatchRecords);
        FileChannelRecordBatch secondBatch = logInputStream.nextBatch();
        assertNoProducerData(secondBatch);
        assertGenericRecordBatchData(args, secondBatch, 1L, 238423489L, secondBatchRecords);
        assertNull(logInputStream.nextBatch());
    }
}
Also used: FileChannelRecordBatch (org.apache.kafka.common.record.FileLogInputStream.FileChannelRecordBatch), ParameterizedTest (org.junit.jupiter.params.ParameterizedTest), ArgumentsSource (org.junit.jupiter.params.provider.ArgumentsSource)
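
Beyond these batch-level assertions, each FileChannelRecordBatch is itself an Iterable of org.apache.kafka.common.record.Record, which is essentially what assertGenericRecordBatchData walks. A minimal sketch of reading the individual records back (the helper name printRecords is mine; iterating a file-backed batch reads it, and if needed decompresses it, from the channel):

// Hypothetical helper: iterate the records inside one batch.
static void printRecords(FileChannelRecordBatch batch) {
    for (Record record : batch) {
        // hasKey()/hasValue() guard the null key/value cases in secondBatchRecords above.
        System.out.printf("offset=%d timestamp=%d hasKey=%b hasValue=%b%n",
                record.offset(), record.timestamp(), record.hasKey(), record.hasValue());
    }
}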

Example 10 with FileChannelRecordBatch

Use of org.apache.kafka.common.record.FileLogInputStream.FileChannelRecordBatch in project kafka by apache.

From the class FileLogInputStreamTest, the method testSimpleBatchIteration:

@ParameterizedTest
@ArgumentsSource(FileLogInputStreamArgumentsProvider.class)
public void testSimpleBatchIteration(Args args) throws IOException {
    CompressionType compression = args.compression;
    byte magic = args.magic;
    if (compression == CompressionType.ZSTD && magic < MAGIC_VALUE_V2)
        return;
    try (FileRecords fileRecords = FileRecords.open(tempFile())) {
        SimpleRecord firstBatchRecord = new SimpleRecord(3241324L, "a".getBytes(), "foo".getBytes());
        SimpleRecord secondBatchRecord = new SimpleRecord(234280L, "b".getBytes(), "bar".getBytes());
        fileRecords.append(MemoryRecords.withRecords(magic, 0L, compression, CREATE_TIME, firstBatchRecord));
        fileRecords.append(MemoryRecords.withRecords(magic, 1L, compression, CREATE_TIME, secondBatchRecord));
        fileRecords.flush();
        FileLogInputStream logInputStream = new FileLogInputStream(fileRecords, 0, fileRecords.sizeInBytes());
        FileChannelRecordBatch firstBatch = logInputStream.nextBatch();
        assertGenericRecordBatchData(args, firstBatch, 0L, 3241324L, firstBatchRecord);
        assertNoProducerData(firstBatch);
        FileChannelRecordBatch secondBatch = logInputStream.nextBatch();
        assertGenericRecordBatchData(args, secondBatch, 1L, 234280L, secondBatchRecord);
        assertNoProducerData(secondBatch);
        assertNull(logInputStream.nextBatch());
    }
}
Also used: FileChannelRecordBatch (org.apache.kafka.common.record.FileLogInputStream.FileChannelRecordBatch), ParameterizedTest (org.junit.jupiter.params.ParameterizedTest), ArgumentsSource (org.junit.jupiter.params.provider.ArgumentsSource)
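
The Args parameter in these parameterized tests is supplied by a JUnit 5 ArgumentsProvider that enumerates magic/compression combinations. The real FileLogInputStreamArgumentsProvider is not shown on this page, so the following is only a sketch of the pattern; invalid combinations (for example ZSTD below magic v2) are filtered by the guards inside the tests themselves:

import java.util.stream.Stream;
import org.apache.kafka.common.record.CompressionType;
import org.apache.kafka.common.record.RecordBatch;
import org.junit.jupiter.api.extension.ExtensionContext;
import org.junit.jupiter.params.provider.Arguments;
import org.junit.jupiter.params.provider.ArgumentsProvider;

// Sketch only: the field names mirror the args.magic / args.compression usage above.
class FileLogInputStreamArgumentsProviderSketch implements ArgumentsProvider {
    static class Args {
        final byte magic;
        final CompressionType compression;
        Args(byte magic, CompressionType compression) {
            this.magic = magic;
            this.compression = compression;
        }
    }

    @Override
    public Stream<? extends Arguments> provideArguments(ExtensionContext context) {
        Stream.Builder<Arguments> builder = Stream.builder();
        for (byte magic = RecordBatch.MAGIC_VALUE_V0; magic <= RecordBatch.MAGIC_VALUE_V2; magic++)
            for (CompressionType compression : CompressionType.values())
                builder.add(Arguments.of(new Args(magic, compression)));
        return builder.build();
    }
}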

Aggregations

FileChannelRecordBatch (org.apache.kafka.common.record.FileLogInputStream.FileChannelRecordBatch): 12
Test (org.junit.Test): 5
ParameterizedTest (org.junit.jupiter.params.ParameterizedTest): 5
ArgumentsSource (org.junit.jupiter.params.provider.ArgumentsSource): 5
ByteBuffer (java.nio.ByteBuffer): 2
File (java.io.File): 1
ArrayList (java.util.ArrayList): 1
Collections (java.util.Collections): 1
Iterator (java.util.Iterator): 1
List (java.util.List): 1
OptionalInt (java.util.OptionalInt): 1
CompletableFuture (java.util.concurrent.CompletableFuture): 1
LeaderChangeMessage (org.apache.kafka.common.message.LeaderChangeMessage): 1
ByteBufferAccessor (org.apache.kafka.common.protocol.ByteBufferAccessor): 1
ControlRecordType (org.apache.kafka.common.record.ControlRecordType): 1
FileRecords (org.apache.kafka.common.record.FileRecords): 1
Record (org.apache.kafka.common.record.Record): 1
LogContext (org.apache.kafka.common.utils.LogContext): 1
Time (org.apache.kafka.common.utils.Time): 1
MetadataRecordSerde (org.apache.kafka.metadata.MetadataRecordSerde): 1