Use of org.apache.kafka.common.record.FileLogInputStream.FileChannelRecordBatch in project kafka by apache.
From the class SnapshotFileReader, method handleNextBatch:
private void handleNextBatch() {
    if (!batchIterator.hasNext()) {
        // No more batches to read: shut the reader down cleanly.
        beginShutdown("done");
        return;
    }
    FileChannelRecordBatch batch = batchIterator.next();
    if (batch.isControlBatch()) {
        handleControlBatch(batch);
    } else {
        handleMetadataBatch(batch);
    }
    // Queue up processing of the following batch.
    scheduleHandleNextBatch();
}
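Here batchIterator is a field of SnapshotFileReader holding an iterator over the file's batches. As a sketch of where such an iterator can come from, the public FileRecords.batches() API exposes the same FileChannelRecordBatch view over a log file; the file name below is a placeholder for illustration, not taken from the Kafka source:

import java.io.File;
import java.io.IOException;
import java.util.Iterator;
import org.apache.kafka.common.record.FileLogInputStream.FileChannelRecordBatch;
import org.apache.kafka.common.record.FileRecords;

public class SnapshotBatchWalk {
    public static void main(String[] args) throws IOException {
        // "snapshot.log" is a placeholder path.
        try (FileRecords fileRecords = FileRecords.open(new File("snapshot.log"))) {
            Iterator<FileChannelRecordBatch> batchIterator = fileRecords.batches().iterator();
            while (batchIterator.hasNext()) {
                FileChannelRecordBatch batch = batchIterator.next();
                if (batch.isControlBatch()) {
                    // Control batches carry markers (e.g. transaction
                    // commit/abort), not application records.
                    continue;
                }
                System.out.println("data batch at offset " + batch.baseOffset());
            }
        }
    }
}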
Use of org.apache.kafka.common.record.FileLogInputStream.FileChannelRecordBatch in project apache-kafka-on-k8s by banzaicloud.
From the class FileLogInputStreamTest, method testBatchIterationV2:
@Test
public void testBatchIterationV2() throws IOException {
    if (magic != MAGIC_VALUE_V2)
        return;
    try (FileRecords fileRecords = FileRecords.open(tempFile())) {
        long producerId = 83843L;
        short producerEpoch = 15;
        int baseSequence = 234;
        int partitionLeaderEpoch = 9832;
        SimpleRecord[] firstBatchRecords = new SimpleRecord[] {
            new SimpleRecord(3241324L, "a".getBytes(), "1".getBytes()),
            new SimpleRecord(234280L, "b".getBytes(), "2".getBytes())
        };
        SimpleRecord[] secondBatchRecords = new SimpleRecord[] {
            new SimpleRecord(238423489L, "c".getBytes(), "3".getBytes()),
            new SimpleRecord(897839L, null, "4".getBytes()),
            new SimpleRecord(8234020L, "e".getBytes(), null)
        };
        // First batch is idempotent; second batch is transactional, with its
        // base sequence advanced past the first batch's records.
        fileRecords.append(MemoryRecords.withIdempotentRecords(magic, 15L, compression, producerId,
            producerEpoch, baseSequence, partitionLeaderEpoch, firstBatchRecords));
        fileRecords.append(MemoryRecords.withTransactionalRecords(magic, 27L, compression, producerId,
            producerEpoch, baseSequence + firstBatchRecords.length, partitionLeaderEpoch, secondBatchRecords));
        fileRecords.flush();
        FileLogInputStream logInputStream = new FileLogInputStream(fileRecords.channel(), 0, fileRecords.sizeInBytes());
        FileChannelRecordBatch firstBatch = logInputStream.nextBatch();
        assertProducerData(firstBatch, producerId, producerEpoch, baseSequence, false, firstBatchRecords);
        assertGenericRecordBatchData(firstBatch, 15L, 3241324L, firstBatchRecords);
        assertEquals(partitionLeaderEpoch, firstBatch.partitionLeaderEpoch());
        FileChannelRecordBatch secondBatch = logInputStream.nextBatch();
        assertProducerData(secondBatch, producerId, producerEpoch, baseSequence + firstBatchRecords.length, true, secondBatchRecords);
        assertGenericRecordBatchData(secondBatch, 27L, 238423489L, secondBatchRecords);
        assertEquals(partitionLeaderEpoch, secondBatch.partitionLeaderEpoch());
        assertNull(logInputStream.nextBatch());
    }
}
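assertProducerData and assertGenericRecordBatchData are private helpers of this test class. The producer fields they verify are plain accessors defined on the RecordBatch interface; a minimal illustrative helper (hypothetical, not part of the test class) could read them directly:

import org.apache.kafka.common.record.FileLogInputStream.FileChannelRecordBatch;

public class ProducerDataPrinter {
    // For batches with magic < 2 these accessors return sentinel values
    // such as RecordBatch.NO_PRODUCER_ID.
    public static void print(FileChannelRecordBatch batch) {
        System.out.println("producerId=" + batch.producerId()
            + " producerEpoch=" + batch.producerEpoch()
            + " baseSequence=" + batch.baseSequence()
            + " transactional=" + batch.isTransactional()
            + " partitionLeaderEpoch=" + batch.partitionLeaderEpoch());
    }
}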
Use of org.apache.kafka.common.record.FileLogInputStream.FileChannelRecordBatch in project apache-kafka-on-k8s by banzaicloud.
From the class FileLogInputStreamTest, method testBatchIterationIncompleteBatch:
@Test
public void testBatchIterationIncompleteBatch() throws IOException {
    try (FileRecords fileRecords = FileRecords.open(tempFile())) {
        SimpleRecord firstBatchRecord = new SimpleRecord(100L, "foo".getBytes());
        SimpleRecord secondBatchRecord = new SimpleRecord(200L, "bar".getBytes());
        fileRecords.append(MemoryRecords.withRecords(magic, 0L, compression, CREATE_TIME, firstBatchRecord));
        fileRecords.append(MemoryRecords.withRecords(magic, 1L, compression, CREATE_TIME, secondBatchRecord));
        fileRecords.flush();
        // Chop bytes off the tail so the second batch is left incomplete.
        fileRecords.truncateTo(fileRecords.sizeInBytes() - 13);
        FileLogInputStream logInputStream = new FileLogInputStream(fileRecords.channel(), 0, fileRecords.sizeInBytes());
        FileChannelRecordBatch firstBatch = logInputStream.nextBatch();
        assertNoProducerData(firstBatch);
        assertGenericRecordBatchData(firstBatch, 0L, 100L, firstBatchRecord);
        // The truncated trailing batch must not be returned.
        assertNull(logInputStream.nextBatch());
    }
}
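The 13 bytes removed by truncateTo are fewer than the second batch needs, so the file ends mid-batch and nextBatch() returns null rather than a partial batch. A sketch of a read loop that leans on this behavior, draining only complete batches from a possibly truncated file; the file name is a placeholder, and the claim that batches() stops at a truncated tail rests on the assumption that it iterates via the same underlying stream:

import java.io.File;
import java.io.IOException;
import org.apache.kafka.common.record.FileLogInputStream.FileChannelRecordBatch;
import org.apache.kafka.common.record.FileRecords;

public class DrainCompleteBatches {
    public static void main(String[] args) throws IOException {
        // "partial.log" is a placeholder for a segment whose tail may be cut off.
        try (FileRecords fileRecords = FileRecords.open(new File("partial.log"))) {
            // Iteration ends at the first batch whose bytes are not fully
            // present, so a truncated tail is skipped rather than surfaced.
            for (FileChannelRecordBatch batch : fileRecords.batches()) {
                System.out.println("complete batch: offsets " + batch.baseOffset()
                    + ".." + batch.lastOffset());
            }
        }
    }
}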
Use of org.apache.kafka.common.record.FileLogInputStream.FileChannelRecordBatch in project kafka by apache.
From the class FileLogInputStreamTest, method testBatchIterationWithMultipleRecordsPerBatch:
@ParameterizedTest
@ArgumentsSource(FileLogInputStreamArgumentsProvider.class)
public void testBatchIterationWithMultipleRecordsPerBatch(Args args) throws IOException {
    CompressionType compression = args.compression;
    byte magic = args.magic;
    // Uncompressed pre-v2 logs store one record per batch, so skip that case;
    // ZSTD is only supported from message format v2 onward.
    if (magic < MAGIC_VALUE_V2 && compression == CompressionType.NONE)
        return;
    if (compression == CompressionType.ZSTD && magic < MAGIC_VALUE_V2)
        return;
    try (FileRecords fileRecords = FileRecords.open(tempFile())) {
        SimpleRecord[] firstBatchRecords = new SimpleRecord[] {
            new SimpleRecord(3241324L, "a".getBytes(), "1".getBytes()),
            new SimpleRecord(234280L, "b".getBytes(), "2".getBytes())
        };
        SimpleRecord[] secondBatchRecords = new SimpleRecord[] {
            new SimpleRecord(238423489L, "c".getBytes(), "3".getBytes()),
            new SimpleRecord(897839L, null, "4".getBytes()),
            new SimpleRecord(8234020L, "e".getBytes(), null)
        };
        fileRecords.append(MemoryRecords.withRecords(magic, 0L, compression, CREATE_TIME, firstBatchRecords));
        fileRecords.append(MemoryRecords.withRecords(magic, 1L, compression, CREATE_TIME, secondBatchRecords));
        fileRecords.flush();
        FileLogInputStream logInputStream = new FileLogInputStream(fileRecords, 0, fileRecords.sizeInBytes());
        FileChannelRecordBatch firstBatch = logInputStream.nextBatch();
        assertNoProducerData(firstBatch);
        assertGenericRecordBatchData(args, firstBatch, 0L, 3241324L, firstBatchRecords);
        FileChannelRecordBatch secondBatch = logInputStream.nextBatch();
        assertNoProducerData(secondBatch);
        assertGenericRecordBatchData(args, secondBatch, 1L, 238423489L, secondBatchRecords);
        assertNull(logInputStream.nextBatch());
    }
}
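Every FileChannelRecordBatch is itself an Iterable over Record, so the individual records inside a multi-record batch can be walked directly. A minimal sketch, assuming a placeholder segment file; decompression, when the batch is compressed, happens during iteration:

import java.io.File;
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import org.apache.kafka.common.record.FileLogInputStream.FileChannelRecordBatch;
import org.apache.kafka.common.record.FileRecords;
import org.apache.kafka.common.record.Record;

public class BatchRecordWalk {
    public static void main(String[] args) throws IOException {
        // "segment.log" is a placeholder path.
        try (FileRecords fileRecords = FileRecords.open(new File("segment.log"))) {
            for (FileChannelRecordBatch batch : fileRecords.batches()) {
                // Iterating a batch yields its records, decompressing if needed.
                for (Record record : batch) {
                    String value = record.hasValue()
                        ? StandardCharsets.UTF_8.decode(record.value()).toString()
                        : null;
                    System.out.println(record.offset() + " @ " + record.timestamp() + " -> " + value);
                }
            }
        }
    }
}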
Use of org.apache.kafka.common.record.FileLogInputStream.FileChannelRecordBatch in project kafka by apache.
From the class FileLogInputStreamTest, method testSimpleBatchIteration:
@ParameterizedTest
@ArgumentsSource(FileLogInputStreamArgumentsProvider.class)
public void testSimpleBatchIteration(Args args) throws IOException {
    CompressionType compression = args.compression;
    byte magic = args.magic;
    if (compression == CompressionType.ZSTD && magic < MAGIC_VALUE_V2)
        return;
    try (FileRecords fileRecords = FileRecords.open(tempFile())) {
        SimpleRecord firstBatchRecord = new SimpleRecord(3241324L, "a".getBytes(), "foo".getBytes());
        SimpleRecord secondBatchRecord = new SimpleRecord(234280L, "b".getBytes(), "bar".getBytes());
        fileRecords.append(MemoryRecords.withRecords(magic, 0L, compression, CREATE_TIME, firstBatchRecord));
        fileRecords.append(MemoryRecords.withRecords(magic, 1L, compression, CREATE_TIME, secondBatchRecord));
        fileRecords.flush();
        FileLogInputStream logInputStream = new FileLogInputStream(fileRecords, 0, fileRecords.sizeInBytes());
        FileChannelRecordBatch firstBatch = logInputStream.nextBatch();
        assertGenericRecordBatchData(args, firstBatch, 0L, 3241324L, firstBatchRecord);
        assertNoProducerData(firstBatch);
        FileChannelRecordBatch secondBatch = logInputStream.nextBatch();
        assertGenericRecordBatchData(args, secondBatch, 1L, 234280L, secondBatchRecord);
        assertNoProducerData(secondBatch);
        assertNull(logInputStream.nextBatch());
    }
}
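For completeness, a standalone round trip along the same lines as these tests: write two single-record v2 batches, flush, and read them back. This sketch assumes the same era of the API as the snippets above, where MemoryRecords.withRecords takes a CompressionType (newer Kafka versions take a Compression object instead):

import java.io.File;
import java.io.IOException;
import org.apache.kafka.common.record.CompressionType;
import org.apache.kafka.common.record.FileLogInputStream.FileChannelRecordBatch;
import org.apache.kafka.common.record.FileRecords;
import org.apache.kafka.common.record.MemoryRecords;
import org.apache.kafka.common.record.RecordBatch;
import org.apache.kafka.common.record.SimpleRecord;
import org.apache.kafka.common.record.TimestampType;

public class RoundTrip {
    public static void main(String[] args) throws IOException {
        File file = File.createTempFile("roundtrip", ".log");
        file.deleteOnExit();
        try (FileRecords fileRecords = FileRecords.open(file)) {
            // Two single-record batches at base offsets 0 and 1, mirroring the test above.
            fileRecords.append(MemoryRecords.withRecords(RecordBatch.MAGIC_VALUE_V2, 0L,
                CompressionType.NONE, TimestampType.CREATE_TIME,
                new SimpleRecord(3241324L, "a".getBytes(), "foo".getBytes())));
            fileRecords.append(MemoryRecords.withRecords(RecordBatch.MAGIC_VALUE_V2, 1L,
                CompressionType.NONE, TimestampType.CREATE_TIME,
                new SimpleRecord(234280L, "b".getBytes(), "bar".getBytes())));
            fileRecords.flush();
            for (FileChannelRecordBatch batch : fileRecords.batches()) {
                System.out.println("batch baseOffset=" + batch.baseOffset()
                    + " maxTimestamp=" + batch.maxTimestamp());
            }
        }
    }
}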