Usage example of org.apache.kafka.common.record.FileLogInputStream.FileChannelRecordBatch in the Apache Kafka project, taken from the class FileLogInputStreamTest, method testWriteTo.
@ParameterizedTest
@ArgumentsSource(FileLogInputStreamArgumentsProvider.class)
public void testWriteTo(Args args) throws IOException {
    CompressionType compressionType = args.compression;
    byte magicValue = args.magic;
    // ZSTD compression requires the v2 message format; skip unsupported combinations.
    if (magicValue < MAGIC_VALUE_V2 && compressionType == CompressionType.ZSTD) {
        return;
    }
    try (FileRecords fileRecords = FileRecords.open(tempFile())) {
        // Write a single-record batch to the log file and flush it to disk.
        fileRecords.append(MemoryRecords.withRecords(magicValue, compressionType, new SimpleRecord("foo".getBytes())));
        fileRecords.flush();

        FileLogInputStream stream = new FileLogInputStream(fileRecords, 0, fileRecords.sizeInBytes());
        FileChannelRecordBatch batch = stream.nextBatch();
        assertNotNull(batch);
        assertEquals(magicValue, batch.magic());

        // Copy the on-disk batch into an in-memory buffer and re-read it as MemoryRecords.
        ByteBuffer copied = ByteBuffer.allocate(128);
        batch.writeTo(copied);
        copied.flip();

        MemoryRecords reread = MemoryRecords.readableRecords(copied);
        List<Record> records = Utils.toList(reread.records().iterator());
        assertEquals(1, records.size());

        // The round-tripped record must retain its magic byte and value.
        Record first = records.get(0);
        assertTrue(first.hasMagic(magicValue));
        assertEquals("foo", Utils.utf8(first.value(), first.valueSize()));
    }
}
Usage example of org.apache.kafka.common.record.FileLogInputStream.FileChannelRecordBatch in the Apache Kafka project, taken from the class FileLogInputStreamTest, method testBatchIterationV2.
@ParameterizedTest
@ArgumentsSource(FileLogInputStreamArgumentsProvider.class)
public void testBatchIterationV2(Args args) throws IOException {
    CompressionType compressionType = args.compression;
    byte magicValue = args.magic;
    // Producer/transactional batch metadata only exists in the v2 message format.
    if (magicValue != MAGIC_VALUE_V2) {
        return;
    }
    try (FileRecords fileRecords = FileRecords.open(tempFile())) {
        long producerId = 83843L;
        short producerEpoch = 15;
        int baseSequence = 234;
        int partitionLeaderEpoch = 9832;

        // First batch: idempotent (non-transactional); second batch: transactional,
        // with sequence numbers continuing from the first batch.
        SimpleRecord[] idempotentRecords = {
            new SimpleRecord(3241324L, "a".getBytes(), "1".getBytes()),
            new SimpleRecord(234280L, "b".getBytes(), "2".getBytes())
        };
        SimpleRecord[] transactionalRecords = {
            new SimpleRecord(238423489L, "c".getBytes(), "3".getBytes()),
            new SimpleRecord(897839L, null, "4".getBytes()),
            new SimpleRecord(8234020L, "e".getBytes(), null)
        };

        fileRecords.append(MemoryRecords.withIdempotentRecords(magicValue, 15L, compressionType, producerId,
            producerEpoch, baseSequence, partitionLeaderEpoch, idempotentRecords));
        fileRecords.append(MemoryRecords.withTransactionalRecords(magicValue, 27L, compressionType, producerId,
            producerEpoch, baseSequence + idempotentRecords.length, partitionLeaderEpoch, transactionalRecords));
        fileRecords.flush();

        FileLogInputStream stream = new FileLogInputStream(fileRecords, 0, fileRecords.sizeInBytes());

        // Verify the idempotent batch (isTransactional == false).
        FileChannelRecordBatch batch = stream.nextBatch();
        assertProducerData(batch, producerId, producerEpoch, baseSequence, false, idempotentRecords);
        assertGenericRecordBatchData(args, batch, 15L, 3241324L, idempotentRecords);
        assertEquals(partitionLeaderEpoch, batch.partitionLeaderEpoch());

        // Verify the transactional batch (isTransactional == true).
        batch = stream.nextBatch();
        assertProducerData(batch, producerId, producerEpoch, baseSequence + idempotentRecords.length, true,
            transactionalRecords);
        assertGenericRecordBatchData(args, batch, 27L, 238423489L, transactionalRecords);
        assertEquals(partitionLeaderEpoch, batch.partitionLeaderEpoch());

        // The stream must be exhausted after the two batches.
        assertNull(stream.nextBatch());
    }
}
Aggregations