Use of org.apache.kafka.common.record.FileLogInputStream.FileChannelRecordBatch in project apache-kafka-on-k8s by banzaicloud.
From the class FileLogInputStreamTest, the method testWriteTo:
@Test
public void testWriteTo() throws IOException {
    try (FileRecords fileRecords = FileRecords.open(tempFile())) {
        fileRecords.append(MemoryRecords.withRecords(magic, compression, new SimpleRecord("foo".getBytes())));
        fileRecords.flush();

        FileLogInputStream logInputStream = new FileLogInputStream(fileRecords.channel(), 0, fileRecords.sizeInBytes());

        FileChannelRecordBatch batch = logInputStream.nextBatch();
        assertNotNull(batch);
        assertEquals(magic, batch.magic());

        ByteBuffer buffer = ByteBuffer.allocate(128);
        batch.writeTo(buffer);
        buffer.flip();

        MemoryRecords memRecords = MemoryRecords.readableRecords(buffer);
        List<Record> records = Utils.toList(memRecords.records().iterator());
        assertEquals(1, records.size());

        Record record0 = records.get(0);
        assertTrue(record0.hasMagic(magic));
        assertEquals("foo", Utils.utf8(record0.value(), record0.valueSize()));
    }
}
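writeTo copies the raw batch bytes, header included, into the destination buffer, which is why the result can be re-wrapped with MemoryRecords.readableRecords. The hard-coded 128-byte buffer is enough for this single small record; a more general pattern, sized from the batch itself, might look like the following sketch (same test scaffolding as above assumed):

    // Sketch: size the destination buffer from the batch header instead of guessing.
    FileChannelRecordBatch batch = logInputStream.nextBatch();
    ByteBuffer buffer = ByteBuffer.allocate(batch.sizeInBytes());
    batch.writeTo(buffer);
    buffer.flip();
    MemoryRecords copy = MemoryRecords.readableRecords(buffer);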
Use of org.apache.kafka.common.record.FileLogInputStream.FileChannelRecordBatch in project apache-kafka-on-k8s by banzaicloud.
From the class FileLogInputStreamTest, the method testSimpleBatchIteration:
@Test
public void testSimpleBatchIteration() throws IOException {
    try (FileRecords fileRecords = FileRecords.open(tempFile())) {
        SimpleRecord firstBatchRecord = new SimpleRecord(3241324L, "a".getBytes(), "foo".getBytes());
        SimpleRecord secondBatchRecord = new SimpleRecord(234280L, "b".getBytes(), "bar".getBytes());

        fileRecords.append(MemoryRecords.withRecords(magic, 0L, compression, CREATE_TIME, firstBatchRecord));
        fileRecords.append(MemoryRecords.withRecords(magic, 1L, compression, CREATE_TIME, secondBatchRecord));
        fileRecords.flush();

        FileLogInputStream logInputStream = new FileLogInputStream(fileRecords.channel(), 0, fileRecords.sizeInBytes());

        FileChannelRecordBatch firstBatch = logInputStream.nextBatch();
        assertGenericRecordBatchData(firstBatch, 0L, 3241324L, firstBatchRecord);
        assertNoProducerData(firstBatch);

        FileChannelRecordBatch secondBatch = logInputStream.nextBatch();
        assertGenericRecordBatchData(secondBatch, 1L, 234280L, secondBatchRecord);
        assertNoProducerData(secondBatch);

        assertNull(logInputStream.nextBatch());
    }
}
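Outside a test, the same stream is usually drained in a loop until nextBatch() returns null. A minimal sketch of that loop, assuming the channel-based constructor from this fork, a hypothetical segment file path, and a caller in the org.apache.kafka.common.record package (FileLogInputStream is package-private):

    try (FileRecords fileRecords = FileRecords.open(new File("some-segment.log"))) { // hypothetical path
        FileLogInputStream in = new FileLogInputStream(fileRecords.channel(), 0, fileRecords.sizeInBytes());
        FileChannelRecordBatch batch;
        while ((batch = in.nextBatch()) != null) {
            // Records are read lazily from the underlying channel when the batch is iterated.
            for (Record record : batch)
                System.out.println(batch.baseOffset() + " -> " + record.offset());
        }
    }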
Use of org.apache.kafka.common.record.FileLogInputStream.FileChannelRecordBatch in project apache-kafka-on-k8s by banzaicloud.
From the class FileLogInputStreamTest, the method testBatchIterationWithMultipleRecordsPerBatch:
@Test
public void testBatchIterationWithMultipleRecordsPerBatch() throws IOException {
    // Message format versions below v2 without compression store one record per batch,
    // so multi-record batches cannot be exercised there.
    if (magic < MAGIC_VALUE_V2 && compression == CompressionType.NONE)
        return;

    try (FileRecords fileRecords = FileRecords.open(tempFile())) {
        SimpleRecord[] firstBatchRecords = new SimpleRecord[] {
            new SimpleRecord(3241324L, "a".getBytes(), "1".getBytes()),
            new SimpleRecord(234280L, "b".getBytes(), "2".getBytes())
        };
        SimpleRecord[] secondBatchRecords = new SimpleRecord[] {
            new SimpleRecord(238423489L, "c".getBytes(), "3".getBytes()),
            new SimpleRecord(897839L, null, "4".getBytes()),
            new SimpleRecord(8234020L, "e".getBytes(), null)
        };

        fileRecords.append(MemoryRecords.withRecords(magic, 0L, compression, CREATE_TIME, firstBatchRecords));
        fileRecords.append(MemoryRecords.withRecords(magic, 1L, compression, CREATE_TIME, secondBatchRecords));
        fileRecords.flush();

        FileLogInputStream logInputStream = new FileLogInputStream(fileRecords.channel(), 0, fileRecords.sizeInBytes());

        FileChannelRecordBatch firstBatch = logInputStream.nextBatch();
        assertNoProducerData(firstBatch);
        assertGenericRecordBatchData(firstBatch, 0L, 3241324L, firstBatchRecords);

        FileChannelRecordBatch secondBatch = logInputStream.nextBatch();
        assertNoProducerData(secondBatch);
        assertGenericRecordBatchData(secondBatch, 1L, 238423489L, secondBatchRecords);

        assertNull(logInputStream.nextBatch());
    }
}
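assertNoProducerData and assertGenericRecordBatchData are helpers defined in the same test class. Roughly, they verify batch-level fields; the following sketch shows the idea rather than the helpers' exact bodies:

    // Sketch of the batch-level checks the helpers above perform (approximate, not verbatim).
    FileChannelRecordBatch batch = logInputStream.nextBatch();
    assertEquals(RecordBatch.NO_PRODUCER_ID, batch.producerId());    // no producer data
    assertEquals(RecordBatch.NO_PRODUCER_EPOCH, batch.producerEpoch());
    assertFalse(batch.isTransactional());
    assertEquals(0L, batch.baseOffset());                            // expected base offset
    if (magic >= MAGIC_VALUE_V1)
        assertEquals(3241324L, batch.maxTimestamp());                // max record timestamp (v1+)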
Use of org.apache.kafka.common.record.FileLogInputStream.FileChannelRecordBatch in project kafka by apache.
From the class FileLogInputStreamTest, the method testBatchIterationIncompleteBatch:
@ParameterizedTest
@ArgumentsSource(FileLogInputStreamArgumentsProvider.class)
public void testBatchIterationIncompleteBatch(Args args) throws IOException {
    CompressionType compression = args.compression;
    byte magic = args.magic;
    // ZSTD is only supported by message format v2, so skip the unsupported combinations.
    if (compression == CompressionType.ZSTD && magic < MAGIC_VALUE_V2)
        return;

    try (FileRecords fileRecords = FileRecords.open(tempFile())) {
        SimpleRecord firstBatchRecord = new SimpleRecord(100L, "foo".getBytes());
        SimpleRecord secondBatchRecord = new SimpleRecord(200L, "bar".getBytes());

        fileRecords.append(MemoryRecords.withRecords(magic, 0L, compression, CREATE_TIME, firstBatchRecord));
        fileRecords.append(MemoryRecords.withRecords(magic, 1L, compression, CREATE_TIME, secondBatchRecord));
        fileRecords.flush();

        // Chop 13 bytes off the end so the second batch is left incomplete on disk.
        fileRecords.truncateTo(fileRecords.sizeInBytes() - 13);

        FileLogInputStream logInputStream = new FileLogInputStream(fileRecords, 0, fileRecords.sizeInBytes());

        FileChannelRecordBatch firstBatch = logInputStream.nextBatch();
        assertNoProducerData(firstBatch);
        assertGenericRecordBatchData(args, firstBatch, 0L, 100L, firstBatchRecord);

        // The truncated second batch is not returned.
        assertNull(logInputStream.nextBatch());
    }
}
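In the apache/kafka version, the magic/compression pair is injected through JUnit 5 parameterization rather than class-level fields. The Args holder and FileLogInputStreamArgumentsProvider referenced above live in FileLogInputStreamTest; a rough sketch of the holder's shape, inferred only from how the test reads args.magic and args.compression, might be:

    // Hypothetical reconstruction of the parameter holder; the exact field set is an assumption.
    private static class Args {
        final byte magic;
        final CompressionType compression;

        Args(byte magic, CompressionType compression) {
            this.magic = magic;
            this.compression = compression;
        }
    }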
Use of org.apache.kafka.common.record.FileLogInputStream.FileChannelRecordBatch in project kafka by apache.
From the class SnapshotFileReader, the method handleMetadataBatch:
private void handleMetadataBatch(FileChannelRecordBatch batch) {
    List<ApiMessageAndVersion> messages = new ArrayList<>();
    for (Record record : batch) {
        ByteBufferAccessor accessor = new ByteBufferAccessor(record.value());
        try {
            ApiMessageAndVersion messageAndVersion = serde.read(accessor, record.valueSize());
            messages.add(messageAndVersion);
        } catch (Throwable e) {
            log.error("unable to read metadata record at offset {}", record.offset(), e);
        }
    }
    listener.handleCommit(MemoryBatchReader.of(Collections.singletonList(Batch.data(
        batch.baseOffset(), batch.partitionLeaderEpoch(), batch.maxTimestamp(),
        batch.sizeInBytes(), messages)), reader -> {
    }));
}
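handleMetadataBatch decodes each data record with the metadata record serde and hands the whole batch to the Raft listener as a single commit. A hedged sketch of how such a handler might be driven from a snapshot's FileRecords; fileRecords and handleControlBatch are assumptions here, standing in for SnapshotFileReader's event-queue-driven loop:

    // Sketch: walk every batch in the snapshot file and route it by type.
    for (FileChannelRecordBatch batch : fileRecords.batches()) {
        if (batch.isControlBatch())
            handleControlBatch(batch);   // e.g. leader change or snapshot header/footer records
        else
            handleMetadataBatch(batch);  // decode ApiMessageAndVersion payloads as shown above
    }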