Use of org.apache.kafka.common.record.CompressionType in project apache-kafka-on-k8s by banzaicloud.
From the class ProducerBatchTest, method testSplitPreservesHeaders.
@Test
public void testSplitPreservesHeaders() {
    for (CompressionType compressionType : CompressionType.values()) {
        MemoryRecordsBuilder builder = MemoryRecords.builder(ByteBuffer.allocate(1024), MAGIC_VALUE_V2,
                compressionType, TimestampType.CREATE_TIME, 0L);
        ProducerBatch batch = new ProducerBatch(new TopicPartition("topic", 1), builder, now);
        Header header = new RecordHeader("header-key", "header-value".getBytes());
        // Append records carrying the header until the batch runs out of space.
        while (true) {
            FutureRecordMetadata future = batch.tryAppend(now, "hi".getBytes(), "there".getBytes(),
                    new Header[] { header }, null, now);
            if (future == null) {
                break;
            }
        }
        // Split into smaller batches and verify every record still carries the original header.
        Deque<ProducerBatch> batches = batch.split(200);
        assertTrue("This batch should be split to multiple small batches.", batches.size() >= 2);
        for (ProducerBatch splitProducerBatch : batches) {
            for (RecordBatch splitBatch : splitProducerBatch.records().batches()) {
                for (Record record : splitBatch) {
                    assertTrue("Header size should be 1.", record.headers().length == 1);
                    assertTrue("Header key should be 'header-key'.", record.headers()[0].key().equals("header-key"));
                    assertTrue("Header value should be 'header-value'.", new String(record.headers()[0].value()).equals("header-value"));
                }
            }
        }
    }
}
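Splitting re-appends each record into a new batch, so the test above hinges on headers surviving that rebuild. Below is a minimal standalone sketch of the same round trip, not taken from the project: the class name HeaderRoundTripSketch is made up, and it assumes the Kafka 1.x-era MemoryRecords.builder and MemoryRecordsBuilder.append overloads used in the test above.

import java.nio.ByteBuffer;
import org.apache.kafka.common.header.Header;
import org.apache.kafka.common.header.internals.RecordHeader;
import org.apache.kafka.common.record.CompressionType;
import org.apache.kafka.common.record.MemoryRecords;
import org.apache.kafka.common.record.MemoryRecordsBuilder;
import org.apache.kafka.common.record.Record;
import org.apache.kafka.common.record.RecordBatch;
import org.apache.kafka.common.record.TimestampType;

public class HeaderRoundTripSketch {
    public static void main(String[] args) {
        // Build a small magic-v2 batch whose single record carries one header,
        // mirroring what testSplitPreservesHeaders appends.
        MemoryRecordsBuilder builder = MemoryRecords.builder(ByteBuffer.allocate(1024),
                RecordBatch.MAGIC_VALUE_V2, CompressionType.GZIP, TimestampType.CREATE_TIME, 0L);
        Header[] headers = new Header[] { new RecordHeader("header-key", "header-value".getBytes()) };
        builder.append(0L, "hi".getBytes(), "there".getBytes(), headers);
        MemoryRecords records = builder.build();

        // Read the serialized records back and confirm the header survived the round trip.
        for (Record record : records.records()) {
            System.out.println(record.headers()[0].key() + " = " + new String(record.headers()[0].value()));
        }
    }
}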
Use of org.apache.kafka.common.record.CompressionType in project apache-kafka-on-k8s by banzaicloud.
From the class ProducerBatchTest, method testSplitPreservesMagicAndCompressionType.
@Test
public void testSplitPreservesMagicAndCompressionType() {
    for (byte magic : Arrays.asList(MAGIC_VALUE_V0, MAGIC_VALUE_V1, MAGIC_VALUE_V2)) {
        for (CompressionType compressionType : CompressionType.values()) {
            if (compressionType == CompressionType.NONE && magic < MAGIC_VALUE_V2)
                continue;
            MemoryRecordsBuilder builder = MemoryRecords.builder(ByteBuffer.allocate(1024), magic,
                    compressionType, TimestampType.CREATE_TIME, 0L);
            ProducerBatch batch = new ProducerBatch(new TopicPartition("topic", 1), builder, now);
            // Append records until the batch runs out of space.
            while (true) {
                FutureRecordMetadata future = batch.tryAppend(now, "hi".getBytes(), "there".getBytes(),
                        Record.EMPTY_HEADERS, null, now);
                if (future == null)
                    break;
            }
            // Every split fragment must keep the original magic version and compression type.
            Deque<ProducerBatch> batches = batch.split(512);
            assertTrue(batches.size() >= 2);
            for (ProducerBatch splitProducerBatch : batches) {
                assertEquals(magic, splitProducerBatch.magic());
                assertTrue(splitProducerBatch.isSplitBatch());
                for (RecordBatch splitBatch : splitProducerBatch.records().batches()) {
                    assertEquals(magic, splitBatch.magic());
                    assertEquals(0L, splitBatch.baseOffset());
                    assertEquals(compressionType, splitBatch.compressionType());
                }
            }
        }
    }
}
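The property this second test preserves is visible directly on RecordBatch: each batch reports the magic version and compression type it was built with. Below is a small standalone sketch that reads those attributes back from freshly built batches, again not from the project: the class name CompressionTypeSketch is illustrative, it assumes the same Kafka 1.x-era builder API as the test, and iterating every CompressionType assumes the compression libraries shipped with kafka-clients are on the classpath.

import java.nio.ByteBuffer;
import org.apache.kafka.common.record.CompressionType;
import org.apache.kafka.common.record.MemoryRecords;
import org.apache.kafka.common.record.MemoryRecordsBuilder;
import org.apache.kafka.common.record.RecordBatch;
import org.apache.kafka.common.record.TimestampType;

public class CompressionTypeSketch {
    public static void main(String[] args) {
        for (CompressionType compressionType : CompressionType.values()) {
            // Build one magic-v2 batch per compression type.
            MemoryRecordsBuilder builder = MemoryRecords.builder(ByteBuffer.allocate(1024),
                    RecordBatch.MAGIC_VALUE_V2, compressionType, TimestampType.CREATE_TIME, 0L);
            builder.append(0L, "key".getBytes(), "value".getBytes());
            MemoryRecords records = builder.build();

            // Each materialized batch reports the magic and compression type it was built with,
            // which is the invariant testSplitPreservesMagicAndCompressionType checks on every fragment.
            for (RecordBatch recordBatch : records.batches()) {
                System.out.println(compressionType + " -> magic=" + recordBatch.magic()
                        + ", compression=" + recordBatch.compressionType());
            }
        }
    }
}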