Usage of org.apache.kafka.common.header.Header in the project apache-kafka-on-k8s by banzaicloud: class ProducerBatchTest, method testSplitPreservesHeaders.
@Test
public void testSplitPreservesHeaders() {
    for (CompressionType compressionType : CompressionType.values()) {
        MemoryRecordsBuilder builder = MemoryRecords.builder(ByteBuffer.allocate(1024), MAGIC_VALUE_V2, compressionType, TimestampType.CREATE_TIME, 0L);
        ProducerBatch batch = new ProducerBatch(new TopicPartition("topic", 1), builder, now);
        Header header = new RecordHeader("header-key", "header-value".getBytes());

        // Fill the batch until it refuses further appends (tryAppend returns null
        // once the underlying buffer is full), so the split below is forced to
        // produce several smaller batches.
        FutureRecordMetadata future;
        do {
            future = batch.tryAppend(now, "hi".getBytes(), "there".getBytes(), new Header[] { header }, null, now);
        } while (future != null);

        Deque<ProducerBatch> batches = batch.split(200);
        assertTrue("This batch should be split to multiple small batches.", batches.size() >= 2);

        // Every record in every split batch must still carry the single original header.
        for (ProducerBatch splitProducerBatch : batches) {
            for (RecordBatch splitBatch : splitProducerBatch.records().batches()) {
                for (Record record : splitBatch) {
                    Header[] recordHeaders = record.headers();
                    assertTrue("Header size should be 1.", recordHeaders.length == 1);
                    assertTrue("Header key should be 'header-key'.", recordHeaders[0].key().equals("header-key"));
                    assertTrue("Header value should be 'header-value'.", new String(recordHeaders[0].value()).equals("header-value"));
                }
            }
        }
    }
}
Usage of org.apache.kafka.common.header.Header in the project apache-kafka-on-k8s by banzaicloud: class RecordHeadersTest, method testAdd.
@Test
public void testAdd() {
    Headers headers = new RecordHeaders();

    // After the first add, iteration must yield exactly that header first.
    headers.add(new RecordHeader("key", "value".getBytes()));
    assertHeader("key", "value", headers.iterator().next());

    // A second, distinct key must be retrievable via lastHeader and raise the count to 2.
    headers.add(new RecordHeader("key2", "value2".getBytes()));
    assertHeader("key2", "value2", headers.lastHeader("key2"));
    assertEquals(2, getCount(headers));
}
Usage of org.apache.kafka.common.header.Header in the project LogHub by fbacchella: class Kafka, method run.
/**
 * Consumer loop: subscribes to {@code topic} and polls until the thread is
 * interrupted, converting each Kafka record into an {@code Event} (timestamp,
 * headers, decoded payload) and forwarding it via {@code send}.
 * <p>
 * Offsets are committed asynchronously after each fully processed poll batch;
 * on interruption the offset of the current record is committed synchronously
 * before the loop exits and the consumer is closed.
 */
@Override
public void run() {
    consumer.subscribe(Collections.singletonList(topic));
    boolean broke = false;
    while (!isInterrupted()) {
        ConsumerRecords<Long, byte[]> consumerRecords = consumer.poll(100);
        if (consumerRecords.count() == 0) {
            continue;
        }
        for (ConsumerRecord<Long, byte[]> record : consumerRecords) {
            ConnectionContext ctxt = new KafkaContext(record.topic());
            Event event = emptyEvent(ctxt);
            // Only CREATE_TIME is producer-side event time; LOG_APPEND_TIME
            // (broker ingestion time) is deliberately ignored here.
            if (record.timestampType() == TimestampType.CREATE_TIME) {
                event.setTimestamp(new Date(record.timestamp()));
            }
            Header[] headers = record.headers().toArray();
            if (headers.length > 0) {
                Map<String, byte[]> headersMap = new HashMap<>(headers.length);
                Arrays.stream(headers).forEach(i -> headersMap.put(i.key(), i.value()));
                event.put("headers", headersMap);
            }
            byte[] content = record.value();
            // value() is null for tombstone/compacted records; guard to avoid
            // an NPE on content.length.
            if (content != null) {
                try {
                    event.putAll(decoder.decode(ctxt, content, 0, content.length));
                    send(event);
                } catch (DecodeException e) {
                    logger.error(e.getMessage());
                    logger.catching(e);
                }
            }
            if (isInterrupted()) {
                // Kafka's committed offset is the offset of the NEXT record to
                // consume (lastProcessed + 1). Committing record.offset() would
                // re-deliver this already-processed record after a restart.
                consumer.commitSync(Collections.singletonMap(new TopicPartition(record.topic(), record.partition()), new OffsetAndMetadata(record.offset() + 1)));
                broke = true;
                break;
            }
        }
        if (!broke) {
            consumer.commitAsync();
        } else {
            break;
        }
    }
    consumer.close();
}
Aggregations