Search in sources:

Example 11 with Header

use of org.apache.kafka.common.header.Header in project apache-kafka-on-k8s by banzaicloud.

From the class ProducerBatchTest, method testSplitPreservesHeaders:

@Test
public void testSplitPreservesHeaders() {
    // Verify, for every compression type, that splitting an oversized batch
    // carries each record's headers into the resulting smaller batches.
    for (CompressionType compression : CompressionType.values()) {
        MemoryRecordsBuilder recordsBuilder = MemoryRecords.builder(ByteBuffer.allocate(1024), MAGIC_VALUE_V2, compression, TimestampType.CREATE_TIME, 0L);
        ProducerBatch producerBatch = new ProducerBatch(new TopicPartition("topic", 1), recordsBuilder, now);
        Header header = new RecordHeader("header-key", "header-value".getBytes());
        // Fill the batch to capacity: tryAppend returns null once no room remains.
        FutureRecordMetadata appendResult;
        do {
            appendResult = producerBatch.tryAppend(now, "hi".getBytes(), "there".getBytes(), new Header[] { header }, null, now);
        } while (appendResult != null);
        Deque<ProducerBatch> splitBatches = producerBatch.split(200);
        assertTrue("This batch should be split to multiple small batches.", splitBatches.size() >= 2);
        // Every record in every split batch must still carry the original header.
        for (ProducerBatch smallBatch : splitBatches) {
            for (RecordBatch recordBatch : smallBatch.records().batches()) {
                for (Record record : recordBatch) {
                    Header[] recordHeaders = record.headers();
                    assertTrue("Header size should be 1.", recordHeaders.length == 1);
                    assertTrue("Header key should be 'header-key'.", recordHeaders[0].key().equals("header-key"));
                    assertTrue("Header value should be 'header-value'.", new String(recordHeaders[0].value()).equals("header-value"));
                }
            }
        }
    }
}
Also used : RecordHeader(org.apache.kafka.common.header.internals.RecordHeader) Header(org.apache.kafka.common.header.Header) TopicPartition(org.apache.kafka.common.TopicPartition) RecordBatch(org.apache.kafka.common.record.RecordBatch) MemoryRecordsBuilder(org.apache.kafka.common.record.MemoryRecordsBuilder) Record(org.apache.kafka.common.record.Record) LegacyRecord(org.apache.kafka.common.record.LegacyRecord) CompressionType(org.apache.kafka.common.record.CompressionType) RecordHeader(org.apache.kafka.common.header.internals.RecordHeader) Test(org.junit.Test)

Example 12 with Header

use of org.apache.kafka.common.header.Header in project apache-kafka-on-k8s by banzaicloud.

From the class RecordHeadersTest, method testAdd:

@Test
public void testAdd() {
    // An added header is immediately visible via iteration.
    Headers recordHeaders = new RecordHeaders();
    recordHeaders.add(new RecordHeader("key", "value".getBytes()));
    Header firstHeader = recordHeaders.iterator().next();
    assertHeader("key", "value", firstHeader);
    // A second header under a different key is retrievable via lastHeader,
    // and the total count reflects both additions.
    recordHeaders.add(new RecordHeader("key2", "value2".getBytes()));
    assertHeader("key2", "value2", recordHeaders.lastHeader("key2"));
    assertEquals(2, getCount(recordHeaders));
}
Also used : Header(org.apache.kafka.common.header.Header) Headers(org.apache.kafka.common.header.Headers) Test(org.junit.Test)

Example 13 with Header

use of org.apache.kafka.common.header.Header in project LogHub by fbacchella.

From the class Kafka, method run:

/**
 * Consumer loop: polls the subscribed topic, turns each record into an
 * {@code Event} (timestamp, headers, decoded payload) and forwards it.
 * Commits asynchronously after each fully processed poll; on interruption,
 * commits synchronously up to the last processed record and exits.
 */
@Override
public void run() {
    consumer.subscribe(Collections.singletonList(topic));
    boolean broke = false;
    while (!isInterrupted()) {
        ConsumerRecords<Long, byte[]> consumerRecords = consumer.poll(100);
        if (consumerRecords.count() == 0) {
            continue;
        }
        for (ConsumerRecord<Long, byte[]> record : consumerRecords) {
            ConnectionContext ctxt = new KafkaContext(record.topic());
            Event event = emptyEvent(ctxt);
            // Only CREATE_TIME timestamps reflect the producer's event time.
            if (record.timestampType() == TimestampType.CREATE_TIME) {
                event.setTimestamp(new Date(record.timestamp()));
            }
            Header[] headers = record.headers().toArray();
            if (headers.length > 0) {
                Map<String, byte[]> headersMap = new HashMap<>(headers.length);
                Arrays.stream(headers).forEach(i -> headersMap.put(i.key(), i.value()));
                event.put("headers", headersMap);
            }
            byte[] content = record.value();
            // record.value() is null for tombstone records; skip decoding to
            // avoid an NPE on content.length.
            if (content != null) {
                try {
                    event.putAll(decoder.decode(ctxt, content, 0, content.length));
                    send(event);
                } catch (DecodeException e) {
                    logger.error(e.getMessage());
                    logger.catching(e);
                }
            }
            if (isInterrupted()) {
                // Kafka's commit contract: the committed offset is the offset of
                // the NEXT record to consume. Committing record.offset() would
                // replay this record after a restart, hence offset() + 1.
                consumer.commitSync(Collections.singletonMap(new TopicPartition(record.topic(), record.partition()), new OffsetAndMetadata(record.offset() + 1)));
                broke = true;
                break;
            }
        }
        if (!broke) {
            consumer.commitAsync();
        } else {
            break;
        }
    }
    consumer.close();
}
Also used : HashMap(java.util.HashMap) DecodeException(loghub.Decoder.DecodeException) Date(java.util.Date) Header(org.apache.kafka.common.header.Header) TopicPartition(org.apache.kafka.common.TopicPartition) OffsetAndMetadata(org.apache.kafka.clients.consumer.OffsetAndMetadata) Event(loghub.Event) ConnectionContext(loghub.ConnectionContext)

Aggregations

Header (org.apache.kafka.common.header.Header)13 RecordHeader (org.apache.kafka.common.header.internals.RecordHeader)9 Test (org.junit.Test)6 ByteBuffer (java.nio.ByteBuffer)5 TopicPartition (org.apache.kafka.common.TopicPartition)4 DataOutputStream (java.io.DataOutputStream)3 ByteBufferOutputStream (org.apache.kafka.common.utils.ByteBufferOutputStream)3 ConsumerRecord (org.apache.kafka.clients.consumer.ConsumerRecord)2 MemoryRecordsBuilder (org.apache.kafka.common.record.MemoryRecordsBuilder)2 BufferUnderflowException (java.nio.BufferUnderflowException)1 Date (java.util.Date)1 HashMap (java.util.HashMap)1 List (java.util.List)1 Map (java.util.Map)1 ExecutionException (java.util.concurrent.ExecutionException)1 ConnectionContext (loghub.ConnectionContext)1 DecodeException (loghub.Decoder.DecodeException)1 Event (loghub.Event)1 OffsetAndMetadata (org.apache.kafka.clients.consumer.OffsetAndMetadata)1 OffsetCommitCallback (org.apache.kafka.clients.consumer.OffsetCommitCallback)1