Search in sources :

Example 26 with RecordHeader

Use of org.apache.kafka.common.header.internals.RecordHeader in the Eclipse Hono project.

The method createRecordWithElapsedTtl from the class AsyncHandlingAutoCommitKafkaConsumerTest.

/**
 * Creates a consumer record whose time-to-live has already elapsed: the record carries a
 * {@code ttl} header of one second and a {@code creation-time} header set two seconds in
 * the past, so any TTL check performed on it must consider it expired.
 *
 * @return A record with an elapsed TTL, an empty buffer payload and no headers other than
 *         {@code ttl} and {@code creation-time}.
 */
private ConsumerRecord<String, Buffer> createRecordWithElapsedTtl() {
    // Use an explicit charset: String.getBytes() without one depends on the platform
    // default and can differ between the machine that writes and the one that reads.
    final byte[] ttl1Second = "1".getBytes(StandardCharsets.UTF_8);
    final RecordHeader ttl = new RecordHeader("ttl", ttl1Second);
    // creation time is encoded as JSON epoch-milliseconds, two seconds in the past
    final byte[] timestamp2SecondsAgo = Json.encode(Instant.now().minusSeconds(2).toEpochMilli())
            .getBytes(StandardCharsets.UTF_8);
    final RecordHeader creationTime = new RecordHeader("creation-time", timestamp2SecondsAgo);
    // timestamp/checksum/size fields are deliberately the "not set" sentinel values;
    // only the headers matter for the TTL check under test
    return new ConsumerRecord<>(TOPIC, PARTITION, 0, ConsumerRecord.NO_TIMESTAMP, TimestampType.NO_TIMESTAMP_TYPE,
            (long) ConsumerRecord.NULL_CHECKSUM, ConsumerRecord.NULL_SIZE, ConsumerRecord.NULL_SIZE,
            "key_0", Buffer.buffer(), new RecordHeaders(new Header[] { ttl, creationTime }));
}
Also used : RecordHeaders(org.apache.kafka.common.header.internals.RecordHeaders) Header(org.apache.kafka.common.header.Header) RecordHeader(org.apache.kafka.common.header.internals.RecordHeader) KafkaConsumerRecord(io.vertx.kafka.client.consumer.KafkaConsumerRecord) ConsumerRecord(org.apache.kafka.clients.consumer.ConsumerRecord)

Example 27 with RecordHeader

Use of org.apache.kafka.common.header.internals.RecordHeader in the Eclipse Hono project.

The method createRecordWithElapsedTtl from the class HonoKafkaConsumerTest.

/**
 * Creates a consumer record whose time-to-live has already elapsed: the record carries a
 * {@code ttl} header of one second and a {@code creation-time} header set two seconds in
 * the past, so any TTL check performed on it must consider it expired.
 *
 * @return A record with an elapsed TTL, an empty buffer payload and no headers other than
 *         {@code ttl} and {@code creation-time}.
 */
private ConsumerRecord<String, Buffer> createRecordWithElapsedTtl() {
    // Use an explicit charset: String.getBytes() without one depends on the platform
    // default and can differ between the machine that writes and the one that reads.
    final byte[] ttl1Second = "1".getBytes(StandardCharsets.UTF_8);
    final RecordHeader ttl = new RecordHeader("ttl", ttl1Second);
    // creation time is encoded as JSON epoch-milliseconds, two seconds in the past
    final byte[] timestamp2SecondsAgo = Json.encode(Instant.now().minusSeconds(2).toEpochMilli())
            .getBytes(StandardCharsets.UTF_8);
    final RecordHeader creationTime = new RecordHeader("creation-time", timestamp2SecondsAgo);
    // timestamp/checksum/size fields are deliberately the "not set" sentinel values;
    // only the headers matter for the TTL check under test
    return new ConsumerRecord<>(TOPIC, PARTITION, 0, ConsumerRecord.NO_TIMESTAMP, TimestampType.NO_TIMESTAMP_TYPE,
            (long) ConsumerRecord.NULL_CHECKSUM, ConsumerRecord.NULL_SIZE, ConsumerRecord.NULL_SIZE,
            "key_0", Buffer.buffer(), new RecordHeaders(new Header[] { ttl, creationTime }));
}
Also used : RecordHeaders(org.apache.kafka.common.header.internals.RecordHeaders) RecordHeader(org.apache.kafka.common.header.internals.RecordHeader) Header(org.apache.kafka.common.header.Header) KafkaConsumerRecord(io.vertx.kafka.client.consumer.KafkaConsumerRecord) ConsumerRecord(org.apache.kafka.clients.consumer.ConsumerRecord)

Example 28 with RecordHeader

Use of org.apache.kafka.common.header.internals.RecordHeader in the ThingsBoard project.

The method publish from the class TbKafkaNode.

/**
 * Publishes the given message's payload to the given Kafka topic.
 * <p>
 * When {@code addMetadataKeyValuesAsKafkaHeaders} is set, each metadata entry of the
 * message is attached as a Kafka record header whose key is prefixed with
 * {@code TB_MSG_MD_PREFIX}; otherwise the payload is sent without headers.
 * The send result (or failure) is handed to {@code processRecord} via the producer callback.
 *
 * @param ctx The rule-engine context used for error reporting and the send callback.
 * @param msg The message whose data (and optionally metadata) is published.
 * @param topic The Kafka topic to publish to.
 */
protected void publish(TbContext ctx, TbMsg msg, String topic) {
    try {
        if (!addMetadataKeyValuesAsKafkaHeaders) {
            // TODO: external system executor
            producer.send(new ProducerRecord<>(topic, msg.getData()), (metadata, e) -> processRecord(ctx, msg, metadata, e));
        } else {
            // copy every metadata key/value into a record header, prefixed to avoid
            // clashing with other header producers; bytes encoded with the configured charset
            Headers headers = new RecordHeaders();
            msg.getMetaData().values().forEach((key, value) -> headers.add(new RecordHeader(TB_MSG_MD_PREFIX + key, value.getBytes(toBytesCharset))));
            // nulls: no partition, no key, no timestamp - broker assigns them
            producer.send(new ProducerRecord<>(topic, null, null, null, msg.getData(), headers), (metadata, e) -> processRecord(ctx, msg, metadata, e));
        }
    } catch (Exception e) {
        // NOTE(review): synchronous send failures are only logged at debug level and are
        // not propagated to ctx - presumably intentional best-effort behavior; confirm.
        log.debug("[{}] Failed to process message: {}", ctx.getSelfId(), msg, e);
    }
}
Also used : RecordHeaders(org.apache.kafka.common.header.internals.RecordHeaders) Headers(org.apache.kafka.common.header.Headers) RecordHeader(org.apache.kafka.common.header.internals.RecordHeader) TbNodeException(org.thingsboard.rule.engine.api.TbNodeException) TimeoutException(org.apache.kafka.common.errors.TimeoutException)

Example 29 with RecordHeader

Use of org.apache.kafka.common.header.internals.RecordHeader in the Eclipse Hono project.

The method assertUniqueHeaderWithExpectedValue from the class KafkaClientUnitTestHelper.

/**
 * Asserts that the given headers contain exactly one header with the given key and that
 * its value matches the expected one.
 * <p>
 * A {@code String} expected value is compared as-is; any other value is first serialized
 * to its JSON representation. The comparison uses the UTF-8 encoded bytes of that text.
 *
 * @param headers The headers to check.
 * @param key The name of the header.
 * @param expectedValue The expected value.
 * @throws NullPointerException if any of the parameters are {@code null}.
 * @throws AssertionError if the headers do not contain a single occurrence of the given key with
 *                        the given value.
 */
public static void assertUniqueHeaderWithExpectedValue(final Headers headers, final String key, final Object expectedValue) {
    Objects.requireNonNull(headers);
    Objects.requireNonNull(key);
    Objects.requireNonNull(expectedValue);
    // strings pass through untouched, everything else is JSON-serialized first
    final String expectedText = expectedValue instanceof String
            ? (String) expectedValue
            : Json.encode(expectedValue);
    final RecordHeader expectedHeader = new RecordHeader(key, expectedText.getBytes(StandardCharsets.UTF_8));
    assertThat(headers.headers(key)).hasSize(1);
    assertThat(headers).contains(expectedHeader);
}
Also used : RecordHeader(org.apache.kafka.common.header.internals.RecordHeader)

Example 30 with RecordHeader

Use of org.apache.kafka.common.header.internals.RecordHeader in the apache-kafka-on-k8s project by banzaicloud.

The method readHeaders from the class DefaultRecord.

/**
 * Parses {@code numHeaders} record headers from the buffer's current position.
 * <p>
 * Each header is laid out as: varint key length, UTF-8 key bytes, varint value length,
 * value bytes. A negative value length denotes a {@code null} header value; a negative
 * key length is invalid and rejected.
 *
 * @param buffer The buffer to read from; its position is advanced past the parsed headers.
 * @param numHeaders The number of headers to read.
 * @return The parsed headers.
 * @throws InvalidRecordException if a header has a negative key size.
 */
private static Header[] readHeaders(ByteBuffer buffer, int numHeaders) {
    Header[] headers = new Header[numHeaders];
    for (int i = 0; i < numHeaders; i++) {
        int headerKeySize = ByteUtils.readVarint(buffer);
        if (headerKeySize < 0)
            throw new InvalidRecordException("Invalid negative header key size " + headerKeySize);
        // decode the key without consuming it, then advance past the key bytes manually
        String headerKey = Utils.utf8(buffer, headerKeySize);
        buffer.position(buffer.position() + headerKeySize);
        // a negative value size means the header value is null
        ByteBuffer headerValue = null;
        int headerValueSize = ByteUtils.readVarint(buffer);
        if (headerValueSize >= 0) {
            // slice() shares the backing data: the header value is a zero-copy view
            // limited to exactly headerValueSize bytes
            headerValue = buffer.slice();
            headerValue.limit(headerValueSize);
            buffer.position(buffer.position() + headerValueSize);
        }
        headers[i] = new RecordHeader(headerKey, headerValue);
    }
    return headers;
}
Also used : RecordHeader(org.apache.kafka.common.header.internals.RecordHeader) Header(org.apache.kafka.common.header.Header) ByteBuffer(java.nio.ByteBuffer)

Aggregations

RecordHeader (org.apache.kafka.common.header.internals.RecordHeader)45 RecordHeaders (org.apache.kafka.common.header.internals.RecordHeaders)26 Header (org.apache.kafka.common.header.Header)21 Test (org.junit.Test)17 Headers (org.apache.kafka.common.header.Headers)16 ConsumerRecord (org.apache.kafka.clients.consumer.ConsumerRecord)15 ByteBuffer (java.nio.ByteBuffer)10 Test (org.junit.jupiter.api.Test)10 TopicPartition (org.apache.kafka.common.TopicPartition)8 ArrayList (java.util.ArrayList)7 DataOutputStream (java.io.DataOutputStream)6 ByteBufferOutputStream (org.apache.kafka.common.utils.ByteBufferOutputStream)6 ProcessorRecordContext (org.apache.kafka.streams.processor.internals.ProcessorRecordContext)5 RecordBatchingStateRestoreCallback (org.apache.kafka.streams.processor.internals.RecordBatchingStateRestoreCallback)5 Position (org.apache.kafka.streams.query.Position)5 MockInternalProcessorContext (org.apache.kafka.test.MockInternalProcessorContext)5 LinkedList (java.util.LinkedList)4 MemoryRecordsBuilder (org.apache.kafka.common.record.MemoryRecordsBuilder)4 Change (org.apache.kafka.streams.kstream.internals.Change)4 Eviction (org.apache.kafka.streams.state.internals.TimeOrderedKeyValueBuffer.Eviction)4