Usage of org.apache.kafka.common.header.internals.RecordHeader in the Eclipse Hono project, from the method createRecordWithElapsedTtl of class AsyncHandlingAutoCommitKafkaConsumerTest:
/**
 * Creates a consumer record whose ttl has already elapsed: the {@code ttl} header is set to
 * 1 (presumably seconds — matching the variable name) while the {@code creation-time} header
 * points 2 seconds into the past, so TTL-aware consumers should discard the record.
 *
 * @return The record with an elapsed ttl.
 */
private ConsumerRecord<String, Buffer> createRecordWithElapsedTtl() {
    // use an explicit charset: the no-arg getBytes() silently uses the platform default,
    // which makes the test environment-dependent
    final byte[] ttl1Second = "1".getBytes(java.nio.charset.StandardCharsets.UTF_8);
    final RecordHeader ttl = new RecordHeader("ttl", ttl1Second);
    final byte[] timestamp2SecondsAgo = Json.encode(Instant.now().minusSeconds(2).toEpochMilli())
            .getBytes(java.nio.charset.StandardCharsets.UTF_8);
    final RecordHeader creationTime = new RecordHeader("creation-time", timestamp2SecondsAgo);
    return new ConsumerRecord<>(TOPIC, PARTITION, 0, ConsumerRecord.NO_TIMESTAMP,
            TimestampType.NO_TIMESTAMP_TYPE, (long) ConsumerRecord.NULL_CHECKSUM,
            ConsumerRecord.NULL_SIZE, ConsumerRecord.NULL_SIZE, "key_0", Buffer.buffer(),
            new RecordHeaders(new Header[] { ttl, creationTime }));
}
Usage of org.apache.kafka.common.header.internals.RecordHeader in the Eclipse Hono project, from the method createRecordWithElapsedTtl of class HonoKafkaConsumerTest:
/**
 * Creates a consumer record whose ttl has already elapsed: the {@code ttl} header is set to
 * 1 (presumably seconds — matching the variable name) while the {@code creation-time} header
 * points 2 seconds into the past, so TTL-aware consumers should discard the record.
 *
 * @return The record with an elapsed ttl.
 */
private ConsumerRecord<String, Buffer> createRecordWithElapsedTtl() {
    // use an explicit charset: the no-arg getBytes() silently uses the platform default,
    // which makes the test environment-dependent
    final byte[] ttl1Second = "1".getBytes(java.nio.charset.StandardCharsets.UTF_8);
    final RecordHeader ttl = new RecordHeader("ttl", ttl1Second);
    final byte[] timestamp2SecondsAgo = Json.encode(Instant.now().minusSeconds(2).toEpochMilli())
            .getBytes(java.nio.charset.StandardCharsets.UTF_8);
    final RecordHeader creationTime = new RecordHeader("creation-time", timestamp2SecondsAgo);
    return new ConsumerRecord<>(TOPIC, PARTITION, 0, ConsumerRecord.NO_TIMESTAMP,
            TimestampType.NO_TIMESTAMP_TYPE, (long) ConsumerRecord.NULL_CHECKSUM,
            ConsumerRecord.NULL_SIZE, ConsumerRecord.NULL_SIZE, "key_0", Buffer.buffer(),
            new RecordHeaders(new Header[] { ttl, creationTime }));
}
Usage of org.apache.kafka.common.header.internals.RecordHeader in the ThingsBoard project, from the method publish of class TbKafkaNode:
/**
 * Publishes the message payload to the given Kafka topic.
 * <p>
 * When {@code addMetadataKeyValuesAsKafkaHeaders} is enabled, each metadata entry of the
 * message is attached as a Kafka record header whose key is prefixed with
 * {@code TB_MSG_MD_PREFIX} and whose value is encoded using {@code toBytesCharset}.
 * The send result is reported asynchronously via {@code processRecord}.
 *
 * @param ctx The rule node context passed through to the send-result callback.
 * @param msg The message whose data becomes the record value.
 * @param topic The destination Kafka topic.
 */
protected void publish(TbContext ctx, TbMsg msg, String topic) {
try {
if (!addMetadataKeyValuesAsKafkaHeaders) {
// TODO: external system executor
producer.send(new ProducerRecord<>(topic, msg.getData()), (metadata, e) -> processRecord(ctx, msg, metadata, e));
} else {
// copy every metadata entry into a prefixed Kafka header before sending
Headers headers = new RecordHeaders();
msg.getMetaData().values().forEach((key, value) -> headers.add(new RecordHeader(TB_MSG_MD_PREFIX + key, value.getBytes(toBytesCharset))));
producer.send(new ProducerRecord<>(topic, null, null, null, msg.getData(), headers), (metadata, e) -> processRecord(ctx, msg, metadata, e));
}
} catch (Exception e) {
// NOTE(review): synchronous send failures are only logged at debug level and then
// swallowed — the message is not reported as failed here; confirm this is intentional
log.debug("[{}] Failed to process message: {}", ctx.getSelfId(), msg, e);
}
}
Usage of org.apache.kafka.common.header.internals.RecordHeader in the Eclipse Hono project, from the method assertUniqueHeaderWithExpectedValue of class KafkaClientUnitTestHelper:
/**
* Asserts existence of a unique header value.
*
* @param headers The headers to check.
* @param key The name of the header.
* @param expectedValue The expected value.
* @throws NullPointerException if any of the parameters are {@code null}.
* @throws AssertionError if the headers do not contain a single occurrence of the given key with
* the given value.
*/
/**
 * Asserts that the given headers contain exactly one occurrence of the given key and that its
 * value equals the expected one.
 * <p>
 * A {@code String} expected value is compared as-is; any other value is compared against its
 * JSON encoding.
 *
 * @param headers The headers to check.
 * @param key The name of the header.
 * @param expectedValue The expected value.
 * @throws NullPointerException if any of the parameters are {@code null}.
 * @throws AssertionError if the headers do not contain a single occurrence of the given key with
 *                        the given value.
 */
public static void assertUniqueHeaderWithExpectedValue(final Headers headers, final String key, final Object expectedValue) {
    Objects.requireNonNull(headers);
    Objects.requireNonNull(key);
    Objects.requireNonNull(expectedValue);
    // non-String values are compared via their JSON representation
    final String valueAsString = expectedValue instanceof String
            ? (String) expectedValue
            : Json.encode(expectedValue);
    final byte[] expectedBytes = valueAsString.getBytes(StandardCharsets.UTF_8);
    assertThat(headers.headers(key)).hasSize(1);
    assertThat(headers).contains(new RecordHeader(key, expectedBytes));
}
Usage of org.apache.kafka.common.header.internals.RecordHeader in the apache-kafka-on-k8s project by Banzai Cloud, from the method readHeaders of class DefaultRecord:
/**
 * Reads {@code numHeaders} record headers from the buffer.
 * <p>
 * Each header is encoded as a varint key length, the UTF-8 key bytes, a varint value length
 * (negative means a null value) and the value bytes. The buffer position is advanced past
 * every header that is read.
 *
 * @param buffer The buffer positioned at the first header.
 * @param numHeaders The number of headers to read.
 * @return The parsed headers.
 * @throws InvalidRecordException if a header key size is negative, or if a declared key or
 *                                value size exceeds the bytes remaining in the buffer.
 */
private static Header[] readHeaders(ByteBuffer buffer, int numHeaders) {
    Header[] headers = new Header[numHeaders];
    for (int i = 0; i < numHeaders; i++) {
        int headerKeySize = ByteUtils.readVarint(buffer);
        if (headerKeySize < 0)
            throw new InvalidRecordException("Invalid negative header key size " + headerKeySize);
        // bounds check: a corrupted size must surface as InvalidRecordException rather than
        // an unspecific runtime exception from the buffer accessors below
        if (headerKeySize > buffer.remaining())
            throw new InvalidRecordException("Header key size " + headerKeySize
                    + " exceeds remaining buffer size " + buffer.remaining());
        String headerKey = Utils.utf8(buffer, headerKeySize);
        buffer.position(buffer.position() + headerKeySize);

        ByteBuffer headerValue = null;
        int headerValueSize = ByteUtils.readVarint(buffer);
        // a negative value size denotes a null header value
        if (headerValueSize >= 0) {
            if (headerValueSize > buffer.remaining())
                throw new InvalidRecordException("Header value size " + headerValueSize
                        + " exceeds remaining buffer size " + buffer.remaining());
            headerValue = buffer.slice();
            headerValue.limit(headerValueSize);
            buffer.position(buffer.position() + headerValueSize);
        }

        headers[i] = new RecordHeader(headerKey, headerValue);
    }
    return headers;
}
Aggregations