Use of org.apache.kafka.common.header.internals.RecordHeaders in project kafka by apache.
The class KStreamSessionWindowAggregateProcessorTest, method shouldLogAndMeterWhenSkippingLateRecordWithZeroGrace.
@Test
public void shouldLogAndMeterWhenSkippingLateRecordWithZeroGrace() {
    setup(false);
    final Processor<String, String, Windowed<String>, Change<Long>> processor = new KStreamSessionWindowAggregate<>(
        SessionWindows.ofInactivityGapAndGrace(ofMillis(10L), ofMillis(0L)),
        STORE_NAME,
        initializer,
        aggregator,
        sessionMerger
    ).get();
    processor.init(context);
    // dummy record to establish stream time = 0
    context.setRecordContext(new ProcessorRecordContext(0, -2, -3, "topic", new RecordHeaders()));
    processor.process(new Record<>("dummy", "dummy", 0L));
    // record arrives on time, should not be skipped
    context.setRecordContext(new ProcessorRecordContext(0, -2, -3, "topic", new RecordHeaders()));
    processor.process(new Record<>("OnTime1", "1", 0L));
    // dummy record to advance stream time = 11, 10 for gap time plus 1 to place outside window
    context.setRecordContext(new ProcessorRecordContext(11, -2, -3, "topic", new RecordHeaders()));
    processor.process(new Record<>("dummy", "dummy", 11L));
    try (final LogCaptureAppender appender = LogCaptureAppender.createAndRegister(KStreamSessionWindowAggregate.class)) {
        // record is late
        context.setRecordContext(new ProcessorRecordContext(0, -2, -3, "topic", new RecordHeaders()));
        processor.process(new Record<>("Late1", "1", 0L));
        assertThat(appender.getMessages(), hasItem("Skipping record for expired window."
            + " topic=[topic] partition=[-3] offset=[-2] timestamp=[0] window=[0,0] expiration=[1] streamTime=[11]"));
    }
    final MetricName dropTotal = new MetricName(
        "dropped-records-total",
        "stream-task-metrics",
        "The total number of dropped records",
        mkMap(mkEntry("thread-id", threadId), mkEntry("task-id", "0_0")));
    final MetricName dropRate = new MetricName(
        "dropped-records-rate",
        "stream-task-metrics",
        "The average number of dropped records per second",
        mkMap(mkEntry("thread-id", threadId), mkEntry("task-id", "0_0")));
    assertThat(metrics.metrics().get(dropTotal).metricValue(), is(1.0));
    assertThat((Double) metrics.metrics().get(dropRate).metricValue(), greaterThan(0.0));
}
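In the test above, new RecordHeaders() is just an empty placeholder for the headers argument of ProcessorRecordContext. A minimal sketch of the same call with a populated header set might look like this (the header key and value are invented for illustration and are not part of the test):

// Hypothetical variant: carry a real header value into the record context
// via RecordHeaders.add(String, byte[]).
final RecordHeaders headers = new RecordHeaders();
headers.add("source", "unit-test".getBytes(StandardCharsets.UTF_8));
context.setRecordContext(new ProcessorRecordContext(0, -2, -3, "topic", headers));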
Use of org.apache.kafka.common.header.internals.RecordHeaders in project kafka by apache.
The class MemoryRecordsTest, method testHasRoomForMethodWithHeaders.
@ParameterizedTest
@ArgumentsSource(MemoryRecordsArgumentsProvider.class)
public void testHasRoomForMethodWithHeaders(Args args) {
    byte magic = args.magic;
    MemoryRecordsBuilder builder = MemoryRecords.builder(
        ByteBuffer.allocate(120), magic, args.compression, TimestampType.CREATE_TIME, 0L);
    builder.append(logAppendTime, "key".getBytes(), "value".getBytes());
    RecordHeaders headers = new RecordHeaders();
    for (int i = 0; i < 10; ++i)
        headers.add("hello", "world.world".getBytes());
    // Make sure that hasRoomFor accounts for header sizes by letting a record without headers pass,
    // but stopping a record with a large number of headers.
    assertTrue(builder.hasRoomFor(logAppendTime, "key".getBytes(), "value".getBytes(), Record.EMPTY_HEADERS));
    if (magic < MAGIC_VALUE_V2)
        assertTrue(builder.hasRoomFor(logAppendTime, "key".getBytes(), "value".getBytes(), headers.toArray()));
    else
        assertFalse(builder.hasRoomFor(logAppendTime, "key".getBytes(), "value".getBytes(), headers.toArray()));
}
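Outside of this size-bound test, the usual reason to call hasRoomFor with the header array is to check whether a record, headers included, still fits in the current batch before appending it. A minimal sketch of that guard, assuming builder, timestamp, key, value and a populated RecordHeaders instance are already in scope:

// Sketch only: the same hasRoomFor/append pair, with the header array included in the size estimate.
if (builder.hasRoomFor(timestamp, key, value, headers.toArray())) {
    builder.append(timestamp, key, value, headers.toArray());
} else {
    // batch is full: close this builder and start a new one (application-specific)
}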
Use of org.apache.kafka.common.header.internals.RecordHeaders in project kafka by apache.
The class WorkerSourceTask, method convertHeaderFor.
// Convert the Connect-style headers on the SourceRecord into Kafka RecordHeaders,
// serializing each header value with the task's configured HeaderConverter.
private RecordHeaders convertHeaderFor(SourceRecord record) {
    Headers headers = record.headers();
    RecordHeaders result = new RecordHeaders();
    if (headers != null) {
        String topic = record.topic();
        for (Header header : headers) {
            String key = header.key();
            byte[] rawHeader = headerConverter.fromConnectHeader(topic, key, header.schema(), header.value());
            result.add(key, rawHeader);
        }
    }
    return result;
}
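For context, the RecordHeaders built by convertHeaderFor ends up on the outgoing ProducerRecord. The snippet below is a simplified sketch of that hand-off rather than the exact WorkerSourceTask code; serializedKey, serializedValue and producer are assumed to be in scope:

// Hypothetical caller sketch: forward the converted headers with the produced record.
RecordHeaders headers = convertHeaderFor(record);
ProducerRecord<byte[], byte[]> producerRecord = new ProducerRecord<>(
    record.topic(), record.kafkaPartition(), record.timestamp(),
    serializedKey, serializedValue, headers);
producer.send(producerRecord);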
Use of org.apache.kafka.common.header.internals.RecordHeaders in project kafka by apache.
The class ProcessorContextImpl, method logChange.
@Override
public void logChange(final String storeName,
                      final Bytes key,
                      final byte[] value,
                      final long timestamp,
                      final Position position) {
    throwUnsupportedOperationExceptionIfStandby("logChange");
    final TopicPartition changelogPartition = stateManager().registeredChangelogPartitionFor(storeName);
    final Headers headers;
    if (!consistencyEnabled) {
        headers = null;
    } else {
        // Add the vector clock to the header part of every record
        headers = new RecordHeaders();
        headers.add(ChangelogRecordDeserializationHelper.CHANGELOG_VERSION_HEADER_RECORD_CONSISTENCY);
        headers.add(new RecordHeader(ChangelogRecordDeserializationHelper.CHANGELOG_POSITION_HEADER_KEY,
            PositionSerde.serialize(position).array()));
    }
    collector.send(changelogPartition.topic(), key, value, headers, changelogPartition.partition(),
        timestamp, BYTES_KEY_SERIALIZER, BYTEARRAY_VALUE_SERIALIZER);
}
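On the read side, the position header written above can be looked up again by key on the consumed changelog record. A minimal sketch, assuming record is the changelog ConsumerRecord and that PositionSerde.deserialize is the counterpart of the serialize call used here:

// Hypothetical reader sketch: recover the vector clock from the changelog headers.
final Header positionHeader = record.headers().lastHeader(
    ChangelogRecordDeserializationHelper.CHANGELOG_POSITION_HEADER_KEY);
if (positionHeader != null) {
    final Position restored = PositionSerde.deserialize(ByteBuffer.wrap(positionHeader.value()));
    // merge or compare the restored position as needed
}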
Use of org.apache.kafka.common.header.internals.RecordHeaders in project kafka by apache.
The class ProcessorRecordContext, method deserialize.
public static ProcessorRecordContext deserialize(final ByteBuffer buffer) {
    final long timestamp = buffer.getLong();
    final long offset = buffer.getLong();
    final String topic;
    {
        // we believe the topic will never be null when we serialize
        final byte[] topicBytes = requireNonNull(getNullableSizePrefixedArray(buffer));
        topic = new String(topicBytes, UTF_8);
    }
    final int partition = buffer.getInt();
    final int headerCount = buffer.getInt();
    final Headers headers;
    if (headerCount == -1) {
        // keep for backward compatibility
        headers = new RecordHeaders();
    } else {
        final Header[] headerArr = new Header[headerCount];
        for (int i = 0; i < headerCount; i++) {
            final byte[] keyBytes = requireNonNull(getNullableSizePrefixedArray(buffer));
            final byte[] valueBytes = getNullableSizePrefixedArray(buffer);
            headerArr[i] = new RecordHeader(new String(keyBytes, UTF_8), valueBytes);
        }
        headers = new RecordHeaders(headerArr);
    }
    return new ProcessorRecordContext(timestamp, offset, partition, topic, headers);
}
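A short usage sketch (variable names assumed): the deserialized context exposes the rebuilt RecordHeaders through headers(), so callers can iterate them like any other Headers instance:

// Hypothetical usage: walk the headers recovered by deserialize().
final ProcessorRecordContext restored = ProcessorRecordContext.deserialize(byteBuffer);
for (final Header header : restored.headers()) {
    final byte[] value = header.value();
    System.out.println(header.key() + " = " + (value == null ? "null" : new String(value, UTF_8)));
}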