Use of org.apache.kafka.common.header.internals.RecordHeaders in project kafka by apache.
The class ChangeLoggingTimestampedWindowBytesStoreTest, method shouldLogPutsWithPosition:
@Test
public void shouldLogPutsWithPosition() {
    EasyMock.expect(inner.getPosition()).andReturn(POSITION).anyTimes();
    inner.put(bytesKey, valueAndTimestamp, 0);
    EasyMock.expectLastCall();
    init();

    final Bytes key = WindowKeySchema.toStoreKeyBinary(bytesKey, 0, 0);
    EasyMock.reset(context);
    // Stub the record metadata that the store consults when logging the change.
    final RecordMetadata recordContext = new ProcessorRecordContext(0L, 1L, 0, "", new RecordHeaders());
    EasyMock.expect(context.recordMetadata()).andStubReturn(Optional.of(recordContext));
    final Position position = Position.fromMap(mkMap(mkEntry("", mkMap(mkEntry(0, 1L)))));
    context.logChange(store.name(), key, value, 42, position);
    EasyMock.replay(context);

    store.put(bytesKey, valueAndTimestamp, context.timestamp());

    EasyMock.verify(inner, context);
}
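The empty RecordHeaders here only satisfies the ProcessorRecordContext constructor; ProcessorRecordContext implements RecordMetadata, which is why it can be stubbed into context.recordMetadata(). A minimal standalone sketch of that pattern, with illustrative values that are not part of the test above:

import org.apache.kafka.common.header.internals.RecordHeaders;
import org.apache.kafka.streams.processor.api.RecordMetadata;
import org.apache.kafka.streams.processor.internals.ProcessorRecordContext;

// Sketch only: ProcessorRecordContext implements RecordMetadata, so an empty
// RecordHeaders is enough to fabricate record metadata for a test double.
// Constructor arguments: timestamp, offset, partition, topic, headers.
final RecordMetadata metadata = new ProcessorRecordContext(0L, 1L, 0, "", new RecordHeaders());
System.out.println(metadata.topic() + "/" + metadata.partition() + "@" + metadata.offset());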
Use of org.apache.kafka.common.header.internals.RecordHeaders in project kafka by apache.
The class MockProcessorContextTest, method shouldCaptureApplicationAndRecordMetadata:
@Test
public void shouldCaptureApplicationAndRecordMetadata() {
    final Properties config = new Properties();
    config.put(StreamsConfig.APPLICATION_ID_CONFIG, "testMetadata");
    final org.apache.kafka.streams.processor.AbstractProcessor<String, Object> processor =
        new org.apache.kafka.streams.processor.AbstractProcessor<String, Object>() {
            @Override
            public void process(final String key, final Object value) {
                context().forward("appId", context().applicationId());
                context().forward("taskId", context().taskId());
                context().forward("topic", context().topic());
                context().forward("partition", context().partition());
                context().forward("offset", context().offset());
                context().forward("timestamp", context().timestamp());
                context().forward("key", key);
                context().forward("value", value);
            }
        };
    final MockProcessorContext context = new MockProcessorContext(config);
    processor.init(context);
    try {
        processor.process("foo", 5L);
        fail("Should have thrown an exception.");
    } catch (final IllegalStateException expected) {
        // expected, since the record metadata isn't initialized
    }
    context.resetForwards();
    context.setRecordMetadata("t1", 0, 0L, new RecordHeaders(), 0L);
    {
        processor.process("foo", 5L);
        final Iterator<CapturedForward> forwarded = context.forwarded().iterator();
        assertEquals(new KeyValue<>("appId", "testMetadata"), forwarded.next().keyValue());
        assertEquals(new KeyValue<>("taskId", new TaskId(0, 0)), forwarded.next().keyValue());
        assertEquals(new KeyValue<>("topic", "t1"), forwarded.next().keyValue());
        assertEquals(new KeyValue<>("partition", 0), forwarded.next().keyValue());
        assertEquals(new KeyValue<>("offset", 0L), forwarded.next().keyValue());
        assertEquals(new KeyValue<>("timestamp", 0L), forwarded.next().keyValue());
        assertEquals(new KeyValue<>("key", "foo"), forwarded.next().keyValue());
        assertEquals(new KeyValue<>("value", 5L), forwarded.next().keyValue());
    }
    context.resetForwards();
    // record metadata should be "sticky"
    context.setOffset(1L);
    context.setRecordTimestamp(10L);
    context.setCurrentSystemTimeMs(20L);
    context.setCurrentStreamTimeMs(30L);
    {
        processor.process("bar", 50L);
        final Iterator<CapturedForward> forwarded = context.forwarded().iterator();
        assertEquals(new KeyValue<>("appId", "testMetadata"), forwarded.next().keyValue());
        assertEquals(new KeyValue<>("taskId", new TaskId(0, 0)), forwarded.next().keyValue());
        assertEquals(new KeyValue<>("topic", "t1"), forwarded.next().keyValue());
        assertEquals(new KeyValue<>("partition", 0), forwarded.next().keyValue());
        assertEquals(new KeyValue<>("offset", 1L), forwarded.next().keyValue());
        assertEquals(new KeyValue<>("timestamp", 10L), forwarded.next().keyValue());
        assertEquals(new KeyValue<>("key", "bar"), forwarded.next().keyValue());
        assertEquals(new KeyValue<>("value", 50L), forwarded.next().keyValue());
        assertEquals(20L, context.currentSystemTimeMs());
        assertEquals(30L, context.currentStreamTimeMs());
    }
    context.resetForwards();
    // record metadata should be "sticky"
    context.setTopic("t2");
    context.setPartition(30);
    {
        processor.process("baz", 500L);
        final Iterator<CapturedForward> forwarded = context.forwarded().iterator();
        assertEquals(new KeyValue<>("appId", "testMetadata"), forwarded.next().keyValue());
        assertEquals(new KeyValue<>("taskId", new TaskId(0, 0)), forwarded.next().keyValue());
        assertEquals(new KeyValue<>("topic", "t2"), forwarded.next().keyValue());
        assertEquals(new KeyValue<>("partition", 30), forwarded.next().keyValue());
        assertEquals(new KeyValue<>("offset", 1L), forwarded.next().keyValue());
        assertEquals(new KeyValue<>("timestamp", 10L), forwarded.next().keyValue());
        assertEquals(new KeyValue<>("key", "baz"), forwarded.next().keyValue());
        assertEquals(new KeyValue<>("value", 500L), forwarded.next().keyValue());
    }
}
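Since RecordHeaders is mutable, a test can also populate headers before seeding them into MockProcessorContext.setRecordMetadata. A brief sketch, where "trace-id" is a made-up header key used only for illustration:

import org.apache.kafka.common.header.Headers;
import org.apache.kafka.common.header.internals.RecordHeaders;
import org.apache.kafka.streams.processor.MockProcessorContext;

// Sketch only; "trace-id" is a hypothetical header key.
final Headers headers = new RecordHeaders().add("trace-id", "abc123".getBytes());
final MockProcessorContext context = new MockProcessorContext();
context.setRecordMetadata("t1", 0, 0L, headers, 0L);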
Use of org.apache.kafka.common.header.internals.RecordHeaders in project kafka by apache.
The class TestTopicsTest, method testWithHeaders:
@Test
public void testWithHeaders() {
    long baseTime = 3;
    final Headers headers = new RecordHeaders(new Header[] {
        new RecordHeader("foo", "value".getBytes()),
        new RecordHeader("bar", null),
        new RecordHeader("\"A\\u00ea\\u00f1\\u00fcC\"", "value".getBytes())
    });
    final TestInputTopic<Long, String> inputTopic =
        testDriver.createInputTopic(INPUT_TOPIC, longSerde.serializer(), stringSerde.serializer());
    final TestOutputTopic<Long, String> outputTopic =
        testDriver.createOutputTopic(OUTPUT_TOPIC, longSerde.deserializer(), stringSerde.deserializer());
    inputTopic.pipeInput(new TestRecord<>(1L, "Hello", headers));
    assertThat(outputTopic.readRecord(), allOf(
        hasProperty("key", equalTo(1L)),
        hasProperty("value", equalTo("Hello")),
        hasProperty("headers", equalTo(headers))));
    inputTopic.pipeInput(new TestRecord<>(2L, "Kafka", headers, ++baseTime));
    assertThat(outputTopic.readRecord(), is(equalTo(new TestRecord<>(2L, "Kafka", headers, baseTime))));
}
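The same Header[] constructor works outside the test driver; RecordHeaders is iterable, permits null values, and allows duplicate keys. A minimal sketch with illustrative keys:

import org.apache.kafka.common.header.Header;
import org.apache.kafka.common.header.Headers;
import org.apache.kafka.common.header.internals.RecordHeader;
import org.apache.kafka.common.header.internals.RecordHeaders;

// Sketch only: build headers from an array, then read them back.
final Headers headers = new RecordHeaders(new Header[] {
    new RecordHeader("foo", "value".getBytes()),
    new RecordHeader("bar", null)               // null values are permitted
});
for (final Header header : headers) {
    System.out.println(header.key());
}
System.out.println(headers.lastHeader("foo").value().length);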
Use of org.apache.kafka.common.header.internals.RecordHeaders in project kafka by apache.
The class TopologyTestDriverTest, method shouldSendRecordViaCorrectSourceTopic:
@Test
public void shouldSendRecordViaCorrectSourceTopic() {
    testDriver = new TopologyTestDriver(setupMultipleSourceTopology(SOURCE_TOPIC_1, SOURCE_TOPIC_2), config);
    final List<TTDTestRecord> processedRecords1 = mockProcessors.get(0).processedRecords;
    final List<TTDTestRecord> processedRecords2 = mockProcessors.get(1).processedRecords;
    final TestInputTopic<byte[], byte[]> inputTopic1 =
        testDriver.createInputTopic(SOURCE_TOPIC_1, new ByteArraySerializer(), new ByteArraySerializer());
    final TestInputTopic<byte[], byte[]> inputTopic2 =
        testDriver.createInputTopic(SOURCE_TOPIC_2, new ByteArraySerializer(), new ByteArraySerializer());
    inputTopic1.pipeInput(new TestRecord<>(key1, value1, headers, timestamp1));
    assertEquals(1, processedRecords1.size());
    assertEquals(0, processedRecords2.size());
    TTDTestRecord record = processedRecords1.get(0);
    TTDTestRecord expectedResult = new TTDTestRecord(key1, value1, headers, timestamp1, 0L, SOURCE_TOPIC_1);
    assertThat(record, equalTo(expectedResult));
    inputTopic2.pipeInput(new TestRecord<>(key2, value2, Instant.ofEpochMilli(timestamp2)));
    assertEquals(1, processedRecords1.size());
    assertEquals(1, processedRecords2.size());
    record = processedRecords2.get(0);
    expectedResult = new TTDTestRecord(key2, value2, new RecordHeaders((Iterable<Header>) null), timestamp2, 0L, SOURCE_TOPIC_2);
    assertThat(record, equalTo(expectedResult));
}
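Note the cast in the expected result: RecordHeaders has constructors for both Header[] and Iterable<Header>, and passing a null Iterable yields an empty headers collection, matching a record that was piped in without headers. A one-line sketch:

import org.apache.kafka.common.header.Header;
import org.apache.kafka.common.header.internals.RecordHeaders;

// Sketch only: a null Iterable produces an empty, mutable Headers instance.
final RecordHeaders empty = new RecordHeaders((Iterable<Header>) null);
System.out.println(empty.toArray().length);   // prints 0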
Use of org.apache.kafka.common.header.internals.RecordHeaders in project pinpoint by naver.
The class ProducerAddHeaderInterceptorTest, method beforeWhenUnsampled:
@Test
public void beforeWhenUnsampled() {
    doReturn(profilerConfig).when(traceContext).getProfilerConfig();
    doReturn(true).when(profilerConfig).readBoolean(KafkaConfig.HEADER_ENABLE, true);
    doReturn(trace).when(traceContext).currentRawTraceObject();
    doReturn(false).when(trace).canSampled();
    doReturn(recorder).when(trace).currentSpanEventRecorder();
    doReturn(apiVersions).when(apiVersionsGetter)._$PINPOINT$_getApiVersions();
    doReturn(RecordBatch.MAGIC_VALUE_V2).when(apiVersions).maxUsableProduceMagic();

    ProducerAddHeaderInterceptor interceptor = new ProducerAddHeaderInterceptor(traceContext);
    RecordHeaders recordHeader = new RecordHeaders();
    Object[] args = new Object[] { recordHeader };
    interceptor.before(apiVersionsGetter, args);

    Header[] headers = recordHeader.toArray();
    Assert.assertEquals(1, headers.length);
}
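The assertion relies on RecordHeaders being mutable: the interceptor adds its tracing header to the instance passed in args, and toArray() exposes it afterward. A sketch of that read-back, with a made-up header key standing in for whatever the interceptor actually writes:

import org.apache.kafka.common.header.Header;
import org.apache.kafka.common.header.internals.RecordHeaders;

// Sketch only; "x-sampled" is a hypothetical key, not Pinpoint's real header name.
final RecordHeaders recordHeaders = new RecordHeaders();
recordHeaders.add("x-sampled", "false".getBytes());
final Header[] headers = recordHeaders.toArray();
System.out.println(headers.length);   // prints 1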