Use of org.apache.kafka.streams.processor.api.ProcessorContext in project kafka by apache.
The class KStreamPrintTest, method setUp.
@Before
public void setUp() {
    byteOutStream = new ByteArrayOutputStream();
    final KStreamPrint<Integer, String> kStreamPrint = new KStreamPrint<>(
        new PrintForeachAction<>(
            byteOutStream,
            (key, value) -> String.format("%d, %s", key, value),
            "test-stream"));
    printProcessor = kStreamPrint.get();
    final ProcessorContext<Void, Void> processorContext =
        EasyMock.createNiceMock(ProcessorContext.class);
    EasyMock.replay(processorContext);
    printProcessor.init(processorContext);
}
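A test built on this setup would typically push a record through printProcessor and then assert on the bytes captured by byteOutStream. A minimal sketch, assuming the "key, value" format produced by the mapper above (the test method name and the exact assertion are illustrative, not part of the Kafka source):

@Test
public void shouldPrintKeyValuePair() {
    // Drive one record through the processor wired up in setUp().
    printProcessor.process(new Record<>(1, "first", 0L));

    // The mapper above renders each record as "key, value"; PrintForeachAction
    // writes one line per record, labeled with "test-stream".
    final String printed = new String(byteOutStream.toByteArray(), StandardCharsets.UTF_8);
    assertThat(printed, containsString("1, first"));
}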
Use of org.apache.kafka.streams.processor.api.ProcessorContext in project kafka by apache.
The class GraphGraceSearchUtilTest, method shouldExtractGraceFromSessionAncestorThroughStatefulParent.
@Test
public void shouldExtractGraceFromSessionAncestorThroughStatefulParent() {
    final SessionWindows windows = SessionWindows.ofInactivityGapAndGrace(ofMillis(10L), ofMillis(1234L));
    final StatefulProcessorNode<String, Long> graceGrandparent = new StatefulProcessorNode<>(
        "asdf",
        new ProcessorParameters<>(
            new KStreamSessionWindowAggregate<String, Long, Integer>(windows, "asdf", null, null, null),
            "asdf"),
        (StoreBuilder<?>) null);
    final StatefulProcessorNode<String, Long> statefulParent = new StatefulProcessorNode<>(
        "stateful",
        new ProcessorParameters<>(() -> new Processor<String, Long, String, Long>() {
            @Override
            public void init(final ProcessorContext<String, Long> context) {}

            @Override
            public void process(final Record<String, Long> record) {}

            @Override
            public void close() {}
        }, "dummy"),
        (StoreBuilder<?>) null);
    graceGrandparent.addChild(statefulParent);
    final ProcessorGraphNode<String, Long> node = new ProcessorGraphNode<>("stateless", null);
    statefulParent.addChild(node);
    final long extracted = GraphGraceSearchUtil.findAndVerifyWindowGrace(node);
    assertThat(extracted, is(windows.gracePeriodMs() + windows.inactivityGap()));
}
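The expected value follows from how session windows bound late records: a session stays open for the inactivity gap, and the grace period extends that bound, so the utility reports the sum of the two. A quick check under the values used above:

final SessionWindows windows = SessionWindows.ofInactivityGapAndGrace(ofMillis(10L), ofMillis(1234L));
// inactivityGap() is 10 ms and gracePeriodMs() is 1234 ms, so the
// effective grace extracted from the graph is 1244 ms.
assertThat(windows.inactivityGap() + windows.gracePeriodMs(), is(1244L));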
Use of org.apache.kafka.streams.processor.api.ProcessorContext in project kafka by apache.
The class SubscriptionStoreReceiveProcessorSupplier, method get.
@Override
public Processor<KO, SubscriptionWrapper<K>, CombinedKey<KO, K>, Change<ValueAndTimestamp<SubscriptionWrapper<K>>>> get() {
    return new ContextualProcessor<KO, SubscriptionWrapper<K>, CombinedKey<KO, K>, Change<ValueAndTimestamp<SubscriptionWrapper<K>>>>() {

        private TimestampedKeyValueStore<Bytes, SubscriptionWrapper<K>> store;
        private Sensor droppedRecordsSensor;

        @Override
        public void init(final ProcessorContext<CombinedKey<KO, K>, Change<ValueAndTimestamp<SubscriptionWrapper<K>>>> context) {
            super.init(context);
            final InternalProcessorContext<?, ?> internalProcessorContext = (InternalProcessorContext<?, ?>) context;
            droppedRecordsSensor = TaskMetrics.droppedRecordsSensor(
                Thread.currentThread().getName(),
                internalProcessorContext.taskId().toString(),
                internalProcessorContext.metrics());
            store = internalProcessorContext.getStateStore(storeBuilder);
            keySchema.init(context);
        }

        @Override
        public void process(final Record<KO, SubscriptionWrapper<K>> record) {
            // A null foreign key cannot be subscribed to: log, count the drop, and skip.
            if (record.key() == null) {
                if (context().recordMetadata().isPresent()) {
                    final RecordMetadata recordMetadata = context().recordMetadata().get();
                    LOG.warn(
                        "Skipping record due to null foreign key. topic=[{}] partition=[{}] offset=[{}]",
                        recordMetadata.topic(), recordMetadata.partition(), recordMetadata.offset());
                } else {
                    LOG.warn("Skipping record due to null foreign key. Topic, partition, and offset not known.");
                }
                droppedRecordsSensor.record();
                return;
            }
            if (record.value().getVersion() != SubscriptionWrapper.CURRENT_VERSION) {
                // Incompatible version: a dedicated strategy is needed for upgrading
                // from older SubscriptionWrapper versions to newer versions.
                throw new UnsupportedVersionException("SubscriptionWrapper is of an incompatible version.");
            }
            final Bytes subscriptionKey = keySchema.toBytes(record.key(), record.value().getPrimaryKey());
            final ValueAndTimestamp<SubscriptionWrapper<K>> newValue = ValueAndTimestamp.make(record.value(), record.timestamp());
            final ValueAndTimestamp<SubscriptionWrapper<K>> oldValue = store.get(subscriptionKey);
            // This store is used by the prefix scanner in ForeignJoinSubscriptionProcessorSupplier.
            if (record.value().getInstruction().equals(SubscriptionWrapper.Instruction.DELETE_KEY_AND_PROPAGATE)
                || record.value().getInstruction().equals(SubscriptionWrapper.Instruction.DELETE_KEY_NO_PROPAGATE)) {
                store.delete(subscriptionKey);
            } else {
                store.put(subscriptionKey, newValue);
            }
            final Change<ValueAndTimestamp<SubscriptionWrapper<K>>> change = new Change<>(newValue, oldValue);
            // Note: both the key and newValue are non-nullable.
            context().forward(record
                .withKey(new CombinedKey<>(record.key(), record.value().getPrimaryKey()))
                .withValue(change)
                .withTimestamp(newValue.timestamp()));
        }
    };
}
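The store key produced by keySchema.toBytes places the foreign key before the primary key, which is what lets the prefix scanner mentioned above find every subscription for a given foreign key in one range scan. A conceptual sketch of that layout (an illustration only, not the actual CombinedKeySchema serialization):

// Hypothetical layout: a length-prefixed foreign key followed by the primary key,
// so all subscriptions for one foreign key share a common byte prefix.
static byte[] combinedKeyBytes(final byte[] foreignKey, final byte[] primaryKey) {
    final ByteBuffer buf = ByteBuffer.allocate(Integer.BYTES + foreignKey.length + primaryKey.length);
    buf.putInt(foreignKey.length); // length prefix keeps foreign keys self-delimiting
    buf.put(foreignKey);
    buf.put(primaryKey);
    return buf.array();
}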
Use of org.apache.kafka.streams.processor.api.ProcessorContext in project kafka by apache.
The class KafkaStreamsTest, method statelessTopologyShouldNotCreateStateDirectory.
@Test
public void statelessTopologyShouldNotCreateStateDirectory() throws Exception {
    final String safeTestName = safeUniqueTestName(getClass(), testName);
    final String inputTopic = safeTestName + "-input";
    final String outputTopic = safeTestName + "-output";
    final Topology topology = new Topology();
    topology
        .addSource("source", Serdes.String().deserializer(), Serdes.String().deserializer(), inputTopic)
        .addProcessor("process", () -> new Processor<String, String, String, String>() {
            private ProcessorContext<String, String> context;

            @Override
            public void init(final ProcessorContext<String, String> context) {
                this.context = context;
            }

            @Override
            public void process(final Record<String, String> record) {
                // Forward only records whose value has an even length,
                // prepending the key to the value.
                if (record.value().length() % 2 == 0) {
                    context.forward(record.withValue(record.key() + record.value()));
                }
            }
        }, "source")
        .addSink("sink", outputTopic, new StringSerializer(), new StringSerializer(), "process");
    startStreamsAndCheckDirExists(topology, false);
}
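The processor's even-length filter can also be exercised without a broker using TopologyTestDriver from kafka-streams-test-utils; a minimal sketch (the application id and the piped inputs here are illustrative):

final Properties props = new Properties();
props.put(StreamsConfig.APPLICATION_ID_CONFIG, "stateless-topology-test"); // illustrative id
props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "dummy:1234");           // never contacted by the driver

try (final TopologyTestDriver driver = new TopologyTestDriver(topology, props)) {
    final TestInputTopic<String, String> in =
        driver.createInputTopic(inputTopic, new StringSerializer(), new StringSerializer());
    final TestOutputTopic<String, String> out =
        driver.createOutputTopic(outputTopic, new StringDeserializer(), new StringDeserializer());

    in.pipeInput("k", "even"); // length 4: forwarded as "k" + "even"
    in.pipeInput("k", "odd");  // length 3: dropped by the processor

    assertThat(out.readValuesToList(), is(singletonList("keven")));
}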
Use of org.apache.kafka.streams.processor.api.ProcessorContext in project kafka by apache.
The class MockProcessorContextAPITest, method shouldCaptureApplicationAndRecordMetadata.
@Test
public void shouldCaptureApplicationAndRecordMetadata() {
    final Properties config = mkProperties(mkMap(
        mkEntry(StreamsConfig.APPLICATION_ID_CONFIG, "testMetadata"),
        mkEntry(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "")));

    final Processor<String, Object, String, Object> processor = new Processor<String, Object, String, Object>() {
        private ProcessorContext<String, Object> context;

        @Override
        public void init(final ProcessorContext<String, Object> context) {
            this.context = context;
        }

        @Override
        public void process(final Record<String, Object> record) {
            context.forward(new Record<String, Object>("appId", context.applicationId(), 0L));
            context.forward(new Record<String, Object>("taskId", context.taskId(), 0L));
            if (context.recordMetadata().isPresent()) {
                final RecordMetadata recordMetadata = context.recordMetadata().get();
                context.forward(new Record<String, Object>("topic", recordMetadata.topic(), 0L));
                context.forward(new Record<String, Object>("partition", recordMetadata.partition(), 0L));
                context.forward(new Record<String, Object>("offset", recordMetadata.offset(), 0L));
            }
            context.forward(new Record<String, Object>("record", record, 0L));
        }
    };

    final MockProcessorContext<String, Object> context = new MockProcessorContext<>(config);
    processor.init(context);

    // Without record metadata set, only appId, taskId, and the record itself are forwarded.
    processor.process(new Record<>("foo", 5L, 0L));
    {
        final List<CapturedForward<? extends String, ?>> forwarded = context.forwarded();
        final List<CapturedForward<? extends String, ?>> expected = asList(
            new CapturedForward<>(new Record<>("appId", "testMetadata", 0L)),
            new CapturedForward<>(new Record<>("taskId", new TaskId(0, 0), 0L)),
            new CapturedForward<>(new Record<>("record", new Record<>("foo", 5L, 0L), 0L)));
        assertThat(forwarded, is(expected));
    }

    context.resetForwards();
    context.setRecordMetadata("t1", 0, 0L);

    // With record metadata set, topic, partition, and offset are forwarded as well.
    processor.process(new Record<>("foo", 5L, 0L));
    {
        final List<CapturedForward<? extends String, ?>> forwarded = context.forwarded();
        final List<CapturedForward<? extends String, ?>> expected = asList(
            new CapturedForward<>(new Record<>("appId", "testMetadata", 0L)),
            new CapturedForward<>(new Record<>("taskId", new TaskId(0, 0), 0L)),
            new CapturedForward<>(new Record<>("topic", "t1", 0L)),
            new CapturedForward<>(new Record<>("partition", 0, 0L)),
            new CapturedForward<>(new Record<>("offset", 0L, 0L)),
            new CapturedForward<>(new Record<>("record", new Record<>("foo", 5L, 0L), 0L)));
        assertThat(forwarded, is(expected));
    }
}
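Beyond captured forwards, the same mock also records commits and scheduled punctuators, which is useful for testing processors that batch work; a short sketch (hedged: method names as I understand the kafka-streams-test-utils MockProcessorContext, worth verifying against the version in use):

final MockProcessorContext<String, Object> ctx = new MockProcessorContext<>(config);

// schedule() on the mock captures the punctuator instead of starting a timer.
ctx.schedule(Duration.ofSeconds(1), PunctuationType.WALL_CLOCK_TIME, timestamp -> { });
assertThat(ctx.scheduledPunctuators().size(), is(1));

// commit() flips a flag the test can inspect.
ctx.commit();
assertThat(ctx.committed(), is(true));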