use of org.apache.kafka.streams.processor.AbstractProcessor in project apache-kafka-on-k8s by banzaicloud.
the class SimpleBenchmark method createKafkaStreamsWithStateStore.
private KafkaStreams createKafkaStreamsWithStateStore(String topic, final CountDownLatch latch, boolean enableCaching) {
    final Properties props = setStreamProperties("simple-benchmark-streams-with-store" + enableCaching);
    StreamsBuilder builder = new StreamsBuilder();
    final StoreBuilder<KeyValueStore<Integer, byte[]>> storeBuilder =
        Stores.keyValueStoreBuilder(Stores.persistentKeyValueStore("store"), Serdes.Integer(), Serdes.ByteArray());
    if (enableCaching) {
        builder.addStateStore(storeBuilder.withCachingEnabled());
    } else {
        builder.addStateStore(storeBuilder);
    }
    KStream<Integer, byte[]> source = builder.stream(topic, Consumed.with(INTEGER_SERDE, BYTE_SERDE));
    source.process(new ProcessorSupplier<Integer, byte[]>() {
        @Override
        public Processor<Integer, byte[]> get() {
            return new AbstractProcessor<Integer, byte[]>() {
                KeyValueStore<Integer, byte[]> store;

                @SuppressWarnings("unchecked")
                @Override
                public void init(ProcessorContext context) {
                    store = (KeyValueStore<Integer, byte[]>) context.getStateStore("store");
                }

                @Override
                public void process(Integer key, byte[] value) {
                    store.put(key, value);
                    processedRecords.getAndIncrement();
                    processedBytes += value.length + Integer.SIZE;
                    if (processedRecords.get() == numRecords) {
                        latch.countDown();
                    }
                }

                @Override
                public void punctuate(long timestamp) {
                }

                @Override
                public void close() {
                }
            };
        }
    }, "store");
    return createKafkaStreamsWithExceptionHandler(builder, props);
}
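The two helpers called here, setStreamProperties and createKafkaStreamsWithExceptionHandler, are defined elsewhere in SimpleBenchmark and are not part of this excerpt. A minimal sketch of what they could look like for the StreamsBuilder variant, assuming a kafka field holding the bootstrap-server list (the real implementations may differ; the older KStreamBuilder examples below would take the old builder type instead):

private Properties setStreamProperties(final String applicationId) {
    final Properties props = new Properties();
    props.put(StreamsConfig.APPLICATION_ID_CONFIG, applicationId);
    props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, kafka); // assumed field with the broker list
    return props;
}

private KafkaStreams createKafkaStreamsWithExceptionHandler(final StreamsBuilder builder, final Properties props) {
    final KafkaStreams streams = new KafkaStreams(builder.build(), props);
    streams.setUncaughtExceptionHandler(new Thread.UncaughtExceptionHandler() {
        @Override
        public void uncaughtException(final Thread t, final Throwable e) {
            // fail fast so a broken benchmark run is noticed instead of hanging
            System.out.println("FATAL: uncaught exception on " + t + ": " + e);
            System.exit(1);
        }
    });
    return streams;
}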
use of org.apache.kafka.streams.processor.AbstractProcessor in project kafka by apache.
the class SimpleBenchmark method createKafkaStreamsWithSink.
private KafkaStreams createKafkaStreamsWithSink(String topic, final CountDownLatch latch) {
    final Properties props = setStreamProperties("simple-benchmark-streams-with-sink");
    KStreamBuilder builder = new KStreamBuilder();
    KStream<Integer, byte[]> source = builder.stream(INTEGER_SERDE, BYTE_SERDE, topic);
    source.to(INTEGER_SERDE, BYTE_SERDE, SINK_TOPIC);
    source.process(new ProcessorSupplier<Integer, byte[]>() {
        @Override
        public Processor<Integer, byte[]> get() {
            return new AbstractProcessor<Integer, byte[]>() {
                @Override
                public void init(ProcessorContext context) {
                }

                @Override
                public void process(Integer key, byte[] value) {
                    processedRecords++;
                    processedBytes += value.length + Integer.SIZE;
                    if (processedRecords == numRecords) {
                        latch.countDown();
                    }
                }

                @Override
                public void punctuate(long timestamp) {
                }

                @Override
                public void close() {
                }
            };
        }
    });
    return createKafkaStreamsWithExceptionHandler(builder, props);
}
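This variant uses the older, since-deprecated KStreamBuilder API, which takes the serdes directly in stream() and to(). For comparison, the same source-to-sink topology in the newer StreamsBuilder API (as in the first example above) would look roughly like this, assuming the same serde constants and SINK_TOPIC name:

StreamsBuilder builder = new StreamsBuilder();
KStream<Integer, byte[]> source = builder.stream(topic, Consumed.with(INTEGER_SERDE, BYTE_SERDE));
source.to(SINK_TOPIC, Produced.with(INTEGER_SERDE, BYTE_SERDE));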
use of org.apache.kafka.streams.processor.AbstractProcessor in project kafka by apache.
the class SimpleBenchmark method createKafkaStreams.
private KafkaStreams createKafkaStreams(String topic, final CountDownLatch latch) {
    Properties props = setStreamProperties("simple-benchmark-streams");
    KStreamBuilder builder = new KStreamBuilder();
    KStream<Integer, byte[]> source = builder.stream(INTEGER_SERDE, BYTE_SERDE, topic);
    source.process(new ProcessorSupplier<Integer, byte[]>() {
        @Override
        public Processor<Integer, byte[]> get() {
            return new AbstractProcessor<Integer, byte[]>() {
                @Override
                public void init(ProcessorContext context) {
                }

                @Override
                public void process(Integer key, byte[] value) {
                    processedRecords++;
                    processedBytes += value.length + Integer.SIZE;
                    if (processedRecords == numRecords) {
                        latch.countDown();
                    }
                }

                @Override
                public void punctuate(long timestamp) {
                }

                @Override
                public void close() {
                }
            };
        }
    });
    return createKafkaStreamsWithExceptionHandler(builder, props);
}
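A caller is expected to block on the latch, which the processor releases once numRecords records have been counted. A hypothetical driver, assuming a pre-populated source topic named "simpleBenchmarkSourceTopic":

final CountDownLatch latch = new CountDownLatch(1);
final KafkaStreams streams = createKafkaStreams("simpleBenchmarkSourceTopic", latch);
streams.start();
latch.await(); // released by the processor after numRecords records
streams.close();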
use of org.apache.kafka.streams.processor.AbstractProcessor in project kafka by apache.
the class StreamTaskTest method shouldWrapKafkaExceptionsWithStreamsExceptionAndAddContextWhenPunctuating.
@SuppressWarnings("unchecked")
@Test
public void shouldWrapKafkaExceptionsWithStreamsExceptionAndAddContextWhenPunctuating() throws Exception {
    final ProcessorNode punctuator = new ProcessorNode("test", new AbstractProcessor() {
        @Override
        public void init(final ProcessorContext context) {
            context.schedule(1);
        }

        @Override
        public void process(final Object key, final Object value) {
            //
        }

        @Override
        public void punctuate(final long timestamp) {
            throw new KafkaException("KABOOM!");
        }
    }, Collections.<String>emptySet());
    punctuator.init(new NoOpProcessorContext());
    try {
        task.punctuate(punctuator, 1);
        fail("Should've thrown StreamsException");
    } catch (StreamsException e) {
        final String message = e.getMessage();
        assertTrue("message=" + message + " should contain processor", message.contains("processor=test"));
        assertThat(((ProcessorContextImpl) task.processorContext()).currentNode(), nullValue());
    }
}
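The context.schedule(1) call uses the old punctuation API, in which the processor's own punctuate(long) is invoked as stream time advances. In newer Streams versions the registration is explicit; a rough equivalent of this test's init(), assuming java.time.Duration, PunctuationType, and Punctuator are imported:

@Override
public void init(final ProcessorContext context) {
    context.schedule(Duration.ofMillis(1), PunctuationType.STREAM_TIME, new Punctuator() {
        @Override
        public void punctuate(final long timestamp) {
            throw new KafkaException("KABOOM!");
        }
    });
}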
use of org.apache.kafka.streams.processor.AbstractProcessor in project apache-kafka-on-k8s by banzaicloud.
the class MockProcessorContextTest method shouldCaptureApplicationAndRecordMetadata.
@Test
public void shouldCaptureApplicationAndRecordMetadata() {
    final Properties config = new Properties();
    config.put(StreamsConfig.APPLICATION_ID_CONFIG, "testMetadata");
    config.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "");
    final AbstractProcessor<String, Object> processor = new AbstractProcessor<String, Object>() {
        @Override
        public void process(final String key, final Object value) {
            context().forward("appId", context().applicationId());
            context().forward("taskId", context().taskId());
            context().forward("topic", context().topic());
            context().forward("partition", context().partition());
            context().forward("offset", context().offset());
            context().forward("timestamp", context().timestamp());
            context().forward("key", key);
            context().forward("value", value);
        }
    };
    final MockProcessorContext context = new MockProcessorContext(config);
    processor.init(context);
    try {
        processor.process("foo", 5L);
        fail("Should have thrown an exception.");
    } catch (final IllegalStateException expected) {
        // expected, since the record metadata isn't initialized
    }
    context.resetForwards();
    context.setRecordMetadata("t1", 0, 0L, 0L);
    {
        processor.process("foo", 5L);
        final Iterator<CapturedForward> forwarded = context.forwarded().iterator();
        assertEquals(new KeyValue<>("appId", "testMetadata"), forwarded.next().keyValue());
        assertEquals(new KeyValue<>("taskId", new TaskId(0, 0)), forwarded.next().keyValue());
        assertEquals(new KeyValue<>("topic", "t1"), forwarded.next().keyValue());
        assertEquals(new KeyValue<>("partition", 0), forwarded.next().keyValue());
        assertEquals(new KeyValue<>("offset", 0L), forwarded.next().keyValue());
        assertEquals(new KeyValue<>("timestamp", 0L), forwarded.next().keyValue());
        assertEquals(new KeyValue<>("key", "foo"), forwarded.next().keyValue());
        assertEquals(new KeyValue<>("value", 5L), forwarded.next().keyValue());
    }
    context.resetForwards();
    // record metadata should be "sticky"
    context.setOffset(1L);
    context.setTimestamp(10L);
    {
        processor.process("bar", 50L);
        final Iterator<CapturedForward> forwarded = context.forwarded().iterator();
        assertEquals(new KeyValue<>("appId", "testMetadata"), forwarded.next().keyValue());
        assertEquals(new KeyValue<>("taskId", new TaskId(0, 0)), forwarded.next().keyValue());
        assertEquals(new KeyValue<>("topic", "t1"), forwarded.next().keyValue());
        assertEquals(new KeyValue<>("partition", 0), forwarded.next().keyValue());
        assertEquals(new KeyValue<>("offset", 1L), forwarded.next().keyValue());
        assertEquals(new KeyValue<>("timestamp", 10L), forwarded.next().keyValue());
        assertEquals(new KeyValue<>("key", "bar"), forwarded.next().keyValue());
        assertEquals(new KeyValue<>("value", 50L), forwarded.next().keyValue());
    }
    context.resetForwards();
    // record metadata should be "sticky"
    context.setTopic("t2");
    context.setPartition(30);
    {
        processor.process("baz", 500L);
        final Iterator<CapturedForward> forwarded = context.forwarded().iterator();
        assertEquals(new KeyValue<>("appId", "testMetadata"), forwarded.next().keyValue());
        assertEquals(new KeyValue<>("taskId", new TaskId(0, 0)), forwarded.next().keyValue());
        assertEquals(new KeyValue<>("topic", "t2"), forwarded.next().keyValue());
        assertEquals(new KeyValue<>("partition", 30), forwarded.next().keyValue());
        assertEquals(new KeyValue<>("offset", 1L), forwarded.next().keyValue());
        assertEquals(new KeyValue<>("timestamp", 10L), forwarded.next().keyValue());
        assertEquals(new KeyValue<>("key", "baz"), forwarded.next().keyValue());
        assertEquals(new KeyValue<>("value", 500L), forwarded.next().keyValue());
    }
}
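resetForwards() clears the captured output between the three blocks above, while the record metadata set via setRecordMetadata() and the individual setters stays sticky. Asserting a clean slate after a reset is a quick sanity check (a sketch against the same context):

context.resetForwards();
assertTrue(context.forwarded().isEmpty());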