Use of org.apache.kafka.streams.processor.ProcessorContext in project kafka by apache.
From the class KStreamTransformTest, method testTransform.
@Test
public void testTransform() {
    KStreamBuilder builder = new KStreamBuilder();

    // Stateful transformer: doubles each key and emits a running total of the values.
    TransformerSupplier<Number, Number, KeyValue<Integer, Integer>> transformerSupplier =
        new TransformerSupplier<Number, Number, KeyValue<Integer, Integer>>() {
            @Override
            public Transformer<Number, Number, KeyValue<Integer, Integer>> get() {
                return new Transformer<Number, Number, KeyValue<Integer, Integer>>() {

                    private int total = 0;

                    @Override
                    public void init(ProcessorContext context) {
                        // No initialization needed; the ProcessorContext is unused here.
                    }

                    @Override
                    public KeyValue<Integer, Integer> transform(Number key, Number value) {
                        total += value.intValue();
                        return KeyValue.pair(key.intValue() * 2, total);
                    }

                    @Override
                    public KeyValue<Integer, Integer> punctuate(long timestamp) {
                        // Emit a marker record carrying the punctuation timestamp.
                        return KeyValue.pair(-1, (int) timestamp);
                    }

                    @Override
                    public void close() {
                    }
                };
            }
        };

    final int[] expectedKeys = { 1, 10, 100, 1000 };

    MockProcessorSupplier<Integer, Integer> processor = new MockProcessorSupplier<>();
    KStream<Integer, Integer> stream = builder.stream(intSerde, intSerde, topicName);
    stream.transform(transformerSupplier).process(processor);

    driver = new KStreamTestDriver(builder);
    for (int expectedKey : expectedKeys) {
        driver.process(topicName, expectedKey, expectedKey * 10);
    }
    driver.punctuate(2);
    driver.punctuate(3);

    // Four transformed records plus two punctuation records.
    assertEquals(6, processor.processed.size());
    String[] expected = { "2:10", "20:110", "200:1110", "2000:11110", "-1:2", "-1:3" };
    for (int i = 0; i < expected.length; i++) {
        assertEquals(expected[i], processor.processed.get(i));
    }
}
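The transformer above ignores the ProcessorContext handed to init(). In practice a Transformer usually keeps that reference, since it is the only way to reach record metadata, state stores, and forwarding from inside transform(). A minimal sketch against the same pre-1.0 API; the class name OffsetTaggingTransformer is illustrative, not from the Kafka tests:

import org.apache.kafka.streams.KeyValue;
import org.apache.kafka.streams.kstream.Transformer;
import org.apache.kafka.streams.processor.ProcessorContext;

// Illustrative only: keeps the ProcessorContext to tag each output with record metadata.
public class OffsetTaggingTransformer implements Transformer<Integer, Integer, KeyValue<Integer, String>> {

    private ProcessorContext context;

    @Override
    public void init(ProcessorContext context) {
        this.context = context;  // retain for metadata access during transform()
    }

    @Override
    public KeyValue<Integer, String> transform(Integer key, Integer value) {
        // topic()/partition()/offset() describe the record currently being processed.
        return KeyValue.pair(key,
            value + "@" + context.topic() + "/" + context.partition() + "/" + context.offset());
    }

    @Override
    public KeyValue<Integer, String> punctuate(long timestamp) {
        return null;  // no scheduled output
    }

    @Override
    public void close() {
    }
}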
Use of org.apache.kafka.streams.processor.ProcessorContext in project kafka by apache.
From the class SimpleBenchmark, method createKafkaStreamsWithStateStore.
private KafkaStreams createKafkaStreamsWithStateStore(String topic, final CountDownLatch latch, boolean enableCaching) {
    Properties props = setStreamProperties("simple-benchmark-streams-with-store" + enableCaching);
    KStreamBuilder builder = new KStreamBuilder();

    // Register a persistent key-value store, optionally with record caching enabled.
    if (enableCaching) {
        builder.addStateStore(Stores.create("store").withIntegerKeys().withByteArrayValues().persistent().enableCaching().build());
    } else {
        builder.addStateStore(Stores.create("store").withIntegerKeys().withByteArrayValues().persistent().build());
    }

    KStream<Integer, byte[]> source = builder.stream(INTEGER_SERDE, BYTE_SERDE, topic);

    source.process(new ProcessorSupplier<Integer, byte[]>() {
        @Override
        public Processor<Integer, byte[]> get() {
            return new AbstractProcessor<Integer, byte[]>() {

                KeyValueStore<Integer, byte[]> store;

                @SuppressWarnings("unchecked")
                @Override
                public void init(ProcessorContext context) {
                    // Look up the store attached to this processor by name.
                    store = (KeyValueStore<Integer, byte[]>) context.getStateStore("store");
                }

                @Override
                public void process(Integer key, byte[] value) {
                    store.put(key, value);
                    processedRecords++;
                    processedBytes += value.length + Integer.SIZE;
                    if (processedRecords == numRecords) {
                        latch.countDown();
                    }
                }

                @Override
                public void punctuate(long timestamp) {
                }

                @Override
                public void close() {
                }
            };
        }
    }, "store");  // the store name here connects the registered store to this processor

    return createKafkaStreamsWithExceptionHandler(builder, props);
}
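The benchmark only writes to the store, but the same KeyValueStore handle returned by context.getStateStore() supports reads and range scans as well. A minimal sketch under the same API; the class name and the [0, 100] key range are illustrative:

import org.apache.kafka.streams.KeyValue;
import org.apache.kafka.streams.processor.AbstractProcessor;
import org.apache.kafka.streams.processor.ProcessorContext;
import org.apache.kafka.streams.state.KeyValueIterator;
import org.apache.kafka.streams.state.KeyValueStore;

// Illustrative only: a processor that reads back from the store it is connected to.
public class StoreReadingProcessor extends AbstractProcessor<Integer, byte[]> {

    private KeyValueStore<Integer, byte[]> store;

    @SuppressWarnings("unchecked")
    @Override
    public void init(ProcessorContext context) {
        super.init(context);
        store = (KeyValueStore<Integer, byte[]>) context.getStateStore("store");
    }

    @Override
    public void process(Integer key, byte[] value) {
        store.put(key, value);
        byte[] latest = store.get(key);  // point lookup of what was just written
        // Range scans are also available; the iterator must be closed.
        try (KeyValueIterator<Integer, byte[]> range = store.range(0, 100)) {
            while (range.hasNext()) {
                KeyValue<Integer, byte[]> entry = range.next();
                // entry.key / entry.value cover keys in [0, 100]
            }
        }
    }
}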
Use of org.apache.kafka.streams.processor.ProcessorContext in project kafka by apache.
From the class StreamTaskTest, method shouldCheckpointOffsetsOnCommit.
@SuppressWarnings("unchecked")
@Test
public void shouldCheckpointOffsetsOnCommit() throws Exception {
    final String storeName = "test";
    final String changelogTopic = ProcessorStateManager.storeChangelogTopic("appId", storeName);

    // A persistent store whose init registers it with change-logging enabled.
    final InMemoryKeyValueStore inMemoryStore = new InMemoryKeyValueStore(storeName, null, null) {
        @Override
        public void init(final ProcessorContext context, final StateStore root) {
            context.register(root, true, null);
        }

        @Override
        public boolean persistent() {
            return true;
        }
    };

    final ProcessorTopology topology = new ProcessorTopology(
        Collections.<ProcessorNode>emptyList(),
        Collections.<String, SourceNode>emptyMap(),
        Collections.<String, SinkNode>emptyMap(),
        Collections.<StateStore>singletonList(inMemoryStore),
        Collections.singletonMap(storeName, changelogTopic),
        Collections.<StateStore>emptyList());

    final TopicPartition partition = new TopicPartition(changelogTopic, 0);

    // Report offset 543 as the last offset written to the changelog.
    final NoOpRecordCollector recordCollector = new NoOpRecordCollector() {
        @Override
        public Map<TopicPartition, Long> offsets() {
            return Collections.singletonMap(partition, 543L);
        }
    };

    restoreStateConsumer.updatePartitions(changelogTopic,
        Collections.singletonList(new PartitionInfo(changelogTopic, 0, null, new Node[0], new Node[0])));
    restoreStateConsumer.updateEndOffsets(Collections.singletonMap(partition, 0L));
    restoreStateConsumer.updateBeginningOffsets(Collections.singletonMap(partition, 0L));

    final StreamsMetrics streamsMetrics = new MockStreamsMetrics(new Metrics());
    final TaskId taskId = new TaskId(0, 0);
    final MockTime time = new MockTime();
    final StreamsConfig config = createConfig(baseDir);
    final StreamTask streamTask = new StreamTask(taskId, "appId", partitions, topology, consumer,
        changelogReader, config, streamsMetrics, stateDirectory,
        new ThreadCache("testCache", 0, streamsMetrics), time, recordCollector);

    time.sleep(config.getLong(StreamsConfig.COMMIT_INTERVAL_MS_CONFIG));
    streamTask.commit();

    // The checkpoint records the offset after the last written record (543 + 1).
    final OffsetCheckpoint checkpoint = new OffsetCheckpoint(
        new File(stateDirectory.directoryForTask(taskId), ProcessorStateManager.CHECKPOINT_FILE_NAME));
    assertThat(checkpoint.read(), equalTo(Collections.singletonMap(partition, 544L)));
}
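The test store passes null as its restore callback because only the checkpoint is under test. A real custom store typically hands context.register() a StateRestoreCallback so the task can replay changelog records into it on startup. A minimal sketch against the pre-1.0 register(StateStore, boolean, StateRestoreCallback) signature; the class name and map-backed storage are illustrative:

import java.util.HashMap;
import java.util.Map;
import org.apache.kafka.common.utils.Bytes;
import org.apache.kafka.streams.processor.ProcessorContext;
import org.apache.kafka.streams.processor.StateRestoreCallback;
import org.apache.kafka.streams.processor.StateStore;

// Illustrative only: a custom store that restores itself from its changelog.
public class RestorableStore implements StateStore {

    private final String name;
    private final Map<Bytes, byte[]> data = new HashMap<>();
    private volatile boolean open = false;

    public RestorableStore(String name) {
        this.name = name;
    }

    @Override
    public String name() {
        return name;
    }

    @Override
    public void init(ProcessorContext context, StateStore root) {
        // true enables change-logging; the callback replays changelog records on restore.
        context.register(root, true, new StateRestoreCallback() {
            @Override
            public void restore(byte[] key, byte[] value) {
                data.put(Bytes.wrap(key), value);
            }
        });
        open = true;
    }

    @Override
    public void flush() {
    }

    @Override
    public void close() {
        open = false;
    }

    @Override
    public boolean persistent() {
        return false;
    }

    @Override
    public boolean isOpen() {
        return open;
    }
}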
Use of org.apache.kafka.streams.processor.ProcessorContext in project kafka by apache.
From the class StreamTaskTest, method shouldWrapKafkaExceptionsWithStreamsExceptionAndAddContextWhenPunctuating.
@SuppressWarnings("unchecked")
@Test
public void shouldWrapKafkaExceptionsWithStreamsExceptionAndAddContextWhenPunctuating() throws Exception {
    // A processor whose punctuate() deliberately fails.
    final ProcessorNode punctuator = new ProcessorNode("test", new AbstractProcessor() {
        @Override
        public void init(final ProcessorContext context) {
            context.schedule(1);
        }

        @Override
        public void process(final Object key, final Object value) {
            // no-op
        }

        @Override
        public void punctuate(final long timestamp) {
            throw new KafkaException("KABOOM!");
        }
    }, Collections.<String>emptySet());

    punctuator.init(new NoOpProcessorContext());
    try {
        task.punctuate(punctuator, 1);
        fail("Should've thrown StreamsException");
    } catch (StreamsException e) {
        // The wrapped exception must name the failing processor, and the task's
        // current node must be cleared afterwards.
        final String message = e.getMessage();
        assertTrue("message=" + message + " should contain processor", message.contains("processor=test"));
        assertThat(((ProcessorContextImpl) task.processorContext()).currentNode(), nullValue());
    }
}
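The context.schedule(1) call in init() is what arranges for punctuate() to be invoked at all: it asks the task to fire the processor's punctuate(timestamp) callback at the given stream-time interval (in this API generation; later releases moved punctuation to a Punctuator interface). A minimal sketch of the normal, non-throwing pattern; the class name and 60-second interval are illustrative:

import org.apache.kafka.streams.processor.AbstractProcessor;
import org.apache.kafka.streams.processor.ProcessorContext;

// Illustrative only: count records and emit the count once per minute of stream time.
public class CountingPunctuator extends AbstractProcessor<String, String> {

    private long count = 0;

    @Override
    public void init(ProcessorContext context) {
        super.init(context);
        context.schedule(60000L);  // request punctuate() every 60s of stream time
    }

    @Override
    public void process(String key, String value) {
        count++;
    }

    @Override
    public void punctuate(long timestamp) {
        // Forward the running count downstream, keyed by the punctuation time.
        context().forward(String.valueOf(timestamp), String.valueOf(count));
        count = 0;
    }
}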
Use of org.apache.kafka.streams.processor.ProcessorContext in project kafka by apache.
From the class KStreamTransformValuesTest, method testTransform.
@Test
public void testTransform() {
    KStreamBuilder builder = new KStreamBuilder();

    // Stateful value transformer: replaces each value with a running total.
    ValueTransformerSupplier<Number, Integer> valueTransformerSupplier =
        new ValueTransformerSupplier<Number, Integer>() {
            @Override
            public ValueTransformer<Number, Integer> get() {
                return new ValueTransformer<Number, Integer>() {

                    private int total = 0;

                    @Override
                    public void init(ProcessorContext context) {
                        // No initialization needed; the ProcessorContext is unused here.
                    }

                    @Override
                    public Integer transform(Number value) {
                        total += value.intValue();
                        return total;
                    }

                    @Override
                    public Integer punctuate(long timestamp) {
                        return null;  // no scheduled output
                    }

                    @Override
                    public void close() {
                    }
                };
            }
        };

    final int[] expectedKeys = { 1, 10, 100, 1000 };

    MockProcessorSupplier<Integer, Integer> processor = new MockProcessorSupplier<>();
    KStream<Integer, Integer> stream = builder.stream(intSerde, intSerde, topicName);
    stream.transformValues(valueTransformerSupplier).process(processor);

    driver = new KStreamTestDriver(builder);
    for (int expectedKey : expectedKeys) {
        driver.process(topicName, expectedKey, expectedKey * 10);
    }

    // Keys are unchanged; values are the running totals.
    assertEquals(4, processor.processed.size());
    String[] expected = { "1:10", "10:110", "100:1110", "1000:11110" };
    for (int i = 0; i < expected.length; i++) {
        assertEquals(expected[i], processor.processed.get(i));
    }
}
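Unlike transform(), transformValues() cannot change the key, which lets Streams avoid a downstream repartition; the ProcessorContext is still the gateway to state. A minimal sketch that keeps the running total in a state store instead of an instance field, assuming a store named "totals" has been added to the topology and connected to the transformer; both the class and store names are illustrative:

import org.apache.kafka.streams.kstream.ValueTransformer;
import org.apache.kafka.streams.processor.ProcessorContext;
import org.apache.kafka.streams.state.KeyValueStore;

// Illustrative only: a running total kept in a fault-tolerant state store,
// so it survives task restarts and rebalances.
public class PersistentTotalTransformer implements ValueTransformer<Integer, Integer> {

    private KeyValueStore<String, Integer> store;

    @SuppressWarnings("unchecked")
    @Override
    public void init(ProcessorContext context) {
        store = (KeyValueStore<String, Integer>) context.getStateStore("totals");
    }

    @Override
    public Integer transform(Integer value) {
        Integer total = store.get("total");
        total = (total == null ? 0 : total) + value;
        store.put("total", total);
        return total;
    }

    @Override
    public Integer punctuate(long timestamp) {
        return null;  // no scheduled output
    }

    @Override
    public void close() {
    }
}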