Use of org.apache.kafka.streams.processor.ProcessorSupplier in project apache-kafka-on-k8s by banzaicloud.
From the class TopologyTestDriverTest, method shouldReturnAllStores.
@Test
public void shouldReturnAllStores() {
    // Attach one regular store and one global store to a plain source->sink
    // topology, then verify the driver reports exactly those two stores.
    final Topology topology = setupSourceSinkTopology();

    topology.addStateStore(
        new KeyValueStoreBuilder<>(
            Stores.inMemoryKeyValueStore("store"),
            Serdes.ByteArray(),
            Serdes.ByteArray(),
            new SystemTime()).withLoggingDisabled());

    topology.addGlobalStore(
        new KeyValueStoreBuilder<>(
            Stores.inMemoryKeyValueStore("globalStore"),
            Serdes.ByteArray(),
            Serdes.ByteArray(),
            new SystemTime()).withLoggingDisabled(),
        "sourceProcessorName",
        Serdes.ByteArray().deserializer(),
        Serdes.ByteArray().deserializer(),
        "globalTopicName",
        "globalProcessorName",
        new ProcessorSupplier() {
            @Override
            public Processor get() {
                // The global processor's behavior is irrelevant here; only the
                // store registration matters for this assertion.
                return null;
            }
        });

    testDriver = new TopologyTestDriver(topology, config);

    final Set<String> expectedStoreNames = new HashSet<>();
    expectedStoreNames.add("globalStore");
    expectedStoreNames.add("store");
    assertThat(testDriver.getAllStateStores().keySet(), equalTo(expectedStoreNames));
}
Use of org.apache.kafka.streams.processor.ProcessorSupplier in project apache-kafka-on-k8s by banzaicloud.
From the class StreamThreadTest, method shouldPunctuateActiveTask.
@Test
public void shouldPunctuateActiveTask() {
// Timestamps recorded by the two punctuator callbacks; the assertions below
// count how many times each punctuation type fired.
final List<Long> punctuatedStreamTime = new ArrayList<>();
final List<Long> punctuatedWallClockTime = new ArrayList<>();
// Processor whose init() schedules two punctuators at a 100 ms interval:
// one driven by stream time, one by wall-clock time. Each callback merely
// records the timestamp it was invoked with.
final ProcessorSupplier<Object, Object> punctuateProcessor = new ProcessorSupplier<Object, Object>() {
@Override
public Processor<Object, Object> get() {
return new Processor<Object, Object>() {
@Override
public void init(ProcessorContext context) {
context.schedule(100L, PunctuationType.STREAM_TIME, new Punctuator() {
@Override
public void punctuate(long timestamp) {
punctuatedStreamTime.add(timestamp);
}
});
context.schedule(100L, PunctuationType.WALL_CLOCK_TIME, new Punctuator() {
@Override
public void punctuate(long timestamp) {
punctuatedWallClockTime.add(timestamp);
}
});
}
@Override
public void process(Object key, Object value) {
// No per-record processing needed; this test only exercises punctuation.
}
@SuppressWarnings("deprecation")
@Override
public void punctuate(long timestamp) {
// Deprecated processor-level punctuate; intentionally unused.
}
@Override
public void close() {
}
};
}
};
internalStreamsBuilder.stream(Collections.singleton(topic1), consumed).process(punctuateProcessor);
final StreamThread thread = createStreamThread(clientId, config, false);
// Drive the thread through the rebalance protocol by hand so it picks up
// a single active task for partition t1p1.
thread.setState(StreamThread.State.RUNNING);
thread.rebalanceListener.onPartitionsRevoked(null);
final List<TopicPartition> assignedPartitions = new ArrayList<>();
final Map<TaskId, Set<TopicPartition>> activeTasks = new HashMap<>();
// assign single partition
assignedPartitions.add(t1p1);
activeTasks.put(task1, Collections.singleton(t1p1));
thread.taskManager().setAssignmentMetadata(activeTasks, Collections.<TaskId, Set<TopicPartition>>emptyMap());
clientSupplier.consumer.assign(assignedPartitions);
clientSupplier.consumer.updateBeginningOffsets(Collections.singletonMap(t1p1, 0L));
thread.rebalanceListener.onPartitionsAssigned(assignedPartitions);
// First iteration: no records consumed yet and no wall-clock time elapsed,
// so neither punctuator may fire.
thread.runOnce(-1);
assertEquals(0, punctuatedStreamTime.size());
assertEquals(0, punctuatedWallClockTime.size());
mockTime.sleep(100L);
// Feed 10 records with timestamps 0, 100, ..., 900 so stream time advances
// past the 100 ms punctuation interval.
for (long i = 0L; i < 10L; i++) {
clientSupplier.consumer.addRecord(new ConsumerRecord<>(topic1, 1, i, i * 100L, TimestampType.CREATE_TIME, ConsumerRecord.NULL_CHECKSUM, ("K" + i).getBytes().length, ("V" + i).getBytes().length, ("K" + i).getBytes(), ("V" + i).getBytes()));
}
// Second iteration: stream time advanced and 100 ms wall-clock time passed,
// so each punctuator fires exactly once.
thread.runOnce(-1);
assertEquals(1, punctuatedStreamTime.size());
assertEquals(1, punctuatedWallClockTime.size());
mockTime.sleep(100L);
thread.runOnce(-1);
// we should skip stream time punctuation, only trigger wall-clock time punctuation
assertEquals(1, punctuatedStreamTime.size());
assertEquals(2, punctuatedWallClockTime.size());
}
Use of org.apache.kafka.streams.processor.ProcessorSupplier in project kafkastreams-cep by fhussonnois.
From the class CEPStreamImpl, method query.
/**
 * {@inheritDoc}
 *
 * Wires a CEP query into the stream: adds a CEPProcessor node named after
 * {@code queryName}, attaches its three backing state stores (NFA state,
 * event buffer, aggregates), and returns a new stream of matched sequences.
 */
@Override
public KStream<K, Sequence<K, V>> query(String queryName, Pattern<K, V> pattern, Queried<K, V> queried) {
    Objects.requireNonNull(queryName, "queryName can't be null");
    Objects.requireNonNull(pattern, "pattern can't be null");
    // Locale.ROOT keeps the generated processor name deterministic regardless of
    // the JVM default locale (e.g. the Turkish dotless-i casing of toUpperCase()).
    final String name = builder.newProcessorName("CEPSTREAM-QUERY-" + queryName.toUpperCase(java.util.Locale.ROOT) + "-");
    // Parameterized supplier (was a raw ProcessorSupplier) avoids unchecked usage.
    final ProcessorSupplier<K, V> processor = () -> new CEPProcessor<>(queryName, pattern);
    builder.internalTopologyBuilder.addProcessor(name, processor, this.name);
    final QueryStoreBuilders<K, V> storeBuilders = new QueryStoreBuilders<>(queryName, pattern);
    // Serdes fall back to the stream defaults when no Queried config is given.
    final Serde<K> keySerde = (queried == null) ? null : queried.keySerde();
    final Serde<V> valSerde = (queried == null) ? null : queried.valueSerde();
    addStateStore(storeBuilders.getNFAStateStoreBuilder(keySerde, valSerde), name);
    addStateStore(storeBuilders.getEventBufferStoreBuilder(keySerde, valSerde), name);
    addStateStore(storeBuilders.getAggregateStateStores(), name);
    return new KStreamImpl<>(builder, name, sourceNodes, false);
}
Use of org.apache.kafka.streams.processor.ProcessorSupplier in project apache-kafka-on-k8s by banzaicloud.
From the class TopologyTestDriverTest, method shouldCleanUpPersistentStateStoresOnClose.
@Test
public void shouldCleanUpPersistentStateStoresOnClose() {
    // Topology under test: input-topic -> storeProcessor, where the processor
    // copies every record into the persistent store "storeProcessorStore".
    final Topology topology = new Topology();
    topology.addSource("sourceProcessor", "input-topic");
    topology.addProcessor("storeProcessor", new ProcessorSupplier() {
        @Override
        public Processor get() {
            return new Processor<String, Long>() {
                private KeyValueStore<String, Long> kvStore;

                @Override
                public void init(final ProcessorContext context) {
                    // noinspection unchecked
                    kvStore = (KeyValueStore<String, Long>) context.getStateStore("storeProcessorStore");
                }

                @Override
                public void process(final String key, final Long value) {
                    kvStore.put(key, value);
                }

                @Override
                public void punctuate(final long timestamp) {
                    // Scheduled punctuation is not exercised by this test.
                }

                @Override
                public void close() {
                    // Nothing to release.
                }
            };
        }
    }, "sourceProcessor");
    topology.addStateStore(
        Stores.keyValueStoreBuilder(Stores.persistentKeyValueStore("storeProcessorStore"), Serdes.String(), Serdes.Long()),
        "storeProcessor");

    final Properties config = new Properties();
    config.put(StreamsConfig.APPLICATION_ID_CONFIG, "test-TopologyTestDriver-cleanup");
    config.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "dummy:1234");
    config.put(StreamsConfig.STATE_DIR_CONFIG, TestUtils.tempDirectory().getAbsolutePath());
    config.put(StreamsConfig.DEFAULT_KEY_SERDE_CLASS_CONFIG, Serdes.String().getClass().getName());
    config.put(StreamsConfig.DEFAULT_VALUE_SERDE_CLASS_CONFIG, Serdes.Long().getClass().getName());

    // First driver: write one value into the store, then close. Closing the
    // driver is expected to wipe the local persistent state.
    {
        final TopologyTestDriver firstDriver = new TopologyTestDriver(topology, config);
        Assert.assertNull(firstDriver.getKeyValueStore("storeProcessorStore").get("a"));
        firstDriver.pipeInput(recordFactory.create("input-topic", "a", 1L));
        Assert.assertEquals(1L, firstDriver.getKeyValueStore("storeProcessorStore").get("a"));
        firstDriver.close();
    }

    // Second driver over the same state directory: the store must be empty again.
    {
        final TopologyTestDriver secondDriver = new TopologyTestDriver(topology, config);
        Assert.assertNull("Closing the prior test driver should have cleaned up this store and value.", secondDriver.getKeyValueStore("storeProcessorStore").get("a"));
    }
}
Aggregations