Use of org.apache.kafka.streams.state.KeyValueBytesStoreSupplier in project apache-kafka-on-k8s by banzaicloud.
From class RestoreIntegrationTest, method shouldProcessDataFromStoresWithLoggingDisabled.
@Test
public void shouldProcessDataFromStoresWithLoggingDisabled() throws InterruptedException, ExecutionException {
    // Seed the input topic with three records so the processor latch can reach zero.
    IntegrationTestUtils.produceKeyValuesSynchronously(
        INPUT_STREAM_2,
        Arrays.asList(KeyValue.pair(1, 1), KeyValue.pair(2, 2), KeyValue.pair(3, 3)),
        TestUtils.producerConfig(CLUSTER.bootstrapServers(), IntegerSerializer.class, IntegerSerializer.class),
        CLUSTER.time);

    // Build an LRU-map-backed store with changelogging disabled.
    final KeyValueBytesStoreSupplier lruMapSupplier = Stores.lruMap(INPUT_STREAM_2, 10);
    final StoreBuilder<KeyValueStore<Integer, Integer>> storeBuilder =
        new KeyValueStoreBuilder<>(lruMapSupplier, Serdes.Integer(), Serdes.Integer(), CLUSTER.time)
            .withLoggingDisabled();

    final StreamsBuilder streamsBuilder = new StreamsBuilder();
    streamsBuilder.addStateStore(storeBuilder);

    final KStream<Integer, Integer> stream = streamsBuilder.stream(INPUT_STREAM_2);
    final CountDownLatch processorLatch = new CountDownLatch(3);
    stream.process(new ProcessorSupplier<Integer, Integer>() {
        @Override
        public Processor<Integer, Integer> get() {
            return new KeyValueStoreProcessor(INPUT_STREAM_2, processorLatch);
        }
    }, INPUT_STREAM_2);

    final Topology topology = streamsBuilder.build();
    kafkaStreams = new KafkaStreams(topology, props(applicationId + "-logging-disabled"));

    // Wait for the instance to finish rebalancing before checking progress.
    final CountDownLatch latch = new CountDownLatch(1);
    kafkaStreams.setStateListener(new KafkaStreams.StateListener() {
        @Override
        public void onChange(final KafkaStreams.State newState, final KafkaStreams.State oldState) {
            if (newState == KafkaStreams.State.RUNNING && oldState == KafkaStreams.State.REBALANCING) {
                latch.countDown();
            }
        }
    });
    kafkaStreams.start();

    latch.await(30, TimeUnit.SECONDS);
    assertTrue(processorLatch.await(30, TimeUnit.SECONDS));
}
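The KeyValueStoreProcessor used above is defined elsewhere in RestoreIntegrationTest and is not part of this snippet. A minimal sketch of what such a processor plausibly looks like (the class body here is an assumption, not the project's code): it looks up the store registered under the topic name, writes each record through it, and counts down the latch so the test can tell that records flowed through the logging-disabled store.

import java.util.concurrent.CountDownLatch;
import org.apache.kafka.streams.processor.AbstractProcessor;
import org.apache.kafka.streams.processor.ProcessorContext;
import org.apache.kafka.streams.state.KeyValueStore;

// Hypothetical sketch of the processor referenced by the test above.
public static class KeyValueStoreProcessor extends AbstractProcessor<Integer, Integer> {

    private final String storeName;
    private final CountDownLatch processorLatch;
    private KeyValueStore<Integer, Integer> store;

    KeyValueStoreProcessor(final String storeName, final CountDownLatch processorLatch) {
        this.storeName = storeName;
        this.processorLatch = processorLatch;
    }

    @SuppressWarnings("unchecked")
    @Override
    public void init(final ProcessorContext context) {
        // Fetch the store attached via addStateStore / stream.process above.
        this.store = (KeyValueStore<Integer, Integer>) context.getStateStore(storeName);
    }

    @Override
    public void process(final Integer key, final Integer value) {
        if (key != null) {
            store.put(key, value);
            processorLatch.countDown(); // one count per processed record
        }
    }
}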
Use of org.apache.kafka.streams.state.KeyValueBytesStoreSupplier in project apache-kafka-on-k8s by banzaicloud.
From class StreamsStandByReplicaTest, method main.
public static void main(final String[] args) throws IOException {
    System.out.println("StreamsTest instance started");

    final String kafka = args.length > 0 ? args[0] : "localhost:9092";
    final String propFileName = args.length > 1 ? args[1] : null;
    final String additionalConfigs = args.length > 2 ? args[2] : null;

    final Serde<String> stringSerde = Serdes.String();

    final Properties streamsProperties = Utils.loadProps(propFileName);
    streamsProperties.put(StreamsConfig.APPLICATION_ID_CONFIG, "kafka-streams-standby-tasks");
    streamsProperties.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, kafka);
    streamsProperties.put(StreamsConfig.COMMIT_INTERVAL_MS_CONFIG, 100);
    streamsProperties.put(StreamsConfig.NUM_STANDBY_REPLICAS_CONFIG, 1);
    streamsProperties.put(StreamsConfig.CACHE_MAX_BYTES_BUFFERING_CONFIG, 0);
    streamsProperties.put(StreamsConfig.DEFAULT_KEY_SERDE_CLASS_CONFIG, Serdes.String().getClass());
    streamsProperties.put(StreamsConfig.DEFAULT_VALUE_SERDE_CLASS_CONFIG, Serdes.String().getClass());
    streamsProperties.put(StreamsConfig.producerPrefix(ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG), true);

    if (additionalConfigs == null) {
        System.err.println("additional configs are not provided");
        System.err.flush();
        System.exit(1);
    }

    final Map<String, String> updated = SystemTestUtil.parseConfigs(additionalConfigs);
    System.out.println("Updating configs with " + updated);

    // The three topic names are passed in as pseudo-configs and must be present.
    final String sourceTopic = updated.remove("sourceTopic");
    final String sinkTopic1 = updated.remove("sinkTopic1");
    final String sinkTopic2 = updated.remove("sinkTopic2");
    if (sourceTopic == null || sinkTopic1 == null || sinkTopic2 == null) {
        System.err.println(String.format(
            "one or more required topics null sourceTopic[%s], sinkTopic1[%s], sinkTopic2[%s]",
            sourceTopic, sinkTopic1, sinkTopic2));
        System.err.flush();
        System.exit(1);
    }

    streamsProperties.putAll(updated);

    if (!confirmCorrectConfigs(streamsProperties)) {
        System.err.println(String.format(
            "ERROR: Did not have all required configs expected to contain %s, %s, %s, %s",
            StreamsConfig.consumerPrefix(ConsumerConfig.MAX_POLL_INTERVAL_MS_CONFIG),
            StreamsConfig.producerPrefix(ProducerConfig.RETRIES_CONFIG),
            StreamsConfig.producerPrefix(ProducerConfig.REQUEST_TIMEOUT_MS_CONFIG),
            StreamsConfig.producerPrefix(ProducerConfig.MAX_BLOCK_MS_CONFIG)));
        System.exit(1);
    }

    final StreamsBuilder builder = new StreamsBuilder();

    final String inMemoryStoreName = "in-memory-store";
    final String persistentMemoryStoreName = "persistent-memory-store";
    final KeyValueBytesStoreSupplier inMemoryStoreSupplier = Stores.inMemoryKeyValueStore(inMemoryStoreName);
    final KeyValueBytesStoreSupplier persistentStoreSupplier = Stores.persistentKeyValueStore(persistentMemoryStoreName);

    final KStream<String, String> inputStream = builder.stream(sourceTopic, Consumed.with(stringSerde, stringSerde));

    final ValueMapper<Long, String> countMapper = new ValueMapper<Long, String>() {
        @Override
        public String apply(final Long value) {
            return value.toString();
        }
    };

    // Count the input twice, materializing once into an in-memory store and once
    // into a persistent store, so both store types get standby replicas.
    inputStream.groupByKey()
               .count(Materialized.<String, Long>as(inMemoryStoreSupplier))
               .toStream()
               .mapValues(countMapper)
               .to(sinkTopic1, Produced.with(stringSerde, stringSerde));
    inputStream.groupByKey()
               .count(Materialized.<String, Long>as(persistentStoreSupplier))
               .toStream()
               .mapValues(countMapper)
               .to(sinkTopic2, Produced.with(stringSerde, stringSerde));

    final KafkaStreams streams = new KafkaStreams(builder.build(), streamsProperties);

    streams.setUncaughtExceptionHandler(new Thread.UncaughtExceptionHandler() {
        @Override
        public void uncaughtException(final Thread t, final Throwable e) {
            System.err.println("FATAL: An unexpected exception " + e);
            e.printStackTrace(System.err);
            System.err.flush();
            shutdown(streams);
        }
    });

    streams.setStateListener(new KafkaStreams.StateListener() {
        @Override
        public void onChange(final KafkaStreams.State newState, final KafkaStreams.State oldState) {
            if (newState == KafkaStreams.State.RUNNING && oldState == KafkaStreams.State.REBALANCING) {
                // Report the active/standby task split once the instance is running.
                final Set<ThreadMetadata> threadMetadata = streams.localThreadsMetadata();
                for (final ThreadMetadata threadMetadatum : threadMetadata) {
                    System.out.println("ACTIVE_TASKS:" + threadMetadatum.activeTasks().size()
                        + " STANDBY_TASKS:" + threadMetadatum.standbyTasks().size());
                }
            }
        }
    });

    System.out.println("Start Kafka Streams");
    streams.start();

    Runtime.getRuntime().addShutdownHook(new Thread(new Runnable() {
        @Override
        public void run() {
            shutdown(streams);
            System.out.println("Shut down streams now");
        }
    }));
}
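Two helpers referenced above, confirmCorrectConfigs and shutdown, are not shown in this snippet. Plausible minimal versions are sketched below; they are assumptions matching the pre-2.0 Kafka Streams API this code targets, not necessarily the project's exact implementations.

// Check for the four externally supplied configs that the error message above lists.
private static boolean confirmCorrectConfigs(final Properties properties) {
    return properties.containsKey(StreamsConfig.consumerPrefix(ConsumerConfig.MAX_POLL_INTERVAL_MS_CONFIG))
        && properties.containsKey(StreamsConfig.producerPrefix(ProducerConfig.RETRIES_CONFIG))
        && properties.containsKey(StreamsConfig.producerPrefix(ProducerConfig.REQUEST_TIMEOUT_MS_CONFIG))
        && properties.containsKey(StreamsConfig.producerPrefix(ProducerConfig.MAX_BLOCK_MS_CONFIG));
}

// Close the Streams instance with a bounded timeout; KafkaStreams.close(long, TimeUnit)
// is the pre-2.0 signature consistent with the rest of this code.
private static void shutdown(final KafkaStreams streams) {
    streams.close(10, TimeUnit.SECONDS);
}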
Use of org.apache.kafka.streams.state.KeyValueBytesStoreSupplier in project apache-kafka-on-k8s by banzaicloud.
From class KeyValueStoreMaterializerTest, method shouldCreateKeyValueStoreWithTheProvidedInnerStore.
@Test
public void shouldCreateKeyValueStoreWithTheProvidedInnerStore() {
    final KeyValueBytesStoreSupplier supplier = EasyMock.createNiceMock(KeyValueBytesStoreSupplier.class);
    final InMemoryKeyValueStore<Bytes, byte[]> store =
        new InMemoryKeyValueStore<>("name", Serdes.Bytes(), Serdes.ByteArray());
    EasyMock.expect(supplier.name()).andReturn("name").anyTimes();
    EasyMock.expect(supplier.get()).andReturn(store);
    EasyMock.replay(supplier);

    final MaterializedInternal<String, Integer, KeyValueStore<Bytes, byte[]>> materialized =
        new MaterializedInternal<>(Materialized.<String, Integer>as(supplier), nameProvider, storePrefix);
    final KeyValueStoreMaterializer<String, Integer> materializer = new KeyValueStoreMaterializer<>(materialized);
    final StoreBuilder<KeyValueStore<String, Integer>> builder = materializer.materialize();
    final KeyValueStore<String, Integer> built = builder.build();

    // The materializer must wrap, not replace, the store produced by the supplier.
    final StateStore inner = ((WrappedStateStore) built).inner();
    assertThat(inner, CoreMatchers.<StateStore>equalTo(store));
}
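Outside this unit test, a custom KeyValueBytesStoreSupplier reaches KeyValueStoreMaterializer the same way: through Materialized.as(...) in the DSL. A minimal sketch follows; the topic name, store name, and serdes are placeholders.

// Minimal sketch: materialize a DSL count into a store backed by a custom supplier.
final StreamsBuilder builder = new StreamsBuilder();
final KeyValueBytesStoreSupplier supplier = Stores.inMemoryKeyValueStore("counts-store");

builder.stream("words", Consumed.with(Serdes.String(), Serdes.String()))
       .groupByKey()
       .count(Materialized.<String, Long>as(supplier)       // count() materializes into the supplier's store
                          .withKeySerde(Serdes.String())
                          .withValueSerde(Serdes.Long()));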
Use of org.apache.kafka.streams.state.KeyValueBytesStoreSupplier in project kafkastreams-cep by fhussonnois.
From class QueryStoreBuilders, method getNFAStateStoreBuilder.
/**
 * Build a {@link StoreBuilder} for the persistent store used to keep NFA states.
 *
 * @param keySerde the key {@link Serde}.
 * @param valueSerde the value {@link Serde}.
 * @return a new {@link StoreBuilder} instance.
 */
public StoreBuilder<NFAStore<K, V>> getNFAStateStoreBuilder(final Serde<K> keySerde, final Serde<V> valueSerde) {
    final String storeName = QueryStores.getQueryNFAStoreName(queryName);
    final KeyValueBytesStoreSupplier storeSupplier = Stores.persistentKeyValueStore(storeName);
    return QueryStores.nfaStoreBuilder(storeSupplier, stages, keySerde, valueSerde);
}
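QueryStores.nfaStoreBuilder is specific to the kafkastreams-cep project. With plain Kafka Streams, the same persistent supplier would feed the built-in factory instead; the sketch below shows that stock equivalent (store name and serdes are placeholders), yielding an ordinary changelogged KeyValueStore rather than an NFAStore.

// Sketch: the standard Kafka Streams way to wrap a persistent supplier in a StoreBuilder.
final KeyValueBytesStoreSupplier supplier = Stores.persistentKeyValueStore("my-query-store");
final StoreBuilder<KeyValueStore<String, String>> builder =
    Stores.keyValueStoreBuilder(supplier, Serdes.String(), Serdes.String())
          .withLoggingEnabled(Collections.<String, String>emptyMap()); // changelog with default topic configs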
Use of org.apache.kafka.streams.state.KeyValueBytesStoreSupplier in project kafkastreams-cep by fhussonnois.
From class QueryStoreBuilders, method getEventBufferStoreBuilder.
/**
 * Build a new {@link StoreBuilder} used to store match sequences.
 *
 * @param keySerde the key {@link Serde}.
 * @param valueSerde the value {@link Serde}.
 * @return a new {@link StoreBuilder} instance.
 */
public StoreBuilder<SharedVersionedBufferStore<K, V>> getEventBufferStoreBuilder(final Serde<K> keySerde, final Serde<V> valueSerde) {
    final String storeName = QueryStores.getQueryEventBufferStoreName(queryName);
    final KeyValueBytesStoreSupplier storeSupplier = Stores.persistentKeyValueStore(storeName);
    return QueryStores.bufferStoreBuilder(storeSupplier, keySerde, valueSerde);
}
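Builders produced this way are ultimately registered on the topology and then looked up by name inside a processor. A minimal sketch with the standard 1.x Processor API follows; the topic and store names are placeholders, and the CEP library wires its own store types through addStateStore in a similar fashion.

// Sketch: register a store builder, then let a processor node access the store by name.
final StreamsBuilder builder = new StreamsBuilder();
builder.addStateStore(Stores.keyValueStoreBuilder(
    Stores.persistentKeyValueStore("event-buffer"),
    Serdes.String(), Serdes.String()));

builder.stream("events", Consumed.with(Serdes.String(), Serdes.String()))
       .process(new ProcessorSupplier<String, String>() {
           @Override
           public Processor<String, String> get() {
               return new AbstractProcessor<String, String>() {
                   private KeyValueStore<String, String> store;

                   @SuppressWarnings("unchecked")
                   @Override
                   public void init(final ProcessorContext context) {
                       store = (KeyValueStore<String, String>) context.getStateStore("event-buffer");
                   }

                   @Override
                   public void process(final String key, final String value) {
                       store.put(key, value); // buffer the incoming event
                   }
               };
           }
       }, "event-buffer"); // naming the store here grants the processor access to it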