Use of org.apache.kafka.streams.state.KeyValueStore in the Apache Kafka project.
From the class KGroupedStreamImplTest, method shouldLogAndMeasureSkipsInAggregate.
@Test
public void shouldLogAndMeasureSkipsInAggregate() {
    groupedStream.count(Materialized.<String, Long, KeyValueStore<Bytes, byte[]>>as("count")
        .withKeySerde(Serdes.String()));
    try (final LogCaptureAppender appender = LogCaptureAppender.createAndRegister(KStreamAggregate.class);
         final TopologyTestDriver driver = new TopologyTestDriver(builder.build(), props)) {
        processData(driver);
        assertThat(appender.getMessages(),
            hasItem("Skipping record due to null key or value. topic=[topic] partition=[0] offset=[6]"));
    }
}
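The processData helper referenced above is not shown in this excerpt. A minimal sketch of what such a helper might look like, assuming the input topic is named "topic", String keys and values, and a record with a null value at offset 6 (all of which are assumptions, not taken from the original test):
// Hypothetical sketch only; the real processData in KGroupedStreamImplTest may differ.
private void processData(final TopologyTestDriver driver) {
    final TestInputTopic<String, String> input =
        driver.createInputTopic("topic", new StringSerializer(), new StringSerializer());
    for (int i = 0; i < 6; i++) {
        input.pipeInput("key-" + i, "value-" + i);
    }
    // the record at offset 6 carries a null value, which the aggregate node skips and logs
    input.pipeInput("key-6", null);
}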
Use of org.apache.kafka.streams.state.KeyValueStore in the Apache Kafka project.
From the class InternalStreamsBuilderTest, method shouldNotMaterializeSourceKTableIfNotRequired.
@Test
public void shouldNotMaterializeSourceKTableIfNotRequired() {
    final MaterializedInternal<String, String, KeyValueStore<Bytes, byte[]>> materializedInternal =
        new MaterializedInternal<>(Materialized.with(null, null), builder, storePrefix);
    final KTable<String, String> table1 = builder.table("topic2", consumed, materializedInternal);
    builder.buildAndOptimizeTopology();
    final ProcessorTopology topology = builder.internalTopologyBuilder
        .rewriteTopology(new StreamsConfig(StreamsTestUtils.getStreamsConfig(APP_ID)))
        .buildTopology();
    assertEquals(0, topology.stateStores().size());
    assertEquals(0, topology.storeToChangelogTopic().size());
    assertNull(table1.queryableStoreName());
}
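For contrast, a minimal sketch at the public DSL level (topic name, store name, and builder variable are hypothetical) showing that supplying a store name does make a source KTable queryable:
// Hypothetical counter-example: naming the store forces materialization.
final StreamsBuilder publicBuilder = new StreamsBuilder();
final KTable<String, String> namedTable = publicBuilder.table(
    "topic3",
    Consumed.with(Serdes.String(), Serdes.String()),
    Materialized.<String, String, KeyValueStore<Bytes, byte[]>>as("source-store"));
// namedTable.queryableStoreName() now returns "source-store", and the built topology
// contains the state store together with its changelog topic.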
Use of org.apache.kafka.streams.state.KeyValueStore in the Apache Kafka project.
From the class KGroupedTableImplTest, method shouldReduce.
@Test
public void shouldReduce() {
    final KeyValueMapper<String, Number, KeyValue<String, Integer>> intProjection =
        (key, value) -> KeyValue.pair(key, value.intValue());
    final KTable<String, Integer> reduced = builder
        .table(topic,
            Consumed.with(Serdes.String(), Serdes.Double()),
            Materialized.<String, Double, KeyValueStore<Bytes, byte[]>>as("store")
                .withKeySerde(Serdes.String())
                .withValueSerde(Serdes.Double()))
        .groupBy(intProjection)
        .reduce(MockReducer.INTEGER_ADDER, MockReducer.INTEGER_SUBTRACTOR, Materialized.as("reduced"));
    final MockApiProcessorSupplier<String, Integer, Void, Void> supplier = getReducedResults(reduced);
    try (final TopologyTestDriver driver = new TopologyTestDriver(builder.build(), props)) {
        assertReduced(supplier.theCapturedProcessor().lastValueAndTimestampPerKey(), topic, driver);
        assertEquals(reduced.queryableStoreName(), "reduced");
    }
}
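Because the reduce result is materialized under the name "reduced", the store could also be inspected directly through the test driver. A brief sketch (the key "A" is a hypothetical example, and the lookup must happen while the driver is still open):
// Hypothetical follow-up inside the try-with-resources block above.
final KeyValueStore<String, Integer> reducedStore = driver.getKeyValueStore("reduced");
final Integer sumForA = reducedStore.get("A");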
Use of org.apache.kafka.streams.state.KeyValueStore in the Apache Kafka project.
From the class EosIntegrationTest, method getKafkaStreams.
// the threads should no longer fail one thread at a time
@SuppressWarnings("deprecation")
private KafkaStreams getKafkaStreams(final String dummyHostName, final boolean withState, final String appDir, final int numberOfStreamsThreads, final String eosConfig, final int maxPollIntervalMs) {
commitRequested = new AtomicInteger(0);
errorInjected = new AtomicBoolean(false);
stallInjected = new AtomicBoolean(false);
stallingHost = new AtomicReference<>();
final StreamsBuilder builder = new StreamsBuilder();
String[] storeNames = new String[0];
if (withState) {
storeNames = new String[] { storeName };
final StoreBuilder<KeyValueStore<Long, Long>> storeBuilder = Stores.keyValueStoreBuilder(Stores.persistentKeyValueStore(storeName), Serdes.Long(), Serdes.Long()).withCachingEnabled();
builder.addStateStore(storeBuilder);
}
final KStream<Long, Long> input = builder.stream(MULTI_PARTITION_INPUT_TOPIC);
input.transform(new TransformerSupplier<Long, Long, KeyValue<Long, Long>>() {
@SuppressWarnings("unchecked")
@Override
public Transformer<Long, Long, KeyValue<Long, Long>> get() {
return new Transformer<Long, Long, KeyValue<Long, Long>>() {
ProcessorContext context;
KeyValueStore<Long, Long> state = null;
@Override
public void init(final ProcessorContext context) {
this.context = context;
if (withState) {
state = (KeyValueStore<Long, Long>) context.getStateStore(storeName);
}
}
@Override
public KeyValue<Long, Long> transform(final Long key, final Long value) {
if (stallInjected.compareAndSet(true, false)) {
LOG.info(dummyHostName + " is executing the injected stall");
stallingHost.set(dummyHostName);
while (doStall) {
final StreamThread thread = (StreamThread) Thread.currentThread();
if (thread.isInterrupted() || !thread.isRunning()) {
throw new RuntimeException("Detected we've been interrupted.");
}
try {
Thread.sleep(100);
} catch (final InterruptedException e) {
throw new RuntimeException(e);
}
}
}
if ((value + 1) % 10 == 0) {
context.commit();
commitRequested.incrementAndGet();
}
if (state != null) {
Long sum = state.get(key);
if (sum == null) {
sum = value;
} else {
sum += value;
}
state.put(key, sum);
state.flush();
}
if (errorInjected.compareAndSet(true, false)) {
                        // only tries to fail once on one of the tasks
throw new RuntimeException("Injected test exception.");
}
if (state != null) {
return new KeyValue<>(key, state.get(key));
} else {
return new KeyValue<>(key, value);
}
}
@Override
public void close() {
}
};
}
}, storeNames).to(SINGLE_PARTITION_OUTPUT_TOPIC);
stateTmpDir = TestUtils.tempDirectory().getPath() + File.separator;
final Properties properties = new Properties();
    // Set the commit interval to a larger value to avoid interference from the controlled stream commits,
    // but not too large: the producer transaction timeout below is set to the same value and must stay
    // relatively low so that the timed-out transaction is triggered in time.
    final long commitIntervalMs = 20_000L;
    properties.put(StreamsConfig.PROCESSING_GUARANTEE_CONFIG, eosConfig);
    properties.put(StreamsConfig.NUM_STREAM_THREADS_CONFIG, numberOfStreamsThreads);
    properties.put(StreamsConfig.COMMIT_INTERVAL_MS_CONFIG, commitIntervalMs);
    properties.put(StreamsConfig.producerPrefix(ProducerConfig.TRANSACTION_TIMEOUT_CONFIG), (int) commitIntervalMs);
    properties.put(StreamsConfig.consumerPrefix(ConsumerConfig.METADATA_MAX_AGE_CONFIG), "1000");
    properties.put(StreamsConfig.consumerPrefix(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG), "earliest");
    properties.put(StreamsConfig.consumerPrefix(ConsumerConfig.REQUEST_TIMEOUT_MS_CONFIG), maxPollIntervalMs);
    properties.put(StreamsConfig.consumerPrefix(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG), maxPollIntervalMs - 1);
    properties.put(StreamsConfig.consumerPrefix(ConsumerConfig.MAX_POLL_INTERVAL_MS_CONFIG), maxPollIntervalMs);
    properties.put(StreamsConfig.CACHE_MAX_BYTES_BUFFERING_CONFIG, 0);
    properties.put(StreamsConfig.STATE_DIR_CONFIG, stateTmpDir + appDir);
    properties.put(StreamsConfig.APPLICATION_SERVER_CONFIG, dummyHostName + ":2142");
    final Properties config = StreamsTestUtils.getStreamsConfig(
        applicationId,
        CLUSTER.bootstrapServers(),
        Serdes.LongSerde.class.getName(),
        Serdes.LongSerde.class.getName(),
        properties);
    final KafkaStreams streams = new KafkaStreams(builder.build(), config);
    streams.setUncaughtExceptionHandler((t, e) -> {
        if (uncaughtException != null || !e.getMessage().contains("Injected test exception")) {
            e.printStackTrace(System.err);
            hasUnexpectedError = true;
        }
        uncaughtException = e;
    });
    return streams;
}
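The Transformer API used above is deprecated (hence the @SuppressWarnings("deprecation") on the method). For illustration only, a rough sketch of how the same stateful pass-through could be written with the newer process() API available since Kafka Streams 3.3; the stall and error injection are omitted, and this is not the code the test actually uses:
// Illustrative sketch assuming the org.apache.kafka.streams.processor.api types
// (Processor, ProcessorContext, Record), not the deprecated ones imported by the test.
input.process(() -> new Processor<Long, Long, Long, Long>() {

    private ProcessorContext<Long, Long> context;
    private KeyValueStore<Long, Long> state;

    @Override
    public void init(final ProcessorContext<Long, Long> context) {
        this.context = context;
        if (withState) {
            state = context.getStateStore(storeName);
        }
    }

    @Override
    public void process(final Record<Long, Long> record) {
        Long result = record.value();
        if (state != null) {
            final Long sum = state.get(record.key());
            result = sum == null ? record.value() : sum + record.value();
            state.put(record.key(), result);
        }
        context.forward(record.withValue(result));
    }
}, storeNames).to(SINGLE_PARTITION_OUTPUT_TOPIC);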
Use of org.apache.kafka.streams.state.KeyValueStore in the Apache Kafka project.
From the class KafkaStreamsTest, method getStatefulTopology.
// testing old PAPI
@Deprecated
private Topology getStatefulTopology(final String inputTopic,
                                     final String outputTopic,
                                     final String globalTopicName,
                                     final String storeName,
                                     final String globalStoreName,
                                     final boolean isPersistentStore) {
    final StoreBuilder<KeyValueStore<String, Long>> storeBuilder = Stores.keyValueStoreBuilder(
        isPersistentStore ? Stores.persistentKeyValueStore(storeName) : Stores.inMemoryKeyValueStore(storeName),
        Serdes.String(),
        Serdes.Long());
    final Topology topology = new Topology();
    topology.addSource("source", Serdes.String().deserializer(), Serdes.String().deserializer(), inputTopic)
        .addProcessor("process", () -> new Processor<String, String, String, String>() {

            private ProcessorContext<String, String> context;

            @Override
            public void init(final ProcessorContext<String, String> context) {
                this.context = context;
            }

            @Override
            public void process(final Record<String, String> record) {
                final KeyValueStore<String, Long> kvStore = context.getStateStore(storeName);
                kvStore.put(record.key(), 5L);
                context.forward(record.withValue("5"));
                context.commit();
            }
        }, "source")
        .addStateStore(storeBuilder, "process")
        .addSink("sink", outputTopic, new StringSerializer(), new StringSerializer(), "process");
    final StoreBuilder<KeyValueStore<String, String>> globalStoreBuilder = Stores.keyValueStoreBuilder(
        isPersistentStore ? Stores.persistentKeyValueStore(globalStoreName) : Stores.inMemoryKeyValueStore(globalStoreName),
        Serdes.String(),
        Serdes.String())
        .withLoggingDisabled();
    topology.addGlobalStore(
        globalStoreBuilder,
        "global",
        Serdes.String().deserializer(),
        Serdes.String().deserializer(),
        globalTopicName,
        globalTopicName + "-processor",
        new MockProcessorSupplier<>());
    return topology;
}
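This topology can be exercised without a broker. A brief sketch (the topic and store names passed in, and the props object, are hypothetical) that drives it with TopologyTestDriver and checks the store written by the processor:
// Hypothetical usage sketch; assumes suitable StreamsConfig properties in a props object.
final Topology topology = getStatefulTopology("in", "out", "global-topic", "store", "global-store", false);
try (final TopologyTestDriver driver = new TopologyTestDriver(topology, props)) {
    final TestInputTopic<String, String> in =
        driver.createInputTopic("in", new StringSerializer(), new StringSerializer());
    in.pipeInput("a", "x");
    final KeyValueStore<String, Long> store = driver.getKeyValueStore("store");
    // the processor writes 5L for every record key it sees
    assertEquals(Long.valueOf(5L), store.get("a"));
}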