Use of org.apache.kafka.streams.state.KeyValueStore in project kafka by apache.
The class EosV2UpgradeIntegrationTest, method getKafkaStreams.
private KafkaStreams getKafkaStreams(final String appDir, final String processingGuarantee) {
    final StreamsBuilder builder = new StreamsBuilder();
    final String[] storeNames = new String[] { storeName };
    final StoreBuilder<KeyValueStore<Long, Long>> storeBuilder = Stores.keyValueStoreBuilder(Stores.persistentKeyValueStore(storeName), Serdes.Long(), Serdes.Long()).withCachingEnabled();
    builder.addStateStore(storeBuilder);
    final KStream<Long, Long> input = builder.stream(MULTI_PARTITION_INPUT_TOPIC);
    input.transform(new TransformerSupplier<Long, Long, KeyValue<Long, Long>>() {
        @Override
        public Transformer<Long, Long, KeyValue<Long, Long>> get() {
            return new Transformer<Long, Long, KeyValue<Long, Long>>() {
                ProcessorContext context;
                KeyValueStore<Long, Long> state = null;
                AtomicBoolean crash;
                AtomicInteger sharedCommit;

                @Override
                public void init(final ProcessorContext context) {
                    this.context = context;
                    state = context.getStateStore(storeName);
                    final String clientId = context.appConfigs().get(StreamsConfig.CLIENT_ID_CONFIG).toString();
                    if (APP_DIR_1.equals(clientId)) {
                        crash = errorInjectedClient1;
                        sharedCommit = commitCounterClient1;
                    } else {
                        crash = errorInjectedClient2;
                        sharedCommit = commitCounterClient2;
                    }
                }

                @Override
                public KeyValue<Long, Long> transform(final Long key, final Long value) {
                    if ((value + 1) % 10 == 0) {
                        if (sharedCommit.get() < 0 || sharedCommit.incrementAndGet() == 2) {
                            context.commit();
                        }
                        commitRequested.incrementAndGet();
                    }
                    Long sum = state.get(key);
                    if (sum == null) {
                        sum = value;
                    } else {
                        sum += value;
                    }
                    state.put(key, sum);
                    state.flush();
                    // potentially crash when processing the 5th, 15th, or 25th record (etc.)
                    if (value % 10 == 4 && crash != null && crash.compareAndSet(true, false)) {
                        // only crash a single task
                        throw new RuntimeException("Injected test exception.");
                    }
                    return new KeyValue<>(key, state.get(key));
                }

                @Override
                public void close() {
                }
            };
        }
    }, storeNames).to(MULTI_PARTITION_OUTPUT_TOPIC);
    final Properties properties = new Properties();
    properties.put(StreamsConfig.CLIENT_ID_CONFIG, appDir);
    properties.put(StreamsConfig.PROCESSING_GUARANTEE_CONFIG, processingGuarantee);
    final long commitInterval = Duration.ofMinutes(1L).toMillis();
    properties.put(StreamsConfig.COMMIT_INTERVAL_MS_CONFIG, commitInterval);
    properties.put(StreamsConfig.consumerPrefix(ConsumerConfig.METADATA_MAX_AGE_CONFIG), Duration.ofSeconds(1L).toMillis());
    properties.put(StreamsConfig.consumerPrefix(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG), "earliest");
    properties.put(StreamsConfig.consumerPrefix(ConsumerConfig.REQUEST_TIMEOUT_MS_CONFIG), (int) Duration.ofSeconds(5L).toMillis());
    properties.put(StreamsConfig.consumerPrefix(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG), (int) Duration.ofSeconds(5L).minusMillis(1L).toMillis());
    properties.put(StreamsConfig.consumerPrefix(ConsumerConfig.MAX_POLL_INTERVAL_MS_CONFIG), MAX_POLL_INTERVAL_MS);
    properties.put(StreamsConfig.producerPrefix(ProducerConfig.TRANSACTION_TIMEOUT_CONFIG), (int) commitInterval);
    properties.put(StreamsConfig.producerPrefix(ProducerConfig.PARTITIONER_CLASS_CONFIG), KeyPartitioner.class);
    properties.put(StreamsConfig.CACHE_MAX_BYTES_BUFFERING_CONFIG, 0);
    properties.put(StreamsConfig.STATE_DIR_CONFIG, TestUtils.tempDirectory().getPath() + File.separator + appDir);
    properties.put(InternalConfig.ASSIGNMENT_LISTENER, assignmentListener);
    final Properties config = StreamsTestUtils.getStreamsConfig(applicationId, CLUSTER.bootstrapServers(), Serdes.LongSerde.class.getName(), Serdes.LongSerde.class.getName(), properties);
    final KafkaStreams streams = new KafkaStreams(builder.build(), config, new TestKafkaClientSupplier());
    streams.setUncaughtExceptionHandler(e -> {
        if (!injectError) {
            // we don't expect any exception to be thrown in the stop case
            e.printStackTrace(System.err);
            hasUnexpectedError = true;
        } else {
            int exceptionCount = (int) exceptionCounts.get(appDir);
            // we should only see our injected exception or a commit exception, and at most 2 exceptions per stream
            if (++exceptionCount > 2 || !(e instanceof RuntimeException) || !(e.getMessage().contains("test exception"))) {
                // An expected exception does not fail the test, since we deliberately injected it and expect the stream to fail.
                // So, log to stderr for debugging when the exception is not what we expected, and fail in the main thread.
                e.printStackTrace(System.err);
                hasUnexpectedError = true;
            }
            exceptionCounts.put(appDir, exceptionCount);
        }
        return StreamsUncaughtExceptionHandler.StreamThreadExceptionResponse.SHUTDOWN_CLIENT;
    });
    return streams;
}
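Stripped of the crash-injection, commit-counting, and EOS-specific configuration, the core KeyValueStore pattern this test exercises is a per-key running sum kept in a persistent store. The following is a minimal sketch of that pattern; the topic names "input"/"output" and the store name "sums" are placeholders, not the constants used by the test.

    final StreamsBuilder builder = new StreamsBuilder();
    builder.addStateStore(
        Stores.keyValueStoreBuilder(Stores.persistentKeyValueStore("sums"), Serdes.Long(), Serdes.Long()));
    builder.<Long, Long>stream("input", Consumed.with(Serdes.Long(), Serdes.Long()))
        .transform(() -> new Transformer<Long, Long, KeyValue<Long, Long>>() {
            private KeyValueStore<Long, Long> state;

            @Override
            public void init(final ProcessorContext context) {
                // the store registered on the builder is looked up by name
                state = context.getStateStore("sums");
            }

            @Override
            public KeyValue<Long, Long> transform(final Long key, final Long value) {
                final Long previous = state.get(key); // null the first time a key is seen
                final Long sum = previous == null ? value : previous + value;
                state.put(key, sum);                  // persist the running sum
                return KeyValue.pair(key, sum);
            }

            @Override
            public void close() {
            }
        }, "sums")
        .to("output", Produced.with(Serdes.Long(), Serdes.Long()));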
Use of org.apache.kafka.streams.state.KeyValueStore in project kafka by apache.
The class KTableEfficientRangeQueryTest, method testStoreConfig.
@Test
public void testStoreConfig() {
    final Materialized<String, String, KeyValueStore<Bytes, byte[]>> stateStoreConfig = getStoreConfig(storeType, TABLE_NAME, enableLogging, enableCaching);
    // Create topology: table from input topic
    final StreamsBuilder builder = new StreamsBuilder();
    final KTable<String, String> table = builder.table("input", stateStoreConfig);
    final Topology topology = builder.build();
    try (final TopologyTestDriver driver = new TopologyTestDriver(topology)) {
        // get input topic and state store
        final TestInputTopic<String, String> input = driver.createInputTopic("input", new StringSerializer(), new StringSerializer());
        final ReadOnlyKeyValueStore<String, String> stateStore = driver.getKeyValueStore(TABLE_NAME);
        // write some data
        for (final KeyValue<String, String> kv : records) {
            input.pipeInput(kv.key, kv.value);
        }
        // query the state store
        try (final KeyValueIterator<String, String> scanIterator = forward ? stateStore.range(null, null) : stateStore.reverseRange(null, null)) {
            final Iterator<KeyValue<String, String>> dataIterator = forward ? records.iterator() : records.descendingIterator();
            TestUtils.checkEquals(scanIterator, dataIterator);
        }
        try (final KeyValueIterator<String, String> allIterator = forward ? stateStore.all() : stateStore.reverseAll()) {
            final Iterator<KeyValue<String, String>> dataIterator = forward ? records.iterator() : records.descendingIterator();
            TestUtils.checkEquals(allIterator, dataIterator);
        }
        testRange("range", stateStore, innerLow, innerHigh, forward);
        testRange("until", stateStore, null, middle, forward);
        testRange("from", stateStore, middle, null, forward);
        testRange("untilBetween", stateStore, null, innerHighBetween, forward);
        testRange("fromBetween", stateStore, innerLowBetween, null, forward);
    }
}
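The testRange calls above use a helper that is not part of this excerpt. A hypothetical sketch of what such a helper might look like, assuming records is the same collection of expected key-value pairs used above and that a null bound means "unbounded" on that side (as in the range(null, null) call); this is not the test's actual implementation.

    private void testRange(final String scenario,
                           final ReadOnlyKeyValueStore<String, String> store,
                           final String low,
                           final String high,
                           final boolean forward) {
        // scenario is presumably used to label failures in the real helper
        try (final KeyValueIterator<String, String> result =
                 forward ? store.range(low, high) : store.reverseRange(low, high)) {
            // build the expected slice of the input data, honoring null as "no bound"
            final List<KeyValue<String, String>> expected = new ArrayList<>();
            final Iterator<KeyValue<String, String>> data = forward ? records.iterator() : records.descendingIterator();
            while (data.hasNext()) {
                final KeyValue<String, String> kv = data.next();
                if ((low == null || kv.key.compareTo(low) >= 0) && (high == null || kv.key.compareTo(high) <= 0)) {
                    expected.add(kv);
                }
            }
            TestUtils.checkEquals(result, expected.iterator());
        }
    }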
Use of org.apache.kafka.streams.state.KeyValueStore in project kafka by apache.
The class KTableKTableForeignKeyJoinMaterializationIntegrationTest, method getTopology.
private Topology getTopology(final Properties streamsConfig, final String queryableStoreName) {
    final StreamsBuilder builder = new StreamsBuilder();
    final KTable<String, String> left = builder.table(LEFT_TABLE, Consumed.with(Serdes.String(), Serdes.String()));
    final KTable<String, String> right = builder.table(RIGHT_TABLE, Consumed.with(Serdes.String(), Serdes.String()));
    final Function<String, String> extractor = value -> value.split("\\|")[1];
    final ValueJoiner<String, String, String> joiner = (value1, value2) -> "(" + value1 + "," + value2 + ")";
    final Materialized<String, String, KeyValueStore<Bytes, byte[]>> materialized;
    if (queryable) {
        materialized = Materialized.<String, String, KeyValueStore<Bytes, byte[]>>as(queryableStoreName).withValueSerde(Serdes.String());
    } else {
        materialized = Materialized.with(null, Serdes.String());
    }
    final KTable<String, String> joinResult;
    if (this.materialized) {
        joinResult = left.join(right, extractor, joiner, materialized);
    } else {
        joinResult = left.join(right, extractor, joiner);
    }
    joinResult.toStream().to(OUTPUT, Produced.with(null, Serdes.String()));
    return builder.build(streamsConfig);
}
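When queryable is true, the foreign-key join result is materialized under queryableStoreName and can be read back from a running KafkaStreams instance via interactive queries. A brief sketch, assuming a started streams instance built from this topology; the key "lhs1" is a placeholder, not one of the test's fixture keys.

    final ReadOnlyKeyValueStore<String, String> store = streams.store(
        StoreQueryParameters.fromNameAndType(queryableStoreName, QueryableStoreTypes.keyValueStore()));
    // values have the "(leftValue,rightValue)" shape produced by the joiner above
    final String joined = store.get("lhs1");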
Use of org.apache.kafka.streams.state.KeyValueStore in project kafka by apache.
The class StandbyTaskCreationIntegrationTest, method shouldNotCreateAnyStandByTasksForStateStoreWithLoggingDisabled.
@Test
public void shouldNotCreateAnyStandByTasksForStateStoreWithLoggingDisabled() throws Exception {
    final StreamsBuilder builder = new StreamsBuilder();
    final String stateStoreName = "myTransformState";
    final StoreBuilder<KeyValueStore<Integer, Integer>> keyValueStoreBuilder = Stores.keyValueStoreBuilder(Stores.persistentKeyValueStore(stateStoreName), Serdes.Integer(), Serdes.Integer()).withLoggingDisabled();
    builder.addStateStore(keyValueStoreBuilder);
    builder.stream(INPUT_TOPIC, Consumed.with(Serdes.Integer(), Serdes.Integer())).transform(() -> new Transformer<Integer, Integer, KeyValue<Integer, Integer>>() {
        @Override
        public void init(final ProcessorContext context) {
        }

        @Override
        public KeyValue<Integer, Integer> transform(final Integer key, final Integer value) {
            return null;
        }

        @Override
        public void close() {
        }
    }, stateStoreName);
    final Topology topology = builder.build();
    createClients(topology, streamsConfiguration(), topology, streamsConfiguration());
    setStateListenersForVerification(thread -> thread.standbyTasks().isEmpty() && !thread.activeTasks().isEmpty());
    startClients();
    waitUntilBothClientAreOK("At least one client did not reach state RUNNING with active tasks but no stand-by tasks");
}
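For contrast, standby tasks are only created for stores that are backed by a changelog topic and when standby replicas are requested. A sketch of a configuration that would produce standbys; the values below are illustrative and not taken from this test class.

    // keep changelogging enabled so the store can be replicated to standbys
    final StoreBuilder<KeyValueStore<Integer, Integer>> loggedStoreBuilder =
        Stores.keyValueStoreBuilder(Stores.persistentKeyValueStore("myTransformState"), Serdes.Integer(), Serdes.Integer())
              .withLoggingEnabled(Collections.emptyMap());

    final Properties props = new Properties();
    // ask for one standby replica per stateful task
    props.put(StreamsConfig.NUM_STANDBY_REPLICAS_CONFIG, 1);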
Use of org.apache.kafka.streams.state.KeyValueStore in project kafka by apache.
The class GlobalKTableIntegrationTest, method before.
@Before
public void before() throws Exception {
    builder = new StreamsBuilder();
    createTopics();
    streamsConfiguration = new Properties();
    final String safeTestName = safeUniqueTestName(getClass(), testName);
    streamsConfiguration.put(StreamsConfig.APPLICATION_ID_CONFIG, "app-" + safeTestName);
    streamsConfiguration.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, CLUSTER.bootstrapServers());
    streamsConfiguration.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
    streamsConfiguration.put(StreamsConfig.STATE_DIR_CONFIG, TestUtils.tempDirectory().getPath());
    streamsConfiguration.put(StreamsConfig.CACHE_MAX_BYTES_BUFFERING_CONFIG, 0);
    streamsConfiguration.put(StreamsConfig.COMMIT_INTERVAL_MS_CONFIG, 100L);
    globalTable = builder.globalTable(globalTableTopic, Consumed.with(Serdes.Long(), Serdes.String()), Materialized.<Long, String, KeyValueStore<Bytes, byte[]>>as(globalStore).withKeySerde(Serdes.Long()).withValueSerde(Serdes.String()));
    final Consumed<String, Long> stringLongConsumed = Consumed.with(Serdes.String(), Serdes.Long());
    stream = builder.stream(streamTopic, stringLongConsumed);
    supplier = new MockApiProcessorSupplier<>();
}
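This setup only wires up the GlobalKTable and the KStream; the tests in the class then join the stream against the global table. A sketch of that kind of join, with an illustrative key mapper and joiner rather than the ones the tests actually use:

    final KStream<String, String> enriched = stream.join(
        globalTable,
        (key, value) -> value,                                  // use the stream's Long value as the global table lookup key
        (streamValue, globalValue) -> streamValue + "+" + globalValue);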