Use of org.apache.kafka.streams.state.KeyValueStore in the project apache-kafka-on-k8s by banzaicloud: the class ProcessorTopologyTest, method shouldDriveGlobalStore.
@SuppressWarnings("unchecked")
@Test
public void shouldDriveGlobalStore() {
    final String storeName = "my-store";
    final StateStoreSupplier storeSupplier = Stores.create(storeName).withStringKeys().withStringValues().inMemory().disableLogging().build();
    final String global = "global";
    final String topic = "topic";
    final TopologyBuilder topologyBuilder = this.builder.addGlobalStore(storeSupplier, global, STRING_DESERIALIZER, STRING_DESERIALIZER, topic, "processor", define(new StatefulProcessor(storeName)));
    driver = new ProcessorTopologyTestDriver(config, topologyBuilder.internalTopologyBuilder);
    final KeyValueStore<String, String> globalStore = (KeyValueStore<String, String>) topologyBuilder.globalStateStores().get(storeName);
    driver.process(topic, "key1", "value1", STRING_SERIALIZER, STRING_SERIALIZER);
    driver.process(topic, "key2", "value2", STRING_SERIALIZER, STRING_SERIALIZER);
    assertEquals("value1", globalStore.get("key1"));
    assertEquals("value2", globalStore.get("key2"));
}
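The Stores.create(storeName) fluent builder in this snippet is the old pre-1.0 state store API that has since been removed. For comparison only, a minimal sketch of roughly the same store definition (in-memory, String keys and values, logging disabled) against the current StoreBuilder API might look like the following; everything beyond the store name is an assumption, not part of the test above.
import org.apache.kafka.common.serialization.Serdes;
import org.apache.kafka.streams.state.KeyValueStore;
import org.apache.kafka.streams.state.StoreBuilder;
import org.apache.kafka.streams.state.Stores;

// In-memory String/String store with changelogging disabled, analogous to
// Stores.create("my-store").withStringKeys().withStringValues().inMemory().disableLogging().build()
final StoreBuilder<KeyValueStore<String, String>> storeBuilder =
        Stores.keyValueStoreBuilder(Stores.inMemoryKeyValueStore("my-store"), Serdes.String(), Serdes.String())
              .withLoggingDisabled();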
Use of org.apache.kafka.streams.state.KeyValueStore in the project kafka by apache: the class KStreamImpl, method toTable.
@Override
public KTable<K, V> toTable(final Named named, final Materialized<K, V, KeyValueStore<Bytes, byte[]>> materialized) {
    Objects.requireNonNull(named, "named can't be null");
    Objects.requireNonNull(materialized, "materialized can't be null");
    final NamedInternal namedInternal = new NamedInternal(named);
    final String name = namedInternal.orElseGenerateWithPrefix(builder, TO_KTABLE_NAME);
    final MaterializedInternal<K, V, KeyValueStore<Bytes, byte[]>> materializedInternal = new MaterializedInternal<>(materialized, builder, TO_KTABLE_NAME);
    final Serde<K> keySerdeOverride = materializedInternal.keySerde() == null ? keySerde : materializedInternal.keySerde();
    final Serde<V> valueSerdeOverride = materializedInternal.valueSerde() == null ? valueSerde : materializedInternal.valueSerde();
    final Set<String> subTopologySourceNodes;
    final GraphNode tableParentNode;
    if (repartitionRequired) {
        final OptimizableRepartitionNodeBuilder<K, V> repartitionNodeBuilder = optimizableRepartitionNodeBuilder();
        final String sourceName = createRepartitionedSource(builder, keySerdeOverride, valueSerdeOverride, name, null, repartitionNodeBuilder);
        tableParentNode = repartitionNodeBuilder.build();
        builder.addGraphNode(graphNode, tableParentNode);
        subTopologySourceNodes = Collections.singleton(sourceName);
    } else {
        tableParentNode = graphNode;
        subTopologySourceNodes = this.subTopologySourceNodes;
    }
    final KTableSource<K, V> tableSource = new KTableSource<>(materializedInternal.storeName(), materializedInternal.queryableStoreName());
    final ProcessorParameters<K, V, ?, ?> processorParameters = new ProcessorParameters<>(tableSource, name);
    final GraphNode tableNode = new StreamToTableNode<>(name, processorParameters, materializedInternal);
    builder.addGraphNode(tableParentNode, tableNode);
    return new KTableImpl<K, V, V>(name, keySerdeOverride, valueSerdeOverride, subTopologySourceNodes, materializedInternal.queryableStoreName(), tableSource, tableNode, builder);
}
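From the application side, this overload is reached via KStream#toTable(Named, Materialized). A minimal usage sketch is shown below; the topic, store, and processor names are invented for illustration and are not taken from the code above.
import org.apache.kafka.common.serialization.Serdes;
import org.apache.kafka.common.utils.Bytes;
import org.apache.kafka.streams.StreamsBuilder;
import org.apache.kafka.streams.kstream.Consumed;
import org.apache.kafka.streams.kstream.KTable;
import org.apache.kafka.streams.kstream.Materialized;
import org.apache.kafka.streams.kstream.Named;
import org.apache.kafka.streams.state.KeyValueStore;

final StreamsBuilder builder = new StreamsBuilder();
// Convert the record stream into a changelog-style table, materialized as a queryable KeyValueStore.
final KTable<String, String> table = builder
        .stream("input-topic", Consumed.with(Serdes.String(), Serdes.String()))
        .toTable(
            Named.as("stream-to-table"),
            Materialized.<String, String, KeyValueStore<Bytes, byte[]>>as("table-store")
                .withKeySerde(Serdes.String())
                .withValueSerde(Serdes.String()));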
Use of org.apache.kafka.streams.state.KeyValueStore in the project kafka by apache: the class HighAvailabilityTaskAssignorIntegrationTest, method shouldScaleOutWithWarmupTasks.
private void shouldScaleOutWithWarmupTasks(final Function<String, Materialized<Object, Object, KeyValueStore<Bytes, byte[]>>> materializedFunction) throws InterruptedException {
    final String testId = safeUniqueTestName(getClass(), testName);
    final String appId = "appId_" + System.currentTimeMillis() + "_" + testId;
    final String inputTopic = "input" + testId;
    final Set<TopicPartition> inputTopicPartitions = mkSet(new TopicPartition(inputTopic, 0), new TopicPartition(inputTopic, 1));
    final String storeName = "store" + testId;
    final String storeChangelog = appId + "-store" + testId + "-changelog";
    final Set<TopicPartition> changelogTopicPartitions = mkSet(new TopicPartition(storeChangelog, 0), new TopicPartition(storeChangelog, 1));
    IntegrationTestUtils.cleanStateBeforeTest(CLUSTER, 2, inputTopic, storeChangelog);
    final ReentrantLock assignmentLock = new ReentrantLock();
    final AtomicInteger assignmentsCompleted = new AtomicInteger(0);
    final Map<Integer, Boolean> assignmentsStable = new ConcurrentHashMap<>();
    final AtomicBoolean assignmentStable = new AtomicBoolean(false);
    final AssignmentListener assignmentListener = stable -> {
        assignmentLock.lock();
        try {
            final int thisAssignmentIndex = assignmentsCompleted.incrementAndGet();
            assignmentsStable.put(thisAssignmentIndex, stable);
            assignmentStable.set(stable);
        } finally {
            assignmentLock.unlock();
        }
    };
    final StreamsBuilder builder = new StreamsBuilder();
    builder.table(inputTopic, materializedFunction.apply(storeName));
    final Topology topology = builder.build();
    final int numberOfRecords = 500;
    produceTestData(inputTopic, numberOfRecords);
    try (final KafkaStreams kafkaStreams0 = new KafkaStreams(topology, streamsProperties(appId, assignmentListener));
         final KafkaStreams kafkaStreams1 = new KafkaStreams(topology, streamsProperties(appId, assignmentListener));
         final Consumer<String, String> consumer = new KafkaConsumer<>(getConsumerProperties())) {
        kafkaStreams0.start();
        // sanity check: just make sure we actually wrote all the input records
        TestUtils.waitForCondition(
            () -> getEndOffsetSum(inputTopicPartitions, consumer) == numberOfRecords,
            120_000L,
            () -> "Input records haven't all been written to the input topic: " + getEndOffsetSum(inputTopicPartitions, consumer));
        // wait until all the input records are in the changelog
        TestUtils.waitForCondition(
            () -> getEndOffsetSum(changelogTopicPartitions, consumer) == numberOfRecords,
            120_000L,
            () -> "Input records haven't all been written to the changelog: " + getEndOffsetSum(changelogTopicPartitions, consumer));
        final AtomicLong instance1TotalRestored = new AtomicLong(-1);
        final AtomicLong instance1NumRestored = new AtomicLong(-1);
        final CountDownLatch restoreCompleteLatch = new CountDownLatch(1);
        kafkaStreams1.setGlobalStateRestoreListener(new StateRestoreListener() {
            @Override
            public void onRestoreStart(final TopicPartition topicPartition, final String storeName, final long startingOffset, final long endingOffset) {
            }

            @Override
            public void onBatchRestored(final TopicPartition topicPartition, final String storeName, final long batchEndOffset, final long numRestored) {
                instance1NumRestored.accumulateAndGet(numRestored, (prev, restored) -> prev == -1 ? restored : prev + restored);
            }

            @Override
            public void onRestoreEnd(final TopicPartition topicPartition, final String storeName, final long totalRestored) {
                instance1TotalRestored.accumulateAndGet(totalRestored, (prev, restored) -> prev == -1 ? restored : prev + restored);
                restoreCompleteLatch.countDown();
            }
        });
        final int assignmentsBeforeScaleOut = assignmentsCompleted.get();
        kafkaStreams1.start();
        TestUtils.waitForCondition(() -> {
            assignmentLock.lock();
            try {
                if (assignmentsCompleted.get() > assignmentsBeforeScaleOut) {
                    assertFalseNoRetry(assignmentsStable.get(assignmentsBeforeScaleOut + 1), "the first assignment after adding a node should be unstable while we warm up the state.");
                    return true;
                } else {
                    return false;
                }
            } finally {
                assignmentLock.unlock();
            }
        }, 120_000L, "Never saw a first assignment after scale out: " + assignmentsCompleted.get());
        TestUtils.waitForCondition(
            assignmentStable::get,
            120_000L,
            "Assignment hasn't become stable: " + assignmentsCompleted.get() + " Note, if this does fail, check and see if the new instance just failed to catch up within" + " the probing rebalance interval. A full minute should be long enough to read ~500 records" + " in any test environment, but you never know...");
        restoreCompleteLatch.await();
        // We should finalize the restoration without having restored any records (because they're already
        // in the store). Otherwise, we failed to properly re-use the state from the standby.
        assertThat(instance1TotalRestored.get(), is(0L));
        // Belt-and-suspenders check that we never even attempt to restore any records.
        assertThat(instance1NumRestored.get(), is(-1L));
    }
}
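The streamsProperties helper used above is not part of this excerpt. As a rough, hedged sketch only, the configuration relevant to the high-availability assignor in such a test would revolve around warmup replicas, the probing rebalance interval, and the acceptable recovery lag; the concrete values below are assumptions, not the test's actual settings.
import java.util.Properties;
import org.apache.kafka.streams.StreamsConfig;

final Properties props = new Properties();
props.put(StreamsConfig.APPLICATION_ID_CONFIG, appId);
props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, CLUSTER.bootstrapServers());
// Allow a warmup copy of the state so the new instance can catch up before taking over active tasks.
props.put(StreamsConfig.MAX_WARMUP_REPLICAS_CONFIG, 2);
// Probe frequently (here, every minute) so the scaled-out instance is promoted soon after it has warmed up.
props.put(StreamsConfig.PROBING_REBALANCE_INTERVAL_MS_CONFIG, 60_000);
// Require the new instance to be fully caught up before it may be assigned the active task.
props.put(StreamsConfig.ACCEPTABLE_RECOVERY_LAG_CONFIG, 0);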
Use of org.apache.kafka.streams.state.KeyValueStore in the project kafka by apache: the class GlobalKTableEOSIntegrationTest, method before.
@Before
public void before() throws Exception {
    builder = new StreamsBuilder();
    createTopics();
    streamsConfiguration = new Properties();
    final String safeTestName = safeUniqueTestName(getClass(), testName);
    streamsConfiguration.put(StreamsConfig.APPLICATION_ID_CONFIG, "app-" + safeTestName);
    streamsConfiguration.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, CLUSTER.bootstrapServers());
    streamsConfiguration.put(StreamsConfig.STATE_DIR_CONFIG, TestUtils.tempDirectory().getPath());
    streamsConfiguration.put(StreamsConfig.CACHE_MAX_BYTES_BUFFERING_CONFIG, 0L);
    streamsConfiguration.put(StreamsConfig.COMMIT_INTERVAL_MS_CONFIG, 100L);
    streamsConfiguration.put(StreamsConfig.PROCESSING_GUARANTEE_CONFIG, eosConfig);
    streamsConfiguration.put(StreamsConfig.TASK_TIMEOUT_MS_CONFIG, 1L);
    streamsConfiguration.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
    streamsConfiguration.put(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG, 1000);
    streamsConfiguration.put(ConsumerConfig.HEARTBEAT_INTERVAL_MS_CONFIG, 300);
    streamsConfiguration.put(ConsumerConfig.REQUEST_TIMEOUT_MS_CONFIG, 5000);
    globalTable = builder.globalTable(
        globalTableTopic,
        Consumed.with(Serdes.Long(), Serdes.String()),
        Materialized.<Long, String, KeyValueStore<Bytes, byte[]>>as(globalStore)
            .withKeySerde(Serdes.Long())
            .withValueSerde(Serdes.String()));
    final Consumed<String, Long> stringLongConsumed = Consumed.with(Serdes.String(), Serdes.Long());
    stream = builder.stream(streamTopic, stringLongConsumed);
    foreachAction = results::put;
}
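Once the application is running, the global table materialized here can be read through the interactive-query API using the globalStore name from this setup. A brief sketch follows; the running KafkaStreams instance itself is assumed, since it is not shown in this excerpt.
import org.apache.kafka.streams.KafkaStreams;
import org.apache.kafka.streams.StoreQueryParameters;
import org.apache.kafka.streams.state.QueryableStoreTypes;
import org.apache.kafka.streams.state.ReadOnlyKeyValueStore;

// Look up the global table's backing store by name and read a single key.
final ReadOnlyKeyValueStore<Long, String> view =
        kafkaStreams.store(StoreQueryParameters.fromNameAndType(globalStore, QueryableStoreTypes.keyValueStore()));
final String valueForKey1 = view.get(1L);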
Use of org.apache.kafka.streams.state.KeyValueStore in the project kafka by apache: the class KTableEfficientRangeQueryTest, method getStoreConfig.
private Materialized<String, String, KeyValueStore<Bytes, byte[]>> getStoreConfig(final StoreType type, final String name, final boolean cachingEnabled, final boolean loggingEnabled) {
    final Supplier<KeyValueBytesStoreSupplier> createStore = () -> {
        if (type == StoreType.InMemory) {
            return Stores.inMemoryKeyValueStore(TABLE_NAME);
        } else if (type == StoreType.RocksDB) {
            return Stores.persistentKeyValueStore(TABLE_NAME);
        } else if (type == StoreType.Timed) {
            return Stores.persistentTimestampedKeyValueStore(TABLE_NAME);
        } else {
            return Stores.inMemoryKeyValueStore(TABLE_NAME);
        }
    };
    final KeyValueBytesStoreSupplier stateStoreSupplier = createStore.get();
    final Materialized<String, String, KeyValueStore<Bytes, byte[]>> stateStoreConfig = Materialized.<String, String>as(stateStoreSupplier)
        .withKeySerde(Serdes.String())
        .withValueSerde(Serdes.String());
    if (cachingEnabled) {
        stateStoreConfig.withCachingEnabled();
    } else {
        stateStoreConfig.withCachingDisabled();
    }
    if (loggingEnabled) {
        stateStoreConfig.withLoggingEnabled(new HashMap<String, String>());
    } else {
        stateStoreConfig.withLoggingDisabled();
    }
    return stateStoreConfig;
}
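A hedged usage sketch for this helper: the Materialized config it returns is passed to StreamsBuilder#table, and the resulting store can then serve the range queries the test name refers to. The topic name, test driver properties, and key bounds below are assumptions for illustration.
import java.util.Properties;
import org.apache.kafka.common.serialization.Serdes;
import org.apache.kafka.streams.KeyValue;
import org.apache.kafka.streams.StreamsBuilder;
import org.apache.kafka.streams.StreamsConfig;
import org.apache.kafka.streams.TopologyTestDriver;
import org.apache.kafka.streams.kstream.Consumed;
import org.apache.kafka.streams.state.KeyValueIterator;
import org.apache.kafka.streams.state.KeyValueStore;

final StreamsBuilder builder = new StreamsBuilder();
builder.table("input-topic",
    Consumed.with(Serdes.String(), Serdes.String()),
    getStoreConfig(StoreType.RocksDB, TABLE_NAME, true, true));

final Properties props = new Properties();
props.put(StreamsConfig.APPLICATION_ID_CONFIG, "range-query-sketch");
props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
try (final TopologyTestDriver driver = new TopologyTestDriver(builder.build(), props)) {
    final KeyValueStore<String, String> store = driver.getKeyValueStore(TABLE_NAME);
    // Scan all entries whose keys fall in the inclusive range ["a", "m"], in ascending key order.
    try (final KeyValueIterator<String, String> iterator = store.range("a", "m")) {
        while (iterator.hasNext()) {
            final KeyValue<String, String> entry = iterator.next();
            System.out.println(entry.key + " -> " + entry.value);
        }
    }
}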