Use of org.apache.kafka.streams.state.KeyValueStore in project kafka by apache.
The class KTableKTableForeignKeyInnerJoinCustomPartitionerIntegrationTest, method prepareTopology.
private static KafkaStreams prepareTopology(final String queryableName, final Properties streamsConfig) {
    final UniqueTopicSerdeScope serdeScope = new UniqueTopicSerdeScope();
    final StreamsBuilder builder = new StreamsBuilder();
    final KTable<String, String> table1 = builder
        .stream(TABLE_1, Consumed.with(serdeScope.decorateSerde(Serdes.String(), streamsConfig, true), serdeScope.decorateSerde(Serdes.String(), streamsConfig, false)))
        .repartition(repartitionA())
        .toTable(Named.as("table.a"));
    final KTable<String, String> table2 = builder
        .stream(TABLE_2, Consumed.with(serdeScope.decorateSerde(Serdes.String(), streamsConfig, true), serdeScope.decorateSerde(Serdes.String(), streamsConfig, false)))
        .repartition(repartitionB())
        .toTable(Named.as("table.b"));
    final Materialized<String, String, KeyValueStore<Bytes, byte[]>> materialized;
    if (queryableName != null) {
        materialized = Materialized.<String, String, KeyValueStore<Bytes, byte[]>>as(queryableName)
            .withKeySerde(serdeScope.decorateSerde(Serdes.String(), streamsConfig, true))
            .withValueSerde(serdeScope.decorateSerde(Serdes.String(), streamsConfig, false))
            .withCachingDisabled();
    } else {
        throw new RuntimeException("Current implementation of joinOnForeignKey requires a materialized store");
    }
    final ValueJoiner<String, String, String> joiner = (value1, value2) -> "value1=" + value1 + ",value2=" + value2;
    final TableJoined<String, String> tableJoined = TableJoined.with(
        (topic, key, value, numPartitions) -> Math.abs(getKeyB(key).hashCode()) % numPartitions,
        (topic, key, value, numPartitions) -> Math.abs(key.hashCode()) % numPartitions);
    table1.join(table2, KTableKTableForeignKeyInnerJoinCustomPartitionerIntegrationTest::getKeyB, joiner, tableJoined, materialized)
        .toStream()
        .to(OUTPUT, Produced.with(serdeScope.decorateSerde(Serdes.String(), streamsConfig, true), serdeScope.decorateSerde(Serdes.String(), streamsConfig, false)));
    return new KafkaStreams(builder.build(streamsConfig), streamsConfig);
}
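The two partitioners passed to TableJoined must agree with how the source tables themselves are partitioned; otherwise the foreign-key join's subscription and response records would land on the wrong partitions. The repartitionA() and repartitionB() helpers are not part of this excerpt. A plausible sketch, assuming they mirror the TableJoined partitioners above (the repartition names and the partition count of 4 are illustrative, not from the source):

// Hypothetical reconstruction of the helpers; the real ones in the test class may differ.
private static Repartitioned<String, String> repartitionA() {
    // Partition table.a's source the same way the left side of the join is
    // partitioned: by the hash of the extracted foreign key.
    return Repartitioned.<String, String>as("a")
        .withNumberOfPartitions(4) // assumed partition count
        .withStreamPartitioner((topic, key, value, numPartitions) ->
            Math.abs(getKeyB(key).hashCode()) % numPartitions);
}

private static Repartitioned<String, String> repartitionB() {
    // Partition table.b's source by the hash of its own primary key.
    return Repartitioned.<String, String>as("b")
        .withNumberOfPartitions(4) // assumed partition count
        .withStreamPartitioner((topic, key, value, numPartitions) ->
            Math.abs(key.hashCode()) % numPartitions);
}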
Use of org.apache.kafka.streams.state.KeyValueStore in project kafka by apache.
The class IQv2IntegrationTest, method shouldNotRequireQueryHandler.
@Test
public void shouldNotRequireQueryHandler() {
    final KeyQuery<Integer, ValueAndTimestamp<Integer>> query = KeyQuery.withKey(1);
    final int partition = 1;
    final Set<Integer> partitions = singleton(partition);
    final StateQueryRequest<ValueAndTimestamp<Integer>> request =
        inStore(STORE_NAME).withQuery(query).withPartitions(partitions);
    final StreamsBuilder builder = new StreamsBuilder();
    builder.table(
        INPUT_TOPIC_NAME,
        Consumed.with(Serdes.Integer(), Serdes.Integer()),
        Materialized.as(new KeyValueBytesStoreSupplier() {
            @Override
            public String name() {
                return STORE_NAME;
            }

            @Override
            public KeyValueStore<Bytes, byte[]> get() {
                // A bare-bones in-memory store that deliberately does not override
                // StateStore#query, so IQv2 requests against it fall back to the
                // default handler and fail with UNKNOWN_QUERY_TYPE.
                return new KeyValueStore<Bytes, byte[]>() {
                    private boolean open = false;
                    private final Map<Bytes, byte[]> map = new HashMap<>();
                    private Position position;
                    private StateStoreContext context;

                    @Override
                    public void put(final Bytes key, final byte[] value) {
                        map.put(key, value);
                        StoreQueryUtils.updatePosition(position, context);
                    }

                    @Override
                    public byte[] putIfAbsent(final Bytes key, final byte[] value) {
                        StoreQueryUtils.updatePosition(position, context);
                        return map.putIfAbsent(key, value);
                    }

                    @Override
                    public void putAll(final List<KeyValue<Bytes, byte[]>> entries) {
                        StoreQueryUtils.updatePosition(position, context);
                        for (final KeyValue<Bytes, byte[]> entry : entries) {
                            map.put(entry.key, entry.value);
                        }
                    }

                    @Override
                    public byte[] delete(final Bytes key) {
                        StoreQueryUtils.updatePosition(position, context);
                        return map.remove(key);
                    }

                    @Override
                    public String name() {
                        return STORE_NAME;
                    }

                    @Deprecated
                    @Override
                    public void init(final ProcessorContext context, final StateStore root) {
                        throw new UnsupportedOperationException();
                    }

                    @Override
                    public void init(final StateStoreContext context, final StateStore root) {
                        context.register(root, (key, value) -> put(Bytes.wrap(key), value));
                        this.open = true;
                        this.position = Position.emptyPosition();
                        this.context = context;
                    }

                    @Override
                    public void flush() {
                    }

                    @Override
                    public void close() {
                        this.open = false;
                        map.clear();
                    }

                    @Override
                    public boolean persistent() {
                        return false;
                    }

                    @Override
                    public boolean isOpen() {
                        return open;
                    }

                    @Override
                    public Position getPosition() {
                        return position;
                    }

                    @Override
                    public byte[] get(final Bytes key) {
                        return map.get(key);
                    }

                    @Override
                    public KeyValueIterator<Bytes, byte[]> range(final Bytes from, final Bytes to) {
                        throw new UnsupportedOperationException();
                    }

                    @Override
                    public KeyValueIterator<Bytes, byte[]> all() {
                        throw new UnsupportedOperationException();
                    }

                    @Override
                    public long approximateNumEntries() {
                        return map.size();
                    }
                };
            }

            @Override
            public String metricsScope() {
                return "nonquery";
            }
        })
    );
    kafkaStreams = new KafkaStreams(builder.build(), streamsConfiguration());
    kafkaStreams.cleanUp();
    kafkaStreams.start();

    final StateQueryResult<ValueAndTimestamp<Integer>> result =
        IntegrationTestUtils.iqv2WaitForResult(kafkaStreams, request);
    final QueryResult<ValueAndTimestamp<Integer>> queryResult = result.getPartitionResults().get(partition);
    assertThat(queryResult.isFailure(), is(true));
    assertThat(queryResult.getFailureReason(), is(FailureReason.UNKNOWN_QUERY_TYPE));
    assertThat(
        queryResult.getFailureMessage(),
        matchesPattern("This store (.*) doesn't know how to execute the given query (.*)."
            + " Contact the store maintainer if you need support for a new query type."));
}
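For contrast, a store opts in to IQv2 by overriding the default StateStore#query method, which is exactly what the store above omits. A minimal sketch of such an override for the same anonymous store, handling only KeyQuery over raw bytes; the QueryConfig parameter matches newer Kafka versions (older ones take a boolean collectExecutionInfo flag instead):

@SuppressWarnings("unchecked")
@Override
public <R> QueryResult<R> query(final Query<R> query,
                                final PositionBound positionBound,
                                final QueryConfig config) {
    if (query instanceof KeyQuery) {
        final KeyQuery<Bytes, byte[]> keyQuery = (KeyQuery<Bytes, byte[]>) query;
        // Serve the raw bytes for the requested key; the metered store layer
        // is what translates typed queries down to Bytes at runtime.
        final QueryResult<R> result = (QueryResult<R>) QueryResult.forResult(map.get(keyQuery.getKey()));
        result.setPosition(position);
        return result;
    }
    // Anything else keeps the default behavior the test above relies on.
    return QueryResult.forUnknownQueryType(query, this);
}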
Use of org.apache.kafka.streams.state.KeyValueStore in project kafka by apache.
The class OptimizedKTableIntegrationTest, method shouldApplyUpdatesToStandbyStore.
@Test
public void shouldApplyUpdatesToStandbyStore() throws Exception {
    final int batch1NumMessages = 100;
    final int batch2NumMessages = 100;
    final int key = 1;
    final Semaphore semaphore = new Semaphore(0);
    final StreamsBuilder builder = new StreamsBuilder();
    builder.table(
            INPUT_TOPIC_NAME,
            Consumed.with(Serdes.Integer(), Serdes.Integer()),
            Materialized.<Integer, Integer, KeyValueStore<Bytes, byte[]>>as(TABLE_NAME).withCachingDisabled())
        .toStream()
        .peek((k, v) -> semaphore.release());
    final KafkaStreams kafkaStreams1 = createKafkaStreams(builder, streamsConfiguration());
    final KafkaStreams kafkaStreams2 = createKafkaStreams(builder, streamsConfiguration());
    final List<KafkaStreams> kafkaStreamsList = Arrays.asList(kafkaStreams1, kafkaStreams2);
    startApplicationAndWaitUntilRunning(kafkaStreamsList, Duration.ofSeconds(60));

    produceValueRange(key, 0, batch1NumMessages);

    // Assert that all messages in the first batch were processed in a timely manner
    assertThat(semaphore.tryAcquire(batch1NumMessages, 60, TimeUnit.SECONDS), is(equalTo(true)));

    final ReadOnlyKeyValueStore<Integer, Integer> store1 =
        IntegrationTestUtils.getStore(TABLE_NAME, kafkaStreams1, QueryableStoreTypes.keyValueStore());
    final ReadOnlyKeyValueStore<Integer, Integer> store2 =
        IntegrationTestUtils.getStore(TABLE_NAME, kafkaStreams2, QueryableStoreTypes.keyValueStore());
    final boolean kafkaStreams1WasFirstActive;
    final KeyQueryMetadata keyQueryMetadata =
        kafkaStreams1.queryMetadataForKey(TABLE_NAME, key, (topic, somekey, value, numPartitions) -> 0);

    // Assert that the current value in store reflects all messages being processed
    if ((keyQueryMetadata.activeHost().port() % 2) == 1) {
        assertThat(store1.get(key), is(equalTo(batch1NumMessages - 1)));
        kafkaStreams1WasFirstActive = true;
    } else {
        assertThat(store2.get(key), is(equalTo(batch1NumMessages - 1)));
        kafkaStreams1WasFirstActive = false;
    }

    if (kafkaStreams1WasFirstActive) {
        kafkaStreams1.close();
    } else {
        kafkaStreams2.close();
    }

    final ReadOnlyKeyValueStore<Integer, Integer> newActiveStore = kafkaStreams1WasFirstActive ? store2 : store1;
    TestUtils.retryOnExceptionWithTimeout(60 * 1000, 100, () -> {
        // Assert that after failover we have recovered to the last store write
        assertThat(newActiveStore.get(key), is(equalTo(batch1NumMessages - 1)));
    });

    final int totalNumMessages = batch1NumMessages + batch2NumMessages;
    produceValueRange(key, batch1NumMessages, totalNumMessages);

    // Assert that all messages in the second batch were processed in a timely manner
    assertThat(semaphore.tryAcquire(batch2NumMessages, 60, TimeUnit.SECONDS), is(equalTo(true)));
    TestUtils.retryOnExceptionWithTimeout(60 * 1000, 100, () -> {
        // Assert that the current value in store reflects all messages being processed
        assertThat(newActiveStore.get(key), is(equalTo(totalNumMessages - 1)));
    });
}
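The failover step only works because each instance hosts a standby replica of the other instance's active store. The streamsConfiguration() helper is not shown in this excerpt; a minimal sketch of the configuration it would need, where the application id, broker address, and port-assignment helper are illustrative assumptions rather than values from the source:

private Properties streamsConfiguration() {
    final Properties config = new Properties();
    // Both instances must share one application id to form a single Streams app.
    config.put(StreamsConfig.APPLICATION_ID_CONFIG, "optimized-ktable-it");   // assumed id
    config.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");     // assumed broker
    // One standby replica, so the surviving instance can take over the store.
    config.put(StreamsConfig.NUM_STANDBY_REPLICAS_CONFIG, 1);
    // A unique application server endpoint per instance; the test inspects
    // activeHost().port() to decide which instance is currently active.
    config.put(StreamsConfig.APPLICATION_SERVER_CONFIG, "localhost:" + nextFreePort()); // hypothetical helper
    return config;
}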
Use of org.apache.kafka.streams.state.KeyValueStore in project kafka by apache.
The class RangeQueryIntegrationTest, method testStoreConfig.
@Test
public void testStoreConfig() throws Exception {
    final StreamsBuilder builder = new StreamsBuilder();
    final Materialized<String, String, KeyValueStore<Bytes, byte[]>> stateStoreConfig =
        getStoreConfig(storeType, enableCaching, enableLogging);
    builder.table(inputStream, stateStoreConfig);
    try (final KafkaStreams kafkaStreams = new KafkaStreams(builder.build(), STREAMS_CONFIG)) {
        IntegrationTestUtils.startApplicationAndWaitUntilRunning(Collections.singletonList(kafkaStreams), Duration.ofSeconds(60));
        writeInputData();
        final ReadOnlyKeyValueStore<String, String> stateStore =
            IntegrationTestUtils.getStore(1000_000L, TABLE_NAME, kafkaStreams, QueryableStoreTypes.keyValueStore());

        // wait for the store to populate
        TestUtils.waitForCondition(() -> stateStore.get(high) != null, "The store never finished populating");

        // query the state store
        try (final KeyValueIterator<String, String> scanIterator =
                 forward ? stateStore.range(null, null) : stateStore.reverseRange(null, null)) {
            final Iterator<KeyValue<String, String>> dataIterator = forward ? records.iterator() : records.descendingIterator();
            TestUtils.checkEquals(scanIterator, dataIterator);
        }

        try (final KeyValueIterator<String, String> allIterator =
                 forward ? stateStore.all() : stateStore.reverseAll()) {
            final Iterator<KeyValue<String, String>> dataIterator = forward ? records.iterator() : records.descendingIterator();
            TestUtils.checkEquals(allIterator, dataIterator);
        }

        testRange("range", stateStore, innerLow, innerHigh, forward);
        testRange("until", stateStore, null, middle, forward);
        testRange("from", stateStore, middle, null, forward);
        testRange("untilBetween", stateStore, null, innerHighBetween, forward);
        testRange("fromBetween", stateStore, innerLowBetween, null, forward);
    }
}
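The testRange helper is referenced but not included in this excerpt. A plausible sketch, assuming it compares a (reverse)range scan against the expected subset of the records fixture, with null bounds meaning unbounded and name serving only as a label for the case:

// Hypothetical reconstruction; the real helper in RangeQueryIntegrationTest may differ.
private void testRange(final String name,
                       final ReadOnlyKeyValueStore<String, String> store,
                       final String from,
                       final String to,
                       final boolean forward) {
    try (final KeyValueIterator<String, String> resultIterator =
             forward ? store.range(from, to) : store.reverseRange(from, to)) {
        // Filter the expected records to the queried key range, preserving
        // forward or reverse order to match the scan direction.
        final List<KeyValue<String, String>> expected = new ArrayList<>();
        final Iterator<KeyValue<String, String>> source =
            forward ? records.iterator() : records.descendingIterator();
        while (source.hasNext()) {
            final KeyValue<String, String> record = source.next();
            if ((from == null || record.key.compareTo(from) >= 0)
                && (to == null || record.key.compareTo(to) <= 0)) {
                expected.add(record);
            }
        }
        TestUtils.checkEquals(resultIterator, expected.iterator());
    }
}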
Use of org.apache.kafka.streams.state.KeyValueStore in project kafka by apache.
The class RangeQueryIntegrationTest, method getStoreConfig.
private Materialized<String, String, KeyValueStore<Bytes, byte[]>> getStoreConfig(final StoreType type,
                                                                                  final boolean cachingEnabled,
                                                                                  final boolean loggingEnabled) {
    final Supplier<KeyValueBytesStoreSupplier> createStore = () -> {
        if (type == StoreType.InMemory) {
            return Stores.inMemoryKeyValueStore(TABLE_NAME);
        } else if (type == StoreType.RocksDB) {
            return Stores.persistentKeyValueStore(TABLE_NAME);
        } else if (type == StoreType.Timed) {
            return Stores.persistentTimestampedKeyValueStore(TABLE_NAME);
        } else {
            return Stores.inMemoryKeyValueStore(TABLE_NAME);
        }
    };
    final KeyValueBytesStoreSupplier stateStoreSupplier = createStore.get();
    final Materialized<String, String, KeyValueStore<Bytes, byte[]>> stateStoreConfig =
        Materialized.<String, String>as(stateStoreSupplier)
            .withKeySerde(Serdes.String())
            .withValueSerde(Serdes.String());
    if (cachingEnabled) {
        stateStoreConfig.withCachingEnabled();
    } else {
        stateStoreConfig.withCachingDisabled();
    }
    if (loggingEnabled) {
        stateStoreConfig.withLoggingEnabled(new HashMap<>());
    } else {
        stateStoreConfig.withLoggingDisabled();
    }
    return stateStoreConfig;
}
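The StoreType enum is not part of this excerpt; a minimal sketch consistent with the branches above. Note that the final else arm makes the in-memory store the fallback for any constant not matched explicitly:

enum StoreType {
    InMemory,   // Stores.inMemoryKeyValueStore
    RocksDB,    // Stores.persistentKeyValueStore
    Timed       // Stores.persistentTimestampedKeyValueStore
}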