Use of org.apache.kafka.streams.Topology in project kafka by apache.
From the class KTableEfficientRangeQueryTest, method testStoreConfig:
@Test
public void testStoreConfig() {
    final Materialized<String, String, KeyValueStore<Bytes, byte[]>> stateStoreConfig =
        getStoreConfig(storeType, TABLE_NAME, enableLogging, enableCaching);
    // Create topology: table from input topic
    final StreamsBuilder builder = new StreamsBuilder();
    final KTable<String, String> table = builder.table("input", stateStoreConfig);
    final Topology topology = builder.build();
    try (final TopologyTestDriver driver = new TopologyTestDriver(topology)) {
        // get input topic and state store
        final TestInputTopic<String, String> input =
            driver.createInputTopic("input", new StringSerializer(), new StringSerializer());
        final ReadOnlyKeyValueStore<String, String> stateStore = driver.getKeyValueStore(TABLE_NAME);
        // write some data
        for (final KeyValue<String, String> kv : records) {
            input.pipeInput(kv.key, kv.value);
        }
        // query the state store: full-range scans should match the records in (reverse) order
        try (final KeyValueIterator<String, String> scanIterator =
                 forward ? stateStore.range(null, null) : stateStore.reverseRange(null, null)) {
            final Iterator<KeyValue<String, String>> dataIterator =
                forward ? records.iterator() : records.descendingIterator();
            TestUtils.checkEquals(scanIterator, dataIterator);
        }
        try (final KeyValueIterator<String, String> allIterator =
                 forward ? stateStore.all() : stateStore.reverseAll()) {
            final Iterator<KeyValue<String, String>> dataIterator =
                forward ? records.iterator() : records.descendingIterator();
            TestUtils.checkEquals(allIterator, dataIterator);
        }
        testRange("range", stateStore, innerLow, innerHigh, forward);
        testRange("until", stateStore, null, middle, forward);
        testRange("from", stateStore, middle, null, forward);
        testRange("untilBetween", stateStore, null, innerHighBetween, forward);
        testRange("fromBetween", stateStore, innerLowBetween, null, forward);
    }
}
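The getStoreConfig helper isn't shown on this page. Below is a minimal sketch of what it could look like, assuming storeType is a plain string tag choosing between the built-in in-memory and RocksDB store suppliers; the branching, tag values, and parameter order are assumptions made to match the call above, while Stores.inMemoryKeyValueStore, Stores.persistentKeyValueStore, and the Materialized configuration methods are real Kafka Streams APIs.

    // Hypothetical sketch only; the real helper lives in KTableEfficientRangeQueryTest
    // and may differ in its parameters and store selection.
    private Materialized<String, String, KeyValueStore<Bytes, byte[]>> getStoreConfig(final String storeType,
                                                                                      final String name,
                                                                                      final boolean enableLogging,
                                                                                      final boolean enableCaching) {
        // Pick a concrete bytes-store supplier for the table's materialization.
        final KeyValueBytesStoreSupplier supplier = "in_memory".equals(storeType)
            ? Stores.inMemoryKeyValueStore(name)
            : Stores.persistentKeyValueStore(name);
        final Materialized<String, String, KeyValueStore<Bytes, byte[]>> stateStoreConfig =
            Materialized.<String, String>as(supplier)
                .withKeySerde(Serdes.String())
                .withValueSerde(Serdes.String());
        if (enableCaching) {
            stateStoreConfig.withCachingEnabled();
        } else {
            stateStoreConfig.withCachingDisabled();
        }
        if (enableLogging) {
            stateStoreConfig.withLoggingEnabled(new HashMap<>());
        } else {
            stateStoreConfig.withLoggingDisabled();
        }
        return stateStoreConfig;
    }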
Use of org.apache.kafka.streams.Topology in project kafka by apache.
From the class KTableKTableForeignKeyJoinMaterializationIntegrationTest, method getTopology:
private Topology getTopology(final Properties streamsConfig, final String queryableStoreName) {
    final StreamsBuilder builder = new StreamsBuilder();
    final KTable<String, String> left = builder.table(LEFT_TABLE, Consumed.with(Serdes.String(), Serdes.String()));
    final KTable<String, String> right = builder.table(RIGHT_TABLE, Consumed.with(Serdes.String(), Serdes.String()));
    // the foreign key is the second segment of the pipe-delimited left-hand value
    final Function<String, String> extractor = value -> value.split("\\|")[1];
    final ValueJoiner<String, String, String> joiner = (value1, value2) -> "(" + value1 + "," + value2 + ")";
    final Materialized<String, String, KeyValueStore<Bytes, byte[]>> materialized;
    if (queryable) {
        materialized = Materialized.<String, String, KeyValueStore<Bytes, byte[]>>as(queryableStoreName).withValueSerde(Serdes.String());
    } else {
        materialized = Materialized.with(null, Serdes.String());
    }
    final KTable<String, String> joinResult;
    if (this.materialized) {
        joinResult = left.join(right, extractor, joiner, materialized);
    } else {
        joinResult = left.join(right, extractor, joiner);
    }
    joinResult.toStream().to(OUTPUT, Produced.with(null, Serdes.String()));
    return builder.build(streamsConfig);
}
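Note that builder.build(streamsConfig), unlike the no-argument build(), lets Kafka Streams apply configuration-driven topology optimizations before the Topology is finalized. As a rough usage sketch, the topology can be exercised with TopologyTestDriver; the property values and store name below are illustrative assumptions, and the instance fields queryable and materialized are set elsewhere in the test class:

    // Illustrative only: drive the topology above with the TopologyTestDriver.
    final Properties props = new Properties();
    props.put(StreamsConfig.APPLICATION_ID_CONFIG, "fk-join-materialization-test"); // assumed id
    props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "dummy:9092"); // not contacted by the driver
    final Topology topology = getTopology(props, "joined-store"); // assumed store name
    try (final TopologyTestDriver driver = new TopologyTestDriver(topology, props)) {
        final TestInputTopic<String, String> left =
            driver.createInputTopic(LEFT_TABLE, new StringSerializer(), new StringSerializer());
        final TestInputTopic<String, String> right =
            driver.createInputTopic(RIGHT_TABLE, new StringSerializer(), new StringSerializer());
        right.pipeInput("rhs1", "rhsValue1");
        left.pipeInput("lhs1", "lhsValue1|rhs1"); // the extractor pulls FK "rhs1" from the value
        final TestOutputTopic<String, String> output =
            driver.createOutputTopic(OUTPUT, new StringDeserializer(), new StringDeserializer());
        // With the joiner above, the joined value is "(lhsValue1|rhs1,rhsValue1)".
        System.out.println(output.readKeyValuesToMap());
    }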
Use of org.apache.kafka.streams.Topology in project kafka by apache.
From the class StandbyTaskCreationIntegrationTest, method shouldNotCreateAnyStandByTasksForStateStoreWithLoggingDisabled:
@Test
public void shouldNotCreateAnyStandByTasksForStateStoreWithLoggingDisabled() throws Exception {
    final StreamsBuilder builder = new StreamsBuilder();
    final String stateStoreName = "myTransformState";
    final StoreBuilder<KeyValueStore<Integer, Integer>> keyValueStoreBuilder =
        Stores.keyValueStoreBuilder(Stores.persistentKeyValueStore(stateStoreName), Serdes.Integer(), Serdes.Integer())
            .withLoggingDisabled();
    builder.addStateStore(keyValueStoreBuilder);
    builder.stream(INPUT_TOPIC, Consumed.with(Serdes.Integer(), Serdes.Integer()))
        .transform(() -> new Transformer<Integer, Integer, KeyValue<Integer, Integer>>() {
            @Override
            public void init(final ProcessorContext context) {
            }

            @Override
            public KeyValue<Integer, Integer> transform(final Integer key, final Integer value) {
                return null;
            }

            @Override
            public void close() {
            }
        }, stateStoreName);
    final Topology topology = builder.build();
    createClients(topology, streamsConfiguration(), topology, streamsConfiguration());
    setStateListenersForVerification(thread -> thread.standbyTasks().isEmpty() && !thread.activeTasks().isEmpty());
    startClients();
    waitUntilBothClientAreOK("At least one client did not reach state RUNNING with active tasks but no stand-by tasks");
}
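Standby tasks keep shadow copies of state by replaying changelog topics, so a store built with withLoggingDisabled() has no changelog to replay and can never get a standby replica, even when standbys are requested. The streamsConfiguration() helper isn't shown on this page; a plausible sketch of the part that matters for this test, where every concrete value is an assumption:

    // Hypothetical sketch: the essential setting is NUM_STANDBY_REPLICAS_CONFIG,
    // since the test verifies that no standby tasks appear even though one
    // replica is requested.
    private Properties streamsConfiguration() {
        final Properties config = new Properties();
        config.put(StreamsConfig.APPLICATION_ID_CONFIG, "standby-task-creation-test"); // assumed id
        config.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, CLUSTER.bootstrapServers()); // embedded cluster assumed
        config.put(StreamsConfig.NUM_STANDBY_REPLICAS_CONFIG, 1);
        return config;
    }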
Use of org.apache.kafka.streams.Topology in project kafka by apache.
From the class KTableKTableForeignKeyJoinIntegrationTest, method shouldNotEmitTombstonesWhenDeletingNonExistingRecords:
@Test
public void shouldNotEmitTombstonesWhenDeletingNonExistingRecords() {
    final Topology topology = getTopology(streamsConfig, materialized ? "store" : null, leftJoin, rejoin);
    try (final TopologyTestDriver driver = new TopologyTestDriver(topology, streamsConfig)) {
        final TestInputTopic<String, String> left =
            driver.createInputTopic(LEFT_TABLE, new StringSerializer(), new StringSerializer());
        final TestOutputTopic<String, String> outputTopic =
            driver.createOutputTopic(OUTPUT, new StringDeserializer(), new StringDeserializer());
        final KeyValueStore<String, String> store = driver.getKeyValueStore("store");
        // Deleting a record that never existed doesn't need to emit tombstones.
        left.pipeInput("lhs1", (String) null);
        {
            assertThat(outputTopic.readKeyValuesToMap(), is(emptyMap()));
            if (materialized) {
                assertThat(asMap(store), is(emptyMap()));
            }
        }
    }
}
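The asMap helper used in these assertions isn't shown on this page; presumably it drains the KeyValueStore into an ordinary Map for comparison against the expected values. A minimal sketch:

    // Presumed shape of the asMap helper: copy all store entries into a TreeMap
    // so they can be compared against the expected maps in the assertions.
    private static Map<String, String> asMap(final KeyValueStore<String, String> store) {
        final Map<String, String> result = new TreeMap<>();
        try (final KeyValueIterator<String, String> it = store.all()) {
            it.forEachRemaining(kv -> result.put(kv.key, kv.value));
        }
        return result;
    }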
Use of org.apache.kafka.streams.Topology in project kafka by apache.
From the class KTableKTableForeignKeyJoinIntegrationTest, method joinShouldProduceNullsWhenValueHasNonMatchingForeignKey:
@Test
public void joinShouldProduceNullsWhenValueHasNonMatchingForeignKey() {
    final Topology topology = getTopology(streamsConfig, materialized ? "store" : null, leftJoin, rejoin);
    try (final TopologyTestDriver driver = new TopologyTestDriver(topology, streamsConfig)) {
        final TestInputTopic<String, String> right =
            driver.createInputTopic(RIGHT_TABLE, new StringSerializer(), new StringSerializer());
        final TestInputTopic<String, String> left =
            driver.createInputTopic(LEFT_TABLE, new StringSerializer(), new StringSerializer());
        final TestOutputTopic<String, String> outputTopic =
            driver.createOutputTopic(OUTPUT, new StringDeserializer(), new StringDeserializer());
        final KeyValueStore<String, String> store = driver.getKeyValueStore("store");
        left.pipeInput("lhs1", "lhsValue1|rhs1");
        // There is no output for a new inner join on a non-existent FK;
        // the left join, of course, emits the half-joined output.
        assertThat(outputTopic.readKeyValuesToMap(), is(leftJoin ? mkMap(mkEntry("lhs1", "(lhsValue1|rhs1,null)")) : emptyMap()));
        if (materialized) {
            assertThat(asMap(store), is(leftJoin ? mkMap(mkEntry("lhs1", "(lhsValue1|rhs1,null)")) : emptyMap()));
        }
        // "Moving" our subscription to another non-existent FK results in an unnecessary tombstone for the inner
        // join, since it is impossible to know whether the prior FK existed or not (and thus whether any results
        // have previously been emitted). The left join emits a _necessary_ update (since the lhs record has
        // actually changed).
        left.pipeInput("lhs1", "lhsValue1|rhs2");
        assertThat(outputTopic.readKeyValuesToMap(), is(mkMap(mkEntry("lhs1", leftJoin ? "(lhsValue1|rhs2,null)" : null))));
        if (materialized) {
            assertThat(asMap(store), is(leftJoin ? mkMap(mkEntry("lhs1", "(lhsValue1|rhs2,null)")) : emptyMap()));
        }
        // Of course, moving it again to yet another non-existent FK has the same effect.
        left.pipeInput("lhs1", "lhsValue1|rhs3");
        assertThat(outputTopic.readKeyValuesToMap(), is(mkMap(mkEntry("lhs1", leftJoin ? "(lhsValue1|rhs3,null)" : null))));
        if (materialized) {
            assertThat(asMap(store), is(leftJoin ? mkMap(mkEntry("lhs1", "(lhsValue1|rhs3,null)")) : emptyMap()));
        }
        // Add an RHS record now, so that we can demonstrate "moving" from a non-existent FK to an existing one.
        // This RHS key was previously referenced, but it's not referenced now, so adding this record should
        // result in no changes whatsoever.
        right.pipeInput("rhs1", "rhsValue1");
        assertThat(outputTopic.readKeyValuesToMap(), is(emptyMap()));
        if (materialized) {
            assertThat(asMap(store), is(leftJoin ? mkMap(mkEntry("lhs1", "(lhsValue1|rhs3,null)")) : emptyMap()));
        }
        // Now we change to an FK that exists and see the join complete.
        left.pipeInput("lhs1", "lhsValue1|rhs1");
        assertThat(outputTopic.readKeyValuesToMap(), is(mkMap(mkEntry("lhs1", "(lhsValue1|rhs1,rhsValue1)"))));
        if (materialized) {
            assertThat(asMap(store), is(mkMap(mkEntry("lhs1", "(lhsValue1|rhs1,rhsValue1)"))));
        }
        // But if we update it again to a non-existent one, we get a tombstone for the inner join, and the
        // left join updates appropriately.
        left.pipeInput("lhs1", "lhsValue1|rhs2");
        assertThat(outputTopic.readKeyValuesToMap(), is(mkMap(mkEntry("lhs1", leftJoin ? "(lhsValue1|rhs2,null)" : null))));
        if (materialized) {
            assertThat(asMap(store), is(leftJoin ? mkMap(mkEntry("lhs1", "(lhsValue1|rhs2,null)")) : emptyMap()));
        }
    }
}
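The mkMap and mkEntry calls throughout these assertions are Kafka's own helpers from org.apache.kafka.common.utils.Utils for building expected-value maps inline. For reference, their usage is equivalent to populating a map by hand:

    import static org.apache.kafka.common.utils.Utils.mkEntry;
    import static org.apache.kafka.common.utils.Utils.mkMap;

    // mkMap(mkEntry(k, v), ...) is shorthand for building a small Map:
    final Map<String, String> expected = mkMap(mkEntry("lhs1", "(lhsValue1|rhs1,rhsValue1)"));
    // equivalent to:
    final Map<String, String> verbose = new HashMap<>();
    verbose.put("lhs1", "(lhsValue1|rhs1,rhsValue1)");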