Use of org.apache.kafka.streams.kstream.ValueJoiner in project kafka by apache.
From the class KStreamImplTest, method shouldPreserveSerdesForOperators.
// specifically testing the deprecated variant
@SuppressWarnings({ "rawtypes", "deprecation" })
@Test
public void shouldPreserveSerdesForOperators() {
final StreamsBuilder builder = new StreamsBuilder();
final KStream<String, String> stream1 = builder.stream(Collections.singleton("topic-1"), stringConsumed);
final KTable<String, String> table1 = builder.table("topic-2", stringConsumed);
final GlobalKTable<String, String> table2 = builder.globalTable("topic-2", stringConsumed);
final ConsumedInternal<String, String> consumedInternal = new ConsumedInternal<>(stringConsumed);
final KeyValueMapper<String, String, String> selector = (key, value) -> key;
final KeyValueMapper<String, String, Iterable<KeyValue<String, String>>> flatSelector = (key, value) -> Collections.singleton(new KeyValue<>(key, value));
final ValueMapper<String, String> mapper = value -> value;
final ValueMapper<String, Iterable<String>> flatMapper = Collections::singleton;
final ValueJoiner<String, String, String> joiner = (value1, value2) -> value1;
assertEquals(((AbstractStream) stream1.filter((key, value) -> false)).keySerde(), consumedInternal.keySerde());
assertEquals(((AbstractStream) stream1.filter((key, value) -> false)).valueSerde(), consumedInternal.valueSerde());
assertEquals(((AbstractStream) stream1.filterNot((key, value) -> false)).keySerde(), consumedInternal.keySerde());
assertEquals(((AbstractStream) stream1.filterNot((key, value) -> false)).valueSerde(), consumedInternal.valueSerde());
assertNull(((AbstractStream) stream1.selectKey(selector)).keySerde());
assertEquals(((AbstractStream) stream1.selectKey(selector)).valueSerde(), consumedInternal.valueSerde());
assertNull(((AbstractStream) stream1.map(KeyValue::new)).keySerde());
assertNull(((AbstractStream) stream1.map(KeyValue::new)).valueSerde());
assertEquals(((AbstractStream) stream1.mapValues(mapper)).keySerde(), consumedInternal.keySerde());
assertNull(((AbstractStream) stream1.mapValues(mapper)).valueSerde());
assertNull(((AbstractStream) stream1.flatMap(flatSelector)).keySerde());
assertNull(((AbstractStream) stream1.flatMap(flatSelector)).valueSerde());
assertEquals(((AbstractStream) stream1.flatMapValues(flatMapper)).keySerde(), consumedInternal.keySerde());
assertNull(((AbstractStream) stream1.flatMapValues(flatMapper)).valueSerde());
assertNull(((AbstractStream) stream1.transform(transformerSupplier)).keySerde());
assertNull(((AbstractStream) stream1.transform(transformerSupplier)).valueSerde());
assertEquals(((AbstractStream) stream1.transformValues(valueTransformerSupplier)).keySerde(), consumedInternal.keySerde());
assertNull(((AbstractStream) stream1.transformValues(valueTransformerSupplier)).valueSerde());
assertNull(((AbstractStream) stream1.merge(stream1)).keySerde());
assertNull(((AbstractStream) stream1.merge(stream1)).valueSerde());
assertEquals(((AbstractStream) stream1.through("topic-3")).keySerde(), consumedInternal.keySerde());
assertEquals(((AbstractStream) stream1.through("topic-3")).valueSerde(), consumedInternal.valueSerde());
assertEquals(((AbstractStream) stream1.through("topic-3", Produced.with(mySerde, mySerde))).keySerde(), mySerde);
assertEquals(((AbstractStream) stream1.through("topic-3", Produced.with(mySerde, mySerde))).valueSerde(), mySerde);
assertEquals(((AbstractStream) stream1.repartition()).keySerde(), consumedInternal.keySerde());
assertEquals(((AbstractStream) stream1.repartition()).valueSerde(), consumedInternal.valueSerde());
assertEquals(((AbstractStream) stream1.repartition(Repartitioned.with(mySerde, mySerde))).keySerde(), mySerde);
assertEquals(((AbstractStream) stream1.repartition(Repartitioned.with(mySerde, mySerde))).valueSerde(), mySerde);
assertEquals(((AbstractStream) stream1.groupByKey()).keySerde(), consumedInternal.keySerde());
assertEquals(((AbstractStream) stream1.groupByKey()).valueSerde(), consumedInternal.valueSerde());
assertEquals(((AbstractStream) stream1.groupByKey(Grouped.with(mySerde, mySerde))).keySerde(), mySerde);
assertEquals(((AbstractStream) stream1.groupByKey(Grouped.with(mySerde, mySerde))).valueSerde(), mySerde);
assertNull(((AbstractStream) stream1.groupBy(selector)).keySerde());
assertEquals(((AbstractStream) stream1.groupBy(selector)).valueSerde(), consumedInternal.valueSerde());
assertEquals(((AbstractStream) stream1.groupBy(selector, Grouped.with(mySerde, mySerde))).keySerde(), mySerde);
assertEquals(((AbstractStream) stream1.groupBy(selector, Grouped.with(mySerde, mySerde))).valueSerde(), mySerde);
assertNull(((AbstractStream) stream1.join(stream1, joiner, JoinWindows.of(Duration.ofMillis(100L)))).keySerde());
assertNull(((AbstractStream) stream1.join(stream1, joiner, JoinWindows.of(Duration.ofMillis(100L)))).valueSerde());
assertEquals(((AbstractStream) stream1.join(stream1, joiner, JoinWindows.of(Duration.ofMillis(100L)), StreamJoined.with(mySerde, mySerde, mySerde))).keySerde(), mySerde);
assertNull(((AbstractStream) stream1.join(stream1, joiner, JoinWindows.of(Duration.ofMillis(100L)), StreamJoined.with(mySerde, mySerde, mySerde))).valueSerde());
assertNull(((AbstractStream) stream1.leftJoin(stream1, joiner, JoinWindows.of(Duration.ofMillis(100L)))).keySerde());
assertNull(((AbstractStream) stream1.leftJoin(stream1, joiner, JoinWindows.of(Duration.ofMillis(100L)))).valueSerde());
assertEquals(((AbstractStream) stream1.leftJoin(stream1, joiner, JoinWindows.of(Duration.ofMillis(100L)), StreamJoined.with(mySerde, mySerde, mySerde))).keySerde(), mySerde);
assertNull(((AbstractStream) stream1.leftJoin(stream1, joiner, JoinWindows.of(Duration.ofMillis(100L)), StreamJoined.with(mySerde, mySerde, mySerde))).valueSerde());
assertNull(((AbstractStream) stream1.outerJoin(stream1, joiner, JoinWindows.of(Duration.ofMillis(100L)))).keySerde());
assertNull(((AbstractStream) stream1.outerJoin(stream1, joiner, JoinWindows.of(Duration.ofMillis(100L)))).valueSerde());
assertEquals(((AbstractStream) stream1.outerJoin(stream1, joiner, JoinWindows.of(Duration.ofMillis(100L)), StreamJoined.with(mySerde, mySerde, mySerde))).keySerde(), mySerde);
assertNull(((AbstractStream) stream1.outerJoin(stream1, joiner, JoinWindows.of(Duration.ofMillis(100L)), StreamJoined.with(mySerde, mySerde, mySerde))).valueSerde());
assertEquals(((AbstractStream) stream1.join(table1, joiner)).keySerde(), consumedInternal.keySerde());
assertNull(((AbstractStream) stream1.join(table1, joiner)).valueSerde());
assertEquals(((AbstractStream) stream1.join(table1, joiner, Joined.with(mySerde, mySerde, mySerde))).keySerde(), mySerde);
assertNull(((AbstractStream) stream1.join(table1, joiner, Joined.with(mySerde, mySerde, mySerde))).valueSerde());
assertEquals(((AbstractStream) stream1.leftJoin(table1, joiner)).keySerde(), consumedInternal.keySerde());
assertNull(((AbstractStream) stream1.leftJoin(table1, joiner)).valueSerde());
assertEquals(((AbstractStream) stream1.leftJoin(table1, joiner, Joined.with(mySerde, mySerde, mySerde))).keySerde(), mySerde);
assertNull(((AbstractStream) stream1.leftJoin(table1, joiner, Joined.with(mySerde, mySerde, mySerde))).valueSerde());
assertEquals(((AbstractStream) stream1.join(table2, selector, joiner)).keySerde(), consumedInternal.keySerde());
assertNull(((AbstractStream) stream1.join(table2, selector, joiner)).valueSerde());
assertEquals(((AbstractStream) stream1.leftJoin(table2, selector, joiner)).keySerde(), consumedInternal.keySerde());
assertNull(((AbstractStream) stream1.leftJoin(table2, selector, joiner)).valueSerde());
}
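ValueJoiner is the single-method interface that computes the join result from the two joined values. As a minimal, self-contained sketch (separate from the test above, with illustrative topic names and explicit String serdes), a windowed stream-stream join that passes serdes through StreamJoined looks roughly like this, which is why the assertions above expect mySerde to be preserved on the join result:
import java.time.Duration;
import org.apache.kafka.common.serialization.Serdes;
import org.apache.kafka.streams.StreamsBuilder;
import org.apache.kafka.streams.kstream.Consumed;
import org.apache.kafka.streams.kstream.JoinWindows;
import org.apache.kafka.streams.kstream.KStream;
import org.apache.kafka.streams.kstream.StreamJoined;
import org.apache.kafka.streams.kstream.ValueJoiner;

public class ValueJoinerSketch {
    public static void main(final String[] args) {
        final StreamsBuilder builder = new StreamsBuilder();
        final KStream<String, String> left =
            builder.stream("left-topic", Consumed.with(Serdes.String(), Serdes.String()));
        final KStream<String, String> right =
            builder.stream("right-topic", Consumed.with(Serdes.String(), Serdes.String()));
        // The ValueJoiner combines the left and right values into the join result.
        final ValueJoiner<String, String, String> joiner = (l, r) -> l + "/" + r;
        // Supplying serdes via StreamJoined, rather than relying on config defaults,
        // is what allows downstream operators to inherit a known key serde.
        left.join(right, joiner, JoinWindows.of(Duration.ofMillis(100L)),
                StreamJoined.with(Serdes.String(), Serdes.String(), Serdes.String()))
            .to("joined-topic");
        System.out.println(builder.build().describe());
    }
}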
Use of org.apache.kafka.streams.kstream.ValueJoiner in project kafka by apache.
From the class KStreamImplTest, method shouldPropagateRepartitionFlagAfterGlobalKTableJoin.
@Test
public void shouldPropagateRepartitionFlagAfterGlobalKTableJoin() {
final StreamsBuilder builder = new StreamsBuilder();
final GlobalKTable<String, String> globalKTable = builder.globalTable("globalTopic");
final KeyValueMapper<String, String, String> kvMapper = (k, v) -> k + v;
final ValueJoiner<String, String, String> valueJoiner = (v1, v2) -> v1 + v2;
builder.<String, String>stream("topic").selectKey((k, v) -> v).join(globalKTable, kvMapper, valueJoiner).groupByKey().count();
final Pattern repartitionTopicPattern = Pattern.compile("Sink: .*-repartition");
final String topology = builder.build().describe().toString();
final Matcher matcher = repartitionTopicPattern.matcher(topology);
assertTrue(matcher.find());
final String match = matcher.group();
assertThat(match, notNullValue());
assertTrue(match.endsWith("repartition"));
}
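Note that the repartition topic asserted here is triggered by the selectKey before the join together with the groupByKey().count() after it; the GlobalKTable join itself needs no co-partitioning, because the KeyValueMapper derives the table lookup key per record. A hedged sketch of that plain lookup pattern, with made-up topic names, for comparison:
import org.apache.kafka.streams.StreamsBuilder;
import org.apache.kafka.streams.kstream.GlobalKTable;
import org.apache.kafka.streams.kstream.KStream;
import org.apache.kafka.streams.kstream.KeyValueMapper;
import org.apache.kafka.streams.kstream.ValueJoiner;

public class GlobalTableLookupSketch {
    public static void main(final String[] args) {
        final StreamsBuilder builder = new StreamsBuilder();
        // Orders keyed by order id; the global table maps product id -> product name (illustrative).
        final KStream<String, String> orders = builder.stream("orders");
        final GlobalKTable<String, String> products = builder.globalTable("products");
        // Derive the global-table lookup key from the stream record (here the value is
        // assumed to carry the product id), so no repartitioning of the stream is needed.
        final KeyValueMapper<String, String, String> productId = (orderKey, orderValue) -> orderValue;
        final ValueJoiner<String, String, String> enrich = (order, productName) -> order + ":" + productName;
        orders.join(products, productId, enrich).to("enriched-orders");
    }
}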
Use of org.apache.kafka.streams.kstream.ValueJoiner in project kafka by apache.
From the class StreamsGraphTest, method shouldBeAbleToBuildTopologyIncrementally.
// Test builds the topology in successive steps; only graph nodes not yet processed are written to the topology
@Test
public void shouldBeAbleToBuildTopologyIncrementally() {
final StreamsBuilder builder = new StreamsBuilder();
final KStream<String, String> stream = builder.stream("topic");
final KStream<String, String> streamII = builder.stream("other-topic");
final ValueJoiner<String, String, String> valueJoiner = (v, v2) -> v + v2;
final KStream<String, String> joinedStream = stream.join(streamII, valueJoiner, JoinWindows.of(ofMillis(5000)));
// build step one
assertEquals(expectedJoinedTopology, builder.build().describe().toString());
final KStream<String, String> filteredJoinStream = joinedStream.filter((k, v) -> v.equals("foo"));
// build step two
assertEquals(expectedJoinedFilteredTopology, builder.build().describe().toString());
filteredJoinStream.mapValues(v -> v + "some value").to("output-topic");
// build step three
assertEquals(expectedFullTopology, builder.build().describe().toString());
}
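The expectedJoinedTopology, expectedJoinedFilteredTopology, and expectedFullTopology strings are fixtures defined elsewhere in StreamsGraphTest. The behaviour the test relies on is that StreamsBuilder#build can be called repeatedly while operators are still being added, and each Topology#describe reflects everything added up to that point; a rough standalone sketch:
import static java.time.Duration.ofMillis;

import org.apache.kafka.streams.StreamsBuilder;
import org.apache.kafka.streams.kstream.JoinWindows;
import org.apache.kafka.streams.kstream.KStream;
import org.apache.kafka.streams.kstream.ValueJoiner;

public class IncrementalBuildSketch {
    public static void main(final String[] args) {
        final StreamsBuilder builder = new StreamsBuilder();
        final KStream<String, String> left = builder.stream("topic");
        final KStream<String, String> right = builder.stream("other-topic");
        final ValueJoiner<String, String, String> joiner = (v1, v2) -> v1 + v2;
        final KStream<String, String> joined = left.join(right, joiner, JoinWindows.of(ofMillis(5000)));
        // First build: the description contains only the join sub-topology.
        System.out.println(builder.build().describe());
        joined.filter((k, v) -> !v.isEmpty()).to("output-topic");
        // Second build: the description now also contains the filter and the sink added above.
        System.out.println(builder.build().describe());
    }
}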
Use of org.apache.kafka.streams.kstream.ValueJoiner in project kafka by apache.
From the class StreamsPartitionAssignorTest, method shouldGenerateTasksForAllCreatedPartitions.
@Test
public void shouldGenerateTasksForAllCreatedPartitions() {
final StreamsBuilder streamsBuilder = new StreamsBuilder();
// KStream with 3 partitions
final KStream<Object, Object> stream1 = streamsBuilder.stream("topic1").map((KeyValueMapper<Object, Object, KeyValue<Object, Object>>) KeyValue::new);
// KTable with 4 partitions
final KTable<Object, Long> table1 = streamsBuilder.table("topic3").groupBy(KeyValue::new).count();
// joining the stream and the table
// this triggers the enforceCopartitioning() routine in the StreamsPartitionAssignor,
// forcing the stream.map to get repartitioned to a topic with four partitions.
stream1.join(table1, (ValueJoiner<Object, Object, Void>) (value1, value2) -> null);
final String client = "client1";
builder = TopologyWrapper.getInternalTopologyBuilder(streamsBuilder.build());
topologyMetadata = new TopologyMetadata(builder, new StreamsConfig(configProps()));
adminClient = createMockAdminClientForAssignor(getTopicPartitionOffsetsMap(
    asList(APPLICATION_ID + "-topic3-STATE-STORE-0000000002-changelog",
        APPLICATION_ID + "-KTABLE-AGGREGATE-STATE-STORE-0000000006-changelog"),
    asList(4, 4)));
final MockInternalTopicManager mockInternalTopicManager = configureDefault();
subscriptions.put(client, new Subscription(asList("topic1", "topic3"), defaultSubscriptionInfo.encode()));
final Map<String, Assignment> assignment = partitionAssignor.assign(metadata, new GroupSubscription(subscriptions)).groupAssignment();
final Map<String, Integer> expectedCreatedInternalTopics = new HashMap<>();
expectedCreatedInternalTopics.put(APPLICATION_ID + "-KTABLE-AGGREGATE-STATE-STORE-0000000006-repartition", 4);
expectedCreatedInternalTopics.put(APPLICATION_ID + "-KTABLE-AGGREGATE-STATE-STORE-0000000006-changelog", 4);
expectedCreatedInternalTopics.put(APPLICATION_ID + "-topic3-STATE-STORE-0000000002-changelog", 4);
expectedCreatedInternalTopics.put(APPLICATION_ID + "-KSTREAM-MAP-0000000001-repartition", 4);
// check if all internal topics were created as expected
assertThat(mockInternalTopicManager.readyTopics, equalTo(expectedCreatedInternalTopics));
final List<TopicPartition> expectedAssignment = asList(
    new TopicPartition("topic1", 0),
    new TopicPartition("topic1", 1),
    new TopicPartition("topic1", 2),
    new TopicPartition("topic3", 0),
    new TopicPartition("topic3", 1),
    new TopicPartition("topic3", 2),
    new TopicPartition("topic3", 3),
    new TopicPartition(APPLICATION_ID + "-KTABLE-AGGREGATE-STATE-STORE-0000000006-repartition", 0),
    new TopicPartition(APPLICATION_ID + "-KTABLE-AGGREGATE-STATE-STORE-0000000006-repartition", 1),
    new TopicPartition(APPLICATION_ID + "-KTABLE-AGGREGATE-STATE-STORE-0000000006-repartition", 2),
    new TopicPartition(APPLICATION_ID + "-KTABLE-AGGREGATE-STATE-STORE-0000000006-repartition", 3),
    new TopicPartition(APPLICATION_ID + "-KSTREAM-MAP-0000000001-repartition", 0),
    new TopicPartition(APPLICATION_ID + "-KSTREAM-MAP-0000000001-repartition", 1),
    new TopicPartition(APPLICATION_ID + "-KSTREAM-MAP-0000000001-repartition", 2),
    new TopicPartition(APPLICATION_ID + "-KSTREAM-MAP-0000000001-repartition", 3));
// check if we created a task for all expected topicPartitions.
assertThat(new HashSet<>(assignment.get(client).partitions()), equalTo(new HashSet<>(expectedAssignment)));
}
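The -KSTREAM-MAP-0000000001-repartition topic is created with four partitions because a stream-table join requires co-partitioned inputs: map may change the key, so Streams repartitions the stream side to match the table's four partitions (the enforceCopartitioning() step mentioned in the comment above). When the target partition count is known up front, the repartitioning can also be requested explicitly; a hedged sketch with illustrative topic names and an assumed partition count of four:
import org.apache.kafka.common.serialization.Serdes;
import org.apache.kafka.streams.StreamsBuilder;
import org.apache.kafka.streams.kstream.KStream;
import org.apache.kafka.streams.kstream.KTable;
import org.apache.kafka.streams.kstream.Repartitioned;
import org.apache.kafka.streams.kstream.ValueJoiner;

public class ExplicitRepartitionSketch {
    public static void main(final String[] args) {
        final StreamsBuilder builder = new StreamsBuilder();
        final KStream<String, String> stream = builder.stream("topic1");
        final KTable<String, String> table = builder.table("topic3");
        final ValueJoiner<String, String, String> joiner = (v1, v2) -> v1 + v2;
        // After a key-changing operation, explicitly repartition to the table's partition count
        // so the subsequent stream-table join sees co-partitioned inputs.
        stream.selectKey((k, v) -> v)
            .repartition(Repartitioned.<String, String>as("pre-join")
                .withKeySerde(Serdes.String())
                .withValueSerde(Serdes.String())
                .withNumberOfPartitions(4))
            .join(table, joiner)
            .to("joined-output");
    }
}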
Use of org.apache.kafka.streams.kstream.ValueJoiner in project kafka by apache.
From the class StreamsPartitionAssignorTest, method shouldNotLoopInfinitelyOnMissingMetadataAndShouldNotCreateRelatedTasks.
@Test
public void shouldNotLoopInfinitelyOnMissingMetadataAndShouldNotCreateRelatedTasks() {
final StreamsBuilder streamsBuilder = new StreamsBuilder();
final KStream<Object, Object> stream1 = streamsBuilder.stream("topic1").selectKey((key, value) -> null).groupByKey().count(Materialized.as("count")).toStream().map((KeyValueMapper<Object, Long, KeyValue<Object, Object>>) (key, value) -> null);
streamsBuilder.stream("unknownTopic").selectKey((key, value) -> null).join(stream1, (ValueJoiner<Object, Object, Void>) (value1, value2) -> null, JoinWindows.of(ofMillis(0)));
final String client = "client1";
builder = TopologyWrapper.getInternalTopologyBuilder(streamsBuilder.build());
final MockInternalTopicManager mockInternalTopicManager = configureDefault();
subscriptions.put(client, new Subscription(Collections.singletonList("unknownTopic"), defaultSubscriptionInfo.encode()));
final Map<String, Assignment> assignment = partitionAssignor.assign(metadata, new GroupSubscription(subscriptions)).groupAssignment();
assertThat(mockInternalTopicManager.readyTopics.isEmpty(), equalTo(true));
assertThat(assignment.get(client).partitions().isEmpty(), equalTo(true));
}