Use of org.apache.kafka.streams.KeyValue in project apache-kafka-on-k8s by banzaicloud.
In class RocksDBSessionStoreTest, method shouldFindValuesWithinMergingSessionWindowRange.
@Test
public void shouldFindValuesWithinMergingSessionWindowRange() {
    final String key = "a";
    sessionStore.put(new Windowed<>(key, new SessionWindow(0L, 0L)), 1L);
    sessionStore.put(new Windowed<>(key, new SessionWindow(1000L, 1000L)), 2L);
    final KeyValueIterator<Windowed<String>, Long> results = sessionStore.findSessions(key, -1, 1000L);
    final List<KeyValue<Windowed<String>, Long>> expected = Arrays.asList(
        KeyValue.pair(new Windowed<>(key, new SessionWindow(0L, 0L)), 1L),
        KeyValue.pair(new Windowed<>(key, new SessionWindow(1000L, 1000L)), 2L));
    assertEquals(expected, toList(results));
}
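The toList call is not shown in this excerpt; it is presumably a small test utility that drains the KeyValueIterator into a List so it can be compared with assertEquals. A minimal sketch of such a helper (the name and placement are assumptions, not taken from the original test class):

private static <K, V> List<KeyValue<K, V>> toList(final KeyValueIterator<K, V> iterator) {
    // Drain the iterator into a list, then close it to release the underlying store resources.
    final List<KeyValue<K, V>> results = new ArrayList<>();
    while (iterator.hasNext()) {
        results.add(iterator.next());
    }
    iterator.close();
    return results;
}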
Use of org.apache.kafka.streams.KeyValue in project apache-kafka-on-k8s by banzaicloud.
In class StreamsPartitionAssignorTest, method shouldNotLoopInfinitelyOnMissingMetadataAndShouldNotCreateRelatedTasks.
@Test
public void shouldNotLoopInfinitelyOnMissingMetadataAndShouldNotCreateRelatedTasks() throws Exception {
    final StreamsBuilder builder = new StreamsBuilder();
    final InternalTopologyBuilder internalTopologyBuilder = StreamsBuilderTest.internalTopologyBuilder(builder);
    internalTopologyBuilder.setApplicationId(applicationId);
    KStream<Object, Object> stream1 = builder.stream("topic1")
        .selectKey(new KeyValueMapper<Object, Object, Object>() {
            @Override
            public Object apply(final Object key, final Object value) {
                return null;
            }
        })
        .groupByKey()
        .count(Materialized.<Object, Long, KeyValueStore<Bytes, byte[]>>as("count"))
        .toStream()
        .map(new KeyValueMapper<Object, Long, KeyValue<Object, Object>>() {
            @Override
            public KeyValue<Object, Object> apply(final Object key, final Long value) {
                return null;
            }
        });
    builder.stream("unknownTopic")
        .selectKey(new KeyValueMapper<Object, Object, Object>() {
            @Override
            public Object apply(final Object key, final Object value) {
                return null;
            }
        })
        .join(stream1, new ValueJoiner() {
            @Override
            public Object apply(final Object value1, final Object value2) {
                return null;
            }
        }, JoinWindows.of(0));
    final UUID uuid = UUID.randomUUID();
    final String client = "client1";
    mockTaskManager(Collections.<TaskId>emptySet(), Collections.<TaskId>emptySet(), UUID.randomUUID(), internalTopologyBuilder);
    configurePartitionAssignor(Collections.<String, Object>emptyMap());
    final MockInternalTopicManager mockInternalTopicManager = new MockInternalTopicManager(streamsConfig, mockClientSupplier.restoreConsumer);
    partitionAssignor.setInternalTopicManager(mockInternalTopicManager);
    final Map<String, PartitionAssignor.Subscription> subscriptions = new HashMap<>();
    final Set<TaskId> emptyTasks = Collections.emptySet();
    subscriptions.put(client, new PartitionAssignor.Subscription(
        Collections.singletonList("unknownTopic"),
        new SubscriptionInfo(uuid, emptyTasks, emptyTasks, userEndPoint).encode()));
    final Map<String, PartitionAssignor.Assignment> assignment = partitionAssignor.assign(metadata, subscriptions);
    final Map<String, Integer> expectedCreatedInternalTopics = new HashMap<>();
    expectedCreatedInternalTopics.put(applicationId + "-count-repartition", 3);
    expectedCreatedInternalTopics.put(applicationId + "-count-changelog", 3);
    assertThat(mockInternalTopicManager.readyTopics, equalTo(expectedCreatedInternalTopics));
    final List<TopicPartition> expectedAssignment = Arrays.asList(
        new TopicPartition("topic1", 0),
        new TopicPartition("topic1", 1),
        new TopicPartition("topic1", 2),
        new TopicPartition(applicationId + "-count-repartition", 0),
        new TopicPartition(applicationId + "-count-repartition", 1),
        new TopicPartition(applicationId + "-count-repartition", 2));
    assertThat(new HashSet<>(assignment.get(client).partitions()), equalTo(new HashSet<>(expectedAssignment)));
}
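The metadata passed to partitionAssignor.assign(...) is defined elsewhere in the test class. Judging from the expected assignment (partitions 0 to 2 of topic1 and nothing for unknownTopic), it is a Cluster that knows about topic1 but deliberately has no entry for unknownTopic, which is what exercises the missing-metadata path. A hedged sketch of how such metadata could be built (the node setup and cluster id are illustrative assumptions, not the test's actual fixture):

final Node node = new Node(0, "localhost", 9092);
final List<PartitionInfo> partitionInfos = Arrays.asList(
    new PartitionInfo("topic1", 0, node, new Node[]{node}, new Node[]{node}),
    new PartitionInfo("topic1", 1, node, new Node[]{node}, new Node[]{node}),
    new PartitionInfo("topic1", 2, node, new Node[]{node}, new Node[]{node}));
// "unknownTopic" is intentionally absent, so the assignor sees missing metadata for it.
final Cluster metadata = new Cluster("cluster", Collections.singletonList(node), partitionInfos,
    Collections.<String>emptySet(), Collections.<String>emptySet());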
Use of org.apache.kafka.streams.KeyValue in project apache-kafka-on-k8s by banzaicloud.
In class StreamsPartitionAssignorTest, method shouldGenerateTasksForAllCreatedPartitions.
@Test
public void shouldGenerateTasksForAllCreatedPartitions() throws Exception {
    final StreamsBuilder builder = new StreamsBuilder();
    final InternalTopologyBuilder internalTopologyBuilder = StreamsBuilderTest.internalTopologyBuilder(builder);
    internalTopologyBuilder.setApplicationId(applicationId);
    // KStream with 3 partitions
    KStream<Object, Object> stream1 = builder.stream("topic1")
        .map(new KeyValueMapper<Object, Object, KeyValue<Object, Object>>() {
            @Override
            public KeyValue<Object, Object> apply(final Object key, final Object value) {
                return new KeyValue<>(key, value);
            }
        });
    // KTable with 4 partitions
    KTable<Object, Long> table1 = builder.table("topic3")
        .groupBy(new KeyValueMapper<Object, Object, KeyValue<Object, Object>>() {
            @Override
            public KeyValue<Object, Object> apply(final Object key, final Object value) {
                return new KeyValue<>(key, value);
            }
        })
        .count();
    // joining the stream and the table
    // this triggers the enforceCopartitioning() routine in the StreamsPartitionAssignor,
    // forcing the stream.map to get repartitioned to a topic with four partitions.
    stream1.join(table1, new ValueJoiner() {
        @Override
        public Object apply(final Object value1, final Object value2) {
            return null;
        }
    });
    final UUID uuid = UUID.randomUUID();
    final String client = "client1";
    mockTaskManager(Collections.<TaskId>emptySet(), Collections.<TaskId>emptySet(), UUID.randomUUID(), internalTopologyBuilder);
    configurePartitionAssignor(Collections.<String, Object>emptyMap());
    final MockInternalTopicManager mockInternalTopicManager = new MockInternalTopicManager(streamsConfig, mockClientSupplier.restoreConsumer);
    partitionAssignor.setInternalTopicManager(mockInternalTopicManager);
    final Map<String, PartitionAssignor.Subscription> subscriptions = new HashMap<>();
    final Set<TaskId> emptyTasks = Collections.emptySet();
    subscriptions.put(client, new PartitionAssignor.Subscription(
        Utils.mkList("topic1", "topic3"),
        new SubscriptionInfo(uuid, emptyTasks, emptyTasks, userEndPoint).encode()));
    final Map<String, PartitionAssignor.Assignment> assignment = partitionAssignor.assign(metadata, subscriptions);
    final Map<String, Integer> expectedCreatedInternalTopics = new HashMap<>();
    expectedCreatedInternalTopics.put(applicationId + "-KTABLE-AGGREGATE-STATE-STORE-0000000006-repartition", 4);
    expectedCreatedInternalTopics.put(applicationId + "-KTABLE-AGGREGATE-STATE-STORE-0000000006-changelog", 4);
    expectedCreatedInternalTopics.put(applicationId + "-KSTREAM-MAP-0000000001-repartition", 4);
    expectedCreatedInternalTopics.put(applicationId + "-topic3-STATE-STORE-0000000002-changelog", 4);
    // check if all internal topics were created as expected
    assertThat(mockInternalTopicManager.readyTopics, equalTo(expectedCreatedInternalTopics));
    final List<TopicPartition> expectedAssignment = Arrays.asList(
        new TopicPartition("topic1", 0),
        new TopicPartition("topic1", 1),
        new TopicPartition("topic1", 2),
        new TopicPartition("topic3", 0),
        new TopicPartition("topic3", 1),
        new TopicPartition("topic3", 2),
        new TopicPartition("topic3", 3),
        new TopicPartition(applicationId + "-KTABLE-AGGREGATE-STATE-STORE-0000000006-repartition", 0),
        new TopicPartition(applicationId + "-KTABLE-AGGREGATE-STATE-STORE-0000000006-repartition", 1),
        new TopicPartition(applicationId + "-KTABLE-AGGREGATE-STATE-STORE-0000000006-repartition", 2),
        new TopicPartition(applicationId + "-KTABLE-AGGREGATE-STATE-STORE-0000000006-repartition", 3),
        new TopicPartition(applicationId + "-KSTREAM-MAP-0000000001-repartition", 0),
        new TopicPartition(applicationId + "-KSTREAM-MAP-0000000001-repartition", 1),
        new TopicPartition(applicationId + "-KSTREAM-MAP-0000000001-repartition", 2),
        new TopicPartition(applicationId + "-KSTREAM-MAP-0000000001-repartition", 3));
    // check if we created a task for all expected topicPartitions.
    assertThat(new HashSet<>(assignment.get(client).partitions()), equalTo(new HashSet<>(expectedAssignment)));
}
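The anonymous KeyValueMapper and ValueJoiner classes above follow the pre-Java-8 style used throughout this test class. Purely as a readability sketch (not part of the original test), the same topology can be written with lambdas inside the same method; the behaviour is identical:

final KStream<Object, Object> stream1 = builder.stream("topic1")
    .map((key, value) -> new KeyValue<>(key, value));
final KTable<Object, Long> table1 = builder.table("topic3")
    .groupBy((key, value) -> new KeyValue<>(key, value))
    .count();
// Declaring the joiner with explicit type parameters keeps inference simple for the null-returning lambda.
final ValueJoiner<Object, Long, Object> joiner = (value1, value2) -> null;
stream1.join(table1, joiner);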
Use of org.apache.kafka.streams.KeyValue in project apache-kafka-on-k8s by banzaicloud.
In class CachingSessionStoreTest, method shouldNotForwardChangedValuesDuringFlushWhenSendOldValuesDisabled.
@Test
public void shouldNotForwardChangedValuesDuringFlushWhenSendOldValuesDisabled() {
    final Windowed<Bytes> a = new Windowed<>(keyA, new SessionWindow(0, 0));
    final Windowed<String> aDeserialized = new Windowed<>("a", new SessionWindow(0, 0));
    final List<KeyValue<Windowed<String>, Change<String>>> flushed = new ArrayList<>();
    cachingStore.setFlushListener(new CacheFlushListener<Windowed<String>, String>() {
        @Override
        public void apply(final Windowed<String> key, final String newValue, final String oldValue) {
            flushed.add(KeyValue.pair(key, new Change<>(newValue, oldValue)));
        }
    }, false);
    cachingStore.put(a, "1".getBytes());
    cachingStore.flush();
    cachingStore.put(a, "2".getBytes());
    cachingStore.flush();
    assertEquals(flushed, Arrays.asList(
        KeyValue.pair(aDeserialized, new Change<>("1", null)),
        KeyValue.pair(aDeserialized, new Change<>("2", null))));
}
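CacheFlushListener exposes a single apply method in this code base, so the registration can also be written as a lambda. This is only a readability sketch of the same listener, not a change in behaviour:

cachingStore.setFlushListener(
    (key, newValue, oldValue) -> flushed.add(KeyValue.pair(key, new Change<>(newValue, oldValue))),
    false);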
Use of org.apache.kafka.streams.KeyValue in project apache-kafka-on-k8s by banzaicloud.
In class CachingSessionStoreTest, method shouldForwardChangedValuesDuringFlushWhenSendOldValuesDisabledNewRecordIsNull.
@Test
public void shouldForwardChangedValuesDuringFlushWhenSendOldValuesDisabledNewRecordIsNull() {
    final Windowed<Bytes> a = new Windowed<>(keyA, new SessionWindow(0, 0));
    final Windowed<String> aDeserialized = new Windowed<>("a", new SessionWindow(0, 0));
    final List<KeyValue<Windowed<String>, Change<String>>> flushed = new ArrayList<>();
    cachingStore.setFlushListener(new CacheFlushListener<Windowed<String>, String>() {
        @Override
        public void apply(final Windowed<String> key, final String newValue, final String oldValue) {
            flushed.add(KeyValue.pair(key, new Change<>(newValue, oldValue)));
        }
    }, false);
    cachingStore.put(a, "1".getBytes());
    cachingStore.flush();
    cachingStore.put(a, "2".getBytes());
    cachingStore.flush();
    cachingStore.remove(a);
    cachingStore.flush();
    assertEquals(flushed, Arrays.asList(
        KeyValue.pair(aDeserialized, new Change<>("1", null)),
        KeyValue.pair(aDeserialized, new Change<>("2", null)),
        KeyValue.pair(aDeserialized, new Change<>(null, "2"))));
}
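For contrast, companion tests in CachingSessionStoreTest register the listener with sendOldValues enabled. Assuming the caching layer then looks up the previous value before forwarding (that behaviour is not shown in this excerpt), the same sequence of puts, remove, and flushes would be expected to forward old values as well. A rough sketch, where flushListener stands for the same anonymous listener as above (the variable name is just for illustration):

cachingStore.setFlushListener(flushListener, true);
// After put "1" + flush, put "2" + flush, remove + flush, the forwarded changes would be expected as
// Change<>("1", null), Change<>("2", "1") and Change<>(null, "2"), instead of null old values as above.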