Use of org.apache.kafka.streams.kstream.internals.ConsumedInternal in the apache-kafka-on-k8s project by banzaicloud.
The example below is the shouldUpdateStandbyTask method of the StreamThreadTest class.
@SuppressWarnings("unchecked")
@Test
public void shouldUpdateStandbyTask() {
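    // Topology under test: a count store ("count-one") backed by its own changelog topic,
    // and a table store ("table-two") whose changelog is replayed from its source topic (topic2).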
    final String storeName1 = "count-one";
    final String storeName2 = "table-two";
    final String changelogName = applicationId + "-" + storeName1 + "-changelog";
    final TopicPartition partition1 = new TopicPartition(changelogName, 1);
    final TopicPartition partition2 = t2p1;
    internalStreamsBuilder.stream(Collections.singleton(topic1), consumed)
        .groupByKey()
        .count(Materialized.<Object, Long, KeyValueStore<Bytes, byte[]>>as(storeName1));
    internalStreamsBuilder.table(topic2, new ConsumedInternal(),
        new MaterializedInternal(Materialized.as(storeName2), internalStreamsBuilder, ""));
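    // Create the thread and register the changelog partition metadata and offset bounds
    // on the mock restore consumer.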
    final StreamThread thread = createStreamThread(clientId, config, false);
    final MockConsumer<byte[], byte[]> restoreConsumer = clientSupplier.restoreConsumer;
    restoreConsumer.updatePartitions(changelogName,
        Collections.singletonList(new PartitionInfo(changelogName, 1, null, new Node[0], new Node[0])));
    restoreConsumer.assign(Utils.mkSet(partition1, partition2));
    restoreConsumer.updateEndOffsets(Collections.singletonMap(partition1, 10L));
    restoreConsumer.updateBeginningOffsets(Collections.singletonMap(partition1, 0L));
    restoreConsumer.updateEndOffsets(Collections.singletonMap(partition2, 10L));
    restoreConsumer.updateBeginningOffsets(Collections.singletonMap(partition2, 0L));
    // let store1 be restored from 0 to 10; let store2 be restored from 0 to the committed offset 5
    clientSupplier.consumer.assign(Utils.mkSet(partition2));
    clientSupplier.consumer.commitSync(Collections.singletonMap(partition2, new OffsetAndMetadata(5L, "")));
    for (long i = 0L; i < 10L; i++) {
        restoreConsumer.addRecord(new ConsumerRecord<>(changelogName, 1, i, ("K" + i).getBytes(), ("V" + i).getBytes()));
        restoreConsumer.addRecord(new ConsumerRecord<>(topic2, 1, i, ("K" + i).getBytes(), ("V" + i).getBytes()));
    }
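    // Trigger a rebalance that assigns only standby tasks (no active tasks) to this thread,
    // then run a single processing loop.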
    thread.setState(StreamThread.State.RUNNING);
    thread.rebalanceListener.onPartitionsRevoked(null);

    final Map<TaskId, Set<TopicPartition>> standbyTasks = new HashMap<>();
    // assign single partition
    standbyTasks.put(task1, Collections.singleton(t1p1));
    standbyTasks.put(task3, Collections.singleton(t2p1));
    thread.taskManager().setAssignmentMetadata(Collections.<TaskId, Set<TopicPartition>>emptyMap(), standbyTasks);
    thread.rebalanceListener.onPartitionsAssigned(Collections.<TopicPartition>emptyList());

    thread.runOnce(-1);
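    // store1 should have replayed all 10 changelog records; store2 stops at the committed offset (5),
    // so partition2 is paused and the 5 remaining records stay buffered in standbyRecords.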
    final StandbyTask standbyTask1 = thread.taskManager().standbyTask(partition1);
    final StandbyTask standbyTask2 = thread.taskManager().standbyTask(partition2);
    final KeyValueStore<Object, Long> store1 = (KeyValueStore<Object, Long>) standbyTask1.getStore(storeName1);
    final KeyValueStore<Object, Long> store2 = (KeyValueStore<Object, Long>) standbyTask2.getStore(storeName2);
    assertEquals(10L, store1.approximateNumEntries());
    assertEquals(5L, store2.approximateNumEntries());
    assertEquals(Collections.singleton(partition2), restoreConsumer.paused());
    assertEquals(1, thread.standbyRecords().size());
    assertEquals(5, thread.standbyRecords().get(partition2).size());
}
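For context, ConsumedInternal is the internal wrapper that Kafka Streams builds around the public Consumed options so builder code can read the configured serdes, timestamp extractor, and offset reset policy back out. The snippet below is a minimal sketch, not taken from the project; it assumes the 1.x package layout used by this fork (org.apache.kafka.streams.Consumed) and the ConsumedInternal(Consumed) copy constructor and keySerde()/valueSerde()/timestampExtractor() accessors of this internal class, which may change between versions.

import org.apache.kafka.common.serialization.Serdes;
import org.apache.kafka.streams.Consumed;
import org.apache.kafka.streams.kstream.internals.ConsumedInternal;

public class ConsumedInternalSketch {
    public static void main(final String[] args) {
        // Public API: the caller only sets consumption options.
        final Consumed<String, Long> consumed = Consumed.with(Serdes.String(), Serdes.Long());

        // Internal wrapper: builder code reads those options back through accessors.
        final ConsumedInternal<String, Long> internal = new ConsumedInternal<>(consumed);
        System.out.println(internal.keySerde());            // StringSerde
        System.out.println(internal.valueSerde());          // LongSerde
        System.out.println(internal.timestampExtractor());  // null unless one was configured
    }
}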