
Example 1 with GroupSubscription

Use of org.apache.kafka.clients.consumer.ConsumerPartitionAssignor.GroupSubscription in project kafka by apache.

The class StreamsPartitionAssignorTest, method shouldThrowTaskAssignmentExceptionWhenUnableToResolvePartitionCount:

@Test
public void shouldThrowTaskAssignmentExceptionWhenUnableToResolvePartitionCount() {
    builder = new CorruptedInternalTopologyBuilder();
    topologyMetadata = new TopologyMetadata(builder, new StreamsConfig(configProps()));
    final InternalStreamsBuilder streamsBuilder = new InternalStreamsBuilder(builder);
    final KStream<String, String> inputTopic = streamsBuilder.stream(singleton("topic1"), new ConsumedInternal<>());
    final KTable<String, String> inputTable = streamsBuilder.table("topic2", new ConsumedInternal<>(), new MaterializedInternal<>(Materialized.as("store")));
    inputTopic.groupBy((k, v) -> k, Grouped.with("GroupName", Serdes.String(), Serdes.String()))
        .windowedBy(TimeWindows.of(Duration.ofMinutes(10)))
        .aggregate(() -> "", (k, v, a) -> a + k)
        .leftJoin(inputTable, v -> v, (x, y) -> x + y);
    streamsBuilder.buildAndOptimizeTopology();
    configureDefault();
    subscriptions.put("consumer", new Subscription(singletonList("topic"), defaultSubscriptionInfo.encode()));
    final Map<String, Assignment> assignments = partitionAssignor.assign(metadata, new GroupSubscription(subscriptions)).groupAssignment();
    assertThat(AssignmentInfo.decode(assignments.get("consumer").userData()).errCode(), equalTo(AssignorError.ASSIGNMENT_ERROR.code()));
}
Also used : InternalStreamsBuilder(org.apache.kafka.streams.kstream.internals.InternalStreamsBuilder) ConsumedInternal(org.apache.kafka.streams.kstream.internals.ConsumedInternal) MaterializedInternal(org.apache.kafka.streams.kstream.internals.MaterializedInternal) Materialized(org.apache.kafka.streams.kstream.Materialized) KStream(org.apache.kafka.streams.kstream.KStream) KTable(org.apache.kafka.streams.kstream.KTable) Grouped(org.apache.kafka.streams.kstream.Grouped) TimeWindows(org.apache.kafka.streams.kstream.TimeWindows) Serdes(org.apache.kafka.common.serialization.Serdes) Duration(java.time.Duration) Collections.singleton(java.util.Collections.singleton) Collections.singletonList(java.util.Collections.singletonList) Map(java.util.Map) StreamsConfig(org.apache.kafka.streams.StreamsConfig) Assignment(org.apache.kafka.clients.consumer.ConsumerPartitionAssignor.Assignment) GroupSubscription(org.apache.kafka.clients.consumer.ConsumerPartitionAssignor.GroupSubscription) Subscription(org.apache.kafka.clients.consumer.ConsumerPartitionAssignor.Subscription) AssignmentInfo(org.apache.kafka.streams.processor.internals.assignment.AssignmentInfo) AssignorError(org.apache.kafka.streams.processor.internals.assignment.AssignorError) MatcherAssert.assertThat(org.hamcrest.MatcherAssert.assertThat) CoreMatchers.equalTo(org.hamcrest.CoreMatchers.equalTo) Test(org.junit.Test)
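
The call pattern exercised by every example on this page reduces to the sketch below. It is a minimal sketch, not code from the test class: it assumes an already configured StreamsPartitionAssignor called assignor, a Cluster called cluster, and an encoded SubscriptionInfo called subscriptionInfo, all of which are illustrative placeholders.

// Minimal sketch of the assign() round trip (assignor, cluster and subscriptionInfo are placeholders).
final Map<String, Subscription> subscriptions = new HashMap<>();
subscriptions.put("consumer", new Subscription(singletonList("topic1"), subscriptionInfo.encode()));

// All member subscriptions are handed to the assignor wrapped in a single GroupSubscription.
final Map<String, Assignment> assignments =
    assignor.assign(cluster, new GroupSubscription(subscriptions)).groupAssignment();

// Streams piggybacks its own metadata, including an error code, on each Assignment's userData.
final AssignmentInfo info = AssignmentInfo.decode(assignments.get("consumer").userData());
final int errCode = info.errCode(); // Example 1 asserts this equals AssignorError.ASSIGNMENT_ERROR.code()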

Example 2 with GroupSubscription

Use of org.apache.kafka.clients.consumer.ConsumerPartitionAssignor.GroupSubscription in project kafka by apache.

The class StreamsPartitionAssignorTest, method testAssignBasic:

@Test
public void testAssignBasic() {
    builder.addSource(null, "source1", null, null, null, "topic1");
    builder.addSource(null, "source2", null, null, null, "topic2");
    builder.addProcessor("processor", new MockApiProcessorSupplier<>(), "source1", "source2");
    builder.addStateStore(new MockKeyValueStoreBuilder("store", false), "processor");
    final List<String> topics = asList("topic1", "topic2");
    final Set<TaskId> allTasks = mkSet(TASK_0_0, TASK_0_1, TASK_0_2);
    final Set<TaskId> prevTasks10 = mkSet(TASK_0_0);
    final Set<TaskId> prevTasks11 = mkSet(TASK_0_1);
    final Set<TaskId> prevTasks20 = mkSet(TASK_0_2);
    final Set<TaskId> standbyTasks10 = EMPTY_TASKS;
    final Set<TaskId> standbyTasks11 = mkSet(TASK_0_2);
    final Set<TaskId> standbyTasks20 = mkSet(TASK_0_0);
    createMockTaskManager(prevTasks10, standbyTasks10);
    adminClient = createMockAdminClientForAssignor(getTopicPartitionOffsetsMap(singletonList(APPLICATION_ID + "-store-changelog"), singletonList(3)));
    configureDefaultPartitionAssignor();
    subscriptions.put("consumer10", new Subscription(topics, getInfo(UUID_1, prevTasks10, standbyTasks10).encode()));
    subscriptions.put("consumer11", new Subscription(topics, getInfo(UUID_1, prevTasks11, standbyTasks11).encode()));
    subscriptions.put("consumer20", new Subscription(topics, getInfo(UUID_2, prevTasks20, standbyTasks20).encode()));
    final Map<String, Assignment> assignments = partitionAssignor.assign(metadata, new GroupSubscription(subscriptions)).groupAssignment();
    // check the assignment
    assertEquals(
        mkSet(mkSet(t1p0, t2p0), mkSet(t1p1, t2p1)),
        mkSet(new HashSet<>(assignments.get("consumer10").partitions()),
              new HashSet<>(assignments.get("consumer11").partitions())));
    assertEquals(mkSet(t1p2, t2p2), new HashSet<>(assignments.get("consumer20").partitions()));
    // check assignment info
    // the first consumer
    final AssignmentInfo info10 = checkAssignment(allTopics, assignments.get("consumer10"));
    final Set<TaskId> allActiveTasks = new HashSet<>(info10.activeTasks());
    // the second consumer
    final AssignmentInfo info11 = checkAssignment(allTopics, assignments.get("consumer11"));
    allActiveTasks.addAll(info11.activeTasks());
    assertEquals(mkSet(TASK_0_0, TASK_0_1), allActiveTasks);
    // the third consumer
    final AssignmentInfo info20 = checkAssignment(allTopics, assignments.get("consumer20"));
    allActiveTasks.addAll(info20.activeTasks());
    assertEquals(3, allActiveTasks.size());
    assertEquals(allTasks, new HashSet<>(allActiveTasks));
}
Also used : TaskId(org.apache.kafka.streams.processor.TaskId) Assignment(org.apache.kafka.clients.consumer.ConsumerPartitionAssignor.Assignment) AssignmentInfo(org.apache.kafka.streams.processor.internals.assignment.AssignmentInfo) GroupSubscription(org.apache.kafka.clients.consumer.ConsumerPartitionAssignor.GroupSubscription) Subscription(org.apache.kafka.clients.consumer.ConsumerPartitionAssignor.Subscription) MockKeyValueStoreBuilder(org.apache.kafka.test.MockKeyValueStoreBuilder) HashSet(java.util.HashSet) Test(org.junit.Test)
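
The checkAssignment helper used in Example 2 is test-internal, but the essence of what it verifies can be written directly against the public types shown here. A minimal sketch, assuming assignments is the map returned by groupAssignment() above; the variable names are illustrative:

// Decode the Streams metadata that rides along on the consumer-protocol Assignment.
final AssignmentInfo info10 = AssignmentInfo.decode(assignments.get("consumer10").userData());

// The active tasks encoded for this consumer (a subset of TASK_0_0..TASK_0_2 in Example 2).
final List<TaskId> activeTasks10 = info10.activeTasks();

// The plain consumer-protocol part of the assignment is still available as partitions().
final List<TopicPartition> ownedPartitions10 = assignments.get("consumer10").partitions();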

Example 3 with GroupSubscription

Use of org.apache.kafka.clients.consumer.ConsumerPartitionAssignor.GroupSubscription in project kafka by apache.

The class StreamsPartitionAssignorTest, method shouldAssignEvenlyAcrossConsumersOneClientMultipleThreads:

@Test
public void shouldAssignEvenlyAcrossConsumersOneClientMultipleThreads() {
    builder.addSource(null, "source1", null, null, null, "topic1");
    builder.addSource(null, "source2", null, null, null, "topic2");
    builder.addProcessor("processor", new MockApiProcessorSupplier<>(), "source1");
    builder.addProcessor("processorII", new MockApiProcessorSupplier<>(), "source2");
    final List<PartitionInfo> localInfos = asList(
        new PartitionInfo("topic1", 0, Node.noNode(), new Node[0], new Node[0]),
        new PartitionInfo("topic1", 1, Node.noNode(), new Node[0], new Node[0]),
        new PartitionInfo("topic1", 2, Node.noNode(), new Node[0], new Node[0]),
        new PartitionInfo("topic1", 3, Node.noNode(), new Node[0], new Node[0]),
        new PartitionInfo("topic2", 0, Node.noNode(), new Node[0], new Node[0]),
        new PartitionInfo("topic2", 1, Node.noNode(), new Node[0], new Node[0]),
        new PartitionInfo("topic2", 2, Node.noNode(), new Node[0], new Node[0]),
        new PartitionInfo("topic2", 3, Node.noNode(), new Node[0], new Node[0]));
    final Cluster localMetadata = new Cluster("cluster", Collections.singletonList(Node.noNode()), localInfos, emptySet(), emptySet());
    final List<String> topics = asList("topic1", "topic2");
    configureDefault();
    subscriptions.put("consumer10", new Subscription(topics, defaultSubscriptionInfo.encode()));
    subscriptions.put("consumer11", new Subscription(topics, defaultSubscriptionInfo.encode()));
    final Map<String, Assignment> assignments = partitionAssignor.assign(localMetadata, new GroupSubscription(subscriptions)).groupAssignment();
    // check assigned partitions
    assertEquals(
        mkSet(mkSet(t2p2, t1p0, t1p2, t2p0), mkSet(t1p1, t2p1, t1p3, t2p3)),
        mkSet(new HashSet<>(assignments.get("consumer10").partitions()),
              new HashSet<>(assignments.get("consumer11").partitions())));
    // the first consumer
    final AssignmentInfo info10 = AssignmentInfo.decode(assignments.get("consumer10").userData());
    final List<TaskId> expectedInfo10TaskIds = asList(TASK_0_0, TASK_0_2, TASK_1_0, TASK_1_2);
    assertEquals(expectedInfo10TaskIds, info10.activeTasks());
    // the second consumer
    final AssignmentInfo info11 = AssignmentInfo.decode(assignments.get("consumer11").userData());
    final List<TaskId> expectedInfo11TaskIds = asList(TASK_0_1, TASK_0_3, TASK_1_1, TASK_1_3);
    assertEquals(expectedInfo11TaskIds, info11.activeTasks());
}
Also used : TaskId(org.apache.kafka.streams.processor.TaskId) Node(org.apache.kafka.common.Node) Cluster(org.apache.kafka.common.Cluster) Assignment(org.apache.kafka.clients.consumer.ConsumerPartitionAssignor.Assignment) AssignmentInfo(org.apache.kafka.streams.processor.internals.assignment.AssignmentInfo) GroupSubscription(org.apache.kafka.clients.consumer.ConsumerPartitionAssignor.GroupSubscription) PartitionInfo(org.apache.kafka.common.PartitionInfo) Subscription(org.apache.kafka.clients.consumer.ConsumerPartitionAssignor.Subscription) HashSet(java.util.HashSet) Test(org.junit.Test)
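
The eight-element PartitionInfo list in Example 3 is the usual way these tests fake broker metadata. A more compact but equivalent construction is sketched below; the loop and the variable names are mine, not the test's:

// Fake metadata: topic1 and topic2 with four partitions each, all led by a placeholder node.
final List<PartitionInfo> infos = new ArrayList<>();
for (int partition = 0; partition < 4; partition++) {
    infos.add(new PartitionInfo("topic1", partition, Node.noNode(), new Node[0], new Node[0]));
    infos.add(new PartitionInfo("topic2", partition, Node.noNode(), new Node[0], new Node[0]));
}
final Cluster localMetadata = new Cluster(
    "cluster",
    Collections.singletonList(Node.noNode()),
    infos,
    emptySet(),   // unauthorized topics
    emptySet());  // internal topics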

Example 4 with GroupSubscription

Use of org.apache.kafka.clients.consumer.ConsumerPartitionAssignor.GroupSubscription in project kafka by apache.

The class StreamsPartitionAssignorTest, method shouldThrowTimeoutExceptionWhenCreatingChangelogTopicsTimesOut:

@Test
public void shouldThrowTimeoutExceptionWhenCreatingChangelogTopicsTimesOut() {
    final StreamsConfig config = new StreamsConfig(configProps());
    final StreamsBuilder streamsBuilder = new StreamsBuilder();
    streamsBuilder.table("topic1", Materialized.as("store"));
    final String client = "client1";
    builder = TopologyWrapper.getInternalTopologyBuilder(streamsBuilder.build());
    topologyMetadata = new TopologyMetadata(builder, config);
    createDefaultMockTaskManager();
    EasyMock.replay(taskManager);
    partitionAssignor.configure(configProps());
    final MockInternalTopicManager mockInternalTopicManager = new MockInternalTopicManager(time, config, mockClientSupplier.restoreConsumer, false) {

        @Override
        public Set<String> makeReady(final Map<String, InternalTopicConfig> topics) {
            if (topics.isEmpty()) {
                return emptySet();
            }
            throw new TimeoutException("KABOOM!");
        }
    };
    partitionAssignor.setInternalTopicManager(mockInternalTopicManager);
    subscriptions.put(client, new Subscription(singletonList("topic1"), defaultSubscriptionInfo.encode()));
    assertThrows(TimeoutException.class, () -> partitionAssignor.assign(metadata, new GroupSubscription(subscriptions)));
}
Also used : InternalStreamsBuilder(org.apache.kafka.streams.kstream.internals.InternalStreamsBuilder) StreamsBuilder(org.apache.kafka.streams.StreamsBuilder) MockInternalTopicManager(org.apache.kafka.test.MockInternalTopicManager) GroupSubscription(org.apache.kafka.clients.consumer.ConsumerPartitionAssignor.GroupSubscription) Subscription(org.apache.kafka.clients.consumer.ConsumerPartitionAssignor.Subscription) Utils.mkMap(org.apache.kafka.common.utils.Utils.mkMap) Map(java.util.Map) HashMap(java.util.HashMap) Collections.singletonMap(java.util.Collections.singletonMap) Collections.emptyMap(java.util.Collections.emptyMap) Matchers.anEmptyMap(org.hamcrest.Matchers.anEmptyMap) StreamsConfig(org.apache.kafka.streams.StreamsConfig) TimeoutException(org.apache.kafka.common.errors.TimeoutException) Test(org.junit.Test)
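
Example 4 only checks the exception type. Since org.junit.Assert.assertThrows returns the caught exception, the message injected by the overridden makeReady could be verified as well; a small optional extension of the same test, not part of the original code:

// assertThrows returns the caught exception, so its message can be checked too.
final TimeoutException thrown = assertThrows(
    TimeoutException.class,
    () -> partitionAssignor.assign(metadata, new GroupSubscription(subscriptions)));
// The message comes straight from the mocked makeReady above.
assertThat(thrown.getMessage(), equalTo("KABOOM!"));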

Example 5 with GroupSubscription

Use of org.apache.kafka.clients.consumer.ConsumerPartitionAssignor.GroupSubscription in project kafka by apache.

The class StreamsPartitionAssignorTest, method shouldNotFailOnBranchedMultiLevelRepartitionConnectedTopology:

@Test
public void shouldNotFailOnBranchedMultiLevelRepartitionConnectedTopology() {
    // Test out a topology with 3 levels of sub-topologies:
    //            0
    //          /   \
    //         1     3
    //          \   /
    //            2
    // where each pair of sub-topologies is connected by a repartition topic.
    // The purpose of this test is to verify the robustness of the stream partition assignor algorithm,
    // especially whether it can build the repartition topic counts (step zero) with a complex topology.
    // The traversal path 0 -> 1 -> 2 -> 3 hits the case where sub-topology 2 is initialized while its
    // parent 3 hasn't been initialized yet.
    builder.addSource(null, "KSTREAM-SOURCE-0000000000", null, null, null, "input-stream");
    builder.addProcessor("KSTREAM-FLATMAPVALUES-0000000001", new MockApiProcessorSupplier<>(), "KSTREAM-SOURCE-0000000000");
    builder.addProcessor("KSTREAM-BRANCH-0000000002", new MockApiProcessorSupplier<>(), "KSTREAM-FLATMAPVALUES-0000000001");
    builder.addProcessor("KSTREAM-BRANCHCHILD-0000000003", new MockApiProcessorSupplier<>(), "KSTREAM-BRANCH-0000000002");
    builder.addProcessor("KSTREAM-BRANCHCHILD-0000000004", new MockApiProcessorSupplier<>(), "KSTREAM-BRANCH-0000000002");
    builder.addProcessor("KSTREAM-MAP-0000000005", new MockApiProcessorSupplier<>(), "KSTREAM-BRANCHCHILD-0000000003");
    builder.addInternalTopic("odd_store-repartition", InternalTopicProperties.empty());
    builder.addProcessor("odd_store-repartition-filter", new MockApiProcessorSupplier<>(), "KSTREAM-MAP-0000000005");
    builder.addSink("odd_store-repartition-sink", "odd_store-repartition", null, null, null, "odd_store-repartition-filter");
    builder.addSource(null, "odd_store-repartition-source", null, null, null, "odd_store-repartition");
    builder.addProcessor("KSTREAM-REDUCE-0000000006", new MockApiProcessorSupplier<>(), "odd_store-repartition-source");
    builder.addProcessor("KTABLE-TOSTREAM-0000000010", new MockApiProcessorSupplier<>(), "KSTREAM-REDUCE-0000000006");
    builder.addProcessor("KSTREAM-PEEK-0000000011", new MockApiProcessorSupplier<>(), "KTABLE-TOSTREAM-0000000010");
    builder.addProcessor("KSTREAM-MAP-0000000012", new MockApiProcessorSupplier<>(), "KSTREAM-PEEK-0000000011");
    builder.addInternalTopic("odd_store_2-repartition", InternalTopicProperties.empty());
    builder.addProcessor("odd_store_2-repartition-filter", new MockApiProcessorSupplier<>(), "KSTREAM-MAP-0000000012");
    builder.addSink("odd_store_2-repartition-sink", "odd_store_2-repartition", null, null, null, "odd_store_2-repartition-filter");
    builder.addSource(null, "odd_store_2-repartition-source", null, null, null, "odd_store_2-repartition");
    builder.addProcessor("KSTREAM-REDUCE-0000000013", new MockApiProcessorSupplier<>(), "odd_store_2-repartition-source");
    builder.addProcessor("KSTREAM-MAP-0000000017", new MockApiProcessorSupplier<>(), "KSTREAM-BRANCHCHILD-0000000004");
    builder.addInternalTopic("even_store-repartition", InternalTopicProperties.empty());
    builder.addProcessor("even_store-repartition-filter", new MockApiProcessorSupplier<>(), "KSTREAM-MAP-0000000017");
    builder.addSink("even_store-repartition-sink", "even_store-repartition", null, null, null, "even_store-repartition-filter");
    builder.addSource(null, "even_store-repartition-source", null, null, null, "even_store-repartition");
    builder.addProcessor("KSTREAM-REDUCE-0000000018", new MockApiProcessorSupplier<>(), "even_store-repartition-source");
    builder.addProcessor("KTABLE-TOSTREAM-0000000022", new MockApiProcessorSupplier<>(), "KSTREAM-REDUCE-0000000018");
    builder.addProcessor("KSTREAM-PEEK-0000000023", new MockApiProcessorSupplier<>(), "KTABLE-TOSTREAM-0000000022");
    builder.addProcessor("KSTREAM-MAP-0000000024", new MockApiProcessorSupplier<>(), "KSTREAM-PEEK-0000000023");
    builder.addInternalTopic("even_store_2-repartition", InternalTopicProperties.empty());
    builder.addProcessor("even_store_2-repartition-filter", new MockApiProcessorSupplier<>(), "KSTREAM-MAP-0000000024");
    builder.addSink("even_store_2-repartition-sink", "even_store_2-repartition", null, null, null, "even_store_2-repartition-filter");
    builder.addSource(null, "even_store_2-repartition-source", null, null, null, "even_store_2-repartition");
    builder.addProcessor("KSTREAM-REDUCE-0000000025", new MockApiProcessorSupplier<>(), "even_store_2-repartition-source");
    builder.addProcessor("KTABLE-JOINTHIS-0000000030", new MockApiProcessorSupplier<>(), "KSTREAM-REDUCE-0000000013");
    builder.addProcessor("KTABLE-JOINOTHER-0000000031", new MockApiProcessorSupplier<>(), "KSTREAM-REDUCE-0000000025");
    builder.addProcessor("KTABLE-MERGE-0000000029", new MockApiProcessorSupplier<>(), "KTABLE-JOINTHIS-0000000030", "KTABLE-JOINOTHER-0000000031");
    builder.addProcessor("KTABLE-TOSTREAM-0000000032", new MockApiProcessorSupplier<>(), "KTABLE-MERGE-0000000029");
    final List<String> topics = asList("input-stream", "test-even_store-repartition", "test-even_store_2-repartition", "test-odd_store-repartition", "test-odd_store_2-repartition");
    configureDefault();
    subscriptions.put("consumer10", new Subscription(topics, defaultSubscriptionInfo.encode()));
    final Cluster metadata = new Cluster(
        "cluster",
        Collections.singletonList(Node.noNode()),
        Collections.singletonList(new PartitionInfo("input-stream", 0, Node.noNode(), new Node[0], new Node[0])),
        emptySet(),
        emptySet());
    // This shall fail if we have bugs in the repartition topic creation due to the inconsistent order of sub-topologies.
    partitionAssignor.assign(metadata, new GroupSubscription(subscriptions));
}
Also used : Cluster(org.apache.kafka.common.Cluster) GroupSubscription(org.apache.kafka.clients.consumer.ConsumerPartitionAssignor.GroupSubscription) PartitionInfo(org.apache.kafka.common.PartitionInfo) Subscription(org.apache.kafka.clients.consumer.ConsumerPartitionAssignor.Subscription) Test(org.junit.Test)
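
Example 5 only asserts that assign() completes without throwing. To additionally confirm that the returned metadata carries no error, the result could be decoded as in Example 1. A hedged sketch, reusing the variables from Example 5 and assuming AssignorError.NONE is the success code:

// Decode the result of the call in Example 5 and check the Streams error code for the subscribed consumer.
final Map<String, Assignment> assignments =
    partitionAssignor.assign(metadata, new GroupSubscription(subscriptions)).groupAssignment();
final AssignmentInfo info = AssignmentInfo.decode(assignments.get("consumer10").userData());
assertThat(info.errCode(), equalTo(AssignorError.NONE.code()));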

Aggregations

GroupSubscription (org.apache.kafka.clients.consumer.ConsumerPartitionAssignor.GroupSubscription): 33 usages
Subscription (org.apache.kafka.clients.consumer.ConsumerPartitionAssignor.Subscription): 33 usages
Test (org.junit.Test): 29 usages
Assignment (org.apache.kafka.clients.consumer.ConsumerPartitionAssignor.Assignment): 24 usages
AssignmentInfo (org.apache.kafka.streams.processor.internals.assignment.AssignmentInfo): 18 usages
TaskId (org.apache.kafka.streams.processor.TaskId): 17 usages
MockKeyValueStoreBuilder (org.apache.kafka.test.MockKeyValueStoreBuilder): 15 usages
HashSet (java.util.HashSet): 13 usages
HashMap (java.util.HashMap): 10 usages
TopicPartition (org.apache.kafka.common.TopicPartition): 10 usages
Map (java.util.Map): 9 usages
Cluster (org.apache.kafka.common.Cluster): 9 usages
MockInternalTopicManager (org.apache.kafka.test.MockInternalTopicManager): 9 usages
ArrayList (java.util.ArrayList): 8 usages
Collections.emptyMap (java.util.Collections.emptyMap): 8 usages
Collections.singletonMap (java.util.Collections.singletonMap): 8 usages
AdminClient (org.apache.kafka.clients.admin.AdminClient): 8 usages
Utils.mkMap (org.apache.kafka.common.utils.Utils.mkMap): 8 usages
StreamsConfig (org.apache.kafka.streams.StreamsConfig): 8 usages
Matchers.anEmptyMap (org.hamcrest.Matchers.anEmptyMap): 8 usages