
Example 26 with Subscription

Use of org.apache.kafka.clients.consumer.ConsumerPartitionAssignor.Subscription in the Apache Kafka project.

From the class StreamsPartitionAssignorTest, method testCooperativeSubscription.

@Test
public void testCooperativeSubscription() {
    builder.addSource(null, "source1", null, null, null, "topic1");
    builder.addSource(null, "source2", null, null, null, "topic2");
    builder.addProcessor("processor", new MockApiProcessorSupplier<>(), "source1", "source2");
    final Set<TaskId> prevTasks = mkSet(new TaskId(0, 1), new TaskId(1, 1), new TaskId(2, 1));
    final Set<TaskId> standbyTasks = mkSet(new TaskId(0, 1), new TaskId(1, 1), new TaskId(2, 1), new TaskId(0, 2), new TaskId(1, 2), new TaskId(2, 2));
    createMockTaskManager(prevTasks, standbyTasks);
    configureDefaultPartitionAssignor();
    final Set<String> topics = mkSet("topic1", "topic2");
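    // subscriptionUserData() encodes this member's SubscriptionInfo (process UUID and task metadata) into the opaque userData bytes.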
    final Subscription subscription = new Subscription(new ArrayList<>(topics), partitionAssignor.subscriptionUserData(topics));
    Collections.sort(subscription.topics());
    assertEquals(asList("topic1", "topic2"), subscription.topics());
    final SubscriptionInfo info = getInfo(UUID_1, prevTasks, standbyTasks, uniqueField);
    assertEquals(info, SubscriptionInfo.decode(subscription.userData()));
}
Also used : TaskId(org.apache.kafka.streams.processor.TaskId) SubscriptionInfo(org.apache.kafka.streams.processor.internals.assignment.SubscriptionInfo) GroupSubscription(org.apache.kafka.clients.consumer.ConsumerPartitionAssignor.GroupSubscription) Subscription(org.apache.kafka.clients.consumer.ConsumerPartitionAssignor.Subscription) Test(org.junit.Test)
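
The round trip asserted above relies only on the public Subscription API, even though StreamsPartitionAssignor and SubscriptionInfo are internal. Below is a minimal standalone sketch, with a made-up class name and a plain string standing in for the encoded SubscriptionInfo, of how a Subscription pairs a topic list with opaque userData:

import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;
import java.util.Arrays;
import java.util.List;

import org.apache.kafka.clients.consumer.ConsumerPartitionAssignor.Subscription;

public class SubscriptionUserDataSketch {

    public static void main(String[] args) {
        final List<String> topics = Arrays.asList("topic1", "topic2");

        // Assignor-specific metadata travels as an opaque ByteBuffer; a plain UTF-8
        // string stands in here for what SubscriptionInfo.encode() produces in Streams.
        final ByteBuffer userData = ByteBuffer.wrap(
            "prevTasks=0_1,1_1,2_1".getBytes(StandardCharsets.UTF_8));

        final Subscription subscription = new Subscription(topics, userData);

        // The group coordinator never interprets userData; only the assignor running
        // on the group leader decodes it during assignment.
        System.out.println("topics   = " + subscription.topics());
        System.out.println("userData = " + StandardCharsets.UTF_8.decode(subscription.userData()));
    }
}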

Example 27 with Subscription

Use of org.apache.kafka.clients.consumer.ConsumerPartitionAssignor.Subscription in the Apache Kafka project.

From the class StreamsPartitionAssignorTest, method shouldNotFailOnBranchedMultiLevelRepartitionConnectedTopology.

@Test
public void shouldNotFailOnBranchedMultiLevelRepartitionConnectedTopology() {
    // Test out a topology with three levels of sub-topologies:
    //            0
    //          /   \
    //         1     3
    //          \   /
    //            2
    // where each pair of sub-topologies is connected by a repartition topic.
    // The purpose of this test is to verify the robustness of the stream partition assignor algorithm,
    // especially whether it can build the repartition topic counts (step zero) with a complex topology.
    // The traversal path 0 -> 1 -> 2 -> 3 hits the case where sub-topology 2 is initialized while its
    // parent 3 has not been initialized yet.
    builder.addSource(null, "KSTREAM-SOURCE-0000000000", null, null, null, "input-stream");
    builder.addProcessor("KSTREAM-FLATMAPVALUES-0000000001", new MockApiProcessorSupplier<>(), "KSTREAM-SOURCE-0000000000");
    builder.addProcessor("KSTREAM-BRANCH-0000000002", new MockApiProcessorSupplier<>(), "KSTREAM-FLATMAPVALUES-0000000001");
    builder.addProcessor("KSTREAM-BRANCHCHILD-0000000003", new MockApiProcessorSupplier<>(), "KSTREAM-BRANCH-0000000002");
    builder.addProcessor("KSTREAM-BRANCHCHILD-0000000004", new MockApiProcessorSupplier<>(), "KSTREAM-BRANCH-0000000002");
    builder.addProcessor("KSTREAM-MAP-0000000005", new MockApiProcessorSupplier<>(), "KSTREAM-BRANCHCHILD-0000000003");
    builder.addInternalTopic("odd_store-repartition", InternalTopicProperties.empty());
    builder.addProcessor("odd_store-repartition-filter", new MockApiProcessorSupplier<>(), "KSTREAM-MAP-0000000005");
    builder.addSink("odd_store-repartition-sink", "odd_store-repartition", null, null, null, "odd_store-repartition-filter");
    builder.addSource(null, "odd_store-repartition-source", null, null, null, "odd_store-repartition");
    builder.addProcessor("KSTREAM-REDUCE-0000000006", new MockApiProcessorSupplier<>(), "odd_store-repartition-source");
    builder.addProcessor("KTABLE-TOSTREAM-0000000010", new MockApiProcessorSupplier<>(), "KSTREAM-REDUCE-0000000006");
    builder.addProcessor("KSTREAM-PEEK-0000000011", new MockApiProcessorSupplier<>(), "KTABLE-TOSTREAM-0000000010");
    builder.addProcessor("KSTREAM-MAP-0000000012", new MockApiProcessorSupplier<>(), "KSTREAM-PEEK-0000000011");
    builder.addInternalTopic("odd_store_2-repartition", InternalTopicProperties.empty());
    builder.addProcessor("odd_store_2-repartition-filter", new MockApiProcessorSupplier<>(), "KSTREAM-MAP-0000000012");
    builder.addSink("odd_store_2-repartition-sink", "odd_store_2-repartition", null, null, null, "odd_store_2-repartition-filter");
    builder.addSource(null, "odd_store_2-repartition-source", null, null, null, "odd_store_2-repartition");
    builder.addProcessor("KSTREAM-REDUCE-0000000013", new MockApiProcessorSupplier<>(), "odd_store_2-repartition-source");
    builder.addProcessor("KSTREAM-MAP-0000000017", new MockApiProcessorSupplier<>(), "KSTREAM-BRANCHCHILD-0000000004");
    builder.addInternalTopic("even_store-repartition", InternalTopicProperties.empty());
    builder.addProcessor("even_store-repartition-filter", new MockApiProcessorSupplier<>(), "KSTREAM-MAP-0000000017");
    builder.addSink("even_store-repartition-sink", "even_store-repartition", null, null, null, "even_store-repartition-filter");
    builder.addSource(null, "even_store-repartition-source", null, null, null, "even_store-repartition");
    builder.addProcessor("KSTREAM-REDUCE-0000000018", new MockApiProcessorSupplier<>(), "even_store-repartition-source");
    builder.addProcessor("KTABLE-TOSTREAM-0000000022", new MockApiProcessorSupplier<>(), "KSTREAM-REDUCE-0000000018");
    builder.addProcessor("KSTREAM-PEEK-0000000023", new MockApiProcessorSupplier<>(), "KTABLE-TOSTREAM-0000000022");
    builder.addProcessor("KSTREAM-MAP-0000000024", new MockApiProcessorSupplier<>(), "KSTREAM-PEEK-0000000023");
    builder.addInternalTopic("even_store_2-repartition", InternalTopicProperties.empty());
    builder.addProcessor("even_store_2-repartition-filter", new MockApiProcessorSupplier<>(), "KSTREAM-MAP-0000000024");
    builder.addSink("even_store_2-repartition-sink", "even_store_2-repartition", null, null, null, "even_store_2-repartition-filter");
    builder.addSource(null, "even_store_2-repartition-source", null, null, null, "even_store_2-repartition");
    builder.addProcessor("KSTREAM-REDUCE-0000000025", new MockApiProcessorSupplier<>(), "even_store_2-repartition-source");
    builder.addProcessor("KTABLE-JOINTHIS-0000000030", new MockApiProcessorSupplier<>(), "KSTREAM-REDUCE-0000000013");
    builder.addProcessor("KTABLE-JOINOTHER-0000000031", new MockApiProcessorSupplier<>(), "KSTREAM-REDUCE-0000000025");
    builder.addProcessor("KTABLE-MERGE-0000000029", new MockApiProcessorSupplier<>(), "KTABLE-JOINTHIS-0000000030", "KTABLE-JOINOTHER-0000000031");
    builder.addProcessor("KTABLE-TOSTREAM-0000000032", new MockApiProcessorSupplier<>(), "KTABLE-MERGE-0000000029");
    final List<String> topics = asList("input-stream", "test-even_store-repartition", "test-even_store_2-repartition", "test-odd_store-repartition", "test-odd_store_2-repartition");
    configureDefault();
    subscriptions.put("consumer10", new Subscription(topics, defaultSubscriptionInfo.encode()));
    final Cluster metadata = new Cluster("cluster", Collections.singletonList(Node.noNode()), Collections.singletonList(new PartitionInfo("input-stream", 0, Node.noNode(), new Node[0], new Node[0])), emptySet(), emptySet());
    // This call would fail if repartition topic creation had bugs caused by the inconsistent order of sub-topologies.
    partitionAssignor.assign(metadata, new GroupSubscription(subscriptions));
}
Also used : Cluster(org.apache.kafka.common.Cluster) GroupSubscription(org.apache.kafka.clients.consumer.ConsumerPartitionAssignor.GroupSubscription) PartitionInfo(org.apache.kafka.common.PartitionInfo) Subscription(org.apache.kafka.clients.consumer.ConsumerPartitionAssignor.Subscription) Test(org.junit.Test)
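
The pattern above, building Cluster metadata by hand and invoking assign() with a GroupSubscription, also works with any public ConsumerPartitionAssignor. A hedged sketch using RangeAssignor as a stand-in for the internal StreamsPartitionAssignor (class name, member ids, and topic are illustrative):

import java.util.Arrays;
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;

import org.apache.kafka.clients.consumer.ConsumerPartitionAssignor.Assignment;
import org.apache.kafka.clients.consumer.ConsumerPartitionAssignor.GroupAssignment;
import org.apache.kafka.clients.consumer.ConsumerPartitionAssignor.GroupSubscription;
import org.apache.kafka.clients.consumer.ConsumerPartitionAssignor.Subscription;
import org.apache.kafka.clients.consumer.RangeAssignor;
import org.apache.kafka.common.Cluster;
import org.apache.kafka.common.Node;
import org.apache.kafka.common.PartitionInfo;

public class DirectAssignSketch {

    public static void main(String[] args) {
        // Two partitions of a single input topic, hosted on a placeholder node.
        final Cluster metadata = new Cluster(
            "cluster",
            Collections.singletonList(Node.noNode()),
            Arrays.asList(
                new PartitionInfo("input-stream", 0, Node.noNode(), new Node[0], new Node[0]),
                new PartitionInfo("input-stream", 1, Node.noNode(), new Node[0], new Node[0])),
            Collections.emptySet(),
            Collections.emptySet());

        // Each group member contributes one Subscription, keyed by its member id.
        final Map<String, Subscription> subscriptions = new HashMap<>();
        subscriptions.put("consumer10", new Subscription(Collections.singletonList("input-stream")));
        subscriptions.put("consumer11", new Subscription(Collections.singletonList("input-stream")));

        // The leader-side entry point: Cluster metadata in, per-member assignments out.
        final GroupAssignment group = new RangeAssignor()
            .assign(metadata, new GroupSubscription(subscriptions));

        for (final Map.Entry<String, Assignment> entry : group.groupAssignment().entrySet()) {
            System.out.println(entry.getKey() + " -> " + entry.getValue().partitions());
        }
    }
}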

Example 28 with Subscription

Use of org.apache.kafka.clients.consumer.ConsumerPartitionAssignor.Subscription in the Apache Kafka project.

From the class StreamsPartitionAssignorTest, method testAssignWithStates.

@Test
public void testAssignWithStates() {
    builder.addSource(null, "source1", null, null, null, "topic1");
    builder.addSource(null, "source2", null, null, null, "topic2");
    builder.addProcessor("processor-1", new MockApiProcessorSupplier<>(), "source1");
    builder.addStateStore(new MockKeyValueStoreBuilder("store1", false), "processor-1");
    builder.addProcessor("processor-2", new MockApiProcessorSupplier<>(), "source2");
    builder.addStateStore(new MockKeyValueStoreBuilder("store2", false), "processor-2");
    builder.addStateStore(new MockKeyValueStoreBuilder("store3", false), "processor-2");
    final List<String> topics = asList("topic1", "topic2");
    final List<TaskId> tasks = asList(TASK_0_0, TASK_0_1, TASK_0_2, TASK_1_0, TASK_1_1, TASK_1_2);
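    // Provide changelog end offsets for all three state stores so the assignor can place the stateful tasks.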
    adminClient = createMockAdminClientForAssignor(getTopicPartitionOffsetsMap(asList(APPLICATION_ID + "-store1-changelog", APPLICATION_ID + "-store2-changelog", APPLICATION_ID + "-store3-changelog"), asList(3, 3, 3)));
    configureDefault();
    subscriptions.put("consumer10", new Subscription(topics, defaultSubscriptionInfo.encode()));
    subscriptions.put("consumer11", new Subscription(topics, defaultSubscriptionInfo.encode()));
    subscriptions.put("consumer20", new Subscription(topics, getInfo(UUID_2, EMPTY_TASKS, EMPTY_TASKS).encode()));
    final Map<String, Assignment> assignments = partitionAssignor.assign(metadata, new GroupSubscription(subscriptions)).groupAssignment();
    // Check the assigned partition count: with no previous tasks and two sub-topologies the assignment is effectively random, so we cannot check for an exact match.
    assertEquals(2, assignments.get("consumer10").partitions().size());
    assertEquals(2, assignments.get("consumer11").partitions().size());
    assertEquals(2, assignments.get("consumer20").partitions().size());
    final AssignmentInfo info10 = AssignmentInfo.decode(assignments.get("consumer10").userData());
    final AssignmentInfo info11 = AssignmentInfo.decode(assignments.get("consumer11").userData());
    final AssignmentInfo info20 = AssignmentInfo.decode(assignments.get("consumer20").userData());
    assertEquals(2, info10.activeTasks().size());
    assertEquals(2, info11.activeTasks().size());
    assertEquals(2, info20.activeTasks().size());
    final Set<TaskId> allTasks = new HashSet<>();
    allTasks.addAll(info10.activeTasks());
    allTasks.addAll(info11.activeTasks());
    allTasks.addAll(info20.activeTasks());
    assertEquals(new HashSet<>(tasks), allTasks);
    // check tasks for state topics
    final Map<Subtopology, InternalTopologyBuilder.TopicsInfo> topicGroups = builder.subtopologyToTopicsInfo();
    assertEquals(mkSet(TASK_0_0, TASK_0_1, TASK_0_2), tasksForState("store1", tasks, topicGroups));
    assertEquals(mkSet(TASK_1_0, TASK_1_1, TASK_1_2), tasksForState("store2", tasks, topicGroups));
    assertEquals(mkSet(TASK_1_0, TASK_1_1, TASK_1_2), tasksForState("store3", tasks, topicGroups));
}
Also used : TaskId(org.apache.kafka.streams.processor.TaskId) Assignment(org.apache.kafka.clients.consumer.ConsumerPartitionAssignor.Assignment) AssignmentInfo(org.apache.kafka.streams.processor.internals.assignment.AssignmentInfo) GroupSubscription(org.apache.kafka.clients.consumer.ConsumerPartitionAssignor.GroupSubscription) Subscription(org.apache.kafka.clients.consumer.ConsumerPartitionAssignor.Subscription) Subtopology(org.apache.kafka.streams.processor.internals.TopologyMetadata.Subtopology) MockKeyValueStoreBuilder(org.apache.kafka.test.MockKeyValueStoreBuilder) HashSet(java.util.HashSet) Test(org.junit.Test)
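
The AssignmentInfo decoded above travels in Assignment.userData, the per-member return channel that mirrors Subscription.userData. Below is a toy sketch of that round trip with a custom assignor; the class name, payload strings, and the naive partition placement are illustrative assumptions, not how StreamsPartitionAssignor actually assigns tasks:

import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Set;

import org.apache.kafka.clients.consumer.ConsumerGroupMetadata;
import org.apache.kafka.clients.consumer.ConsumerPartitionAssignor;
import org.apache.kafka.common.Cluster;
import org.apache.kafka.common.TopicPartition;

public class UserDataRoundTripAssignor implements ConsumerPartitionAssignor {

    @Override
    public ByteBuffer subscriptionUserData(Set<String> topics) {
        // Stand-in for SubscriptionInfo.encode(): each member advertises its local state.
        return ByteBuffer.wrap("prevTasks=none".getBytes(StandardCharsets.UTF_8));
    }

    @Override
    public GroupAssignment assign(Cluster metadata, GroupSubscription groupSubscription) {
        final Map<String, Assignment> assignments = new HashMap<>();
        for (final Map.Entry<String, Subscription> entry : groupSubscription.groupSubscription().entrySet()) {
            final Subscription subscription = entry.getValue();
            // Naive placement for illustration only: give every member partition 0 of each subscribed topic.
            final List<TopicPartition> partitions = new ArrayList<>();
            for (final String topic : subscription.topics()) {
                partitions.add(new TopicPartition(topic, 0));
            }
            // Stand-in for AssignmentInfo.encode(): per-member metadata rides back as userData.
            final ByteBuffer userData = ByteBuffer.wrap(
                ("activeTasks for " + entry.getKey()).getBytes(StandardCharsets.UTF_8));
            assignments.put(entry.getKey(), new Assignment(partitions, userData));
        }
        return new GroupAssignment(assignments);
    }

    @Override
    public void onAssignment(Assignment assignment, ConsumerGroupMetadata metadata) {
        // Each member decodes the metadata the leader attached for it.
        System.out.println(StandardCharsets.UTF_8.decode(assignment.userData()));
    }

    @Override
    public String name() {
        return "userdata-roundtrip-sketch";
    }
}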

Example 29 with Subscription

Use of org.apache.kafka.clients.consumer.ConsumerPartitionAssignor.Subscription in the Apache Kafka project.

From the class StreamsPartitionAssignorTest, method shouldReturnInterleavedAssignmentWithUnrevokedPartitionsRemovedWhenNewConsumerJoins.

@Test
public void shouldReturnInterleavedAssignmentWithUnrevokedPartitionsRemovedWhenNewConsumerJoins() {
    builder.addSource(null, "source1", null, null, null, "topic1");
    final Set<TaskId> allTasks = mkSet(TASK_0_0, TASK_0_1, TASK_0_2);
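    // CONSUMER_1 previously owned all tasks and partitions t1p0-t1p2; CONSUMER_2 joins fresh with no owned partitions.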
    subscriptions.put(CONSUMER_1, new Subscription(Collections.singletonList("topic1"), getInfo(UUID_1, allTasks, EMPTY_TASKS).encode(), asList(t1p0, t1p1, t1p2)));
    subscriptions.put(CONSUMER_2, new Subscription(Collections.singletonList("topic1"), getInfo(UUID_2, EMPTY_TASKS, EMPTY_TASKS).encode(), emptyList()));
    createMockTaskManager(allTasks, allTasks);
    configureDefaultPartitionAssignor();
    final Map<String, Assignment> assignment = partitionAssignor.assign(metadata, new GroupSubscription(subscriptions)).groupAssignment();
    assertThat(assignment.size(), equalTo(2));
    // The new consumer's assignment should be empty until c1 has the chance to revoke its partitions/tasks
    assertThat(assignment.get(CONSUMER_2).partitions(), equalTo(emptyList()));
    final AssignmentInfo actualAssignment = AssignmentInfo.decode(assignment.get(CONSUMER_2).userData());
    assertThat(actualAssignment.version(), is(LATEST_SUPPORTED_VERSION));
    assertThat(actualAssignment.activeTasks(), empty());
    // Note we're not asserting anything about standbys. If the assignor gave an active task to CONSUMER_2, it would
    // be converted to a standby, but we don't know whether the assignor will do that.
    assertThat(actualAssignment.partitionsByHost(), anEmptyMap());
    assertThat(actualAssignment.standbyPartitionByHost(), anEmptyMap());
    assertThat(actualAssignment.errCode(), is(0));
}
Also used : Assignment(org.apache.kafka.clients.consumer.ConsumerPartitionAssignor.Assignment) AssignmentInfo(org.apache.kafka.streams.processor.internals.assignment.AssignmentInfo) TaskId(org.apache.kafka.streams.processor.TaskId) GroupSubscription(org.apache.kafka.clients.consumer.ConsumerPartitionAssignor.GroupSubscription) Subscription(org.apache.kafka.clients.consumer.ConsumerPartitionAssignor.Subscription) Test(org.junit.Test)
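
Kafka Streams wires up this cooperative behaviour itself through StreamsPartitionAssignor, so there is nothing to configure in a Streams application. For a plain consumer, the closest public equivalent is opting into the cooperative protocol via CooperativeStickyAssignor; a minimal sketch, with placeholder broker address, group id, and topic:

import java.util.Collections;
import java.util.Properties;

import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.CooperativeStickyAssignor;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.serialization.StringDeserializer;

public class CooperativeConsumerSketch {

    public static void main(String[] args) {
        final Properties props = new Properties();
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
        props.put(ConsumerConfig.GROUP_ID_CONFIG, "cooperative-demo");
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
        // COOPERATIVE protocol: a joining member waits for other members to revoke partitions
        // before it receives them, instead of a stop-the-world reassignment.
        props.put(ConsumerConfig.PARTITION_ASSIGNMENT_STRATEGY_CONFIG,
                  CooperativeStickyAssignor.class.getName());

        try (KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props)) {
            consumer.subscribe(Collections.singletonList("topic1"));
            // poll loop elided
        }
    }
}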

Example 30 with Subscription

Use of org.apache.kafka.clients.consumer.ConsumerPartitionAssignor.Subscription in the Apache Kafka project.

From the class StreamsPartitionAssignorTest, method shouldThrowIllegalStateExceptionIfAnyTopicsMissingFromChangelogEndOffsets.

@Test
public void shouldThrowIllegalStateExceptionIfAnyTopicsMissingFromChangelogEndOffsets() {
    builder.addSource(null, "source1", null, null, null, "topic1");
    builder.addProcessor("processor1", new MockApiProcessorSupplier<>(), "source1");
    builder.addStateStore(new MockKeyValueStoreBuilder("store1", false), "processor1");
    builder.addStateStore(new MockKeyValueStoreBuilder("store2", false), "processor1");
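    // Only the store1 changelog's end offsets are mocked; store2-changelog is missing, which should trigger the IllegalStateException below.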
    adminClient = createMockAdminClientForAssignor(getTopicPartitionOffsetsMap(singletonList(APPLICATION_ID + "-store1-changelog"), singletonList(3)));
    configureDefault();
    subscriptions.put("consumer10", new Subscription(singletonList("topic1"), defaultSubscriptionInfo.encode()));
    assertThrows(IllegalStateException.class, () -> partitionAssignor.assign(metadata, new GroupSubscription(subscriptions)));
}
Also used : GroupSubscription(org.apache.kafka.clients.consumer.ConsumerPartitionAssignor.GroupSubscription) Subscription(org.apache.kafka.clients.consumer.ConsumerPartitionAssignor.Subscription) MockKeyValueStoreBuilder(org.apache.kafka.test.MockKeyValueStoreBuilder) Test(org.junit.Test)
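
The exception stems from the assignor's requirement that end offsets be available for every changelog topic it needs; the Streams assignor fetches them through its admin client. A hedged sketch of an equivalent standalone lookup (topic names and bootstrap server are placeholders):

import java.util.HashMap;
import java.util.Map;
import java.util.Properties;
import java.util.concurrent.ExecutionException;

import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.clients.admin.AdminClientConfig;
import org.apache.kafka.clients.admin.ListOffsetsResult;
import org.apache.kafka.clients.admin.OffsetSpec;
import org.apache.kafka.common.TopicPartition;

public class ChangelogEndOffsetsSketch {

    public static void main(String[] args) throws ExecutionException, InterruptedException {
        final Properties props = new Properties();
        props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");

        try (Admin admin = Admin.create(props)) {
            // Ask for the latest (end) offset of each changelog partition we care about.
            final Map<TopicPartition, OffsetSpec> request = new HashMap<>();
            request.put(new TopicPartition("app-store1-changelog", 0), OffsetSpec.latest());
            request.put(new TopicPartition("app-store2-changelog", 0), OffsetSpec.latest());

            final ListOffsetsResult result = admin.listOffsets(request);
            result.all().get().forEach((partition, info) ->
                System.out.println(partition + " end offset = " + info.offset()));
        }
    }
}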

Aggregations

Subscription (org.apache.kafka.clients.consumer.ConsumerPartitionAssignor.Subscription): 89 usages
HashMap (java.util.HashMap): 50 usages
ArrayList (java.util.ArrayList): 49 usages
List (java.util.List): 45 usages
Test (org.junit.jupiter.api.Test): 44 usages
GroupSubscription (org.apache.kafka.clients.consumer.ConsumerPartitionAssignor.GroupSubscription): 39 usages
Test (org.junit.Test): 33 usages
Collections.emptyList (java.util.Collections.emptyList): 28 usages
Assignment (org.apache.kafka.clients.consumer.ConsumerPartitionAssignor.Assignment): 24 usages
TopicPartition (org.apache.kafka.common.TopicPartition): 20 usages
TaskId (org.apache.kafka.streams.processor.TaskId): 19 usages
AssignmentInfo (org.apache.kafka.streams.processor.internals.assignment.AssignmentInfo): 18 usages
ByteBuffer (java.nio.ByteBuffer): 15 usages
MockKeyValueStoreBuilder (org.apache.kafka.test.MockKeyValueStoreBuilder): 15 usages
HashSet (java.util.HashSet): 14 usages
Map (java.util.Map): 13 usages
Cluster (org.apache.kafka.common.Cluster): 9 usages
Utils.mkMap (org.apache.kafka.common.utils.Utils.mkMap): 9 usages
MockInternalTopicManager (org.apache.kafka.test.MockInternalTopicManager): 9 usages
Collections.emptyMap (java.util.Collections.emptyMap): 8 usages