
Example 11 with GroupSubscription

Use of org.apache.kafka.clients.consumer.ConsumerPartitionAssignor.GroupSubscription in project kafka by apache.

From the class StreamsPartitionAssignorTest, method testAssignWithInternalTopicThatsSourceIsAnotherInternalTopic:

@Test
public void testAssignWithInternalTopicThatsSourceIsAnotherInternalTopic() {
    builder.addInternalTopic("topicX", InternalTopicProperties.empty());
    builder.addSource(null, "source1", null, null, null, "topic1");
    builder.addProcessor("processor1", new MockApiProcessorSupplier<>(), "source1");
    builder.addSink("sink1", "topicX", null, null, null, "processor1");
    builder.addSource(null, "source2", null, null, null, "topicX");
    builder.addInternalTopic("topicZ", InternalTopicProperties.empty());
    builder.addProcessor("processor2", new MockApiProcessorSupplier<>(), "source2");
    builder.addSink("sink2", "topicZ", null, null, null, "processor2");
    builder.addSource(null, "source3", null, null, null, "topicZ");
    final List<String> topics = asList("topic1", APPLICATION_ID + "-topicX", APPLICATION_ID + "-topicZ");
    final Set<TaskId> allTasks = mkSet(TASK_0_0, TASK_0_1, TASK_0_2);
    final MockInternalTopicManager internalTopicManager = configureDefault();
    subscriptions.put("consumer10", new Subscription(topics, defaultSubscriptionInfo.encode()));
    partitionAssignor.assign(metadata, new GroupSubscription(subscriptions));
    // check prepared internal topics
    assertEquals(2, internalTopicManager.readyTopics.size());
    assertEquals(allTasks.size(), (long) internalTopicManager.readyTopics.get(APPLICATION_ID + "-topicZ"));
}
Also used: TaskId (org.apache.kafka.streams.processor.TaskId), MockInternalTopicManager (org.apache.kafka.test.MockInternalTopicManager), GroupSubscription (org.apache.kafka.clients.consumer.ConsumerPartitionAssignor.GroupSubscription), Subscription (org.apache.kafka.clients.consumer.ConsumerPartitionAssignor.Subscription), Test (org.junit.Test)
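
For orientation before the remaining examples: GroupSubscription is only a thin wrapper around the per-member Subscription map that the group leader hands to the assignor. Below is a minimal sketch of that relationship, assuming the Kafka 2.4+ consumer API; the class and member names are illustrative and not taken from the Kafka test suite.

import static java.util.Arrays.asList;

import java.util.HashMap;
import java.util.Map;
import org.apache.kafka.clients.consumer.ConsumerPartitionAssignor.GroupSubscription;
import org.apache.kafka.clients.consumer.ConsumerPartitionAssignor.Subscription;

public class GroupSubscriptionSketch {
    public static void main(final String[] args) {
        // Each group member contributes one Subscription (topics plus optional userData).
        final Map<String, Subscription> members = new HashMap<>();
        members.put("consumer-a", new Subscription(asList("topic1")));
        members.put("consumer-b", new Subscription(asList("topic1")));
        // The leader wraps the map and passes it to ConsumerPartitionAssignor#assign.
        final GroupSubscription group = new GroupSubscription(members);
        // The assignor reads the same map back via groupSubscription().
        System.out.println(group.groupSubscription().get("consumer-a").topics());
    }
}

In the Streams tests above, the userData slot of each Subscription carries the encoded subscription info produced by calls such as defaultSubscriptionInfo.encode() and getInfo(...).encode().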

Example 12 with GroupSubscription

Use of org.apache.kafka.clients.consumer.ConsumerPartitionAssignor.GroupSubscription in project kafka by apache.

From the class HighAvailabilityStreamsPartitionAssignorTest, method shouldReturnAllActiveTasksToPreviousOwnerRegardlessOfBalanceAndTriggerRebalanceIfEndOffsetFetchFailsAndHighAvailabilityEnabled:

@Test
public void shouldReturnAllActiveTasksToPreviousOwnerRegardlessOfBalanceAndTriggerRebalanceIfEndOffsetFetchFailsAndHighAvailabilityEnabled() {
    final long rebalanceInterval = 5 * 60 * 1000L;
    builder.addSource(null, "source1", null, null, null, "topic1");
    builder.addProcessor("processor1", new MockApiProcessorSupplier<>(), "source1");
    builder.addStateStore(new MockKeyValueStoreBuilder("store1", false), "processor1");
    final Set<TaskId> allTasks = mkSet(TASK_0_0, TASK_0_1, TASK_0_2);
    createMockTaskManager(allTasks);
    adminClient = EasyMock.createMock(AdminClient.class);
    expect(adminClient.listOffsets(anyObject())).andThrow(new StreamsException("Should be handled"));
    configurePartitionAssignorWith(singletonMap(StreamsConfig.PROBING_REBALANCE_INTERVAL_MS_CONFIG, rebalanceInterval));
    final String firstConsumer = "consumer1";
    final String newConsumer = "consumer2";
    subscriptions.put(firstConsumer, new Subscription(singletonList("source1"), getInfo(UUID_1, allTasks).encode()));
    subscriptions.put(newConsumer, new Subscription(singletonList("source1"), getInfo(UUID_2, EMPTY_TASKS).encode()));
    final Map<String, Assignment> assignments = partitionAssignor.assign(metadata, new GroupSubscription(subscriptions)).groupAssignment();
    final AssignmentInfo firstConsumerUserData = AssignmentInfo.decode(assignments.get(firstConsumer).userData());
    final List<TaskId> firstConsumerActiveTasks = firstConsumerUserData.activeTasks();
    final AssignmentInfo newConsumerUserData = AssignmentInfo.decode(assignments.get(newConsumer).userData());
    final List<TaskId> newConsumerActiveTasks = newConsumerUserData.activeTasks();
    // The tasks were returned to their prior owner
    final ArrayList<TaskId> sortedExpectedTasks = new ArrayList<>(allTasks);
    Collections.sort(sortedExpectedTasks);
    assertThat(firstConsumerActiveTasks, equalTo(sortedExpectedTasks));
    assertThat(newConsumerActiveTasks, empty());
    // There is a rebalance scheduled
    assertThat(time.milliseconds() + rebalanceInterval, anyOf(is(firstConsumerUserData.nextRebalanceMs()), is(newConsumerUserData.nextRebalanceMs())));
}
Also used: TaskId (org.apache.kafka.streams.processor.TaskId), StreamsException (org.apache.kafka.streams.errors.StreamsException), ArrayList (java.util.ArrayList), Assignment (org.apache.kafka.clients.consumer.ConsumerPartitionAssignor.Assignment), AssignmentInfo (org.apache.kafka.streams.processor.internals.assignment.AssignmentInfo), GroupSubscription (org.apache.kafka.clients.consumer.ConsumerPartitionAssignor.GroupSubscription), Subscription (org.apache.kafka.clients.consumer.ConsumerPartitionAssignor.Subscription), MockKeyValueStoreBuilder (org.apache.kafka.test.MockKeyValueStoreBuilder), AdminClient (org.apache.kafka.clients.admin.AdminClient), Test (org.junit.Test)

Example 13 with GroupSubscription

Use of org.apache.kafka.clients.consumer.ConsumerPartitionAssignor.GroupSubscription in project kafka by apache.

From the class HighAvailabilityStreamsPartitionAssignorTest, method shouldScheduleProbingRebalanceOnThisClientIfWarmupTasksRequired:

@Test
public void shouldScheduleProbingRebalanceOnThisClientIfWarmupTasksRequired() {
    final long rebalanceInterval = 5 * 60 * 1000L;
    builder.addSource(null, "source1", null, null, null, "topic1");
    builder.addProcessor("processor1", new MockApiProcessorSupplier<>(), "source1");
    builder.addStateStore(new MockKeyValueStoreBuilder("store1", false), "processor1");
    final Set<TaskId> allTasks = mkSet(TASK_0_0, TASK_0_1, TASK_0_2);
    createMockTaskManager(allTasks);
    createMockAdminClient(getTopicPartitionOffsetsMap(singletonList(APPLICATION_ID + "-store1-changelog"), singletonList(3)));
    configurePartitionAssignorWith(singletonMap(StreamsConfig.PROBING_REBALANCE_INTERVAL_MS_CONFIG, rebalanceInterval));
    final String firstConsumer = "consumer1";
    final String newConsumer = "consumer2";
    subscriptions.put(firstConsumer, new Subscription(singletonList("source1"), getInfo(UUID_1, allTasks).encode()));
    subscriptions.put(newConsumer, new Subscription(singletonList("source1"), getInfo(UUID_2, EMPTY_TASKS).encode()));
    final Map<String, Assignment> assignments = partitionAssignor.assign(metadata, new GroupSubscription(subscriptions)).groupAssignment();
    final List<TaskId> firstConsumerActiveTasks = AssignmentInfo.decode(assignments.get(firstConsumer).userData()).activeTasks();
    final List<TaskId> newConsumerActiveTasks = AssignmentInfo.decode(assignments.get(newConsumer).userData()).activeTasks();
    final ArrayList<TaskId> sortedExpectedTasks = new ArrayList<>(allTasks);
    Collections.sort(sortedExpectedTasks);
    assertThat(firstConsumerActiveTasks, equalTo(sortedExpectedTasks));
    assertThat(newConsumerActiveTasks, empty());
    assertThat(referenceContainer.assignmentErrorCode.get(), equalTo(AssignorError.NONE.code()));
    final long nextScheduledRebalanceOnThisClient = AssignmentInfo.decode(assignments.get(firstConsumer).userData()).nextRebalanceMs();
    final long nextScheduledRebalanceOnOtherClient = AssignmentInfo.decode(assignments.get(newConsumer).userData()).nextRebalanceMs();
    assertThat(nextScheduledRebalanceOnThisClient, equalTo(time.milliseconds() + rebalanceInterval));
    assertThat(nextScheduledRebalanceOnOtherClient, equalTo(Long.MAX_VALUE));
}
Also used: TaskId (org.apache.kafka.streams.processor.TaskId), ArrayList (java.util.ArrayList), Assignment (org.apache.kafka.clients.consumer.ConsumerPartitionAssignor.Assignment), GroupSubscription (org.apache.kafka.clients.consumer.ConsumerPartitionAssignor.GroupSubscription), Subscription (org.apache.kafka.clients.consumer.ConsumerPartitionAssignor.Subscription), MockKeyValueStoreBuilder (org.apache.kafka.test.MockKeyValueStoreBuilder), Test (org.junit.Test)

Example 14 with GroupSubscription

Use of org.apache.kafka.clients.consumer.ConsumerPartitionAssignor.GroupSubscription in project kafka by apache.

From the class ConsumerCoordinator, method onLeaderElected, where the elected group leader deserializes every member's Subscription, runs the selected assignor over the resulting GroupSubscription, and serializes the produced assignments:

@Override
protected Map<String, ByteBuffer> onLeaderElected(String leaderId, String assignmentStrategy, List<JoinGroupResponseData.JoinGroupResponseMember> allSubscriptions, boolean skipAssignment) {
    ConsumerPartitionAssignor assignor = lookupAssignor(assignmentStrategy);
    if (assignor == null)
        throw new IllegalStateException("Coordinator selected invalid assignment protocol: " + assignmentStrategy);
    String assignorName = assignor.name();
    Set<String> allSubscribedTopics = new HashSet<>();
    Map<String, Subscription> subscriptions = new HashMap<>();
    // collect all the owned partitions
    Map<String, List<TopicPartition>> ownedPartitions = new HashMap<>();
    for (JoinGroupResponseData.JoinGroupResponseMember memberSubscription : allSubscriptions) {
        Subscription subscription = ConsumerProtocol.deserializeSubscription(ByteBuffer.wrap(memberSubscription.metadata()));
        subscription.setGroupInstanceId(Optional.ofNullable(memberSubscription.groupInstanceId()));
        subscriptions.put(memberSubscription.memberId(), subscription);
        allSubscribedTopics.addAll(subscription.topics());
        ownedPartitions.put(memberSubscription.memberId(), subscription.ownedPartitions());
    }
    // the leader will begin watching for changes to any of the topics the group is interested in,
    // which ensures that all metadata changes will eventually be seen
    updateGroupSubscription(allSubscribedTopics);
    isLeader = true;
    if (skipAssignment) {
        log.info("Skipped assignment for returning static leader at generation {}. The static leader " + "will continue with its existing assignment.", generation().generationId);
        assignmentSnapshot = metadataSnapshot;
        return Collections.emptyMap();
    }
    log.debug("Performing assignment using strategy {} with subscriptions {}", assignorName, subscriptions);
    Map<String, Assignment> assignments = assignor.assign(metadata.fetch(), new GroupSubscription(subscriptions)).groupAssignment();
    // the "generation" of ownedPartition inside the assignor
    if (protocol == RebalanceProtocol.COOPERATIVE && !assignorName.equals(COOPERATIVE_STICKY_ASSIGNOR_NAME)) {
        validateCooperativeAssignment(ownedPartitions, assignments);
    }
    maybeUpdateGroupSubscription(assignorName, assignments, allSubscribedTopics);
    // metadataSnapshot could be updated when the subscription is updated therefore
    // we must take the assignment snapshot after.
    assignmentSnapshot = metadataSnapshot;
    log.info("Finished assignment for group at generation {}: {}", generation().generationId, assignments);
    Map<String, ByteBuffer> groupAssignment = new HashMap<>();
    for (Map.Entry<String, Assignment> assignmentEntry : assignments.entrySet()) {
        ByteBuffer buffer = ConsumerProtocol.serializeAssignment(assignmentEntry.getValue());
        groupAssignment.put(assignmentEntry.getKey(), buffer);
    }
    return groupAssignment;
}
Also used: HashMap (java.util.HashMap), JoinGroupResponseData (org.apache.kafka.common.message.JoinGroupResponseData), ByteBuffer (java.nio.ByteBuffer), Assignment (org.apache.kafka.clients.consumer.ConsumerPartitionAssignor.Assignment), List (java.util.List), ArrayList (java.util.ArrayList), GroupSubscription (org.apache.kafka.clients.consumer.ConsumerPartitionAssignor.GroupSubscription), ConsumerPartitionAssignor (org.apache.kafka.clients.consumer.ConsumerPartitionAssignor), Subscription (org.apache.kafka.clients.consumer.ConsumerPartitionAssignor.Subscription), Map (java.util.Map), HashSet (java.util.HashSet)
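
To make the contract that onLeaderElected relies on concrete, here is a hedged sketch of a custom ConsumerPartitionAssignor showing the GroupSubscription-in, GroupAssignment-out flow. It is not an existing Kafka assignor; the class name and the deliberately naive policy (the first member that subscribes to a topic gets all of its partitions) are illustrative only.

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.apache.kafka.clients.consumer.ConsumerPartitionAssignor;
import org.apache.kafka.common.Cluster;
import org.apache.kafka.common.PartitionInfo;
import org.apache.kafka.common.TopicPartition;

// Illustrative sketch only, not part of Apache Kafka.
public class FirstSubscriberAssignor implements ConsumerPartitionAssignor {

    @Override
    public GroupAssignment assign(final Cluster metadata, final GroupSubscription groupSubscription) {
        final Map<String, List<TopicPartition>> partitionsByMember = new HashMap<>();
        final Map<String, String> topicOwner = new HashMap<>();
        // Walk the per-member subscriptions that the group leader collected.
        for (final Map.Entry<String, Subscription> member : groupSubscription.groupSubscription().entrySet()) {
            partitionsByMember.put(member.getKey(), new ArrayList<>());
            for (final String topic : member.getValue().topics()) {
                // Naive policy: the first member seen for a topic owns all of its partitions.
                topicOwner.putIfAbsent(topic, member.getKey());
            }
        }
        // Expand each owned topic into its partitions using the cluster metadata.
        for (final Map.Entry<String, String> owned : topicOwner.entrySet()) {
            for (final PartitionInfo partition : metadata.partitionsForTopic(owned.getKey())) {
                partitionsByMember.get(owned.getValue()).add(new TopicPartition(partition.topic(), partition.partition()));
            }
        }
        // Package the per-member partition lists into the GroupAssignment the coordinator expects.
        final Map<String, Assignment> assignments = new HashMap<>();
        for (final Map.Entry<String, List<TopicPartition>> member : partitionsByMember.entrySet()) {
            assignments.put(member.getKey(), new Assignment(member.getValue()));
        }
        return new GroupAssignment(assignments);
    }

    @Override
    public String name() {
        return "first-subscriber";
    }
}

ConsumerCoordinator then serializes each Assignment with ConsumerProtocol.serializeAssignment, exactly as in the loop at the end of onLeaderElected above.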

Example 15 with GroupSubscription

Use of org.apache.kafka.clients.consumer.ConsumerPartitionAssignor.GroupSubscription in project kafka by apache.

From the class StreamsAssignmentScaleTest, method completeLargeAssignment, which times two consecutive large-scale assignments and fails if either exceeds MAX_ASSIGNMENT_DURATION:

private void completeLargeAssignment(final int numPartitions, final int numClients, final int numThreadsPerClient, final int numStandbys, final Class<? extends TaskAssignor> taskAssignor) {
    final List<String> topic = singletonList("topic");
    final Map<TopicPartition, Long> changelogEndOffsets = new HashMap<>();
    for (int p = 0; p < numPartitions; ++p) {
        changelogEndOffsets.put(new TopicPartition(APPLICATION_ID + "-store-changelog", p), 100_000L);
    }
    final List<PartitionInfo> partitionInfos = new ArrayList<>();
    for (int p = 0; p < numPartitions; ++p) {
        partitionInfos.add(new PartitionInfo("topic", p, Node.noNode(), new Node[0], new Node[0]));
    }
    final Cluster clusterMetadata = new Cluster("cluster", Collections.singletonList(Node.noNode()), partitionInfos, emptySet(), emptySet());
    final Map<String, Object> configMap = new HashMap<>();
    configMap.put(StreamsConfig.APPLICATION_ID_CONFIG, APPLICATION_ID);
    configMap.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:8080");
    final InternalTopologyBuilder builder = new InternalTopologyBuilder();
    builder.addSource(null, "source", null, null, null, "topic");
    builder.addProcessor("processor", new MockApiProcessorSupplier<>(), "source");
    builder.addStateStore(new MockKeyValueStoreBuilder("store", false), "processor");
    final TopologyMetadata topologyMetadata = new TopologyMetadata(builder, new StreamsConfig(configMap));
    topologyMetadata.buildAndRewriteTopology();
    final Consumer<byte[], byte[]> mainConsumer = EasyMock.createNiceMock(Consumer.class);
    final TaskManager taskManager = EasyMock.createNiceMock(TaskManager.class);
    expect(taskManager.topologyMetadata()).andStubReturn(topologyMetadata);
    expect(mainConsumer.committed(new HashSet<>())).andStubReturn(Collections.emptyMap());
    final AdminClient adminClient = createMockAdminClientForAssignor(changelogEndOffsets);
    final ReferenceContainer referenceContainer = new ReferenceContainer();
    referenceContainer.mainConsumer = mainConsumer;
    referenceContainer.adminClient = adminClient;
    referenceContainer.taskManager = taskManager;
    referenceContainer.streamsMetadataState = EasyMock.createNiceMock(StreamsMetadataState.class);
    referenceContainer.time = new MockTime();
    configMap.put(InternalConfig.REFERENCE_CONTAINER_PARTITION_ASSIGNOR, referenceContainer);
    configMap.put(InternalConfig.INTERNAL_TASK_ASSIGNOR_CLASS, taskAssignor.getName());
    configMap.put(StreamsConfig.NUM_STANDBY_REPLICAS_CONFIG, numStandbys);
    final MockInternalTopicManager mockInternalTopicManager = new MockInternalTopicManager(new MockTime(), new StreamsConfig(configMap), new MockClientSupplier().restoreConsumer, false);
    EasyMock.replay(taskManager, adminClient, mainConsumer);
    final StreamsPartitionAssignor partitionAssignor = new StreamsPartitionAssignor();
    partitionAssignor.configure(configMap);
    partitionAssignor.setInternalTopicManager(mockInternalTopicManager);
    final Map<String, Subscription> subscriptions = new HashMap<>();
    for (int client = 0; client < numClients; ++client) {
        for (int i = 0; i < numThreadsPerClient; ++i) {
            subscriptions.put(getConsumerName(i, client), new Subscription(topic, getInfo(uuidForInt(client), EMPTY_TASKS, EMPTY_TASKS).encode()));
        }
    }
    final long firstAssignmentStartMs = System.currentTimeMillis();
    final Map<String, Assignment> firstAssignments = partitionAssignor.assign(clusterMetadata, new GroupSubscription(subscriptions)).groupAssignment();
    final long firstAssignmentEndMs = System.currentTimeMillis();
    final long firstAssignmentDuration = firstAssignmentEndMs - firstAssignmentStartMs;
    if (firstAssignmentDuration > MAX_ASSIGNMENT_DURATION) {
        throw new AssertionError("The first assignment took too long to complete at " + firstAssignmentDuration + "ms.");
    } else {
        log.info("First assignment took {}ms.", firstAssignmentDuration);
    }
    // Use the assignment to generate the subscriptions' prev task data for the next rebalance
    for (int client = 0; client < numClients; ++client) {
        for (int i = 0; i < numThreadsPerClient; ++i) {
            final String consumer = getConsumerName(i, client);
            final Assignment assignment = firstAssignments.get(consumer);
            final AssignmentInfo info = AssignmentInfo.decode(assignment.userData());
            subscriptions.put(consumer, new Subscription(topic, getInfo(uuidForInt(client), new HashSet<>(info.activeTasks()), info.standbyTasks().keySet()).encode(), assignment.partitions()));
        }
    }
    final long secondAssignmentStartMs = System.currentTimeMillis();
    final Map<String, Assignment> secondAssignments = partitionAssignor.assign(clusterMetadata, new GroupSubscription(subscriptions)).groupAssignment();
    final long secondAssignmentEndMs = System.currentTimeMillis();
    final long secondAssignmentDuration = secondAssignmentEndMs - secondAssignmentStartMs;
    if (secondAssignmentDuration > MAX_ASSIGNMENT_DURATION) {
        throw new AssertionError("The second assignment took too long to complete at " + secondAssignmentDuration + "ms.");
    } else {
        log.info("Second assignment took {}ms.", secondAssignmentDuration);
    }
    assertThat(secondAssignments.size(), is(numClients * numThreadsPerClient));
}
Also used: ReferenceContainer (org.apache.kafka.streams.processor.internals.assignment.ReferenceContainer), HashMap (java.util.HashMap), MockInternalTopicManager (org.apache.kafka.test.MockInternalTopicManager), Node (org.apache.kafka.common.Node), ArrayList (java.util.ArrayList), Assignment (org.apache.kafka.clients.consumer.ConsumerPartitionAssignor.Assignment), AssignmentInfo (org.apache.kafka.streams.processor.internals.assignment.AssignmentInfo), MockClientSupplier (org.apache.kafka.test.MockClientSupplier), GroupSubscription (org.apache.kafka.clients.consumer.ConsumerPartitionAssignor.GroupSubscription), PartitionInfo (org.apache.kafka.common.PartitionInfo), Subscription (org.apache.kafka.clients.consumer.ConsumerPartitionAssignor.Subscription), MockTime (org.apache.kafka.common.utils.MockTime), StreamsConfig (org.apache.kafka.streams.StreamsConfig), HashSet (java.util.HashSet), Cluster (org.apache.kafka.common.Cluster), TopicPartition (org.apache.kafka.common.TopicPartition), MockKeyValueStoreBuilder (org.apache.kafka.test.MockKeyValueStoreBuilder), AdminClient (org.apache.kafka.clients.admin.AdminClient)

Aggregations (types used together with GroupSubscription, with usage counts)

GroupSubscription (org.apache.kafka.clients.consumer.ConsumerPartitionAssignor.GroupSubscription): 33 uses
Subscription (org.apache.kafka.clients.consumer.ConsumerPartitionAssignor.Subscription): 33 uses
Test (org.junit.Test): 29 uses
Assignment (org.apache.kafka.clients.consumer.ConsumerPartitionAssignor.Assignment): 24 uses
AssignmentInfo (org.apache.kafka.streams.processor.internals.assignment.AssignmentInfo): 18 uses
TaskId (org.apache.kafka.streams.processor.TaskId): 17 uses
MockKeyValueStoreBuilder (org.apache.kafka.test.MockKeyValueStoreBuilder): 15 uses
HashSet (java.util.HashSet): 13 uses
HashMap (java.util.HashMap): 10 uses
TopicPartition (org.apache.kafka.common.TopicPartition): 10 uses
Map (java.util.Map): 9 uses
Cluster (org.apache.kafka.common.Cluster): 9 uses
MockInternalTopicManager (org.apache.kafka.test.MockInternalTopicManager): 9 uses
ArrayList (java.util.ArrayList): 8 uses
Collections.emptyMap (java.util.Collections.emptyMap): 8 uses
Collections.singletonMap (java.util.Collections.singletonMap): 8 uses
AdminClient (org.apache.kafka.clients.admin.AdminClient): 8 uses
Utils.mkMap (org.apache.kafka.common.utils.Utils.mkMap): 8 uses
StreamsConfig (org.apache.kafka.streams.StreamsConfig): 8 uses
Matchers.anEmptyMap (org.hamcrest.Matchers.anEmptyMap): 8 uses