Example 26 with HostInfo

Use of org.apache.kafka.streams.state.HostInfo in project kafka by apache.

The class StreamsPartitionAssignor, method assign.

/*
 * This assigns tasks to consumer clients in the following steps.
 *
 * 0. decode the subscriptions to assemble the metadata for each client and check for version probing
 *
 * 1. check all repartition source topics and use the internal topic manager to make sure
 *    they have been created with the right number of partitions. Also verify and/or create
 *    any changelog topics with the correct number of partitions.
 *
 * 2. use the partition grouper to generate tasks along with their assigned partitions, then use
 *    the configured TaskAssignor to construct the mapping of tasks to clients.
 *
 * 3. construct the global mapping of host to partitions to enable query routing.
 *
 * 4. within each client, assign tasks to each of its consumers.
 */
@Override
public GroupAssignment assign(final Cluster metadata, final GroupSubscription groupSubscription) {
    final Map<String, Subscription> subscriptions = groupSubscription.groupSubscription();
    // ---------------- Step Zero ---------------- //
    // construct the client metadata from the decoded subscription info
    final Map<UUID, ClientMetadata> clientMetadataMap = new HashMap<>();
    final Set<TopicPartition> allOwnedPartitions = new HashSet<>();
    int minReceivedMetadataVersion = LATEST_SUPPORTED_VERSION;
    int minSupportedMetadataVersion = LATEST_SUPPORTED_VERSION;
    boolean shutdownRequested = false;
    boolean assignmentErrorFound = false;
    int futureMetadataVersion = UNKNOWN;
    for (final Map.Entry<String, Subscription> entry : subscriptions.entrySet()) {
        final String consumerId = entry.getKey();
        final Subscription subscription = entry.getValue();
        final SubscriptionInfo info = SubscriptionInfo.decode(subscription.userData());
        final int usedVersion = info.version();
        if (info.errorCode() == AssignorError.SHUTDOWN_REQUESTED.code()) {
            shutdownRequested = true;
        }
        minReceivedMetadataVersion = updateMinReceivedVersion(usedVersion, minReceivedMetadataVersion);
        minSupportedMetadataVersion = updateMinSupportedVersion(info.latestSupportedVersion(), minSupportedMetadataVersion);
        final UUID processId;
        if (usedVersion > LATEST_SUPPORTED_VERSION) {
            futureMetadataVersion = usedVersion;
            processId = FUTURE_ID;
            if (!clientMetadataMap.containsKey(FUTURE_ID)) {
                clientMetadataMap.put(FUTURE_ID, new ClientMetadata(null));
            }
        } else {
            processId = info.processId();
        }
        ClientMetadata clientMetadata = clientMetadataMap.get(processId);
        // create the new client metadata if necessary
        if (clientMetadata == null) {
            clientMetadata = new ClientMetadata(info.userEndPoint());
            clientMetadataMap.put(info.processId(), clientMetadata);
        }
        // add the consumer and any info in its subscription to the client
        clientMetadata.addConsumer(consumerId, subscription.ownedPartitions());
        final int prevSize = allOwnedPartitions.size();
        allOwnedPartitions.addAll(subscription.ownedPartitions());
        if (allOwnedPartitions.size() < prevSize + subscription.ownedPartitions().size()) {
            assignmentErrorFound = true;
        }
        clientMetadata.addPreviousTasksAndOffsetSums(consumerId, info.taskOffsetSums());
    }
    if (assignmentErrorFound) {
        log.warn("The previous assignment contains a partition more than once. " + "\t Mapping: {}", subscriptions);
    }
    try {
        final boolean versionProbing = checkMetadataVersions(minReceivedMetadataVersion, minSupportedMetadataVersion, futureMetadataVersion);
        log.debug("Constructed client metadata {} from the member subscriptions.", clientMetadataMap);
        if (shutdownRequested) {
            return new GroupAssignment(errorAssignment(clientMetadataMap, AssignorError.SHUTDOWN_REQUESTED.code()));
        }
        // ---------------- Step One ---------------- //
        // parse the topology to determine the repartition source topics,
        // making sure they are created with the number of partitions as
        // the maximum of the depending sub-topologies' source topics' number of partitions
        final RepartitionTopics repartitionTopics = prepareRepartitionTopics(metadata);
        final Map<TopicPartition, PartitionInfo> allRepartitionTopicPartitions = repartitionTopics.topicPartitionsInfo();
        final Cluster fullMetadata = metadata.withPartitions(allRepartitionTopicPartitions);
        log.debug("Created repartition topics {} from the parsed topology.", allRepartitionTopicPartitions.values());
        // ---------------- Step Two ---------------- //
        // construct the assignment of tasks to clients
        final Map<Subtopology, TopicsInfo> topicGroups = taskManager.topologyMetadata().subtopologyTopicsInfoMapExcluding(repartitionTopics.topologiesWithMissingInputTopics());
        final Set<String> allSourceTopics = new HashSet<>();
        final Map<Subtopology, Set<String>> sourceTopicsByGroup = new HashMap<>();
        for (final Map.Entry<Subtopology, TopicsInfo> entry : topicGroups.entrySet()) {
            allSourceTopics.addAll(entry.getValue().sourceTopics);
            sourceTopicsByGroup.put(entry.getKey(), entry.getValue().sourceTopics);
        }
        // get the tasks as partition groups from the partition grouper
        final Map<TaskId, Set<TopicPartition>> partitionsForTask = partitionGrouper.partitionGroups(sourceTopicsByGroup, fullMetadata);
        final Set<TaskId> statefulTasks = new HashSet<>();
        final boolean probingRebalanceNeeded = assignTasksToClients(fullMetadata, allSourceTopics, topicGroups, clientMetadataMap, partitionsForTask, statefulTasks);
        // ---------------- Step Three ---------------- //
        // construct the global partition assignment per host map
        final Map<HostInfo, Set<TopicPartition>> partitionsByHost = new HashMap<>();
        final Map<HostInfo, Set<TopicPartition>> standbyPartitionsByHost = new HashMap<>();
        if (minReceivedMetadataVersion >= 2) {
            populatePartitionsByHostMaps(partitionsByHost, standbyPartitionsByHost, partitionsForTask, clientMetadataMap);
        }
        streamsMetadataState.onChange(partitionsByHost, standbyPartitionsByHost, fullMetadata);
        // ---------------- Step Four ---------------- //
        // compute the assignment of tasks to threads within each client and build the final group assignment
        final Map<String, Assignment> assignment = computeNewAssignment(statefulTasks, clientMetadataMap, partitionsForTask, partitionsByHost, standbyPartitionsByHost, allOwnedPartitions, minReceivedMetadataVersion, minSupportedMetadataVersion, versionProbing, probingRebalanceNeeded);
        return new GroupAssignment(assignment);
    } catch (final MissingSourceTopicException e) {
        log.error("Caught an error in the task assignment. Returning an error assignment.", e);
        return new GroupAssignment(errorAssignment(clientMetadataMap, AssignorError.INCOMPLETE_SOURCE_TOPIC_METADATA.code()));
    } catch (final TaskAssignmentException e) {
        log.error("Caught an error in the task assignment. Returning an error assignment.", e);
        return new GroupAssignment(errorAssignment(clientMetadataMap, AssignorError.ASSIGNMENT_ERROR.code()));
    }
}
Also used: SortedSet (java.util.SortedSet), Set (java.util.Set), TreeSet (java.util.TreeSet), HashSet (java.util.HashSet), TaskId (org.apache.kafka.streams.processor.TaskId), HashMap (java.util.HashMap), SubscriptionInfo (org.apache.kafka.streams.processor.internals.assignment.SubscriptionInfo), TopicsInfo (org.apache.kafka.streams.processor.internals.InternalTopologyBuilder.TopicsInfo), PartitionInfo (org.apache.kafka.common.PartitionInfo), UUID (java.util.UUID), UUID.randomUUID (java.util.UUID.randomUUID), TaskAssignmentException (org.apache.kafka.streams.errors.TaskAssignmentException), Cluster (org.apache.kafka.common.Cluster), MissingSourceTopicException (org.apache.kafka.streams.errors.MissingSourceTopicException), TopicPartition (org.apache.kafka.common.TopicPartition), Map (java.util.Map), Utils.filterMap (org.apache.kafka.common.utils.Utils.filterMap), TreeMap (java.util.TreeMap), Subtopology (org.apache.kafka.streams.processor.internals.TopologyMetadata.Subtopology), HostInfo (org.apache.kafka.streams.state.HostInfo)
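
For context on where the HostInfo values come from: the userEndPoint carried in each subscription is the consumer's application.server config value, which ClientMetadata later turns into a HostInfo. A minimal standalone sketch of that host:port-to-HostInfo mapping, using only the public HostInfo constructor; the endpoint value and the parsing helper are illustrative assumptions, not code from the assignor.

import org.apache.kafka.streams.state.HostInfo;

public class EndpointToHostInfo {
    // hypothetical helper mirroring how a "host:port" endpoint becomes a HostInfo
    static HostInfo parseEndpoint(final String endpoint) {
        final int idx = endpoint.lastIndexOf(':');
        final String host = endpoint.substring(0, idx);
        final int port = Integer.parseInt(endpoint.substring(idx + 1));
        return new HostInfo(host, port);
    }

    public static void main(final String[] args) {
        // "localhost:8080" stands in for a StreamsConfig.APPLICATION_SERVER_CONFIG value
        final HostInfo hostInfo = parseEndpoint("localhost:8080");
        System.out.println(hostInfo.host() + ":" + hostInfo.port()); // prints localhost:8080
    }
}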

Example 27 with HostInfo

Use of org.apache.kafka.streams.state.HostInfo in project kafka by apache.

The class StreamsPartitionAssignor, method populatePartitionsByHostMaps.

/**
 * Populates the global partitionsByHost and standbyPartitionsByHost maps that are sent to each member
 *
 * @param partitionsByHost a map from host to the set of partitions hosted there. Populated here.
 * @param standbyPartitionsByHost a map from host to the set of standby partitions hosted there. Populated here.
 * @param partitionsForTask a map from task to its set of assigned partitions
 * @param clientMetadataMap a map from client to its metadata and state
 */
private void populatePartitionsByHostMaps(final Map<HostInfo, Set<TopicPartition>> partitionsByHost, final Map<HostInfo, Set<TopicPartition>> standbyPartitionsByHost, final Map<TaskId, Set<TopicPartition>> partitionsForTask, final Map<UUID, ClientMetadata> clientMetadataMap) {
    for (final Map.Entry<UUID, ClientMetadata> entry : clientMetadataMap.entrySet()) {
        final HostInfo hostInfo = entry.getValue().hostInfo;
        // if application server is configured, also include host state map
        if (hostInfo != null) {
            final Set<TopicPartition> topicPartitions = new HashSet<>();
            final Set<TopicPartition> standbyPartitions = new HashSet<>();
            final ClientState state = entry.getValue().state;
            for (final TaskId id : state.activeTasks()) {
                topicPartitions.addAll(partitionsForTask.get(id));
            }
            for (final TaskId id : state.standbyTasks()) {
                standbyPartitions.addAll(partitionsForTask.get(id));
            }
            partitionsByHost.put(hostInfo, topicPartitions);
            standbyPartitionsByHost.put(hostInfo, standbyPartitions);
        }
    }
}
Also used: ClientState (org.apache.kafka.streams.processor.internals.assignment.ClientState), TaskId (org.apache.kafka.streams.processor.TaskId), TopicPartition (org.apache.kafka.common.TopicPartition), UUID (java.util.UUID), UUID.randomUUID (java.util.UUID.randomUUID), Map (java.util.Map), Utils.filterMap (org.apache.kafka.common.utils.Utils.filterMap), HashMap (java.util.HashMap), TreeMap (java.util.TreeMap), HostInfo (org.apache.kafka.streams.state.HostInfo), HashSet (java.util.HashSet)
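
These two maps are what ultimately back interactive-query routing on the client side. A minimal sketch of how an application might consume them, assuming the Kafka 2.7+ public API (KafkaStreams#queryMetadataForKey, KeyQueryMetadata#activeHost and #standbyHosts); the store name and the running KafkaStreams instance are assumptions.

import java.util.Set;
import org.apache.kafka.common.serialization.Serdes;
import org.apache.kafka.streams.KafkaStreams;
import org.apache.kafka.streams.KeyQueryMetadata;
import org.apache.kafka.streams.state.HostInfo;

public class QueryRouting {
    // 'streams' must be a running KafkaStreams instance; "word-counts" is a hypothetical store name
    static void printHostsForKey(final KafkaStreams streams, final String key) {
        final KeyQueryMetadata metadata =
            streams.queryMetadataForKey("word-counts", key, Serdes.String().serializer());
        // activeHost() is served from the partitionsByHost map populated above
        final HostInfo active = metadata.activeHost();
        // standbyHosts() is served from the standbyPartitionsByHost map
        final Set<HostInfo> standbys = metadata.standbyHosts();
        System.out.println("active: " + active + ", standbys: " + standbys);
    }
}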

Example 28 with HostInfo

Use of org.apache.kafka.streams.state.HostInfo in project kafka by apache.

The class AssignmentInfo, method encodeHostPartitionMapUsingDictionary.

private void encodeHostPartitionMapUsingDictionary(final DataOutputStream out, final Map<String, Integer> topicNameDict, final Map<HostInfo, Set<TopicPartition>> hostPartitionMap) throws IOException {
    // encode partitions by host
    out.writeInt(hostPartitionMap.size());
    // Write the topic index, partition
    for (final Map.Entry<HostInfo, Set<TopicPartition>> entry : hostPartitionMap.entrySet()) {
        writeHostInfo(out, entry.getKey());
        out.writeInt(entry.getValue().size());
        for (final TopicPartition partition : entry.getValue()) {
            out.writeInt(topicNameDict.get(partition.topic()));
            out.writeInt(partition.partition());
        }
    }
}
Also used: HashSet (java.util.HashSet), Set (java.util.Set), TopicPartition (org.apache.kafka.common.TopicPartition), HashMap (java.util.HashMap), Map (java.util.Map), HostInfo (org.apache.kafka.streams.state.HostInfo)
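
The writeHostInfo helper called above is not shown in this example. A plausible reconstruction, assuming each HostInfo serializes as a modified-UTF-8 host string followed by a 4-byte port; the method body is an inference from the surrounding format, not quoted source.

import java.io.DataOutputStream;
import java.io.IOException;
import org.apache.kafka.streams.state.HostInfo;

final class HostInfoWireFormat {
    // assumed shape of the helper: host as modified UTF-8, then port as an int,
    // so a decoder would mirror it with readUTF() followed by readInt()
    static void writeHostInfo(final DataOutputStream out, final HostInfo hostInfo) throws IOException {
        out.writeUTF(hostInfo.host());
        out.writeInt(hostInfo.port());
    }
}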

Example 29 with HostInfo

Use of org.apache.kafka.streams.state.HostInfo in project kafka by apache.

The class StreamPartitionAssignor, method assign.

/*
 * This assigns tasks to consumer clients in the following steps.
 *
 * 0. check all repartition source topics and use the internal topic manager to make sure
 *    they have been created with the right number of partitions.
 *
 * 1. use the user-customized partition grouper to generate tasks along with their
 *    assigned partitions; also make sure that the tasks' corresponding changelog topics
 *    have been created with the right number of partitions.
 *
 * 2. use the TaskAssignor to assign tasks to consumer clients.
 *    - Assign a task to a client that was running it previously.
 *      If there is no such client, assign it to a client that holds valid local state for it.
 *    - A client may have more than one stream thread.
 *      The assignor tries to assign tasks to a client proportionally to its number of threads.
 *    - Try not to assign the same set of tasks to two different clients.
 *    The assignment is done in a single pass, so the result may not satisfy all of the above.
 *
 * 3. within each client, tasks are assigned to consumers in a round-robin manner.
 */
@Override
public Map<String, Assignment> assign(Cluster metadata, Map<String, Subscription> subscriptions) {
    // construct the client metadata from the decoded subscription info
    Map<UUID, ClientMetadata> clientsMetadata = new HashMap<>();
    for (Map.Entry<String, Subscription> entry : subscriptions.entrySet()) {
        String consumerId = entry.getKey();
        Subscription subscription = entry.getValue();
        SubscriptionInfo info = SubscriptionInfo.decode(subscription.userData());
        // create the new client metadata if necessary
        ClientMetadata clientMetadata = clientsMetadata.get(info.processId);
        if (clientMetadata == null) {
            clientMetadata = new ClientMetadata(info.userEndPoint);
            clientsMetadata.put(info.processId, clientMetadata);
        }
        // add the consumer to the client
        clientMetadata.addConsumer(consumerId, info);
    }
    log.info("stream-thread [{}] Constructed client metadata {} from the member subscriptions.", streamThread.getName(), clientsMetadata);
    // ---------------- Step Zero ---------------- //
    // parse the topology to determine the repartition source topics,
    // making sure they are created with the number of partitions as
    // the maximum of the depending sub-topologies source topics' number of partitions
    Map<Integer, TopologyBuilder.TopicsInfo> topicGroups = streamThread.builder.topicGroups();
    Map<String, InternalTopicMetadata> repartitionTopicMetadata = new HashMap<>();
    for (TopologyBuilder.TopicsInfo topicsInfo : topicGroups.values()) {
        for (InternalTopicConfig topic : topicsInfo.repartitionSourceTopics.values()) {
            repartitionTopicMetadata.put(topic.name(), new InternalTopicMetadata(topic));
        }
    }
    boolean numPartitionsNeeded;
    do {
        numPartitionsNeeded = false;
        for (TopologyBuilder.TopicsInfo topicsInfo : topicGroups.values()) {
            for (String topicName : topicsInfo.repartitionSourceTopics.keySet()) {
                int numPartitions = repartitionTopicMetadata.get(topicName).numPartitions;
                // try to set the number of partitions for this repartition topic if it is not set yet
                if (numPartitions == UNKNOWN) {
                    for (TopologyBuilder.TopicsInfo otherTopicsInfo : topicGroups.values()) {
                        Set<String> otherSinkTopics = otherTopicsInfo.sinkTopics;
                        if (otherSinkTopics.contains(topicName)) {
                            // use the maximum of all its source topic partitions as the number of partitions
                            for (String sourceTopicName : otherTopicsInfo.sourceTopics) {
                                Integer numPartitionsCandidate;
                                // map().join().join(map())
                                if (repartitionTopicMetadata.containsKey(sourceTopicName)) {
                                    numPartitionsCandidate = repartitionTopicMetadata.get(sourceTopicName).numPartitions;
                                } else {
                                    numPartitionsCandidate = metadata.partitionCountForTopic(sourceTopicName);
                                    if (numPartitionsCandidate == null) {
                                        repartitionTopicMetadata.get(topicName).numPartitions = NOT_AVAILABLE;
                                    }
                                }
                                if (numPartitionsCandidate != null && numPartitionsCandidate > numPartitions) {
                                    numPartitions = numPartitionsCandidate;
                                }
                            }
                        }
                    }
                    // another iteration is needed
                    if (numPartitions == UNKNOWN)
                        numPartitionsNeeded = true;
                    else
                        repartitionTopicMetadata.get(topicName).numPartitions = numPartitions;
                }
            }
        }
    } while (numPartitionsNeeded);
    // augment the metadata with the newly computed number of partitions for all the
    // repartition source topics
    Map<TopicPartition, PartitionInfo> allRepartitionTopicPartitions = new HashMap<>();
    for (Map.Entry<String, InternalTopicMetadata> entry : repartitionTopicMetadata.entrySet()) {
        String topic = entry.getKey();
        Integer numPartitions = entry.getValue().numPartitions;
        for (int partition = 0; partition < numPartitions; partition++) {
            allRepartitionTopicPartitions.put(new TopicPartition(topic, partition), new PartitionInfo(topic, partition, null, new Node[0], new Node[0]));
        }
    }
    // ensure the co-partitioning topics within the group have the same number of partitions,
    // and enforce the number of partitions for those repartition topics to be the same if they
    // are co-partitioned as well.
    ensureCopartitioning(streamThread.builder.copartitionGroups(), repartitionTopicMetadata, metadata);
    // make sure the repartition source topics exist with the right number of partitions,
    // create these topics if necessary
    prepareTopic(repartitionTopicMetadata);
    metadataWithInternalTopics = metadata.withPartitions(allRepartitionTopicPartitions);
    log.debug("stream-thread [{}] Created repartition topics {} from the parsed topology.", streamThread.getName(), allRepartitionTopicPartitions.values());
    // ---------------- Step One ---------------- //
    // get the tasks as partition groups from the partition grouper
    Set<String> allSourceTopics = new HashSet<>();
    Map<Integer, Set<String>> sourceTopicsByGroup = new HashMap<>();
    for (Map.Entry<Integer, TopologyBuilder.TopicsInfo> entry : topicGroups.entrySet()) {
        allSourceTopics.addAll(entry.getValue().sourceTopics);
        sourceTopicsByGroup.put(entry.getKey(), entry.getValue().sourceTopics);
    }
    Map<TaskId, Set<TopicPartition>> partitionsForTask = streamThread.partitionGrouper.partitionGroups(sourceTopicsByGroup, metadataWithInternalTopics);
    // check that every partition is assigned and that no partition is assigned to more than one task
    Set<TopicPartition> allAssignedPartitions = new HashSet<>();
    Map<Integer, Set<TaskId>> tasksByTopicGroup = new HashMap<>();
    for (Map.Entry<TaskId, Set<TopicPartition>> entry : partitionsForTask.entrySet()) {
        Set<TopicPartition> partitions = entry.getValue();
        for (TopicPartition partition : partitions) {
            if (allAssignedPartitions.contains(partition)) {
                log.warn("stream-thread [{}] Partition {} is assigned to more than one tasks: {}", streamThread.getName(), partition, partitionsForTask);
            }
        }
        allAssignedPartitions.addAll(partitions);
        TaskId id = entry.getKey();
        Set<TaskId> ids = tasksByTopicGroup.get(id.topicGroupId);
        if (ids == null) {
            ids = new HashSet<>();
            tasksByTopicGroup.put(id.topicGroupId, ids);
        }
        ids.add(id);
    }
    for (String topic : allSourceTopics) {
        List<PartitionInfo> partitionInfoList = metadataWithInternalTopics.partitionsForTopic(topic);
        if (!partitionInfoList.isEmpty()) {
            for (PartitionInfo partitionInfo : partitionInfoList) {
                TopicPartition partition = new TopicPartition(partitionInfo.topic(), partitionInfo.partition());
                if (!allAssignedPartitions.contains(partition)) {
                    log.warn("stream-thread [{}] Partition {} is not assigned to any tasks: {}", streamThread.getName(), partition, partitionsForTask);
                }
            }
        } else {
            log.warn("stream-thread [{}] No partitions found for topic {}", streamThread.getName(), topic);
        }
    }
    // add tasks to state change log topic subscribers
    Map<String, InternalTopicMetadata> changelogTopicMetadata = new HashMap<>();
    for (Map.Entry<Integer, TopologyBuilder.TopicsInfo> entry : topicGroups.entrySet()) {
        final int topicGroupId = entry.getKey();
        final Map<String, InternalTopicConfig> stateChangelogTopics = entry.getValue().stateChangelogTopics;
        for (InternalTopicConfig topicConfig : stateChangelogTopics.values()) {
            // the expected number of partitions is the max value of TaskId.partition + 1
            int numPartitions = UNKNOWN;
            if (tasksByTopicGroup.get(topicGroupId) != null) {
                for (TaskId task : tasksByTopicGroup.get(topicGroupId)) {
                    if (numPartitions < task.partition + 1)
                        numPartitions = task.partition + 1;
                }
                InternalTopicMetadata topicMetadata = new InternalTopicMetadata(topicConfig);
                topicMetadata.numPartitions = numPartitions;
                changelogTopicMetadata.put(topicConfig.name(), topicMetadata);
            } else {
                log.debug("stream-thread [{}] No tasks found for topic group {}", streamThread.getName(), topicGroupId);
            }
        }
    }
    prepareTopic(changelogTopicMetadata);
    log.debug("stream-thread [{}] Created state changelog topics {} from the parsed topology.", streamThread.getName(), changelogTopicMetadata);
    // ---------------- Step Two ---------------- //
    // assign tasks to clients
    Map<UUID, ClientState> states = new HashMap<>();
    for (Map.Entry<UUID, ClientMetadata> entry : clientsMetadata.entrySet()) {
        states.put(entry.getKey(), entry.getValue().state);
    }
    log.debug("stream-thread [{}] Assigning tasks {} to clients {} with number of replicas {}", streamThread.getName(), partitionsForTask.keySet(), states, numStandbyReplicas);
    final StickyTaskAssignor<UUID> taskAssignor = new StickyTaskAssignor<>(states, partitionsForTask.keySet());
    taskAssignor.assign(numStandbyReplicas);
    log.info("stream-thread [{}] Assigned tasks to clients as {}.", streamThread.getName(), states);
    // ---------------- Step Three ---------------- //
    // construct the global partition assignment per host map
    partitionsByHostState = new HashMap<>();
    for (Map.Entry<UUID, ClientMetadata> entry : clientsMetadata.entrySet()) {
        HostInfo hostInfo = entry.getValue().hostInfo;
        if (hostInfo != null) {
            final Set<TopicPartition> topicPartitions = new HashSet<>();
            final ClientState state = entry.getValue().state;
            for (final TaskId id : state.activeTasks()) {
                topicPartitions.addAll(partitionsForTask.get(id));
            }
            partitionsByHostState.put(hostInfo, topicPartitions);
        }
    }
    // within the client, distribute tasks to its owned consumers
    Map<String, Assignment> assignment = new HashMap<>();
    for (Map.Entry<UUID, ClientMetadata> entry : clientsMetadata.entrySet()) {
        final Set<String> consumers = entry.getValue().consumers;
        final ClientState state = entry.getValue().state;
        final ArrayList<TaskId> taskIds = new ArrayList<>(state.assignedTaskCount());
        final int numActiveTasks = state.activeTaskCount();
        taskIds.addAll(state.activeTasks());
        taskIds.addAll(state.standbyTasks());
        final int numConsumers = consumers.size();
        int i = 0;
        for (String consumer : consumers) {
            Map<TaskId, Set<TopicPartition>> standby = new HashMap<>();
            ArrayList<AssignedPartition> assignedPartitions = new ArrayList<>();
            final int numTaskIds = taskIds.size();
            for (int j = i; j < numTaskIds; j += numConsumers) {
                TaskId taskId = taskIds.get(j);
                if (j < numActiveTasks) {
                    for (TopicPartition partition : partitionsForTask.get(taskId)) {
                        assignedPartitions.add(new AssignedPartition(taskId, partition));
                    }
                } else {
                    Set<TopicPartition> standbyPartitions = standby.get(taskId);
                    if (standbyPartitions == null) {
                        standbyPartitions = new HashSet<>();
                        standby.put(taskId, standbyPartitions);
                    }
                    standbyPartitions.addAll(partitionsForTask.get(taskId));
                }
            }
            Collections.sort(assignedPartitions);
            List<TaskId> active = new ArrayList<>();
            List<TopicPartition> activePartitions = new ArrayList<>();
            for (AssignedPartition partition : assignedPartitions) {
                active.add(partition.taskId);
                activePartitions.add(partition.partition);
            }
            // finally, encode the assignment before sending back to coordinator
            assignment.put(consumer, new Assignment(activePartitions, new AssignmentInfo(active, standby, partitionsByHostState).encode()));
            i++;
        }
    }
    return assignment;
}
Also used: ClientState (org.apache.kafka.streams.processor.internals.assignment.ClientState), HashMap (java.util.HashMap), Node (org.apache.kafka.common.Node), ArrayList (java.util.ArrayList), SubscriptionInfo (org.apache.kafka.streams.processor.internals.assignment.SubscriptionInfo), StickyTaskAssignor (org.apache.kafka.streams.processor.internals.assignment.StickyTaskAssignor), AssignmentInfo (org.apache.kafka.streams.processor.internals.assignment.AssignmentInfo), PartitionInfo (org.apache.kafka.common.PartitionInfo), UUID (java.util.UUID), HashSet (java.util.HashSet), TopicPartition (org.apache.kafka.common.TopicPartition), Map (java.util.Map), Set (java.util.Set), TaskId (org.apache.kafka.streams.processor.TaskId), TopologyBuilder (org.apache.kafka.streams.processor.TopologyBuilder), HostInfo (org.apache.kafka.streams.state.HostInfo)
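
The per-client loop at the end of this assign method interleaves the combined active-then-standby task list across a client's consumers with a stride equal to the consumer count. A self-contained sketch of just that round-robin indexing, with illustrative task and consumer names:

import java.util.ArrayList;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;

public class RoundRobinSketch {
    public static void main(final String[] args) {
        final List<String> taskIds = List.of("0_0", "0_1", "0_2", "1_0", "1_1");
        final List<String> consumers = List.of("consumer-a", "consumer-b");
        final Map<String, List<String>> assignment = new LinkedHashMap<>();
        final int numConsumers = consumers.size();
        int i = 0;
        for (final String consumer : consumers) {
            final List<String> tasks = new ArrayList<>();
            // same stride as the assignor: start at this consumer's offset,
            // then jump by the number of consumers
            for (int j = i; j < taskIds.size(); j += numConsumers) {
                tasks.add(taskIds.get(j));
            }
            assignment.put(consumer, tasks);
            i++;
        }
        // prints {consumer-a=[0_0, 0_2, 1_1], consumer-b=[0_1, 1_0]}
        System.out.println(assignment);
    }
}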

Example 30 with HostInfo

Use of org.apache.kafka.streams.state.HostInfo in project kafka by apache.

The class StreamPartitionAssignorTest, method shouldMapUserEndPointToTopicPartitions.

@Test
public void shouldMapUserEndPointToTopicPartitions() throws Exception {
    final Properties properties = configProps();
    final String myEndPoint = "localhost:8080";
    properties.put(StreamsConfig.APPLICATION_SERVER_CONFIG, myEndPoint);
    final StreamsConfig config = new StreamsConfig(properties);
    final String applicationId = "application-id";
    builder.setApplicationId(applicationId);
    builder.addSource("source", "topic1");
    builder.addProcessor("processor", new MockProcessorSupplier(), "source");
    builder.addSink("sink", "output", "processor");
    final List<String> topics = Utils.mkList("topic1");
    final UUID uuid1 = UUID.randomUUID();
    final String client1 = "client1";
    final StreamThread streamThread = new StreamThread(builder, config, mockClientSupplier, applicationId, client1, uuid1, new Metrics(), Time.SYSTEM, new StreamsMetadataState(builder, StreamsMetadataState.UNKNOWN_HOST), 0);
    final StreamPartitionAssignor partitionAssignor = new StreamPartitionAssignor();
    partitionAssignor.configure(config.getConsumerConfigs(streamThread, applicationId, client1));
    partitionAssignor.setInternalTopicManager(new MockInternalTopicManager(streamThread.config, mockClientSupplier.restoreConsumer));
    final Map<String, PartitionAssignor.Subscription> subscriptions = new HashMap<>();
    final Set<TaskId> emptyTasks = Collections.emptySet();
    subscriptions.put("consumer1", new PartitionAssignor.Subscription(topics, new SubscriptionInfo(uuid1, emptyTasks, emptyTasks, myEndPoint).encode()));
    final Map<String, PartitionAssignor.Assignment> assignments = partitionAssignor.assign(metadata, subscriptions);
    final PartitionAssignor.Assignment consumerAssignment = assignments.get("consumer1");
    final AssignmentInfo assignmentInfo = AssignmentInfo.decode(consumerAssignment.userData());
    final Set<TopicPartition> topicPartitions = assignmentInfo.partitionsByHost.get(new HostInfo("localhost", 8080));
    assertEquals(Utils.mkSet(new TopicPartition("topic1", 0), new TopicPartition("topic1", 1), new TopicPartition("topic1", 2)), topicPartitions);
}
Also used: TaskId (org.apache.kafka.streams.processor.TaskId), HashMap (java.util.HashMap), MockInternalTopicManager (org.apache.kafka.test.MockInternalTopicManager), SubscriptionInfo (org.apache.kafka.streams.processor.internals.assignment.SubscriptionInfo), Properties (java.util.Properties), AssignmentInfo (org.apache.kafka.streams.processor.internals.assignment.AssignmentInfo), Metrics (org.apache.kafka.common.metrics.Metrics), PartitionAssignor (org.apache.kafka.clients.consumer.internals.PartitionAssignor), UUID (java.util.UUID), StreamsConfig (org.apache.kafka.streams.StreamsConfig), MockProcessorSupplier (org.apache.kafka.test.MockProcessorSupplier), TopicPartition (org.apache.kafka.common.TopicPartition), HostInfo (org.apache.kafka.streams.state.HostInfo), Test (org.junit.Test)
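
To round the test off with the client-side view: once application.server is set as above, each instance's HostInfo becomes discoverable from any instance of the application. A minimal sketch, assuming the newer public API names (KafkaStreams#metadataForAllStreamsClients and org.apache.kafka.streams.StreamsMetadata, Kafka 3.0+; older releases expose the same data via allMetadata); the bootstrap address and input topic are placeholders.

import java.util.Properties;
import org.apache.kafka.streams.KafkaStreams;
import org.apache.kafka.streams.StreamsBuilder;
import org.apache.kafka.streams.StreamsConfig;
import org.apache.kafka.streams.StreamsMetadata;
import org.apache.kafka.streams.state.HostInfo;

public class DiscoverHosts {
    public static void main(final String[] args) {
        final Properties props = new Properties();
        props.put(StreamsConfig.APPLICATION_ID_CONFIG, "application-id");
        props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // placeholder broker
        props.put(StreamsConfig.APPLICATION_SERVER_CONFIG, "localhost:8080");

        final StreamsBuilder builder = new StreamsBuilder();
        builder.stream("topic1"); // placeholder input topic

        try (final KafkaStreams streams = new KafkaStreams(builder.build(), props)) {
            streams.start();
            // each entry's hostInfo() reflects that instance's application.server value
            for (final StreamsMetadata metadata : streams.metadataForAllStreamsClients()) {
                final HostInfo host = metadata.hostInfo();
                System.out.println(host.host() + ":" + host.port());
            }
        }
    }
}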

Aggregations

HostInfo (org.apache.kafka.streams.state.HostInfo): 57
TopicPartition (org.apache.kafka.common.TopicPartition): 31
HashSet (java.util.HashSet): 30
Test (org.junit.Test): 27
Set (java.util.Set): 25
HashMap (java.util.HashMap): 22
TaskId (org.apache.kafka.streams.processor.TaskId): 18
AssignmentInfo (org.apache.kafka.streams.processor.internals.assignment.AssignmentInfo): 16
KsqlHostInfo (io.confluent.ksql.util.KsqlHostInfo): 12
Map (java.util.Map): 12
PartitionInfo (org.apache.kafka.common.PartitionInfo): 11
ArrayList (java.util.ArrayList): 10
UUID (java.util.UUID): 9
Cluster (org.apache.kafka.common.Cluster): 9
PartitionAssignor (org.apache.kafka.clients.consumer.internals.PartitionAssignor): 8
StreamsMetadata (org.apache.kafka.streams.StreamsMetadata): 7
SubscriptionInfo (org.apache.kafka.streams.processor.internals.assignment.SubscriptionInfo): 7
List (java.util.List): 6
Node (org.apache.kafka.common.Node): 6
KsqlNode (io.confluent.ksql.execution.streams.materialization.Locator.KsqlNode): 5