
Example 1 with Node

Use of org.apache.kafka.common.Node in project kafka by apache.

The class Sender, method run.

/**
 * Run a single iteration of sending.
 *
 * @param now the current POSIX time in milliseconds
 */
void run(long now) {
    Cluster cluster = metadata.fetch();
    // get the list of partitions with data ready to send
    RecordAccumulator.ReadyCheckResult result = this.accumulator.ready(cluster, now);
    // if there are any partitions whose leaders are not known yet, force metadata update
    if (!result.unknownLeaderTopics.isEmpty()) {
        // and request metadata update, since there are messages to send to the topic.
        for (String topic : result.unknownLeaderTopics) this.metadata.add(topic);
        this.metadata.requestUpdate();
    }
    // remove any nodes we aren't ready to send to
    Iterator<Node> iter = result.readyNodes.iterator();
    long notReadyTimeout = Long.MAX_VALUE;
    while (iter.hasNext()) {
        Node node = iter.next();
        if (!this.client.ready(node, now)) {
            iter.remove();
            notReadyTimeout = Math.min(notReadyTimeout, this.client.connectionDelay(node, now));
        }
    }
    // create produce requests
    Map<Integer, List<ProducerBatch>> batches = this.accumulator.drain(cluster, result.readyNodes, this.maxRequestSize, now);
    if (guaranteeMessageOrder) {
        // Mute all the partitions drained
        for (List<ProducerBatch> batchList : batches.values()) {
            for (ProducerBatch batch : batchList) this.accumulator.mutePartition(batch.topicPartition);
        }
    }
    List<ProducerBatch> expiredBatches = this.accumulator.abortExpiredBatches(this.requestTimeout, now);
    // update sensors
    for (ProducerBatch expiredBatch : expiredBatches) this.sensors.recordErrors(expiredBatch.topicPartition.topic(), expiredBatch.recordCount);
    sensors.updateProduceRequestMetrics(batches);
    // If we have any nodes that are ready to send + have sendable data, poll with 0 timeout so this can immediately
    // loop and try sending more data. Otherwise, the timeout is determined by nodes that have partitions with data
    // that isn't yet sendable (e.g. lingering, backing off). Note that this specifically does not include nodes
    // with sendable data that aren't ready to send since they would cause busy looping.
    long pollTimeout = Math.min(result.nextReadyCheckDelayMs, notReadyTimeout);
    if (!result.readyNodes.isEmpty()) {
        log.trace("Nodes with data ready to send: {}", result.readyNodes);
        pollTimeout = 0;
    }
    sendProduceRequests(batches, now);
    // if some partitions are already ready to be sent, the select time would be 0;
    // otherwise if some partition already has some data accumulated but not ready yet,
    // the select time will be the time difference between now and its linger expiry time;
    // otherwise the select time will be the time difference between now and the metadata expiry time;
    this.client.poll(pollTimeout, now);
}
Also used: Node (org.apache.kafka.common.Node), Cluster (org.apache.kafka.common.Cluster), List (java.util.List)
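
For context, run(long now) is a single iteration; in the producer it is driven by the Sender's I/O thread, which calls it in a loop with the current time. The loop below is an illustrative sketch only (the running flag, the time field, and the error handling are assumptions, not the exact Sender code):

// Illustrative driver loop: invoke the single-iteration method with the current time
// until the sender is asked to shut down. Assumes 'time' is an
// org.apache.kafka.common.utils.Time and 'running' is a volatile shutdown flag.
public void run() {
    while (running) {
        try {
            run(time.milliseconds());
        } catch (Exception e) {
            log.error("Uncaught error in producer I/O thread: ", e);
        }
    }
}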

Example 2 with Node

Use of org.apache.kafka.common.Node in project kafka by apache.

The class AbstractCoordinator, method close.

protected void close(long timeoutMs) {
    try {
        closeHeartbeatThread();
    } finally {
        // Synchronize after closing the heartbeat thread, since the heartbeat thread
        // needs this lock to complete and terminate after the close flag is set.
        synchronized (this) {
            maybeLeaveGroup();
            // At this point, there may be pending commits (async commits or sync commits that were
            // interrupted using wakeup) and the leave group request which have been queued, but not
            // yet sent to the broker. Wait up to close timeout for these pending requests to be processed.
            // If coordinator is not known, requests are aborted.
            Node coordinator = coordinator();
            if (coordinator != null && !client.awaitPendingRequests(coordinator, timeoutMs))
                log.warn("Close timed out with {} pending requests to coordinator, terminating client connections for group {}.", client.pendingRequestCount(coordinator), groupId);
        }
    }
}
Also used: Node (org.apache.kafka.common.Node)
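
The close path above relies on awaitPendingRequests to drain in-flight requests before the timeout expires. A minimal sketch of that await-with-deadline pattern, with illustrative method names (pendingRequestCount and poll stand in for the ConsumerNetworkClient internals, not its exact API):

// Illustrative: block until no requests to 'node' remain in flight, or the deadline passes.
boolean awaitPendingRequests(Node node, long timeoutMs) {
    long deadline = System.currentTimeMillis() + timeoutMs;
    while (pendingRequestCount(node) > 0) {
        long remaining = deadline - System.currentTimeMillis();
        if (remaining <= 0)
            return false;      // timed out with requests still pending
        poll(remaining);       // drive network I/O while waiting
    }
    return true;
}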

Example 3 with Node

Use of org.apache.kafka.common.Node in project kafka by apache.

The class Fetcher, method sendListOffsetRequests.

/**
 * Search the offsets by target times for the specified partitions.
 *
 * @param requireTimestamps true if we should fail with an UnsupportedVersionException if the broker does
 *                          not support fetching precise timestamps for offsets
 * @param timestampsToSearch the mapping between partitions and target times
 * @return a response which can be polled to obtain the corresponding timestamps and offsets
 */
private RequestFuture<Map<TopicPartition, OffsetData>> sendListOffsetRequests(final boolean requireTimestamps, final Map<TopicPartition, Long> timestampsToSearch) {
    // Group the partitions by node.
    final Map<Node, Map<TopicPartition, Long>> timestampsToSearchByNode = new HashMap<>();
    for (Map.Entry<TopicPartition, Long> entry : timestampsToSearch.entrySet()) {
        TopicPartition tp = entry.getKey();
        PartitionInfo info = metadata.fetch().partition(tp);
        if (info == null) {
            metadata.add(tp.topic());
            log.debug("Partition {} is unknown for fetching offset, wait for metadata refresh", tp);
            return RequestFuture.staleMetadata();
        } else if (info.leader() == null) {
            log.debug("Leader for partition {} unavailable for fetching offset, wait for metadata refresh", tp);
            return RequestFuture.leaderNotAvailable();
        } else {
            Node node = info.leader();
            Map<TopicPartition, Long> topicData = timestampsToSearchByNode.get(node);
            if (topicData == null) {
                topicData = new HashMap<>();
                timestampsToSearchByNode.put(node, topicData);
            }
            topicData.put(entry.getKey(), entry.getValue());
        }
    }
    final RequestFuture<Map<TopicPartition, OffsetData>> listOffsetRequestsFuture = new RequestFuture<>();
    final Map<TopicPartition, OffsetData> fetchedTimestampOffsets = new HashMap<>();
    final AtomicInteger remainingResponses = new AtomicInteger(timestampsToSearchByNode.size());
    for (Map.Entry<Node, Map<TopicPartition, Long>> entry : timestampsToSearchByNode.entrySet()) {
        sendListOffsetRequest(entry.getKey(), entry.getValue(), requireTimestamps).addListener(new RequestFutureListener<Map<TopicPartition, OffsetData>>() {

            @Override
            public void onSuccess(Map<TopicPartition, OffsetData> value) {
                synchronized (listOffsetRequestsFuture) {
                    fetchedTimestampOffsets.putAll(value);
                    if (remainingResponses.decrementAndGet() == 0 && !listOffsetRequestsFuture.isDone())
                        listOffsetRequestsFuture.complete(fetchedTimestampOffsets);
                }
            }

            @Override
            public void onFailure(RuntimeException e) {
                synchronized (listOffsetRequestsFuture) {
                    // This may cause all the requests to be retried, but should be rare.
                    if (!listOffsetRequestsFuture.isDone())
                        listOffsetRequestsFuture.raise(e);
                }
            }
        });
    }
    return listOffsetRequestsFuture;
}
Also used: HashMap (java.util.HashMap), LinkedHashMap (java.util.LinkedHashMap), Node (org.apache.kafka.common.Node), AtomicInteger (java.util.concurrent.atomic.AtomicInteger), TopicPartition (org.apache.kafka.common.TopicPartition), PartitionInfo (org.apache.kafka.common.PartitionInfo), Map (java.util.Map)
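
The per-node grouping in this example uses an explicit null check because the code predates wide use of Java 8 idioms; on Java 8+ the same grouping can be written with Map.computeIfAbsent. A sketch under the assumption that every partition's leader is already known (the stale-metadata and leader-not-available branches from the example are omitted):

final Map<Node, Map<TopicPartition, Long>> timestampsToSearchByNode = new HashMap<>();
for (Map.Entry<TopicPartition, Long> entry : timestampsToSearch.entrySet()) {
    // Assumes the partition is known and has a leader; the real code handles both failures.
    Node leader = metadata.fetch().partition(entry.getKey()).leader();
    timestampsToSearchByNode
            .computeIfAbsent(leader, node -> new HashMap<>())
            .put(entry.getKey(), entry.getValue());
}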

Example 4 with Node

Use of org.apache.kafka.common.Node in project kafka by apache.

The class StreamPartitionAssignor, method onAssignment.

/**
 * @throws TaskAssignmentException if there is no task id for one of the partitions specified
 */
@Override
public void onAssignment(Assignment assignment) {
    List<TopicPartition> partitions = new ArrayList<>(assignment.partitions());
    Collections.sort(partitions, PARTITION_COMPARATOR);
    AssignmentInfo info = AssignmentInfo.decode(assignment.userData());
    this.standbyTasks = info.standbyTasks;
    this.activeTasks = new HashMap<>();
    // a task id could appear more than once if one task has more than one assigned partition
    if (partitions.size() != info.activeTasks.size()) {
        throw new TaskAssignmentException(String.format("stream-thread [%s] Number of assigned partitions %d is not equal to the number of active taskIds %d" + ", assignmentInfo=%s", streamThread.getName(), partitions.size(), info.activeTasks.size(), info.toString()));
    }
    for (int i = 0; i < partitions.size(); i++) {
        TopicPartition partition = partitions.get(i);
        TaskId id = info.activeTasks.get(i);
        Set<TopicPartition> assignedPartitions = activeTasks.get(id);
        if (assignedPartitions == null) {
            assignedPartitions = new HashSet<>();
            activeTasks.put(id, assignedPartitions);
        }
        assignedPartitions.add(partition);
    }
    this.partitionsByHostState = info.partitionsByHost;
    final Collection<Set<TopicPartition>> values = partitionsByHostState.values();
    final Map<TopicPartition, PartitionInfo> topicToPartitionInfo = new HashMap<>();
    for (Set<TopicPartition> value : values) {
        for (TopicPartition topicPartition : value) {
            topicToPartitionInfo.put(topicPartition, new PartitionInfo(topicPartition.topic(), topicPartition.partition(), null, new Node[0], new Node[0]));
        }
    }
    metadataWithInternalTopics = Cluster.empty().withPartitions(topicToPartitionInfo);
}
Also used: TaskAssignmentException (org.apache.kafka.streams.errors.TaskAssignmentException), TaskId (org.apache.kafka.streams.processor.TaskId), HashSet (java.util.HashSet), Set (java.util.Set), HashMap (java.util.HashMap), Node (org.apache.kafka.common.Node), ArrayList (java.util.ArrayList), AssignmentInfo (org.apache.kafka.streams.processor.internals.assignment.AssignmentInfo), TopicPartition (org.apache.kafka.common.TopicPartition), PartitionInfo (org.apache.kafka.common.PartitionInfo)
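
Note that the PartitionInfo instances built here deliberately carry a null leader and empty Node arrays: this Cluster only records which topic-partitions exist, so Streams can resolve them later without tracking broker placement. A hedged usage sketch of the resulting metadata (the topic name is hypothetical):

// Illustrative lookup against the rebuilt metadata; only partition existence is recorded.
Cluster streamsMetadata = Cluster.empty().withPartitions(topicToPartitionInfo);
PartitionInfo info = streamsMetadata.partition(new TopicPartition("some-topic", 0)); // hypothetical topic
// info.leader() is null and info.replicas() is empty; no broker placement is tracked.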

Example 5 with Node

Use of org.apache.kafka.common.Node in project kafka by apache.

The class LeaderAndIsrRequest, method toStruct.

@Override
protected Struct toStruct() {
    short version = version();
    Struct struct = new Struct(ApiKeys.LEADER_AND_ISR.requestSchema(version));
    struct.set(CONTROLLER_ID_KEY_NAME, controllerId);
    struct.set(CONTROLLER_EPOCH_KEY_NAME, controllerEpoch);
    List<Struct> partitionStatesData = new ArrayList<>(partitionStates.size());
    for (Map.Entry<TopicPartition, PartitionState> entry : partitionStates.entrySet()) {
        Struct partitionStateData = struct.instance(PARTITION_STATES_KEY_NAME);
        TopicPartition topicPartition = entry.getKey();
        partitionStateData.set(TOPIC_KEY_NAME, topicPartition.topic());
        partitionStateData.set(PARTITION_KEY_NAME, topicPartition.partition());
        PartitionState partitionState = entry.getValue();
        partitionStateData.set(CONTROLLER_EPOCH_KEY_NAME, partitionState.controllerEpoch);
        partitionStateData.set(LEADER_KEY_NAME, partitionState.leader);
        partitionStateData.set(LEADER_EPOCH_KEY_NAME, partitionState.leaderEpoch);
        partitionStateData.set(ISR_KEY_NAME, partitionState.isr.toArray());
        partitionStateData.set(ZK_VERSION_KEY_NAME, partitionState.zkVersion);
        partitionStateData.set(REPLICAS_KEY_NAME, partitionState.replicas.toArray());
        partitionStatesData.add(partitionStateData);
    }
    struct.set(PARTITION_STATES_KEY_NAME, partitionStatesData.toArray());
    List<Struct> leadersData = new ArrayList<>(liveLeaders.size());
    for (Node leader : liveLeaders) {
        Struct leaderData = struct.instance(LIVE_LEADERS_KEY_NAME);
        leaderData.set(END_POINT_ID_KEY_NAME, leader.id());
        leaderData.set(HOST_KEY_NAME, leader.host());
        leaderData.set(PORT_KEY_NAME, leader.port());
        leadersData.add(leaderData);
    }
    struct.set(LIVE_LEADERS_KEY_NAME, leadersData.toArray());
    return struct;
}
Also used: TopicPartition (org.apache.kafka.common.TopicPartition), Node (org.apache.kafka.common.Node), ArrayList (java.util.ArrayList), HashMap (java.util.HashMap), Map (java.util.Map), Struct (org.apache.kafka.common.protocol.types.Struct)
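
toStruct writes the wire form; the inverse read for the live-leaders array looks roughly like the sketch below. The field constants are the ones used above, but the surrounding code is illustrative, not copied from the LeaderAndIsrRequest(Struct) constructor:

// Illustrative inverse: rebuild the Node list from the serialized live-leaders array.
List<Node> liveLeaders = new ArrayList<>();
for (Object leaderDataObj : struct.getArray(LIVE_LEADERS_KEY_NAME)) {
    Struct leaderData = (Struct) leaderDataObj;
    int id = leaderData.getInt(END_POINT_ID_KEY_NAME);
    String host = leaderData.getString(HOST_KEY_NAME);
    int port = leaderData.getInt(PORT_KEY_NAME);
    liveLeaders.add(new Node(id, host, port));
}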

Aggregations

Node (org.apache.kafka.common.Node): 420
Test (org.junit.jupiter.api.Test): 150
Cluster (org.apache.kafka.common.Cluster): 132
TopicPartition (org.apache.kafka.common.TopicPartition): 129
HashMap (java.util.HashMap): 122
Test (org.junit.Test): 109
PartitionInfo (org.apache.kafka.common.PartitionInfo): 98
MockClient (org.apache.kafka.clients.MockClient): 93
ArrayList (java.util.ArrayList): 88
HashSet (java.util.HashSet): 67
MockTime (org.apache.kafka.common.utils.MockTime): 62
Metadata (org.apache.kafka.clients.Metadata): 59
Map (java.util.Map): 56
LinkedHashMap (java.util.LinkedHashMap): 55
RecordMetadata (org.apache.kafka.clients.producer.RecordMetadata): 50
List (java.util.List): 46
LogContext (org.apache.kafka.common.utils.LogContext): 42
Time (org.apache.kafka.common.utils.Time): 41
Set (java.util.Set): 40
MetadataResponse (org.apache.kafka.common.requests.MetadataResponse): 40