Example 16 with PartitionInfo

Use of org.apache.kafka.common.PartitionInfo in project kafka by apache.

In class MockInternalTopicManager, method makeReady:

@Override
public void makeReady(final Map<InternalTopicConfig, Integer> topics) {
    for (Map.Entry<InternalTopicConfig, Integer> entry : topics.entrySet()) {
        readyTopics.put(entry.getKey().name(), entry.getValue());
        // Fabricate one PartitionInfo per partition; leader, replicas and ISR
        // are left null because the mock never contacts a real broker.
        final List<PartitionInfo> partitions = new ArrayList<>();
        for (int i = 0; i < entry.getValue(); i++) {
            partitions.add(new PartitionInfo(entry.getKey().name(), i, null, null, null));
        }
        restoreConsumer.updatePartitions(entry.getKey().name(), partitions);
    }
}
Also used: ArrayList (java.util.ArrayList), InternalTopicConfig (org.apache.kafka.streams.processor.internals.InternalTopicConfig), PartitionInfo (org.apache.kafka.common.PartitionInfo), Map (java.util.Map), HashMap (java.util.HashMap)
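
To see what this mock wiring buys you, here is a minimal, self-contained sketch (not part of the original test; the topic name and partition count are invented) of the same pattern: register fake PartitionInfo entries with a MockConsumer, then read them back through the normal consumer API.

import java.util.ArrayList;
import java.util.List;
import org.apache.kafka.clients.consumer.MockConsumer;
import org.apache.kafka.clients.consumer.OffsetResetStrategy;
import org.apache.kafka.common.PartitionInfo;

public class MakeReadySketch {
    public static void main(String[] args) {
        MockConsumer<byte[], byte[]> restoreConsumer = new MockConsumer<>(OffsetResetStrategy.EARLIEST);
        // Same loop shape as makeReady: one PartitionInfo per partition, with
        // null leader/replicas/ISR since the mock never contacts a broker.
        List<PartitionInfo> partitions = new ArrayList<>();
        for (int i = 0; i < 3; i++)
            partitions.add(new PartitionInfo("changelog", i, null, null, null));
        restoreConsumer.updatePartitions("changelog", partitions);
        // The registered metadata is now queryable just as on a live cluster.
        System.out.println(restoreConsumer.partitionsFor("changelog").size()); // 3
    }
}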

Example 17 with PartitionInfo

Use of org.apache.kafka.common.PartitionInfo in project kafka by apache.

In class RecordAccumulator, method drain:

/**
     * Drain all the data for the given nodes and collate them into a list of batches that will fit within the specified
     * size on a per-node basis. This method attempts to avoid choosing the same topic-node over and over.
     *
     * @param cluster The current cluster metadata
     * @param nodes The set of nodes to drain
     * @param maxSize The maximum number of bytes to drain
     * @param now The current unix time in milliseconds
     * @return A list of {@link ProducerBatch} for each node specified with total size less than the requested maxSize.
     */
public Map<Integer, List<ProducerBatch>> drain(Cluster cluster, Set<Node> nodes, int maxSize, long now) {
    if (nodes.isEmpty())
        return Collections.emptyMap();
    Map<Integer, List<ProducerBatch>> batches = new HashMap<>();
    for (Node node : nodes) {
        int size = 0;
        List<PartitionInfo> parts = cluster.partitionsForNode(node.id());
        List<ProducerBatch> ready = new ArrayList<>();
        /* to make starvation less likely this loop doesn't start at 0 */
        int start = drainIndex = drainIndex % parts.size();
        do {
            PartitionInfo part = parts.get(drainIndex);
            TopicPartition tp = new TopicPartition(part.topic(), part.partition());
            // Only proceed if the partition has no in-flight batches.
            if (!muted.contains(tp)) {
                Deque<ProducerBatch> deque = getDeque(tp);
                if (deque != null) {
                    synchronized (deque) {
                        ProducerBatch first = deque.peekFirst();
                        if (first != null) {
                            boolean backoff = first.attempts() > 0 && first.waitedTimeMs(now) < retryBackoffMs;
                            // Only drain the batch if it is not during its backoff period.
                            if (!backoff) {
                                if (size + first.sizeInBytes() > maxSize && !ready.isEmpty()) {
                                    // There is a rare case that a single batch size is larger than the request size
                                    // due to compression; in this case we will still eventually send this batch in
                                    // a single request.
                                    break;
                                } else {
                                    ProducerBatch batch = deque.pollFirst();
                                    batch.close();
                                    size += batch.sizeInBytes();
                                    ready.add(batch);
                                    batch.drained(now);
                                }
                            }
                        }
                    }
                }
            }
            this.drainIndex = (this.drainIndex + 1) % parts.size();
        } while (start != drainIndex);
        batches.put(node.id(), ready);
    }
    return batches;
}
Also used: HashMap (java.util.HashMap), Node (org.apache.kafka.common.Node), ArrayList (java.util.ArrayList), AtomicInteger (java.util.concurrent.atomic.AtomicInteger), TopicPartition (org.apache.kafka.common.TopicPartition), List (java.util.List), PartitionInfo (org.apache.kafka.common.PartitionInfo)
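
The interesting trick in drain is the rotating drainIndex: each pass starts where the previous one stopped, so the partitions at the front of the list cannot starve the rest. Below is a stripped-down, standalone sketch of just that rotation; it is a simplification, with a plain item budget standing in for the maxSize byte check and all Kafka types dropped.

import java.util.ArrayList;
import java.util.List;

public class DrainRotationSketch {
    private int drainIndex = 0;

    // Visit up to 'budget' partitions, starting where the last pass stopped.
    List<Integer> drainPass(int numPartitions, int budget) {
        List<Integer> visited = new ArrayList<>();
        int start = drainIndex = drainIndex % numPartitions;
        do {
            if (visited.size() == budget)
                break;                    // stand-in for the maxSize break above
            visited.add(drainIndex);
            drainIndex = (drainIndex + 1) % numPartitions;
        } while (start != drainIndex);
        return visited;
    }

    public static void main(String[] args) {
        DrainRotationSketch s = new DrainRotationSketch();
        System.out.println(s.drainPass(4, 2)); // [0, 1]
        System.out.println(s.drainPass(4, 2)); // [2, 3] -- partitions 2 and 3 are not starved
    }
}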

Example 18 with PartitionInfo

Use of org.apache.kafka.common.PartitionInfo in project kafka by apache.

In class Fetcher, method sendListOffsetRequests:

/**
     * Search the offsets by target times for the specified partitions.
     *
     * @param requireTimestamps true if we should fail with an UnsupportedVersionException if the broker does
     *                         not support fetching precise timestamps for offsets
     * @param timestampsToSearch the mapping between partitions and target time
     * @return A response which can be polled to obtain the corresponding timestamps and offsets.
     */
private RequestFuture<Map<TopicPartition, OffsetData>> sendListOffsetRequests(final boolean requireTimestamps, final Map<TopicPartition, Long> timestampsToSearch) {
    // Group the partitions by node.
    final Map<Node, Map<TopicPartition, Long>> timestampsToSearchByNode = new HashMap<>();
    for (Map.Entry<TopicPartition, Long> entry : timestampsToSearch.entrySet()) {
        TopicPartition tp = entry.getKey();
        PartitionInfo info = metadata.fetch().partition(tp);
        if (info == null) {
            metadata.add(tp.topic());
            log.debug("Partition {} is unknown for fetching offset, wait for metadata refresh", tp);
            return RequestFuture.staleMetadata();
        } else if (info.leader() == null) {
            log.debug("Leader for partition {} unavailable for fetching offset, wait for metadata refresh", tp);
            return RequestFuture.leaderNotAvailable();
        } else {
            Node node = info.leader();
            Map<TopicPartition, Long> topicData = timestampsToSearchByNode.get(node);
            if (topicData == null) {
                topicData = new HashMap<>();
                timestampsToSearchByNode.put(node, topicData);
            }
            topicData.put(entry.getKey(), entry.getValue());
        }
    }
    final RequestFuture<Map<TopicPartition, OffsetData>> listOffsetRequestsFuture = new RequestFuture<>();
    final Map<TopicPartition, OffsetData> fetchedTimestampOffsets = new HashMap<>();
    final AtomicInteger remainingResponses = new AtomicInteger(timestampsToSearchByNode.size());
    for (Map.Entry<Node, Map<TopicPartition, Long>> entry : timestampsToSearchByNode.entrySet()) {
        sendListOffsetRequest(entry.getKey(), entry.getValue(), requireTimestamps).addListener(new RequestFutureListener<Map<TopicPartition, OffsetData>>() {

            @Override
            public void onSuccess(Map<TopicPartition, OffsetData> value) {
                synchronized (listOffsetRequestsFuture) {
                    fetchedTimestampOffsets.putAll(value);
                    if (remainingResponses.decrementAndGet() == 0 && !listOffsetRequestsFuture.isDone())
                        listOffsetRequestsFuture.complete(fetchedTimestampOffsets);
                }
            }

            @Override
            public void onFailure(RuntimeException e) {
                synchronized (listOffsetRequestsFuture) {
                    // This may cause all the requests to be retried, but should be rare.
                    if (!listOffsetRequestsFuture.isDone())
                        listOffsetRequestsFuture.raise(e);
                }
            }
        });
    }
    return listOffsetRequestsFuture;
}
Also used: HashMap (java.util.HashMap), LinkedHashMap (java.util.LinkedHashMap), Node (org.apache.kafka.common.Node), AtomicInteger (java.util.concurrent.atomic.AtomicInteger), TopicPartition (org.apache.kafka.common.TopicPartition), PartitionInfo (org.apache.kafka.common.PartitionInfo), Map (java.util.Map)
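
The pattern worth lifting out of sendListOffsetRequests is the response collation: one request per node, an AtomicInteger counting outstanding responses, and a shared future completed when the counter reaches zero (or failed on the first error). Here is a minimal standalone sketch of that pattern, using CompletableFuture in place of Kafka's internal RequestFuture; the node names and the fake per-node work are invented for illustration.

import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.atomic.AtomicInteger;

public class FanOutSketch {
    static CompletableFuture<Map<String, Long>> fanOut(Map<String, Long> requestsByNode) {
        final CompletableFuture<Map<String, Long>> result = new CompletableFuture<>();
        final Map<String, Long> merged = new ConcurrentHashMap<>();
        final AtomicInteger remaining = new AtomicInteger(requestsByNode.size());
        for (final Map.Entry<String, Long> entry : requestsByNode.entrySet()) {
            // Stand-in for sendListOffsetRequest(node, ...).addListener(...).
            CompletableFuture.supplyAsync(() -> entry.getValue() + 1)
                .whenComplete((value, error) -> {
                    if (error != null) {
                        result.completeExceptionally(error); // first failure wins
                    } else {
                        merged.put(entry.getKey(), value);
                        if (remaining.decrementAndGet() == 0)
                            result.complete(merged);         // all nodes answered
                    }
                });
        }
        return result;
    }

    public static void main(String[] args) throws Exception {
        Map<String, Long> byNode = new HashMap<>();
        byNode.put("node-1", 10L);
        byNode.put("node-2", 20L);
        System.out.println(fanOut(byNode).get()); // e.g. {node-1=11, node-2=21}
    }
}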

Example 19 with PartitionInfo

Use of org.apache.kafka.common.PartitionInfo in project kafka by apache.

In class KafkaConsumer, method partitionsFor:

/**
     * Get metadata about the partitions for a given topic. This method will issue a remote call to the server if it
     * does not already have any metadata about the given topic.
     *
     * @param topic The topic to get partition metadata for
     * @return The list of partitions
     * @throws org.apache.kafka.common.errors.WakeupException if {@link #wakeup()} is called before or while this
     *             function is called
     * @throws org.apache.kafka.common.errors.InterruptException if the calling thread is interrupted before or while
     *             this function is called
     * @throws org.apache.kafka.common.errors.AuthorizationException if not authorized to the specified topic
     * @throws org.apache.kafka.common.errors.TimeoutException if the topic metadata could not be fetched before
     *             expiration of the configured request timeout
     * @throws org.apache.kafka.common.KafkaException for any other unrecoverable errors
     */
@Override
public List<PartitionInfo> partitionsFor(String topic) {
    acquire();
    try {
        Cluster cluster = this.metadata.fetch();
        List<PartitionInfo> parts = cluster.partitionsForTopic(topic);
        if (!parts.isEmpty())
            return parts;
        // Cached metadata was empty; fetch topic metadata from the brokers.
        Map<String, List<PartitionInfo>> topicMetadata = fetcher.getTopicMetadata(new MetadataRequest.Builder(Collections.singletonList(topic)), requestTimeoutMs);
        return topicMetadata.get(topic);
    } finally {
        release();
    }
}
Also used: MetadataRequest (org.apache.kafka.common.requests.MetadataRequest), Cluster (org.apache.kafka.common.Cluster), List (java.util.List), PartitionInfo (org.apache.kafka.common.PartitionInfo)
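
For completeness, a hypothetical caller-side sketch of partitionsFor; the broker address, topic name, and deserializer choices are placeholders, not from the original source.

import java.util.Properties;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.PartitionInfo;
import org.apache.kafka.common.serialization.ByteArrayDeserializer;

public class PartitionsForSketch {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put("bootstrap.servers", "localhost:9092");
        props.put("key.deserializer", ByteArrayDeserializer.class.getName());
        props.put("value.deserializer", ByteArrayDeserializer.class.getName());
        try (KafkaConsumer<byte[], byte[]> consumer = new KafkaConsumer<>(props)) {
            // Answered from cached metadata when possible; otherwise the
            // consumer issues a MetadataRequest as shown above.
            for (PartitionInfo p : consumer.partitionsFor("my-topic"))
                System.out.println(p.topic() + "-" + p.partition() + " leader=" + p.leader());
        }
    }
}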

Example 20 with PartitionInfo

Use of org.apache.kafka.common.PartitionInfo in project kafka by apache.

In class MockProducerTest, method testPartitioner:

@Test
public void testPartitioner() throws Exception {
    PartitionInfo partitionInfo0 = new PartitionInfo(topic, 0, null, null, null);
    PartitionInfo partitionInfo1 = new PartitionInfo(topic, 1, null, null, null);
    Cluster cluster = new Cluster(null, new ArrayList<Node>(0), asList(partitionInfo0, partitionInfo1), Collections.<String>emptySet(), Collections.<String>emptySet());
    MockProducer<String, String> producer = new MockProducer<>(cluster, true, new DefaultPartitioner(), new StringSerializer(), new StringSerializer());
    ProducerRecord<String, String> record = new ProducerRecord<>(topic, "key", "value");
    Future<RecordMetadata> metadata = producer.send(record);
    assertEquals("Partition should be correct", 1, metadata.get().partition());
    producer.clear();
    assertEquals("Clear should erase our history", 0, producer.history().size());
}
Also used: DefaultPartitioner (org.apache.kafka.clients.producer.internals.DefaultPartitioner), Node (org.apache.kafka.common.Node), Cluster (org.apache.kafka.common.Cluster), PartitionInfo (org.apache.kafka.common.PartitionInfo), StringSerializer (org.apache.kafka.common.serialization.StringSerializer), Test (org.junit.Test)
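
Why does "key" land on partition 1? For a non-null key, DefaultPartitioner hashes the serialized key with murmur2 and takes the result modulo the partition count. The tiny sketch below reproduces that arithmetic using Kafka's own Utils helpers; it mirrors the keyed path of DefaultPartitioner, which the assertion in the test above confirms.

import java.nio.charset.StandardCharsets;
import org.apache.kafka.common.utils.Utils;

public class PartitionChoiceSketch {
    public static void main(String[] args) {
        // StringSerializer produces the UTF-8 bytes of the key.
        byte[] keyBytes = "key".getBytes(StandardCharsets.UTF_8);
        int numPartitions = 2;
        // Keyed path of DefaultPartitioner: positive murmur2 hash mod partitions.
        int partition = Utils.toPositive(Utils.murmur2(keyBytes)) % numPartitions;
        System.out.println(partition); // 1, matching the assertion in the test
    }
}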

Aggregations

PartitionInfo (org.apache.kafka.common.PartitionInfo): 49 uses
TopicPartition (org.apache.kafka.common.TopicPartition): 30 uses
Test (org.junit.Test): 23 uses
HashMap (java.util.HashMap): 17 uses
ArrayList (java.util.ArrayList): 15 uses
Node (org.apache.kafka.common.Node): 12 uses
Map (java.util.Map): 11 uses
Cluster (org.apache.kafka.common.Cluster): 11 uses
HashSet (java.util.HashSet): 10 uses
Set (java.util.Set): 7 uses
TaskId (org.apache.kafka.streams.processor.TaskId): 7 uses
StreamsConfig (org.apache.kafka.streams.StreamsConfig): 6 uses
MockTime (org.apache.kafka.common.utils.MockTime): 5 uses
List (java.util.List): 4 uses
Properties (java.util.Properties): 4 uses
KStreamBuilder (org.apache.kafka.streams.kstream.KStreamBuilder): 4 uses
HostInfo (org.apache.kafka.streams.state.HostInfo): 4 uses
StreamsMetadata (org.apache.kafka.streams.state.StreamsMetadata): 4 uses
File (java.io.File): 3 uses
MockConsumer (org.apache.kafka.clients.consumer.MockConsumer): 3 uses