Example 56 with MetadataResponse

Use of org.apache.kafka.common.requests.MetadataResponse in project apache-kafka-on-k8s by banzaicloud.

From the class KafkaAdminClientTest, method testDeleteRecords.

@Test
public void testDeleteRecords() throws Exception {
    HashMap<Integer, Node> nodes = new HashMap<>();
    nodes.put(0, new Node(0, "localhost", 8121));
    List<PartitionInfo> partitionInfos = new ArrayList<>();
    partitionInfos.add(new PartitionInfo("my_topic", 0, nodes.get(0), new Node[] { nodes.get(0) }, new Node[] { nodes.get(0) }));
    partitionInfos.add(new PartitionInfo("my_topic", 1, nodes.get(0), new Node[] { nodes.get(0) }, new Node[] { nodes.get(0) }));
    partitionInfos.add(new PartitionInfo("my_topic", 2, null, new Node[] { nodes.get(0) }, new Node[] { nodes.get(0) }));
    partitionInfos.add(new PartitionInfo("my_topic", 3, nodes.get(0), new Node[] { nodes.get(0) }, new Node[] { nodes.get(0) }));
    partitionInfos.add(new PartitionInfo("my_topic", 4, nodes.get(0), new Node[] { nodes.get(0) }, new Node[] { nodes.get(0) }));
    Cluster cluster = new Cluster("mockClusterId", nodes.values(), partitionInfos, Collections.<String>emptySet(), Collections.<String>emptySet(), nodes.get(0));
    TopicPartition myTopicPartition0 = new TopicPartition("my_topic", 0);
    TopicPartition myTopicPartition1 = new TopicPartition("my_topic", 1);
    TopicPartition myTopicPartition2 = new TopicPartition("my_topic", 2);
    TopicPartition myTopicPartition3 = new TopicPartition("my_topic", 3);
    TopicPartition myTopicPartition4 = new TopicPartition("my_topic", 4);
    try (AdminClientUnitTestEnv env = new AdminClientUnitTestEnv(cluster)) {
        env.kafkaClient().setNodeApiVersions(NodeApiVersions.create());
        env.kafkaClient().prepareMetadataUpdate(env.cluster(), Collections.<String>emptySet());
        env.kafkaClient().setNode(env.cluster().nodes().get(0));
        Map<TopicPartition, DeleteRecordsResponse.PartitionResponse> m = new HashMap<>();
        m.put(myTopicPartition0, new DeleteRecordsResponse.PartitionResponse(3, Errors.NONE));
        m.put(myTopicPartition1, new DeleteRecordsResponse.PartitionResponse(DeleteRecordsResponse.INVALID_LOW_WATERMARK, Errors.OFFSET_OUT_OF_RANGE));
        m.put(myTopicPartition3, new DeleteRecordsResponse.PartitionResponse(DeleteRecordsResponse.INVALID_LOW_WATERMARK, Errors.NOT_LEADER_FOR_PARTITION));
        m.put(myTopicPartition4, new DeleteRecordsResponse.PartitionResponse(DeleteRecordsResponse.INVALID_LOW_WATERMARK, Errors.UNKNOWN_TOPIC_OR_PARTITION));
        List<MetadataResponse.TopicMetadata> t = new ArrayList<>();
        List<MetadataResponse.PartitionMetadata> p = new ArrayList<>();
        p.add(new MetadataResponse.PartitionMetadata(Errors.NONE, 0, nodes.get(0), Collections.singletonList(nodes.get(0)), Collections.singletonList(nodes.get(0)), Collections.<Node>emptyList()));
        p.add(new MetadataResponse.PartitionMetadata(Errors.NONE, 1, nodes.get(0), Collections.singletonList(nodes.get(0)), Collections.singletonList(nodes.get(0)), Collections.<Node>emptyList()));
        p.add(new MetadataResponse.PartitionMetadata(Errors.LEADER_NOT_AVAILABLE, 2, null, Collections.singletonList(nodes.get(0)), Collections.singletonList(nodes.get(0)), Collections.<Node>emptyList()));
        p.add(new MetadataResponse.PartitionMetadata(Errors.NONE, 3, nodes.get(0), Collections.singletonList(nodes.get(0)), Collections.singletonList(nodes.get(0)), Collections.<Node>emptyList()));
        p.add(new MetadataResponse.PartitionMetadata(Errors.NONE, 4, nodes.get(0), Collections.singletonList(nodes.get(0)), Collections.singletonList(nodes.get(0)), Collections.<Node>emptyList()));
        t.add(new MetadataResponse.TopicMetadata(Errors.NONE, "my_topic", false, p));
        env.kafkaClient().prepareResponse(new MetadataResponse(cluster.nodes(), cluster.clusterResource().clusterId(), cluster.controller().id(), t));
        env.kafkaClient().prepareResponse(new DeleteRecordsResponse(0, m));
        Map<TopicPartition, RecordsToDelete> recordsToDelete = new HashMap<>();
        recordsToDelete.put(myTopicPartition0, RecordsToDelete.beforeOffset(3L));
        recordsToDelete.put(myTopicPartition1, RecordsToDelete.beforeOffset(10L));
        recordsToDelete.put(myTopicPartition2, RecordsToDelete.beforeOffset(10L));
        recordsToDelete.put(myTopicPartition3, RecordsToDelete.beforeOffset(10L));
        recordsToDelete.put(myTopicPartition4, RecordsToDelete.beforeOffset(10L));
        DeleteRecordsResult results = env.adminClient().deleteRecords(recordsToDelete);
        // success on records deletion for partition 0
        Map<TopicPartition, KafkaFuture<DeletedRecords>> values = results.lowWatermarks();
        KafkaFuture<DeletedRecords> myTopicPartition0Result = values.get(myTopicPartition0);
        long lowWatermark = myTopicPartition0Result.get().lowWatermark();
        assertEquals(3L, lowWatermark);
        // "offset out of range" failure on records deletion for partition 1
        KafkaFuture<DeletedRecords> myTopicPartition1Result = values.get(myTopicPartition1);
        try {
            myTopicPartition1Result.get();
            fail("get() should throw ExecutionException");
        } catch (ExecutionException e0) {
            assertTrue(e0.getCause() instanceof OffsetOutOfRangeException);
        }
        // "leader not available" failure on metadata request for partition 2
        KafkaFuture<DeletedRecords> myTopicPartition2Result = values.get(myTopicPartition2);
        try {
            myTopicPartition2Result.get();
            fail("get() should throw ExecutionException");
        } catch (ExecutionException e1) {
            assertTrue(e1.getCause() instanceof LeaderNotAvailableException);
        }
        // "not leader for partition" failure on records deletion for partition 3
        KafkaFuture<DeletedRecords> myTopicPartition3Result = values.get(myTopicPartition3);
        try {
            myTopicPartition3Result.get();
            fail("get() should throw ExecutionException");
        } catch (ExecutionException e1) {
            assertTrue(e1.getCause() instanceof NotLeaderForPartitionException);
        }
        // "unknown topic or partition" failure on records deletion for partition 4
        KafkaFuture<DeletedRecords> myTopicPartition4Result = values.get(myTopicPartition4);
        try {
            myTopicPartition4Result.get();
            fail("get() should throw ExecutionException");
        } catch (ExecutionException e1) {
            assertTrue(e1.getCause() instanceof UnknownTopicOrPartitionException);
        }
    }
}
Also used: HashMap(java.util.HashMap) Node(org.apache.kafka.common.Node) UnknownTopicOrPartitionException(org.apache.kafka.common.errors.UnknownTopicOrPartitionException) ArrayList(java.util.ArrayList) LeaderNotAvailableException(org.apache.kafka.common.errors.LeaderNotAvailableException) MetadataResponse(org.apache.kafka.common.requests.MetadataResponse) DeleteRecordsResponse(org.apache.kafka.common.requests.DeleteRecordsResponse) PartitionInfo(org.apache.kafka.common.PartitionInfo) ExecutionException(java.util.concurrent.ExecutionException) NotLeaderForPartitionException(org.apache.kafka.common.errors.NotLeaderForPartitionException) KafkaFuture(org.apache.kafka.common.KafkaFuture) Cluster(org.apache.kafka.common.Cluster) TopicPartition(org.apache.kafka.common.TopicPartition) OffsetOutOfRangeException(org.apache.kafka.common.errors.OffsetOutOfRangeException) Test(org.junit.Test)
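
For context, here is a minimal sketch of exercising the same API against a live broker rather than the mocked test environment; the bootstrap address and the reuse of "my_topic" are assumptions for illustration, not part of the test above.

import java.util.Collections;
import java.util.Map;
import java.util.Properties;
import org.apache.kafka.clients.admin.AdminClient;
import org.apache.kafka.clients.admin.AdminClientConfig;
import org.apache.kafka.clients.admin.DeleteRecordsResult;
import org.apache.kafka.clients.admin.RecordsToDelete;
import org.apache.kafka.common.TopicPartition;

public class DeleteRecordsExample {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
        try (AdminClient admin = AdminClient.create(props)) {
            TopicPartition tp = new TopicPartition("my_topic", 0);
            // Ask the leader to discard all records below offset 3; the broker
            // responds with the partition's new low watermark.
            Map<TopicPartition, RecordsToDelete> request =
                    Collections.singletonMap(tp, RecordsToDelete.beforeOffset(3L));
            DeleteRecordsResult result = admin.deleteRecords(request);
            long lowWatermark = result.lowWatermarks().get(tp).get().lowWatermark();
            System.out.println("New low watermark: " + lowWatermark);
        }
    }
}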

Example 57 with MetadataResponse

Use of org.apache.kafka.common.requests.MetadataResponse in project apache-kafka-on-k8s by banzaicloud.

From the class Fetcher, method getTopicMetadata.

/**
 * Get metadata for all topics present in the Kafka cluster.
 *
 * @param request The MetadataRequest to send
 * @param timeout the maximum time in milliseconds to keep attempting the fetch
 * @return The map of topics with their partition information
 */
public Map<String, List<PartitionInfo>> getTopicMetadata(MetadataRequest.Builder request, long timeout) {
    // Save the round trip if no topics are requested.
    if (!request.isAllTopics() && request.topics().isEmpty())
        return Collections.emptyMap();
    long start = time.milliseconds();
    long remaining = timeout;
    do {
        RequestFuture<ClientResponse> future = sendMetadataRequest(request);
        client.poll(future, remaining);
        if (future.failed() && !future.isRetriable())
            throw future.exception();
        if (future.succeeded()) {
            MetadataResponse response = (MetadataResponse) future.value().responseBody();
            Cluster cluster = response.cluster();
            Set<String> unauthorizedTopics = cluster.unauthorizedTopics();
            if (!unauthorizedTopics.isEmpty())
                throw new TopicAuthorizationException(unauthorizedTopics);
            boolean shouldRetry = false;
            Map<String, Errors> errors = response.errors();
            if (!errors.isEmpty()) {
                // if there were errors, we need to check whether they were fatal or whether
                // we should just retry
                log.debug("Topic metadata fetch included errors: {}", errors);
                for (Map.Entry<String, Errors> errorEntry : errors.entrySet()) {
                    String topic = errorEntry.getKey();
                    Errors error = errorEntry.getValue();
                    if (error == Errors.INVALID_TOPIC_EXCEPTION)
                        throw new InvalidTopicException("Topic '" + topic + "' is invalid");
                    else if (error == Errors.UNKNOWN_TOPIC_OR_PARTITION)
                        // an unknown topic is skipped here and simply left absent
                        // in the returned map
                        continue;
                    else if (error.exception() instanceof RetriableException)
                        shouldRetry = true;
                    else
                        throw new KafkaException("Unexpected error fetching metadata for topic " + topic, error.exception());
                }
            }
            if (!shouldRetry) {
                HashMap<String, List<PartitionInfo>> topicsPartitionInfos = new HashMap<>();
                for (String topic : cluster.topics())
                    topicsPartitionInfos.put(topic, cluster.availablePartitionsForTopic(topic));
                return topicsPartitionInfos;
            }
        }
        long elapsed = time.milliseconds() - start;
        remaining = timeout - elapsed;
        if (remaining > 0) {
            long backoff = Math.min(remaining, retryBackoffMs);
            time.sleep(backoff);
            remaining -= backoff;
        }
    } while (remaining > 0);
    throw new TimeoutException("Timeout expired while fetching topic metadata");
}
Also used: ClientResponse(org.apache.kafka.clients.ClientResponse) HashMap(java.util.HashMap) LinkedHashMap(java.util.LinkedHashMap) Cluster(org.apache.kafka.common.Cluster) Errors(org.apache.kafka.common.protocol.Errors) MetadataResponse(org.apache.kafka.common.requests.MetadataResponse) InvalidTopicException(org.apache.kafka.common.errors.InvalidTopicException) KafkaException(org.apache.kafka.common.KafkaException) Collections.emptyList(java.util.Collections.emptyList) List(java.util.List) ArrayList(java.util.ArrayList) Map(java.util.Map) TopicAuthorizationException(org.apache.kafka.common.errors.TopicAuthorizationException) RetriableException(org.apache.kafka.common.errors.RetriableException) TimeoutException(org.apache.kafka.common.errors.TimeoutException)
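
In this codebase, getTopicMetadata backs the consumer's public metadata lookups. A minimal sketch of reaching it through KafkaConsumer.listTopics(), assuming a broker is reachable at localhost:9092:

import java.util.List;
import java.util.Map;
import java.util.Properties;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.PartitionInfo;
import org.apache.kafka.common.serialization.StringDeserializer;

public class ListTopicsExample {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
        try (KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props)) {
            // listTopics() sends an all-topics MetadataRequest and blocks,
            // retrying with backoff as above, until the metadata arrives or
            // the request timeout expires.
            Map<String, List<PartitionInfo>> topics = consumer.listTopics();
            for (Map.Entry<String, List<PartitionInfo>> e : topics.entrySet())
                System.out.println(e.getKey() + ": " + e.getValue().size() + " partitions");
        }
    }
}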

Example 58 with MetadataResponse

Use of org.apache.kafka.common.requests.MetadataResponse in project apache-kafka-on-k8s by banzaicloud.

From the class NetworkClient, method handleCompletedReceives.

/**
 * Handle any completed receives and update the response list with the responses received.
 *
 * @param responses The list of responses to update
 * @param now The current time
 */
private void handleCompletedReceives(List<ClientResponse> responses, long now) {
    for (NetworkReceive receive : this.selector.completedReceives()) {
        String source = receive.source();
        InFlightRequest req = inFlightRequests.completeNext(source);
        Struct responseStruct = parseStructMaybeUpdateThrottleTimeMetrics(receive.payload(), req.header, throttleTimeSensor, now);
        if (log.isTraceEnabled()) {
            log.trace("Completed receive from node {} for {} with correlation id {}, received {}", req.destination, req.header.apiKey(), req.header.correlationId(), responseStruct);
        }
        AbstractResponse body = AbstractResponse.parseResponse(req.header.apiKey(), responseStruct);
        if (req.isInternalRequest && body instanceof MetadataResponse)
            metadataUpdater.handleCompletedMetadataResponse(req.header, now, (MetadataResponse) body);
        else if (req.isInternalRequest && body instanceof ApiVersionsResponse)
            handleApiVersionsResponse(responses, req, now, (ApiVersionsResponse) body);
        else
            responses.add(req.completed(body, now));
    }
}
Also used: ApiVersionsResponse(org.apache.kafka.common.requests.ApiVersionsResponse) AbstractResponse(org.apache.kafka.common.requests.AbstractResponse) NetworkReceive(org.apache.kafka.common.network.NetworkReceive) MetadataResponse(org.apache.kafka.common.requests.MetadataResponse) Struct(org.apache.kafka.common.protocol.types.Struct)
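
The heart of the method is the instanceof dispatch: responses to the client's own internal requests (metadata refreshes, ApiVersions probes) are consumed in place, and only responses to user-initiated requests are surfaced. A simplified sketch of that rule, using hypothetical stand-in types rather than Kafka's:

import java.util.ArrayList;
import java.util.List;

abstract class Response {}
class MetadataResp extends Response {}
class ApiVersionsResp extends Response {}
class ProduceResp extends Response {}

class Dispatcher {
    private final List<Response> surfaced = new ArrayList<>();

    void dispatch(Response body, boolean isInternalRequest) {
        if (isInternalRequest && body instanceof MetadataResp) {
            // Fed to the metadata updater; the caller never sees it.
            updateMetadata((MetadataResp) body);
        } else if (isInternalRequest && body instanceof ApiVersionsResp) {
            // Recorded so the client knows which API versions the broker supports.
            recordApiVersions((ApiVersionsResp) body);
        } else {
            // A completed user request: hand it back via the response list.
            surfaced.add(body);
        }
    }

    private void updateMetadata(MetadataResp r) { /* refresh the cluster view */ }
    private void recordApiVersions(ApiVersionsResp r) { /* track broker capabilities */ }
}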

Example 59 with MetadataResponse

Use of org.apache.kafka.common.requests.MetadataResponse in project kafka by apache.

From the class MetadataCache, method mergeWith.

/**
 * Merges the metadata cache's contents with the provided metadata, returning a new metadata cache. The provided
 * metadata is presumed to be more recent than the cache's metadata, and therefore all overlapping metadata will
 * be overridden.
 *
 * @param newClusterId the new cluster Id
 * @param newNodes the new set of nodes
 * @param addPartitions partitions to add
 * @param addUnauthorizedTopics unauthorized topics to add
 * @param addInvalidTopics invalid topics to add
 * @param addInternalTopics internal topics to add
 * @param newController the new controller node
 * @param topicIds the mapping from topic name to topic ID from the MetadataResponse
 * @param retainTopic returns whether a topic's metadata should be retained
 * @return the merged metadata cache
 */
MetadataCache mergeWith(String newClusterId,
                        Map<Integer, Node> newNodes,
                        Collection<PartitionMetadata> addPartitions,
                        Set<String> addUnauthorizedTopics,
                        Set<String> addInvalidTopics,
                        Set<String> addInternalTopics,
                        Node newController,
                        Map<String, Uuid> topicIds,
                        BiPredicate<String, Boolean> retainTopic) {
    Predicate<String> shouldRetainTopic = topic -> retainTopic.test(topic, internalTopics.contains(topic));
    Map<TopicPartition, PartitionMetadata> newMetadataByPartition = new HashMap<>(addPartitions.size());
    // We want the most recent topic ID. We start with the previous ID stored for retained topics and then
    // update with newest information from the MetadataResponse. We always take the latest state, removing existing
    // topic IDs if the latest state contains the topic name but not a topic ID.
    Map<String, Uuid> newTopicIds = topicIds.entrySet().stream()
            .filter(entry -> shouldRetainTopic.test(entry.getKey()))
            .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue));
    for (PartitionMetadata partition : addPartitions) {
        newMetadataByPartition.put(partition.topicPartition, partition);
        Uuid id = topicIds.get(partition.topic());
        if (id != null)
            newTopicIds.put(partition.topic(), id);
        else
            // Remove if the latest metadata does not have a topic ID
            newTopicIds.remove(partition.topic());
    }
    for (Map.Entry<TopicPartition, PartitionMetadata> entry : metadataByPartition.entrySet()) {
        if (shouldRetainTopic.test(entry.getKey().topic())) {
            newMetadataByPartition.putIfAbsent(entry.getKey(), entry.getValue());
        }
    }
    Set<String> newUnauthorizedTopics = fillSet(addUnauthorizedTopics, unauthorizedTopics, shouldRetainTopic);
    Set<String> newInvalidTopics = fillSet(addInvalidTopics, invalidTopics, shouldRetainTopic);
    Set<String> newInternalTopics = fillSet(addInternalTopics, internalTopics, shouldRetainTopic);
    return new MetadataCache(newClusterId, newNodes, newMetadataByPartition.values(), newUnauthorizedTopics, newInvalidTopics, newInternalTopics, newController, newTopicIds);
}
Also used: Uuid(org.apache.kafka.common.Uuid) TopicPartition(org.apache.kafka.common.TopicPartition) ClusterResource(org.apache.kafka.common.ClusterResource) Predicate(java.util.function.Predicate) Collection(java.util.Collection) Set(java.util.Set) HashMap(java.util.HashMap) PartitionInfo(org.apache.kafka.common.PartitionInfo) InetSocketAddress(java.net.InetSocketAddress) Collectors(java.util.stream.Collectors) PartitionMetadata(org.apache.kafka.common.requests.MetadataResponse.PartitionMetadata) HashSet(java.util.HashSet) BiPredicate(java.util.function.BiPredicate) Cluster(org.apache.kafka.common.Cluster) List(java.util.List) Map(java.util.Map) Optional(java.util.Optional) Node(org.apache.kafka.common.Node) MetadataResponse(org.apache.kafka.common.requests.MetadataResponse) Collections(java.util.Collections)
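
fillSet is a private helper not shown in this excerpt. A plausible reconstruction, consistent with its three call sites above (the actual implementation may differ): keep every fresh entry, and carry over an old entry only when the retention predicate approves it.

import java.util.HashSet;
import java.util.Set;
import java.util.function.Predicate;

final class Sets {
    static <T> Set<T> fillSet(Set<T> freshEntries, Set<T> oldEntries, Predicate<T> shouldRetain) {
        // New entries always win; they reflect the latest MetadataResponse.
        Set<T> result = new HashSet<>(freshEntries);
        for (T element : oldEntries) {
            if (shouldRetain.test(element))
                result.add(element);  // retained from the previous cache
        }
        return result;
    }
}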

Example 60 with MetadataResponse

Use of org.apache.kafka.common.requests.MetadataResponse in project kafka by apache.

From the class GetListOffsetsCallsBenchmark, method setup.

@Setup(Level.Trial)
public void setup() {
    MetadataResponseData data = new MetadataResponseData();
    List<MetadataResponseTopic> mrTopicList = new ArrayList<>();
    Set<String> topics = new HashSet<>();
    for (int topicIndex = 0; topicIndex < topicCount; topicIndex++) {
        Uuid topicId = Uuid.randomUuid();
        String topicName = "topic-" + topicIndex;
        MetadataResponseTopic mrTopic = new MetadataResponseTopic()
                .setTopicId(topicId)
                .setName(topicName)
                .setErrorCode((short) 0)
                .setIsInternal(false);
        List<MetadataResponsePartition> mrPartitionList = new ArrayList<>();
        for (int partition = 0; partition < partitionCount; partition++) {
            TopicPartition tp = new TopicPartition(topicName, partition);
            topics.add(tp.topic());
            futures.put(tp, new KafkaFutureImpl<>());
            topicPartitionOffsets.put(tp, OffsetSpec.latest());
            MetadataResponsePartition mrPartition = new MetadataResponsePartition()
                    .setLeaderId(partition % numNodes)
                    .setPartitionIndex(partition)
                    .setIsrNodes(Arrays.asList(0, 1, 2))
                    .setReplicaNodes(Arrays.asList(0, 1, 2))
                    .setOfflineReplicas(Collections.emptyList())
                    .setErrorCode((short) 0);
            mrPartitionList.add(mrPartition);
        }
        mrTopic.setPartitions(mrPartitionList);
        mrTopicList.add(mrTopic);
    }
    data.setTopics(new MetadataResponseData.MetadataResponseTopicCollection(mrTopicList.listIterator()));
    long deadline = 0L;
    short version = 0;
    context = new MetadataOperationContext<>(topics, new ListOffsetsOptions(), deadline, futures);
    context.setResponse(Optional.of(new MetadataResponse(data, version)));
    AdminClientUnitTestEnv adminEnv = new AdminClientUnitTestEnv(mockCluster());
    admin = (KafkaAdminClient) adminEnv.adminClient();
}
Also used: MetadataResponseTopic(org.apache.kafka.common.message.MetadataResponseData.MetadataResponseTopic) AdminClientUnitTestEnv(org.apache.kafka.clients.admin.AdminClientUnitTestEnv) ListOffsetsOptions(org.apache.kafka.clients.admin.ListOffsetsOptions) ArrayList(java.util.ArrayList) MetadataResponsePartition(org.apache.kafka.common.message.MetadataResponseData.MetadataResponsePartition) Uuid(org.apache.kafka.common.Uuid) TopicPartition(org.apache.kafka.common.TopicPartition) MetadataResponseData(org.apache.kafka.common.message.MetadataResponseData) MetadataResponse(org.apache.kafka.common.requests.MetadataResponse) HashSet(java.util.HashSet) Setup(org.openjdk.jmh.annotations.Setup)
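
The setup only builds state; the measured method sits alongside it in the same class (@Benchmark, like @Setup, comes from org.openjdk.jmh.annotations) and would look roughly like the sketch below. The getListOffsetsCalls hook on KafkaAdminClient is inferred from this benchmark's name and fields, not a documented public API.

@Benchmark
public Object testGetListOffsetsCalls() {
    // Building the per-broker ListOffsets calls from the prepared metadata
    // response is the work under measurement; returning the result keeps the
    // JIT from eliminating it as dead code.
    return admin.getListOffsetsCalls(context, topicPartitionOffsets, futures);
}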
