Example 1 with MetadataResponse

Use of org.apache.kafka.common.requests.MetadataResponse in project kafka by apache.

Class InternalTopicManager, method makeReady.

/**
 * Prepares the given set of internal topics.
 *
 * If a topic does not exist, it is created.
 * If a topic already exists with the correct number of partitions, it is left untouched.
 * If a topic already exists with a different number of partitions, we fail with an exception
 * asking the user to reset the application before restarting it.
 */
public void makeReady(final Map<InternalTopicConfig, Integer> topics) {
    for (int i = 0; i < MAX_TOPIC_READY_TRY; i++) {
        try {
            final MetadataResponse metadata = streamsKafkaClient.fetchMetadata();
            final Map<String, Integer> existingTopicPartitions = fetchExistingPartitionCountByTopic(metadata);
            final Map<InternalTopicConfig, Integer> topicsToBeCreated = validateTopicPartitions(topics, existingTopicPartitions);
            streamsKafkaClient.createTopics(topicsToBeCreated, replicationFactor, windowChangeLogAdditionalRetention, metadata);
            return;
        } catch (StreamsException ex) {
            log.warn("Could not create internal topics: " + ex.getMessage() + " Retry #" + i);
        }
    }
    throw new StreamsException("Could not create internal topics.");
}
Also used : StreamsException(org.apache.kafka.streams.errors.StreamsException) MetadataResponse(org.apache.kafka.common.requests.MetadataResponse)
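
The method retries up to MAX_TOPIC_READY_TRY times and only surfaces a StreamsException once every attempt has failed. A minimal standalone sketch of that bounded-retry contract, using hypothetical names that are not part of Kafka:

import java.util.function.Supplier;

// Hypothetical helper (illustration only): attempt a transient operation a
// bounded number of times, logging each failure, then give up loudly.
static <T> T withRetries(int maxTries, Supplier<T> operation, String description) {
    RuntimeException last = null;
    for (int i = 0; i < maxTries; i++) {
        try {
            return operation.get();
        } catch (RuntimeException ex) {
            last = ex; // keep the cause so the final failure is diagnosable
            System.err.println("Retry #" + i + " of " + description + " failed: " + ex.getMessage());
        }
    }
    throw new RuntimeException("Could not " + description + ".", last);
}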

Example 2 with MetadataResponse

Use of org.apache.kafka.common.requests.MetadataResponse in project apache-kafka-on-k8s by banzaicloud.

Class FetcherTest, method newMetadataResponse.

private MetadataResponse newMetadataResponse(String topic, Errors error) {
    List<MetadataResponse.PartitionMetadata> partitionsMetadata = new ArrayList<>();
    if (error == Errors.NONE) {
        for (PartitionInfo partitionInfo : cluster.partitionsForTopic(topic)) {
            partitionsMetadata.add(new MetadataResponse.PartitionMetadata(
                    Errors.NONE,
                    partitionInfo.partition(),
                    partitionInfo.leader(),
                    Arrays.asList(partitionInfo.replicas()),
                    Arrays.asList(partitionInfo.inSyncReplicas()),
                    Arrays.asList(partitionInfo.offlineReplicas())));
        }
    }
    MetadataResponse.TopicMetadata topicMetadata = new MetadataResponse.TopicMetadata(error, topic, false, partitionsMetadata);
    return new MetadataResponse(cluster.nodes(), null, MetadataResponse.NO_CONTROLLER_ID, Arrays.asList(topicMetadata));
}
Also used : ArrayList(java.util.ArrayList) MetadataResponse(org.apache.kafka.common.requests.MetadataResponse) PartitionInfo(org.apache.kafka.common.PartitionInfo)
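
A test typically queues the synthesized response on a MockClient so that the next metadata request is answered deterministically. A hedged sketch, assuming a configured org.apache.kafka.clients.MockClient named client (illustrative, not quoted from the test above):

// Queue a healthy metadata response for the happy path ...
client.prepareResponse(newMetadataResponse("test-topic", Errors.NONE));
// ... or a failing one to drive the error-handling path of the code under test.
client.prepareResponse(newMetadataResponse("test-topic", Errors.TOPIC_AUTHORIZATION_FAILED));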

Example 3 with MetadataResponse

Use of org.apache.kafka.common.requests.MetadataResponse in project apache-kafka-on-k8s by banzaicloud.

Class KafkaAdminClient, method deleteRecords.

public DeleteRecordsResult deleteRecords(final Map<TopicPartition, RecordsToDelete> recordsToDelete, final DeleteRecordsOptions options) {
    // Delete requests must be sent to the leader node of each partition, so the
    // provided map is regrouped below into per-leader maps of partition to offset.
    final Map<TopicPartition, KafkaFutureImpl<DeletedRecords>> futures = new HashMap<>(recordsToDelete.size());
    for (TopicPartition topicPartition : recordsToDelete.keySet()) {
        futures.put(topicPartition, new KafkaFutureImpl<DeletedRecords>());
    }
    // collect the topic names whose metadata we need to look up
    final Set<String> topics = new HashSet<>();
    for (TopicPartition topicPartition : recordsToDelete.keySet()) {
        topics.add(topicPartition.topic());
    }
    final long nowMetadata = time.milliseconds();
    final long deadline = calcDeadlineMs(nowMetadata, options.timeoutMs());
    // fetch topic metadata so that partition leaders can be resolved
    runnable.call(new Call("topicsMetadata", deadline, new LeastLoadedNodeProvider()) {

        @Override
        AbstractRequest.Builder createRequest(int timeoutMs) {
            return new MetadataRequest.Builder(new ArrayList<>(topics), false);
        }

        @Override
        void handleResponse(AbstractResponse abstractResponse) {
            MetadataResponse response = (MetadataResponse) abstractResponse;
            Map<String, Errors> errors = response.errors();
            Cluster cluster = response.cluster();
            // complete the futures of topics that came back with errors
            for (Map.Entry<String, Errors> topicError : errors.entrySet()) {
                for (Map.Entry<TopicPartition, KafkaFutureImpl<DeletedRecords>> future : futures.entrySet()) {
                    if (future.getKey().topic().equals(topicError.getKey())) {
                        future.getValue().completeExceptionally(topicError.getValue().exception());
                    }
                }
            }
            // group the remaining topic partitions by their leader node
            Map<Node, Map<TopicPartition, Long>> leaders = new HashMap<>();
            for (Map.Entry<TopicPartition, RecordsToDelete> entry : recordsToDelete.entrySet()) {
                // skip deletion requests for topics whose metadata returned an error
                if (!errors.containsKey(entry.getKey().topic())) {
                    Node node = cluster.leaderFor(entry.getKey());
                    if (node != null) {
                        if (!leaders.containsKey(node))
                            leaders.put(node, new HashMap<TopicPartition, Long>());
                        leaders.get(node).put(entry.getKey(), entry.getValue().beforeOffset());
                    } else {
                        KafkaFutureImpl<DeletedRecords> future = futures.get(entry.getKey());
                        future.completeExceptionally(Errors.LEADER_NOT_AVAILABLE.exception());
                    }
                }
            }
            for (final Map.Entry<Node, Map<TopicPartition, Long>> entry : leaders.entrySet()) {
                final long nowDelete = time.milliseconds();
                final int brokerId = entry.getKey().id();
                runnable.call(new Call("deleteRecords", deadline, new ConstantNodeIdProvider(brokerId)) {

                    @Override
                    AbstractRequest.Builder createRequest(int timeoutMs) {
                        return new DeleteRecordsRequest.Builder(timeoutMs, entry.getValue());
                    }

                    @Override
                    void handleResponse(AbstractResponse abstractResponse) {
                        DeleteRecordsResponse response = (DeleteRecordsResponse) abstractResponse;
                        for (Map.Entry<TopicPartition, DeleteRecordsResponse.PartitionResponse> result : response.responses().entrySet()) {
                            KafkaFutureImpl<DeletedRecords> future = futures.get(result.getKey());
                            if (result.getValue().error == Errors.NONE) {
                                future.complete(new DeletedRecords(result.getValue().lowWatermark));
                            } else {
                                future.completeExceptionally(result.getValue().error.exception());
                            }
                        }
                    }

                    @Override
                    void handleFailure(Throwable throwable) {
                        completeAllExceptionally(futures.values(), throwable);
                    }
                }, nowDelete);
            }
        }

        @Override
        void handleFailure(Throwable throwable) {
            completeAllExceptionally(futures.values(), throwable);
        }
    }, nowMetadata);
    return new DeleteRecordsResult(new HashMap<TopicPartition, KafkaFuture<DeletedRecords>>(futures));
}
Also used : HashMap(java.util.HashMap) ChannelBuilder(org.apache.kafka.common.network.ChannelBuilder) Node(org.apache.kafka.common.Node) AbstractRequest(org.apache.kafka.common.requests.AbstractRequest) ArrayList(java.util.ArrayList) MetadataResponse(org.apache.kafka.common.requests.MetadataResponse) DeleteRecordsResponse(org.apache.kafka.common.requests.DeleteRecordsResponse) HashSet(java.util.HashSet) KafkaFuture(org.apache.kafka.common.KafkaFuture) AbstractResponse(org.apache.kafka.common.requests.AbstractResponse) Cluster(org.apache.kafka.common.Cluster) KafkaFutureImpl(org.apache.kafka.common.internals.KafkaFutureImpl) MetadataRequest(org.apache.kafka.common.requests.MetadataRequest) TopicPartition(org.apache.kafka.common.TopicPartition) AtomicLong(java.util.concurrent.atomic.AtomicLong) Map(java.util.Map)
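
This internal call backs the public AdminClient#deleteRecords API. A hedged usage sketch of that public entry point, assuming a configured AdminClient named admin and an existing topic (names are illustrative; .get() blocks and error handling is elided):

TopicPartition partition = new TopicPartition("my-topic", 0);
Map<TopicPartition, RecordsToDelete> toDelete = new HashMap<>();
// Delete every record before offset 42 on this partition.
toDelete.put(partition, RecordsToDelete.beforeOffset(42L));
DeleteRecordsResult result = admin.deleteRecords(toDelete);
// One future per partition; a leaderless partition fails with LEADER_NOT_AVAILABLE, as above.
DeletedRecords deleted = result.lowWatermarks().get(partition).get();
System.out.println("new low watermark: " + deleted.lowWatermark());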

Example 4 with MetadataResponse

Use of org.apache.kafka.common.requests.MetadataResponse in project apache-kafka-on-k8s by banzaicloud.

Class KafkaAdminClient, method describeTopics.

@Override
public DescribeTopicsResult describeTopics(final Collection<String> topicNames, DescribeTopicsOptions options) {
    final Map<String, KafkaFutureImpl<TopicDescription>> topicFutures = new HashMap<>(topicNames.size());
    final ArrayList<String> topicNamesList = new ArrayList<>();
    for (String topicName : topicNames) {
        if (topicNameIsUnrepresentable(topicName)) {
            KafkaFutureImpl<TopicDescription> future = new KafkaFutureImpl<TopicDescription>();
            future.completeExceptionally(new InvalidTopicException("The given topic name '" + topicName + "' cannot be represented in a request."));
            topicFutures.put(topicName, future);
        } else if (!topicFutures.containsKey(topicName)) {
            topicFutures.put(topicName, new KafkaFutureImpl<TopicDescription>());
            topicNamesList.add(topicName);
        }
    }
    final long now = time.milliseconds();
    Call call = new Call("describeTopics", calcDeadlineMs(now, options.timeoutMs()), new ControllerNodeProvider()) {

        private boolean supportsDisablingTopicCreation = true;

        @Override
        AbstractRequest.Builder createRequest(int timeoutMs) {
            if (supportsDisablingTopicCreation)
                return new MetadataRequest.Builder(topicNamesList, false);
            else
                return MetadataRequest.Builder.allTopics();
        }

        @Override
        void handleResponse(AbstractResponse abstractResponse) {
            MetadataResponse response = (MetadataResponse) abstractResponse;
            // Handle server responses for particular topics.
            Cluster cluster = response.cluster();
            Map<String, Errors> errors = response.errors();
            for (Map.Entry<String, KafkaFutureImpl<TopicDescription>> entry : topicFutures.entrySet()) {
                String topicName = entry.getKey();
                KafkaFutureImpl<TopicDescription> future = entry.getValue();
                Errors topicError = errors.get(topicName);
                if (topicError != null) {
                    future.completeExceptionally(topicError.exception());
                    continue;
                }
                if (!cluster.topics().contains(topicName)) {
                    future.completeExceptionally(new InvalidTopicException("Topic " + topicName + " not found."));
                    continue;
                }
                boolean isInternal = cluster.internalTopics().contains(topicName);
                List<PartitionInfo> partitionInfos = cluster.partitionsForTopic(topicName);
                List<TopicPartitionInfo> partitions = new ArrayList<>(partitionInfos.size());
                for (PartitionInfo partitionInfo : partitionInfos) {
                    TopicPartitionInfo topicPartitionInfo = new TopicPartitionInfo(
                            partitionInfo.partition(),
                            leader(partitionInfo),
                            Arrays.asList(partitionInfo.replicas()),
                            Arrays.asList(partitionInfo.inSyncReplicas()));
                    partitions.add(topicPartitionInfo);
                }
                Collections.sort(partitions, new Comparator<TopicPartitionInfo>() {

                    @Override
                    public int compare(TopicPartitionInfo tp1, TopicPartitionInfo tp2) {
                        return Integer.compare(tp1.partition(), tp2.partition());
                    }
                });
                TopicDescription topicDescription = new TopicDescription(topicName, isInternal, partitions);
                future.complete(topicDescription);
            }
        }

        private Node leader(PartitionInfo partitionInfo) {
            if (partitionInfo.leader() == null || partitionInfo.leader().id() == Node.noNode().id())
                return null;
            return partitionInfo.leader();
        }

        @Override
        boolean handleUnsupportedVersionException(UnsupportedVersionException exception) {
            if (supportsDisablingTopicCreation) {
                supportsDisablingTopicCreation = false;
                return true;
            }
            return false;
        }

        @Override
        void handleFailure(Throwable throwable) {
            completeAllExceptionally(topicFutures.values(), throwable);
        }
    };
    if (!topicNamesList.isEmpty()) {
        runnable.call(call, now);
    }
    return new DescribeTopicsResult(new HashMap<String, KafkaFuture<TopicDescription>>(topicFutures));
}
Also used : HashMap(java.util.HashMap) AbstractRequest(org.apache.kafka.common.requests.AbstractRequest) ArrayList(java.util.ArrayList) MetadataResponse(org.apache.kafka.common.requests.MetadataResponse) PartitionInfo(org.apache.kafka.common.PartitionInfo) TopicPartitionInfo(org.apache.kafka.common.TopicPartitionInfo) KafkaFuture(org.apache.kafka.common.KafkaFuture) AbstractResponse(org.apache.kafka.common.requests.AbstractResponse) Cluster(org.apache.kafka.common.Cluster) KafkaFutureImpl(org.apache.kafka.common.internals.KafkaFutureImpl) Errors(org.apache.kafka.common.protocol.Errors) MetadataRequest(org.apache.kafka.common.requests.MetadataRequest) InvalidTopicException(org.apache.kafka.common.errors.InvalidTopicException) Map(java.util.Map) UnsupportedVersionException(org.apache.kafka.common.errors.UnsupportedVersionException)
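
The public entry point this method implements can be exercised directly. A hedged sketch, assuming a configured AdminClient named admin (illustrative names; .get() blocks and may throw):

DescribeTopicsResult result = admin.describeTopics(Collections.singletonList("my-topic"));
// One future per requested topic; an unknown or unrepresentable name fails its future.
TopicDescription description = result.values().get("my-topic").get();
System.out.println("internal=" + description.isInternal()
        + ", partitions=" + description.partitions().size());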

Example 5 with MetadataResponse

Use of org.apache.kafka.common.requests.MetadataResponse in project kafka by apache.

Class NetworkClientTest, method testAuthenticationFailureWithInFlightMetadataRequest.

@Test
public void testAuthenticationFailureWithInFlightMetadataRequest() {
    int refreshBackoffMs = 50;
    MetadataResponse metadataResponse = RequestTestUtils.metadataUpdateWith(2, Collections.emptyMap());
    Metadata metadata = new Metadata(refreshBackoffMs, 5000, new LogContext(), new ClusterResourceListeners());
    metadata.updateWithCurrentRequestVersion(metadataResponse, false, time.milliseconds());
    Cluster cluster = metadata.fetch();
    Node node1 = cluster.nodes().get(0);
    Node node2 = cluster.nodes().get(1);
    NetworkClient client = createNetworkClientWithNoVersionDiscovery(metadata);
    awaitReady(client, node1);
    metadata.requestUpdate();
    time.sleep(refreshBackoffMs);
    client.poll(0, time.milliseconds());
    Optional<Node> nodeWithPendingMetadataOpt = cluster.nodes().stream().filter(node -> client.hasInFlightRequests(node.idString())).findFirst();
    assertEquals(Optional.of(node1), nodeWithPendingMetadataOpt);
    assertFalse(client.ready(node2, time.milliseconds()));
    selector.serverAuthenticationFailed(node2.idString());
    client.poll(0, time.milliseconds());
    assertNotNull(client.authenticationException(node2));
    ByteBuffer requestBuffer = selector.completedSendBuffers().get(0).buffer();
    RequestHeader header = parseHeader(requestBuffer);
    assertEquals(ApiKeys.METADATA, header.apiKey());
    ByteBuffer responseBuffer = RequestTestUtils.serializeResponseWithHeader(metadataResponse, header.apiVersion(), header.correlationId());
    selector.delayedReceive(new DelayedReceive(node1.idString(), new NetworkReceive(node1.idString(), responseBuffer)));
    int initialUpdateVersion = metadata.updateVersion();
    client.poll(0, time.milliseconds());
    assertEquals(initialUpdateVersion + 1, metadata.updateVersion());
}
Also used : BeforeEach(org.junit.jupiter.api.BeforeEach) MockTime(org.apache.kafka.common.utils.MockTime) Arrays(java.util.Arrays) ProduceRequest(org.apache.kafka.common.requests.ProduceRequest) Assertions.assertNotEquals(org.junit.jupiter.api.Assertions.assertNotEquals) KafkaException(org.apache.kafka.common.KafkaException) ByteBuffer(java.nio.ByteBuffer) InetAddress(java.net.InetAddress) Cluster(org.apache.kafka.common.Cluster) RequestHeader(org.apache.kafka.common.requests.RequestHeader) Assertions.assertFalse(org.junit.jupiter.api.Assertions.assertFalse) SaslClientAuthenticator(org.apache.kafka.common.security.authenticator.SaslClientAuthenticator) AtomicInteger(java.util.concurrent.atomic.AtomicInteger) ApiVersionsResponse(org.apache.kafka.common.requests.ApiVersionsResponse) LogContext(org.apache.kafka.common.utils.LogContext) ApiVersionsResponseData(org.apache.kafka.common.message.ApiVersionsResponseData) ProduceResponseData(org.apache.kafka.common.message.ProduceResponseData) TestUtils(org.apache.kafka.test.TestUtils) PRODUCE(org.apache.kafka.common.protocol.ApiKeys.PRODUCE) ApiVersionCollection(org.apache.kafka.common.message.ApiVersionsResponseData.ApiVersionCollection) Set(java.util.Set) ClusterResourceListeners(org.apache.kafka.common.internals.ClusterResourceListeners) Collectors(java.util.stream.Collectors) Test(org.junit.jupiter.api.Test) List(java.util.List) ApiMessageType(org.apache.kafka.common.message.ApiMessageType) Assertions.assertTrue(org.junit.jupiter.api.Assertions.assertTrue) Errors(org.apache.kafka.common.protocol.Errors) Optional(java.util.Optional) Node(org.apache.kafka.common.Node) MetadataResponse(org.apache.kafka.common.requests.MetadataResponse) AuthenticationException(org.apache.kafka.common.errors.AuthenticationException) IntStream(java.util.stream.IntStream) Assertions.assertThrows(org.junit.jupiter.api.Assertions.assertThrows) Assertions.fail(org.junit.jupiter.api.Assertions.fail) Assertions.assertNotNull(org.junit.jupiter.api.Assertions.assertNotNull) AbstractResponse(org.apache.kafka.common.requests.AbstractResponse) Assertions.assertNull(org.junit.jupiter.api.Assertions.assertNull) RequestTestUtils(org.apache.kafka.common.requests.RequestTestUtils) ArrayList(java.util.ArrayList) HashSet(java.util.HashSet) MetadataRequest(org.apache.kafka.common.requests.MetadataRequest) ApiVersion(org.apache.kafka.common.message.ApiVersionsResponseData.ApiVersion) Assertions.assertEquals(org.junit.jupiter.api.Assertions.assertEquals) ProduceRequestData(org.apache.kafka.common.message.ProduceRequestData) ProduceResponse(org.apache.kafka.common.requests.ProduceResponse) MockSelector(org.apache.kafka.test.MockSelector) ApiKeys(org.apache.kafka.common.protocol.ApiKeys) UnknownHostException(java.net.UnknownHostException) NetworkReceive(org.apache.kafka.common.network.NetworkReceive) DelayedReceive(org.apache.kafka.test.DelayedReceive) UnsupportedVersionException(org.apache.kafka.common.errors.UnsupportedVersionException) Collections(java.util.Collections)
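
The setup idiom from the first few lines of the test can be read in isolation: synthesize a MetadataResponse for a small cluster and push it into a fresh Metadata instance. A hedged sketch built from the same calls the test uses (RequestTestUtils is a Kafka test helper, so this only compiles inside Kafka's test tree):

// Build a synthetic response describing 2 brokers and one single-partition topic.
MetadataResponse seed = RequestTestUtils.metadataUpdateWith(2, Collections.singletonMap("test-topic", 1));
Metadata md = new Metadata(50, 5000, new LogContext(), new ClusterResourceListeners());
// Apply it as if it had just arrived from a broker.
md.updateWithCurrentRequestVersion(seed, false, System.currentTimeMillis());
Cluster cluster = md.fetch();
System.out.println(cluster.nodes().size()); // 2, matching the first argument above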

Aggregations

MetadataResponse (org.apache.kafka.common.requests.MetadataResponse): 107 usages
Test (org.junit.jupiter.api.Test): 71 usages
HashMap (java.util.HashMap): 68 usages
TopicPartition (org.apache.kafka.common.TopicPartition): 43 usages
MockTime (org.apache.kafka.common.utils.MockTime): 38 usages
Time (org.apache.kafka.common.utils.Time): 37 usages
Node (org.apache.kafka.common.Node): 33 usages
ArrayList (java.util.ArrayList): 30 usages
MockClient (org.apache.kafka.clients.MockClient): 29 usages
Cluster (org.apache.kafka.common.Cluster): 29 usages
MetadataRequest (org.apache.kafka.common.requests.MetadataRequest): 28 usages
Errors (org.apache.kafka.common.protocol.Errors): 27 usages
ParameterizedTest (org.junit.jupiter.params.ParameterizedTest): 27 usages
Map (java.util.Map): 26 usages
ProducerMetadata (org.apache.kafka.clients.producer.internals.ProducerMetadata): 26 usages
InvalidTopicException (org.apache.kafka.common.errors.InvalidTopicException): 26 usages
StringSerializer (org.apache.kafka.common.serialization.StringSerializer): 25 usages
List (java.util.List): 24 usages
LogContext (org.apache.kafka.common.utils.LogContext): 22 usages
HashSet (java.util.HashSet): 21 usages