
Example 41 with AbstractResponse

Use of org.apache.kafka.common.requests.AbstractResponse in project kafka by apache.

From the class KafkaAdminClient, method describeCluster.

@Override
public DescribeClusterResult describeCluster(DescribeClusterOptions options) {
    final KafkaFutureImpl<Collection<Node>> describeClusterFuture = new KafkaFutureImpl<>();
    final KafkaFutureImpl<Node> controllerFuture = new KafkaFutureImpl<>();
    final KafkaFutureImpl<String> clusterIdFuture = new KafkaFutureImpl<>();
    final KafkaFutureImpl<Set<AclOperation>> authorizedOperationsFuture = new KafkaFutureImpl<>();
    final long now = time.milliseconds();
    runnable.call(new Call("listNodes", calcDeadlineMs(now, options.timeoutMs()), new LeastLoadedNodeProvider()) {

        private boolean useMetadataRequest = false;

        @Override
        AbstractRequest.Builder createRequest(int timeoutMs) {
            if (!useMetadataRequest) {
                return new DescribeClusterRequest.Builder(new DescribeClusterRequestData().setIncludeClusterAuthorizedOperations(options.includeAuthorizedOperations()));
            } else {
                // Since this only requests node information, it is safe to allow auto topic creation
                // (and it simplifies communication with older brokers).
                return new MetadataRequest.Builder(new MetadataRequestData().setTopics(Collections.emptyList()).setAllowAutoTopicCreation(true).setIncludeClusterAuthorizedOperations(options.includeAuthorizedOperations()));
            }
        }

        @Override
        void handleResponse(AbstractResponse abstractResponse) {
            if (!useMetadataRequest) {
                DescribeClusterResponse response = (DescribeClusterResponse) abstractResponse;
                Errors error = Errors.forCode(response.data().errorCode());
                if (error != Errors.NONE) {
                    ApiError apiError = new ApiError(error, response.data().errorMessage());
                    handleFailure(apiError.exception());
                    return;
                }
                Map<Integer, Node> nodes = response.nodes();
                describeClusterFuture.complete(nodes.values());
                // Controller is null if controller id is equal to NO_CONTROLLER_ID
                controllerFuture.complete(nodes.get(response.data().controllerId()));
                clusterIdFuture.complete(response.data().clusterId());
                authorizedOperationsFuture.complete(validAclOperations(response.data().clusterAuthorizedOperations()));
            } else {
                MetadataResponse response = (MetadataResponse) abstractResponse;
                describeClusterFuture.complete(response.brokers());
                controllerFuture.complete(controller(response));
                clusterIdFuture.complete(response.clusterId());
                authorizedOperationsFuture.complete(validAclOperations(response.clusterAuthorizedOperations()));
            }
        }

        private Node controller(MetadataResponse response) {
            if (response.controller() == null || response.controller().id() == MetadataResponse.NO_CONTROLLER_ID)
                return null;
            return response.controller();
        }

        @Override
        void handleFailure(Throwable throwable) {
            describeClusterFuture.completeExceptionally(throwable);
            controllerFuture.completeExceptionally(throwable);
            clusterIdFuture.completeExceptionally(throwable);
            authorizedOperationsFuture.completeExceptionally(throwable);
        }

        @Override
        boolean handleUnsupportedVersionException(final UnsupportedVersionException exception) {
            if (useMetadataRequest) {
                return false;
            }
            useMetadataRequest = true;
            return true;
        }
    }, now);
    return new DescribeClusterResult(describeClusterFuture, controllerFuture, clusterIdFuture, authorizedOperationsFuture);
}
Also used : HashSet(java.util.HashSet) Set(java.util.Set) Node(org.apache.kafka.common.Node) ChannelBuilder(org.apache.kafka.common.network.ChannelBuilder) DescribeClusterRequest(org.apache.kafka.common.requests.DescribeClusterRequest) MetadataResponse(org.apache.kafka.common.requests.MetadataResponse) DescribeClusterRequestData(org.apache.kafka.common.message.DescribeClusterRequestData) AbstractResponse(org.apache.kafka.common.requests.AbstractResponse) DescribeClusterResponse(org.apache.kafka.common.requests.DescribeClusterResponse) KafkaFutureImpl(org.apache.kafka.common.internals.KafkaFutureImpl) MetadataRequestData(org.apache.kafka.common.message.MetadataRequestData) Errors(org.apache.kafka.common.protocol.Errors) MetadataRequest(org.apache.kafka.common.requests.MetadataRequest) TopicNameCollection(org.apache.kafka.common.TopicCollection.TopicNameCollection) CreatableTopicCollection(org.apache.kafka.common.message.CreateTopicsRequestData.CreatableTopicCollection) TopicIdCollection(org.apache.kafka.common.TopicCollection.TopicIdCollection) Collection(java.util.Collection) TopicCollection(org.apache.kafka.common.TopicCollection) CreatePartitionsTopicCollection(org.apache.kafka.common.message.CreatePartitionsRequestData.CreatePartitionsTopicCollection) ApiError(org.apache.kafka.common.requests.ApiError) Map(java.util.Map) TreeMap(java.util.TreeMap) HashMap(java.util.HashMap) UnsupportedVersionException(org.apache.kafka.common.errors.UnsupportedVersionException)
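
For orientation, here is a minimal caller-side sketch of how the four futures completed above surface through the public Admin API. The bootstrap address is a placeholder; the accessors are the standard DescribeClusterResult methods.

import java.util.Properties;
import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.clients.admin.AdminClientConfig;
import org.apache.kafka.clients.admin.DescribeClusterOptions;
import org.apache.kafka.clients.admin.DescribeClusterResult;

public class DescribeClusterExample {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        // Placeholder bootstrap address; adjust for your environment.
        props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
        try (Admin admin = Admin.create(props)) {
            DescribeClusterResult result = admin.describeCluster(
                    new DescribeClusterOptions().includeAuthorizedOperations(true));
            // Each accessor returns one of the KafkaFutures completed in handleResponse above.
            System.out.println("Cluster id:  " + result.clusterId().get());
            System.out.println("Controller:  " + result.controller().get());
            System.out.println("Nodes:       " + result.nodes().get());
            System.out.println("Allowed ops: " + result.authorizedOperations().get());
        }
    }
}

Note that when a broker does not support DescribeClusterRequest, the handleUnsupportedVersionException override above flips useMetadataRequest and retries the same Call with a MetadataRequest, so the caller sees no difference.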

Example 42 with AbstractResponse

Use of org.apache.kafka.common.requests.AbstractResponse in project kafka by apache.

From the class KafkaAdminClient, method getDeleteTopicsWithIdsCall.

private Call getDeleteTopicsWithIdsCall(final DeleteTopicsOptions options, final Map<Uuid, KafkaFutureImpl<Void>> futures, final List<Uuid> topicIds, final Map<Uuid, ThrottlingQuotaExceededException> quotaExceededExceptions, final long now, final long deadline) {
    return new Call("deleteTopics", deadline, new ControllerNodeProvider()) {

        @Override
        DeleteTopicsRequest.Builder createRequest(int timeoutMs) {
            return new DeleteTopicsRequest.Builder(new DeleteTopicsRequestData().setTopics(topicIds.stream().map(topic -> new DeleteTopicState().setTopicId(topic)).collect(Collectors.toList())).setTimeoutMs(timeoutMs));
        }

        @Override
        void handleResponse(AbstractResponse abstractResponse) {
            // Check for controller change
            handleNotControllerError(abstractResponse);
            // Handle server responses for particular topics.
            final DeleteTopicsResponse response = (DeleteTopicsResponse) abstractResponse;
            final List<Uuid> retryTopics = new ArrayList<>();
            final Map<Uuid, ThrottlingQuotaExceededException> retryTopicQuotaExceededExceptions = new HashMap<>();
            for (DeletableTopicResult result : response.data().responses()) {
                KafkaFutureImpl<Void> future = futures.get(result.topicId());
                if (future == null) {
                    log.warn("Server response mentioned unknown topic ID {}", result.topicId());
                } else {
                    ApiError error = new ApiError(result.errorCode(), result.errorMessage());
                    if (error.isFailure()) {
                        if (error.is(Errors.THROTTLING_QUOTA_EXCEEDED)) {
                            ThrottlingQuotaExceededException quotaExceededException = new ThrottlingQuotaExceededException(response.throttleTimeMs(), error.messageWithFallback());
                            if (options.shouldRetryOnQuotaViolation()) {
                                retryTopics.add(result.topicId());
                                retryTopicQuotaExceededExceptions.put(result.topicId(), quotaExceededException);
                            } else {
                                future.completeExceptionally(quotaExceededException);
                            }
                        } else {
                            future.completeExceptionally(error.exception());
                        }
                    } else {
                        future.complete(null);
                    }
                }
            }
            // If there are topics to retry, retry them; complete unrealized futures otherwise.
            if (retryTopics.isEmpty()) {
                // The server should send back a response for every topic. But do a sanity check anyway.
                completeUnrealizedFutures(futures.entrySet().stream(), topic -> "The controller response did not contain a result for topic " + topic);
            } else {
                final long now = time.milliseconds();
                final Call call = getDeleteTopicsWithIdsCall(options, futures, retryTopics, retryTopicQuotaExceededExceptions, now, deadline);
                runnable.call(call, now);
            }
        }

        @Override
        void handleFailure(Throwable throwable) {
            // If any topics were retried due to a quota-exceeded exception, propagate the
            // initial error back to the caller if the request timed out.
            maybeCompleteQuotaExceededException(options.shouldRetryOnQuotaViolation(), throwable, futures, quotaExceededExceptions, (int) (time.milliseconds() - now));
            // Fail all the other remaining futures
            completeAllExceptionally(futures.values(), throwable);
        }
    };
}
Also used : AbstractResponse(org.apache.kafka.common.requests.AbstractResponse) HashMap(java.util.HashMap) ChannelBuilder(org.apache.kafka.common.network.ChannelBuilder) DeleteTopicsRequestData(org.apache.kafka.common.message.DeleteTopicsRequestData) ArrayList(java.util.ArrayList) DeleteTopicsRequest(org.apache.kafka.common.requests.DeleteTopicsRequest) ThrottlingQuotaExceededException(org.apache.kafka.common.errors.ThrottlingQuotaExceededException) DeleteTopicsResponse(org.apache.kafka.common.requests.DeleteTopicsResponse) Uuid(org.apache.kafka.common.Uuid) DeleteTopicState(org.apache.kafka.common.message.DeleteTopicsRequestData.DeleteTopicState) DeletableTopicResult(org.apache.kafka.common.message.DeleteTopicsResponseData.DeletableTopicResult) ApiError(org.apache.kafka.common.requests.ApiError)
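
A minimal caller-side sketch of deleting topics by ID through the public Admin API in recent Kafka versions. The topic ID is assumed to have been obtained earlier (for example via describeTopics); the class and variable names are illustrative.

import java.util.Collections;
import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.clients.admin.DeleteTopicsOptions;
import org.apache.kafka.clients.admin.DeleteTopicsResult;
import org.apache.kafka.common.TopicCollection;
import org.apache.kafka.common.Uuid;

public class DeleteTopicsByIdExample {
    // Assumes an already-configured Admin client and a known topic ID.
    static void deleteTopicById(Admin admin, Uuid topicId) throws Exception {
        DeleteTopicsResult result = admin.deleteTopics(
                TopicCollection.ofTopicIds(Collections.singleton(topicId)),
                new DeleteTopicsOptions().retryOnQuotaViolation(false));
        // topicIdValues() exposes one KafkaFuture<Void> per Uuid; these are the same
        // futures that the handleResponse logic shown above completes or fails.
        result.topicIdValues().get(topicId).get();
    }
}

With retryOnQuotaViolation(false), a THROTTLING_QUOTA_EXCEEDED error fails the future immediately instead of re-issuing the Call as in the retryTopics branch above.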

Example 43 with AbstractResponse

Use of org.apache.kafka.common.requests.AbstractResponse in project kafka by apache.

From the class KafkaAdminClient, method unregisterBroker.

@Override
public UnregisterBrokerResult unregisterBroker(int brokerId, UnregisterBrokerOptions options) {
    final KafkaFutureImpl<Void> future = new KafkaFutureImpl<>();
    final long now = time.milliseconds();
    final Call call = new Call("unregisterBroker", calcDeadlineMs(now, options.timeoutMs()), new LeastLoadedNodeProvider()) {

        @Override
        UnregisterBrokerRequest.Builder createRequest(int timeoutMs) {
            UnregisterBrokerRequestData data = new UnregisterBrokerRequestData().setBrokerId(brokerId);
            return new UnregisterBrokerRequest.Builder(data);
        }

        @Override
        void handleResponse(AbstractResponse abstractResponse) {
            final UnregisterBrokerResponse response = (UnregisterBrokerResponse) abstractResponse;
            Errors error = Errors.forCode(response.data().errorCode());
            switch(error) {
                case NONE:
                    future.complete(null);
                    break;
                case REQUEST_TIMED_OUT:
                    throw error.exception();
                default:
                    log.error("Unregister broker request for broker ID {} failed: {}", brokerId, error.message());
                    future.completeExceptionally(error.exception());
                    break;
            }
        }

        @Override
        void handleFailure(Throwable throwable) {
            future.completeExceptionally(throwable);
        }
    };
    runnable.call(call, now);
    return new UnregisterBrokerResult(future);
}
Also used : UnregisterBrokerRequestData(org.apache.kafka.common.message.UnregisterBrokerRequestData) AbstractResponse(org.apache.kafka.common.requests.AbstractResponse) ChannelBuilder(org.apache.kafka.common.network.ChannelBuilder) UnregisterBrokerRequest(org.apache.kafka.common.requests.UnregisterBrokerRequest) KafkaFutureImpl(org.apache.kafka.common.internals.KafkaFutureImpl) Errors(org.apache.kafka.common.protocol.Errors) UnregisterBrokerResponse(org.apache.kafka.common.requests.UnregisterBrokerResponse)
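
As a usage sketch, the public entry point simply wraps the single future completed above in an UnregisterBrokerResult; unregisterBroker is only meaningful on KRaft-based clusters. The broker id and bootstrap address below are placeholders.

import java.util.Properties;
import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.clients.admin.AdminClientConfig;
import org.apache.kafka.clients.admin.UnregisterBrokerOptions;

public class UnregisterBrokerExample {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // placeholder
        try (Admin admin = Admin.create(props)) {
            int brokerId = 1; // illustrative broker id
            // all() resolves once the future created in unregisterBroker() above completes.
            admin.unregisterBroker(brokerId, new UnregisterBrokerOptions()).all().get();
        }
    }
}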

Example 44 with AbstractResponse

Use of org.apache.kafka.common.requests.AbstractResponse in project kafka by apache.

From the class KafkaAdminClient, method getCreateTopicsCall.

private Call getCreateTopicsCall(final CreateTopicsOptions options, final Map<String, KafkaFutureImpl<TopicMetadataAndConfig>> futures, final CreatableTopicCollection topics, final Map<String, ThrottlingQuotaExceededException> quotaExceededExceptions, final long now, final long deadline) {
    return new Call("createTopics", deadline, new ControllerNodeProvider()) {

        @Override
        public CreateTopicsRequest.Builder createRequest(int timeoutMs) {
            return new CreateTopicsRequest.Builder(new CreateTopicsRequestData().setTopics(topics).setTimeoutMs(timeoutMs).setValidateOnly(options.shouldValidateOnly()));
        }

        @Override
        public void handleResponse(AbstractResponse abstractResponse) {
            // Check for controller change
            handleNotControllerError(abstractResponse);
            // Handle server responses for particular topics.
            final CreateTopicsResponse response = (CreateTopicsResponse) abstractResponse;
            final CreatableTopicCollection retryTopics = new CreatableTopicCollection();
            final Map<String, ThrottlingQuotaExceededException> retryTopicQuotaExceededExceptions = new HashMap<>();
            for (CreatableTopicResult result : response.data().topics()) {
                KafkaFutureImpl<TopicMetadataAndConfig> future = futures.get(result.name());
                if (future == null) {
                    log.warn("Server response mentioned unknown topic {}", result.name());
                } else {
                    ApiError error = new ApiError(result.errorCode(), result.errorMessage());
                    if (error.isFailure()) {
                        if (error.is(Errors.THROTTLING_QUOTA_EXCEEDED)) {
                            ThrottlingQuotaExceededException quotaExceededException = new ThrottlingQuotaExceededException(response.throttleTimeMs(), error.messageWithFallback());
                            if (options.shouldRetryOnQuotaViolation()) {
                                retryTopics.add(topics.find(result.name()).duplicate());
                                retryTopicQuotaExceededExceptions.put(result.name(), quotaExceededException);
                            } else {
                                future.completeExceptionally(quotaExceededException);
                            }
                        } else {
                            future.completeExceptionally(error.exception());
                        }
                    } else {
                        TopicMetadataAndConfig topicMetadataAndConfig;
                        if (result.topicConfigErrorCode() != Errors.NONE.code()) {
                            topicMetadataAndConfig = new TopicMetadataAndConfig(Errors.forCode(result.topicConfigErrorCode()).exception());
                        } else if (result.numPartitions() == CreateTopicsResult.UNKNOWN) {
                            topicMetadataAndConfig = new TopicMetadataAndConfig(new UnsupportedVersionException("Topic metadata and configs in CreateTopics response not supported"));
                        } else {
                            List<CreatableTopicConfigs> configs = result.configs();
                            Config topicConfig = new Config(configs.stream().map(this::configEntry).collect(Collectors.toSet()));
                            topicMetadataAndConfig = new TopicMetadataAndConfig(result.topicId(), result.numPartitions(), result.replicationFactor(), topicConfig);
                        }
                        future.complete(topicMetadataAndConfig);
                    }
                }
            }
            // If there are topics to retry, retry them; complete unrealized futures otherwise.
            if (retryTopics.isEmpty()) {
                // The server should send back a response for every topic. But do a sanity check anyway.
                completeUnrealizedFutures(futures.entrySet().stream(), topic -> "The controller response did not contain a result for topic " + topic);
            } else {
                final long now = time.milliseconds();
                final Call call = getCreateTopicsCall(options, futures, retryTopics, retryTopicQuotaExceededExceptions, now, deadline);
                runnable.call(call, now);
            }
        }

        private ConfigEntry configEntry(CreatableTopicConfigs config) {
            return new ConfigEntry(config.name(), config.value(), configSource(DescribeConfigsResponse.ConfigSource.forId(config.configSource())), config.isSensitive(), config.readOnly(), Collections.emptyList(), null, null);
        }

        @Override
        void handleFailure(Throwable throwable) {
            // If any topics were retried due to a quota-exceeded exception, propagate the
            // initial error back to the caller if the request timed out.
            maybeCompleteQuotaExceededException(options.shouldRetryOnQuotaViolation(), throwable, futures, quotaExceededExceptions, (int) (time.milliseconds() - now));
            // Fail all the other remaining futures
            completeAllExceptionally(futures.values(), throwable);
        }
    };
}
Also used : CreateTopicsResponse(org.apache.kafka.common.requests.CreateTopicsResponse) AbstractResponse(org.apache.kafka.common.requests.AbstractResponse) HashMap(java.util.HashMap) TopicMetadataAndConfig(org.apache.kafka.clients.admin.CreateTopicsResult.TopicMetadataAndConfig) MetricConfig(org.apache.kafka.common.metrics.MetricConfig) ChannelBuilder(org.apache.kafka.common.network.ChannelBuilder) ThrottlingQuotaExceededException(org.apache.kafka.common.errors.ThrottlingQuotaExceededException) CreatableTopicCollection(org.apache.kafka.common.message.CreateTopicsRequestData.CreatableTopicCollection) CreateTopicsRequest(org.apache.kafka.common.requests.CreateTopicsRequest) TopicMetadataAndConfig(org.apache.kafka.clients.admin.CreateTopicsResult.TopicMetadataAndConfig) CreateTopicsRequestData(org.apache.kafka.common.message.CreateTopicsRequestData) CreatableTopicResult(org.apache.kafka.common.message.CreateTopicsResponseData.CreatableTopicResult) CreatableTopicConfigs(org.apache.kafka.common.message.CreateTopicsResponseData.CreatableTopicConfigs) LinkedList(java.util.LinkedList) ArrayList(java.util.ArrayList) List(java.util.List) ApiError(org.apache.kafka.common.requests.ApiError) UnsupportedVersionException(org.apache.kafka.common.errors.UnsupportedVersionException)
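
A minimal caller-side sketch of the corresponding public API; the topic name, partition count, and replication factor are illustrative. numPartitions() and config() surface the TopicMetadataAndConfig built in handleResponse above and fail with UnsupportedVersionException against brokers that do not return that information.

import java.util.Collections;
import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.clients.admin.CreateTopicsOptions;
import org.apache.kafka.clients.admin.CreateTopicsResult;
import org.apache.kafka.clients.admin.NewTopic;

public class CreateTopicsExample {
    // Assumes an already-configured Admin client.
    static void createExampleTopic(Admin admin) throws Exception {
        NewTopic topic = new NewTopic("example-topic", 3, (short) 1); // illustrative settings
        CreateTopicsResult result = admin.createTopics(
                Collections.singleton(topic),
                new CreateTopicsOptions().retryOnQuotaViolation(true));
        // all() aggregates the per-topic futures completed in handleResponse above.
        result.all().get();
        // Per-topic metadata taken from TopicMetadataAndConfig.
        System.out.println("Partitions: " + result.numPartitions("example-topic").get());
        System.out.println("Configs:    " + result.config("example-topic").get());
    }
}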

Example 45 with AbstractResponse

Use of org.apache.kafka.common.requests.AbstractResponse in project kafka by apache.

From the class KafkaAdminClient, method incrementalAlterConfigs.

private Map<ConfigResource, KafkaFutureImpl<Void>> incrementalAlterConfigs(Map<ConfigResource, Collection<AlterConfigOp>> configs, final AlterConfigsOptions options, Collection<ConfigResource> resources, NodeProvider nodeProvider) {
    final Map<ConfigResource, KafkaFutureImpl<Void>> futures = new HashMap<>();
    for (ConfigResource resource : resources) futures.put(resource, new KafkaFutureImpl<>());
    final long now = time.milliseconds();
    runnable.call(new Call("incrementalAlterConfigs", calcDeadlineMs(now, options.timeoutMs()), nodeProvider) {

        @Override
        public IncrementalAlterConfigsRequest.Builder createRequest(int timeoutMs) {
            return new IncrementalAlterConfigsRequest.Builder(resources, configs, options.shouldValidateOnly());
        }

        @Override
        public void handleResponse(AbstractResponse abstractResponse) {
            IncrementalAlterConfigsResponse response = (IncrementalAlterConfigsResponse) abstractResponse;
            Map<ConfigResource, ApiError> errors = IncrementalAlterConfigsResponse.fromResponseData(response.data());
            for (Map.Entry<ConfigResource, KafkaFutureImpl<Void>> entry : futures.entrySet()) {
                KafkaFutureImpl<Void> future = entry.getValue();
                ApiException exception = errors.get(entry.getKey()).exception();
                if (exception != null) {
                    future.completeExceptionally(exception);
                } else {
                    future.complete(null);
                }
            }
        }

        @Override
        void handleFailure(Throwable throwable) {
            completeAllExceptionally(futures.values(), throwable);
        }
    }, now);
    return futures;
}
Also used : HashMap(java.util.HashMap) IncrementalAlterConfigsRequest(org.apache.kafka.common.requests.IncrementalAlterConfigsRequest) AbstractResponse(org.apache.kafka.common.requests.AbstractResponse) ChannelBuilder(org.apache.kafka.common.network.ChannelBuilder) KafkaFutureImpl(org.apache.kafka.common.internals.KafkaFutureImpl) ConfigResource(org.apache.kafka.common.config.ConfigResource) IncrementalAlterConfigsResponse(org.apache.kafka.common.requests.IncrementalAlterConfigsResponse) Map(java.util.Map) TreeMap(java.util.TreeMap) HashMap(java.util.HashMap) ApiException(org.apache.kafka.common.errors.ApiException)
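
For completeness, a caller-side sketch of the public incrementalAlterConfigs API that ultimately reaches this method; the topic name and config value are illustrative.

import java.util.Collection;
import java.util.Collections;
import java.util.Map;
import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.clients.admin.AlterConfigOp;
import org.apache.kafka.clients.admin.AlterConfigsOptions;
import org.apache.kafka.clients.admin.ConfigEntry;
import org.apache.kafka.common.config.ConfigResource;

public class IncrementalAlterConfigsExample {
    // Assumes an already-configured Admin client.
    static void setTopicRetention(Admin admin) throws Exception {
        ConfigResource topic = new ConfigResource(ConfigResource.Type.TOPIC, "example-topic");
        AlterConfigOp setRetention = new AlterConfigOp(
                new ConfigEntry("retention.ms", "604800000"), AlterConfigOp.OpType.SET);
        Map<ConfigResource, Collection<AlterConfigOp>> updates =
                Collections.singletonMap(topic, Collections.singleton(setRetention));
        // values() exposes one KafkaFuture per ConfigResource, mirroring the futures map above;
        // all() simply waits for every one of them.
        admin.incrementalAlterConfigs(updates, new AlterConfigsOptions()).all().get();
    }
}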

Aggregations

AbstractResponse (org.apache.kafka.common.requests.AbstractResponse): 49
HashMap (java.util.HashMap): 38
ChannelBuilder (org.apache.kafka.common.network.ChannelBuilder): 38
KafkaFutureImpl (org.apache.kafka.common.internals.KafkaFutureImpl): 36
ArrayList (java.util.ArrayList): 28
Map (java.util.Map): 26
Errors (org.apache.kafka.common.protocol.Errors): 21
ApiError (org.apache.kafka.common.requests.ApiError): 18
KafkaFuture (org.apache.kafka.common.KafkaFuture): 16
List (java.util.List): 15
TreeMap (java.util.TreeMap): 15
MetadataResponse (org.apache.kafka.common.requests.MetadataResponse): 15
AtomicInteger (java.util.concurrent.atomic.AtomicInteger): 14
TopicPartition (org.apache.kafka.common.TopicPartition): 14
InvalidTopicException (org.apache.kafka.common.errors.InvalidTopicException): 14
LinkedList (java.util.LinkedList): 13
MetadataRequest (org.apache.kafka.common.requests.MetadataRequest): 13
Set (java.util.Set): 12
ApiException (org.apache.kafka.common.errors.ApiException): 12
HashSet (java.util.HashSet): 11