Search in sources:

Example 1 with AbstractResponse

use of org.apache.kafka.common.requests.AbstractResponse in project kafka by apache.

From the class SaslClientAuthenticator, method handleKafkaResponse:

/**
 * Parses a raw Kafka response received during SASL authentication and dispatches it
 * to the matching handler.
 *
 * @param requestHeader header of the request this response answers; used both to parse
 *                      the body and to recover the API key
 * @param responseBytes raw response payload as read from the network
 * @throws AuthenticationException if the bytes cannot be parsed as a Kafka response
 *                                 (the broker may be speaking a different protocol)
 * @throws IllegalStateException   if the response is for an API other than SASL_HANDSHAKE
 */
private void handleKafkaResponse(RequestHeader requestHeader, byte[] responseBytes) {
    final AbstractResponse parsed;
    final ApiKeys key;
    try {
        parsed = NetworkClient.parseResponse(ByteBuffer.wrap(responseBytes), requestHeader);
        key = ApiKeys.forId(requestHeader.apiKey());
    } catch (SchemaException | IllegalArgumentException e) {
        // A parse failure here usually means the broker does not speak the SASL
        // handshake protocol (e.g. it only accepts raw GSSAPI tokens).
        LOG.debug("Invalid SASL mechanism response, server may be expecting only GSSAPI tokens");
        throw new AuthenticationException("Invalid SASL mechanism response", e);
    }
    // Only the SASL handshake response is expected at this stage of authentication.
    if (key == ApiKeys.SASL_HANDSHAKE)
        handleSaslHandshakeResponse((SaslHandshakeResponse) parsed);
    else
        throw new IllegalStateException("Unexpected API key during handshake: " + key);
}
Also used : ApiKeys(org.apache.kafka.common.protocol.ApiKeys) SchemaException(org.apache.kafka.common.protocol.types.SchemaException) AbstractResponse(org.apache.kafka.common.requests.AbstractResponse) AuthenticationException(org.apache.kafka.common.errors.AuthenticationException)

Example 2 with AbstractResponse

use of org.apache.kafka.common.requests.AbstractResponse in project apache-kafka-on-k8s by banzaicloud.

From the class SaslClientAuthenticator, method receiveKafkaResponse:

/**
 * Reads and parses the next Kafka response during SASL authentication.
 *
 * @return the parsed response, or {@code null} if no complete response is available yet
 * @throws IOException              on a network read failure
 * @throws IllegalSaslStateException if the bytes cannot be parsed as a Kafka response;
 *                                   the authenticator is moved to the FAILED state first
 */
private AbstractResponse receiveKafkaResponse() throws IOException {
    try {
        byte[] payload = receiveResponseOrToken();
        // Not enough bytes buffered yet: try again on a later poll.
        if (payload == null)
            return null;
        AbstractResponse parsed = NetworkClient.parseResponse(ByteBuffer.wrap(payload), currentRequestHeader);
        // The header has been consumed by this response; clear it for the next request.
        currentRequestHeader = null;
        return parsed;
    } catch (SchemaException | IllegalArgumentException e) {
        // A parse failure usually means the broker expects a different protocol
        // (e.g. raw GSSAPI tokens rather than Kafka SASL framing).
        LOG.debug("Invalid SASL mechanism response, server may be expecting only GSSAPI tokens");
        setSaslState(SaslState.FAILED);
        throw new IllegalSaslStateException("Invalid SASL mechanism response, server may be expecting a different protocol", e);
    }
}
Also used : SchemaException(org.apache.kafka.common.protocol.types.SchemaException) AbstractResponse(org.apache.kafka.common.requests.AbstractResponse) IllegalSaslStateException(org.apache.kafka.common.errors.IllegalSaslStateException)

Example 3 with AbstractResponse

use of org.apache.kafka.common.requests.AbstractResponse in project apache-kafka-on-k8s by banzaicloud.

From the class KafkaAdminClient, method deleteRecords:

/**
 * Deletes records before the given offsets for each requested partition.
 * <p>
 * Two-phase flow: first a metadata request resolves the leader node of every partition,
 * then one DeleteRecords request is sent per leader carrying only that leader's partitions.
 * One future per partition is created up front and completed as results/errors arrive.
 *
 * @param recordsToDelete per-partition cutoff offsets (records strictly before are deleted)
 * @param options         request options (timeout)
 * @return a result wrapping one {@link KafkaFuture} per requested partition
 */
public DeleteRecordsResult deleteRecords(final Map<TopicPartition, RecordsToDelete> recordsToDelete, final DeleteRecordsOptions options) {
    // requests need to be sent to partitions leader nodes so ...
    // ... from the provided map it's needed to create more maps grouping topic/partition per leader
    final Map<TopicPartition, KafkaFutureImpl<DeletedRecords>> futures = new HashMap<>(recordsToDelete.size());
    for (TopicPartition topicPartition : recordsToDelete.keySet()) {
        futures.put(topicPartition, new KafkaFutureImpl<DeletedRecords>());
    }
    // preparing topics list for asking metadata about them
    final Set<String> topics = new HashSet<>();
    for (TopicPartition topicPartition : recordsToDelete.keySet()) {
        topics.add(topicPartition.topic());
    }
    final long nowMetadata = time.milliseconds();
    final long deadline = calcDeadlineMs(nowMetadata, options.timeoutMs());
    // asking for topics metadata for getting partitions leaders
    runnable.call(new Call("topicsMetadata", deadline, new LeastLoadedNodeProvider()) {

        @Override
        AbstractRequest.Builder createRequest(int timeoutMs) {
            // allowAutoTopicCreation=false: a delete must never create the topic
            return new MetadataRequest.Builder(new ArrayList<>(topics), false);
        }

        @Override
        void handleResponse(AbstractResponse abstractResponse) {
            MetadataResponse response = (MetadataResponse) abstractResponse;
            Map<String, Errors> errors = response.errors();
            Cluster cluster = response.cluster();
            // completing futures for topics with errors
            for (Map.Entry<String, Errors> topicError : errors.entrySet()) {
                // every partition of a failed topic fails with that topic's error
                for (Map.Entry<TopicPartition, KafkaFutureImpl<DeletedRecords>> future : futures.entrySet()) {
                    if (future.getKey().topic().equals(topicError.getKey())) {
                        future.getValue().completeExceptionally(topicError.getValue().exception());
                    }
                }
            }
            // grouping topic partitions per leader
            Map<Node, Map<TopicPartition, Long>> leaders = new HashMap<>();
            for (Map.Entry<TopicPartition, RecordsToDelete> entry : recordsToDelete.entrySet()) {
                // avoiding to send deletion request for topics with errors
                if (!errors.containsKey(entry.getKey().topic())) {
                    Node node = cluster.leaderFor(entry.getKey());
                    if (node != null) {
                        if (!leaders.containsKey(node))
                            leaders.put(node, new HashMap<TopicPartition, Long>());
                        leaders.get(node).put(entry.getKey(), entry.getValue().beforeOffset());
                    } else {
                        // no leader known for this partition: fail its future immediately
                        KafkaFutureImpl<DeletedRecords> future = futures.get(entry.getKey());
                        future.completeExceptionally(Errors.LEADER_NOT_AVAILABLE.exception());
                    }
                }
            }
            // one DeleteRecords request per leader, carrying only that leader's partitions
            for (final Map.Entry<Node, Map<TopicPartition, Long>> entry : leaders.entrySet()) {
                final long nowDelete = time.milliseconds();
                final int brokerId = entry.getKey().id();
                runnable.call(new Call("deleteRecords", deadline, new ConstantNodeIdProvider(brokerId)) {

                    @Override
                    AbstractRequest.Builder createRequest(int timeoutMs) {
                        return new DeleteRecordsRequest.Builder(timeoutMs, entry.getValue());
                    }

                    @Override
                    void handleResponse(AbstractResponse abstractResponse) {
                        DeleteRecordsResponse response = (DeleteRecordsResponse) abstractResponse;
                        for (Map.Entry<TopicPartition, DeleteRecordsResponse.PartitionResponse> result : response.responses().entrySet()) {
                            KafkaFutureImpl<DeletedRecords> future = futures.get(result.getKey());
                            if (result.getValue().error == Errors.NONE) {
                                future.complete(new DeletedRecords(result.getValue().lowWatermark));
                            } else {
                                future.completeExceptionally(result.getValue().error.exception());
                            }
                        }
                    }

                    @Override
                    void handleFailure(Throwable throwable) {
                        // NOTE(review): this fails ALL futures, including partitions led by
                        // other brokers whose requests may still succeed — confirm whether
                        // failure should be scoped to this broker's partitions only.
                        completeAllExceptionally(futures.values(), throwable);
                    }
                }, nowDelete);
            }
        }

        @Override
        void handleFailure(Throwable throwable) {
            // metadata lookup failed: nothing can proceed, fail every future
            completeAllExceptionally(futures.values(), throwable);
        }
    }, nowMetadata);
    // defensive copy so the caller cannot mutate the internal futures map
    return new DeleteRecordsResult(new HashMap<TopicPartition, KafkaFuture<DeletedRecords>>(futures));
}
Also used : HashMap(java.util.HashMap) ChannelBuilder(org.apache.kafka.common.network.ChannelBuilder) Node(org.apache.kafka.common.Node) AbstractRequest(org.apache.kafka.common.requests.AbstractRequest) ArrayList(java.util.ArrayList) MetadataResponse(org.apache.kafka.common.requests.MetadataResponse) DeleteRecordsResponse(org.apache.kafka.common.requests.DeleteRecordsResponse) HashSet(java.util.HashSet) KafkaFuture(org.apache.kafka.common.KafkaFuture) AbstractResponse(org.apache.kafka.common.requests.AbstractResponse) Cluster(org.apache.kafka.common.Cluster) KafkaFutureImpl(org.apache.kafka.common.internals.KafkaFutureImpl) MetadataRequest(org.apache.kafka.common.requests.MetadataRequest) TopicPartition(org.apache.kafka.common.TopicPartition) AtomicLong(java.util.concurrent.atomic.AtomicLong) Map(java.util.Map) HashMap(java.util.HashMap)

Example 4 with AbstractResponse

use of org.apache.kafka.common.requests.AbstractResponse in project apache-kafka-on-k8s by banzaicloud.

From the class KafkaAdminClient, method createAcls:

/**
 * Creates the given ACL bindings on the cluster.
 * <p>
 * Duplicate bindings in the input collapse to a single future. Bindings whose filter
 * contains an indefinite field are rejected client-side without being sent; all valid
 * bindings go out in a single CreateAcls request, and the broker's results (returned
 * in request order) are matched back to their futures positionally.
 *
 * @param acls    ACL bindings to create
 * @param options request options (timeout)
 * @return a result wrapping one {@link KafkaFuture} per distinct binding
 */
@Override
public CreateAclsResult createAcls(Collection<AclBinding> acls, CreateAclsOptions options) {
    final long now = time.milliseconds();
    final Map<AclBinding, KafkaFutureImpl<Void>> futures = new HashMap<>();
    final List<AclCreation> aclCreations = new ArrayList<>();
    for (AclBinding acl : acls) {
        // duplicates share the first occurrence's future
        if (futures.containsKey(acl))
            continue;
        KafkaFutureImpl<Void> aclFuture = new KafkaFutureImpl<>();
        futures.put(acl, aclFuture);
        String indefinite = acl.toFilter().findIndefiniteField();
        if (indefinite != null) {
            // reject locally: an indefinite field cannot describe a concrete ACL
            aclFuture.completeExceptionally(new InvalidRequestException("Invalid ACL creation: " + indefinite));
        } else {
            aclCreations.add(new AclCreation(acl));
        }
    }
    runnable.call(new Call("createAcls", calcDeadlineMs(now, options.timeoutMs()), new LeastLoadedNodeProvider()) {

        @Override
        AbstractRequest.Builder createRequest(int timeoutMs) {
            return new CreateAclsRequest.Builder(aclCreations);
        }

        @Override
        void handleResponse(AbstractResponse abstractResponse) {
            CreateAclsResponse response = (CreateAclsResponse) abstractResponse;
            // broker results come back in the same order the creations were sent
            Iterator<AclCreationResponse> results = response.aclCreationResponses().iterator();
            for (AclCreation aclCreation : aclCreations) {
                KafkaFutureImpl<Void> future = futures.get(aclCreation.acl());
                if (!results.hasNext()) {
                    future.completeExceptionally(new UnknownServerException("The broker reported no creation result for the given ACL."));
                } else {
                    AclCreationResponse result = results.next();
                    if (result.error().isFailure())
                        future.completeExceptionally(result.error().exception());
                    else
                        future.complete(null);
                }
            }
        }

        @Override
        void handleFailure(Throwable throwable) {
            completeAllExceptionally(futures.values(), throwable);
        }
    }, now);
    // defensive copy so the caller cannot mutate the internal futures map
    return new CreateAclsResult(new HashMap<AclBinding, KafkaFuture<Void>>(futures));
}
Also used : HashMap(java.util.HashMap) ChannelBuilder(org.apache.kafka.common.network.ChannelBuilder) ArrayList(java.util.ArrayList) CreateAclsResponse(org.apache.kafka.common.requests.CreateAclsResponse) CreateAclsRequest(org.apache.kafka.common.requests.CreateAclsRequest) Iterator(java.util.Iterator) InvalidRequestException(org.apache.kafka.common.errors.InvalidRequestException) List(java.util.List) ArrayList(java.util.ArrayList) LinkedList(java.util.LinkedList) AclBinding(org.apache.kafka.common.acl.AclBinding) AclCreation(org.apache.kafka.common.requests.CreateAclsRequest.AclCreation) KafkaFuture(org.apache.kafka.common.KafkaFuture) AbstractResponse(org.apache.kafka.common.requests.AbstractResponse) KafkaFutureImpl(org.apache.kafka.common.internals.KafkaFutureImpl) UnknownServerException(org.apache.kafka.common.errors.UnknownServerException) AclCreationResponse(org.apache.kafka.common.requests.CreateAclsResponse.AclCreationResponse)

Example 5 with AbstractResponse

use of org.apache.kafka.common.requests.AbstractResponse in project apache-kafka-on-k8s by banzaicloud.

From the class KafkaAdminClient, method describeConfigs:

/**
 * Describes the configuration of the given resources.
 * <p>
 * Non-broker resources (and default broker configs) are batched into a single
 * DescribeConfigs request sent to the least-loaded node; each specific BROKER
 * resource requires its own request sent directly to that broker, since only
 * the broker itself can report its configuration.
 *
 * @param configResources resources whose configs should be described
 * @param options         request options (timeout, includeSynonyms)
 * @return a result wrapping one {@link KafkaFuture} per requested resource
 */
@Override
public DescribeConfigsResult describeConfigs(Collection<ConfigResource> configResources, final DescribeConfigsOptions options) {
    final Map<ConfigResource, KafkaFutureImpl<Config>> unifiedRequestFutures = new HashMap<>();
    final Map<ConfigResource, KafkaFutureImpl<Config>> brokerFutures = new HashMap<>(configResources.size());
    // The BROKER resources which we want to describe.  We must make a separate DescribeConfigs
    // request for every BROKER resource we want to describe.
    final Collection<Resource> brokerResources = new ArrayList<>();
    // The non-BROKER resources which we want to describe.  These resources can be described by a
    // single, unified DescribeConfigs request.
    final Collection<Resource> unifiedRequestResources = new ArrayList<>(configResources.size());
    for (ConfigResource resource : configResources) {
        // default broker configs go through the unified path; specific brokers get their own call
        if (resource.type() == ConfigResource.Type.BROKER && !resource.isDefault()) {
            brokerFutures.put(resource, new KafkaFutureImpl<Config>());
            brokerResources.add(configResourceToResource(resource));
        } else {
            unifiedRequestFutures.put(resource, new KafkaFutureImpl<Config>());
            unifiedRequestResources.add(configResourceToResource(resource));
        }
    }
    final long now = time.milliseconds();
    if (!unifiedRequestResources.isEmpty()) {
        runnable.call(new Call("describeConfigs", calcDeadlineMs(now, options.timeoutMs()), new LeastLoadedNodeProvider()) {

            @Override
            AbstractRequest.Builder createRequest(int timeoutMs) {
                return new DescribeConfigsRequest.Builder(unifiedRequestResources).includeSynonyms(options.includeSynonyms());
            }

            @Override
            void handleResponse(AbstractResponse abstractResponse) {
                DescribeConfigsResponse response = (DescribeConfigsResponse) abstractResponse;
                // complete every unified future from the single batched response
                for (Map.Entry<ConfigResource, KafkaFutureImpl<Config>> entry : unifiedRequestFutures.entrySet()) {
                    ConfigResource configResource = entry.getKey();
                    KafkaFutureImpl<Config> future = entry.getValue();
                    DescribeConfigsResponse.Config config = response.config(configResourceToResource(configResource));
                    if (config == null) {
                        // broker omitted a resource we asked for: treat as a server-side bug
                        future.completeExceptionally(new UnknownServerException("Malformed broker response: missing config for " + configResource));
                        continue;
                    }
                    if (config.error().isFailure()) {
                        future.completeExceptionally(config.error().exception());
                        continue;
                    }
                    // translate wire-level entries into the public ConfigEntry type
                    List<ConfigEntry> configEntries = new ArrayList<>();
                    for (DescribeConfigsResponse.ConfigEntry configEntry : config.entries()) {
                        configEntries.add(new ConfigEntry(configEntry.name(), configEntry.value(), configSource(configEntry.source()), configEntry.isSensitive(), configEntry.isReadOnly(), configSynonyms(configEntry)));
                    }
                    future.complete(new Config(configEntries));
                }
            }

            @Override
            void handleFailure(Throwable throwable) {
                // only unified futures fail here; per-broker calls fail independently
                completeAllExceptionally(unifiedRequestFutures.values(), throwable);
            }
        }, now);
    }
    // one dedicated request per specific broker, targeted at that broker's node id
    for (Map.Entry<ConfigResource, KafkaFutureImpl<Config>> entry : brokerFutures.entrySet()) {
        final KafkaFutureImpl<Config> brokerFuture = entry.getValue();
        final Resource resource = configResourceToResource(entry.getKey());
        // the BROKER resource name is the broker id
        final int nodeId = Integer.parseInt(resource.name());
        runnable.call(new Call("describeBrokerConfigs", calcDeadlineMs(now, options.timeoutMs()), new ConstantNodeIdProvider(nodeId)) {

            @Override
            AbstractRequest.Builder createRequest(int timeoutMs) {
                return new DescribeConfigsRequest.Builder(Collections.singleton(resource)).includeSynonyms(options.includeSynonyms());
            }

            @Override
            void handleResponse(AbstractResponse abstractResponse) {
                DescribeConfigsResponse response = (DescribeConfigsResponse) abstractResponse;
                DescribeConfigsResponse.Config config = response.configs().get(resource);
                if (config == null) {
                    brokerFuture.completeExceptionally(new UnknownServerException("Malformed broker response: missing config for " + resource));
                    return;
                }
                if (config.error().isFailure())
                    brokerFuture.completeExceptionally(config.error().exception());
                else {
                    // translate wire-level entries into the public ConfigEntry type
                    List<ConfigEntry> configEntries = new ArrayList<>();
                    for (DescribeConfigsResponse.ConfigEntry configEntry : config.entries()) {
                        configEntries.add(new ConfigEntry(configEntry.name(), configEntry.value(), configSource(configEntry.source()), configEntry.isSensitive(), configEntry.isReadOnly(), configSynonyms(configEntry)));
                    }
                    brokerFuture.complete(new Config(configEntries));
                }
            }

            @Override
            void handleFailure(Throwable throwable) {
                brokerFuture.completeExceptionally(throwable);
            }
        }, now);
    }
    // merge both future maps into the single result the caller sees
    final Map<ConfigResource, KafkaFuture<Config>> allFutures = new HashMap<>();
    allFutures.putAll(brokerFutures);
    allFutures.putAll(unifiedRequestFutures);
    return new DescribeConfigsResult(allFutures);
}
Also used : HashMap(java.util.HashMap) MetricConfig(org.apache.kafka.common.metrics.MetricConfig) ChannelBuilder(org.apache.kafka.common.network.ChannelBuilder) ArrayList(java.util.ArrayList) List(java.util.List) ArrayList(java.util.ArrayList) LinkedList(java.util.LinkedList) KafkaFuture(org.apache.kafka.common.KafkaFuture) AbstractResponse(org.apache.kafka.common.requests.AbstractResponse) ConfigResource(org.apache.kafka.common.config.ConfigResource) Resource(org.apache.kafka.common.requests.Resource) KafkaFutureImpl(org.apache.kafka.common.internals.KafkaFutureImpl) UnknownServerException(org.apache.kafka.common.errors.UnknownServerException) ConfigResource(org.apache.kafka.common.config.ConfigResource) Map(java.util.Map) HashMap(java.util.HashMap) DescribeConfigsResponse(org.apache.kafka.common.requests.DescribeConfigsResponse)

Aggregations

AbstractResponse (org.apache.kafka.common.requests.AbstractResponse)49 HashMap (java.util.HashMap)38 ChannelBuilder (org.apache.kafka.common.network.ChannelBuilder)38 KafkaFutureImpl (org.apache.kafka.common.internals.KafkaFutureImpl)36 ArrayList (java.util.ArrayList)28 Map (java.util.Map)26 Errors (org.apache.kafka.common.protocol.Errors)21 ApiError (org.apache.kafka.common.requests.ApiError)18 KafkaFuture (org.apache.kafka.common.KafkaFuture)16 List (java.util.List)15 TreeMap (java.util.TreeMap)15 MetadataResponse (org.apache.kafka.common.requests.MetadataResponse)15 AtomicInteger (java.util.concurrent.atomic.AtomicInteger)14 TopicPartition (org.apache.kafka.common.TopicPartition)14 InvalidTopicException (org.apache.kafka.common.errors.InvalidTopicException)14 LinkedList (java.util.LinkedList)13 MetadataRequest (org.apache.kafka.common.requests.MetadataRequest)13 Set (java.util.Set)12 ApiException (org.apache.kafka.common.errors.ApiException)12 HashSet (java.util.HashSet)11