Search in sources:

Example 6 with KafkaFutureImpl

use of org.apache.kafka.common.internals.KafkaFutureImpl in project apache-kafka-on-k8s by banzaicloud.

From the class TaskManagerTest, the method shouldNotSendPurgeDataIfPreviousNotDone:

// Verifies that while a delete-records request is still in flight, a second
// purge attempt does not issue another request to the admin client: the mocks
// expect exactly one recordsToDelete()/deleteRecords() interaction across two
// maybePurgeCommitedRecords() calls.
@Test
public void shouldNotSendPurgeDataIfPreviousNotDone() {
    // A future that is never completed, so the first purge request stays "in flight".
    final KafkaFuture<DeletedRecords> pendingDeletion = new KafkaFutureImpl<>();
    final DeleteRecordsResult inFlightResult = new DeleteRecordsResult(Collections.singletonMap(t1p1, pendingDeletion));
    final Map<TopicPartition, RecordsToDelete> expectedRequest = Collections.singletonMap(t1p1, RecordsToDelete.beforeOffset(5L));
    EasyMock.expect(active.recordsToDelete()).andReturn(Collections.singletonMap(t1p1, 5L)).once();
    EasyMock.expect(adminClient.deleteRecords(expectedRequest)).andReturn(inFlightResult).once();
    replay();
    taskManager.maybePurgeCommitedRecords();
    // The second call must be a no-op because the first request has not completed yet.
    taskManager.maybePurgeCommitedRecords();
    verify(active, adminClient);
}
Also used : TopicPartition(org.apache.kafka.common.TopicPartition) DeletedRecords(org.apache.kafka.clients.admin.DeletedRecords) RecordsToDelete(org.apache.kafka.clients.admin.RecordsToDelete) KafkaFutureImpl(org.apache.kafka.common.internals.KafkaFutureImpl) DeleteRecordsResult(org.apache.kafka.clients.admin.DeleteRecordsResult) Test(org.junit.Test)

Example 7 with KafkaFutureImpl

use of org.apache.kafka.common.internals.KafkaFutureImpl in project apache-kafka-on-k8s by banzaicloud.

From the class KafkaAdminClient, the method deleteRecords:

/**
 * Deletes records before the per-partition offsets given in {@code recordsToDelete}.
 * <p>
 * Delete-records requests must go to each partition's leader, so this first issues a
 * metadata request to discover leaders, then fans out one DeleteRecordsRequest per
 * leader node. One future per input partition is returned immediately; each future is
 * completed when its partition's leader responds (or with the relevant error).
 *
 * @param recordsToDelete partitions and the offsets before which records are deleted
 * @param options         request options (timeout)
 * @return a result holding one future per requested partition
 */
public DeleteRecordsResult deleteRecords(final Map<TopicPartition, RecordsToDelete> recordsToDelete, final DeleteRecordsOptions options) {
    // requests need to be sent to partitions leader nodes so ...
    // ... from the provided map it's needed to create more maps grouping topic/partition per leader
    final Map<TopicPartition, KafkaFutureImpl<DeletedRecords>> futures = new HashMap<>(recordsToDelete.size());
    // Topics we need metadata for, collected in the same pass that creates the futures.
    final Set<String> topics = new HashSet<>();
    for (TopicPartition topicPartition : recordsToDelete.keySet()) {
        futures.put(topicPartition, new KafkaFutureImpl<DeletedRecords>());
        topics.add(topicPartition.topic());
    }
    final long nowMetadata = time.milliseconds();
    final long deadline = calcDeadlineMs(nowMetadata, options.timeoutMs());
    // asking for topics metadata for getting partitions leaders
    runnable.call(new Call("topicsMetadata", deadline, new LeastLoadedNodeProvider()) {

        @Override
        AbstractRequest.Builder createRequest(int timeoutMs) {
            // false: do not allow topic auto-creation while fetching metadata
            return new MetadataRequest.Builder(new ArrayList<>(topics), false);
        }

        @Override
        void handleResponse(AbstractResponse abstractResponse) {
            MetadataResponse response = (MetadataResponse) abstractResponse;
            Map<String, Errors> errors = response.errors();
            Cluster cluster = response.cluster();
            // completing futures for topics with errors
            for (Map.Entry<String, Errors> topicError : errors.entrySet()) {
                for (Map.Entry<TopicPartition, KafkaFutureImpl<DeletedRecords>> future : futures.entrySet()) {
                    if (future.getKey().topic().equals(topicError.getKey())) {
                        future.getValue().completeExceptionally(topicError.getValue().exception());
                    }
                }
            }
            // grouping topic partitions per leader
            Map<Node, Map<TopicPartition, Long>> leaders = new HashMap<>();
            for (Map.Entry<TopicPartition, RecordsToDelete> entry : recordsToDelete.entrySet()) {
                // avoiding to send deletion request for topics with errors
                if (!errors.containsKey(entry.getKey().topic())) {
                    Node node = cluster.leaderFor(entry.getKey());
                    if (node != null) {
                        if (!leaders.containsKey(node))
                            leaders.put(node, new HashMap<TopicPartition, Long>());
                        leaders.get(node).put(entry.getKey(), entry.getValue().beforeOffset());
                    } else {
                        // No known leader: fail this partition's future without sending anything.
                        KafkaFutureImpl<DeletedRecords> future = futures.get(entry.getKey());
                        future.completeExceptionally(Errors.LEADER_NOT_AVAILABLE.exception());
                    }
                }
            }
            // One delete-records request per leader node.
            for (final Map.Entry<Node, Map<TopicPartition, Long>> entry : leaders.entrySet()) {
                final long nowDelete = time.milliseconds();
                final int brokerId = entry.getKey().id();
                runnable.call(new Call("deleteRecords", deadline, new ConstantNodeIdProvider(brokerId)) {

                    @Override
                    AbstractRequest.Builder createRequest(int timeoutMs) {
                        return new DeleteRecordsRequest.Builder(timeoutMs, entry.getValue());
                    }

                    @Override
                    void handleResponse(AbstractResponse abstractResponse) {
                        DeleteRecordsResponse response = (DeleteRecordsResponse) abstractResponse;
                        for (Map.Entry<TopicPartition, DeleteRecordsResponse.PartitionResponse> result : response.responses().entrySet()) {
                            KafkaFutureImpl<DeletedRecords> future = futures.get(result.getKey());
                            if (result.getValue().error == Errors.NONE) {
                                future.complete(new DeletedRecords(result.getValue().lowWatermark));
                            } else {
                                future.completeExceptionally(result.getValue().error.exception());
                            }
                        }
                    }

                    @Override
                    void handleFailure(Throwable throwable) {
                        // Fail ONLY the partitions sent to this broker. Previously this
                        // called completeAllExceptionally(futures.values(), ...), which
                        // also failed futures belonging to other brokers' still-pending
                        // (and possibly succeeding) requests.
                        for (TopicPartition partition : entry.getValue().keySet()) {
                            futures.get(partition).completeExceptionally(throwable);
                        }
                    }
                }, nowDelete);
            }
        }

        @Override
        void handleFailure(Throwable throwable) {
            // Metadata lookup failed: no per-broker request was sent, fail everything.
            completeAllExceptionally(futures.values(), throwable);
        }
    }, nowMetadata);
    return new DeleteRecordsResult(new HashMap<TopicPartition, KafkaFuture<DeletedRecords>>(futures));
}
Also used : HashMap(java.util.HashMap) ChannelBuilder(org.apache.kafka.common.network.ChannelBuilder) Node(org.apache.kafka.common.Node) AbstractRequest(org.apache.kafka.common.requests.AbstractRequest) ArrayList(java.util.ArrayList) MetadataResponse(org.apache.kafka.common.requests.MetadataResponse) DeleteRecordsResponse(org.apache.kafka.common.requests.DeleteRecordsResponse) HashSet(java.util.HashSet) KafkaFuture(org.apache.kafka.common.KafkaFuture) AbstractResponse(org.apache.kafka.common.requests.AbstractResponse) Cluster(org.apache.kafka.common.Cluster) KafkaFutureImpl(org.apache.kafka.common.internals.KafkaFutureImpl) MetadataRequest(org.apache.kafka.common.requests.MetadataRequest) TopicPartition(org.apache.kafka.common.TopicPartition) AtomicLong(java.util.concurrent.atomic.AtomicLong) Map(java.util.Map) HashMap(java.util.HashMap)

Example 8 with KafkaFutureImpl

use of org.apache.kafka.common.internals.KafkaFutureImpl in project apache-kafka-on-k8s by banzaicloud.

From the class KafkaAdminClient, the method createAcls:

/**
 * Creates the given ACL bindings with a single CreateAclsRequest sent to the
 * least-loaded node. Bindings with an indefinite filter field are rejected
 * locally; duplicates share one future. Broker results are matched back to the
 * submitted creations in order.
 */
@Override
public CreateAclsResult createAcls(Collection<AclBinding> acls, CreateAclsOptions options) {
    final long startMs = time.milliseconds();
    final Map<AclBinding, KafkaFutureImpl<Void>> aclFutures = new HashMap<>();
    final List<AclCreation> creations = new ArrayList<>();
    for (AclBinding acl : acls) {
        // Duplicate bindings reuse the future created for the first occurrence.
        if (aclFutures.containsKey(acl))
            continue;
        KafkaFutureImpl<Void> aclFuture = new KafkaFutureImpl<>();
        aclFutures.put(acl, aclFuture);
        String indefiniteField = acl.toFilter().findIndefiniteField();
        if (indefiniteField != null) {
            // Invalid binding: fail locally, do not send it to the broker.
            aclFuture.completeExceptionally(new InvalidRequestException("Invalid ACL creation: " + indefiniteField));
        } else {
            creations.add(new AclCreation(acl));
        }
    }
    runnable.call(new Call("createAcls", calcDeadlineMs(startMs, options.timeoutMs()), new LeastLoadedNodeProvider()) {

        @Override
        AbstractRequest.Builder createRequest(int timeoutMs) {
            return new CreateAclsRequest.Builder(creations);
        }

        @Override
        void handleResponse(AbstractResponse abstractResponse) {
            CreateAclsResponse response = (CreateAclsResponse) abstractResponse;
            // The broker answers creations positionally, in request order.
            Iterator<AclCreationResponse> results = response.aclCreationResponses().iterator();
            for (AclCreation creation : creations) {
                KafkaFutureImpl<Void> aclFuture = aclFutures.get(creation.acl());
                if (results.hasNext()) {
                    AclCreationResponse result = results.next();
                    if (result.error().isFailure()) {
                        aclFuture.completeExceptionally(result.error().exception());
                    } else {
                        aclFuture.complete(null);
                    }
                } else {
                    // Fewer results than creations: the broker response is malformed.
                    aclFuture.completeExceptionally(new UnknownServerException("The broker reported no creation result for the given ACL."));
                }
            }
        }

        @Override
        void handleFailure(Throwable throwable) {
            // Single request: failing every pending future is correct here
            // (already-completed invalid-binding futures are unaffected).
            completeAllExceptionally(aclFutures.values(), throwable);
        }
    }, startMs);
    return new CreateAclsResult(new HashMap<AclBinding, KafkaFuture<Void>>(aclFutures));
}
Also used : HashMap(java.util.HashMap) ChannelBuilder(org.apache.kafka.common.network.ChannelBuilder) ArrayList(java.util.ArrayList) CreateAclsResponse(org.apache.kafka.common.requests.CreateAclsResponse) CreateAclsRequest(org.apache.kafka.common.requests.CreateAclsRequest) Iterator(java.util.Iterator) InvalidRequestException(org.apache.kafka.common.errors.InvalidRequestException) List(java.util.List) ArrayList(java.util.ArrayList) LinkedList(java.util.LinkedList) AclBinding(org.apache.kafka.common.acl.AclBinding) AclCreation(org.apache.kafka.common.requests.CreateAclsRequest.AclCreation) KafkaFuture(org.apache.kafka.common.KafkaFuture) AbstractResponse(org.apache.kafka.common.requests.AbstractResponse) KafkaFutureImpl(org.apache.kafka.common.internals.KafkaFutureImpl) UnknownServerException(org.apache.kafka.common.errors.UnknownServerException) AclCreationResponse(org.apache.kafka.common.requests.CreateAclsResponse.AclCreationResponse)

Example 9 with KafkaFutureImpl

use of org.apache.kafka.common.internals.KafkaFutureImpl in project apache-kafka-on-k8s by banzaicloud.

From the class KafkaAdminClient, the method describeConfigs:

/**
 * Describes the configs of the given resources.
 * <p>
 * Non-BROKER resources (and the default BROKER resource) are fetched with one unified
 * DescribeConfigs request to the least-loaded node; each specific BROKER resource gets
 * its own request routed to that broker. One future per resource is returned.
 */
@Override
public DescribeConfigsResult describeConfigs(Collection<ConfigResource> configResources, final DescribeConfigsOptions options) {
    final Map<ConfigResource, KafkaFutureImpl<Config>> unifiedRequestFutures = new HashMap<>();
    final Map<ConfigResource, KafkaFutureImpl<Config>> brokerFutures = new HashMap<>(configResources.size());
    // The BROKER resources which we want to describe.  We must make a separate DescribeConfigs
    // request for every BROKER resource we want to describe.
    final Collection<Resource> brokerResources = new ArrayList<>();
    // The non-BROKER resources which we want to describe.  These resources can be described by a
    // single, unified DescribeConfigs request.
    final Collection<Resource> unifiedRequestResources = new ArrayList<>(configResources.size());
    for (ConfigResource resource : configResources) {
        if (resource.type() == ConfigResource.Type.BROKER && !resource.isDefault()) {
            brokerFutures.put(resource, new KafkaFutureImpl<Config>());
            brokerResources.add(configResourceToResource(resource));
        } else {
            unifiedRequestFutures.put(resource, new KafkaFutureImpl<Config>());
            unifiedRequestResources.add(configResourceToResource(resource));
        }
    }
    final long now = time.milliseconds();
    // Unified request for all non-broker-specific resources.
    if (!unifiedRequestResources.isEmpty()) {
        runnable.call(new Call("describeConfigs", calcDeadlineMs(now, options.timeoutMs()), new LeastLoadedNodeProvider()) {

            @Override
            AbstractRequest.Builder createRequest(int timeoutMs) {
                return new DescribeConfigsRequest.Builder(unifiedRequestResources).includeSynonyms(options.includeSynonyms());
            }

            @Override
            void handleResponse(AbstractResponse abstractResponse) {
                DescribeConfigsResponse response = (DescribeConfigsResponse) abstractResponse;
                // Complete each future from the per-resource entries of the single response.
                for (Map.Entry<ConfigResource, KafkaFutureImpl<Config>> entry : unifiedRequestFutures.entrySet()) {
                    ConfigResource configResource = entry.getKey();
                    KafkaFutureImpl<Config> future = entry.getValue();
                    DescribeConfigsResponse.Config config = response.config(configResourceToResource(configResource));
                    if (config == null) {
                        future.completeExceptionally(new UnknownServerException("Malformed broker response: missing config for " + configResource));
                        continue;
                    }
                    if (config.error().isFailure()) {
                        future.completeExceptionally(config.error().exception());
                        continue;
                    }
                    List<ConfigEntry> configEntries = new ArrayList<>();
                    for (DescribeConfigsResponse.ConfigEntry configEntry : config.entries()) {
                        configEntries.add(new ConfigEntry(configEntry.name(), configEntry.value(), configSource(configEntry.source()), configEntry.isSensitive(), configEntry.isReadOnly(), configSynonyms(configEntry)));
                    }
                    future.complete(new Config(configEntries));
                }
            }

            @Override
            void handleFailure(Throwable throwable) {
                // Only the unified-request futures fail; broker-specific calls are independent.
                completeAllExceptionally(unifiedRequestFutures.values(), throwable);
            }
        }, now);
    }
    // One dedicated request per specific BROKER resource, routed to that broker.
    for (Map.Entry<ConfigResource, KafkaFutureImpl<Config>> entry : brokerFutures.entrySet()) {
        final KafkaFutureImpl<Config> brokerFuture = entry.getValue();
        final Resource resource = configResourceToResource(entry.getKey());
        // NOTE(review): assumes a BROKER resource name is the numeric broker id — verify;
        // a non-numeric name would throw NumberFormatException out of this method.
        final int nodeId = Integer.parseInt(resource.name());
        runnable.call(new Call("describeBrokerConfigs", calcDeadlineMs(now, options.timeoutMs()), new ConstantNodeIdProvider(nodeId)) {

            @Override
            AbstractRequest.Builder createRequest(int timeoutMs) {
                return new DescribeConfigsRequest.Builder(Collections.singleton(resource)).includeSynonyms(options.includeSynonyms());
            }

            @Override
            void handleResponse(AbstractResponse abstractResponse) {
                DescribeConfigsResponse response = (DescribeConfigsResponse) abstractResponse;
                DescribeConfigsResponse.Config config = response.configs().get(resource);
                if (config == null) {
                    brokerFuture.completeExceptionally(new UnknownServerException("Malformed broker response: missing config for " + resource));
                    return;
                }
                if (config.error().isFailure())
                    brokerFuture.completeExceptionally(config.error().exception());
                else {
                    List<ConfigEntry> configEntries = new ArrayList<>();
                    for (DescribeConfigsResponse.ConfigEntry configEntry : config.entries()) {
                        configEntries.add(new ConfigEntry(configEntry.name(), configEntry.value(), configSource(configEntry.source()), configEntry.isSensitive(), configEntry.isReadOnly(), configSynonyms(configEntry)));
                    }
                    brokerFuture.complete(new Config(configEntries));
                }
            }

            @Override
            void handleFailure(Throwable throwable) {
                // Each broker call only owns its own future.
                brokerFuture.completeExceptionally(throwable);
            }
        }, now);
    }
    // Merge both future maps into the single result handed back to the caller.
    final Map<ConfigResource, KafkaFuture<Config>> allFutures = new HashMap<>();
    allFutures.putAll(brokerFutures);
    allFutures.putAll(unifiedRequestFutures);
    return new DescribeConfigsResult(allFutures);
}
Also used : HashMap(java.util.HashMap) MetricConfig(org.apache.kafka.common.metrics.MetricConfig) ChannelBuilder(org.apache.kafka.common.network.ChannelBuilder) ArrayList(java.util.ArrayList) List(java.util.List) ArrayList(java.util.ArrayList) LinkedList(java.util.LinkedList) KafkaFuture(org.apache.kafka.common.KafkaFuture) AbstractResponse(org.apache.kafka.common.requests.AbstractResponse) ConfigResource(org.apache.kafka.common.config.ConfigResource) Resource(org.apache.kafka.common.requests.Resource) KafkaFutureImpl(org.apache.kafka.common.internals.KafkaFutureImpl) UnknownServerException(org.apache.kafka.common.errors.UnknownServerException) ConfigResource(org.apache.kafka.common.config.ConfigResource) Map(java.util.Map) HashMap(java.util.HashMap) DescribeConfigsResponse(org.apache.kafka.common.requests.DescribeConfigsResponse)

Example 10 with KafkaFutureImpl

use of org.apache.kafka.common.internals.KafkaFutureImpl in project apache-kafka-on-k8s by banzaicloud.

From the class KafkaAdminClient, the method alterReplicaLogDirs:

/**
 * Moves the given replicas to new log directories on their brokers.
 * <p>
 * The assignment is grouped by broker id and one AlterReplicaLogDirsRequest is sent per
 * broker. One future per input replica is returned; each is completed from its broker's
 * response (or failed with that broker's error).
 *
 * @param replicaAssignment replica -> absolute log directory path on that replica's broker
 * @param options           request options (timeout)
 * @return a result holding one future per requested replica
 */
@Override
public AlterReplicaLogDirsResult alterReplicaLogDirs(Map<TopicPartitionReplica, String> replicaAssignment, final AlterReplicaLogDirsOptions options) {
    final Map<TopicPartitionReplica, KafkaFutureImpl<Void>> futures = new HashMap<>(replicaAssignment.size());
    for (TopicPartitionReplica replica : replicaAssignment.keySet()) futures.put(replica, new KafkaFutureImpl<Void>());
    // Group the assignment per broker, since each broker only accepts its own replicas.
    Map<Integer, Map<TopicPartition, String>> replicaAssignmentByBroker = new HashMap<>();
    for (Map.Entry<TopicPartitionReplica, String> entry : replicaAssignment.entrySet()) {
        TopicPartitionReplica replica = entry.getKey();
        String logDir = entry.getValue();
        int brokerId = replica.brokerId();
        TopicPartition topicPartition = new TopicPartition(replica.topic(), replica.partition());
        if (!replicaAssignmentByBroker.containsKey(brokerId))
            replicaAssignmentByBroker.put(brokerId, new HashMap<TopicPartition, String>());
        replicaAssignmentByBroker.get(brokerId).put(topicPartition, logDir);
    }
    final long now = time.milliseconds();
    for (Map.Entry<Integer, Map<TopicPartition, String>> entry : replicaAssignmentByBroker.entrySet()) {
        final int brokerId = entry.getKey();
        final Map<TopicPartition, String> assignment = entry.getValue();
        runnable.call(new Call("alterReplicaLogDirs", calcDeadlineMs(now, options.timeoutMs()), new ConstantNodeIdProvider(brokerId)) {

            @Override
            public AbstractRequest.Builder createRequest(int timeoutMs) {
                return new AlterReplicaLogDirsRequest.Builder(assignment);
            }

            @Override
            public void handleResponse(AbstractResponse abstractResponse) {
                AlterReplicaLogDirsResponse response = (AlterReplicaLogDirsResponse) abstractResponse;
                for (Map.Entry<TopicPartition, Errors> responseEntry : response.responses().entrySet()) {
                    TopicPartition tp = responseEntry.getKey();
                    Errors error = responseEntry.getValue();
                    // Map the partition back to the replica key used in the futures map.
                    TopicPartitionReplica replica = new TopicPartitionReplica(tp.topic(), tp.partition(), brokerId);
                    KafkaFutureImpl<Void> future = futures.get(replica);
                    if (future == null) {
                        handleFailure(new IllegalStateException("The partition " + tp + " in the response from broker " + brokerId + " is not in the request"));
                    } else if (error == Errors.NONE) {
                        future.complete(null);
                    } else {
                        future.completeExceptionally(error.exception());
                    }
                }
            }

            @Override
            void handleFailure(Throwable throwable) {
                // Fail ONLY the replicas assigned to this broker. Previously this called
                // completeAllExceptionally(futures.values(), ...), which also failed
                // futures belonging to other brokers' still-pending (and possibly
                // succeeding) requests.
                for (TopicPartition tp : assignment.keySet()) {
                    futures.get(new TopicPartitionReplica(tp.topic(), tp.partition(), brokerId)).completeExceptionally(throwable);
                }
            }
        }, now);
    }
    return new AlterReplicaLogDirsResult(new HashMap<TopicPartitionReplica, KafkaFuture<Void>>(futures));
}
Also used : AlterReplicaLogDirsResponse(org.apache.kafka.common.requests.AlterReplicaLogDirsResponse) HashMap(java.util.HashMap) ChannelBuilder(org.apache.kafka.common.network.ChannelBuilder) TopicPartitionReplica(org.apache.kafka.common.TopicPartitionReplica) KafkaFuture(org.apache.kafka.common.KafkaFuture) AbstractResponse(org.apache.kafka.common.requests.AbstractResponse) KafkaFutureImpl(org.apache.kafka.common.internals.KafkaFutureImpl) AtomicInteger(java.util.concurrent.atomic.AtomicInteger) Errors(org.apache.kafka.common.protocol.Errors) TopicPartition(org.apache.kafka.common.TopicPartition) AlterReplicaLogDirsRequest(org.apache.kafka.common.requests.AlterReplicaLogDirsRequest) Map(java.util.Map) HashMap(java.util.HashMap)

Aggregations

KafkaFutureImpl (org.apache.kafka.common.internals.KafkaFutureImpl)22 KafkaFuture (org.apache.kafka.common.KafkaFuture)18 HashMap (java.util.HashMap)17 Map (java.util.Map)11 AbstractResponse (org.apache.kafka.common.requests.AbstractResponse)11 ChannelBuilder (org.apache.kafka.common.network.ChannelBuilder)10 ArrayList (java.util.ArrayList)9 TopicPartition (org.apache.kafka.common.TopicPartition)6 TimeoutException (org.apache.kafka.common.errors.TimeoutException)5 ConfigResource (org.apache.kafka.common.config.ConfigResource)4 AbstractRequest (org.apache.kafka.common.requests.AbstractRequest)4 LinkedList (java.util.LinkedList)3 List (java.util.List)3 AtomicInteger (java.util.concurrent.atomic.AtomicInteger)3 DeleteRecordsResult (org.apache.kafka.clients.admin.DeleteRecordsResult)3 DeletedRecords (org.apache.kafka.clients.admin.DeletedRecords)3 RecordsToDelete (org.apache.kafka.clients.admin.RecordsToDelete)3 Node (org.apache.kafka.common.Node)3 ApiException (org.apache.kafka.common.errors.ApiException)3 InvalidTopicException (org.apache.kafka.common.errors.InvalidTopicException)3