Use of org.apache.kafka.common.internals.KafkaFutureImpl in the project apache-kafka-on-k8s by banzaicloud.
From the class TaskManagerTest, the method shouldNotSendPurgeDataIfPreviousNotDone:
@Test
public void shouldNotSendPurgeDataIfPreviousNotDone() {
    final KafkaFuture<DeletedRecords> futureDeletedRecords = new KafkaFutureImpl<>();
    final Map<TopicPartition, RecordsToDelete> recordsToDelete = Collections.singletonMap(t1p1, RecordsToDelete.beforeOffset(5L));
    final DeleteRecordsResult deleteRecordsResult = new DeleteRecordsResult(Collections.singletonMap(t1p1, futureDeletedRecords));
    EasyMock.expect(active.recordsToDelete()).andReturn(Collections.singletonMap(t1p1, 5L)).once();
    EasyMock.expect(adminClient.deleteRecords(recordsToDelete)).andReturn(deleteRecordsResult).once();
    replay();

    taskManager.maybePurgeCommitedRecords();
    // The second call should be a no-op, because the future of the first request is not done yet.
    taskManager.maybePurgeCommitedRecords();
    verify(active, adminClient);
}
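The test relies on KafkaFutureImpl starting out incomplete: the task manager holds on to the future returned by the first deleteRecords call and skips further purges until that future reports done. A minimal standalone sketch of that lifecycle (the class and variable names below are illustrative, not taken from the test):

import org.apache.kafka.common.internals.KafkaFutureImpl;

public class FutureGateSketch {
    public static void main(String[] args) throws Exception {
        KafkaFutureImpl<Long> lowWatermark = new KafkaFutureImpl<>();
        // A freshly constructed future is incomplete, which is exactly the state
        // the second maybePurgeCommitedRecords() call observes above.
        System.out.println(lowWatermark.isDone()); // false
        // The admin client would complete it once the broker's response arrives.
        lowWatermark.complete(5L);
        System.out.println(lowWatermark.isDone()); // true
        System.out.println(lowWatermark.get());    // 5
    }
}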
Use of org.apache.kafka.common.internals.KafkaFutureImpl in the project apache-kafka-on-k8s by banzaicloud.
From the class KafkaAdminClient, the method deleteRecords:
public DeleteRecordsResult deleteRecords(final Map<TopicPartition, RecordsToDelete> recordsToDelete, final DeleteRecordsOptions options) {
    // Requests must be sent to the leader node of each partition, so the provided map
    // is later regrouped into one map of topic partitions per leader.
    final Map<TopicPartition, KafkaFutureImpl<DeletedRecords>> futures = new HashMap<>(recordsToDelete.size());
    for (TopicPartition topicPartition : recordsToDelete.keySet()) {
        futures.put(topicPartition, new KafkaFutureImpl<DeletedRecords>());
    }
    // Collect the topic names whose metadata needs to be fetched.
    final Set<String> topics = new HashSet<>();
    for (TopicPartition topicPartition : recordsToDelete.keySet()) {
        topics.add(topicPartition.topic());
    }
    final long nowMetadata = time.milliseconds();
    final long deadline = calcDeadlineMs(nowMetadata, options.timeoutMs());
    // Fetch topic metadata in order to discover the partition leaders.
    runnable.call(new Call("topicsMetadata", deadline, new LeastLoadedNodeProvider()) {
        @Override
        AbstractRequest.Builder createRequest(int timeoutMs) {
            return new MetadataRequest.Builder(new ArrayList<>(topics), false);
        }

        @Override
        void handleResponse(AbstractResponse abstractResponse) {
            MetadataResponse response = (MetadataResponse) abstractResponse;
            Map<String, Errors> errors = response.errors();
            Cluster cluster = response.cluster();
            // Complete the futures of all partitions whose topic returned a metadata error.
            for (Map.Entry<String, Errors> topicError : errors.entrySet()) {
                for (Map.Entry<TopicPartition, KafkaFutureImpl<DeletedRecords>> future : futures.entrySet()) {
                    if (future.getKey().topic().equals(topicError.getKey())) {
                        future.getValue().completeExceptionally(topicError.getValue().exception());
                    }
                }
            }
            // Group the topic partitions by leader node.
            Map<Node, Map<TopicPartition, Long>> leaders = new HashMap<>();
            for (Map.Entry<TopicPartition, RecordsToDelete> entry : recordsToDelete.entrySet()) {
                // Do not send deletion requests for topics that returned a metadata error.
                if (!errors.containsKey(entry.getKey().topic())) {
                    Node node = cluster.leaderFor(entry.getKey());
                    if (node != null) {
                        if (!leaders.containsKey(node))
                            leaders.put(node, new HashMap<TopicPartition, Long>());
                        leaders.get(node).put(entry.getKey(), entry.getValue().beforeOffset());
                    } else {
                        KafkaFutureImpl<DeletedRecords> future = futures.get(entry.getKey());
                        future.completeExceptionally(Errors.LEADER_NOT_AVAILABLE.exception());
                    }
                }
            }
            // Send one DeleteRecords request per leader, covering all of that leader's partitions.
            for (final Map.Entry<Node, Map<TopicPartition, Long>> entry : leaders.entrySet()) {
                final long nowDelete = time.milliseconds();
                final int brokerId = entry.getKey().id();
                runnable.call(new Call("deleteRecords", deadline, new ConstantNodeIdProvider(brokerId)) {
                    @Override
                    AbstractRequest.Builder createRequest(int timeoutMs) {
                        return new DeleteRecordsRequest.Builder(timeoutMs, entry.getValue());
                    }

                    @Override
                    void handleResponse(AbstractResponse abstractResponse) {
                        DeleteRecordsResponse response = (DeleteRecordsResponse) abstractResponse;
                        for (Map.Entry<TopicPartition, DeleteRecordsResponse.PartitionResponse> result : response.responses().entrySet()) {
                            KafkaFutureImpl<DeletedRecords> future = futures.get(result.getKey());
                            if (result.getValue().error == Errors.NONE) {
                                future.complete(new DeletedRecords(result.getValue().lowWatermark));
                            } else {
                                future.completeExceptionally(result.getValue().error.exception());
                            }
                        }
                    }

                    @Override
                    void handleFailure(Throwable throwable) {
                        completeAllExceptionally(futures.values(), throwable);
                    }
                }, nowDelete);
            }
        }

        @Override
        void handleFailure(Throwable throwable) {
            completeAllExceptionally(futures.values(), throwable);
        }
    }, nowMetadata);
    return new DeleteRecordsResult(new HashMap<TopicPartition, KafkaFuture<DeletedRecords>>(futures));
}
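On the caller's side, each future in the returned DeleteRecordsResult resolves to a DeletedRecords carrying the partition's new low watermark once the leader responds. A hedged usage sketch, assuming a reachable broker at localhost:9092; the topic name and offset are placeholders:

import java.util.Collections;
import java.util.Map;
import org.apache.kafka.clients.admin.AdminClient;
import org.apache.kafka.clients.admin.DeleteRecordsResult;
import org.apache.kafka.clients.admin.DeletedRecords;
import org.apache.kafka.clients.admin.RecordsToDelete;
import org.apache.kafka.common.KafkaFuture;
import org.apache.kafka.common.TopicPartition;

public class DeleteRecordsUsage {
    public static void main(String[] args) throws Exception {
        // Placeholder topic/partition; adjust to your cluster.
        TopicPartition tp = new TopicPartition("my-topic", 0);
        Map<TopicPartition, RecordsToDelete> request =
            Collections.singletonMap(tp, RecordsToDelete.beforeOffset(100L));
        try (AdminClient admin = AdminClient.create(
                Collections.<String, Object>singletonMap("bootstrap.servers", "localhost:9092"))) {
            DeleteRecordsResult result = admin.deleteRecords(request);
            // lowWatermarks() exposes the per-partition futures populated above.
            KafkaFuture<DeletedRecords> future = result.lowWatermarks().get(tp);
            System.out.println("New low watermark: " + future.get().lowWatermark());
        }
    }
}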
Use of org.apache.kafka.common.internals.KafkaFutureImpl in the project apache-kafka-on-k8s by banzaicloud.
From the class KafkaAdminClient, the method createAcls:
@Override
public CreateAclsResult createAcls(Collection<AclBinding> acls, CreateAclsOptions options) {
    final long now = time.milliseconds();
    final Map<AclBinding, KafkaFutureImpl<Void>> futures = new HashMap<>();
    final List<AclCreation> aclCreations = new ArrayList<>();
    for (AclBinding acl : acls) {
        if (futures.get(acl) == null) {
            KafkaFutureImpl<Void> future = new KafkaFutureImpl<>();
            futures.put(acl, future);
            String indefinite = acl.toFilter().findIndefiniteField();
            if (indefinite == null) {
                aclCreations.add(new AclCreation(acl));
            } else {
                future.completeExceptionally(new InvalidRequestException("Invalid ACL creation: " + indefinite));
            }
        }
    }
    runnable.call(new Call("createAcls", calcDeadlineMs(now, options.timeoutMs()), new LeastLoadedNodeProvider()) {
        @Override
        AbstractRequest.Builder createRequest(int timeoutMs) {
            return new CreateAclsRequest.Builder(aclCreations);
        }

        @Override
        void handleResponse(AbstractResponse abstractResponse) {
            CreateAclsResponse response = (CreateAclsResponse) abstractResponse;
            List<AclCreationResponse> responses = response.aclCreationResponses();
            Iterator<AclCreationResponse> iter = responses.iterator();
            for (AclCreation aclCreation : aclCreations) {
                KafkaFutureImpl<Void> future = futures.get(aclCreation.acl());
                if (!iter.hasNext()) {
                    future.completeExceptionally(new UnknownServerException("The broker reported no creation result for the given ACL."));
                } else {
                    AclCreationResponse creation = iter.next();
                    if (creation.error().isFailure()) {
                        future.completeExceptionally(creation.error().exception());
                    } else {
                        future.complete(null);
                    }
                }
            }
        }

        @Override
        void handleFailure(Throwable throwable) {
            completeAllExceptionally(futures.values(), throwable);
        }
    }, now);
    return new CreateAclsResult(new HashMap<AclBinding, KafkaFuture<Void>>(futures));
}
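A caller-side sketch of this API, using the pre-2.0 Resource-based ACL classes that this fork ships; the principal, host, topic name, and bootstrap address are placeholders:

import java.util.Collections;
import org.apache.kafka.clients.admin.AdminClient;
import org.apache.kafka.clients.admin.CreateAclsResult;
import org.apache.kafka.common.acl.AccessControlEntry;
import org.apache.kafka.common.acl.AclBinding;
import org.apache.kafka.common.acl.AclOperation;
import org.apache.kafka.common.acl.AclPermissionType;
import org.apache.kafka.common.resource.Resource;
import org.apache.kafka.common.resource.ResourceType;

public class CreateAclsUsage {
    public static void main(String[] args) throws Exception {
        // Grant READ on a placeholder topic to a placeholder principal.
        AclBinding binding = new AclBinding(
            new Resource(ResourceType.TOPIC, "my-topic"),
            new AccessControlEntry("User:alice", "*", AclOperation.READ, AclPermissionType.ALLOW));
        try (AdminClient admin = AdminClient.create(
                Collections.<String, Object>singletonMap("bootstrap.servers", "localhost:9092"))) {
            CreateAclsResult result = admin.createAcls(Collections.singleton(binding));
            // all() fails if any individual ACL future completed exceptionally.
            result.all().get();
        }
    }
}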
Use of org.apache.kafka.common.internals.KafkaFutureImpl in the project apache-kafka-on-k8s by banzaicloud.
From the class KafkaAdminClient, the method describeConfigs:
@Override
public DescribeConfigsResult describeConfigs(Collection<ConfigResource> configResources, final DescribeConfigsOptions options) {
    final Map<ConfigResource, KafkaFutureImpl<Config>> unifiedRequestFutures = new HashMap<>();
    final Map<ConfigResource, KafkaFutureImpl<Config>> brokerFutures = new HashMap<>(configResources.size());
    // The BROKER resources which we want to describe. We must make a separate DescribeConfigs
    // request for every BROKER resource we want to describe.
    final Collection<Resource> brokerResources = new ArrayList<>();
    // The non-BROKER resources which we want to describe. These resources can be described by a
    // single, unified DescribeConfigs request.
    final Collection<Resource> unifiedRequestResources = new ArrayList<>(configResources.size());
    for (ConfigResource resource : configResources) {
        if (resource.type() == ConfigResource.Type.BROKER && !resource.isDefault()) {
            brokerFutures.put(resource, new KafkaFutureImpl<Config>());
            brokerResources.add(configResourceToResource(resource));
        } else {
            unifiedRequestFutures.put(resource, new KafkaFutureImpl<Config>());
            unifiedRequestResources.add(configResourceToResource(resource));
        }
    }
    final long now = time.milliseconds();
    if (!unifiedRequestResources.isEmpty()) {
        runnable.call(new Call("describeConfigs", calcDeadlineMs(now, options.timeoutMs()), new LeastLoadedNodeProvider()) {
            @Override
            AbstractRequest.Builder createRequest(int timeoutMs) {
                return new DescribeConfigsRequest.Builder(unifiedRequestResources).includeSynonyms(options.includeSynonyms());
            }

            @Override
            void handleResponse(AbstractResponse abstractResponse) {
                DescribeConfigsResponse response = (DescribeConfigsResponse) abstractResponse;
                for (Map.Entry<ConfigResource, KafkaFutureImpl<Config>> entry : unifiedRequestFutures.entrySet()) {
                    ConfigResource configResource = entry.getKey();
                    KafkaFutureImpl<Config> future = entry.getValue();
                    DescribeConfigsResponse.Config config = response.config(configResourceToResource(configResource));
                    if (config == null) {
                        future.completeExceptionally(new UnknownServerException("Malformed broker response: missing config for " + configResource));
                        continue;
                    }
                    if (config.error().isFailure()) {
                        future.completeExceptionally(config.error().exception());
                        continue;
                    }
                    List<ConfigEntry> configEntries = new ArrayList<>();
                    for (DescribeConfigsResponse.ConfigEntry configEntry : config.entries()) {
                        configEntries.add(new ConfigEntry(configEntry.name(), configEntry.value(), configSource(configEntry.source()), configEntry.isSensitive(), configEntry.isReadOnly(), configSynonyms(configEntry)));
                    }
                    future.complete(new Config(configEntries));
                }
            }

            @Override
            void handleFailure(Throwable throwable) {
                completeAllExceptionally(unifiedRequestFutures.values(), throwable);
            }
        }, now);
    }
    for (Map.Entry<ConfigResource, KafkaFutureImpl<Config>> entry : brokerFutures.entrySet()) {
        final KafkaFutureImpl<Config> brokerFuture = entry.getValue();
        final Resource resource = configResourceToResource(entry.getKey());
        final int nodeId = Integer.parseInt(resource.name());
        runnable.call(new Call("describeBrokerConfigs", calcDeadlineMs(now, options.timeoutMs()), new ConstantNodeIdProvider(nodeId)) {
            @Override
            AbstractRequest.Builder createRequest(int timeoutMs) {
                return new DescribeConfigsRequest.Builder(Collections.singleton(resource)).includeSynonyms(options.includeSynonyms());
            }

            @Override
            void handleResponse(AbstractResponse abstractResponse) {
                DescribeConfigsResponse response = (DescribeConfigsResponse) abstractResponse;
                DescribeConfigsResponse.Config config = response.configs().get(resource);
                if (config == null) {
                    brokerFuture.completeExceptionally(new UnknownServerException("Malformed broker response: missing config for " + resource));
                    return;
                }
                if (config.error().isFailure()) {
                    brokerFuture.completeExceptionally(config.error().exception());
                } else {
                    List<ConfigEntry> configEntries = new ArrayList<>();
                    for (DescribeConfigsResponse.ConfigEntry configEntry : config.entries()) {
                        configEntries.add(new ConfigEntry(configEntry.name(), configEntry.value(), configSource(configEntry.source()), configEntry.isSensitive(), configEntry.isReadOnly(), configSynonyms(configEntry)));
                    }
                    brokerFuture.complete(new Config(configEntries));
                }
            }

            @Override
            void handleFailure(Throwable throwable) {
                brokerFuture.completeExceptionally(throwable);
            }
        }, now);
    }
    final Map<ConfigResource, KafkaFuture<Config>> allFutures = new HashMap<>();
    allFutures.putAll(brokerFutures);
    allFutures.putAll(unifiedRequestFutures);
    return new DescribeConfigsResult(allFutures);
}
Use of org.apache.kafka.common.internals.KafkaFutureImpl in the project apache-kafka-on-k8s by banzaicloud.
From the class KafkaAdminClient, the method alterReplicaLogDirs:
@Override
public AlterReplicaLogDirsResult alterReplicaLogDirs(Map<TopicPartitionReplica, String> replicaAssignment, final AlterReplicaLogDirsOptions options) {
    final Map<TopicPartitionReplica, KafkaFutureImpl<Void>> futures = new HashMap<>(replicaAssignment.size());
    for (TopicPartitionReplica replica : replicaAssignment.keySet())
        futures.put(replica, new KafkaFutureImpl<Void>());

    // Group the requested moves by broker id, since each broker only manages its own replicas.
    Map<Integer, Map<TopicPartition, String>> replicaAssignmentByBroker = new HashMap<>();
    for (Map.Entry<TopicPartitionReplica, String> entry : replicaAssignment.entrySet()) {
        TopicPartitionReplica replica = entry.getKey();
        String logDir = entry.getValue();
        int brokerId = replica.brokerId();
        TopicPartition topicPartition = new TopicPartition(replica.topic(), replica.partition());
        if (!replicaAssignmentByBroker.containsKey(brokerId))
            replicaAssignmentByBroker.put(brokerId, new HashMap<TopicPartition, String>());
        replicaAssignmentByBroker.get(brokerId).put(topicPartition, logDir);
    }

    final long now = time.milliseconds();
    for (Map.Entry<Integer, Map<TopicPartition, String>> entry : replicaAssignmentByBroker.entrySet()) {
        final int brokerId = entry.getKey();
        final Map<TopicPartition, String> assignment = entry.getValue();
        runnable.call(new Call("alterReplicaLogDirs", calcDeadlineMs(now, options.timeoutMs()), new ConstantNodeIdProvider(brokerId)) {
            @Override
            public AbstractRequest.Builder createRequest(int timeoutMs) {
                return new AlterReplicaLogDirsRequest.Builder(assignment);
            }

            @Override
            public void handleResponse(AbstractResponse abstractResponse) {
                AlterReplicaLogDirsResponse response = (AlterReplicaLogDirsResponse) abstractResponse;
                for (Map.Entry<TopicPartition, Errors> responseEntry : response.responses().entrySet()) {
                    TopicPartition tp = responseEntry.getKey();
                    Errors error = responseEntry.getValue();
                    TopicPartitionReplica replica = new TopicPartitionReplica(tp.topic(), tp.partition(), brokerId);
                    KafkaFutureImpl<Void> future = futures.get(replica);
                    if (future == null) {
                        handleFailure(new IllegalStateException("The partition " + tp + " in the response from broker " + brokerId + " is not in the request"));
                    } else if (error == Errors.NONE) {
                        future.complete(null);
                    } else {
                        future.completeExceptionally(error.exception());
                    }
                }
            }

            @Override
            void handleFailure(Throwable throwable) {
                completeAllExceptionally(futures.values(), throwable);
            }
        }, now);
    }
    return new AlterReplicaLogDirsResult(new HashMap<TopicPartitionReplica, KafkaFuture<Void>>(futures));
}
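A caller-side sketch, assuming broker 0 hosts the replica and that the target directory is already listed in that broker's log.dirs; the topic name, broker id, log-dir path, and bootstrap address are placeholders:

import java.util.Collections;
import org.apache.kafka.clients.admin.AdminClient;
import org.apache.kafka.clients.admin.AlterReplicaLogDirsResult;
import org.apache.kafka.common.TopicPartitionReplica;

public class AlterReplicaLogDirsUsage {
    public static void main(String[] args) throws Exception {
        // Move partition 0 of a placeholder topic on broker 0 to a placeholder log dir.
        TopicPartitionReplica replica = new TopicPartitionReplica("my-topic", 0, 0);
        try (AdminClient admin = AdminClient.create(
                Collections.<String, Object>singletonMap("bootstrap.servers", "localhost:9092"))) {
            AlterReplicaLogDirsResult result = admin.alterReplicaLogDirs(
                Collections.singletonMap(replica, "/data/kafka-logs-1"));
            // all() completes once every per-replica future above has completed.
            result.all().get();
        }
    }
}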