Use of org.apache.kafka.common.requests.AbstractResponse in the apache/kafka project.
From the class KafkaAdminClient, method getDeleteTopicsCall.
private Call getDeleteTopicsCall(final DeleteTopicsOptions options,
                                 final Map<String, KafkaFutureImpl<Void>> futures,
                                 final List<String> topics,
                                 final Map<String, ThrottlingQuotaExceededException> quotaExceededExceptions,
                                 final long now,
                                 final long deadline) {
    return new Call("deleteTopics", deadline, new ControllerNodeProvider()) {

        @Override
        DeleteTopicsRequest.Builder createRequest(int timeoutMs) {
            return new DeleteTopicsRequest.Builder(
                new DeleteTopicsRequestData().setTopicNames(topics).setTimeoutMs(timeoutMs));
        }

        @Override
        void handleResponse(AbstractResponse abstractResponse) {
            // Check for controller change
            handleNotControllerError(abstractResponse);
            // Handle server responses for particular topics.
            final DeleteTopicsResponse response = (DeleteTopicsResponse) abstractResponse;
            final List<String> retryTopics = new ArrayList<>();
            final Map<String, ThrottlingQuotaExceededException> retryTopicQuotaExceededExceptions = new HashMap<>();
            for (DeletableTopicResult result : response.data().responses()) {
                KafkaFutureImpl<Void> future = futures.get(result.name());
                if (future == null) {
                    log.warn("Server response mentioned unknown topic {}", result.name());
                } else {
                    ApiError error = new ApiError(result.errorCode(), result.errorMessage());
                    if (error.isFailure()) {
                        if (error.is(Errors.THROTTLING_QUOTA_EXCEEDED)) {
                            ThrottlingQuotaExceededException quotaExceededException = new ThrottlingQuotaExceededException(
                                response.throttleTimeMs(), error.messageWithFallback());
                            if (options.shouldRetryOnQuotaViolation()) {
                                retryTopics.add(result.name());
                                retryTopicQuotaExceededExceptions.put(result.name(), quotaExceededException);
                            } else {
                                future.completeExceptionally(quotaExceededException);
                            }
                        } else {
                            future.completeExceptionally(error.exception());
                        }
                    } else {
                        future.complete(null);
                    }
                }
            }
            // If there are topics to retry, retry them; otherwise complete any unrealized futures.
            if (retryTopics.isEmpty()) {
                // The server should send back a response for every topic, but do a sanity check anyway.
                completeUnrealizedFutures(futures.entrySet().stream(),
                    topic -> "The controller response did not contain a result for topic " + topic);
            } else {
                final long now = time.milliseconds();
                final Call call = getDeleteTopicsCall(options, futures, retryTopics,
                    retryTopicQuotaExceededExceptions, now, deadline);
                runnable.call(call, now);
            }
        }

        @Override
        void handleFailure(Throwable throwable) {
            // If any topics were retried due to a quota exceeded exception, propagate the
            // initial quota violation error back to the caller if the request timed out.
            maybeCompleteQuotaExceededException(options.shouldRetryOnQuotaViolation(), throwable,
                futures, quotaExceededExceptions, (int) (time.milliseconds() - now));
            // Fail all the other remaining futures
            completeAllExceptionally(futures.values(), throwable);
        }
    };
}
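For context, this Call is built by the public Admin#deleteTopics API. Below is a minimal caller-side sketch of that path, including the retryOnQuotaViolation option the handler above honors; the bootstrap address and topic names are placeholder assumptions, not taken from the source.

import java.util.List;
import java.util.Properties;
import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.clients.admin.AdminClientConfig;
import org.apache.kafka.clients.admin.DeleteTopicsOptions;
import org.apache.kafka.clients.admin.DeleteTopicsResult;

public class DeleteTopicsExample {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        // Placeholder bootstrap address for this sketch.
        props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
        try (Admin admin = Admin.create(props)) {
            // retryOnQuotaViolation(false) surfaces ThrottlingQuotaExceededException to the
            // caller instead of letting the admin client retry throttled topics internally.
            DeleteTopicsResult result = admin.deleteTopics(
                List.of("orders", "payments"),
                new DeleteTopicsOptions().retryOnQuotaViolation(false));
            // all() completes only once every per-topic future has completed.
            result.all().get();
        }
    }
}

With the option left at its default of true, the handler above keeps resubmitting throttled topics via getDeleteTopicsCall until the request deadline expires.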
Use of org.apache.kafka.common.requests.AbstractResponse in the apache/kafka project.
From the class KafkaAdminClient, method describeConfigs.
@Override
public DescribeConfigsResult describeConfigs(Collection<ConfigResource> configResources, final DescribeConfigsOptions options) {
    // Partition the requested config resources based on which broker they must be sent to,
    // with the null broker key used for config resources that can be obtained from any broker.
    final Map<Integer, Map<ConfigResource, KafkaFutureImpl<Config>>> brokerFutures = new HashMap<>(configResources.size());
    for (ConfigResource resource : configResources) {
        Integer broker = nodeFor(resource);
        brokerFutures.compute(broker, (key, value) -> {
            if (value == null) {
                value = new HashMap<>();
            }
            value.put(resource, new KafkaFutureImpl<>());
            return value;
        });
    }
    final long now = time.milliseconds();
    for (Map.Entry<Integer, Map<ConfigResource, KafkaFutureImpl<Config>>> entry : brokerFutures.entrySet()) {
        Integer broker = entry.getKey();
        Map<ConfigResource, KafkaFutureImpl<Config>> unified = entry.getValue();
        runnable.call(new Call("describeConfigs", calcDeadlineMs(now, options.timeoutMs()),
            broker != null ? new ConstantNodeIdProvider(broker) : new LeastLoadedNodeProvider()) {

            @Override
            DescribeConfigsRequest.Builder createRequest(int timeoutMs) {
                return new DescribeConfigsRequest.Builder(new DescribeConfigsRequestData()
                    .setResources(unified.keySet().stream()
                        .map(config -> new DescribeConfigsRequestData.DescribeConfigsResource()
                            .setResourceName(config.name())
                            .setResourceType(config.type().id())
                            .setConfigurationKeys(null))
                        .collect(Collectors.toList()))
                    .setIncludeSynonyms(options.includeSynonyms())
                    .setIncludeDocumentation(options.includeDocumentation()));
            }

            @Override
            void handleResponse(AbstractResponse abstractResponse) {
                DescribeConfigsResponse response = (DescribeConfigsResponse) abstractResponse;
                for (Map.Entry<ConfigResource, DescribeConfigsResponseData.DescribeConfigsResult> entry : response.resultMap().entrySet()) {
                    ConfigResource configResource = entry.getKey();
                    DescribeConfigsResponseData.DescribeConfigsResult describeConfigsResult = entry.getValue();
                    KafkaFutureImpl<Config> future = unified.get(configResource);
                    if (future == null) {
                        if (broker != null) {
                            log.warn("The config {} in the response from broker {} is not in the request", configResource, broker);
                        } else {
                            log.warn("The config {} in the response from the least loaded broker is not in the request", configResource);
                        }
                    } else {
                        if (describeConfigsResult.errorCode() != Errors.NONE.code()) {
                            future.completeExceptionally(Errors.forCode(describeConfigsResult.errorCode())
                                .exception(describeConfigsResult.errorMessage()));
                        } else {
                            future.complete(describeConfigResult(describeConfigsResult));
                        }
                    }
                }
                completeUnrealizedFutures(unified.entrySet().stream(),
                    configResource -> "The broker response did not contain a result for config resource " + configResource);
            }

            @Override
            void handleFailure(Throwable throwable) {
                completeAllExceptionally(unified.values(), throwable);
            }
        }, now);
    }
    return new DescribeConfigsResult(new HashMap<>(brokerFutures.entrySet().stream()
        .flatMap(x -> x.getValue().entrySet().stream())
        .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue))));
}
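The per-broker partitioning above means a single describeConfigs call can fan out into several requests. A usage sketch follows; the bootstrap address, topic name, and broker id are placeholder assumptions.

import java.util.List;
import java.util.Map;
import java.util.Properties;
import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.clients.admin.AdminClientConfig;
import org.apache.kafka.clients.admin.Config;
import org.apache.kafka.clients.admin.DescribeConfigsOptions;
import org.apache.kafka.common.config.ConfigResource;

public class DescribeConfigsExample {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // placeholder address
        try (Admin admin = Admin.create(props)) {
            // A broker resource must be answered by that broker (node 0 here), while a
            // topic resource can be answered by the least loaded broker.
            ConfigResource topic = new ConfigResource(ConfigResource.Type.TOPIC, "orders");
            ConfigResource broker = new ConfigResource(ConfigResource.Type.BROKER, "0");
            Map<ConfigResource, Config> configs = admin
                .describeConfigs(List.of(topic, broker), new DescribeConfigsOptions().includeSynonyms(true))
                .all()
                .get();
            configs.forEach((resource, config) ->
                System.out.println(resource + " -> " + config.entries().size() + " entries"));
        }
    }
}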
Use of org.apache.kafka.common.requests.AbstractResponse in the apache/kafka project.
From the class KafkaAdminClient, method describeReplicaLogDirs.
@Override
public DescribeReplicaLogDirsResult describeReplicaLogDirs(Collection<TopicPartitionReplica> replicas, DescribeReplicaLogDirsOptions options) {
    final Map<TopicPartitionReplica, KafkaFutureImpl<DescribeReplicaLogDirsResult.ReplicaLogDirInfo>> futures = new HashMap<>(replicas.size());
    for (TopicPartitionReplica replica : replicas) {
        futures.put(replica, new KafkaFutureImpl<>());
    }
    Map<Integer, DescribeLogDirsRequestData> partitionsByBroker = new HashMap<>();
    for (TopicPartitionReplica replica : replicas) {
        DescribeLogDirsRequestData requestData = partitionsByBroker.computeIfAbsent(replica.brokerId(),
            brokerId -> new DescribeLogDirsRequestData());
        DescribableLogDirTopic describableLogDirTopic = requestData.topics().find(replica.topic());
        if (describableLogDirTopic == null) {
            List<Integer> partitions = new ArrayList<>();
            partitions.add(replica.partition());
            describableLogDirTopic = new DescribableLogDirTopic()
                .setTopic(replica.topic())
                .setPartitions(partitions);
            requestData.topics().add(describableLogDirTopic);
        } else {
            describableLogDirTopic.partitions().add(replica.partition());
        }
    }
    final long now = time.milliseconds();
    for (Map.Entry<Integer, DescribeLogDirsRequestData> entry : partitionsByBroker.entrySet()) {
        final int brokerId = entry.getKey();
        final DescribeLogDirsRequestData topicPartitions = entry.getValue();
        final Map<TopicPartition, ReplicaLogDirInfo> replicaDirInfoByPartition = new HashMap<>();
        for (DescribableLogDirTopic topicPartition : topicPartitions.topics()) {
            for (Integer partitionId : topicPartition.partitions()) {
                replicaDirInfoByPartition.put(new TopicPartition(topicPartition.topic(), partitionId), new ReplicaLogDirInfo());
            }
        }
        runnable.call(new Call("describeReplicaLogDirs", calcDeadlineMs(now, options.timeoutMs()),
            new ConstantNodeIdProvider(brokerId)) {

            @Override
            public DescribeLogDirsRequest.Builder createRequest(int timeoutMs) {
                // Query selected partitions in all log directories
                return new DescribeLogDirsRequest.Builder(topicPartitions);
            }

            @Override
            public void handleResponse(AbstractResponse abstractResponse) {
                DescribeLogDirsResponse response = (DescribeLogDirsResponse) abstractResponse;
                for (Map.Entry<String, LogDirDescription> responseEntry : logDirDescriptions(response).entrySet()) {
                    String logDir = responseEntry.getKey();
                    LogDirDescription logDirInfo = responseEntry.getValue();
                    // No replica info will be provided if the log directory is offline
                    if (logDirInfo.error() instanceof KafkaStorageException)
                        continue;
                    if (logDirInfo.error() != null)
                        handleFailure(new IllegalStateException("The error " + logDirInfo.error().getClass().getName()
                            + " for log directory " + logDir + " in the response from broker " + brokerId + " is illegal"));
                    for (Map.Entry<TopicPartition, ReplicaInfo> replicaInfoEntry : logDirInfo.replicaInfos().entrySet()) {
                        TopicPartition tp = replicaInfoEntry.getKey();
                        ReplicaInfo replicaInfo = replicaInfoEntry.getValue();
                        ReplicaLogDirInfo replicaLogDirInfo = replicaDirInfoByPartition.get(tp);
                        if (replicaLogDirInfo == null) {
                            log.warn("Server response from broker {} mentioned unknown partition {}", brokerId, tp);
                        } else if (replicaInfo.isFuture()) {
                            replicaDirInfoByPartition.put(tp, new ReplicaLogDirInfo(
                                replicaLogDirInfo.getCurrentReplicaLogDir(),
                                replicaLogDirInfo.getCurrentReplicaOffsetLag(),
                                logDir,
                                replicaInfo.offsetLag()));
                        } else {
                            replicaDirInfoByPartition.put(tp, new ReplicaLogDirInfo(
                                logDir,
                                replicaInfo.offsetLag(),
                                replicaLogDirInfo.getFutureReplicaLogDir(),
                                replicaLogDirInfo.getFutureReplicaOffsetLag()));
                        }
                    }
                }
                for (Map.Entry<TopicPartition, ReplicaLogDirInfo> entry : replicaDirInfoByPartition.entrySet()) {
                    TopicPartition tp = entry.getKey();
                    KafkaFutureImpl<ReplicaLogDirInfo> future = futures.get(new TopicPartitionReplica(tp.topic(), tp.partition(), brokerId));
                    future.complete(entry.getValue());
                }
            }

            @Override
            void handleFailure(Throwable throwable) {
                completeAllExceptionally(futures.values(), throwable);
            }
        }, now);
    }
    return new DescribeReplicaLogDirsResult(new HashMap<>(futures));
}
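A caller-side sketch for this method follows; it assumes a placeholder cluster and a single replica identified by (topic, partition, brokerId). Per the grouping logic above, one DescribeLogDirs request is sent per distinct broker id in the replica list.

import java.util.List;
import java.util.Map;
import java.util.Properties;
import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.clients.admin.AdminClientConfig;
import org.apache.kafka.clients.admin.DescribeReplicaLogDirsResult;
import org.apache.kafka.common.TopicPartitionReplica;

public class DescribeReplicaLogDirsExample {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // placeholder address
        try (Admin admin = Admin.create(props)) {
            // Partition 0 of topic "orders" on broker 1; all values are illustrative.
            TopicPartitionReplica replica = new TopicPartitionReplica("orders", 0, 1);
            Map<TopicPartitionReplica, DescribeReplicaLogDirsResult.ReplicaLogDirInfo> infos =
                admin.describeReplicaLogDirs(List.of(replica)).all().get();
            infos.forEach((r, info) ->
                System.out.println(r + " current dir: " + info.getCurrentReplicaLogDir()
                    + ", offset lag: " + info.getCurrentReplicaOffsetLag()));
        }
    }
}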
Use of org.apache.kafka.common.requests.AbstractResponse in the apache/kafka project.
From the class KafkaAdminClient, method describeUserScramCredentials.
@Override
public DescribeUserScramCredentialsResult describeUserScramCredentials(List<String> users, DescribeUserScramCredentialsOptions options) {
    final KafkaFutureImpl<DescribeUserScramCredentialsResponseData> dataFuture = new KafkaFutureImpl<>();
    final long now = time.milliseconds();
    Call call = new Call("describeUserScramCredentials", calcDeadlineMs(now, options.timeoutMs()), new LeastLoadedNodeProvider()) {

        @Override
        public DescribeUserScramCredentialsRequest.Builder createRequest(final int timeoutMs) {
            final DescribeUserScramCredentialsRequestData requestData = new DescribeUserScramCredentialsRequestData();
            if (users != null && !users.isEmpty()) {
                final List<UserName> userNames = new ArrayList<>(users.size());
                for (final String user : users) {
                    if (user != null) {
                        userNames.add(new UserName().setName(user));
                    }
                }
                requestData.setUsers(userNames);
            }
            return new DescribeUserScramCredentialsRequest.Builder(requestData);
        }

        @Override
        public void handleResponse(AbstractResponse abstractResponse) {
            DescribeUserScramCredentialsResponse response = (DescribeUserScramCredentialsResponse) abstractResponse;
            DescribeUserScramCredentialsResponseData data = response.data();
            short messageLevelErrorCode = data.errorCode();
            if (messageLevelErrorCode != Errors.NONE.code()) {
                dataFuture.completeExceptionally(Errors.forCode(messageLevelErrorCode).exception(data.errorMessage()));
            } else {
                dataFuture.complete(data);
            }
        }

        @Override
        void handleFailure(Throwable throwable) {
            dataFuture.completeExceptionally(throwable);
        }
    };
    runnable.call(call, now);
    return new DescribeUserScramCredentialsResult(dataFuture);
}
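A usage sketch for the public API over this call; the user name and bootstrap address are assumptions. Per the request-building logic above, passing a null or empty user list asks the broker to describe all users.

import java.util.List;
import java.util.Properties;
import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.clients.admin.AdminClientConfig;
import org.apache.kafka.clients.admin.UserScramCredentialsDescription;

public class DescribeScramExample {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // placeholder address
        try (Admin admin = Admin.create(props)) {
            // Describe the SCRAM credentials registered for user "alice".
            UserScramCredentialsDescription description =
                admin.describeUserScramCredentials(List.of("alice")).description("alice").get();
            description.credentialInfos().forEach(info ->
                System.out.println(info.mechanism() + " iterations=" + info.iterations()));
        }
    }
}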
Use of org.apache.kafka.common.requests.AbstractResponse in the apache/kafka project.
From the class KafkaAdminClient, method alterUserScramCredentials.
@Override
public AlterUserScramCredentialsResult alterUserScramCredentials(List<UserScramCredentialAlteration> alterations, AlterUserScramCredentialsOptions options) {
    final long now = time.milliseconds();
    final Map<String, KafkaFutureImpl<Void>> futures = new HashMap<>();
    for (UserScramCredentialAlteration alteration : alterations) {
        futures.put(alteration.user(), new KafkaFutureImpl<>());
    }
    final Map<String, Exception> userIllegalAlterationExceptions = new HashMap<>();
    // We need to keep track of users with deletions of an unknown SCRAM mechanism
    final String usernameMustNotBeEmptyMsg = "Username must not be empty";
    final String passwordMustNotBeEmptyMsg = "Password must not be empty";
    final String unknownScramMechanismMsg = "Unknown SCRAM mechanism";
    alterations.stream()
        .filter(a -> a instanceof UserScramCredentialDeletion)
        .forEach(alteration -> {
            final String user = alteration.user();
            if (user == null || user.isEmpty()) {
                userIllegalAlterationExceptions.put(alteration.user(), new UnacceptableCredentialException(usernameMustNotBeEmptyMsg));
            } else {
                UserScramCredentialDeletion deletion = (UserScramCredentialDeletion) alteration;
                ScramMechanism mechanism = deletion.mechanism();
                if (mechanism == null || mechanism == ScramMechanism.UNKNOWN) {
                    userIllegalAlterationExceptions.put(user, new UnsupportedSaslMechanismException(unknownScramMechanismMsg));
                }
            }
        });
    // Creating an upsertion may throw InvalidKeyException or NoSuchAlgorithmException,
    // so keep track of which users are affected by such a failure so we can fail all their alterations later
    final Map<String, Map<ScramMechanism, AlterUserScramCredentialsRequestData.ScramCredentialUpsertion>> userInsertions = new HashMap<>();
    alterations.stream()
        .filter(a -> a instanceof UserScramCredentialUpsertion)
        .filter(alteration -> !userIllegalAlterationExceptions.containsKey(alteration.user()))
        .forEach(alteration -> {
            final String user = alteration.user();
            if (user == null || user.isEmpty()) {
                userIllegalAlterationExceptions.put(alteration.user(), new UnacceptableCredentialException(usernameMustNotBeEmptyMsg));
            } else {
                UserScramCredentialUpsertion upsertion = (UserScramCredentialUpsertion) alteration;
                try {
                    byte[] password = upsertion.password();
                    if (password == null || password.length == 0) {
                        userIllegalAlterationExceptions.put(user, new UnacceptableCredentialException(passwordMustNotBeEmptyMsg));
                    } else {
                        ScramMechanism mechanism = upsertion.credentialInfo().mechanism();
                        if (mechanism == null || mechanism == ScramMechanism.UNKNOWN) {
                            userIllegalAlterationExceptions.put(user, new UnsupportedSaslMechanismException(unknownScramMechanismMsg));
                        } else {
                            userInsertions.putIfAbsent(user, new HashMap<>());
                            userInsertions.get(user).put(mechanism, getScramCredentialUpsertion(upsertion));
                        }
                    }
                } catch (NoSuchAlgorithmException e) {
                    // we might overwrite an exception from a previous alteration, but we don't really care
                    // since we just need to mark this user as having at least one illegal alteration
                    // and make an exception instance available for completing the corresponding future exceptionally
                    userIllegalAlterationExceptions.put(user, new UnsupportedSaslMechanismException(unknownScramMechanismMsg));
                } catch (InvalidKeyException e) {
                    // generally shouldn't happen since we deal with the empty password case above,
                    // but we still need to catch/handle it
                    userIllegalAlterationExceptions.put(user, new UnacceptableCredentialException(e.getMessage(), e));
                }
            }
        });
    // submit alterations only for users that do not have an illegal alteration as identified above
    Call call = new Call("alterUserScramCredentials", calcDeadlineMs(now, options.timeoutMs()), new ControllerNodeProvider()) {

        @Override
        public AlterUserScramCredentialsRequest.Builder createRequest(int timeoutMs) {
            return new AlterUserScramCredentialsRequest.Builder(new AlterUserScramCredentialsRequestData()
                .setUpsertions(alterations.stream()
                    .filter(a -> a instanceof UserScramCredentialUpsertion)
                    .filter(a -> !userIllegalAlterationExceptions.containsKey(a.user()))
                    .map(a -> userInsertions.get(a.user()).get(((UserScramCredentialUpsertion) a).credentialInfo().mechanism()))
                    .collect(Collectors.toList()))
                .setDeletions(alterations.stream()
                    .filter(a -> a instanceof UserScramCredentialDeletion)
                    .filter(a -> !userIllegalAlterationExceptions.containsKey(a.user()))
                    .map(d -> getScramCredentialDeletion((UserScramCredentialDeletion) d))
                    .collect(Collectors.toList())));
        }

        @Override
        public void handleResponse(AbstractResponse abstractResponse) {
            AlterUserScramCredentialsResponse response = (AlterUserScramCredentialsResponse) abstractResponse;
            // Check for controller change
            for (Errors error : response.errorCounts().keySet()) {
                if (error == Errors.NOT_CONTROLLER) {
                    handleNotControllerError(error);
                }
            }
            /* Now that we have the results for the ones we sent,
             * fail any users that have an illegal alteration as identified above.
             * Be sure to do this after the NOT_CONTROLLER error check above
             * so that all errors are consistent in that case.
             */
            userIllegalAlterationExceptions.entrySet().stream().forEach(entry -> {
                futures.get(entry.getKey()).completeExceptionally(entry.getValue());
            });
            response.data().results().forEach(result -> {
                KafkaFutureImpl<Void> future = futures.get(result.user());
                if (future == null) {
                    log.warn("Server response mentioned unknown user {}", result.user());
                } else {
                    Errors error = Errors.forCode(result.errorCode());
                    if (error != Errors.NONE) {
                        future.completeExceptionally(error.exception(result.errorMessage()));
                    } else {
                        future.complete(null);
                    }
                }
            });
            completeUnrealizedFutures(futures.entrySet().stream(),
                user -> "The broker response did not contain a result for user " + user);
        }

        @Override
        void handleFailure(Throwable throwable) {
            completeAllExceptionally(futures.values(), throwable);
        }
    };
    runnable.call(call, now);
    return new AlterUserScramCredentialsResult(new HashMap<>(futures));
}
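Finally, a caller-side sketch for alterUserScramCredentials; the user names, password, and iteration count are illustrative assumptions. Alterations that fail the client-side validation above (empty user, empty password, unknown mechanism) are filtered out of the request and their futures are completed exceptionally on the client.

import java.util.List;
import java.util.Properties;
import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.clients.admin.AdminClientConfig;
import org.apache.kafka.clients.admin.ScramCredentialInfo;
import org.apache.kafka.clients.admin.ScramMechanism;
import org.apache.kafka.clients.admin.UserScramCredentialDeletion;
import org.apache.kafka.clients.admin.UserScramCredentialUpsertion;

public class AlterScramExample {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // placeholder address
        try (Admin admin = Admin.create(props)) {
            // Upsert a SCRAM-SHA-256 credential for "alice" and delete "bob"'s SCRAM-SHA-512 one.
            ScramCredentialInfo info = new ScramCredentialInfo(ScramMechanism.SCRAM_SHA_256, 8192);
            admin.alterUserScramCredentials(List.of(
                new UserScramCredentialUpsertion("alice", info, "alice-secret"),
                new UserScramCredentialDeletion("bob", ScramMechanism.SCRAM_SHA_512)
            )).all().get();
        }
    }
}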