Use of org.apache.kafka.common.protocol.Errors in project kafka by apache: the class LeaderAndIsrResponse, method errorCounts().
@Override
public Map<Errors, Integer> errorCounts() {
    Errors error = error();
    if (error != Errors.NONE) {
        // Minor optimization since the top-level error applies to all partitions
        if (version < 5)
            return Collections.singletonMap(error, data.partitionErrors().size() + 1);
        return Collections.singletonMap(error, data.topics().stream().mapToInt(t -> t.partitionErrors().size()).sum() + 1);
    }
    Map<Errors, Integer> errors;
    if (version < 5)
        errors = errorCounts(data.partitionErrors().stream().map(l -> Errors.forCode(l.errorCode())));
    else
        errors = errorCounts(data.topics().stream().flatMap(t -> t.partitionErrors().stream()).map(l -> Errors.forCode(l.errorCode())));
    updateErrorCounts(errors, Errors.NONE);
    return errors;
}
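For context, a minimal sketch (not taken from the Kafka sources) of how a caller might consume the Map<Errors, Integer> returned by errorCounts(); the response variable stands in for any AbstractResponse obtained elsewhere.

    // Hypothetical caller: report every non-NONE error and how many partitions it affected
    Map<Errors, Integer> counts = response.errorCounts();
    counts.forEach((error, count) -> {
        if (error != Errors.NONE)
            System.out.println("Error " + error + " reported for " + count + " partition(s)");
    });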
Use of org.apache.kafka.common.protocol.Errors in project kafka by apache: the class MetadataTest, method testMetadataTopicErrors().
@Test
public void testMetadataTopicErrors() {
    Time time = new MockTime();
    Map<String, Errors> topicErrors = new HashMap<>(3);
    topicErrors.put("invalidTopic", Errors.INVALID_TOPIC_EXCEPTION);
    topicErrors.put("sensitiveTopic1", Errors.TOPIC_AUTHORIZATION_FAILED);
    topicErrors.put("sensitiveTopic2", Errors.TOPIC_AUTHORIZATION_FAILED);
    MetadataResponse metadataResponse = RequestTestUtils.metadataUpdateWith("clusterId", 1, topicErrors, Collections.emptyMap());
    metadata.updateWithCurrentRequestVersion(metadataResponse, false, time.milliseconds());
    TopicAuthorizationException e1 = assertThrows(TopicAuthorizationException.class,
        () -> metadata.maybeThrowExceptionForTopic("sensitiveTopic1"));
    assertEquals(Collections.singleton("sensitiveTopic1"), e1.unauthorizedTopics());
    // We clear the exception once it has been raised to the user
    metadata.maybeThrowAnyException();
    metadata.updateWithCurrentRequestVersion(metadataResponse, false, time.milliseconds());
    TopicAuthorizationException e2 = assertThrows(TopicAuthorizationException.class,
        () -> metadata.maybeThrowExceptionForTopic("sensitiveTopic2"));
    assertEquals(Collections.singleton("sensitiveTopic2"), e2.unauthorizedTopics());
    metadata.maybeThrowAnyException();
    metadata.updateWithCurrentRequestVersion(metadataResponse, false, time.milliseconds());
    InvalidTopicException e3 = assertThrows(InvalidTopicException.class,
        () -> metadata.maybeThrowExceptionForTopic("invalidTopic"));
    assertEquals(Collections.singleton("invalidTopic"), e3.invalidTopics());
    metadata.maybeThrowAnyException();
    // Other topics should not throw an exception, but they should clear any existing exception
    metadata.updateWithCurrentRequestVersion(metadataResponse, false, time.milliseconds());
    metadata.maybeThrowExceptionForTopic("anotherTopic");
    metadata.maybeThrowAnyException();
}
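The assertions above rely on the mapping from an Errors constant to its concrete ApiException subclass. A small illustrative sketch, using only the public Errors API and not taken from the test itself:

    // Each Errors constant can materialize its corresponding ApiException subclass
    ApiException invalid = Errors.INVALID_TOPIC_EXCEPTION.exception();    // an InvalidTopicException
    ApiException denied = Errors.TOPIC_AUTHORIZATION_FAILED.exception();  // a TopicAuthorizationException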
Use of org.apache.kafka.common.protocol.Errors in project kafka by apache: the class EndTxnResponseTest, method testConstructor().
@Test
public void testConstructor() {
    int throttleTimeMs = 10;
    EndTxnResponseData data = new EndTxnResponseData()
        .setErrorCode(Errors.NOT_COORDINATOR.code())
        .setThrottleTimeMs(throttleTimeMs);
    Map<Errors, Integer> expectedErrorCounts = Collections.singletonMap(Errors.NOT_COORDINATOR, 1);
    for (short version : ApiKeys.END_TXN.allVersions()) {
        EndTxnResponse response = new EndTxnResponse(data);
        assertEquals(expectedErrorCounts, response.errorCounts());
        assertEquals(throttleTimeMs, response.throttleTimeMs());
        assertEquals(version >= 1, response.shouldClientThrottle(version));
        response = EndTxnResponse.parse(response.serialize(version), version);
        assertEquals(expectedErrorCounts, response.errorCounts());
        assertEquals(throttleTimeMs, response.throttleTimeMs());
        assertEquals(version >= 1, response.shouldClientThrottle(version));
    }
}
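As a side note, errorCounts() recovers the enum constant from the serialized error code; a minimal sketch of that round trip, using only the public Errors API:

    // Round trip: enum constant -> wire code -> enum constant
    short code = Errors.NOT_COORDINATOR.code();
    Errors error = Errors.forCode(code);
    // error == Errors.NOT_COORDINATOR, and error.exception() is a NotCoordinatorException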
Use of org.apache.kafka.common.protocol.Errors in project kafka by apache: the class KafkaAdminClient, method alterUserScramCredentials().
@Override
public AlterUserScramCredentialsResult alterUserScramCredentials(List<UserScramCredentialAlteration> alterations, AlterUserScramCredentialsOptions options) {
    final long now = time.milliseconds();
    final Map<String, KafkaFutureImpl<Void>> futures = new HashMap<>();
    for (UserScramCredentialAlteration alteration : alterations) {
        futures.put(alteration.user(), new KafkaFutureImpl<>());
    }
    final Map<String, Exception> userIllegalAlterationExceptions = new HashMap<>();
    // We need to keep track of users with deletions of an unknown SCRAM mechanism
    final String usernameMustNotBeEmptyMsg = "Username must not be empty";
    final String passwordMustNotBeEmptyMsg = "Password must not be empty";
    final String unknownScramMechanismMsg = "Unknown SCRAM mechanism";
    alterations.stream().filter(a -> a instanceof UserScramCredentialDeletion).forEach(alteration -> {
        final String user = alteration.user();
        if (user == null || user.isEmpty()) {
            userIllegalAlterationExceptions.put(alteration.user(), new UnacceptableCredentialException(usernameMustNotBeEmptyMsg));
        } else {
            UserScramCredentialDeletion deletion = (UserScramCredentialDeletion) alteration;
            ScramMechanism mechanism = deletion.mechanism();
            if (mechanism == null || mechanism == ScramMechanism.UNKNOWN) {
                userIllegalAlterationExceptions.put(user, new UnsupportedSaslMechanismException(unknownScramMechanismMsg));
            }
        }
    });
    // Creating an upsertion may throw InvalidKeyException or NoSuchAlgorithmException,
    // so keep track of which users are affected by such a failure so we can fail all their alterations later
    final Map<String, Map<ScramMechanism, AlterUserScramCredentialsRequestData.ScramCredentialUpsertion>> userInsertions = new HashMap<>();
    alterations.stream().filter(a -> a instanceof UserScramCredentialUpsertion)
        .filter(alteration -> !userIllegalAlterationExceptions.containsKey(alteration.user()))
        .forEach(alteration -> {
            final String user = alteration.user();
            if (user == null || user.isEmpty()) {
                userIllegalAlterationExceptions.put(alteration.user(), new UnacceptableCredentialException(usernameMustNotBeEmptyMsg));
            } else {
                UserScramCredentialUpsertion upsertion = (UserScramCredentialUpsertion) alteration;
                try {
                    byte[] password = upsertion.password();
                    if (password == null || password.length == 0) {
                        userIllegalAlterationExceptions.put(user, new UnacceptableCredentialException(passwordMustNotBeEmptyMsg));
                    } else {
                        ScramMechanism mechanism = upsertion.credentialInfo().mechanism();
                        if (mechanism == null || mechanism == ScramMechanism.UNKNOWN) {
                            userIllegalAlterationExceptions.put(user, new UnsupportedSaslMechanismException(unknownScramMechanismMsg));
                        } else {
                            userInsertions.putIfAbsent(user, new HashMap<>());
                            userInsertions.get(user).put(mechanism, getScramCredentialUpsertion(upsertion));
                        }
                    }
                } catch (NoSuchAlgorithmException e) {
                    // we might overwrite an exception from a previous alteration, but we don't really care
                    // since we just need to mark this user as having at least one illegal alteration
                    // and make an exception instance available for completing the corresponding future exceptionally
                    userIllegalAlterationExceptions.put(user, new UnsupportedSaslMechanismException(unknownScramMechanismMsg));
                } catch (InvalidKeyException e) {
                    // generally shouldn't happen since we deal with the empty password case above,
                    // but we still need to catch/handle it
                    userIllegalAlterationExceptions.put(user, new UnacceptableCredentialException(e.getMessage(), e));
                }
            }
        });
    // submit alterations only for users that do not have an illegal alteration as identified above
    Call call = new Call("alterUserScramCredentials", calcDeadlineMs(now, options.timeoutMs()), new ControllerNodeProvider()) {

        @Override
        public AlterUserScramCredentialsRequest.Builder createRequest(int timeoutMs) {
            return new AlterUserScramCredentialsRequest.Builder(
                new AlterUserScramCredentialsRequestData()
                    .setUpsertions(alterations.stream()
                        .filter(a -> a instanceof UserScramCredentialUpsertion)
                        .filter(a -> !userIllegalAlterationExceptions.containsKey(a.user()))
                        .map(a -> userInsertions.get(a.user()).get(((UserScramCredentialUpsertion) a).credentialInfo().mechanism()))
                        .collect(Collectors.toList()))
                    .setDeletions(alterations.stream()
                        .filter(a -> a instanceof UserScramCredentialDeletion)
                        .filter(a -> !userIllegalAlterationExceptions.containsKey(a.user()))
                        .map(d -> getScramCredentialDeletion((UserScramCredentialDeletion) d))
                        .collect(Collectors.toList())));
        }

        @Override
        public void handleResponse(AbstractResponse abstractResponse) {
            AlterUserScramCredentialsResponse response = (AlterUserScramCredentialsResponse) abstractResponse;
            // Check for controller change
            for (Errors error : response.errorCounts().keySet()) {
                if (error == Errors.NOT_CONTROLLER) {
                    handleNotControllerError(error);
                }
            }
            /* Now that we have the results for the ones we sent,
             * fail any users that have an illegal alteration as identified above.
             * Be sure to do this after the NOT_CONTROLLER error check above
             * so that all errors are consistent in that case.
             */
            userIllegalAlterationExceptions.entrySet().stream().forEach(entry -> {
                futures.get(entry.getKey()).completeExceptionally(entry.getValue());
            });
            response.data().results().forEach(result -> {
                KafkaFutureImpl<Void> future = futures.get(result.user());
                if (future == null) {
                    log.warn("Server response mentioned unknown user {}", result.user());
                } else {
                    Errors error = Errors.forCode(result.errorCode());
                    if (error != Errors.NONE) {
                        future.completeExceptionally(error.exception(result.errorMessage()));
                    } else {
                        future.complete(null);
                    }
                }
            });
            completeUnrealizedFutures(futures.entrySet().stream(), user -> "The broker response did not contain a result for user " + user);
        }

        @Override
        void handleFailure(Throwable throwable) {
            completeAllExceptionally(futures.values(), throwable);
        }
    };
    runnable.call(call, now);
    return new AlterUserScramCredentialsResult(new HashMap<>(futures));
}
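A hedged usage sketch of the public Admin API that ends up in this method; the admin instance, user names and password are illustrative and not taken from the Kafka sources:

    // Upsert one SCRAM credential and delete another, then wait for the per-user futures
    List<UserScramCredentialAlteration> alterations = Arrays.asList(
        new UserScramCredentialUpsertion("alice",
            new ScramCredentialInfo(ScramMechanism.SCRAM_SHA_256, 8192), "alice-secret"),
        new UserScramCredentialDeletion("bob", ScramMechanism.SCRAM_SHA_512));
    AlterUserScramCredentialsResult result = admin.alterUserScramCredentials(alterations);
    result.all().get();  // throws ExecutionException if any alteration failed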
Use of org.apache.kafka.common.protocol.Errors in project kafka by apache: the class KafkaAdminClient, method getListOffsetsCalls().
// visible for benchmark
List<Call> getListOffsetsCalls(MetadataOperationContext<ListOffsetsResultInfo, ListOffsetsOptions> context,
                               Map<TopicPartition, OffsetSpec> topicPartitionOffsets,
                               Map<TopicPartition, KafkaFutureImpl<ListOffsetsResultInfo>> futures) {
    MetadataResponse mr = context.response().orElseThrow(() -> new IllegalStateException("No Metadata response"));
    Cluster clusterSnapshot = mr.buildCluster();
    List<Call> calls = new ArrayList<>();
    // grouping topic partitions per leader
    Map<Node, Map<String, ListOffsetsTopic>> leaders = new HashMap<>();
    for (Map.Entry<TopicPartition, OffsetSpec> entry : topicPartitionOffsets.entrySet()) {
        OffsetSpec offsetSpec = entry.getValue();
        TopicPartition tp = entry.getKey();
        KafkaFutureImpl<ListOffsetsResultInfo> future = futures.get(tp);
        long offsetQuery = getOffsetFromOffsetSpec(offsetSpec);
        // avoid sending listOffsets request for topics with errors
        if (!mr.errors().containsKey(tp.topic())) {
            Node node = clusterSnapshot.leaderFor(tp);
            if (node != null) {
                Map<String, ListOffsetsTopic> leadersOnNode = leaders.computeIfAbsent(node, k -> new HashMap<>());
                ListOffsetsTopic topic = leadersOnNode.computeIfAbsent(tp.topic(), k -> new ListOffsetsTopic().setName(tp.topic()));
                topic.partitions().add(new ListOffsetsPartition().setPartitionIndex(tp.partition()).setTimestamp(offsetQuery));
            } else {
                future.completeExceptionally(Errors.LEADER_NOT_AVAILABLE.exception());
            }
        } else {
            future.completeExceptionally(mr.errors().get(tp.topic()).exception());
        }
    }
    for (final Map.Entry<Node, Map<String, ListOffsetsTopic>> entry : leaders.entrySet()) {
        final int brokerId = entry.getKey().id();
        calls.add(new Call("listOffsets on broker " + brokerId, context.deadline(), new ConstantNodeIdProvider(brokerId)) {

            final List<ListOffsetsTopic> partitionsToQuery = new ArrayList<>(entry.getValue().values());

            private boolean supportsMaxTimestamp = partitionsToQuery.stream()
                .flatMap(t -> t.partitions().stream())
                .anyMatch(p -> p.timestamp() == ListOffsetsRequest.MAX_TIMESTAMP);

            @Override
            ListOffsetsRequest.Builder createRequest(int timeoutMs) {
                return ListOffsetsRequest.Builder
                    .forConsumer(true, context.options().isolationLevel(), supportsMaxTimestamp)
                    .setTargetTimes(partitionsToQuery);
            }

            @Override
            void handleResponse(AbstractResponse abstractResponse) {
                ListOffsetsResponse response = (ListOffsetsResponse) abstractResponse;
                Map<TopicPartition, OffsetSpec> retryTopicPartitionOffsets = new HashMap<>();
                for (ListOffsetsTopicResponse topic : response.topics()) {
                    for (ListOffsetsPartitionResponse partition : topic.partitions()) {
                        TopicPartition tp = new TopicPartition(topic.name(), partition.partitionIndex());
                        KafkaFutureImpl<ListOffsetsResultInfo> future = futures.get(tp);
                        Errors error = Errors.forCode(partition.errorCode());
                        OffsetSpec offsetRequestSpec = topicPartitionOffsets.get(tp);
                        if (offsetRequestSpec == null) {
                            log.warn("Server response mentioned unknown topic partition {}", tp);
                        } else if (MetadataOperationContext.shouldRefreshMetadata(error)) {
                            retryTopicPartitionOffsets.put(tp, offsetRequestSpec);
                        } else if (error == Errors.NONE) {
                            Optional<Integer> leaderEpoch = (partition.leaderEpoch() == ListOffsetsResponse.UNKNOWN_EPOCH)
                                ? Optional.empty()
                                : Optional.of(partition.leaderEpoch());
                            future.complete(new ListOffsetsResultInfo(partition.offset(), partition.timestamp(), leaderEpoch));
                        } else {
                            future.completeExceptionally(error.exception());
                        }
                    }
                }
                if (retryTopicPartitionOffsets.isEmpty()) {
                    // The server should send back a response for every topic partition. But do a sanity check anyway.
                    for (ListOffsetsTopic topic : partitionsToQuery) {
                        for (ListOffsetsPartition partition : topic.partitions()) {
                            TopicPartition tp = new TopicPartition(topic.name(), partition.partitionIndex());
                            ApiException error = new ApiException("The response from broker " + brokerId + " did not contain a result for topic partition " + tp);
                            futures.get(tp).completeExceptionally(error);
                        }
                    }
                } else {
                    Set<String> retryTopics = retryTopicPartitionOffsets.keySet().stream().map(TopicPartition::topic).collect(Collectors.toSet());
                    MetadataOperationContext<ListOffsetsResultInfo, ListOffsetsOptions> retryContext =
                        new MetadataOperationContext<>(retryTopics, context.options(), context.deadline(), futures);
                    rescheduleMetadataTask(retryContext, () -> getListOffsetsCalls(retryContext, retryTopicPartitionOffsets, futures));
                }
            }

            @Override
            void handleFailure(Throwable throwable) {
                for (ListOffsetsTopic topic : entry.getValue().values()) {
                    for (ListOffsetsPartition partition : topic.partitions()) {
                        TopicPartition tp = new TopicPartition(topic.name(), partition.partitionIndex());
                        KafkaFutureImpl<ListOffsetsResultInfo> future = futures.get(tp);
                        future.completeExceptionally(throwable);
                    }
                }
            }

            @Override
            boolean handleUnsupportedVersionException(UnsupportedVersionException exception) {
                if (supportsMaxTimestamp) {
                    supportsMaxTimestamp = false;
                    // fail any unsupported futures and remove partitions from the downgraded retry
                    Iterator<ListOffsetsTopic> topicIterator = partitionsToQuery.iterator();
                    while (topicIterator.hasNext()) {
                        ListOffsetsTopic topic = topicIterator.next();
                        Iterator<ListOffsetsPartition> partitionIterator = topic.partitions().iterator();
                        while (partitionIterator.hasNext()) {
                            ListOffsetsPartition partition = partitionIterator.next();
                            if (partition.timestamp() == ListOffsetsRequest.MAX_TIMESTAMP) {
                                futures.get(new TopicPartition(topic.name(), partition.partitionIndex()))
                                    .completeExceptionally(new UnsupportedVersionException("Broker " + brokerId + " does not support MAX_TIMESTAMP offset spec"));
                                partitionIterator.remove();
                            }
                        }
                        if (topic.partitions().isEmpty()) {
                            topicIterator.remove();
                        }
                    }
                    return !partitionsToQuery.isEmpty();
                }
                return false;
            }
        });
    }
    return calls;
}
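A hedged usage sketch of the public listOffsets API whose per-leader calls are assembled above; the topic name, partition and admin instance are illustrative and not taken from the Kafka sources:

    // Ask for the latest offset of one partition and wait for the result
    TopicPartition tp = new TopicPartition("my-topic", 0);
    ListOffsetsResult result = admin.listOffsets(Collections.singletonMap(tp, OffsetSpec.latest()));
    ListOffsetsResultInfo info = result.partitionResult(tp).get();
    System.out.println("latest offset = " + info.offset() + ", leaderEpoch = " + info.leaderEpoch());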