Use of org.apache.kafka.common.protocol.Errors in project kafka by apache.
Class KafkaAdminClient, method handleDescribeTopicsByIds.
private Map<Uuid, KafkaFuture<TopicDescription>> handleDescribeTopicsByIds(Collection<Uuid> topicIds, DescribeTopicsOptions options) {
    final Map<Uuid, KafkaFutureImpl<TopicDescription>> topicFutures = new HashMap<>(topicIds.size());
    final List<Uuid> topicIdsList = new ArrayList<>();
    for (Uuid topicId : topicIds) {
        if (topicIdIsUnrepresentable(topicId)) {
            KafkaFutureImpl<TopicDescription> future = new KafkaFutureImpl<>();
            future.completeExceptionally(new InvalidTopicException("The given topic id '" + topicId + "' cannot be represented in a request."));
            topicFutures.put(topicId, future);
        } else if (!topicFutures.containsKey(topicId)) {
            topicFutures.put(topicId, new KafkaFutureImpl<>());
            topicIdsList.add(topicId);
        }
    }
    final long now = time.milliseconds();
    Call call = new Call("describeTopicsWithIds", calcDeadlineMs(now, options.timeoutMs()), new LeastLoadedNodeProvider()) {

        @Override
        MetadataRequest.Builder createRequest(int timeoutMs) {
            return new MetadataRequest.Builder(new MetadataRequestData()
                .setTopics(convertTopicIdsToMetadataRequestTopic(topicIdsList))
                .setAllowAutoTopicCreation(false)
                .setIncludeTopicAuthorizedOperations(options.includeAuthorizedOperations()));
        }

        @Override
        void handleResponse(AbstractResponse abstractResponse) {
            MetadataResponse response = (MetadataResponse) abstractResponse;
            // Handle server responses for particular topics.
            Cluster cluster = response.buildCluster();
            Map<Uuid, Errors> errors = response.errorsByTopicId();
            for (Map.Entry<Uuid, KafkaFutureImpl<TopicDescription>> entry : topicFutures.entrySet()) {
                Uuid topicId = entry.getKey();
                KafkaFutureImpl<TopicDescription> future = entry.getValue();
                String topicName = cluster.topicName(topicId);
                if (topicName == null) {
                    future.completeExceptionally(new InvalidTopicException("TopicId " + topicId + " not found."));
                    continue;
                }
                Errors topicError = errors.get(topicId);
                if (topicError != null) {
                    future.completeExceptionally(topicError.exception());
                    continue;
                }
                Integer authorizedOperations = response.topicAuthorizedOperations(topicName).get();
                TopicDescription topicDescription = getTopicDescriptionFromCluster(cluster, topicName, topicId, authorizedOperations);
                future.complete(topicDescription);
            }
        }

        @Override
        void handleFailure(Throwable throwable) {
            completeAllExceptionally(topicFutures.values(), throwable);
        }
    };
    if (!topicIdsList.isEmpty()) {
        runnable.call(call, now);
    }
    return new HashMap<>(topicFutures);
}
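For context, this handler backs the public Admin#describeTopics API when topics are looked up by ID, with Errors from the metadata response mapped onto per-topic futures. Below is a minimal caller-side sketch, assuming a Kafka clients version that supports describing topics by ID (3.1 or later); the bootstrap address and the topic ID are placeholders, not values from the code above.

import java.util.Collections;
import java.util.Map;
import java.util.Properties;

import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.clients.admin.AdminClientConfig;
import org.apache.kafka.clients.admin.DescribeTopicsOptions;
import org.apache.kafka.clients.admin.DescribeTopicsResult;
import org.apache.kafka.clients.admin.TopicDescription;
import org.apache.kafka.common.KafkaFuture;
import org.apache.kafka.common.TopicCollection;
import org.apache.kafka.common.Uuid;

public class DescribeTopicByIdExample {

    public static void main(String[] args) {
        Properties props = new Properties();
        // placeholder bootstrap address
        props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
        // placeholder: in practice the ID comes from earlier metadata, e.g. a previous describe or list call
        Uuid topicId = Uuid.randomUuid();
        try (Admin admin = Admin.create(props)) {
            DescribeTopicsResult result = admin.describeTopics(
                    TopicCollection.ofTopicIds(Collections.singleton(topicId)),
                    new DescribeTopicsOptions().includeAuthorizedOperations(false));
            Map<Uuid, KafkaFuture<TopicDescription>> futures = result.topicIdValues();
            try {
                TopicDescription description = futures.get(topicId).get();
                System.out.println("Topic " + description.name() + " has " + description.partitions().size() + " partitions");
            } catch (Exception e) {
                // Unknown or unrepresentable topic IDs surface here, e.g. as InvalidTopicException
                System.err.println("Describe by ID failed: " + e.getCause());
            }
        }
    }
}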
Use of org.apache.kafka.common.protocol.Errors in project kafka by apache.
Class KafkaAdminClient, method handleDescribeTopicsByNames.
private Map<String, KafkaFuture<TopicDescription>> handleDescribeTopicsByNames(final Collection<String> topicNames, DescribeTopicsOptions options) {
    final Map<String, KafkaFutureImpl<TopicDescription>> topicFutures = new HashMap<>(topicNames.size());
    final ArrayList<String> topicNamesList = new ArrayList<>();
    for (String topicName : topicNames) {
        if (topicNameIsUnrepresentable(topicName)) {
            KafkaFutureImpl<TopicDescription> future = new KafkaFutureImpl<>();
            future.completeExceptionally(new InvalidTopicException("The given topic name '" + topicName + "' cannot be represented in a request."));
            topicFutures.put(topicName, future);
        } else if (!topicFutures.containsKey(topicName)) {
            topicFutures.put(topicName, new KafkaFutureImpl<>());
            topicNamesList.add(topicName);
        }
    }
    final long now = time.milliseconds();
    Call call = new Call("describeTopics", calcDeadlineMs(now, options.timeoutMs()), new LeastLoadedNodeProvider()) {

        private boolean supportsDisablingTopicCreation = true;

        @Override
        MetadataRequest.Builder createRequest(int timeoutMs) {
            if (supportsDisablingTopicCreation)
                return new MetadataRequest.Builder(new MetadataRequestData()
                    .setTopics(convertToMetadataRequestTopic(topicNamesList))
                    .setAllowAutoTopicCreation(false)
                    .setIncludeTopicAuthorizedOperations(options.includeAuthorizedOperations()));
            else
                return MetadataRequest.Builder.allTopics();
        }

        @Override
        void handleResponse(AbstractResponse abstractResponse) {
            MetadataResponse response = (MetadataResponse) abstractResponse;
            // Handle server responses for particular topics.
            Cluster cluster = response.buildCluster();
            Map<String, Errors> errors = response.errors();
            for (Map.Entry<String, KafkaFutureImpl<TopicDescription>> entry : topicFutures.entrySet()) {
                String topicName = entry.getKey();
                KafkaFutureImpl<TopicDescription> future = entry.getValue();
                Errors topicError = errors.get(topicName);
                if (topicError != null) {
                    future.completeExceptionally(topicError.exception());
                    continue;
                }
                if (!cluster.topics().contains(topicName)) {
                    future.completeExceptionally(new UnknownTopicOrPartitionException("Topic " + topicName + " not found."));
                    continue;
                }
                Uuid topicId = cluster.topicId(topicName);
                Integer authorizedOperations = response.topicAuthorizedOperations(topicName).get();
                TopicDescription topicDescription = getTopicDescriptionFromCluster(cluster, topicName, topicId, authorizedOperations);
                future.complete(topicDescription);
            }
        }

        @Override
        boolean handleUnsupportedVersionException(UnsupportedVersionException exception) {
            if (supportsDisablingTopicCreation) {
                supportsDisablingTopicCreation = false;
                return true;
            }
            return false;
        }

        @Override
        void handleFailure(Throwable throwable) {
            completeAllExceptionally(topicFutures.values(), throwable);
        }
    };
    if (!topicNamesList.isEmpty()) {
        runnable.call(call, now);
    }
    return new HashMap<>(topicFutures);
}
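A minimal caller-side sketch of the by-name path follows, assuming Kafka clients 3.1 or later (older clients expose the same map through the deprecated values() accessor); the bootstrap address and topic names are placeholders. It shows how the per-topic Errors mapped above surface as exceptions on the individual futures, for example UnknownTopicOrPartitionException for a missing topic or TopicAuthorizationException when the caller lacks DESCRIBE.

import java.util.Arrays;
import java.util.Map;
import java.util.Properties;
import java.util.concurrent.ExecutionException;

import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.clients.admin.AdminClientConfig;
import org.apache.kafka.clients.admin.TopicDescription;
import org.apache.kafka.common.KafkaFuture;

public class DescribeTopicsByNameExample {

    public static void main(String[] args) throws InterruptedException {
        Properties props = new Properties();
        props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // placeholder address
        try (Admin admin = Admin.create(props)) {
            // "orders" and "no-such-topic" are placeholder topic names
            Map<String, KafkaFuture<TopicDescription>> futures =
                    admin.describeTopics(Arrays.asList("orders", "no-such-topic")).topicNameValues();
            for (Map.Entry<String, KafkaFuture<TopicDescription>> entry : futures.entrySet()) {
                try {
                    TopicDescription description = entry.getValue().get();
                    System.out.println(entry.getKey() + ": " + description.partitions().size() + " partitions");
                } catch (ExecutionException e) {
                    // e.g. UnknownTopicOrPartitionException or TopicAuthorizationException for this topic only
                    System.err.println(entry.getKey() + ": " + e.getCause());
                }
            }
        }
    }
}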
Use of org.apache.kafka.common.protocol.Errors in project kafka by apache.
Class KafkaAdminClient, method describeLogDirs.
@Override
public DescribeLogDirsResult describeLogDirs(Collection<Integer> brokers, DescribeLogDirsOptions options) {
    final Map<Integer, KafkaFutureImpl<Map<String, LogDirDescription>>> futures = new HashMap<>(brokers.size());
    final long now = time.milliseconds();
    for (final Integer brokerId : brokers) {
        KafkaFutureImpl<Map<String, LogDirDescription>> future = new KafkaFutureImpl<>();
        futures.put(brokerId, future);
        runnable.call(new Call("describeLogDirs", calcDeadlineMs(now, options.timeoutMs()), new ConstantNodeIdProvider(brokerId)) {

            @Override
            public DescribeLogDirsRequest.Builder createRequest(int timeoutMs) {
                // Query selected partitions in all log directories
                return new DescribeLogDirsRequest.Builder(new DescribeLogDirsRequestData().setTopics(null));
            }

            @Override
            public void handleResponse(AbstractResponse abstractResponse) {
                DescribeLogDirsResponse response = (DescribeLogDirsResponse) abstractResponse;
                Map<String, LogDirDescription> descriptions = logDirDescriptions(response);
                if (descriptions.size() > 0) {
                    future.complete(descriptions);
                } else {
                    // Up to v3 DescribeLogDirsResponse did not have an error code field, hence it defaults to None
                    Errors error = response.data().errorCode() == Errors.NONE.code()
                        ? Errors.CLUSTER_AUTHORIZATION_FAILED
                        : Errors.forCode(response.data().errorCode());
                    future.completeExceptionally(error.exception());
                }
            }

            @Override
            void handleFailure(Throwable throwable) {
                future.completeExceptionally(throwable);
            }
        }, now);
    }
    return new DescribeLogDirsResult(new HashMap<>(futures));
}
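As a caller-side counterpart, here is a minimal sketch that queries the log directories of one broker and prints the per-replica sizes, assuming Kafka clients 2.7 or later (where LogDirDescription and allDescriptions() are available); the broker id and bootstrap address are placeholders. A ClusterAuthorizationException, mapped from Errors.CLUSTER_AUTHORIZATION_FAILED above, surfaces through the future.

import java.util.Collections;
import java.util.Map;
import java.util.Properties;
import java.util.concurrent.ExecutionException;

import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.clients.admin.AdminClientConfig;
import org.apache.kafka.clients.admin.LogDirDescription;
import org.apache.kafka.clients.admin.ReplicaInfo;
import org.apache.kafka.common.TopicPartition;

public class DescribeLogDirsExample {

    public static void main(String[] args) throws InterruptedException {
        Properties props = new Properties();
        props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // placeholder address
        int brokerId = 0; // placeholder broker id
        try (Admin admin = Admin.create(props)) {
            try {
                Map<Integer, Map<String, LogDirDescription>> all =
                        admin.describeLogDirs(Collections.singleton(brokerId)).allDescriptions().get();
                for (Map.Entry<String, LogDirDescription> dir : all.get(brokerId).entrySet()) {
                    System.out.println("Log dir " + dir.getKey());
                    for (Map.Entry<TopicPartition, ReplicaInfo> replica : dir.getValue().replicaInfos().entrySet()) {
                        System.out.println("  " + replica.getKey() + " size=" + replica.getValue().size());
                    }
                }
            } catch (ExecutionException e) {
                // e.g. ClusterAuthorizationException when the principal lacks DESCRIBE on the cluster
                System.err.println("describeLogDirs failed: " + e.getCause());
            }
        }
    }
}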
Use of org.apache.kafka.common.protocol.Errors in project kafka by apache.
Class KafkaAdminClient, method createAcls.
@Override
public CreateAclsResult createAcls(Collection<AclBinding> acls, CreateAclsOptions options) {
    final long now = time.milliseconds();
    final Map<AclBinding, KafkaFutureImpl<Void>> futures = new HashMap<>();
    final List<AclCreation> aclCreations = new ArrayList<>();
    final List<AclBinding> aclBindingsSent = new ArrayList<>();
    for (AclBinding acl : acls) {
        if (futures.get(acl) == null) {
            KafkaFutureImpl<Void> future = new KafkaFutureImpl<>();
            futures.put(acl, future);
            String indefinite = acl.toFilter().findIndefiniteField();
            if (indefinite == null) {
                aclCreations.add(CreateAclsRequest.aclCreation(acl));
                aclBindingsSent.add(acl);
            } else {
                future.completeExceptionally(new InvalidRequestException("Invalid ACL creation: " + indefinite));
            }
        }
    }
    final CreateAclsRequestData data = new CreateAclsRequestData().setCreations(aclCreations);
    runnable.call(new Call("createAcls", calcDeadlineMs(now, options.timeoutMs()), new LeastLoadedNodeProvider()) {

        @Override
        CreateAclsRequest.Builder createRequest(int timeoutMs) {
            return new CreateAclsRequest.Builder(data);
        }

        @Override
        void handleResponse(AbstractResponse abstractResponse) {
            CreateAclsResponse response = (CreateAclsResponse) abstractResponse;
            List<AclCreationResult> responses = response.results();
            Iterator<AclCreationResult> iter = responses.iterator();
            for (AclBinding aclBinding : aclBindingsSent) {
                KafkaFutureImpl<Void> future = futures.get(aclBinding);
                if (!iter.hasNext()) {
                    future.completeExceptionally(new UnknownServerException("The broker reported no creation result for the given ACL: " + aclBinding));
                } else {
                    AclCreationResult creation = iter.next();
                    Errors error = Errors.forCode(creation.errorCode());
                    ApiError apiError = new ApiError(error, creation.errorMessage());
                    if (apiError.isFailure())
                        future.completeExceptionally(apiError.exception());
                    else
                        future.complete(null);
                }
            }
        }

        @Override
        void handleFailure(Throwable throwable) {
            completeAllExceptionally(futures.values(), throwable);
        }
    }, now);
    return new CreateAclsResult(new HashMap<>(futures));
}
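A minimal caller-side sketch for the method above, using placeholder principal, host, topic, and bootstrap values; per-binding failures (the ApiError built from Errors.forCode in the handler) surface on the corresponding future.

import java.util.Collections;
import java.util.Properties;
import java.util.concurrent.ExecutionException;

import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.clients.admin.AdminClientConfig;
import org.apache.kafka.common.acl.AccessControlEntry;
import org.apache.kafka.common.acl.AclBinding;
import org.apache.kafka.common.acl.AclOperation;
import org.apache.kafka.common.acl.AclPermissionType;
import org.apache.kafka.common.resource.PatternType;
import org.apache.kafka.common.resource.ResourcePattern;
import org.apache.kafka.common.resource.ResourceType;

public class CreateAclsExample {

    public static void main(String[] args) throws InterruptedException {
        Properties props = new Properties();
        props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // placeholder address
        // Allow the placeholder principal User:alice to read the placeholder topic "orders" from any host
        AclBinding binding = new AclBinding(
                new ResourcePattern(ResourceType.TOPIC, "orders", PatternType.LITERAL),
                new AccessControlEntry("User:alice", "*", AclOperation.READ, AclPermissionType.ALLOW));
        try (Admin admin = Admin.create(props)) {
            try {
                admin.createAcls(Collections.singleton(binding)).all().get();
                System.out.println("Created ACL: " + binding);
            } catch (ExecutionException e) {
                // e.g. SecurityDisabledException when no authorizer is configured,
                // or ClusterAuthorizationException when the caller may not alter ACLs
                System.err.println("createAcls failed: " + e.getCause());
            }
        }
    }
}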
Use of org.apache.kafka.common.protocol.Errors in project kafka by apache.
Class Fetcher, method initializeCompletedFetch.
/**
 * Initialize a CompletedFetch object.
 */
private CompletedFetch initializeCompletedFetch(CompletedFetch nextCompletedFetch) {
    TopicPartition tp = nextCompletedFetch.partition;
    FetchResponseData.PartitionData partition = nextCompletedFetch.partitionData;
    long fetchOffset = nextCompletedFetch.nextFetchOffset;
    CompletedFetch completedFetch = null;
    Errors error = Errors.forCode(partition.errorCode());
    try {
        if (!subscriptions.hasValidPosition(tp)) {
            // this can happen when a rebalance happened while fetch is still in-flight
            log.debug("Ignoring fetched records for partition {} since it no longer has valid position", tp);
        } else if (error == Errors.NONE) {
            // we are interested in this fetch only if the beginning offset matches the
            // current consumed position
            FetchPosition position = subscriptions.position(tp);
            if (position == null || position.offset != fetchOffset) {
                log.debug("Discarding stale fetch response for partition {} since its offset {} does not match " +
                    "the expected offset {}", tp, fetchOffset, position);
                return null;
            }
            log.trace("Preparing to read {} bytes of data for partition {} with offset {}",
                FetchResponse.recordsSize(partition), tp, position);
            Iterator<? extends RecordBatch> batches = FetchResponse.recordsOrFail(partition).batches().iterator();
            completedFetch = nextCompletedFetch;
            if (!batches.hasNext() && FetchResponse.recordsSize(partition) > 0) {
                if (completedFetch.responseVersion < 3) {
                    // Implement the pre KIP-74 behavior of throwing a RecordTooLargeException.
                    Map<TopicPartition, Long> recordTooLargePartitions = Collections.singletonMap(tp, fetchOffset);
                    throw new RecordTooLargeException("There are some messages at [Partition=Offset]: " + recordTooLargePartitions +
                        " whose size is larger than the fetch size " + this.fetchSize +
                        " and hence cannot be returned. Please considering upgrading your broker to 0.10.1.0 or " +
                        "newer to avoid this issue. Alternately, increase the fetch size on the client (using " +
                        ConsumerConfig.MAX_PARTITION_FETCH_BYTES_CONFIG + ")", recordTooLargePartitions);
                } else {
                    // This should not happen with brokers that support FetchRequest/Response V3 or higher (i.e. KIP-74)
                    throw new KafkaException("Failed to make progress reading messages at " + tp + "=" + fetchOffset +
                        ". Received a non-empty fetch response from the server, but no " +
                        "complete records were found.");
                }
            }
            if (partition.highWatermark() >= 0) {
                log.trace("Updating high watermark for partition {} to {}", tp, partition.highWatermark());
                subscriptions.updateHighWatermark(tp, partition.highWatermark());
            }
            if (partition.logStartOffset() >= 0) {
                log.trace("Updating log start offset for partition {} to {}", tp, partition.logStartOffset());
                subscriptions.updateLogStartOffset(tp, partition.logStartOffset());
            }
            if (partition.lastStableOffset() >= 0) {
                log.trace("Updating last stable offset for partition {} to {}", tp, partition.lastStableOffset());
                subscriptions.updateLastStableOffset(tp, partition.lastStableOffset());
            }
            if (FetchResponse.isPreferredReplica(partition)) {
                subscriptions.updatePreferredReadReplica(completedFetch.partition, partition.preferredReadReplica(), () -> {
                    long expireTimeMs = time.milliseconds() + metadata.metadataExpireMs();
                    log.debug("Updating preferred read replica for partition {} to {}, set to expire at {}",
                        tp, partition.preferredReadReplica(), expireTimeMs);
                    return expireTimeMs;
                });
            }
            nextCompletedFetch.initialized = true;
        } else if (error == Errors.NOT_LEADER_OR_FOLLOWER ||
                   error == Errors.REPLICA_NOT_AVAILABLE ||
                   error == Errors.KAFKA_STORAGE_ERROR ||
                   error == Errors.FENCED_LEADER_EPOCH ||
                   error == Errors.OFFSET_NOT_AVAILABLE) {
            log.debug("Error in fetch for partition {}: {}", tp, error.exceptionName());
            this.metadata.requestUpdate();
        } else if (error == Errors.UNKNOWN_TOPIC_OR_PARTITION) {
            log.warn("Received unknown topic or partition error in fetch for partition {}", tp);
            this.metadata.requestUpdate();
        } else if (error == Errors.UNKNOWN_TOPIC_ID) {
            log.warn("Received unknown topic ID error in fetch for partition {}", tp);
            this.metadata.requestUpdate();
        } else if (error == Errors.INCONSISTENT_TOPIC_ID) {
            log.warn("Received inconsistent topic ID error in fetch for partition {}", tp);
            this.metadata.requestUpdate();
        } else if (error == Errors.OFFSET_OUT_OF_RANGE) {
            Optional<Integer> clearedReplicaId = subscriptions.clearPreferredReadReplica(tp);
            if (!clearedReplicaId.isPresent()) {
                // If there's no preferred replica to clear, we're fetching from the leader so handle this error normally
                FetchPosition position = subscriptions.position(tp);
                if (position == null || fetchOffset != position.offset) {
                    log.debug("Discarding stale fetch response for partition {} since the fetched offset {} " +
                        "does not match the current offset {}", tp, fetchOffset, position);
                } else {
                    handleOffsetOutOfRange(position, tp);
                }
            } else {
                log.debug("Unset the preferred read replica {} for partition {} since we got {} when fetching {}",
                    clearedReplicaId.get(), tp, error, fetchOffset);
            }
        } else if (error == Errors.TOPIC_AUTHORIZATION_FAILED) {
            // we log the actual partition and not just the topic to help with ACL propagation issues in large clusters
            log.warn("Not authorized to read from partition {}.", tp);
            throw new TopicAuthorizationException(Collections.singleton(tp.topic()));
        } else if (error == Errors.UNKNOWN_LEADER_EPOCH) {
            log.debug("Received unknown leader epoch error in fetch for partition {}", tp);
        } else if (error == Errors.UNKNOWN_SERVER_ERROR) {
            log.warn("Unknown server error while fetching offset {} for topic-partition {}", fetchOffset, tp);
        } else if (error == Errors.CORRUPT_MESSAGE) {
            throw new KafkaException("Encountered corrupt message when fetching offset " + fetchOffset +
                " for topic-partition " + tp);
        } else {
            throw new IllegalStateException("Unexpected error code " + error.code() + " while fetching at offset " +
                fetchOffset + " from topic-partition " + tp);
        }
    } finally {
        if (completedFetch == null)
            nextCompletedFetch.metricAggregator.record(tp, 0, 0);
        if (error != Errors.NONE)
            // we move the partition to the end if there was an error. This way, it's more likely that partitions for
            // the same topic can remain together (allowing for more efficient serialization).
            subscriptions.movePartitionToEnd(tp);
    }
    return completedFetch;
}
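The fetch handler above dispatches on Errors constants decoded from the wire error code. The following minimal sketch isolates that decoding step using only the Errors API itself (forCode, code, exceptionName, exception); the constant used as input is purely illustrative, since in the fetch path the code comes from the partition data of the response.

import org.apache.kafka.common.protocol.Errors;

public class ErrorsMappingExample {

    public static void main(String[] args) {
        // Illustrative wire code; the fetch path reads it from the partition data in the response
        short wireCode = Errors.OFFSET_OUT_OF_RANGE.code();
        Errors error = Errors.forCode(wireCode);
        System.out.println("code=" + error.code() + " name=" + error + " exception=" + error.exceptionName());
        if (error != Errors.NONE) {
            // exception() returns the ApiException instance associated with this error code
            System.out.println("would raise: " + error.exception());
        }
    }
}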