Use of org.apache.kafka.common.protocol.Errors in project kafka by apache.
Class DescribeProducersHandler, method handleResponse:
@Override
public ApiResult<TopicPartition, PartitionProducerState> handleResponse(
    Node broker,
    Set<TopicPartition> keys,
    AbstractResponse abstractResponse
) {
    DescribeProducersResponse response = (DescribeProducersResponse) abstractResponse;
    Map<TopicPartition, PartitionProducerState> completed = new HashMap<>();
    Map<TopicPartition, Throwable> failed = new HashMap<>();
    List<TopicPartition> unmapped = new ArrayList<>();

    for (DescribeProducersResponseData.TopicResponse topicResponse : response.data().topics()) {
        for (DescribeProducersResponseData.PartitionResponse partitionResponse : topicResponse.partitions()) {
            TopicPartition topicPartition = new TopicPartition(
                topicResponse.name(), partitionResponse.partitionIndex());

            Errors error = Errors.forCode(partitionResponse.errorCode());
            if (error != Errors.NONE) {
                ApiError apiError = new ApiError(error, partitionResponse.errorMessage());
                handlePartitionError(topicPartition, apiError, failed, unmapped);
                continue;
            }

            List<ProducerState> activeProducers = partitionResponse.activeProducers().stream()
                .map(activeProducer -> {
                    // Negative sentinel values mean "no current transaction" and
                    // "no coordinator epoch", respectively.
                    OptionalLong currentTransactionFirstOffset = activeProducer.currentTxnStartOffset() < 0 ?
                        OptionalLong.empty() :
                        OptionalLong.of(activeProducer.currentTxnStartOffset());
                    OptionalInt coordinatorEpoch = activeProducer.coordinatorEpoch() < 0 ?
                        OptionalInt.empty() :
                        OptionalInt.of(activeProducer.coordinatorEpoch());

                    return new ProducerState(
                        activeProducer.producerId(),
                        activeProducer.producerEpoch(),
                        activeProducer.lastSequence(),
                        activeProducer.lastTimestamp(),
                        coordinatorEpoch,
                        currentTransactionFirstOffset
                    );
                })
                .collect(Collectors.toList());

            completed.put(topicPartition, new PartitionProducerState(activeProducers));
        }
    }
    return new ApiResult<>(completed, failed, unmapped);
}
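The core pattern above is decoding each partition's wire-level error code into the Errors enum and wrapping anything other than NONE in an ApiError together with the broker-supplied message. A minimal, self-contained sketch of that step (the class and method names here are illustrative, not part of the project):

import org.apache.kafka.common.protocol.Errors;
import org.apache.kafka.common.requests.ApiError;

// Illustrative only: maps a wire-format error code plus the broker-supplied
// message into an ApiError, as the handler does per partition.
public class PartitionErrorDecodeSketch {
    static ApiError decode(short errorCode, String errorMessage) {
        Errors error = Errors.forCode(errorCode);
        return error == Errors.NONE ? ApiError.NONE : new ApiError(error, errorMessage);
    }

    public static void main(String[] args) {
        ApiError err = decode(Errors.NOT_LEADER_OR_FOLLOWER.code(), "leader moved");
        System.out.println(err.error() + ": " + err.message());
    }
}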
Use of org.apache.kafka.common.protocol.Errors in project kafka by apache.
Class ListConsumerGroupOffsetsHandler, method handleResponse:
@Override
public ApiResult<CoordinatorKey, Map<TopicPartition, OffsetAndMetadata>> handleResponse(
    Node coordinator,
    Set<CoordinatorKey> groupIds,
    AbstractResponse abstractResponse
) {
    validateKeys(groupIds);

    final OffsetFetchResponse response = (OffsetFetchResponse) abstractResponse;

    // groupError contains the group-level error for v0-v8 OffsetFetchResponse
    // (`groupId` is the handler's single requested group, held as a field).
    Errors groupError = response.groupLevelError(groupId.idValue);
    if (groupError != Errors.NONE) {
        final Map<CoordinatorKey, Throwable> failed = new HashMap<>();
        final Set<CoordinatorKey> groupsToUnmap = new HashSet<>();

        handleGroupError(groupId, groupError, failed, groupsToUnmap);
        return new ApiResult<>(Collections.emptyMap(), failed, new ArrayList<>(groupsToUnmap));
    } else {
        final Map<TopicPartition, OffsetAndMetadata> groupOffsetsListing = new HashMap<>();
        response.partitionDataMap(groupId.idValue).forEach((topicPartition, partitionData) -> {
            final Errors error = partitionData.error;
            if (error == Errors.NONE) {
                final long offset = partitionData.offset;
                final String metadata = partitionData.metadata;
                final Optional<Integer> leaderEpoch = partitionData.leaderEpoch;
                // A negative offset indicates that the group has no committed offset for this partition.
                if (offset < 0) {
                    groupOffsetsListing.put(topicPartition, null);
                } else {
                    groupOffsetsListing.put(topicPartition, new OffsetAndMetadata(offset, leaderEpoch, metadata));
                }
            } else {
                log.warn("Skipping return offset for {} due to error {}.", topicPartition, error);
            }
        });
        return ApiResult.completed(groupId, groupOffsetsListing);
    }
}
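Two error levels are handled here: a group-level error fails the whole lookup, while a partition-level error only skips that partition; within a healthy partition, a negative offset means the group has no committed offset. The sketch below collapses the "skipped" and "no committed offset" cases into an empty Optional for brevity (the original distinguishes them); the helper name and shape are illustrative assumptions:

import java.util.Optional;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.common.protocol.Errors;

// Illustrative helper mirroring the per-partition branch above: errors and
// negative offsets yield "no committed offset", everything else becomes an
// OffsetAndMetadata carrying the leader epoch and metadata string.
final class FetchedOffsetSketch {
    static Optional<OffsetAndMetadata> toOffsetAndMetadata(Errors error, long offset,
                                                           Optional<Integer> leaderEpoch, String metadata) {
        if (error != Errors.NONE || offset < 0) {
            return Optional.empty();
        }
        return Optional.of(new OffsetAndMetadata(offset, leaderEpoch, metadata));
    }
}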
Use of org.apache.kafka.common.protocol.Errors in project kafka by apache.
Class AlterConsumerGroupOffsetsHandler, method handleResponse:
@Override
public ApiResult<CoordinatorKey, Map<TopicPartition, Errors>> handleResponse(
    Node coordinator,
    Set<CoordinatorKey> groupIds,
    AbstractResponse abstractResponse
) {
    validateKeys(groupIds);

    final OffsetCommitResponse response = (OffsetCommitResponse) abstractResponse;
    final Set<CoordinatorKey> groupsToUnmap = new HashSet<>();
    final Set<CoordinatorKey> groupsToRetry = new HashSet<>();
    final Map<TopicPartition, Errors> partitionResults = new HashMap<>();

    for (OffsetCommitResponseTopic topic : response.data().topics()) {
        for (OffsetCommitResponsePartition partition : topic.partitions()) {
            TopicPartition topicPartition = new TopicPartition(topic.name(), partition.partitionIndex());
            Errors error = Errors.forCode(partition.errorCode());

            if (error != Errors.NONE) {
                handleError(groupId, topicPartition, error, partitionResults, groupsToUnmap, groupsToRetry);
            } else {
                partitionResults.put(topicPartition, error);
            }
        }
    }

    if (groupsToUnmap.isEmpty() && groupsToRetry.isEmpty()) {
        return ApiResult.completed(groupId, partitionResults);
    } else {
        return ApiResult.unmapped(new ArrayList<>(groupsToUnmap));
    }
}
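The handler completes each group with a Map<TopicPartition, Errors>, so callers still need to turn non-NONE entries into exceptions. A minimal caller-side sketch (the helper below is hypothetical, not part of the project):

import java.util.Map;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.protocol.Errors;

// Hypothetical caller-side check: the first partition whose commit did not
// return Errors.NONE is surfaced as its corresponding ApiException.
final class CommitResultSketch {
    static void throwOnFirstError(Map<TopicPartition, Errors> partitionResults) {
        for (Map.Entry<TopicPartition, Errors> entry : partitionResults.entrySet()) {
            if (entry.getValue() != Errors.NONE) {
                throw entry.getValue().exception();
            }
        }
    }
}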
Use of org.apache.kafka.common.protocol.Errors in project kafka by apache.
Class PartitionLeaderStrategy, method handleResponse:
@Override
public LookupResult<TopicPartition> handleResponse(Set<TopicPartition> requestPartitions, AbstractResponse abstractResponse) {
    MetadataResponse response = (MetadataResponse) abstractResponse;
    Map<TopicPartition, Throwable> failed = new HashMap<>();
    Map<TopicPartition, Integer> mapped = new HashMap<>();

    for (MetadataResponseData.MetadataResponseTopic topicMetadata : response.data().topics()) {
        String topic = topicMetadata.name();
        Errors topicError = Errors.forCode(topicMetadata.errorCode());
        if (topicError != Errors.NONE) {
            handleTopicError(topic, topicError, requestPartitions, failed);
            continue;
        }

        for (MetadataResponseData.MetadataResponsePartition partitionMetadata : topicMetadata.partitions()) {
            TopicPartition topicPartition = new TopicPartition(topic, partitionMetadata.partitionIndex());
            Errors partitionError = Errors.forCode(partitionMetadata.errorCode());

            if (!requestPartitions.contains(topicPartition)) {
                // The metadata response includes all partitions of the requested
                // topics, so we have to filter any that we are not interested in.
                continue;
            }

            if (partitionError != Errors.NONE) {
                handlePartitionError(topicPartition, partitionError, failed);
                continue;
            }

            int leaderId = partitionMetadata.leaderId();
            if (leaderId >= 0) {
                mapped.put(topicPartition, leaderId);
            } else {
                log.debug("Metadata request for {} returned no error, but the leader is unknown. Will retry", topicPartition);
            }
        }
    }
    return new LookupResult<>(failed, mapped);
}
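Whether a failed topic or partition should be retried or surfaced depends on the Errors value. The sketch below shows one way to classify errors via the mapped exception type; the project's actual policy lives in handleTopicError and handlePartitionError, so treat this as an assumption rather than the real logic:

import org.apache.kafka.common.errors.RetriableException;
import org.apache.kafka.common.protocol.Errors;

// Illustrative classification: an error whose mapped exception is a
// RetriableException (e.g. LEADER_NOT_AVAILABLE) is a candidate for retry,
// anything else would be reported as a hard failure.
final class LeaderLookupErrorSketch {
    static boolean isRetriable(Errors error) {
        return error != Errors.NONE && error.exception() instanceof RetriableException;
    }
}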
Use of org.apache.kafka.common.protocol.Errors in project kafka by apache.
Class KafkaConsumerTest, method prepareOffsetCommitResponse:
private AtomicBoolean prepareOffsetCommitResponse(MockClient client, Node coordinator, final Map<TopicPartition, Long> partitionOffsets) {
    final AtomicBoolean commitReceived = new AtomicBoolean(true);

    // Reply with Errors.NONE for every partition the test expects to be committed.
    Map<TopicPartition, Errors> response = new HashMap<>();
    for (TopicPartition partition : partitionOffsets.keySet())
        response.put(partition, Errors.NONE);

    client.prepareResponseFrom(body -> {
        OffsetCommitRequest commitRequest = (OffsetCommitRequest) body;
        // Despite the name, this map holds the offsets carried by the intercepted commit request.
        Map<TopicPartition, Long> commitErrors = commitRequest.offsets();

        for (Map.Entry<TopicPartition, Long> partitionOffset : partitionOffsets.entrySet()) {
            // verify that the expected offset has been committed
            if (!commitErrors.get(partitionOffset.getKey()).equals(partitionOffset.getValue())) {
                commitReceived.set(false);
                return false;
            }
        }
        return true;
    }, offsetCommitResponse(response), coordinator);
    return commitReceived;
}
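The offsetCommitResponse(...) helper is defined elsewhere in KafkaConsumerTest; a plausible equivalent, assuming the OffsetCommitResponse convenience constructor that takes a per-partition Errors map, is sketched below:

import java.util.HashMap;
import java.util.Map;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.protocol.Errors;
import org.apache.kafka.common.requests.OffsetCommitResponse;

// Assumption: OffsetCommitResponse exposes a constructor taking a
// Map<TopicPartition, Errors>; here every partition is reported as NONE,
// matching the response map built in the test above.
final class OffsetCommitResponseSketch {
    static OffsetCommitResponse allNone(Iterable<TopicPartition> partitions) {
        Map<TopicPartition, Errors> errors = new HashMap<>();
        for (TopicPartition tp : partitions) {
            errors.put(tp, Errors.NONE);
        }
        return new OffsetCommitResponse(errors);
    }
}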