Example usage of org.apache.kafka.common.protocol.Errors from the Apache Kafka project: class OffsetFetchResponseTest, method testConstructorWithMultipleGroups.
@Test
public void testConstructorWithMultipleGroups() {
    // Partition-level data for each of the three groups, each carrying a distinct error.
    Map<TopicPartition, PartitionData> groupOneData = new HashMap<>();
    groupOneData.put(new TopicPartition(topicOne, partitionOne),
        new PartitionData(offset, leaderEpochOne, metadata, Errors.TOPIC_AUTHORIZATION_FAILED));

    Map<TopicPartition, PartitionData> groupTwoData = new HashMap<>();
    groupTwoData.put(new TopicPartition(topicTwo, partitionTwo),
        new PartitionData(offset, leaderEpochTwo, metadata, Errors.UNKNOWN_TOPIC_OR_PARTITION));

    Map<TopicPartition, PartitionData> groupThreeData = new HashMap<>();
    groupThreeData.put(new TopicPartition(topicThree, partitionThree),
        new PartitionData(offset, leaderEpochThree, metadata, Errors.NONE));

    Map<String, Map<TopicPartition, PartitionData>> dataByGroup = new HashMap<>();
    dataByGroup.put(groupOne, groupOneData);
    dataByGroup.put(groupTwo, groupTwoData);
    dataByGroup.put(groupThree, groupThreeData);

    // Group-level errors: two failing groups, one healthy.
    Map<String, Errors> groupErrors = new HashMap<>();
    groupErrors.put(groupOne, Errors.NOT_COORDINATOR);
    groupErrors.put(groupTwo, Errors.COORDINATOR_LOAD_IN_PROGRESS);
    groupErrors.put(groupThree, Errors.NONE);

    // The multi-group constructor is only exercised for protocol versions >= 8.
    for (short version : ApiKeys.OFFSET_FETCH.allVersions()) {
        if (version < 8) {
            continue;
        }
        OffsetFetchResponse response = new OffsetFetchResponse(throttleTimeMs, groupErrors, dataByGroup);

        // Group-level error accessors reflect what the constructor was given.
        assertEquals(Errors.NOT_COORDINATOR, response.groupLevelError(groupOne));
        assertEquals(Errors.COORDINATOR_LOAD_IN_PROGRESS, response.groupLevelError(groupTwo));
        assertEquals(Errors.NONE, response.groupLevelError(groupThree));
        assertTrue(response.groupHasError(groupOne));
        assertTrue(response.groupHasError(groupTwo));
        assertFalse(response.groupHasError(groupThree));

        // errorCounts() aggregates group-level and partition-level errors:
        // NONE appears twice (groupThree's group error + its partition error).
        assertEquals(5, response.errorCounts().size());
        assertEquals(Utils.mkMap(
            Utils.mkEntry(Errors.NOT_COORDINATOR, 1),
            Utils.mkEntry(Errors.TOPIC_AUTHORIZATION_FAILED, 1),
            Utils.mkEntry(Errors.UNKNOWN_TOPIC_OR_PARTITION, 1),
            Utils.mkEntry(Errors.COORDINATOR_LOAD_IN_PROGRESS, 1),
            Utils.mkEntry(Errors.NONE, 2)), response.errorCounts());
        assertEquals(throttleTimeMs, response.throttleTimeMs());

        // Partition data round-trips unchanged for every group.
        Map<TopicPartition, PartitionData> fetchedOne = response.partitionDataMap(groupOne);
        assertEquals(groupOneData, fetchedOne);
        fetchedOne.forEach((tp, data) -> assertTrue(data.hasError()));

        Map<TopicPartition, PartitionData> fetchedTwo = response.partitionDataMap(groupTwo);
        assertEquals(groupTwoData, fetchedTwo);
        fetchedTwo.forEach((tp, data) -> assertTrue(data.hasError()));

        Map<TopicPartition, PartitionData> fetchedThree = response.partitionDataMap(groupThree);
        assertEquals(groupThreeData, fetchedThree);
        fetchedThree.forEach((tp, data) -> assertFalse(data.hasError()));
    }
}
Example usage of org.apache.kafka.common.protocol.Errors from the Apache Kafka project: class StopReplicaResponseTest, method testErrorCountsNoTopLevelError.
@Test
public void testErrorCountsNoTopLevelError() {
    // Two partition-level results: partition 0 keeps the default error code (NONE),
    // partition 1 reports CLUSTER_AUTHORIZATION_FAILED.
    List<StopReplicaPartitionError> partitionErrors = new ArrayList<>();
    partitionErrors.add(new StopReplicaPartitionError()
        .setTopicName("foo")
        .setPartitionIndex(0));
    partitionErrors.add(new StopReplicaPartitionError()
        .setTopicName("foo")
        .setPartitionIndex(1)
        .setErrorCode(Errors.CLUSTER_AUTHORIZATION_FAILED.code()));

    StopReplicaResponseData data = new StopReplicaResponseData()
        .setErrorCode(Errors.NONE.code())
        .setPartitionErrors(partitionErrors);
    StopReplicaResponse response = new StopReplicaResponse(data);

    // NONE is counted twice (top-level + partition 0); the auth failure once.
    Map<Errors, Integer> counts = response.errorCounts();
    assertEquals(2, counts.size());
    assertEquals(2, counts.get(Errors.NONE).intValue());
    assertEquals(1, counts.get(Errors.CLUSTER_AUTHORIZATION_FAILED).intValue());
}
Example usage of org.apache.kafka.common.protocol.Errors from the Apache Kafka project: class UpdateFeaturesResponseTest, method testErrorCounts.
@Test
public void testErrorCounts() {
    // Three feature-level results: two share UNKNOWN_SERVER_ERROR, one reports
    // FEATURE_UPDATE_FAILED; the top-level code is INVALID_REQUEST.
    UpdateFeaturesResponseData.UpdatableFeatureResultCollection featureResults =
        new UpdateFeaturesResponseData.UpdatableFeatureResultCollection();
    featureResults.add(new UpdateFeaturesResponseData.UpdatableFeatureResult()
        .setFeature("foo")
        .setErrorCode(Errors.UNKNOWN_SERVER_ERROR.code()));
    featureResults.add(new UpdateFeaturesResponseData.UpdatableFeatureResult()
        .setFeature("bar")
        .setErrorCode(Errors.UNKNOWN_SERVER_ERROR.code()));
    featureResults.add(new UpdateFeaturesResponseData.UpdatableFeatureResult()
        .setFeature("baz")
        .setErrorCode(Errors.FEATURE_UPDATE_FAILED.code()));

    UpdateFeaturesResponse response = new UpdateFeaturesResponse(
        new UpdateFeaturesResponseData()
            .setErrorCode(Errors.INVALID_REQUEST.code())
            .setResults(featureResults));

    // Counts combine the top-level error with every per-feature error.
    Map<Errors, Integer> counts = response.errorCounts();
    assertEquals(3, counts.size());
    assertEquals(1, counts.get(Errors.INVALID_REQUEST).intValue());
    assertEquals(2, counts.get(Errors.UNKNOWN_SERVER_ERROR).intValue());
    assertEquals(1, counts.get(Errors.FEATURE_UPDATE_FAILED).intValue());
}
Example usage of org.apache.kafka.common.protocol.Errors from the Apache Kafka project: class DeleteConsumerGroupOffsetsHandler, method handleResponse.
@Override
public ApiResult<CoordinatorKey, Map<TopicPartition, Errors>> handleResponse(Node coordinator, Set<CoordinatorKey> groupIds, AbstractResponse abstractResponse) {
    validateKeys(groupIds);
    final OffsetDeleteResponse response = (OffsetDeleteResponse) abstractResponse;

    // A non-NONE top-level error means the whole request failed for this group;
    // delegate classification (retriable vs. fatal) to handleGroupError.
    // NOTE(review): `groupId` is presumably a field of this handler — not the
    // `groupIds` parameter; confirm against the enclosing class.
    final Errors topLevelError = Errors.forCode(response.data().errorCode());
    if (topLevelError != Errors.NONE) {
        final Map<CoordinatorKey, Throwable> failed = new HashMap<>();
        final Set<CoordinatorKey> groupsToUnmap = new HashSet<>();
        handleGroupError(groupId, topLevelError, failed, groupsToUnmap);
        return new ApiResult<>(Collections.emptyMap(), failed, new ArrayList<>(groupsToUnmap));
    }

    // Success path: flatten the per-topic, per-partition error codes into a
    // single TopicPartition -> Errors map.
    final Map<TopicPartition, Errors> partitionResults = new HashMap<>();
    response.data().topics().forEach(topic ->
        topic.partitions().forEach(partition ->
            partitionResults.put(
                new TopicPartition(topic.name(), partition.partitionIndex()),
                Errors.forCode(partition.errorCode()))));
    return ApiResult.completed(groupId, partitionResults);
}
Example usage of org.apache.kafka.common.protocol.Errors from the Apache Kafka project: class ListTransactionsHandler, method handleResponse.
@Override
public ApiResult<AllBrokersStrategy.BrokerKey, Collection<TransactionListing>> handleResponse(Node broker, Set<AllBrokersStrategy.BrokerKey> keys, AbstractResponse abstractResponse) {
    int brokerId = broker.id();
    AllBrokersStrategy.BrokerKey key = requireSingleton(keys, brokerId);
    ListTransactionsResponse response = (ListTransactionsResponse) abstractResponse;

    // Dispatch on the broker-level error code before touching the payload.
    Errors error = Errors.forCode(response.data().errorCode());
    switch (error) {
        case NONE:
            break;
        case COORDINATOR_LOAD_IN_PROGRESS:
            // Transient: empty result signals the driver to retry after backoff.
            log.debug("The `ListTransactions` request sent to broker {} failed because the coordinator is still loading state. Will try again after backing off", brokerId);
            return ApiResult.empty();
        case COORDINATOR_NOT_AVAILABLE:
            // Coordinator is going away; surface a typed failure for this broker.
            log.debug("The `ListTransactions` request sent to broker {} failed because the coordinator is shutting down", brokerId);
            return ApiResult.failed(key, new CoordinatorNotAvailableException("ListTransactions request sent to broker " + brokerId + " failed because the coordinator is shutting down"));
        default:
            // Anything else is unexpected; map it to the error's own exception type.
            log.error("The `ListTransactions` request sent to broker {} failed because of an unexpected error {}", brokerId, error);
            return ApiResult.failed(key, error.exception("ListTransactions request sent to broker " + brokerId + " failed with an unexpected exception"));
    }

    // Success: convert each wire-level transaction state into a TransactionListing.
    List<TransactionListing> listings = response.data().transactionStates().stream()
        .map(state -> new TransactionListing(
            state.transactionalId(),
            state.producerId(),
            TransactionState.parse(state.transactionState())))
        .collect(Collectors.toList());
    return ApiResult.completed(key, listings);
}
Aggregations