Example 56 with Errors

Use of org.apache.kafka.common.protocol.Errors in project kafka by apache.

From the class OffsetFetchResponseTest, method testConstructorWithMultipleGroups.

@Test
public void testConstructorWithMultipleGroups() {
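    // topicOne/Two/Three, partitionOne/Two/Three, groupOne/Two/Three, offset, leaderEpochOne/Two/Three,
    // metadata and throttleTimeMs are fields of the test class, initialized in its setup and not shown in this snippet.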
    Map<String, Map<TopicPartition, PartitionData>> responseData = new HashMap<>();
    Map<String, Errors> errorMap = new HashMap<>();
    Map<TopicPartition, PartitionData> pd1 = new HashMap<>();
    Map<TopicPartition, PartitionData> pd2 = new HashMap<>();
    Map<TopicPartition, PartitionData> pd3 = new HashMap<>();
    pd1.put(new TopicPartition(topicOne, partitionOne), new PartitionData(offset, leaderEpochOne, metadata, Errors.TOPIC_AUTHORIZATION_FAILED));
    pd2.put(new TopicPartition(topicTwo, partitionTwo), new PartitionData(offset, leaderEpochTwo, metadata, Errors.UNKNOWN_TOPIC_OR_PARTITION));
    pd3.put(new TopicPartition(topicThree, partitionThree), new PartitionData(offset, leaderEpochThree, metadata, Errors.NONE));
    responseData.put(groupOne, pd1);
    responseData.put(groupTwo, pd2);
    responseData.put(groupThree, pd3);
    errorMap.put(groupOne, Errors.NOT_COORDINATOR);
    errorMap.put(groupTwo, Errors.COORDINATOR_LOAD_IN_PROGRESS);
    errorMap.put(groupThree, Errors.NONE);
    for (short version : ApiKeys.OFFSET_FETCH.allVersions()) {
        if (version >= 8) {
            OffsetFetchResponse response = new OffsetFetchResponse(throttleTimeMs, errorMap, responseData);
            assertEquals(Errors.NOT_COORDINATOR, response.groupLevelError(groupOne));
            assertEquals(Errors.COORDINATOR_LOAD_IN_PROGRESS, response.groupLevelError(groupTwo));
            assertEquals(Errors.NONE, response.groupLevelError(groupThree));
            assertTrue(response.groupHasError(groupOne));
            assertTrue(response.groupHasError(groupTwo));
            assertFalse(response.groupHasError(groupThree));
            assertEquals(5, response.errorCounts().size());
            assertEquals(Utils.mkMap(
                Utils.mkEntry(Errors.NOT_COORDINATOR, 1),
                Utils.mkEntry(Errors.TOPIC_AUTHORIZATION_FAILED, 1),
                Utils.mkEntry(Errors.UNKNOWN_TOPIC_OR_PARTITION, 1),
                Utils.mkEntry(Errors.COORDINATOR_LOAD_IN_PROGRESS, 1),
                Utils.mkEntry(Errors.NONE, 2)), response.errorCounts());
            assertEquals(throttleTimeMs, response.throttleTimeMs());
            Map<TopicPartition, PartitionData> responseData1 = response.partitionDataMap(groupOne);
            assertEquals(pd1, responseData1);
            responseData1.forEach((tp, data) -> assertTrue(data.hasError()));
            Map<TopicPartition, PartitionData> responseData2 = response.partitionDataMap(groupTwo);
            assertEquals(pd2, responseData2);
            responseData2.forEach((tp, data) -> assertTrue(data.hasError()));
            Map<TopicPartition, PartitionData> responseData3 = response.partitionDataMap(groupThree);
            assertEquals(pd3, responseData3);
            responseData3.forEach((tp, data) -> assertFalse(data.hasError()));
        }
    }
}
Also used: Errors(org.apache.kafka.common.protocol.Errors) HashMap(java.util.HashMap) PartitionData(org.apache.kafka.common.requests.OffsetFetchResponse.PartitionData) TopicPartition(org.apache.kafka.common.TopicPartition) Map(java.util.Map) Test(org.junit.jupiter.api.Test)
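
The errorCounts() assertion above tallies one entry per group-level error plus one per partition-level error, which is why the map has five distinct keys while Errors.NONE is counted twice. A minimal standalone sketch of that counting convention (the ErrorCountSketch class and its tally helper are illustrative, not part of the Kafka API):

import java.util.HashMap;
import java.util.Map;
import org.apache.kafka.common.protocol.Errors;

public class ErrorCountSketch {

    // Illustrative helper: add one count for a single group-level or partition-level error.
    static void tally(Map<Errors, Integer> counts, Errors error) {
        counts.merge(error, 1, Integer::sum);
    }

    public static void main(String[] args) {
        Map<Errors, Integer> counts = new HashMap<>();
        // Group-level errors from the test above.
        tally(counts, Errors.NOT_COORDINATOR);
        tally(counts, Errors.COORDINATOR_LOAD_IN_PROGRESS);
        tally(counts, Errors.NONE);
        // Partition-level errors from the test above.
        tally(counts, Errors.TOPIC_AUTHORIZATION_FAILED);
        tally(counts, Errors.UNKNOWN_TOPIC_OR_PARTITION);
        tally(counts, Errors.NONE);
        // Prints "5 2": five distinct keys, Errors.NONE counted twice.
        System.out.println(counts.size() + " " + counts.get(Errors.NONE));
    }
}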

Example 57 with Errors

Use of org.apache.kafka.common.protocol.Errors in project kafka by apache.

From the class StopReplicaResponseTest, method testErrorCountsNoTopLevelError.

@Test
public void testErrorCountsNoTopLevelError() {
    List<StopReplicaPartitionError> errors = new ArrayList<>();
    errors.add(new StopReplicaPartitionError().setTopicName("foo").setPartitionIndex(0));
    errors.add(new StopReplicaPartitionError().setTopicName("foo").setPartitionIndex(1).setErrorCode(Errors.CLUSTER_AUTHORIZATION_FAILED.code()));
    StopReplicaResponse response = new StopReplicaResponse(new StopReplicaResponseData().setErrorCode(Errors.NONE.code()).setPartitionErrors(errors));
    Map<Errors, Integer> errorCounts = response.errorCounts();
    assertEquals(2, errorCounts.size());
    assertEquals(2, errorCounts.get(Errors.NONE).intValue());
    assertEquals(1, errorCounts.get(Errors.CLUSTER_AUTHORIZATION_FAILED).intValue());
}
Also used: Errors(org.apache.kafka.common.protocol.Errors) StopReplicaPartitionError(org.apache.kafka.common.message.StopReplicaResponseData.StopReplicaPartitionError) ArrayList(java.util.ArrayList) StopReplicaResponseData(org.apache.kafka.common.message.StopReplicaResponseData) Test(org.junit.jupiter.api.Test)
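
The first partition error above never calls setErrorCode, so it keeps the generated default of 0, which Errors.forCode maps back to Errors.NONE; together with the top-level NONE that accounts for the count of 2. A small standalone sketch of the code-to-enum round trip (class name is made up for illustration):

import org.apache.kafka.common.protocol.Errors;

public class ErrorCodeRoundTrip {
    public static void main(String[] args) {
        // The wire protocol carries shorts; Errors.forCode maps them back to the enum.
        short defaultCode = 0;
        System.out.println(Errors.forCode(defaultCode));   // NONE

        // Round trip of an explicit code, as set via setErrorCode(...) above.
        short code = Errors.CLUSTER_AUTHORIZATION_FAILED.code();
        System.out.println(Errors.forCode(code));           // CLUSTER_AUTHORIZATION_FAILED
    }
}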

Example 58 with Errors

Use of org.apache.kafka.common.protocol.Errors in project kafka by apache.

From the class UpdateFeaturesResponseTest, method testErrorCounts.

@Test
public void testErrorCounts() {
    UpdateFeaturesResponseData.UpdatableFeatureResultCollection results = new UpdateFeaturesResponseData.UpdatableFeatureResultCollection();
    results.add(new UpdateFeaturesResponseData.UpdatableFeatureResult().setFeature("foo").setErrorCode(Errors.UNKNOWN_SERVER_ERROR.code()));
    results.add(new UpdateFeaturesResponseData.UpdatableFeatureResult().setFeature("bar").setErrorCode(Errors.UNKNOWN_SERVER_ERROR.code()));
    results.add(new UpdateFeaturesResponseData.UpdatableFeatureResult().setFeature("baz").setErrorCode(Errors.FEATURE_UPDATE_FAILED.code()));
    UpdateFeaturesResponse response = new UpdateFeaturesResponse(new UpdateFeaturesResponseData().setErrorCode(Errors.INVALID_REQUEST.code()).setResults(results));
    Map<Errors, Integer> errorCounts = response.errorCounts();
    assertEquals(3, errorCounts.size());
    assertEquals(1, errorCounts.get(Errors.INVALID_REQUEST).intValue());
    assertEquals(2, errorCounts.get(Errors.UNKNOWN_SERVER_ERROR).intValue());
    assertEquals(1, errorCounts.get(Errors.FEATURE_UPDATE_FAILED).intValue());
}
Also used: Errors(org.apache.kafka.common.protocol.Errors) UpdateFeaturesResponseData(org.apache.kafka.common.message.UpdateFeaturesResponseData) Test(org.junit.jupiter.api.Test)
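
Beyond counting, each Errors constant can be turned back into an exception or a message, which is how errors like the ones asserted above are eventually surfaced to callers. A brief standalone sketch (the class name is made up; only documented Errors methods are used):

import org.apache.kafka.common.protocol.Errors;

public class ErrorsToExceptions {
    public static void main(String[] args) {
        // Errors.NONE maps to no exception at all.
        System.out.println(Errors.NONE.exception());                // null

        // Real error codes map to ApiException subclasses with a default message.
        System.out.println(Errors.UNKNOWN_SERVER_ERROR.exception());
        System.out.println(Errors.FEATURE_UPDATE_FAILED.message());

        // A custom message can also be attached when building the exception.
        System.out.println(Errors.INVALID_REQUEST.exception("feature update rejected"));
    }
}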

Example 59 with Errors

Use of org.apache.kafka.common.protocol.Errors in project kafka by apache.

From the class DeleteConsumerGroupOffsetsHandler, method handleResponse.

@Override
public ApiResult<CoordinatorKey, Map<TopicPartition, Errors>> handleResponse(Node coordinator, Set<CoordinatorKey> groupIds, AbstractResponse abstractResponse) {
    validateKeys(groupIds);
    final OffsetDeleteResponse response = (OffsetDeleteResponse) abstractResponse;
    final Errors error = Errors.forCode(response.data().errorCode());
    if (error != Errors.NONE) {
        final Map<CoordinatorKey, Throwable> failed = new HashMap<>();
        final Set<CoordinatorKey> groupsToUnmap = new HashSet<>();
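        // groupId is the handler's CoordinatorKey field (the group whose offsets are being deleted), not shown in this snippet.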
        handleGroupError(groupId, error, failed, groupsToUnmap);
        return new ApiResult<>(Collections.emptyMap(), failed, new ArrayList<>(groupsToUnmap));
    } else {
        final Map<TopicPartition, Errors> partitionResults = new HashMap<>();
        response.data().topics().forEach(topic ->
            topic.partitions().forEach(partition ->
                partitionResults.put(
                    new TopicPartition(topic.name(), partition.partitionIndex()),
                    Errors.forCode(partition.errorCode()))));
        return ApiResult.completed(groupId, partitionResults);
    }
}
Also used: TopicPartition(org.apache.kafka.common.TopicPartition) Logger(org.slf4j.Logger) AbstractResponse(org.apache.kafka.common.requests.AbstractResponse) OffsetDeleteRequestTopic(org.apache.kafka.common.message.OffsetDeleteRequestData.OffsetDeleteRequestTopic) Set(java.util.Set) HashMap(java.util.HashMap) Collectors(java.util.stream.Collectors) ArrayList(java.util.ArrayList) OffsetDeleteRequest(org.apache.kafka.common.requests.OffsetDeleteRequest) HashSet(java.util.HashSet) OffsetDeleteRequestData(org.apache.kafka.common.message.OffsetDeleteRequestData) OffsetDeleteRequestTopicCollection(org.apache.kafka.common.message.OffsetDeleteRequestData.OffsetDeleteRequestTopicCollection) CoordinatorType(org.apache.kafka.common.requests.FindCoordinatorRequest.CoordinatorType) Map(java.util.Map) LogContext(org.apache.kafka.common.utils.LogContext) Errors(org.apache.kafka.common.protocol.Errors) Node(org.apache.kafka.common.Node) OffsetDeleteResponse(org.apache.kafka.common.requests.OffsetDeleteResponse) OffsetDeleteRequestPartition(org.apache.kafka.common.message.OffsetDeleteRequestData.OffsetDeleteRequestPartition) Collections(java.util.Collections)
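
A caller receiving the handler's partition-level map typically separates successful deletions from failed ones. A hypothetical caller-side sketch (the class name and map contents are made up for illustration):

import java.util.HashMap;
import java.util.Map;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.protocol.Errors;

public class PartitionErrorFilter {
    public static void main(String[] args) {
        // Stand-in for the partitionResults map built by handleResponse above.
        Map<TopicPartition, Errors> partitionResults = new HashMap<>();
        partitionResults.put(new TopicPartition("foo", 0), Errors.NONE);
        partitionResults.put(new TopicPartition("foo", 1), Errors.UNKNOWN_TOPIC_OR_PARTITION);

        // Report only the partitions whose offset delete did not succeed.
        partitionResults.forEach((tp, error) -> {
            if (error != Errors.NONE) {
                System.out.println(tp + " failed: " + error.message());
            }
        });
    }
}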

Example 60 with Errors

Use of org.apache.kafka.common.protocol.Errors in project kafka by apache.

From the class ListTransactionsHandler, method handleResponse.

@Override
public ApiResult<AllBrokersStrategy.BrokerKey, Collection<TransactionListing>> handleResponse(Node broker, Set<AllBrokersStrategy.BrokerKey> keys, AbstractResponse abstractResponse) {
    int brokerId = broker.id();
    AllBrokersStrategy.BrokerKey key = requireSingleton(keys, brokerId);
    ListTransactionsResponse response = (ListTransactionsResponse) abstractResponse;
    Errors error = Errors.forCode(response.data().errorCode());
    if (error == Errors.COORDINATOR_LOAD_IN_PROGRESS) {
        log.debug("The `ListTransactions` request sent to broker {} failed because the " + "coordinator is still loading state. Will try again after backing off", brokerId);
        return ApiResult.empty();
    } else if (error == Errors.COORDINATOR_NOT_AVAILABLE) {
        log.debug("The `ListTransactions` request sent to broker {} failed because the " + "coordinator is shutting down", brokerId);
        return ApiResult.failed(key, new CoordinatorNotAvailableException("ListTransactions " + "request sent to broker " + brokerId + " failed because the coordinator is shutting down"));
    } else if (error != Errors.NONE) {
        log.error("The `ListTransactions` request sent to broker {} failed because of an " + "unexpected error {}", brokerId, error);
        return ApiResult.failed(key, error.exception("ListTransactions request " + "sent to broker " + brokerId + " failed with an unexpected exception"));
    } else {
        List<TransactionListing> listings = response.data().transactionStates().stream()
            .map(transactionState -> new TransactionListing(
                transactionState.transactionalId(),
                transactionState.producerId(),
                TransactionState.parse(transactionState.transactionState())))
            .collect(Collectors.toList());
        return ApiResult.completed(key, listings);
    }
}
Also used: CoordinatorNotAvailableException(org.apache.kafka.common.errors.CoordinatorNotAvailableException) Logger(org.slf4j.Logger) AbstractResponse(org.apache.kafka.common.requests.AbstractResponse) Collection(java.util.Collection) Set(java.util.Set) ListTransactionsRequest(org.apache.kafka.common.requests.ListTransactionsRequest) Collectors(java.util.stream.Collectors) ListTransactionsRequestData(org.apache.kafka.common.message.ListTransactionsRequestData) ArrayList(java.util.ArrayList) TransactionState(org.apache.kafka.clients.admin.TransactionState) ListTransactionsResponse(org.apache.kafka.common.requests.ListTransactionsResponse) List(java.util.List) LogContext(org.apache.kafka.common.utils.LogContext) ListTransactionsOptions(org.apache.kafka.clients.admin.ListTransactionsOptions) Errors(org.apache.kafka.common.protocol.Errors) TransactionListing(org.apache.kafka.clients.admin.TransactionListing) Node(org.apache.kafka.common.Node)
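
The branching above treats COORDINATOR_LOAD_IN_PROGRESS as a transient condition worth retrying and everything else other than NONE as a failure. One generic way to ask whether a code is transient is to check the exception class it maps to; a minimal sketch, assuming the usual Kafka mapping of retriable errors to RetriableException subclasses (the class and helper are illustrative):

import org.apache.kafka.common.errors.RetriableException;
import org.apache.kafka.common.protocol.Errors;

public class RetriableCheck {

    // Illustrative helper, not part of the Errors API itself.
    static boolean isRetriable(Errors error) {
        return error != Errors.NONE && error.exception() instanceof RetriableException;
    }

    public static void main(String[] args) {
        System.out.println(isRetriable(Errors.COORDINATOR_LOAD_IN_PROGRESS)); // true
        System.out.println(isRetriable(Errors.INVALID_REQUEST));              // false
        System.out.println(isRetriable(Errors.NONE));                         // false
    }
}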

Aggregations

Errors (org.apache.kafka.common.protocol.Errors): 167
HashMap (java.util.HashMap): 115
TopicPartition (org.apache.kafka.common.TopicPartition): 87
Map (java.util.Map): 61
ArrayList (java.util.ArrayList): 46
LinkedHashMap (java.util.LinkedHashMap): 31
Test (org.junit.jupiter.api.Test): 31
List (java.util.List): 19
AbstractResponse (org.apache.kafka.common.requests.AbstractResponse): 19
HashSet (java.util.HashSet): 18
TopicAuthorizationException (org.apache.kafka.common.errors.TopicAuthorizationException): 18
InvalidTopicException (org.apache.kafka.common.errors.InvalidTopicException): 17
KafkaFutureImpl (org.apache.kafka.common.internals.KafkaFutureImpl): 17
MetadataResponse (org.apache.kafka.common.requests.MetadataResponse): 17
KafkaException (org.apache.kafka.common.KafkaException): 16
Node (org.apache.kafka.common.Node): 16
Cluster (org.apache.kafka.common.Cluster): 15
OffsetAndMetadata (org.apache.kafka.clients.consumer.OffsetAndMetadata): 14
ChannelBuilder (org.apache.kafka.common.network.ChannelBuilder): 14
Collections (java.util.Collections): 13