Example 66 with Errors

Use of org.apache.kafka.common.protocol.Errors in project kafka by apache.

The class KafkaConsumerTest, method listOffsetsResponse:

private ListOffsetsResponse listOffsetsResponse(Map<TopicPartition, Long> partitionOffsets,
                                                Map<TopicPartition, Errors> partitionErrors) {
    Map<String, ListOffsetsTopicResponse> responses = new HashMap<>();
    // Successful partitions report the requested offset with no error.
    for (Map.Entry<TopicPartition, Long> partitionOffset : partitionOffsets.entrySet()) {
        TopicPartition tp = partitionOffset.getKey();
        ListOffsetsTopicResponse topic = responses.computeIfAbsent(tp.topic(),
            k -> new ListOffsetsTopicResponse().setName(tp.topic()));
        topic.partitions().add(new ListOffsetsPartitionResponse()
            .setPartitionIndex(tp.partition())
            .setErrorCode(Errors.NONE.code())
            .setTimestamp(ListOffsetsResponse.UNKNOWN_TIMESTAMP)
            .setOffset(partitionOffset.getValue()));
    }
    // Failed partitions report the error code with unknown timestamp and offset.
    for (Map.Entry<TopicPartition, Errors> partitionError : partitionErrors.entrySet()) {
        TopicPartition tp = partitionError.getKey();
        ListOffsetsTopicResponse topic = responses.computeIfAbsent(tp.topic(),
            k -> new ListOffsetsTopicResponse().setName(tp.topic()));
        topic.partitions().add(new ListOffsetsPartitionResponse()
            .setPartitionIndex(tp.partition())
            .setErrorCode(partitionError.getValue().code())
            .setTimestamp(ListOffsetsResponse.UNKNOWN_TIMESTAMP)
            .setOffset(ListOffsetsResponse.UNKNOWN_OFFSET));
    }
    ListOffsetsResponseData data = new ListOffsetsResponseData()
        .setTopics(new ArrayList<>(responses.values()));
    return new ListOffsetsResponse(data);
}
Also used: HashMap (java.util.HashMap), LinkedHashMap (java.util.LinkedHashMap), Map (java.util.Map), AbstractMap (java.util.AbstractMap), Collections.singletonMap (java.util.Collections.singletonMap), OptionalLong (java.util.OptionalLong), TopicPartition (org.apache.kafka.common.TopicPartition), Errors (org.apache.kafka.common.protocol.Errors), ListOffsetsResponseData (org.apache.kafka.common.message.ListOffsetsResponseData), ListOffsetsTopicResponse (org.apache.kafka.common.message.ListOffsetsResponseData.ListOffsetsTopicResponse), ListOffsetsPartitionResponse (org.apache.kafka.common.message.ListOffsetsResponseData.ListOffsetsPartitionResponse), ListOffsetsResponse (org.apache.kafka.common.requests.ListOffsetsResponse)
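
For context, a minimal sketch of how a test might drive this helper. The partitions tp0 and tp1 and the printed output are illustrative, and the call assumes the private method above is in scope inside KafkaConsumerTest (with the usual java.util and Kafka imports):

// Hypothetical usage: one partition succeeds with offset 50, the other
// carries a per-partition error.
TopicPartition tp0 = new TopicPartition("topic", 0);
TopicPartition tp1 = new TopicPartition("topic", 1);
ListOffsetsResponse response = listOffsetsResponse(
    Collections.singletonMap(tp0, 50L),
    Collections.singletonMap(tp1, Errors.NOT_LEADER_OR_FOLLOWER));
// errorCounts() aggregates the per-partition codes the helper set.
System.out.println(response.errorCounts()); // e.g. {NONE=1, NOT_LEADER_OR_FOLLOWER=1}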

Example 67 with Errors

Use of org.apache.kafka.common.protocol.Errors in project kafka by apache.

The class TransactionManagerTest, method testTransitionToAbortableErrorOnMultipleBatchExpiry:

@Test
public void testTransitionToAbortableErrorOnMultipleBatchExpiry() throws InterruptedException {
    doInitTransactions();
    transactionManager.beginTransaction();
    transactionManager.maybeAddPartition(tp0);
    transactionManager.maybeAddPartition(tp1);
    Future<RecordMetadata> firstBatchResponse = appendToAccumulator(tp0);
    Future<RecordMetadata> secondBatchResponse = appendToAccumulator(tp1);
    assertFalse(firstBatchResponse.isDone());
    assertFalse(secondBatchResponse.isDone());
    Map<TopicPartition, Errors> partitionErrors = new HashMap<>();
    partitionErrors.put(tp0, Errors.NONE);
    partitionErrors.put(tp1, Errors.NONE);
    prepareAddPartitionsToTxn(partitionErrors);
    assertFalse(transactionManager.transactionContainsPartition(tp0));
    assertFalse(transactionManager.isSendToPartitionAllowed(tp0));
    // Check that only addPartitions was sent.
    runUntil(() -> transactionManager.transactionContainsPartition(tp0));
    assertTrue(transactionManager.isSendToPartitionAllowed(tp0));
    assertTrue(transactionManager.transactionContainsPartition(tp1));
    assertTrue(transactionManager.isSendToPartitionAllowed(tp1));
    assertFalse(firstBatchResponse.isDone());
    assertFalse(secondBatchResponse.isDone());
    // Sleep 10 seconds to make sure that the batches in the queue would be expired if they can't be drained.
    time.sleep(10000);
    // Disconnect the target node for the pending produce request. This ensures that the
    // sender will try to expire the batch.
    Node clusterNode = metadata.fetch().nodes().get(0);
    client.disconnect(clusterNode.idString());
    client.backoff(clusterNode, 100);
    runUntil(firstBatchResponse::isDone);
    runUntil(secondBatchResponse::isDone);
    try {
        // make sure the produce was expired.
        firstBatchResponse.get();
        fail("Expected to get a TimeoutException since the queued ProducerBatch should have been expired");
    } catch (ExecutionException e) {
        assertTrue(e.getCause() instanceof TimeoutException);
    }
    try {
        // make sure the produce was expired.
        secondBatchResponse.get();
        fail("Expected to get a TimeoutException since the queued ProducerBatch should have been expired");
    } catch (ExecutionException e) {
        assertTrue(e.getCause() instanceof TimeoutException);
    }
    assertTrue(transactionManager.hasAbortableError());
}
Also used: RecordMetadata (org.apache.kafka.clients.producer.RecordMetadata), Errors (org.apache.kafka.common.protocol.Errors), HashMap (java.util.HashMap), TopicPartition (org.apache.kafka.common.TopicPartition), Node (org.apache.kafka.common.Node), ExecutionException (java.util.concurrent.ExecutionException), TimeoutException (org.apache.kafka.common.errors.TimeoutException), Test (org.junit.jupiter.api.Test)
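
The two try/fail/catch blocks above can be expressed more compactly with JUnit 5's assertThrows. A sketch assuming the same fixture and the usual static imports from org.junit.jupiter.api.Assertions:

// Equivalent expiry check for one future using assertThrows.
ExecutionException expired = assertThrows(ExecutionException.class, firstBatchResponse::get);
assertTrue(expired.getCause() instanceof TimeoutException);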

Example 68 with Errors

Use of org.apache.kafka.common.protocol.Errors in project kafka by apache.

The class TransactionManagerTest, method testBasicTransaction:

@Test
public void testBasicTransaction() throws InterruptedException {
    doInitTransactions();
    transactionManager.beginTransaction();
    transactionManager.maybeAddPartition(tp0);
    Future<RecordMetadata> responseFuture = appendToAccumulator(tp0);
    assertFalse(responseFuture.isDone());
    prepareAddPartitionsToTxnResponse(Errors.NONE, tp0, epoch, producerId);
    prepareProduceResponse(Errors.NONE, producerId, epoch);
    assertFalse(transactionManager.transactionContainsPartition(tp0));
    assertFalse(transactionManager.isSendToPartitionAllowed(tp0));
    runUntil(() -> transactionManager.transactionContainsPartition(tp0));
    assertTrue(transactionManager.isSendToPartitionAllowed(tp0));
    assertFalse(responseFuture.isDone());
    runUntil(responseFuture::isDone);
    Map<TopicPartition, OffsetAndMetadata> offsets = new HashMap<>();
    offsets.put(tp1, new OffsetAndMetadata(1));
    TransactionalRequestResult addOffsetsResult = transactionManager.sendOffsetsToTransaction(
        offsets, new ConsumerGroupMetadata(consumerGroupId));
    assertFalse(transactionManager.hasPendingOffsetCommits());
    prepareAddOffsetsToTxnResponse(Errors.NONE, consumerGroupId, producerId, epoch);
    runUntil(transactionManager::hasPendingOffsetCommits);
    // the result doesn't complete until TxnOffsetCommit returns
    assertFalse(addOffsetsResult.isCompleted());
    Map<TopicPartition, Errors> txnOffsetCommitResponse = new HashMap<>();
    txnOffsetCommitResponse.put(tp1, Errors.NONE);
    prepareFindCoordinatorResponse(Errors.NONE, false, CoordinatorType.GROUP, consumerGroupId);
    prepareTxnOffsetCommitResponse(consumerGroupId, producerId, epoch, txnOffsetCommitResponse);
    assertNull(transactionManager.coordinator(CoordinatorType.GROUP));
    runUntil(() -> transactionManager.coordinator(CoordinatorType.GROUP) != null);
    assertTrue(transactionManager.hasPendingOffsetCommits());
    runUntil(() -> !transactionManager.hasPendingOffsetCommits());
    // We should only be done after both RPCs complete.
    assertTrue(addOffsetsResult.isCompleted());
    transactionManager.beginCommit();
    prepareEndTxnResponse(Errors.NONE, TransactionResult.COMMIT, producerId, epoch);
    runUntil(() -> !transactionManager.hasOngoingTransaction());
    assertFalse(transactionManager.isCompleting());
    assertFalse(transactionManager.transactionContainsPartition(tp0));
}
Also used: RecordMetadata (org.apache.kafka.clients.producer.RecordMetadata), Errors (org.apache.kafka.common.protocol.Errors), ConsumerGroupMetadata (org.apache.kafka.clients.consumer.ConsumerGroupMetadata), HashMap (java.util.HashMap), TopicPartition (org.apache.kafka.common.TopicPartition), OffsetAndMetadata (org.apache.kafka.clients.consumer.OffsetAndMetadata), Test (org.junit.jupiter.api.Test)
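
The Map<TopicPartition, Errors> shape is also how these prepare* helpers script failure paths. A hypothetical variation, assuming the same fixture as above, that answers the first TxnOffsetCommit with a retriable error so the TransactionManager must retry before the offsets commit:

// First respond with a retriable per-partition error for tp1 ...
Map<TopicPartition, Errors> firstAttempt = new HashMap<>();
firstAttempt.put(tp1, Errors.COORDINATOR_LOAD_IN_PROGRESS);
prepareTxnOffsetCommitResponse(consumerGroupId, producerId, epoch, firstAttempt);
// ... then with success, which the retried request should observe.
Map<TopicPartition, Errors> retry = new HashMap<>();
retry.put(tp1, Errors.NONE);
prepareTxnOffsetCommitResponse(consumerGroupId, producerId, epoch, retry);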

Example 69 with Errors

Use of org.apache.kafka.common.protocol.Errors in project kafka by apache.

The class ApiError, method fromThrowable:

public static ApiError fromThrowable(Throwable t) {
    Throwable throwableToBeEncoded = t;
    // Unwrap the cause for wrapper exceptions from the concurrent library, since the real
    // error may arrive via a future or
    // completion stage (as might be the case for requests sent to the controller in `ControllerApis`).
    if (t instanceof CompletionException || t instanceof ExecutionException) {
        throwableToBeEncoded = t.getCause();
    }
    // Avoid populating the error message if it's a generic one. Also don't populate the error
    // message for UNKNOWN_SERVER_ERROR to ensure we don't leak sensitive information.
    Errors error = Errors.forException(throwableToBeEncoded);
    String message = error == Errors.UNKNOWN_SERVER_ERROR || error.message().equals(throwableToBeEncoded.getMessage())
        ? null
        : throwableToBeEncoded.getMessage();
    return new ApiError(error, message);
}
Also used: Errors (org.apache.kafka.common.protocol.Errors), CompletionException (java.util.concurrent.CompletionException), ExecutionException (java.util.concurrent.ExecutionException)
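
A minimal self-contained sketch of the unwrapping behavior (class name and message are illustrative): a NotControllerException wrapped in an ExecutionException is unwrapped before the Errors lookup, so the encoded error is NOT_CONTROLLER rather than UNKNOWN_SERVER_ERROR.

import java.util.concurrent.ExecutionException;
import org.apache.kafka.common.errors.NotControllerException;
import org.apache.kafka.common.protocol.Errors;
import org.apache.kafka.common.requests.ApiError;

public class ApiErrorUnwrapExample {
    public static void main(String[] args) {
        ApiError wrapped = ApiError.fromThrowable(
            new ExecutionException(new NotControllerException("moved")));
        // The cause, not the wrapper, determines the error code.
        System.out.println(wrapped.error() == Errors.NOT_CONTROLLER); // true
    }
}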

Example 70 with Errors

Use of org.apache.kafka.common.protocol.Errors in project kafka by apache.

The class AlterClientQuotasResponse, method complete:

public void complete(Map<ClientQuotaEntity, KafkaFutureImpl<Void>> futures) {
    for (EntryData entryData : data.entries()) {
        Map<String, String> entityEntries = new HashMap<>(entryData.entity().size());
        for (EntityData entityData : entryData.entity()) {
            entityEntries.put(entityData.entityType(), entityData.entityName());
        }
        ClientQuotaEntity entity = new ClientQuotaEntity(entityEntries);
        KafkaFutureImpl<Void> future = futures.get(entity);
        if (future == null) {
            throw new IllegalArgumentException("Future map must contain entity " + entity);
        }
        Errors error = Errors.forCode(entryData.errorCode());
        if (error == Errors.NONE) {
            future.complete(null);
        } else {
            future.completeExceptionally(error.exception(entryData.errorMessage()));
        }
    }
}
Also used: Errors (org.apache.kafka.common.protocol.Errors), EntryData (org.apache.kafka.common.message.AlterClientQuotasResponseData.EntryData), EntityData (org.apache.kafka.common.message.AlterClientQuotasResponseData.EntityData), HashMap (java.util.HashMap), ClientQuotaEntity (org.apache.kafka.common.quota.ClientQuotaEntity)
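
The forCode/exception round trip used above, in isolation. A minimal self-contained sketch (class name and message are illustrative):

import org.apache.kafka.common.errors.ApiException;
import org.apache.kafka.common.protocol.Errors;

public class ErrorsRoundTripExample {
    public static void main(String[] args) {
        // A wire-level code arrives from the broker ...
        Errors error = Errors.forCode(Errors.INVALID_REQUEST.code());
        if (error != Errors.NONE) {
            // ... and is mapped back to the matching exception type, carrying
            // the server-provided message when one is set.
            ApiException e = error.exception("rejected by broker");
            System.out.println(e.getClass().getSimpleName() + ": " + e.getMessage());
        }
    }
}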

Aggregations

Errors (org.apache.kafka.common.protocol.Errors): 167
HashMap (java.util.HashMap): 115
TopicPartition (org.apache.kafka.common.TopicPartition): 87
Map (java.util.Map): 61
ArrayList (java.util.ArrayList): 46
LinkedHashMap (java.util.LinkedHashMap): 31
Test (org.junit.jupiter.api.Test): 31
List (java.util.List): 19
AbstractResponse (org.apache.kafka.common.requests.AbstractResponse): 19
HashSet (java.util.HashSet): 18
TopicAuthorizationException (org.apache.kafka.common.errors.TopicAuthorizationException): 18
InvalidTopicException (org.apache.kafka.common.errors.InvalidTopicException): 17
KafkaFutureImpl (org.apache.kafka.common.internals.KafkaFutureImpl): 17
MetadataResponse (org.apache.kafka.common.requests.MetadataResponse): 17
KafkaException (org.apache.kafka.common.KafkaException): 16
Node (org.apache.kafka.common.Node): 16
Cluster (org.apache.kafka.common.Cluster): 15
OffsetAndMetadata (org.apache.kafka.clients.consumer.OffsetAndMetadata): 14
ChannelBuilder (org.apache.kafka.common.network.ChannelBuilder): 14
Collections (java.util.Collections): 13