Use of org.apache.kafka.common.protocol.Errors in project kafka by apache.
The class KafkaConsumerTest, method listOffsetsResponse.
private ListOffsetsResponse listOffsetsResponse(Map<TopicPartition, Long> partitionOffsets, Map<TopicPartition, Errors> partitionErrors) {
    Map<String, ListOffsetsTopicResponse> responses = new HashMap<>();
    for (Map.Entry<TopicPartition, Long> partitionOffset : partitionOffsets.entrySet()) {
        TopicPartition tp = partitionOffset.getKey();
        ListOffsetsTopicResponse topic = responses.computeIfAbsent(tp.topic(), k -> new ListOffsetsTopicResponse().setName(tp.topic()));
        topic.partitions().add(new ListOffsetsPartitionResponse()
            .setPartitionIndex(tp.partition())
            .setErrorCode(Errors.NONE.code())
            .setTimestamp(ListOffsetsResponse.UNKNOWN_TIMESTAMP)
            .setOffset(partitionOffset.getValue()));
    }
    for (Map.Entry<TopicPartition, Errors> partitionError : partitionErrors.entrySet()) {
        TopicPartition tp = partitionError.getKey();
        ListOffsetsTopicResponse topic = responses.computeIfAbsent(tp.topic(), k -> new ListOffsetsTopicResponse().setName(tp.topic()));
        topic.partitions().add(new ListOffsetsPartitionResponse()
            .setPartitionIndex(tp.partition())
            .setErrorCode(partitionError.getValue().code())
            .setTimestamp(ListOffsetsResponse.UNKNOWN_TIMESTAMP)
            .setOffset(ListOffsetsResponse.UNKNOWN_OFFSET));
    }
    ListOffsetsResponseData data = new ListOffsetsResponseData().setTopics(new ArrayList<>(responses.values()));
    return new ListOffsetsResponse(data);
}
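For context, here is a short sketch (not from the Kafka sources) of how a test might read the per-partition Errors back out of a response built by this helper. The partitions tp0 and tp1 and the specific error chosen are assumptions.

// Hypothetical usage of the helper above; tp0/tp1 are assumed test fixtures.
// Assumes imports of java.util.Collections and the ListOffsets* classes used above.
ListOffsetsResponse response = listOffsetsResponse(
        Collections.singletonMap(tp0, 50L),
        Collections.singletonMap(tp1, Errors.NOT_LEADER_OR_FOLLOWER));
for (ListOffsetsTopicResponse topic : response.data().topics()) {
    for (ListOffsetsPartitionResponse partition : topic.partitions()) {
        // Errors.forCode maps the wire-level error code back to the enum constant.
        Errors error = Errors.forCode(partition.errorCode());
        System.out.println(topic.name() + "-" + partition.partitionIndex() + ": " + error);
    }
}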
Use of org.apache.kafka.common.protocol.Errors in project kafka by apache.
The class TransactionManagerTest, method testTransitionToAbortableErrorOnMultipleBatchExpiry.
@Test
public void testTransitionToAbortableErrorOnMultipleBatchExpiry() throws InterruptedException {
    doInitTransactions();
    transactionManager.beginTransaction();
    transactionManager.maybeAddPartition(tp0);
    transactionManager.maybeAddPartition(tp1);
    Future<RecordMetadata> firstBatchResponse = appendToAccumulator(tp0);
    Future<RecordMetadata> secondBatchResponse = appendToAccumulator(tp1);
    assertFalse(firstBatchResponse.isDone());
    assertFalse(secondBatchResponse.isDone());
    Map<TopicPartition, Errors> partitionErrors = new HashMap<>();
    partitionErrors.put(tp0, Errors.NONE);
    partitionErrors.put(tp1, Errors.NONE);
    prepareAddPartitionsToTxn(partitionErrors);
    assertFalse(transactionManager.transactionContainsPartition(tp0));
    assertFalse(transactionManager.isSendToPartitionAllowed(tp0));
    // Check that only addPartitions was sent.
    runUntil(() -> transactionManager.transactionContainsPartition(tp0));
    assertTrue(transactionManager.isSendToPartitionAllowed(tp0));
    assertTrue(transactionManager.transactionContainsPartition(tp1));
    assertTrue(transactionManager.isSendToPartitionAllowed(tp1));
    assertFalse(firstBatchResponse.isDone());
    assertFalse(secondBatchResponse.isDone());
    // Sleep 10 seconds to make sure that the batches in the queue would be expired if they can't be drained.
    time.sleep(10000);
    // Disconnect the target node for the pending produce request. This will ensure that the sender
    // will try to expire the batches.
    Node clusterNode = metadata.fetch().nodes().get(0);
    client.disconnect(clusterNode.idString());
    client.backoff(clusterNode, 100);
    runUntil(firstBatchResponse::isDone);
    runUntil(secondBatchResponse::isDone);
    try {
        // Make sure the first produce was expired.
        firstBatchResponse.get();
        fail("Expected to get a TimeoutException since the queued ProducerBatch should have been expired");
    } catch (ExecutionException e) {
        assertTrue(e.getCause() instanceof TimeoutException);
    }
    try {
        // Make sure the second produce was expired.
        secondBatchResponse.get();
        fail("Expected to get a TimeoutException since the queued ProducerBatch should have been expired");
    } catch (ExecutionException e) {
        assertTrue(e.getCause() instanceof TimeoutException);
    }
    assertTrue(transactionManager.hasAbortableError());
}
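The test drives the TransactionManager directly. At the public API level, the abortable-error state it verifies surfaces to applications roughly as sketched below; the producer variable, topic name, and recovery choice are assumptions, not part of the test.

// Hypothetical application-side handling of the state this test verifies.
// Once expired batches move the transaction into an abortable error state,
// commitTransaction() would fail, so the application aborts instead.
try {
    producer.send(new ProducerRecord<>("my-topic", "key", "value")).get();
    producer.commitTransaction();
} catch (ExecutionException e) {
    if (e.getCause() instanceof org.apache.kafka.common.errors.TimeoutException) {
        producer.abortTransaction(); // clears the abortable error; a new transaction can begin
    }
}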
Use of org.apache.kafka.common.protocol.Errors in project kafka by apache.
The class TransactionManagerTest, method testBasicTransaction.
@Test
public void testBasicTransaction() throws InterruptedException {
    doInitTransactions();
    transactionManager.beginTransaction();
    transactionManager.maybeAddPartition(tp0);
    Future<RecordMetadata> responseFuture = appendToAccumulator(tp0);
    assertFalse(responseFuture.isDone());
    prepareAddPartitionsToTxnResponse(Errors.NONE, tp0, epoch, producerId);
    prepareProduceResponse(Errors.NONE, producerId, epoch);
    assertFalse(transactionManager.transactionContainsPartition(tp0));
    assertFalse(transactionManager.isSendToPartitionAllowed(tp0));
    runUntil(() -> transactionManager.transactionContainsPartition(tp0));
    assertTrue(transactionManager.isSendToPartitionAllowed(tp0));
    assertFalse(responseFuture.isDone());
    runUntil(responseFuture::isDone);
    Map<TopicPartition, OffsetAndMetadata> offsets = new HashMap<>();
    offsets.put(tp1, new OffsetAndMetadata(1));
    TransactionalRequestResult addOffsetsResult = transactionManager.sendOffsetsToTransaction(offsets, new ConsumerGroupMetadata(consumerGroupId));
    assertFalse(transactionManager.hasPendingOffsetCommits());
    prepareAddOffsetsToTxnResponse(Errors.NONE, consumerGroupId, producerId, epoch);
    runUntil(transactionManager::hasPendingOffsetCommits);
    // The result doesn't complete until TxnOffsetCommit returns.
    assertFalse(addOffsetsResult.isCompleted());
    Map<TopicPartition, Errors> txnOffsetCommitResponse = new HashMap<>();
    txnOffsetCommitResponse.put(tp1, Errors.NONE);
    prepareFindCoordinatorResponse(Errors.NONE, false, CoordinatorType.GROUP, consumerGroupId);
    prepareTxnOffsetCommitResponse(consumerGroupId, producerId, epoch, txnOffsetCommitResponse);
    assertNull(transactionManager.coordinator(CoordinatorType.GROUP));
    runUntil(() -> transactionManager.coordinator(CoordinatorType.GROUP) != null);
    assertTrue(transactionManager.hasPendingOffsetCommits());
    runUntil(() -> !transactionManager.hasPendingOffsetCommits());
    // We should only be done after both RPCs complete.
    assertTrue(addOffsetsResult.isCompleted());
    transactionManager.beginCommit();
    prepareEndTxnResponse(Errors.NONE, TransactionResult.COMMIT, producerId, epoch);
    runUntil(() -> !transactionManager.hasOngoingTransaction());
    assertFalse(transactionManager.isCompleting());
    assertFalse(transactionManager.transactionContainsPartition(tp0));
}
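For reference, a hedged sketch of the public producer API flow that this test exercises internally; the bootstrap address, topic names, group id, and transactional id are assumptions.

// Hypothetical end-to-end flow; each call maps onto an RPC the test mocks.
// Assumes imports of KafkaProducer, ProducerConfig, ProducerRecord,
// StringSerializer, ConsumerGroupMetadata, OffsetAndMetadata, and Collections.
Properties props = new Properties();
props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // assumed address
props.put(ProducerConfig.TRANSACTIONAL_ID_CONFIG, "example-txn-id");  // assumed id
try (KafkaProducer<String, String> producer =
        new KafkaProducer<>(props, new StringSerializer(), new StringSerializer())) {
    producer.initTransactions();                                 // InitProducerId
    producer.beginTransaction();
    producer.send(new ProducerRecord<>("out-topic", "k", "v"));  // AddPartitionsToTxn + Produce
    producer.sendOffsetsToTransaction(
            Collections.singletonMap(new TopicPartition("in-topic", 0), new OffsetAndMetadata(1)),
            new ConsumerGroupMetadata("example-group"));         // AddOffsetsToTxn + FindCoordinator + TxnOffsetCommit
    producer.commitTransaction();                                // EndTxn(COMMIT)
}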
Use of org.apache.kafka.common.protocol.Errors in project kafka by apache.
The class ApiError, method fromThrowable.
public static ApiError fromThrowable(Throwable t) {
    Throwable throwableToBeEncoded = t;
    // Unwrap the cause if the error was rethrown from a future or a
    // completion stage (as might be the case for requests sent to the controller in `ControllerApis`)
    if (t instanceof CompletionException || t instanceof ExecutionException) {
        throwableToBeEncoded = t.getCause();
    }
    // Avoid populating the error message if it's a generic one. Also don't populate the error
    // message for UNKNOWN_SERVER_ERROR to ensure we don't leak sensitive information.
    Errors error = Errors.forException(throwableToBeEncoded);
    String message = error == Errors.UNKNOWN_SERVER_ERROR || error.message().equals(throwableToBeEncoded.getMessage())
        ? null
        : throwableToBeEncoded.getMessage();
    return new ApiError(error, message);
}
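A small illustration (not from the Kafka sources) of the two branches above; the exception instances are assumptions chosen to trigger each path.

// A known exception type is unwrapped and keeps its message...
ApiError mapped = ApiError.fromThrowable(
        new CompletionException(new InvalidRequestException("bad partition count")));
// mapped.error() == Errors.INVALID_REQUEST; mapped.message() == "bad partition count"

// ...while an unmapped one falls back to UNKNOWN_SERVER_ERROR with a null
// message, so internal details are never echoed back to the client.
ApiError generic = ApiError.fromThrowable(new NullPointerException("internal detail"));
// generic.error() == Errors.UNKNOWN_SERVER_ERROR; generic.message() == null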
Use of org.apache.kafka.common.protocol.Errors in project kafka by apache.
The class AlterClientQuotasResponse, method complete.
public void complete(Map<ClientQuotaEntity, KafkaFutureImpl<Void>> futures) {
    for (EntryData entryData : data.entries()) {
        Map<String, String> entityEntries = new HashMap<>(entryData.entity().size());
        for (EntityData entityData : entryData.entity()) {
            entityEntries.put(entityData.entityType(), entityData.entityName());
        }
        ClientQuotaEntity entity = new ClientQuotaEntity(entityEntries);
        KafkaFutureImpl<Void> future = futures.get(entity);
        if (future == null) {
            throw new IllegalArgumentException("Future map must contain entity " + entity);
        }
        Errors error = Errors.forCode(entryData.errorCode());
        if (error == Errors.NONE) {
            future.complete(null);
        } else {
            future.completeExceptionally(error.exception(entryData.errorMessage()));
        }
    }
}
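To round this out, a hedged sketch of completing a caller-supplied future through this method; the entity values and the error chosen here are assumptions.

// Hypothetical pairing of a future with the entity the response reports on.
// Assumes imports of AlterClientQuotasResponseData.{EntryData, EntityData},
// ClientQuotaEntity, KafkaFutureImpl, and java.util.Collections.
ClientQuotaEntity entity = new ClientQuotaEntity(
        Collections.singletonMap(ClientQuotaEntity.USER, "alice"));
Map<ClientQuotaEntity, KafkaFutureImpl<Void>> futures =
        Collections.singletonMap(entity, new KafkaFutureImpl<>());

AlterClientQuotasResponseData data = new AlterClientQuotasResponseData()
        .setEntries(Collections.singletonList(new EntryData()
                .setEntity(Collections.singletonList(new EntityData()
                        .setEntityType(ClientQuotaEntity.USER)
                        .setEntityName("alice")))
                .setErrorCode(Errors.INVALID_REQUEST.code())
                .setErrorMessage("unsupported quota key")));

new AlterClientQuotasResponse(data).complete(futures);
// The non-NONE code is surfaced as the future's exception via error.exception(...).
futures.get(entity).whenComplete((ignored, error) ->
        System.err.println("alter quotas for " + entity + " failed: " + error.getMessage()));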