Use of org.apache.kafka.clients.producer.RecordMetadata in project kafka by apache.
From the class ProducerInterceptorsTest, method testOnAcknowledgementChain.
@Test
public void testOnAcknowledgementChain() {
    List<ProducerInterceptor<Integer, String>> interceptorList = new ArrayList<>();
    // we are testing two different interceptors by configuring the same interceptor differently, which is not
    // how it would be done in KafkaProducer, but ok for testing interceptor callbacks
    AppendProducerInterceptor interceptor1 = new AppendProducerInterceptor("One");
    AppendProducerInterceptor interceptor2 = new AppendProducerInterceptor("Two");
    interceptorList.add(interceptor1);
    interceptorList.add(interceptor2);
    ProducerInterceptors<Integer, String> interceptors = new ProducerInterceptors<>(interceptorList);
    // verify onAck is called on all interceptors
    RecordMetadata meta = new RecordMetadata(tp, 0, 0, 0, 0, 0);
    interceptors.onAcknowledgement(meta, null);
    assertEquals(2, onAckCount);
    // verify that onAcknowledgement exceptions do not propagate
    interceptor1.injectOnAcknowledgementError(true);
    interceptors.onAcknowledgement(meta, null);
    assertEquals(4, onAckCount);
    interceptor2.injectOnAcknowledgementError(true);
    interceptors.onAcknowledgement(meta, null);
    assertEquals(6, onAckCount);
    interceptors.close();
}
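The test above depends on an AppendProducerInterceptor helper and an onAckCount counter that are defined elsewhere in ProducerInterceptorsTest and are not shown on this page. A minimal sketch of what such a helper might look like, assuming onAckCount is a field of the enclosing test class and that error injection simply throws from the callback:

// Hypothetical sketch of the helper; the real class in ProducerInterceptorsTest may differ.
// Assumes org.apache.kafka.clients.producer.*, org.apache.kafka.common.KafkaException,
// and java.util.Map are imported.
private class AppendProducerInterceptor implements ProducerInterceptor<Integer, String> {
    private final String appendStr;
    private boolean throwExceptionOnAck = false;

    AppendProducerInterceptor(String appendStr) {
        this.appendStr = appendStr;
    }

    @Override
    public void configure(Map<String, ?> configs) {
    }

    @Override
    public ProducerRecord<Integer, String> onSend(ProducerRecord<Integer, String> record) {
        // tag the value so the effect of each interceptor in the chain is observable
        return new ProducerRecord<>(record.topic(), record.partition(), record.key(), record.value() + appendStr);
    }

    @Override
    public void onAcknowledgement(RecordMetadata metadata, Exception exception) {
        onAckCount++;
        if (throwExceptionOnAck)
            throw new KafkaException("injected exception in onAcknowledgement");
    }

    @Override
    public void close() {
    }

    void injectOnAcknowledgementError(boolean on) {
        throwExceptionOnAck = on;
    }
}

ProducerInterceptors wraps each interceptor callback in its own try/catch and logs rather than rethrows, which is exactly the behavior the injected errors above verify: onAckCount keeps advancing by 2 even while one or both interceptors throw.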
Use of org.apache.kafka.clients.producer.RecordMetadata in project kafka by apache.
From the class TransactionManagerTest, method testTransitionToAbortableErrorOnMultipleBatchExpiry.
@Test
public void testTransitionToAbortableErrorOnMultipleBatchExpiry() throws InterruptedException {
    doInitTransactions();
    transactionManager.beginTransaction();
    transactionManager.maybeAddPartition(tp0);
    transactionManager.maybeAddPartition(tp1);
    Future<RecordMetadata> firstBatchResponse = appendToAccumulator(tp0);
    Future<RecordMetadata> secondBatchResponse = appendToAccumulator(tp1);
    assertFalse(firstBatchResponse.isDone());
    assertFalse(secondBatchResponse.isDone());
    Map<TopicPartition, Errors> partitionErrors = new HashMap<>();
    partitionErrors.put(tp0, Errors.NONE);
    partitionErrors.put(tp1, Errors.NONE);
    prepareAddPartitionsToTxn(partitionErrors);
    assertFalse(transactionManager.transactionContainsPartition(tp0));
    assertFalse(transactionManager.isSendToPartitionAllowed(tp0));
    // Check that only addPartitions was sent.
    runUntil(() -> transactionManager.transactionContainsPartition(tp0));
    assertTrue(transactionManager.transactionContainsPartition(tp1));
    assertTrue(transactionManager.isSendToPartitionAllowed(tp0));
    assertTrue(transactionManager.isSendToPartitionAllowed(tp1));
    assertFalse(firstBatchResponse.isDone());
    assertFalse(secondBatchResponse.isDone());
    // Sleep 10 seconds to make sure that the batches in the queue would be expired if they can't be drained.
    time.sleep(10000);
    // Disconnect the target node for the pending produce request. This will ensure that sender will try to
    // expire the batch.
    Node clusterNode = metadata.fetch().nodes().get(0);
    client.disconnect(clusterNode.idString());
    client.backoff(clusterNode, 100);
    runUntil(firstBatchResponse::isDone);
    runUntil(secondBatchResponse::isDone);
    try {
        // make sure the produce was expired.
        firstBatchResponse.get();
        fail("Expected to get a TimeoutException since the queued ProducerBatch should have been expired");
    } catch (ExecutionException e) {
        assertTrue(e.getCause() instanceof TimeoutException);
    }
    try {
        // make sure the produce was expired.
        secondBatchResponse.get();
        fail("Expected to get a TimeoutException since the queued ProducerBatch should have been expired");
    } catch (ExecutionException e) {
        assertTrue(e.getCause() instanceof TimeoutException);
    }
    assertTrue(transactionManager.hasAbortableError());
}
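As an aside, the try/catch-and-fail idiom used twice above can be written more compactly with JUnit 5's assertThrows, which also fails the test if no exception is thrown at all. A sketch of the equivalent check for the first future:

// assumes a static import of org.junit.jupiter.api.Assertions.assertThrows
ExecutionException e = assertThrows(ExecutionException.class, firstBatchResponse::get,
        "Expected a TimeoutException since the queued ProducerBatch should have been expired");
assertTrue(e.getCause() instanceof TimeoutException);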
Use of org.apache.kafka.clients.producer.RecordMetadata in project kafka by apache.
From the class TransactionManagerTest, method testRecoveryFromAbortableErrorTransactionStarted.
@Test
public void testRecoveryFromAbortableErrorTransactionStarted() throws Exception {
    final TopicPartition unauthorizedPartition = new TopicPartition("foo", 0);
    doInitTransactions();
    transactionManager.beginTransaction();
    transactionManager.maybeAddPartition(tp0);
    prepareAddPartitionsToTxn(tp0, Errors.NONE);
    Future<RecordMetadata> authorizedTopicProduceFuture = appendToAccumulator(tp0);
    runUntil(() -> transactionManager.isPartitionAdded(tp0));
    transactionManager.maybeAddPartition(unauthorizedPartition);
    Future<RecordMetadata> unauthorizedTopicProduceFuture = appendToAccumulator(unauthorizedPartition);
    prepareAddPartitionsToTxn(singletonMap(unauthorizedPartition, Errors.TOPIC_AUTHORIZATION_FAILED));
    runUntil(transactionManager::hasAbortableError);
    assertTrue(transactionManager.isPartitionAdded(tp0));
    assertFalse(transactionManager.isPartitionAdded(unauthorizedPartition));
    assertFalse(authorizedTopicProduceFuture.isDone());
    assertFalse(unauthorizedTopicProduceFuture.isDone());
    prepareEndTxnResponse(Errors.NONE, TransactionResult.ABORT, producerId, epoch);
    TransactionalRequestResult result = transactionManager.beginAbort();
    runUntil(transactionManager::isReady);
    // neither produce request has been sent, so they should both be failed immediately
    assertProduceFutureFailed(authorizedTopicProduceFuture);
    assertProduceFutureFailed(unauthorizedTopicProduceFuture);
    assertFalse(transactionManager.hasPartitionsToAdd());
    assertFalse(accumulator.hasIncomplete());
    assertTrue(result.isSuccessful());
    result.await();
    // ensure we can now start a new transaction
    transactionManager.beginTransaction();
    transactionManager.maybeAddPartition(tp0);
    Future<RecordMetadata> nextTransactionFuture = appendToAccumulator(tp0);
    prepareAddPartitionsToTxn(singletonMap(tp0, Errors.NONE));
    runUntil(() -> transactionManager.isPartitionAdded(tp0));
    assertFalse(transactionManager.hasPartitionsToAdd());
    transactionManager.beginCommit();
    prepareProduceResponse(Errors.NONE, producerId, epoch);
    runUntil(nextTransactionFuture::isDone);
    assertNotNull(nextTransactionFuture.get());
    prepareEndTxnResponse(Errors.NONE, TransactionResult.COMMIT, producerId, epoch);
    runUntil(transactionManager::isReady);
}
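The abort-and-retry recovery exercised here at the TransactionManager level corresponds to the standard transactional pattern at the public KafkaProducer level: abortable errors are handled by aborting and starting a fresh transaction on the same producer, while fatal errors require closing it. A minimal sketch of that pattern; the broker address, transactional id, and topic name are placeholders:

Properties props = new Properties();
props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");          // placeholder
props.put(ProducerConfig.TRANSACTIONAL_ID_CONFIG, "example-transactional-id"); // placeholder
props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, IntegerSerializer.class.getName());
props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());

try (KafkaProducer<Integer, String> producer = new KafkaProducer<>(props)) {
    producer.initTransactions();
    try {
        producer.beginTransaction();
        producer.send(new ProducerRecord<>("example-topic", 1, "value")); // placeholder topic
        producer.commitTransaction();
    } catch (ProducerFencedException | OutOfOrderSequenceException | AuthorizationException e) {
        // fatal errors: the producer must be closed (try-with-resources handles it)
        throw e;
    } catch (KafkaException e) {
        // abortable error: abort, then the same producer may begin a new transaction
        producer.abortTransaction();
    }
}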
Use of org.apache.kafka.clients.producer.RecordMetadata in project kafka by apache.
From the class TransactionManagerTest, method testTransitionToAbortableErrorOnBatchExpiry.
@Test
public void testTransitionToAbortableErrorOnBatchExpiry() throws InterruptedException {
    doInitTransactions();
    transactionManager.beginTransaction();
    transactionManager.maybeAddPartition(tp0);
    Future<RecordMetadata> responseFuture = appendToAccumulator(tp0);
    assertFalse(responseFuture.isDone());
    prepareAddPartitionsToTxnResponse(Errors.NONE, tp0, epoch, producerId);
    assertFalse(transactionManager.transactionContainsPartition(tp0));
    assertFalse(transactionManager.isSendToPartitionAllowed(tp0));
    // Check that only addPartitions was sent.
    runUntil(() -> transactionManager.transactionContainsPartition(tp0));
    assertTrue(transactionManager.isSendToPartitionAllowed(tp0));
    assertFalse(responseFuture.isDone());
    // Sleep 10 seconds to make sure that the batches in the queue would be expired if they can't be drained.
    time.sleep(10000);
    // Disconnect the target node for the pending produce request. This will ensure that sender will try to
    // expire the batch.
    Node clusterNode = metadata.fetch().nodes().get(0);
    client.disconnect(clusterNode.idString());
    client.backoff(clusterNode, 100);
    runUntil(responseFuture::isDone);
    try {
        // make sure the produce was expired.
        responseFuture.get();
        fail("Expected to get a TimeoutException since the queued ProducerBatch should have been expired");
    } catch (ExecutionException e) {
        assertTrue(e.getCause() instanceof TimeoutException);
    }
    assertTrue(transactionManager.hasAbortableError());
}
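The 10-second sleep is enough because batch expiry in the producer is governed by the delivery timeout: once delivery.timeout.ms elapses before a batch is acknowledged, the batch is failed with a TimeoutException, which is the cause asserted above. Presumably the test harness configures its accumulator with a delivery timeout below 10 seconds. For a real producer the relevant settings look like this; the values are illustrative only:

Properties props = new Properties();
// fail any batch not acknowledged within 10 seconds
props.put(ProducerConfig.DELIVERY_TIMEOUT_MS_CONFIG, 10_000);
// each individual request attempt times out sooner, leaving room for retries
props.put(ProducerConfig.REQUEST_TIMEOUT_MS_CONFIG, 5_000);
// delivery.timeout.ms must be >= linger.ms + request.timeout.ms
props.put(ProducerConfig.LINGER_MS_CONFIG, 100);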
Use of org.apache.kafka.clients.producer.RecordMetadata in project kafka by apache.
From the class TransactionManagerTest, method testAbortTransactionAndResetSequenceNumberOnUnknownProducerId.
@Test
public void testAbortTransactionAndResetSequenceNumberOnUnknownProducerId() throws InterruptedException {
    // Set the InitProducerId version such that bumping the epoch number is not supported. This will test the case
    // where the sequence number is reset on an UnknownProducerId error, allowing subsequent transactions to
    // append to the log successfully
    apiVersions.update("0", new NodeApiVersions(Arrays.asList(
        new ApiVersion().setApiKey(ApiKeys.INIT_PRODUCER_ID.id).setMinVersion((short) 0).setMaxVersion((short) 1),
        new ApiVersion().setApiKey(ApiKeys.PRODUCE.id).setMinVersion((short) 0).setMaxVersion((short) 7))));
    doInitTransactions();
    transactionManager.beginTransaction();
    transactionManager.maybeAddPartition(tp1);
    Future<RecordMetadata> successPartitionResponseFuture = appendToAccumulator(tp1);
    prepareAddPartitionsToTxnResponse(Errors.NONE, tp1, epoch, producerId);
    prepareProduceResponse(Errors.NONE, producerId, epoch, tp1);
    runUntil(successPartitionResponseFuture::isDone);
    assertTrue(transactionManager.isPartitionAdded(tp1));
    transactionManager.maybeAddPartition(tp0);
    Future<RecordMetadata> responseFuture0 = appendToAccumulator(tp0);
    prepareAddPartitionsToTxnResponse(Errors.NONE, tp0, epoch, producerId);
    prepareProduceResponse(Errors.NONE, producerId, epoch);
    runUntil(responseFuture0::isDone);
    assertTrue(transactionManager.isPartitionAdded(tp0));
    Future<RecordMetadata> responseFuture1 = appendToAccumulator(tp0);
    prepareProduceResponse(Errors.NONE, producerId, epoch);
    runUntil(responseFuture1::isDone);
    Future<RecordMetadata> responseFuture2 = appendToAccumulator(tp0);
    client.prepareResponse(produceRequestMatcher(producerId, epoch, tp0),
        produceResponse(tp0, 0, Errors.UNKNOWN_PRODUCER_ID, 0, 0));
    runUntil(responseFuture2::isDone);
    assertTrue(transactionManager.hasAbortableError());
    TransactionalRequestResult abortResult = transactionManager.beginAbort();
    prepareEndTxnResponse(Errors.NONE, TransactionResult.ABORT, producerId, epoch);
    runUntil(abortResult::isCompleted);
    assertTrue(abortResult.isSuccessful());
    abortResult.await();
    // make sure we are ready for a transaction now.
    assertTrue(transactionManager.isReady());
    transactionManager.beginTransaction();
    transactionManager.maybeAddPartition(tp0);
    prepareAddPartitionsToTxnResponse(Errors.NONE, tp0, epoch, producerId);
    runUntil(() -> transactionManager.isPartitionAdded(tp0));
    assertEquals(0, transactionManager.sequenceNumber(tp0).intValue());
    assertEquals(1, transactionManager.sequenceNumber(tp1).intValue());
}
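Finally, since every snippet on this page revolves around RecordMetadata, here is a minimal sketch of how it is consumed outside of tests: a send returns Future<RecordMetadata>, and the completed metadata exposes the record's final position in the log. This assumes an already configured KafkaProducer<String, String> named producer and a placeholder topic:

Future<RecordMetadata> future = producer.send(new ProducerRecord<>("example-topic", "key", "value"));
RecordMetadata metadata = future.get(); // blocks; throws ExecutionException if the send failed
System.out.printf("topic=%s partition=%d offset=%d timestamp=%d%n",
        metadata.topic(), metadata.partition(), metadata.offset(), metadata.timestamp());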