Usage example of org.apache.kafka.common.utils.ProducerIdAndEpoch in the Apache Kafka project: class SenderTest, method testForceShutdownWithIncompleteTransaction.
// Verifies that forcibly closing the Sender while a transaction commit is still
// pending causes the pending commit to fail with a KafkaException.
@Timeout(10L)
@Test
public void testForceShutdownWithIncompleteTransaction() {
    // create a sender with retries = 1
    int maxRetries = 1;
    Metrics metrics = new Metrics();
    SenderMetricsRegistry metricsRegistry = new SenderMetricsRegistry(metrics);
    try {
        TransactionManager transactionManager =
            new TransactionManager(logContext, "testForceShutdownWithIncompleteTransaction", 6000, 100, apiVersions);
        Sender sender = new Sender(logContext, client, metadata, this.accumulator, false, MAX_REQUEST_SIZE,
            ACKS_ALL, maxRetries, metricsRegistry, time, REQUEST_TIMEOUT, RETRY_BACKOFF_MS, transactionManager, apiVersions);
        ProducerIdAndEpoch idAndEpoch = new ProducerIdAndEpoch(123456L, (short) 0);
        TopicPartition partition = new TopicPartition("testForceShutdownWithIncompleteTransaction", 1);

        setupWithTransactionState(transactionManager);
        doInitTransactions(transactionManager, idAndEpoch);

        // Open a transaction and register the partition with it.
        transactionManager.beginTransaction();
        transactionManager.maybeAddPartition(partition);
        client.prepareResponse(new AddPartitionsToTxnResponse(0, Collections.singletonMap(partition, Errors.NONE)));
        sender.runOnce();

        // Try to commit the transaction but it won't happen as we'll forcefully close the sender
        TransactionalRequestResult commitResult = transactionManager.beginCommit();
        sender.forceClose();
        sender.run();
        assertThrows(KafkaException.class, commitResult::await,
            "The test expected to throw a KafkaException for forcefully closing the sender");
    } finally {
        metrics.close();
    }
}
Usage example of org.apache.kafka.common.utils.ProducerIdAndEpoch in the Apache Kafka project: class SenderTest, method testTransactionalRequestsSentOnShutdown.
// Ensures that a commit begun before an orderly shutdown still sends its
// EndTxn(COMMIT) request while the sender drains during close.
@Test
public void testTransactionalRequestsSentOnShutdown() {
    // create a sender with retries = 1
    int maxRetries = 1;
    Metrics metrics = new Metrics();
    SenderMetricsRegistry metricsRegistry = new SenderMetricsRegistry(metrics);
    try {
        TransactionManager transactionManager =
            new TransactionManager(logContext, "testTransactionalRequestsSentOnShutdown", 6000, 100, apiVersions);
        Sender sender = new Sender(logContext, client, metadata, this.accumulator, false, MAX_REQUEST_SIZE,
            ACKS_ALL, maxRetries, metricsRegistry, time, REQUEST_TIMEOUT, RETRY_BACKOFF_MS, transactionManager, apiVersions);
        ProducerIdAndEpoch idAndEpoch = new ProducerIdAndEpoch(123456L, (short) 0);
        TopicPartition partition = new TopicPartition("testTransactionalRequestsSentOnShutdown", 1);

        setupWithTransactionState(transactionManager);
        doInitTransactions(transactionManager, idAndEpoch);

        // Open a transaction and register the partition with it.
        transactionManager.beginTransaction();
        transactionManager.maybeAddPartition(partition);
        client.prepareResponse(new AddPartitionsToTxnResponse(0, Collections.singletonMap(partition, Errors.NONE)));
        sender.runOnce();

        // Begin orderly close, then commit; the final run() must still flush the EndTxn request.
        sender.initiateClose();
        transactionManager.beginCommit();
        AssertEndTxnRequestMatcher endTxnMatcher = new AssertEndTxnRequestMatcher(TransactionResult.COMMIT);
        client.prepareResponse(endTxnMatcher,
            new EndTxnResponse(new EndTxnResponseData().setErrorCode(Errors.NONE.code()).setThrottleTimeMs(0)));
        sender.run();
        assertTrue(endTxnMatcher.matched, "Response didn't match in test");
    } finally {
        metrics.close();
    }
}
Usage example of org.apache.kafka.common.utils.ProducerIdAndEpoch in the Apache Kafka project: class SenderTest, method testTransactionAbortedExceptionOnAbortWithoutError.
/**
 * Verifies that when the user aborts a transaction (beginAbort) without any broker-side
 * error, batches still sitting in the accumulator are drained and their futures are
 * completed exceptionally with a TransactionAbortedException.
 */
@Test
public void testTransactionAbortedExceptionOnAbortWithoutError() throws InterruptedException, ExecutionException {
    ProducerIdAndEpoch producerIdAndEpoch = new ProducerIdAndEpoch(123456L, (short) 0);
    TransactionManager txnManager =
        new TransactionManager(logContext, "testTransactionAbortedExceptionOnAbortWithoutError", 60000, 100, apiVersions);
    setupWithTransactionState(txnManager, false, null);
    doInitTransactions(txnManager, producerIdAndEpoch);
    // Begin the transaction
    txnManager.beginTransaction();
    txnManager.maybeAddPartition(tp0);
    client.prepareResponse(new AddPartitionsToTxnResponse(0, Collections.singletonMap(tp0, Errors.NONE)));
    // Run it once so that the partition is added to the transaction.
    sender.runOnce();
    // Append a record to the accumulator.
    // Renamed local from `metadata` to `recordMetadata`: the old name shadowed the
    // test class's `metadata` field (see e.g. `new MockClient(time, metadata)` elsewhere),
    // which made accidental misuse easy.
    FutureRecordMetadata recordMetadata = appendToAccumulator(tp0, time.milliseconds(), "key", "value");
    // Now abort the transaction manually.
    txnManager.beginAbort();
    // Try to send.
    // This should abort the existing transaction and
    // drain all the unsent batches with a TransactionAbortedException.
    sender.runOnce();
    // Now attempt to fetch the result for the record.
    TestUtils.assertFutureThrows(recordMetadata, TransactionAbortedException.class);
}
Usage example of org.apache.kafka.common.utils.ProducerIdAndEpoch in the Apache Kafka project: class SenderTest, method testNodeNotReady.
/**
 * Tests the code path where the target node to send FindCoordinator or InitProducerId
 * is not ready.
 */
@Test
public void testNodeNotReady() {
final long producerId = 123456L;
// MockTime auto-advances 10ms per access, letting request timeouts elapse without real sleeps.
time = new MockTime(10);
client = new MockClient(time, metadata);
TransactionManager transactionManager = new TransactionManager(new LogContext(), "testNodeNotReady", 60000, 100L, new ApiVersions());
setupWithTransactionState(transactionManager, false, null, true);
ProducerIdAndEpoch producerIdAndEpoch = new ProducerIdAndEpoch(producerId, (short) 0);
transactionManager.initializeTransactions();
sender.runOnce();
Node node = metadata.fetch().nodes().get(0);
// Delay the node's readiness past the request timeout so the FindCoordinator
// attempt exercises the not-ready path before the prepared response is consumed.
client.delayReady(node, REQUEST_TIMEOUT + 20);
prepareFindCoordinatorResponse(Errors.NONE, "testNodeNotReady");
// Two iterations: one that finds the node not ready, one that sends and handles the response.
sender.runOnce();
sender.runOnce();
assertNotNull(transactionManager.coordinator(CoordinatorType.TRANSACTION), "Coordinator not found");
// Throttle the node so the follow-up InitProducerId request is likewise delayed past the timeout.
client.throttle(node, REQUEST_TIMEOUT + 20);
// NOTE(review): this second argument looks like it should be the transactional id
// ("testNodeNotReady", as in the call above) rather than the assertion-message text
// "Coordinator not found" — confirm against prepareFindCoordinatorResponse's expected key.
prepareFindCoordinatorResponse(Errors.NONE, "Coordinator not found");
prepareInitProducerResponse(Errors.NONE, producerIdAndEpoch.producerId, producerIdAndEpoch.epoch);
// Blocks until the transaction manager reports the expected producer id and epoch.
waitForProducerId(transactionManager, producerIdAndEpoch);
}
Usage example of org.apache.kafka.common.utils.ProducerIdAndEpoch in the Apache Kafka project: class TransactionManagerTest, method testNoFailedBatchHandlingWhenTxnManagerIsInFatalError.
// A failed batch handled after the transaction manager has entered FATAL_ERROR must not
// bump the idempotent epoch: the producer id/epoch remains exactly where it was.
@Test
public void testNoFailedBatchHandlingWhenTxnManagerIsInFatalError() {
    initializeTransactionManager(Optional.empty());
    final long producerId = 15L;
    final short epoch = 5;
    initializeIdempotentProducerId(producerId, epoch);

    TopicPartition tp0 = new TopicPartition("foo", 0);
    ProducerBatch firstBatch = writeIdempotentBatchWithValue(transactionManager, tp0, "1");

    // Failing the first batch with OutOfOrderSequenceException should bump the epoch.
    transactionManager.handleFailedBatch(firstBatch, new OutOfOrderSequenceException("out of sequence"), false);
    transactionManager.bumpIdempotentEpochAndResetIdIfNeeded();
    ProducerIdAndEpoch expectedIdAndEpoch = new ProducerIdAndEpoch(producerId, (short) (epoch + 1));
    assertEquals(expectedIdAndEpoch, transactionManager.producerIdAndEpoch());

    transactionManager.transitionToFatalError(new KafkaException());

    // In the fatal-error state, handling a second failed batch must leave the epoch untouched.
    ProducerBatch secondBatch = writeIdempotentBatchWithValue(transactionManager, tp0, "2");
    transactionManager.handleFailedBatch(secondBatch, new TimeoutException(), true);
    transactionManager.bumpIdempotentEpochAndResetIdIfNeeded();
    assertEquals(expectedIdAndEpoch, transactionManager.producerIdAndEpoch());
}
Aggregations