Use of org.apache.kafka.clients.producer.RecordMetadata in project kafka by apache: class TransactionManagerTest, method testFindCoordinatorAllowedInAbortableErrorState.
@Test
public void testFindCoordinatorAllowedInAbortableErrorState() throws InterruptedException {
    doInitTransactions();
    transactionManager.beginTransaction();
    transactionManager.maybeAddPartition(tp0);
    Future<RecordMetadata> responseFuture = appendToAccumulator(tp0);
    assertFalse(responseFuture.isDone());
    runUntil(transactionManager::hasInFlightRequest);
    transactionManager.transitionToAbortableError(new KafkaException());
    sendAddPartitionsToTxnResponse(Errors.NOT_COORDINATOR, tp0, epoch, producerId);
    runUntil(() -> transactionManager.coordinator(CoordinatorType.TRANSACTION) == null);
    prepareFindCoordinatorResponse(Errors.NONE, false, CoordinatorType.TRANSACTION, transactionalId);
    runUntil(() -> transactionManager.coordinator(CoordinatorType.TRANSACTION) != null);
    assertEquals(brokerNode, transactionManager.coordinator(CoordinatorType.TRANSACTION));
    assertTrue(transactionManager.hasAbortableError());
}
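For orientation, here is a minimal sketch (not part of the test above) of how application code typically reacts to an abortable error through the public KafkaProducer API; the producer instance, topic name, and record values are illustrative assumptions. The point mirrored by the test is that an abortable error still allows the producer to abort and recover, including rediscovering the transaction coordinator.

// Minimal sketch, assuming a configured transactional KafkaProducer<String, String> named 'producer'.
try {
    producer.beginTransaction();
    producer.send(new ProducerRecord<>("my-topic", "key", "value"));
    producer.commitTransaction();
} catch (ProducerFencedException | OutOfOrderSequenceException | AuthorizationException e) {
    // Fatal errors: the only safe option is to close the producer.
    producer.close();
} catch (KafkaException e) {
    // Abortable error: abort the current transaction; a new one can then be started.
    producer.abortTransaction();
}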
Use of org.apache.kafka.clients.producer.RecordMetadata in project kafka by apache: class TransactionManagerTest, method testCommitTransactionWithInFlightProduceRequest.
@Test
public void testCommitTransactionWithInFlightProduceRequest() throws Exception {
    doInitTransactions();
    transactionManager.beginTransaction();
    transactionManager.maybeAddPartition(tp0);
    Future<RecordMetadata> responseFuture = appendToAccumulator(tp0);
    prepareAddPartitionsToTxn(tp0, Errors.NONE);
    runUntil(() -> !transactionManager.hasPartitionsToAdd());
    assertTrue(accumulator.hasUndrained());
    accumulator.beginFlush();
    runUntil(() -> !accumulator.hasUndrained());
    assertFalse(accumulator.hasUndrained());
    assertTrue(accumulator.hasIncomplete());
    assertFalse(transactionManager.hasInFlightRequest());
    // now we begin the commit with the produce request still pending
    transactionManager.beginCommit();
    AtomicInteger numRuns = new AtomicInteger(0);
    runUntil(() -> numRuns.incrementAndGet() >= 4);
    assertFalse(accumulator.hasUndrained());
    assertTrue(accumulator.hasIncomplete());
    assertFalse(transactionManager.hasInFlightRequest());
    assertFalse(responseFuture.isDone());
    // now the produce response returns
    sendProduceResponse(Errors.NONE, producerId, epoch);
    runUntil(responseFuture::isDone);
    assertFalse(accumulator.hasUndrained());
    assertFalse(accumulator.hasIncomplete());
    assertFalse(transactionManager.hasInFlightRequest());
    // now we send EndTxn
    runUntil(transactionManager::hasInFlightRequest);
    sendEndTxnResponse(Errors.NONE, TransactionResult.COMMIT, producerId, epoch);
    runUntil(transactionManager::isReady);
    assertFalse(transactionManager.hasInFlightRequest());
}
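The behavior this test exercises, committing while a produce request is still in flight, is also visible through the public API: commitTransaction() flushes any unsent records and waits for outstanding sends before the EndTxn request is issued. A minimal sketch, assuming a transactional KafkaProducer named 'producer', an illustrative topic and record, and an enclosing method that declares throws Exception:

producer.beginTransaction();
// No explicit flush() is needed; commitTransaction() waits for this send to complete first.
Future<RecordMetadata> pending = producer.send(new ProducerRecord<>("my-topic", "key", "value"));
producer.commitTransaction();
RecordMetadata metadata = pending.get(); // already completed once commitTransaction() returns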
Use of org.apache.kafka.clients.producer.RecordMetadata in project kafka by apache: class TransactionManagerTest, method testBasicTransaction.
@Test
public void testBasicTransaction() throws InterruptedException {
    doInitTransactions();
    transactionManager.beginTransaction();
    transactionManager.maybeAddPartition(tp0);
    Future<RecordMetadata> responseFuture = appendToAccumulator(tp0);
    assertFalse(responseFuture.isDone());
    prepareAddPartitionsToTxnResponse(Errors.NONE, tp0, epoch, producerId);
    prepareProduceResponse(Errors.NONE, producerId, epoch);
    assertFalse(transactionManager.transactionContainsPartition(tp0));
    assertFalse(transactionManager.isSendToPartitionAllowed(tp0));
    runUntil(() -> transactionManager.transactionContainsPartition(tp0));
    assertTrue(transactionManager.isSendToPartitionAllowed(tp0));
    assertFalse(responseFuture.isDone());
    runUntil(responseFuture::isDone);
    Map<TopicPartition, OffsetAndMetadata> offsets = new HashMap<>();
    offsets.put(tp1, new OffsetAndMetadata(1));
    TransactionalRequestResult addOffsetsResult = transactionManager.sendOffsetsToTransaction(offsets, new ConsumerGroupMetadata(consumerGroupId));
    assertFalse(transactionManager.hasPendingOffsetCommits());
    prepareAddOffsetsToTxnResponse(Errors.NONE, consumerGroupId, producerId, epoch);
    runUntil(transactionManager::hasPendingOffsetCommits);
    // the result doesn't complete until TxnOffsetCommit returns
    assertFalse(addOffsetsResult.isCompleted());
    Map<TopicPartition, Errors> txnOffsetCommitResponse = new HashMap<>();
    txnOffsetCommitResponse.put(tp1, Errors.NONE);
    prepareFindCoordinatorResponse(Errors.NONE, false, CoordinatorType.GROUP, consumerGroupId);
    prepareTxnOffsetCommitResponse(consumerGroupId, producerId, epoch, txnOffsetCommitResponse);
    assertNull(transactionManager.coordinator(CoordinatorType.GROUP));
    runUntil(() -> transactionManager.coordinator(CoordinatorType.GROUP) != null);
    assertTrue(transactionManager.hasPendingOffsetCommits());
    runUntil(() -> !transactionManager.hasPendingOffsetCommits());
    // We should only be done after both RPCs complete.
    assertTrue(addOffsetsResult.isCompleted());
    transactionManager.beginCommit();
    prepareEndTxnResponse(Errors.NONE, TransactionResult.COMMIT, producerId, epoch);
    runUntil(() -> !transactionManager.hasOngoingTransaction());
    assertFalse(transactionManager.isCompleting());
    assertFalse(transactionManager.transactionContainsPartition(tp0));
}
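The same basic flow, produce, add consumer offsets to the transaction, then commit, looks like this against the public KafkaProducer API. This is a minimal sketch rather than the test harness above; the producer, topics, partition, offsets, and consumer group name are illustrative assumptions, and the enclosing method is assumed to declare throws Exception:

producer.initTransactions();
producer.beginTransaction();
Future<RecordMetadata> future = producer.send(new ProducerRecord<>("output-topic", "key", "value"));
Map<TopicPartition, OffsetAndMetadata> offsets = new HashMap<>();
offsets.put(new TopicPartition("input-topic", 1), new OffsetAndMetadata(1));
producer.sendOffsetsToTransaction(offsets, new ConsumerGroupMetadata("my-group"));
producer.commitTransaction();
// The RecordMetadata future completes once the produce request has been acknowledged.
RecordMetadata metadata = future.get();
System.out.println(metadata.topic() + "-" + metadata.partition() + "@" + metadata.offset());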
Use of org.apache.kafka.clients.producer.RecordMetadata in project kafka by apache: class TransactionManagerTest, method testRecoveryFromAbortableErrorTransactionNotStarted.
@Test
public void testRecoveryFromAbortableErrorTransactionNotStarted() throws Exception {
    final TopicPartition unauthorizedPartition = new TopicPartition("foo", 0);
    doInitTransactions();
    transactionManager.beginTransaction();
    transactionManager.maybeAddPartition(unauthorizedPartition);
    Future<RecordMetadata> responseFuture = appendToAccumulator(unauthorizedPartition);
    prepareAddPartitionsToTxn(singletonMap(unauthorizedPartition, Errors.TOPIC_AUTHORIZATION_FAILED));
    runUntil(() -> !client.hasPendingResponses());
    assertTrue(transactionManager.hasAbortableError());
    TransactionalRequestResult abortResult = transactionManager.beginAbort();
    runUntil(responseFuture::isDone);
    assertProduceFutureFailed(responseFuture);
    // No partitions added, so no need to prepare EndTxn response
    runUntil(transactionManager::isReady);
    assertFalse(transactionManager.hasPartitionsToAdd());
    assertFalse(accumulator.hasIncomplete());
    assertTrue(abortResult.isSuccessful());
    abortResult.await();
    // ensure we can now start a new transaction
    transactionManager.beginTransaction();
    transactionManager.maybeAddPartition(tp0);
    responseFuture = appendToAccumulator(tp0);
    prepareAddPartitionsToTxn(singletonMap(tp0, Errors.NONE));
    runUntil(() -> transactionManager.isPartitionAdded(tp0));
    assertFalse(transactionManager.hasPartitionsToAdd());
    transactionManager.beginCommit();
    prepareProduceResponse(Errors.NONE, producerId, epoch);
    runUntil(responseFuture::isDone);
    assertNotNull(responseFuture.get());
    prepareEndTxnResponse(Errors.NONE, TransactionResult.COMMIT, producerId, epoch);
    runUntil(transactionManager::isReady);
}
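Once the abort completes, the pending Future<RecordMetadata> for the unauthorized partition is failed rather than fulfilled (assertProduceFutureFailed above). A minimal sketch of how a caller observes this through the standard Future contract; the surrounding context and the exact cause carried by the failure are assumptions based on the TOPIC_AUTHORIZATION_FAILED error returned in the test:

try {
    RecordMetadata metadata = responseFuture.get();
    System.out.println("written at offset " + metadata.offset());
} catch (ExecutionException e) {
    // The future completes exceptionally; getCause() carries the exception that failed the batch.
    System.err.println("record was not written: " + e.getCause());
} catch (InterruptedException e) {
    Thread.currentThread().interrupt();
}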
Use of org.apache.kafka.clients.producer.RecordMetadata in project kafka by apache: class TransactionManagerTest, method testBumpTransactionalEpochOnRecoverableAddOffsetsRequestError.
@Test
public void testBumpTransactionalEpochOnRecoverableAddOffsetsRequestError() throws InterruptedException {
    final short initialEpoch = 1;
    final short bumpedEpoch = 2;
    doInitTransactions(producerId, initialEpoch);
    transactionManager.beginTransaction();
    transactionManager.maybeAddPartition(tp0);
    Future<RecordMetadata> responseFuture = appendToAccumulator(tp0);
    assertFalse(responseFuture.isDone());
    prepareAddPartitionsToTxnResponse(Errors.NONE, tp0, initialEpoch, producerId);
    prepareProduceResponse(Errors.NONE, producerId, initialEpoch);
    runUntil(responseFuture::isDone);
    Map<TopicPartition, OffsetAndMetadata> offsets = new HashMap<>();
    offsets.put(tp0, new OffsetAndMetadata(1));
    transactionManager.sendOffsetsToTransaction(offsets, new ConsumerGroupMetadata(consumerGroupId));
    assertFalse(transactionManager.hasPendingOffsetCommits());
    prepareAddOffsetsToTxnResponse(Errors.INVALID_PRODUCER_ID_MAPPING, consumerGroupId, producerId, initialEpoch);
    // Send AddOffsetsRequest
    runUntil(transactionManager::hasAbortableError);
    TransactionalRequestResult abortResult = transactionManager.beginAbort();
    prepareEndTxnResponse(Errors.NONE, TransactionResult.ABORT, producerId, initialEpoch);
    prepareInitPidResponse(Errors.NONE, false, producerId, bumpedEpoch);
    runUntil(abortResult::isCompleted);
    assertEquals(bumpedEpoch, transactionManager.producerIdAndEpoch().epoch);
    assertTrue(abortResult.isSuccessful());
    // make sure we are ready for a transaction now.
    assertTrue(transactionManager.isReady());
}