Search in sources :

Example 71 with RecordMetadata

Use of org.apache.kafka.clients.producer.RecordMetadata in the Apache Kafka project.

From the class TransactionManagerTest, method testFindCoordinatorAllowedInAbortableErrorState.

/**
 * Verifies that coordinator rediscovery (FindCoordinator) is still allowed
 * while the transaction manager sits in the abortable-error state, and that
 * the abortable error is not cleared by the rediscovery.
 */
@Test
public void testFindCoordinatorAllowedInAbortableErrorState() throws InterruptedException {
    doInitTransactions();
    transactionManager.beginTransaction();
    transactionManager.maybeAddPartition(tp0);

    // Append a record; it must not complete before the transactional plumbing runs.
    Future<RecordMetadata> produceFuture = appendToAccumulator(tp0);
    assertFalse(produceFuture.isDone());
    runUntil(transactionManager::hasInFlightRequest);

    // Force the abortable-error state before the broker's response arrives.
    transactionManager.transitionToAbortableError(new KafkaException());

    // A NOT_COORDINATOR response should clear the cached transaction coordinator...
    sendAddPartitionsToTxnResponse(Errors.NOT_COORDINATOR, tp0, epoch, producerId);
    runUntil(() -> transactionManager.coordinator(CoordinatorType.TRANSACTION) == null);

    // ...and a fresh FindCoordinator lookup must still be permitted in this state.
    prepareFindCoordinatorResponse(Errors.NONE, false, CoordinatorType.TRANSACTION, transactionalId);
    runUntil(() -> transactionManager.coordinator(CoordinatorType.TRANSACTION) != null);
    assertEquals(brokerNode, transactionManager.coordinator(CoordinatorType.TRANSACTION));

    // The abortable error itself survives the coordinator rediscovery.
    assertTrue(transactionManager.hasAbortableError());
}
Also used : RecordMetadata(org.apache.kafka.clients.producer.RecordMetadata) KafkaException(org.apache.kafka.common.KafkaException) Test(org.junit.jupiter.api.Test)

Example 72 with RecordMetadata

Use of org.apache.kafka.clients.producer.RecordMetadata in the Apache Kafka project.

From the class TransactionManagerTest, method testCommitTransactionWithInFlightProduceRequest.

/**
 * Verifies that {@code beginCommit()} may be called while a produce request is
 * still in flight: the EndTxn request must be held back until the outstanding
 * produce response has been received.
 */
@Test
public void testCommitTransactionWithInFlightProduceRequest() throws Exception {
    doInitTransactions();
    transactionManager.beginTransaction();
    transactionManager.maybeAddPartition(tp0);
    Future<RecordMetadata> responseFuture = appendToAccumulator(tp0);
    prepareAddPartitionsToTxn(tp0, Errors.NONE);
    runUntil(() -> !transactionManager.hasPartitionsToAdd());
    assertTrue(accumulator.hasUndrained());
    // Flushing drains the appended batch so it becomes an in-flight
    // (incomplete) produce request rather than an undrained one.
    accumulator.beginFlush();
    runUntil(() -> !accumulator.hasUndrained());
    assertFalse(accumulator.hasUndrained());
    assertTrue(accumulator.hasIncomplete());
    assertFalse(transactionManager.hasInFlightRequest());
    // now we begin the commit with the produce request still pending
    transactionManager.beginCommit();
    // Run several poll iterations: while the produce request is incomplete,
    // the manager must not put any transactional request (EndTxn) in flight.
    AtomicInteger numRuns = new AtomicInteger(0);
    runUntil(() -> numRuns.incrementAndGet() >= 4);
    assertFalse(accumulator.hasUndrained());
    assertTrue(accumulator.hasIncomplete());
    assertFalse(transactionManager.hasInFlightRequest());
    assertFalse(responseFuture.isDone());
    // now the produce response returns
    sendProduceResponse(Errors.NONE, producerId, epoch);
    runUntil(responseFuture::isDone);
    assertFalse(accumulator.hasUndrained());
    assertFalse(accumulator.hasIncomplete());
    assertFalse(transactionManager.hasInFlightRequest());
    // now we send EndTxn
    runUntil(transactionManager::hasInFlightRequest);
    sendEndTxnResponse(Errors.NONE, TransactionResult.COMMIT, producerId, epoch);
    runUntil(transactionManager::isReady);
    assertFalse(transactionManager.hasInFlightRequest());
}
Also used : RecordMetadata(org.apache.kafka.clients.producer.RecordMetadata) AtomicInteger(java.util.concurrent.atomic.AtomicInteger) Test(org.junit.jupiter.api.Test)

Example 73 with RecordMetadata

Use of org.apache.kafka.clients.producer.RecordMetadata in the Apache Kafka project.

From the class TransactionManagerTest, method testBasicTransaction.

/**
 * End-to-end happy path for a single transaction: add a partition, produce a
 * record, send consumer offsets (AddOffsetsToTxn + TxnOffsetCommit, including
 * group-coordinator discovery), then commit via EndTxn.
 */
@Test
public void testBasicTransaction() throws InterruptedException {
    doInitTransactions();
    transactionManager.beginTransaction();
    transactionManager.maybeAddPartition(tp0);
    Future<RecordMetadata> responseFuture = appendToAccumulator(tp0);
    assertFalse(responseFuture.isDone());
    prepareAddPartitionsToTxnResponse(Errors.NONE, tp0, epoch, producerId);
    prepareProduceResponse(Errors.NONE, producerId, epoch);
    // Sending to tp0 is disallowed until AddPartitionsToTxn has succeeded.
    assertFalse(transactionManager.transactionContainsPartition(tp0));
    assertFalse(transactionManager.isSendToPartitionAllowed(tp0));
    runUntil(() -> transactionManager.transactionContainsPartition(tp0));
    assertTrue(transactionManager.isSendToPartitionAllowed(tp0));
    assertFalse(responseFuture.isDone());
    runUntil(responseFuture::isDone);
    Map<TopicPartition, OffsetAndMetadata> offsets = new HashMap<>();
    offsets.put(tp1, new OffsetAndMetadata(1));
    TransactionalRequestResult addOffsetsResult = transactionManager.sendOffsetsToTransaction(offsets, new ConsumerGroupMetadata(consumerGroupId));
    // Offsets become "pending" only after the AddOffsetsToTxn response.
    assertFalse(transactionManager.hasPendingOffsetCommits());
    prepareAddOffsetsToTxnResponse(Errors.NONE, consumerGroupId, producerId, epoch);
    runUntil(transactionManager::hasPendingOffsetCommits);
    // the result doesn't complete until TxnOffsetCommit returns
    assertFalse(addOffsetsResult.isCompleted());
    Map<TopicPartition, Errors> txnOffsetCommitResponse = new HashMap<>();
    txnOffsetCommitResponse.put(tp1, Errors.NONE);
    // TxnOffsetCommit requires the GROUP coordinator, which must be discovered first.
    prepareFindCoordinatorResponse(Errors.NONE, false, CoordinatorType.GROUP, consumerGroupId);
    prepareTxnOffsetCommitResponse(consumerGroupId, producerId, epoch, txnOffsetCommitResponse);
    assertNull(transactionManager.coordinator(CoordinatorType.GROUP));
    runUntil(() -> transactionManager.coordinator(CoordinatorType.GROUP) != null);
    assertTrue(transactionManager.hasPendingOffsetCommits());
    runUntil(() -> !transactionManager.hasPendingOffsetCommits());
    // We should only be done after both RPCs complete.
    assertTrue(addOffsetsResult.isCompleted());
    transactionManager.beginCommit();
    prepareEndTxnResponse(Errors.NONE, TransactionResult.COMMIT, producerId, epoch);
    runUntil(() -> !transactionManager.hasOngoingTransaction());
    // After the commit completes, no transactional state should linger.
    assertFalse(transactionManager.isCompleting());
    assertFalse(transactionManager.transactionContainsPartition(tp0));
}
Also used : RecordMetadata(org.apache.kafka.clients.producer.RecordMetadata) Errors(org.apache.kafka.common.protocol.Errors) ConsumerGroupMetadata(org.apache.kafka.clients.consumer.ConsumerGroupMetadata) HashMap(java.util.HashMap) TopicPartition(org.apache.kafka.common.TopicPartition) OffsetAndMetadata(org.apache.kafka.clients.consumer.OffsetAndMetadata) Test(org.junit.jupiter.api.Test)

Example 74 with RecordMetadata

Use of org.apache.kafka.clients.producer.RecordMetadata in the Apache Kafka project.

From the class TransactionManagerTest, method testRecoveryFromAbortableErrorTransactionNotStarted.

/**
 * Verifies recovery from an abortable error raised before any partition was
 * successfully added (TOPIC_AUTHORIZATION_FAILED on AddPartitionsToTxn):
 * the abort fails the pending produce future, completes without an EndTxn
 * round trip, and leaves the manager ready to start a new transaction.
 */
@Test
public void testRecoveryFromAbortableErrorTransactionNotStarted() throws Exception {
    final TopicPartition unauthorizedPartition = new TopicPartition("foo", 0);
    doInitTransactions();
    transactionManager.beginTransaction();
    transactionManager.maybeAddPartition(unauthorizedPartition);
    Future<RecordMetadata> responseFuture = appendToAccumulator(unauthorizedPartition);
    // Authorization failure on AddPartitionsToTxn puts the manager in the abortable-error state.
    prepareAddPartitionsToTxn(singletonMap(unauthorizedPartition, Errors.TOPIC_AUTHORIZATION_FAILED));
    runUntil(() -> !client.hasPendingResponses());
    assertTrue(transactionManager.hasAbortableError());
    TransactionalRequestResult abortResult = transactionManager.beginAbort();
    // The abort must fail the pending produce future.
    runUntil(responseFuture::isDone);
    assertProduceFutureFailed(responseFuture);
    // No partitions added, so no need to prepare EndTxn response
    runUntil(transactionManager::isReady);
    assertFalse(transactionManager.hasPartitionsToAdd());
    assertFalse(accumulator.hasIncomplete());
    assertTrue(abortResult.isSuccessful());
    abortResult.await();
    // ensure we can now start a new transaction
    transactionManager.beginTransaction();
    transactionManager.maybeAddPartition(tp0);
    responseFuture = appendToAccumulator(tp0);
    prepareAddPartitionsToTxn(singletonMap(tp0, Errors.NONE));
    runUntil(() -> transactionManager.isPartitionAdded(tp0));
    assertFalse(transactionManager.hasPartitionsToAdd());
    transactionManager.beginCommit();
    prepareProduceResponse(Errors.NONE, producerId, epoch);
    // This time the produce future completes successfully.
    runUntil(responseFuture::isDone);
    assertNotNull(responseFuture.get());
    prepareEndTxnResponse(Errors.NONE, TransactionResult.COMMIT, producerId, epoch);
    runUntil(transactionManager::isReady);
}
Also used : RecordMetadata(org.apache.kafka.clients.producer.RecordMetadata) TopicPartition(org.apache.kafka.common.TopicPartition) Test(org.junit.jupiter.api.Test)

Example 75 with RecordMetadata

Use of org.apache.kafka.clients.producer.RecordMetadata in the Apache Kafka project.

From the class TransactionManagerTest, method testBumpTransactionalEpochOnRecoverableAddOffsetsRequestError.

/**
 * Verifies that an INVALID_PRODUCER_ID_MAPPING error on AddOffsetsToTxn is
 * treated as abortable, and that aborting the transaction triggers an
 * InitProducerId round trip that bumps the producer epoch (initialEpoch ->
 * bumpedEpoch), leaving the manager ready for a new transaction.
 */
@Test
public void testBumpTransactionalEpochOnRecoverableAddOffsetsRequestError() throws InterruptedException {
    final short initialEpoch = 1;
    final short bumpedEpoch = 2;
    doInitTransactions(producerId, initialEpoch);
    transactionManager.beginTransaction();
    transactionManager.maybeAddPartition(tp0);
    Future<RecordMetadata> responseFuture = appendToAccumulator(tp0);
    assertFalse(responseFuture.isDone());
    prepareAddPartitionsToTxnResponse(Errors.NONE, tp0, initialEpoch, producerId);
    prepareProduceResponse(Errors.NONE, producerId, initialEpoch);
    runUntil(responseFuture::isDone);
    Map<TopicPartition, OffsetAndMetadata> offsets = new HashMap<>();
    offsets.put(tp0, new OffsetAndMetadata(1));
    transactionManager.sendOffsetsToTransaction(offsets, new ConsumerGroupMetadata(consumerGroupId));
    assertFalse(transactionManager.hasPendingOffsetCommits());
    // The recoverable error on AddOffsetsToTxn should move the manager to the abortable-error state.
    prepareAddOffsetsToTxnResponse(Errors.INVALID_PRODUCER_ID_MAPPING, consumerGroupId, producerId, initialEpoch);
    // Send AddOffsetsRequest
    runUntil(transactionManager::hasAbortableError);
    TransactionalRequestResult abortResult = transactionManager.beginAbort();
    // The abort is expected to be followed by an InitProducerId that bumps the epoch.
    prepareEndTxnResponse(Errors.NONE, TransactionResult.ABORT, producerId, initialEpoch);
    prepareInitPidResponse(Errors.NONE, false, producerId, bumpedEpoch);
    runUntil(abortResult::isCompleted);
    assertEquals(bumpedEpoch, transactionManager.producerIdAndEpoch().epoch);
    assertTrue(abortResult.isSuccessful());
    // make sure we are ready for a transaction now.
    assertTrue(transactionManager.isReady());
}
Also used : RecordMetadata(org.apache.kafka.clients.producer.RecordMetadata) ConsumerGroupMetadata(org.apache.kafka.clients.consumer.ConsumerGroupMetadata) HashMap(java.util.HashMap) TopicPartition(org.apache.kafka.common.TopicPartition) OffsetAndMetadata(org.apache.kafka.clients.consumer.OffsetAndMetadata) Test(org.junit.jupiter.api.Test)

Aggregations

RecordMetadata (org.apache.kafka.clients.producer.RecordMetadata)191 Test (org.junit.Test)64 Node (org.apache.kafka.common.Node)50 Test (org.junit.jupiter.api.Test)50 TopicPartition (org.apache.kafka.common.TopicPartition)48 ProducerRecord (org.apache.kafka.clients.producer.ProducerRecord)46 ExecutionException (java.util.concurrent.ExecutionException)35 Callback (org.apache.kafka.clients.producer.Callback)33 KafkaProducer (org.apache.kafka.clients.producer.KafkaProducer)31 Properties (java.util.Properties)30 HashMap (java.util.HashMap)24 TimeoutException (org.apache.kafka.common.errors.TimeoutException)23 ArrayList (java.util.ArrayList)21 KafkaException (org.apache.kafka.common.KafkaException)19 List (java.util.List)15 AtomicInteger (java.util.concurrent.atomic.AtomicInteger)15 Metrics (org.apache.kafka.common.metrics.Metrics)15 LinkedHashMap (java.util.LinkedHashMap)13 Future (java.util.concurrent.Future)13 Map (java.util.Map)12