
Example 21 with ProducerIdAndEpoch

Use of org.apache.kafka.common.utils.ProducerIdAndEpoch in project kafka by apache.

From the class SenderTest, method testRecordsFlushedImmediatelyOnTransactionCompletion.

@Test
public void testRecordsFlushedImmediatelyOnTransactionCompletion() throws Exception {
    try (Metrics m = new Metrics()) {
        int lingerMs = 50;
        SenderMetricsRegistry senderMetrics = new SenderMetricsRegistry(m);
        TransactionManager txnManager = new TransactionManager(logContext, "txnId", 6000, 100, apiVersions);
        setupWithTransactionState(txnManager, lingerMs);
        Sender sender = new Sender(logContext, client, metadata, this.accumulator, false, MAX_REQUEST_SIZE, ACKS_ALL, 1, senderMetrics, time, REQUEST_TIMEOUT, RETRY_BACKOFF_MS, txnManager, apiVersions);
        // Begin a transaction and successfully add one partition to it.
        ProducerIdAndEpoch producerIdAndEpoch = new ProducerIdAndEpoch(123456L, (short) 0);
        doInitTransactions(txnManager, producerIdAndEpoch);
        txnManager.beginTransaction();
        addPartitionToTxn(sender, txnManager, tp0);
        // Send a couple records and assert that they are not sent immediately (due to linger).
        appendToAccumulator(tp0);
        appendToAccumulator(tp0);
        sender.runOnce();
        assertFalse(client.hasInFlightRequests());
        // Now begin the commit and assert that the Produce request is sent immediately
        // without waiting for the linger.
        TransactionalRequestResult commitResult = txnManager.beginCommit();
        runUntil(sender, client::hasInFlightRequests);
        // Respond to the produce request and wait for the EndTxn request to be sent.
        respondToProduce(tp0, Errors.NONE, 1L);
        runUntil(sender, txnManager::hasInFlightRequest);
        // Respond to the expected EndTxn request.
        respondToEndTxn(Errors.NONE);
        runUntil(sender, txnManager::isReady);
        assertTrue(commitResult.isSuccessful());
        commitResult.await();
        // Finally, we want to assert that the linger time is still effective
        // when the new transaction begins.
        txnManager.beginTransaction();
        addPartitionToTxn(sender, txnManager, tp0);
        appendToAccumulator(tp0);
        appendToAccumulator(tp0);
        time.sleep(lingerMs - 1);
        sender.runOnce();
        assertFalse(client.hasInFlightRequests());
        assertTrue(accumulator.hasUndrained());
        time.sleep(1);
        runUntil(sender, client::hasInFlightRequests);
        assertFalse(accumulator.hasUndrained());
    }
}
Also used: Metrics (org.apache.kafka.common.metrics.Metrics), ProducerIdAndEpoch (org.apache.kafka.common.utils.ProducerIdAndEpoch), Test (org.junit.jupiter.api.Test)
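
The runUntil calls above repeatedly drive the Sender until a condition holds. The helper itself is not reproduced on this page; the following is a minimal sketch, assuming it simply keeps calling sender.runOnce() with a bounded iteration count (the body and the bound are illustrative, not the verbatim SenderTest code).

private static void runUntil(Sender sender, java.util.function.Supplier<Boolean> condition) {
    // Drive the Sender's event loop until the condition becomes true,
    // failing fast so a broken expectation cannot spin forever.
    int iterations = 0;
    while (!condition.get()) {
        if (++iterations > 1000) {
            throw new AssertionError("Condition not satisfied after " + iterations + " runOnce() calls");
        }
        sender.runOnce();
    }
}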

Example 22 with ProducerIdAndEpoch

Use of org.apache.kafka.common.utils.ProducerIdAndEpoch in project kafka by apache.

From the class SenderTest, method testIdempotentSplitBatchAndSend.

@Test
public void testIdempotentSplitBatchAndSend() throws Exception {
    TopicPartition tp = new TopicPartition("testSplitBatchAndSend", 1);
    TransactionManager txnManager = createTransactionManager();
    ProducerIdAndEpoch producerIdAndEpoch = new ProducerIdAndEpoch(123456L, (short) 0);
    setupWithTransactionState(txnManager);
    prepareAndReceiveInitProducerId(123456L, Errors.NONE);
    assertTrue(txnManager.hasProducerId());
    testSplitBatchAndSend(txnManager, producerIdAndEpoch, tp);
}
Also used: TopicPartition (org.apache.kafka.common.TopicPartition), ProducerIdAndEpoch (org.apache.kafka.common.utils.ProducerIdAndEpoch), Test (org.junit.jupiter.api.Test)
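
This test obtains its TransactionManager from a createTransactionManager() helper instead of the constructor used in the other examples. A minimal sketch, assuming the helper builds an idempotence-only manager by passing a null transactional id with the same timeout and backoff values seen above (the exact arguments are an assumption, not the verbatim helper):

private TransactionManager createTransactionManager() {
    // Assumed: a null transactional id yields an idempotence-only manager,
    // mirroring the constructor arguments used by the transactional tests on this page.
    return new TransactionManager(new LogContext(), null, 6000, 100, new ApiVersions());
}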

Example 23 with ProducerIdAndEpoch

Use of org.apache.kafka.common.utils.ProducerIdAndEpoch in project kafka by apache.

From the class SenderTest, method testAwaitPendingRecordsBeforeCommittingTransaction.

@Test
public void testAwaitPendingRecordsBeforeCommittingTransaction() throws Exception {
    try (Metrics m = new Metrics()) {
        SenderMetricsRegistry senderMetrics = new SenderMetricsRegistry(m);
        TransactionManager txnManager = new TransactionManager(logContext, "txnId", 6000, 100, apiVersions);
        setupWithTransactionState(txnManager);
        Sender sender = new Sender(logContext, client, metadata, this.accumulator, false, MAX_REQUEST_SIZE, ACKS_ALL, 1, senderMetrics, time, REQUEST_TIMEOUT, RETRY_BACKOFF_MS, txnManager, apiVersions);
        // Begin a transaction and successfully add one partition to it.
        ProducerIdAndEpoch producerIdAndEpoch = new ProducerIdAndEpoch(123456L, (short) 0);
        doInitTransactions(txnManager, producerIdAndEpoch);
        txnManager.beginTransaction();
        addPartitionToTxn(sender, txnManager, tp0);
        // Send one Produce request.
        appendToAccumulator(tp0);
        runUntil(sender, () -> client.requests().size() == 1);
        assertFalse(accumulator.hasUndrained());
        assertTrue(client.hasInFlightRequests());
        assertTrue(txnManager.hasInflightBatches(tp0));
        // Enqueue another record and then commit the transaction. We expect the unsent record to
        // get sent before the transaction can be completed.
        appendToAccumulator(tp0);
        txnManager.beginCommit();
        runUntil(sender, () -> client.requests().size() == 2);
        assertTrue(txnManager.isCompleting());
        assertFalse(txnManager.hasInFlightRequest());
        assertTrue(txnManager.hasInflightBatches(tp0));
        // Now respond to the pending Produce requests.
        respondToProduce(tp0, Errors.NONE, 0L);
        respondToProduce(tp0, Errors.NONE, 1L);
        runUntil(sender, txnManager::hasInFlightRequest);
        // Finally, respond to the expected EndTxn request.
        respondToEndTxn(Errors.NONE);
        runUntil(sender, txnManager::isReady);
    }
}
Also used: Metrics (org.apache.kafka.common.metrics.Metrics), ProducerIdAndEpoch (org.apache.kafka.common.utils.ProducerIdAndEpoch), Test (org.junit.jupiter.api.Test)
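
The respondToProduce and respondToEndTxn helpers used in these tests are not shown on this page. The sketches below show one plausible shape for them using MockClient's request matching; the matcher lambdas and the delegation to the produceResponse helper from Example 25 are assumptions, not the verbatim SenderTest code.

private void respondToProduce(TopicPartition tp, Errors error, long offset) {
    // Answer the next in-flight Produce request for the given partition.
    client.respond(request -> request instanceof ProduceRequest,
        produceResponse(tp, offset, error, 0));
}

private void respondToEndTxn(Errors error) {
    // Answer the pending EndTxn request with the supplied error code.
    client.respond(request -> request instanceof EndTxnRequest,
        new EndTxnResponse(new EndTxnResponseData()
            .setErrorCode(error.code())
            .setThrottleTimeMs(0)));
}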

Example 24 with ProducerIdAndEpoch

Use of org.apache.kafka.common.utils.ProducerIdAndEpoch in project kafka by apache.

From the class SenderTest, method testDoNotPollWhenNoRequestSent.

@Test
public void testDoNotPollWhenNoRequestSent() {
    client = spy(new MockClient(time, metadata));
    TransactionManager txnManager = new TransactionManager(logContext, "testDoNotPollWhenNoRequestSent", 6000, 100, apiVersions);
    ProducerIdAndEpoch producerIdAndEpoch = new ProducerIdAndEpoch(123456L, (short) 0);
    setupWithTransactionState(txnManager);
    doInitTransactions(txnManager, producerIdAndEpoch);
    // doInitTransactions calls sender.runOnce() three times, but only two requests are sent, so we should only poll twice
    verify(client, times(2)).poll(eq(RETRY_BACKOFF_MS), anyLong());
}
Also used: ProducerIdAndEpoch (org.apache.kafka.common.utils.ProducerIdAndEpoch), MockClient (org.apache.kafka.clients.MockClient), Test (org.junit.jupiter.api.Test)
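
This example relies on Mockito's spying support in addition to MockClient. The static imports below are presumably what the spy, verify, times, eq, and anyLong calls resolve to:

import static org.mockito.ArgumentMatchers.anyLong;
import static org.mockito.ArgumentMatchers.eq;
import static org.mockito.Mockito.spy;
import static org.mockito.Mockito.times;
import static org.mockito.Mockito.verify;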

Example 25 with ProducerIdAndEpoch

Use of org.apache.kafka.common.utils.ProducerIdAndEpoch in project kafka by apache.

From the class SenderTest, method testTooLargeBatchesAreSafelyRemoved.

@Test
public void testTooLargeBatchesAreSafelyRemoved() throws InterruptedException {
    ProducerIdAndEpoch producerIdAndEpoch = new ProducerIdAndEpoch(123456L, (short) 0);
    TransactionManager txnManager = new TransactionManager(logContext, "testSplitBatchAndSend", 60000, 100, apiVersions);
    setupWithTransactionState(txnManager, false, null);
    doInitTransactions(txnManager, producerIdAndEpoch);
    txnManager.beginTransaction();
    txnManager.maybeAddPartition(tp0);
    client.prepareResponse(new AddPartitionsToTxnResponse(0, Collections.singletonMap(tp0, Errors.NONE)));
    sender.runOnce();
    // create a producer batch with more than one record so it is eligible for splitting
    appendToAccumulator(tp0, time.milliseconds(), "key1", "value1");
    appendToAccumulator(tp0, time.milliseconds(), "key2", "value2");
    // send request
    sender.runOnce();
    assertEquals(1, sender.inFlightBatches(tp0).size());
    // return a MESSAGE_TOO_LARGE error
    client.respond(produceResponse(tp0, -1, Errors.MESSAGE_TOO_LARGE, -1));
    sender.runOnce();
    // process retried response
    sender.runOnce();
    client.respond(produceResponse(tp0, 0, Errors.NONE, 0));
    sender.runOnce();
    // In-flight batches should be empty. Sleep past the expiration time of the batch and run once, no error should be thrown
    assertEquals(0, sender.inFlightBatches(tp0).size());
    time.sleep(2000);
    sender.runOnce();
}
Also used: AddPartitionsToTxnResponse (org.apache.kafka.common.requests.AddPartitionsToTxnResponse), ProducerIdAndEpoch (org.apache.kafka.common.utils.ProducerIdAndEpoch), Test (org.junit.jupiter.api.Test)
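
The produceResponse helper that fabricates the broker replies in this example is not reproduced here. A minimal sketch, assuming it wraps a single-partition result in ProduceResponse's map-based constructor (the NO_TIMESTAMP and log start offset values are placeholders, not the verbatim helper):

private ProduceResponse produceResponse(TopicPartition tp, long offset, Errors error, int throttleTimeMs) {
    // Build a response covering exactly one partition with the given error and base offset.
    ProduceResponse.PartitionResponse partitionResponse =
        new ProduceResponse.PartitionResponse(error, offset, RecordBatch.NO_TIMESTAMP, -1L);
    return new ProduceResponse(Collections.singletonMap(tp, partitionResponse), throttleTimeMs);
}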

Aggregations

ProducerIdAndEpoch (org.apache.kafka.common.utils.ProducerIdAndEpoch): 25
Test (org.junit.jupiter.api.Test): 20
TopicPartition (org.apache.kafka.common.TopicPartition): 11
AddPartitionsToTxnResponse (org.apache.kafka.common.requests.AddPartitionsToTxnResponse): 8
Metrics (org.apache.kafka.common.metrics.Metrics): 5
TimeoutException (org.apache.kafka.common.errors.TimeoutException): 4
Node (org.apache.kafka.common.Node): 3
ProduceResponse (org.apache.kafka.common.requests.ProduceResponse): 3
ArrayList (java.util.ArrayList): 2
ApiVersions (org.apache.kafka.clients.ApiVersions): 2
MockClient (org.apache.kafka.clients.MockClient): 2
NodeApiVersions (org.apache.kafka.clients.NodeApiVersions): 2
RecordMetadata (org.apache.kafka.clients.producer.RecordMetadata): 2
EndTxnResponseData (org.apache.kafka.common.message.EndTxnResponseData): 2
EndTxnResponse (org.apache.kafka.common.requests.EndTxnResponse): 2
MetadataResponse (org.apache.kafka.common.requests.MetadataResponse): 2
LogContext (org.apache.kafka.common.utils.LogContext): 2
ArrayDeque (java.util.ArrayDeque): 1
Arrays.asList (java.util.Arrays.asList): 1
List (java.util.List): 1