Search in sources :

Example 21 with RecordMetadata

use of org.apache.kafka.clients.producer.RecordMetadata in project apache-kafka-on-k8s by banzaicloud.

Defined in class SenderTest, method testUnsupportedVersionInProduceRequest.

@Test
public void testUnsupportedVersionInProduceRequest() throws Exception {
    // An UNSUPPORTED_VERSION error on an idempotent produce is fatal for the
    // transaction manager: the error must surface on the pending future and
    // every subsequent send must keep failing the same way.
    final long producerId = 343434L;
    TransactionManager transactionManager = new TransactionManager();
    setupWithTransactionState(transactionManager);
    client.setNode(new Node(1, "localhost", 33343));
    prepareAndReceiveInitProducerId(producerId, Errors.NONE);
    assertTrue(transactionManager.hasProducerId());
    Future<RecordMetadata> sendFuture = accumulator.append(tp0, time.milliseconds(), "key".getBytes(), "value".getBytes(), null, null, MAX_BLOCK_TIMEOUT).future;
    // Fail any idempotent ProduceRequest with an unsupported-version response.
    client.prepareUnsupportedVersionResponse(new MockClient.RequestMatcher() {

        @Override
        public boolean matches(AbstractRequest body) {
            return body instanceof ProduceRequest && ((ProduceRequest) body).isIdempotent();
        }
    });
    sender.run(time.milliseconds());
    assertFutureFailure(sendFuture, UnsupportedVersionException.class);
    // The error is fatal, so the manager stays poisoned and later sends fail too.
    assertTrue(transactionManager.hasFatalError());
    assertSendFailure(UnsupportedVersionException.class);
}
Also used : RecordMetadata(org.apache.kafka.clients.producer.RecordMetadata) ProduceRequest(org.apache.kafka.common.requests.ProduceRequest) Node(org.apache.kafka.common.Node) AbstractRequest(org.apache.kafka.common.requests.AbstractRequest) MockClient(org.apache.kafka.clients.MockClient) Test(org.junit.Test)

Example 22 with RecordMetadata

use of org.apache.kafka.clients.producer.RecordMetadata in project apache-kafka-on-k8s by banzaicloud.

Defined in class SenderTest, method testCorrectHandlingOfOutOfOrderResponsesWhenSecondSucceeds.

@Test
public void testCorrectHandlingOfOutOfOrderResponsesWhenSecondSucceeds() throws Exception {
    // Verifies out-of-order completion of two in-flight idempotent produce
    // requests: the second (higher sequence) succeeds first, then the first
    // times out. The sender must requeue and resend the failed batch without
    // corrupting the last-acked-sequence bookkeeping.
    final long producerId = 343434L;
    TransactionManager transactionManager = new TransactionManager();
    setupWithTransactionState(transactionManager);
    prepareAndReceiveInitProducerId(producerId, Errors.NONE);
    assertTrue(transactionManager.hasProducerId());
    assertEquals(0, transactionManager.sequenceNumber(tp0).longValue());
    // Send first ProduceRequest
    Future<RecordMetadata> request1 = accumulator.append(tp0, time.milliseconds(), "key".getBytes(), "value".getBytes(), null, null, MAX_BLOCK_TIMEOUT).future;
    sender.run(time.milliseconds());
    String nodeId = client.requests().peek().destination();
    // parseInt avoids the needless Integer boxing of Integer.valueOf
    Node node = new Node(Integer.parseInt(nodeId), "localhost", 0);
    assertEquals(1, client.inFlightRequestCount());
    // Send second ProduceRequest
    Future<RecordMetadata> request2 = accumulator.append(tp0, time.milliseconds(), "key".getBytes(), "value".getBytes(), null, null, MAX_BLOCK_TIMEOUT).future;
    sender.run(time.milliseconds());
    assertEquals(2, client.inFlightRequestCount());
    assertFalse(request1.isDone());
    assertFalse(request2.isDone());
    assertTrue(client.isReady(node, time.milliseconds()));
    ClientRequest firstClientRequest = client.requests().peek();
    ClientRequest secondClientRequest = (ClientRequest) client.requests().toArray()[1];
    // Complete the SECOND request first (out of order), successfully.
    client.respondToRequest(secondClientRequest, produceResponse(tp0, 1, Errors.NONE, 1));
    // receive response 1
    sender.run(time.milliseconds());
    assertTrue(request2.isDone());
    assertEquals(1, request2.get().offset());
    assertFalse(request1.isDone());
    Deque<ProducerBatch> queuedBatches = accumulator.batches().get(tp0);
    assertEquals(0, queuedBatches.size());
    assertEquals(1, client.inFlightRequestCount());
    assertEquals(1, transactionManager.lastAckedSequence(tp0));
    // Now fail the FIRST request with a retriable timeout.
    client.respondToRequest(firstClientRequest, produceResponse(tp0, -1, Errors.REQUEST_TIMED_OUT, -1));
    // receive response 0
    sender.run(time.milliseconds());
    // Make sure we requeued both batches in the correct order.
    assertEquals(1, queuedBatches.size());
    assertEquals(0, queuedBatches.peekFirst().baseSequence());
    assertEquals(1, transactionManager.lastAckedSequence(tp0));
    assertEquals(0, client.inFlightRequestCount());
    // resend request 0
    sender.run(time.milliseconds());
    // NOTE: the original test asserted this twice back-to-back; the duplicate
    // assertion was redundant and has been removed.
    assertEquals(1, client.inFlightRequestCount());
    assertEquals(1, transactionManager.lastAckedSequence(tp0));
    // Make sure we handle the out of order successful responses correctly.
    sendIdempotentProducerResponse(0, tp0, Errors.NONE, 0L);
    // receive response 0
    sender.run(time.milliseconds());
    assertEquals(0, queuedBatches.size());
    assertEquals(1, transactionManager.lastAckedSequence(tp0));
    assertEquals(0, client.inFlightRequestCount());
    assertFalse(client.hasInFlightRequests());
    assertTrue(request1.isDone());
    assertEquals(0, request1.get().offset());
}
Also used : RecordMetadata(org.apache.kafka.clients.producer.RecordMetadata) Node(org.apache.kafka.common.Node) ClientRequest(org.apache.kafka.clients.ClientRequest) Test(org.junit.Test)

Example 23 with RecordMetadata

use of org.apache.kafka.clients.producer.RecordMetadata in project apache-kafka-on-k8s by banzaicloud.

Defined in class SenderTest, method testCorrectHandlingOfOutOfOrderResponses.

@Test
public void testCorrectHandlingOfOutOfOrderResponses() throws Exception {
    // Two in-flight idempotent produce requests both fail, with the responses
    // arriving in reverse order. Both batches must be requeued in sequence
    // order and then retried one at a time, succeeding in order.
    final long producerId = 343434L;
    TransactionManager transactionManager = new TransactionManager();
    setupWithTransactionState(transactionManager);
    prepareAndReceiveInitProducerId(producerId, Errors.NONE);
    assertTrue(transactionManager.hasProducerId());
    assertEquals(0, transactionManager.sequenceNumber(tp0).longValue());
    // First produce (sequence 0) goes out.
    Future<RecordMetadata> firstRequest = accumulator.append(tp0, time.milliseconds(), "key".getBytes(), "value".getBytes(), null, null, MAX_BLOCK_TIMEOUT).future;
    sender.run(time.milliseconds());
    String destinationId = client.requests().peek().destination();
    Node brokerNode = new Node(Integer.valueOf(destinationId), "localhost", 0);
    assertEquals(1, client.inFlightRequestCount());
    assertEquals(1, transactionManager.sequenceNumber(tp0).longValue());
    assertEquals(-1, transactionManager.lastAckedSequence(tp0));
    // Second produce (sequence 1) goes out while the first is still in flight.
    Future<RecordMetadata> secondRequest = accumulator.append(tp0, time.milliseconds(), "key".getBytes(), "value".getBytes(), null, null, MAX_BLOCK_TIMEOUT).future;
    sender.run(time.milliseconds());
    assertEquals(2, client.inFlightRequestCount());
    assertEquals(2, transactionManager.sequenceNumber(tp0).longValue());
    assertEquals(-1, transactionManager.lastAckedSequence(tp0));
    assertFalse(firstRequest.isDone());
    assertFalse(secondRequest.isDone());
    assertTrue(client.isReady(brokerNode, time.milliseconds()));
    ClientRequest oldestInFlight = client.requests().peek();
    ClientRequest newestInFlight = (ClientRequest) client.requests().toArray()[1];
    // Fail the newer request first with OUT_OF_ORDER_SEQUENCE_NUMBER.
    client.respondToRequest(newestInFlight, produceResponse(tp0, -1, Errors.OUT_OF_ORDER_SEQUENCE_NUMBER, -1));
    // Process response for sequence 1.
    sender.run(time.milliseconds());
    Deque<ProducerBatch> batchQueue = accumulator.batches().get(tp0);
    // The second batch should now be back at the head of the queue.
    assertEquals(1, batchQueue.size());
    assertEquals(1, batchQueue.peekFirst().baseSequence());
    assertEquals(1, client.inFlightRequestCount());
    assertEquals(-1, transactionManager.lastAckedSequence(tp0));
    // Then fail the older request with a retriable leader error.
    client.respondToRequest(oldestInFlight, produceResponse(tp0, -1, Errors.NOT_LEADER_FOR_PARTITION, -1));
    // Process response for sequence 0.
    sender.run(time.milliseconds());
    // Both batches must be requeued, ordered by base sequence (0 before 1).
    assertEquals(2, batchQueue.size());
    assertEquals(0, batchQueue.peekFirst().baseSequence());
    assertEquals(1, batchQueue.peekLast().baseSequence());
    assertEquals(-1, transactionManager.lastAckedSequence(tp0));
    assertEquals(0, client.inFlightRequestCount());
    assertFalse(firstRequest.isDone());
    assertFalse(secondRequest.isDone());
    // Retry sequence 0.
    sender.run(time.milliseconds());
    assertEquals(1, client.inFlightRequestCount());
    // While retrying, only a single in-flight request is permitted.
    sender.run(time.milliseconds());
    assertEquals(1, client.inFlightRequestCount());
    assertEquals(-1, transactionManager.lastAckedSequence(tp0));
    // Retries must go out in sequence order despite the reversed responses.
    sendIdempotentProducerResponse(0, tp0, Errors.NONE, 0L);
    // Process successful response for sequence 0.
    sender.run(time.milliseconds());
    assertEquals(0, transactionManager.lastAckedSequence(tp0));
    assertEquals(0, client.inFlightRequestCount());
    assertTrue(firstRequest.isDone());
    assertEquals(0, firstRequest.get().offset());
    // Retry sequence 1.
    sender.run(time.milliseconds());
    assertEquals(1, client.inFlightRequestCount());
    sendIdempotentProducerResponse(1, tp0, Errors.NONE, 1L);
    // Process successful response for sequence 1.
    sender.run(time.milliseconds());
    assertFalse(client.hasInFlightRequests());
    assertEquals(1, transactionManager.lastAckedSequence(tp0));
    assertTrue(secondRequest.isDone());
    assertEquals(1, secondRequest.get().offset());
}
Also used : RecordMetadata(org.apache.kafka.clients.producer.RecordMetadata) Node(org.apache.kafka.common.Node) ClientRequest(org.apache.kafka.clients.ClientRequest) Test(org.junit.Test)

Example 24 with RecordMetadata

use of org.apache.kafka.clients.producer.RecordMetadata in project apache-kafka-on-k8s by banzaicloud.

Defined in class SenderTest, method testExpiryOfUnsentBatchesShouldNotCauseUnresolvedSequences.

@Test
public void testExpiryOfUnsentBatchesShouldNotCauseUnresolvedSequences() throws Exception {
    // A batch that expires before it was ever sent must fail with a timeout
    // but must NOT leave the partition's sequence state unresolved.
    final long producerId = 343434L;
    TransactionManager transactionManager = new TransactionManager();
    setupWithTransactionState(transactionManager);
    prepareAndReceiveInitProducerId(producerId, Errors.NONE);
    assertTrue(transactionManager.hasProducerId());
    assertEquals(0, transactionManager.sequenceNumber(tp0).longValue());
    // Append a record, then make the broker unreachable and let the batch expire.
    Future<RecordMetadata> unsentFuture = accumulator.append(tp0, 0L, "key".getBytes(), "value".getBytes(), null, null, MAX_BLOCK_TIMEOUT).future;
    Node brokerNode = this.cluster.nodes().get(0);
    time.sleep(10000L);
    client.disconnect(brokerNode.idString());
    client.blackout(brokerNode, 10);
    sender.run(time.milliseconds());
    assertFutureFailure(unsentFuture, TimeoutException.class);
    // The batch never hit the wire, so no sequence should be left dangling.
    assertFalse(transactionManager.hasUnresolvedSequence(tp0));
}
Also used : RecordMetadata(org.apache.kafka.clients.producer.RecordMetadata) Node(org.apache.kafka.common.Node) Test(org.junit.Test)

Example 25 with RecordMetadata

use of org.apache.kafka.clients.producer.RecordMetadata in project apache-kafka-on-k8s by banzaicloud.

Defined in class SenderTest, method testAbortRetryWhenProducerIdChanges.

@Test
@SuppressWarnings("deprecation")
public void testAbortRetryWhenProducerIdChanges() throws InterruptedException {
    // If the producer id changes while a request is awaiting retry (e.g. after
    // a disconnect), the retry must be aborted: the batch belongs to the old
    // pid/epoch. We verify the request is dropped, the error metric is bumped,
    // and the sequence number for the partition is back at 0 under the new pid.
    final long producerId = 343434L;
    TransactionManager transactionManager = new TransactionManager();
    transactionManager.setProducerIdAndEpoch(new ProducerIdAndEpoch(producerId, (short) 0));
    setupWithTransactionState(transactionManager);
    client.setNode(new Node(1, "localhost", 33343));
    int maxRetries = 10;
    Metrics m = new Metrics();
    SenderMetricsRegistry senderMetrics = new SenderMetricsRegistry(m);
    Sender sender = new Sender(logContext, client, metadata, this.accumulator, true, MAX_REQUEST_SIZE, ACKS_ALL, maxRetries, senderMetrics, time, REQUEST_TIMEOUT, 50, transactionManager, apiVersions);
    Future<RecordMetadata> responseFuture = accumulator.append(tp0, time.milliseconds(), "key".getBytes(), "value".getBytes(), null, null, MAX_BLOCK_TIMEOUT).future;
    // connect.
    sender.run(time.milliseconds());
    // send.
    sender.run(time.milliseconds());
    String id = client.requests().peek().destination();
    // parseInt avoids the needless Integer boxing of Integer.valueOf
    Node node = new Node(Integer.parseInt(id), "localhost", 0);
    assertEquals(1, client.inFlightRequestCount());
    assertTrue("Client ready status should be true", client.isReady(node, 0L));
    // Drop the connection so the in-flight request fails and becomes a retry.
    client.disconnect(id);
    assertEquals(0, client.inFlightRequestCount());
    assertFalse("Client ready status should be false", client.isReady(node, 0L));
    // Swap in a new producer id before the retry can be sent.
    transactionManager.resetProducerId();
    transactionManager.setProducerIdAndEpoch(new ProducerIdAndEpoch(producerId + 1, (short) 0));
    // receive error
    sender.run(time.milliseconds());
    // reconnect
    sender.run(time.milliseconds());
    // nothing to do, since the pid has changed. We should check the metrics for errors.
    sender.run(time.milliseconds());
    assertEquals("Expected requests to be aborted after pid change", 0, client.inFlightRequestCount());
    KafkaMetric recordErrors = m.metrics().get(senderMetrics.recordErrorRate);
    assertTrue("Expected non-zero value for record send errors", recordErrors.value() > 0);
    assertTrue(responseFuture.isDone());
    assertEquals(0, (long) transactionManager.sequenceNumber(tp0));
}
Also used : RecordMetadata(org.apache.kafka.clients.producer.RecordMetadata) Metrics(org.apache.kafka.common.metrics.Metrics) Node(org.apache.kafka.common.Node) KafkaMetric(org.apache.kafka.common.metrics.KafkaMetric) Test(org.junit.Test)

Aggregations

RecordMetadata (org.apache.kafka.clients.producer.RecordMetadata)189 Test (org.junit.Test)64 Node (org.apache.kafka.common.Node)50 Test (org.junit.jupiter.api.Test)50 TopicPartition (org.apache.kafka.common.TopicPartition)48 ProducerRecord (org.apache.kafka.clients.producer.ProducerRecord)45 ExecutionException (java.util.concurrent.ExecutionException)33 Callback (org.apache.kafka.clients.producer.Callback)32 KafkaProducer (org.apache.kafka.clients.producer.KafkaProducer)31 Properties (java.util.Properties)30 HashMap (java.util.HashMap)24 TimeoutException (org.apache.kafka.common.errors.TimeoutException)23 ArrayList (java.util.ArrayList)21 KafkaException (org.apache.kafka.common.KafkaException)19 List (java.util.List)15 AtomicInteger (java.util.concurrent.atomic.AtomicInteger)15 Metrics (org.apache.kafka.common.metrics.Metrics)15 LinkedHashMap (java.util.LinkedHashMap)13 Future (java.util.concurrent.Future)13 Map (java.util.Map)12