
Example 1 with ApiVersions

Use of org.apache.kafka.clients.ApiVersions in project apache-kafka-on-k8s by banzaicloud.

From class RecordAccumulatorTest, method testIdempotenceWithOldMagic:

@Test(expected = UnsupportedVersionException.class)
public void testIdempotenceWithOldMagic() throws InterruptedException {
    // Simulate talking to an older broker, i.e. one which supports a lower magic.
    ApiVersions apiVersions = new ApiVersions();
    int batchSize = 1025;
    // Advertise PRODUCE versions 0..2 for node "foobar", capping the usable magic at v1.
    apiVersions.update("foobar", NodeApiVersions.create(Arrays.asList(new ApiVersionsResponse.ApiVersion(ApiKeys.PRODUCE.id, (short) 0, (short) 2))));
    // A non-null TransactionManager enables idempotence, which requires magic v2.
    RecordAccumulator accum = new RecordAccumulator(logContext, batchSize + DefaultRecordBatch.RECORD_BATCH_OVERHEAD, 10 * batchSize, CompressionType.NONE, 10, 100L, metrics, time, apiVersions, new TransactionManager());
    // This append must throw UnsupportedVersionException: idempotent production is not possible on the old message format.
    accum.append(tp1, 0L, key, value, Record.EMPTY_HEADERS, null, 0);
}
Also used: ApiVersionsResponse (org.apache.kafka.common.requests.ApiVersionsResponse), ApiVersions (org.apache.kafka.clients.ApiVersions), NodeApiVersions (org.apache.kafka.clients.NodeApiVersions), Test (org.junit.Test)
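
A minimal, self-contained sketch (class name hypothetical) of the ApiVersions mechanics this test relies on, using the same older client API as the example above:

import java.util.Collections;

import org.apache.kafka.clients.ApiVersions;
import org.apache.kafka.clients.NodeApiVersions;
import org.apache.kafka.common.protocol.ApiKeys;
import org.apache.kafka.common.requests.ApiVersionsResponse;

public class ApiVersionsSketch {
    public static void main(String[] args) {
        ApiVersions apiVersions = new ApiVersions();
        // With no per-node information, the client assumes the newest message format (magic v2).
        System.out.println(apiVersions.maxUsableProduceMagic());
        // Report a broker that only supports PRODUCE v0..v2, i.e. message format v1 at most.
        apiVersions.update("0", NodeApiVersions.create(Collections.singleton(
                new ApiVersionsResponse.ApiVersion(ApiKeys.PRODUCE.id, (short) 0, (short) 2))));
        // The usable magic drops to v1, which is why idempotence (a magic-v2 feature)
        // fails with UnsupportedVersionException in the test above.
        System.out.println(apiVersions.maxUsableProduceMagic());
    }
}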

Example 2 with ApiVersions

Use of org.apache.kafka.clients.ApiVersions in project apache-kafka-on-k8s by banzaicloud.

From class RecordAccumulatorTest, method testAppendLargeOldMessageFormat:

private void testAppendLargeOldMessageFormat(CompressionType compressionType) throws Exception {
    int batchSize = 512;
    // A value twice the configured batch size, so it cannot fit a normal batch.
    byte[] value = new byte[2 * batchSize];
    ApiVersions apiVersions = new ApiVersions();
    // Cap node1 at PRODUCE v0..v2 so the accumulator must use the old message format.
    apiVersions.update(node1.idString(), NodeApiVersions.create(Collections.singleton(new ApiVersionsResponse.ApiVersion(ApiKeys.PRODUCE.id, (short) 0, (short) 2))));
    RecordAccumulator accum = createTestRecordAccumulator(batchSize + DefaultRecordBatch.RECORD_BATCH_OVERHEAD, 10 * 1024, compressionType, 0L);
    accum.append(tp1, 0L, key, value, Record.EMPTY_HEADERS, null, maxBlockTimeMs);
    assertEquals("Our partition's leader should be ready", Collections.singleton(node1), accum.ready(cluster, time.milliseconds()).readyNodes);
    // The oversized value should land in exactly one batch holding exactly one record.
    Deque<ProducerBatch> batches = accum.batches().get(tp1);
    assertEquals(1, batches.size());
    ProducerBatch producerBatch = batches.peek();
    List<MutableRecordBatch> recordBatches = TestUtils.toList(producerBatch.records().batches());
    assertEquals(1, recordBatches.size());
    MutableRecordBatch recordBatch = recordBatches.get(0);
    assertEquals(0L, recordBatch.baseOffset());
    List<Record> records = TestUtils.toList(recordBatch);
    assertEquals(1, records.size());
    Record record = records.get(0);
    assertEquals(0L, record.offset());
    assertEquals(ByteBuffer.wrap(key), record.key());
    assertEquals(ByteBuffer.wrap(value), record.value());
    assertEquals(0L, record.timestamp());
}
Also used: ApiVersionsResponse (org.apache.kafka.common.requests.ApiVersionsResponse), MutableRecordBatch (org.apache.kafka.common.record.MutableRecordBatch), ApiVersions (org.apache.kafka.clients.ApiVersions), NodeApiVersions (org.apache.kafka.clients.NodeApiVersions), DefaultRecord (org.apache.kafka.common.record.DefaultRecord), Record (org.apache.kafka.common.record.Record)
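
The old message format can also be observed directly, without the accumulator, by building a magic-v1 batch by hand. A minimal sketch (class name and buffer size are illustrative):

import java.nio.ByteBuffer;

import org.apache.kafka.common.record.CompressionType;
import org.apache.kafka.common.record.MemoryRecords;
import org.apache.kafka.common.record.MemoryRecordsBuilder;
import org.apache.kafka.common.record.RecordBatch;
import org.apache.kafka.common.record.TimestampType;

public class OldMagicSketch {
    public static void main(String[] args) {
        // Build a magic-v1 (old message format) batch directly.
        MemoryRecordsBuilder builder = MemoryRecords.builder(ByteBuffer.allocate(4096),
                RecordBatch.MAGIC_VALUE_V1, CompressionType.NONE, TimestampType.CREATE_TIME, 0L);
        builder.append(0L, "key".getBytes(), new byte[1024]);
        MemoryRecords records = builder.build();
        // Each batch reports its magic; the accumulator picks this value from
        // ApiVersions.maxUsableProduceMagic(), as in the test above.
        for (RecordBatch batch : records.batches())
            System.out.println(batch.magic()); // prints 1
    }
}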

Example 3 with ApiVersions

Use of org.apache.kafka.clients.ApiVersions in project apache-kafka-on-k8s by banzaicloud.

From class SenderTest, method testSplitBatchAndSend:

@SuppressWarnings("deprecation")
private void testSplitBatchAndSend(TransactionManager txnManager, ProducerIdAndEpoch producerIdAndEpoch, TopicPartition tp) throws Exception {
    int maxRetries = 1;
    String topic = tp.topic();
    // Set a good compression ratio.
    CompressionRatioEstimator.setEstimation(topic, CompressionType.GZIP, 0.2f);
    try (Metrics m = new Metrics()) {
        accumulator = new RecordAccumulator(logContext, batchSize, 1024 * 1024, CompressionType.GZIP, 0L, 0L, m, time, new ApiVersions(), txnManager);
        SenderMetricsRegistry senderMetrics = new SenderMetricsRegistry(m);
        Sender sender = new Sender(logContext, client, metadata, this.accumulator, true, MAX_REQUEST_SIZE, ACKS_ALL, maxRetries, senderMetrics, time, REQUEST_TIMEOUT, 1000L, txnManager, new ApiVersions());
        // Create a two-broker cluster, with partition 0 on broker 0 and partition 1 on broker 1.
        Cluster cluster1 = TestUtils.clusterWith(2, topic, 2);
        metadata.update(cluster1, Collections.<String>emptySet(), time.milliseconds());
        // Send the first message.
        Future<RecordMetadata> f1 = accumulator.append(tp, 0L, "key1".getBytes(), new byte[batchSize / 2], null, null, MAX_BLOCK_TIMEOUT).future;
        Future<RecordMetadata> f2 = accumulator.append(tp, 0L, "key2".getBytes(), new byte[batchSize / 2], null, null, MAX_BLOCK_TIMEOUT).future;
        // connect
        sender.run(time.milliseconds());
        // send produce request
        sender.run(time.milliseconds());
        assertEquals("The next sequence should be 2", 2, txnManager.sequenceNumber(tp).longValue());
        String id = client.requests().peek().destination();
        assertEquals(ApiKeys.PRODUCE, client.requests().peek().requestBuilder().apiKey());
        Node node = new Node(Integer.valueOf(id), "localhost", 0);
        assertEquals(1, client.inFlightRequestCount());
        assertTrue("Client ready status should be true", client.isReady(node, 0L));
        Map<TopicPartition, ProduceResponse.PartitionResponse> responseMap = new HashMap<>();
        responseMap.put(tp, new ProduceResponse.PartitionResponse(Errors.MESSAGE_TOO_LARGE));
        client.respond(new ProduceResponse(responseMap));
        // split and reenqueue
        sender.run(time.milliseconds());
        assertEquals("The next sequence should be 2", 2, txnManager.sequenceNumber(tp).longValue());
        // The compression ratio should have been improved once.
        assertEquals(CompressionType.GZIP.rate - CompressionRatioEstimator.COMPRESSION_RATIO_IMPROVING_STEP, CompressionRatioEstimator.estimation(topic, CompressionType.GZIP), 0.01);
        // send the first produce request
        sender.run(time.milliseconds());
        assertEquals("The next sequence number should be 2", 2, txnManager.sequenceNumber(tp).longValue());
        assertFalse("The future shouldn't have been done.", f1.isDone());
        assertFalse("The future shouldn't have been done.", f2.isDone());
        id = client.requests().peek().destination();
        assertEquals(ApiKeys.PRODUCE, client.requests().peek().requestBuilder().apiKey());
        node = new Node(Integer.valueOf(id), "localhost", 0);
        assertEquals(1, client.inFlightRequestCount());
        assertTrue("Client ready status should be true", client.isReady(node, 0L));
        responseMap.put(tp, new ProduceResponse.PartitionResponse(Errors.NONE, 0L, 0L, 0L));
        client.respond(produceRequestMatcher(tp, producerIdAndEpoch, 0, txnManager.isTransactional()), new ProduceResponse(responseMap));
        // receive
        sender.run(time.milliseconds());
        assertTrue("The future should have been done.", f1.isDone());
        assertEquals("The next sequence number should still be 2", 2, txnManager.sequenceNumber(tp).longValue());
        assertEquals("The last ack'd sequence number should be 0", 0, txnManager.lastAckedSequence(tp));
        assertFalse("The future shouldn't have been done.", f2.isDone());
        assertEquals("Offset of the first message should be 0", 0L, f1.get().offset());
        // send the second produce request
        sender.run(time.milliseconds());
        id = client.requests().peek().destination();
        assertEquals(ApiKeys.PRODUCE, client.requests().peek().requestBuilder().apiKey());
        node = new Node(Integer.valueOf(id), "localhost", 0);
        assertEquals(1, client.inFlightRequestCount());
        assertTrue("Client ready status should be true", client.isReady(node, 0L));
        responseMap.put(tp, new ProduceResponse.PartitionResponse(Errors.NONE, 1L, 0L, 0L));
        client.respond(produceRequestMatcher(tp, producerIdAndEpoch, 1, txnManager.isTransactional()), new ProduceResponse(responseMap));
        // receive
        sender.run(time.milliseconds());
        assertTrue("The future should have been done.", f2.isDone());
        assertEquals("The next sequence number should be 2", 2, txnManager.sequenceNumber(tp).longValue());
        assertEquals("The last ack'd sequence number should be 1", 1, txnManager.lastAckedSequence(tp));
        assertEquals("Offset of the first message should be 1", 1L, f2.get().offset());
        assertTrue("There should be no batch in the accumulator", accumulator.batches().get(tp).isEmpty());
        assertTrue("There should be a split", m.metrics().get(senderMetrics.batchSplitRate).value() > 0);
    }
}
Also used: HashMap (java.util.HashMap), LinkedHashMap (java.util.LinkedHashMap), ProduceResponse (org.apache.kafka.common.requests.ProduceResponse), Node (org.apache.kafka.common.Node), Cluster (org.apache.kafka.common.Cluster), RecordMetadata (org.apache.kafka.clients.producer.RecordMetadata), Metrics (org.apache.kafka.common.metrics.Metrics), TopicPartition (org.apache.kafka.common.TopicPartition), NodeApiVersions (org.apache.kafka.clients.NodeApiVersions), ApiVersions (org.apache.kafka.clients.ApiVersions)
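
The key invariant asserted above is that splitting a batch consumes no new sequence numbers. A sketch of that bookkeeping, assuming the internal TransactionManager API this test already uses (sequenceNumber, plus incrementSequenceNumber as called by the accumulator):

import org.apache.kafka.clients.producer.internals.TransactionManager;
import org.apache.kafka.common.TopicPartition;

public class SequenceSketch {
    public static void main(String[] args) {
        TransactionManager txn = new TransactionManager(); // idempotence-only, as in Example 1
        TopicPartition tp = new TopicPartition("topic", 0);
        // Two appended records consume sequences 0 and 1; the next sequence is 2.
        txn.incrementSequenceNumber(tp, 2);
        System.out.println(txn.sequenceNumber(tp)); // prints 2
        // A split re-enqueues the same records with their original sequences,
        // so this counter stays at 2 for the rest of the test.
    }
}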

Example 4 with ApiVersions

Use of org.apache.kafka.clients.ApiVersions in project kafka by apache.

From class SenderTest, method testNodeNotReady:

/**
 * Tests the code path where the target node for a FindCoordinator or
 * InitProducerId request is not ready.
 */
@Test
public void testNodeNotReady() {
    final long producerId = 123456L;
    time = new MockTime(10);
    client = new MockClient(time, metadata);
    TransactionManager transactionManager = new TransactionManager(new LogContext(), "testNodeNotReady", 60000, 100L, new ApiVersions());
    setupWithTransactionState(transactionManager, false, null, true);
    ProducerIdAndEpoch producerIdAndEpoch = new ProducerIdAndEpoch(producerId, (short) 0);
    transactionManager.initializeTransactions();
    sender.runOnce();
    Node node = metadata.fetch().nodes().get(0);
    // Keep the node unready for longer than the request timeout; FindCoordinator must wait, not fail.
    client.delayReady(node, REQUEST_TIMEOUT + 20);
    prepareFindCoordinatorResponse(Errors.NONE, "testNodeNotReady");
    sender.runOnce();
    sender.runOnce();
    assertNotNull(transactionManager.coordinator(CoordinatorType.TRANSACTION), "Coordinator not found");
    // Throttle the node and verify that InitProducerId also tolerates the delay.
    client.throttle(node, REQUEST_TIMEOUT + 20);
    prepareFindCoordinatorResponse(Errors.NONE, "Coordinator not found");
    prepareInitProducerResponse(Errors.NONE, producerIdAndEpoch.producerId, producerIdAndEpoch.epoch);
    waitForProducerId(transactionManager, producerIdAndEpoch);
}
Also used: Node (org.apache.kafka.common.Node), NodeApiVersions (org.apache.kafka.clients.NodeApiVersions), ApiVersions (org.apache.kafka.clients.ApiVersions), LogContext (org.apache.kafka.common.utils.LogContext), ProducerIdAndEpoch (org.apache.kafka.common.utils.ProducerIdAndEpoch), MockTime (org.apache.kafka.common.utils.MockTime), MockClient (org.apache.kafka.clients.MockClient), Test (org.junit.jupiter.api.Test)
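
The MockTime(10) above auto-advances the mock clock by 10 ms on every read, which is what lets the delayed-ready and throttle windows expire without explicit sleeps. A quick sketch of that behavior (class name hypothetical):

import org.apache.kafka.common.utils.MockTime;

public class MockTimeSketch {
    public static void main(String[] args) {
        MockTime time = new MockTime(10); // auto-tick: 10 ms per access
        long t0 = time.milliseconds();
        long t1 = time.milliseconds();
        System.out.println(t1 - t0); // prints 10: each read advanced the clock
    }
}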

Example 5 with ApiVersions

Use of org.apache.kafka.clients.ApiVersions in project kafka by apache.

From class SenderTest, method testSplitBatchAndSend:

@SuppressWarnings("deprecation")
private void testSplitBatchAndSend(TransactionManager txnManager, ProducerIdAndEpoch producerIdAndEpoch, TopicPartition tp) throws Exception {
    int maxRetries = 1;
    String topic = tp.topic();
    int deliveryTimeoutMs = 3000;
    long totalSize = 1024 * 1024;
    String metricGrpName = "producer-metrics";
    // Set a good compression ratio.
    CompressionRatioEstimator.setEstimation(topic, CompressionType.GZIP, 0.2f);
    try (Metrics m = new Metrics()) {
        accumulator = new RecordAccumulator(logContext, batchSize, CompressionType.GZIP, 0, 0L, deliveryTimeoutMs, m, metricGrpName, time, new ApiVersions(), txnManager, new BufferPool(totalSize, batchSize, metrics, time, "producer-internal-metrics"));
        SenderMetricsRegistry senderMetrics = new SenderMetricsRegistry(m);
        Sender sender = new Sender(logContext, client, metadata, this.accumulator, true, MAX_REQUEST_SIZE, ACKS_ALL, maxRetries, senderMetrics, time, REQUEST_TIMEOUT, 1000L, txnManager, new ApiVersions());
        // Create a two-broker cluster, with partition 0 on broker 0 and partition 1 on broker 1.
        MetadataResponse metadataUpdate1 = RequestTestUtils.metadataUpdateWith(2, Collections.singletonMap(topic, 2));
        client.prepareMetadataUpdate(metadataUpdate1);
        // Send the first message.
        long nowMs = time.milliseconds();
        Future<RecordMetadata> f1 = accumulator.append(tp, 0L, "key1".getBytes(), new byte[batchSize / 2], null, null, MAX_BLOCK_TIMEOUT, false, nowMs).future;
        Future<RecordMetadata> f2 = accumulator.append(tp, 0L, "key2".getBytes(), new byte[batchSize / 2], null, null, MAX_BLOCK_TIMEOUT, false, nowMs).future;
        // connect
        sender.runOnce();
        // send produce request
        sender.runOnce();
        assertEquals(2, txnManager.sequenceNumber(tp).longValue(), "The next sequence should be 2");
        String id = client.requests().peek().destination();
        assertEquals(ApiKeys.PRODUCE, client.requests().peek().requestBuilder().apiKey());
        Node node = new Node(Integer.valueOf(id), "localhost", 0);
        assertEquals(1, client.inFlightRequestCount());
        assertTrue(client.isReady(node, time.milliseconds()), "Client ready status should be true");
        Map<TopicPartition, ProduceResponse.PartitionResponse> responseMap = new HashMap<>();
        responseMap.put(tp, new ProduceResponse.PartitionResponse(Errors.MESSAGE_TOO_LARGE));
        client.respond(new ProduceResponse(responseMap));
        // split and reenqueue
        sender.runOnce();
        assertEquals(2, txnManager.sequenceNumber(tp).longValue(), "The next sequence should be 2");
        // The compression ratio should have been improved once.
        assertEquals(CompressionType.GZIP.rate - CompressionRatioEstimator.COMPRESSION_RATIO_IMPROVING_STEP, CompressionRatioEstimator.estimation(topic, CompressionType.GZIP), 0.01);
        // send the first produce request
        sender.runOnce();
        assertEquals(2, txnManager.sequenceNumber(tp).longValue(), "The next sequence number should be 2");
        assertFalse(f1.isDone(), "The future shouldn't have been done.");
        assertFalse(f2.isDone(), "The future shouldn't have been done.");
        id = client.requests().peek().destination();
        assertEquals(ApiKeys.PRODUCE, client.requests().peek().requestBuilder().apiKey());
        node = new Node(Integer.valueOf(id), "localhost", 0);
        assertEquals(1, client.inFlightRequestCount());
        assertTrue(client.isReady(node, time.milliseconds()), "Client ready status should be true");
        responseMap.put(tp, new ProduceResponse.PartitionResponse(Errors.NONE, 0L, 0L, 0L));
        client.respond(produceRequestMatcher(tp, producerIdAndEpoch, 0, txnManager.isTransactional()), new ProduceResponse(responseMap));
        // receive
        sender.runOnce();
        assertTrue(f1.isDone(), "The future should have been done.");
        assertEquals(2, txnManager.sequenceNumber(tp).longValue(), "The next sequence number should still be 2");
        assertEquals(OptionalInt.of(0), txnManager.lastAckedSequence(tp), "The last ack'd sequence number should be 0");
        assertFalse(f2.isDone(), "The future shouldn't have been done.");
        assertEquals(0L, f1.get().offset(), "Offset of the first message should be 0");
        // send the second produce request
        sender.runOnce();
        id = client.requests().peek().destination();
        assertEquals(ApiKeys.PRODUCE, client.requests().peek().requestBuilder().apiKey());
        node = new Node(Integer.valueOf(id), "localhost", 0);
        assertEquals(1, client.inFlightRequestCount());
        assertTrue(client.isReady(node, time.milliseconds()), "Client ready status should be true");
        responseMap.put(tp, new ProduceResponse.PartitionResponse(Errors.NONE, 1L, 0L, 0L));
        client.respond(produceRequestMatcher(tp, producerIdAndEpoch, 1, txnManager.isTransactional()), new ProduceResponse(responseMap));
        // receive
        sender.runOnce();
        assertTrue(f2.isDone(), "The future should have been done.");
        assertEquals(2, txnManager.sequenceNumber(tp).longValue(), "The next sequence number should be 2");
        assertEquals(OptionalInt.of(1), txnManager.lastAckedSequence(tp), "The last ack'd sequence number should be 1");
        assertEquals(1L, f2.get().offset(), "Offset of the second message should be 1");
        assertTrue(accumulator.batches().get(tp).isEmpty(), "There should be no batch in the accumulator");
        assertTrue((Double) (m.metrics().get(senderMetrics.batchSplitRate).metricValue()) > 0, "There should be a split");
    }
}
Also used: LinkedHashMap (java.util.LinkedHashMap), IdentityHashMap (java.util.IdentityHashMap), HashMap (java.util.HashMap), ProduceResponse (org.apache.kafka.common.requests.ProduceResponse), Node (org.apache.kafka.common.Node), ArgumentMatchers.anyString (org.mockito.ArgumentMatchers.anyString), RecordMetadata (org.apache.kafka.clients.producer.RecordMetadata), Metrics (org.apache.kafka.common.metrics.Metrics), TopicPartition (org.apache.kafka.common.TopicPartition), NodeApiVersions (org.apache.kafka.clients.NodeApiVersions), ApiVersions (org.apache.kafka.clients.ApiVersions), MetadataResponse (org.apache.kafka.common.requests.MetadataResponse)
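
In the newer code base used by Examples 4 and 5, a per-key override no longer requires hand-building ApiVersionsResponse.ApiVersion objects; NodeApiVersions offers a single-override helper. A minimal sketch (class name hypothetical):

import org.apache.kafka.clients.ApiVersions;
import org.apache.kafka.clients.NodeApiVersions;
import org.apache.kafka.common.protocol.ApiKeys;

public class NodeApiVersionsSketch {
    public static void main(String[] args) {
        ApiVersions apiVersions = new ApiVersions();
        // Advertise PRODUCE v0..v2 for node "0" with the convenience overload.
        apiVersions.update("0", NodeApiVersions.create(ApiKeys.PRODUCE.id, (short) 0, (short) 2));
        System.out.println(apiVersions.maxUsableProduceMagic()); // magic v1
    }
}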

Aggregations

ApiVersions (org.apache.kafka.clients.ApiVersions): 24 usages
NodeApiVersions (org.apache.kafka.clients.NodeApiVersions): 22 usages
Node (org.apache.kafka.common.Node): 13 usages
Test (org.junit.jupiter.api.Test): 12 usages
LogContext (org.apache.kafka.common.utils.LogContext): 10 usages
NetworkClient (org.apache.kafka.clients.NetworkClient): 8 usages
Cluster (org.apache.kafka.common.Cluster): 7 usages
Metrics (org.apache.kafka.common.metrics.Metrics): 7 usages
ByteBuffer (java.nio.ByteBuffer): 6 usages
ClientRequest (org.apache.kafka.clients.ClientRequest): 6 usages
MetricName (org.apache.kafka.common.MetricName): 6 usages
KafkaMetric (org.apache.kafka.common.metrics.KafkaMetric): 6 usages
Sensor (org.apache.kafka.common.metrics.Sensor): 6 usages
NetworkReceive (org.apache.kafka.common.network.NetworkReceive): 6 usages
ApiVersionsResponse (org.apache.kafka.common.requests.ApiVersionsResponse): 6 usages
DelayedReceive (org.apache.kafka.test.DelayedReceive): 6 usages
MockSelector (org.apache.kafka.test.MockSelector): 6 usages
TopicPartition (org.apache.kafka.common.TopicPartition): 5 usages
ArrayList (java.util.ArrayList): 4 usages
Arrays.asList (java.util.Arrays.asList): 4 usages