Example 26 with ClientRequest

use of org.apache.kafka.clients.ClientRequest in project apache-kafka-on-k8s by banzaicloud.

the class SenderTest method testQuotaMetrics.

/*
 * Send multiple requests. Verify that the client-side quota metrics have the right values.
 */
@Test
@SuppressWarnings("deprecation")
public void testQuotaMetrics() throws Exception {
    MockSelector selector = new MockSelector(time);
    Sensor throttleTimeSensor = Sender.throttleTimeSensor(this.senderMetricsRegistry);
    Cluster cluster = TestUtils.singletonCluster("test", 1);
    Node node = cluster.nodes().get(0);
    NetworkClient client = new NetworkClient(selector, metadata, "mock", Integer.MAX_VALUE, 1000, 1000, 64 * 1024, 64 * 1024, 1000, time, true, new ApiVersions(), throttleTimeSensor, logContext);
    short apiVersionsResponseVersion = ApiKeys.API_VERSIONS.latestVersion();
    ByteBuffer buffer = ApiVersionsResponse.createApiVersionsResponse(400, RecordBatch.CURRENT_MAGIC_VALUE).serialize(apiVersionsResponseVersion, new ResponseHeader(0));
    selector.delayedReceive(new DelayedReceive(node.idString(), new NetworkReceive(node.idString(), buffer)));
    while (!client.ready(node, time.milliseconds())) client.poll(1, time.milliseconds());
    selector.clear();
    for (int i = 1; i <= 3; i++) {
        int throttleTimeMs = 100 * i;
        ProduceRequest.Builder builder = ProduceRequest.Builder.forCurrentMagic((short) 1, 1000, Collections.<TopicPartition, MemoryRecords>emptyMap());
        ClientRequest request = client.newClientRequest(node.idString(), builder, time.milliseconds(), true, null);
        client.send(request, time.milliseconds());
        client.poll(1, time.milliseconds());
        ProduceResponse response = produceResponse(tp0, i, Errors.NONE, throttleTimeMs);
        buffer = response.serialize(ApiKeys.PRODUCE.latestVersion(), new ResponseHeader(request.correlationId()));
        selector.completeReceive(new NetworkReceive(node.idString(), buffer));
        client.poll(1, time.milliseconds());
        selector.clear();
    }
    Map<MetricName, KafkaMetric> allMetrics = metrics.metrics();
    KafkaMetric avgMetric = allMetrics.get(this.senderMetricsRegistry.produceThrottleTimeAvg);
    KafkaMetric maxMetric = allMetrics.get(this.senderMetricsRegistry.produceThrottleTimeMax);
    // Throttle times are ApiVersions=400, Produce=(100, 200, 300)
    assertEquals(250, avgMetric.value(), EPS);
    assertEquals(400, maxMetric.value(), EPS);
    client.close();
}
Also used : ResponseHeader(org.apache.kafka.common.requests.ResponseHeader) ProduceRequest(org.apache.kafka.common.requests.ProduceRequest) ProduceResponse(org.apache.kafka.common.requests.ProduceResponse) Node(org.apache.kafka.common.Node) NetworkReceive(org.apache.kafka.common.network.NetworkReceive) Cluster(org.apache.kafka.common.Cluster) KafkaMetric(org.apache.kafka.common.metrics.KafkaMetric) ByteBuffer(java.nio.ByteBuffer) MockSelector(org.apache.kafka.test.MockSelector) MetricName(org.apache.kafka.common.MetricName) NetworkClient(org.apache.kafka.clients.NetworkClient) NodeApiVersions(org.apache.kafka.clients.NodeApiVersions) ApiVersions(org.apache.kafka.clients.ApiVersions) DelayedReceive(org.apache.kafka.test.DelayedReceive) ClientRequest(org.apache.kafka.clients.ClientRequest) Sensor(org.apache.kafka.common.metrics.Sensor) Test(org.junit.Test)
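
The assertions boil down to simple arithmetic over the four recorded throttle times: avg = (400 + 100 + 200 + 300) / 4 = 250 and max = 400. A minimal, self-contained sketch of the same Avg/Max bookkeeping using the public org.apache.kafka.common.metrics API (the metric names below are made up for illustration):

import org.apache.kafka.common.MetricName;
import org.apache.kafka.common.metrics.KafkaMetric;
import org.apache.kafka.common.metrics.Metrics;
import org.apache.kafka.common.metrics.Sensor;
import org.apache.kafka.common.metrics.stats.Avg;
import org.apache.kafka.common.metrics.stats.Max;

public class ThrottleTimeSketch {
    public static void main(String[] args) {
        Metrics metrics = new Metrics();
        Sensor throttle = metrics.sensor("throttle-time");
        MetricName avgName = metrics.metricName("throttle-time-avg", "sketch");
        MetricName maxName = metrics.metricName("throttle-time-max", "sketch");
        throttle.add(avgName, new Avg());
        throttle.add(maxName, new Max());
        // Same four throttle times as the test: ApiVersions=400, then Produce=100, 200, 300
        for (int throttleTimeMs : new int[] { 400, 100, 200, 300 })
            throttle.record(throttleTimeMs);
        KafkaMetric avg = metrics.metric(avgName);
        KafkaMetric max = metrics.metric(maxName);
        System.out.println(avg.metricValue()); // 250.0 = (400 + 100 + 200 + 300) / 4
        System.out.println(max.metricValue()); // 400.0
        metrics.close();
    }
}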

Example 27 with ClientRequest

use of org.apache.kafka.clients.ClientRequest in project apache-kafka-on-k8s by banzaicloud.

the class SenderTest method testCorrectHandlingOfDuplicateSequenceError.

@Test
public void testCorrectHandlingOfDuplicateSequenceError() throws Exception {
    final long producerId = 343434L;
    TransactionManager transactionManager = new TransactionManager();
    setupWithTransactionState(transactionManager);
    prepareAndReceiveInitProducerId(producerId, Errors.NONE);
    assertTrue(transactionManager.hasProducerId());
    assertEquals(0, transactionManager.sequenceNumber(tp0).longValue());
    // Send first ProduceRequest
    Future<RecordMetadata> request1 = accumulator.append(tp0, time.milliseconds(), "key".getBytes(), "value".getBytes(), null, null, MAX_BLOCK_TIMEOUT).future;
    sender.run(time.milliseconds());
    String nodeId = client.requests().peek().destination();
    Node node = new Node(Integer.valueOf(nodeId), "localhost", 0);
    assertEquals(1, client.inFlightRequestCount());
    assertEquals(1, transactionManager.sequenceNumber(tp0).longValue());
    assertEquals(-1, transactionManager.lastAckedSequence(tp0));
    // Send second ProduceRequest
    Future<RecordMetadata> request2 = accumulator.append(tp0, time.milliseconds(), "key".getBytes(), "value".getBytes(), null, null, MAX_BLOCK_TIMEOUT).future;
    sender.run(time.milliseconds());
    assertEquals(2, client.inFlightRequestCount());
    assertEquals(2, transactionManager.sequenceNumber(tp0).longValue());
    assertEquals(-1, transactionManager.lastAckedSequence(tp0));
    assertFalse(request1.isDone());
    assertFalse(request2.isDone());
    assertTrue(client.isReady(node, time.milliseconds()));
    ClientRequest firstClientRequest = client.requests().peek();
    ClientRequest secondClientRequest = (ClientRequest) client.requests().toArray()[1];
    client.respondToRequest(secondClientRequest, produceResponse(tp0, 1000, Errors.NONE, 0));
    // receive response 1
    sender.run(time.milliseconds());
    assertEquals(1000, transactionManager.lastAckedOffset(tp0));
    assertEquals(1, transactionManager.lastAckedSequence(tp0));
    client.respondToRequest(firstClientRequest, produceResponse(tp0, ProduceResponse.INVALID_OFFSET, Errors.DUPLICATE_SEQUENCE_NUMBER, 0));
    // receive response 0
    sender.run(time.milliseconds());
    // Make sure that the last ack'd sequence doesn't change.
    assertEquals(1, transactionManager.lastAckedSequence(tp0));
    assertEquals(1000, transactionManager.lastAckedOffset(tp0));
    assertFalse(client.hasInFlightRequests());
    RecordMetadata unknownMetadata = request1.get();
    assertFalse(unknownMetadata.hasOffset());
    assertEquals(-1L, unknownMetadata.offset());
}
Also used : RecordMetadata(org.apache.kafka.clients.producer.RecordMetadata) Node(org.apache.kafka.common.Node) ClientRequest(org.apache.kafka.clients.ClientRequest) Test(org.junit.Test)
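
The heart of this test is the invariant that the last acked sequence never moves backwards: sequence 1 is acked first, and the late DUPLICATE_SEQUENCE_NUMBER ack for sequence 0 must leave it untouched. A self-contained sketch of that invariant, with hypothetical variable names rather than TransactionManager's actual fields:

public class LastAckedSequenceSketch {
    public static void main(String[] args) {
        int lastAckedSequence = 1;       // the second response acked sequence 1 first
        int duplicateAckedSequence = 0;  // then DUPLICATE_SEQUENCE_NUMBER arrives for sequence 0
        // An ack for an older sequence must never regress the high-water mark
        lastAckedSequence = Math.max(lastAckedSequence, duplicateAckedSequence);
        System.out.println(lastAckedSequence); // prints 1: unchanged, as the test asserts
    }
}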

Example 28 with ClientRequest

use of org.apache.kafka.clients.ClientRequest in project apache-kafka-on-k8s by banzaicloud.

the class FetcherTest method testQuotaMetrics.

/*
 * Send multiple requests. Verify that the client-side quota metrics have the right values.
 */
@Test
public void testQuotaMetrics() throws Exception {
    MockSelector selector = new MockSelector(time);
    Sensor throttleTimeSensor = Fetcher.throttleTimeSensor(metrics, metricsRegistry);
    Cluster cluster = TestUtils.singletonCluster("test", 1);
    Node node = cluster.nodes().get(0);
    NetworkClient client = new NetworkClient(selector, metadata, "mock", Integer.MAX_VALUE, 1000, 1000, 64 * 1024, 64 * 1024, 1000, time, true, new ApiVersions(), throttleTimeSensor, new LogContext());
    short apiVersionsResponseVersion = ApiKeys.API_VERSIONS.latestVersion();
    ByteBuffer buffer = ApiVersionsResponse.createApiVersionsResponse(400, RecordBatch.CURRENT_MAGIC_VALUE).serialize(apiVersionsResponseVersion, new ResponseHeader(0));
    selector.delayedReceive(new DelayedReceive(node.idString(), new NetworkReceive(node.idString(), buffer)));
    while (!client.ready(node, time.milliseconds())) client.poll(1, time.milliseconds());
    selector.clear();
    for (int i = 1; i <= 3; i++) {
        int throttleTimeMs = 100 * i;
        FetchRequest.Builder builder = FetchRequest.Builder.forConsumer(100, 100, new LinkedHashMap<TopicPartition, PartitionData>());
        ClientRequest request = client.newClientRequest(node.idString(), builder, time.milliseconds(), true, null);
        client.send(request, time.milliseconds());
        client.poll(1, time.milliseconds());
        FetchResponse response = fullFetchResponse(tp0, nextRecords, Errors.NONE, i, throttleTimeMs);
        buffer = response.serialize(ApiKeys.FETCH.latestVersion(), new ResponseHeader(request.correlationId()));
        selector.completeReceive(new NetworkReceive(node.idString(), buffer));
        client.poll(1, time.milliseconds());
        selector.clear();
    }
    Map<MetricName, KafkaMetric> allMetrics = metrics.metrics();
    KafkaMetric avgMetric = allMetrics.get(metrics.metricInstance(metricsRegistry.fetchThrottleTimeAvg));
    KafkaMetric maxMetric = allMetrics.get(metrics.metricInstance(metricsRegistry.fetchThrottleTimeMax));
    // Throttle times are ApiVersions=400, Fetch=(100, 200, 300)
    assertEquals(250, avgMetric.value(), EPSILON);
    assertEquals(400, maxMetric.value(), EPSILON);
    client.close();
}
Also used : ResponseHeader(org.apache.kafka.common.requests.ResponseHeader) Node(org.apache.kafka.common.Node) NetworkReceive(org.apache.kafka.common.network.NetworkReceive) Cluster(org.apache.kafka.common.Cluster) LogContext(org.apache.kafka.common.utils.LogContext) FetchResponse(org.apache.kafka.common.requests.FetchResponse) KafkaMetric(org.apache.kafka.common.metrics.KafkaMetric) ByteBuffer(java.nio.ByteBuffer) MockSelector(org.apache.kafka.test.MockSelector) MetricName(org.apache.kafka.common.MetricName) NetworkClient(org.apache.kafka.clients.NetworkClient) PartitionData(org.apache.kafka.common.requests.FetchRequest.PartitionData) TopicPartition(org.apache.kafka.common.TopicPartition) NodeApiVersions(org.apache.kafka.clients.NodeApiVersions) ApiVersions(org.apache.kafka.clients.ApiVersions) FetchRequest(org.apache.kafka.common.requests.FetchRequest) DelayedReceive(org.apache.kafka.test.DelayedReceive) ClientRequest(org.apache.kafka.clients.ClientRequest) Sensor(org.apache.kafka.common.metrics.Sensor) Test(org.junit.Test)
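
Both quota tests pass null as the final callback argument of newClientRequest and instead drive responses by hand through the MockSelector. A caller that wants to react to a response would attach a RequestCompletionHandler; a minimal sketch, assuming the same clients API used in these tests:

import org.apache.kafka.clients.ClientResponse;
import org.apache.kafka.clients.RequestCompletionHandler;

public class CompletionHandlerSketch {
    public static void main(String[] args) {
        // NetworkClient invokes this from poll() once the response (or a disconnect) arrives
        RequestCompletionHandler onComplete = (ClientResponse response) -> {
            if (response.wasDisconnected())
                System.out.println("connection to " + response.destination() + " dropped");
            else
                System.out.println("completed correlationId " + response.requestHeader().correlationId());
        };
        // Would be passed as the last argument of client.newClientRequest(...) in the loops above
    }
}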

Example 29 with ClientRequest

use of org.apache.kafka.clients.ClientRequest in project kafka by apache.

the class Sender method sendProduceRequest.

/**
 * Create a produce request from the given record batches
 */
private void sendProduceRequest(long now, int destination, short acks, int timeout, List<ProducerBatch> batches) {
    if (batches.isEmpty())
        return;
    final Map<TopicPartition, ProducerBatch> recordsByPartition = new HashMap<>(batches.size());
    // find the minimum magic version used when creating the record sets
    byte minUsedMagic = apiVersions.maxUsableProduceMagic();
    for (ProducerBatch batch : batches) {
        if (batch.magic() < minUsedMagic)
            minUsedMagic = batch.magic();
    }
    ProduceRequestData.TopicProduceDataCollection tpd = new ProduceRequestData.TopicProduceDataCollection();
    for (ProducerBatch batch : batches) {
        TopicPartition tp = batch.topicPartition;
        MemoryRecords records = batch.records();
        // down-convert if necessary to the minimum magic used. The magic may have been chosen from
        // stale metadata; for example, if a partition migrates from a broker which is supporting the
        // new magic version to one which doesn't, then we will need to convert.
        if (!records.hasMatchingMagic(minUsedMagic))
            records = batch.records().downConvert(minUsedMagic, 0, time).records();
        ProduceRequestData.TopicProduceData tpData = tpd.find(tp.topic());
        if (tpData == null) {
            tpData = new ProduceRequestData.TopicProduceData().setName(tp.topic());
            tpd.add(tpData);
        }
        tpData.partitionData().add(new ProduceRequestData.PartitionProduceData().setIndex(tp.partition()).setRecords(records));
        recordsByPartition.put(tp, batch);
    }
    String transactionalId = null;
    if (transactionManager != null && transactionManager.isTransactional()) {
        transactionalId = transactionManager.transactionalId();
    }
    ProduceRequest.Builder requestBuilder = ProduceRequest.forMagic(minUsedMagic, new ProduceRequestData().setAcks(acks).setTimeoutMs(timeout).setTransactionalId(transactionalId).setTopicData(tpd));
    RequestCompletionHandler callback = response -> handleProduceResponse(response, recordsByPartition, time.milliseconds());
    String nodeId = Integer.toString(destination);
    ClientRequest clientRequest = client.newClientRequest(nodeId, requestBuilder, now, acks != 0, requestTimeoutMs, callback);
    client.send(clientRequest, now);
    log.trace("Sent produce request to {}: {}", nodeId, requestBuilder);
}
Also used : Max(org.apache.kafka.common.metrics.stats.Max) TransactionAbortedException(org.apache.kafka.common.errors.TransactionAbortedException) ProduceRequest(org.apache.kafka.common.requests.ProduceRequest) Metadata(org.apache.kafka.clients.Metadata) KafkaException(org.apache.kafka.common.KafkaException) HashMap(java.util.HashMap) RetriableException(org.apache.kafka.common.errors.RetriableException) AbstractRequest(org.apache.kafka.common.requests.AbstractRequest) ClusterAuthorizationException(org.apache.kafka.common.errors.ClusterAuthorizationException) Function(java.util.function.Function) ClientRequest(org.apache.kafka.clients.ClientRequest) InvalidRecordException(org.apache.kafka.common.InvalidRecordException) ArrayList(java.util.ArrayList) Cluster(org.apache.kafka.common.Cluster) RequestHeader(org.apache.kafka.common.requests.RequestHeader) FindCoordinatorRequest(org.apache.kafka.common.requests.FindCoordinatorRequest) InvalidMetadataException(org.apache.kafka.common.errors.InvalidMetadataException) KafkaClient(org.apache.kafka.clients.KafkaClient) RecordBatch(org.apache.kafka.common.record.RecordBatch) LogContext(org.apache.kafka.common.utils.LogContext) Map(java.util.Map) MetricName(org.apache.kafka.common.MetricName) ProduceRequestData(org.apache.kafka.common.message.ProduceRequestData) ProduceResponse(org.apache.kafka.common.requests.ProduceResponse) TopicPartition(org.apache.kafka.common.TopicPartition) Sensor(org.apache.kafka.common.metrics.Sensor) TimeoutException(org.apache.kafka.common.errors.TimeoutException) Logger(org.slf4j.Logger) Time(org.apache.kafka.common.utils.Time) Iterator(java.util.Iterator) IOException(java.io.IOException) ApiVersions(org.apache.kafka.clients.ApiVersions) Collectors(java.util.stream.Collectors) Objects(java.util.Objects) MemoryRecords(org.apache.kafka.common.record.MemoryRecords) List(java.util.List) NetworkClientUtils(org.apache.kafka.clients.NetworkClientUtils) RequestCompletionHandler(org.apache.kafka.clients.RequestCompletionHandler) Avg(org.apache.kafka.common.metrics.stats.Avg) TopicAuthorizationException(org.apache.kafka.common.errors.TopicAuthorizationException) Errors(org.apache.kafka.common.protocol.Errors) Node(org.apache.kafka.common.Node) UnknownTopicOrPartitionException(org.apache.kafka.common.errors.UnknownTopicOrPartitionException) Meter(org.apache.kafka.common.metrics.stats.Meter) Collections(java.util.Collections) ClientResponse(org.apache.kafka.clients.ClientResponse) AuthenticationException(org.apache.kafka.common.errors.AuthenticationException) HashMap(java.util.HashMap) ProduceRequest(org.apache.kafka.common.requests.ProduceRequest) ProduceRequestData(org.apache.kafka.common.message.ProduceRequestData) RequestCompletionHandler(org.apache.kafka.clients.RequestCompletionHandler) TopicPartition(org.apache.kafka.common.TopicPartition) ClientRequest(org.apache.kafka.clients.ClientRequest) MemoryRecords(org.apache.kafka.common.record.MemoryRecords)
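
The tpd.find(...)/tpd.add(...) loop above is a find-or-create group-by-topic idiom over the ProduceRequestData collections. The same shape, sketched self-contained with plain JDK collections and hypothetical stand-in data:

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

public class TopicGroupingSketch {
    public static void main(String[] args) {
        // topic of each batch, in send order (stand-ins for batch.topicPartition)
        String[] batchTopics = { "a", "a", "b" };
        Map<String, List<Integer>> partitionDataByTopic = new HashMap<>();
        for (int i = 0; i < batchTopics.length; i++)
            partitionDataByTopic.computeIfAbsent(batchTopics[i], topic -> new ArrayList<>()).add(i);
        System.out.println(partitionDataByTopic); // {a=[0, 1], b=[2]}
    }
}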

Example 30 with ClientRequest

use of org.apache.kafka.clients.ClientRequest in project kafka by apache.

the class ConsumerNetworkClient method failUnsentRequests.

private void failUnsentRequests(Node node, RuntimeException e) {
    // clear unsent requests to node and fail their corresponding futures
    lock.lock();
    try {
        Collection<ClientRequest> unsentRequests = unsent.remove(node);
        for (ClientRequest unsentRequest : unsentRequests) {
            RequestFutureCompletionHandler handler = (RequestFutureCompletionHandler) unsentRequest.callback();
            handler.onFailure(e);
        }
    } finally {
        lock.unlock();
    }
}
Also used : ClientRequest(org.apache.kafka.clients.ClientRequest)
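
The method detaches the node's unsent queue under the lock and then fails each queued request's future. A self-contained sketch of the same pattern with plain JDK types, using CompletableFuture as a stand-in for RequestFutureCompletionHandler (note the real unsent structure returns an empty collection rather than null on remove):

import java.util.HashMap;
import java.util.Map;
import java.util.Queue;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.locks.ReentrantLock;

public class FailUnsentSketch {
    private final ReentrantLock lock = new ReentrantLock();
    private final Map<String, Queue<CompletableFuture<Void>>> unsent = new HashMap<>();

    void failUnsentRequests(String node, RuntimeException e) {
        // detach the node's queue under the lock, then fail every pending future with the cause
        lock.lock();
        try {
            Queue<CompletableFuture<Void>> requests = unsent.remove(node);
            if (requests != null)
                for (CompletableFuture<Void> future : requests)
                    future.completeExceptionally(e);
        } finally {
            lock.unlock();
        }
    }
}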

Aggregations

ClientRequest (org.apache.kafka.clients.ClientRequest): 38
Node (org.apache.kafka.common.Node): 25
Test (org.junit.jupiter.api.Test): 11
HashMap (java.util.HashMap): 8
MockClient (org.apache.kafka.clients.MockClient): 8
Cluster (org.apache.kafka.common.Cluster): 8
LinkedHashMap (java.util.LinkedHashMap): 7
ClientResponse (org.apache.kafka.clients.ClientResponse): 7
Test (org.junit.Test): 7
ApiVersions (org.apache.kafka.clients.ApiVersions): 6
RecordMetadata (org.apache.kafka.clients.producer.RecordMetadata): 6
MetricName (org.apache.kafka.common.MetricName): 6
Sensor (org.apache.kafka.common.metrics.Sensor): 6
ByteBuffer (java.nio.ByteBuffer): 5
NetworkClient (org.apache.kafka.clients.NetworkClient): 5
NodeApiVersions (org.apache.kafka.clients.NodeApiVersions): 5
TopicPartition (org.apache.kafka.common.TopicPartition): 5
KafkaMetric (org.apache.kafka.common.metrics.KafkaMetric): 5
NetworkReceive (org.apache.kafka.common.network.NetworkReceive): 5
AbstractRequest (org.apache.kafka.common.requests.AbstractRequest): 5