
Example 6 with ClientRequest

Use of org.apache.kafka.clients.ClientRequest in the project apache-kafka-on-k8s by banzaicloud.

The class Sender, method sendProduceRequest:

/**
 * Create a produce request from the given record batches
 */
private void sendProduceRequest(long now, int destination, short acks, int timeout, List<ProducerBatch> batches) {
    if (batches.isEmpty())
        return;
    Map<TopicPartition, MemoryRecords> produceRecordsByPartition = new HashMap<>(batches.size());
    final Map<TopicPartition, ProducerBatch> recordsByPartition = new HashMap<>(batches.size());
    // find the minimum magic version used when creating the record sets
    byte minUsedMagic = apiVersions.maxUsableProduceMagic();
    for (ProducerBatch batch : batches) {
        if (batch.magic() < minUsedMagic)
            minUsedMagic = batch.magic();
    }
    for (ProducerBatch batch : batches) {
        TopicPartition tp = batch.topicPartition;
        MemoryRecords records = batch.records();
        // Down convert if necessary to the minimum magic used. The message format may have been chosen
        // from out-of-date metadata; for example, if a partition migrates from a broker which supports
        // the new magic version to one which doesn't, then we will need to convert.
        if (!records.hasMatchingMagic(minUsedMagic))
            records = batch.records().downConvert(minUsedMagic, 0, time).records();
        produceRecordsByPartition.put(tp, records);
        recordsByPartition.put(tp, batch);
    }
    String transactionalId = null;
    if (transactionManager != null && transactionManager.isTransactional()) {
        transactionalId = transactionManager.transactionalId();
    }
    ProduceRequest.Builder requestBuilder = ProduceRequest.Builder.forMagic(minUsedMagic, acks, timeout, produceRecordsByPartition, transactionalId);
    RequestCompletionHandler callback = new RequestCompletionHandler() {

        public void onComplete(ClientResponse response) {
            handleProduceResponse(response, recordsByPartition, time.milliseconds());
        }
    };
    String nodeId = Integer.toString(destination);
    ClientRequest clientRequest = client.newClientRequest(nodeId, requestBuilder, now, acks != 0, callback);
    client.send(clientRequest, now);
    log.trace("Sent produce request to {}: {}", nodeId, requestBuilder);
}
Also used: ClientResponse(org.apache.kafka.clients.ClientResponse) HashMap(java.util.HashMap) ProduceRequest(org.apache.kafka.common.requests.ProduceRequest) RequestCompletionHandler(org.apache.kafka.clients.RequestCompletionHandler) TopicPartition(org.apache.kafka.common.TopicPartition) ClientRequest(org.apache.kafka.clients.ClientRequest) MemoryRecords(org.apache.kafka.common.record.MemoryRecords)
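
The heart of this example is pairing a ClientRequest with a RequestCompletionHandler before handing it to the client. Below is a minimal sketch of that wiring in isolation, assuming the same five-argument newClientRequest signature used above; the class name ProduceRequestSketch and the client and requestBuilder parameters are hypothetical placeholders for an already-configured KafkaClient and a populated ProduceRequest.Builder.

import org.apache.kafka.clients.ClientRequest;
import org.apache.kafka.clients.ClientResponse;
import org.apache.kafka.clients.KafkaClient;
import org.apache.kafka.clients.RequestCompletionHandler;
import org.apache.kafka.common.requests.ProduceRequest;

public class ProduceRequestSketch {

    // Sketch only: build and send a single produce ClientRequest with a completion callback.
    public static void sendWithCallback(KafkaClient client, ProduceRequest.Builder requestBuilder,
                                        String nodeId, long now) {
        RequestCompletionHandler callback = new RequestCompletionHandler() {
            @Override
            public void onComplete(ClientResponse response) {
                if (response.wasDisconnected())
                    System.out.println("Connection to " + response.destination() + " was lost before a response arrived");
                else
                    System.out.println("Produce response received from " + response.destination());
            }
        };
        // expectResponse is true here; the Sender above passes acks != 0 so that acks=0 requests are fire-and-forget
        ClientRequest clientRequest = client.newClientRequest(nodeId, requestBuilder, now, true, callback);
        client.send(clientRequest, now);
    }
}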

Example 7 with ClientRequest

Use of org.apache.kafka.clients.ClientRequest in the project apache-kafka-on-k8s by banzaicloud.

The class ConsumerNetworkClient, method trySend:

private boolean trySend(long now) {
    // send any requests that can be sent now
    boolean requestsSent = false;
    for (Node node : unsent.nodes()) {
        Iterator<ClientRequest> iterator = unsent.requestIterator(node);
        while (iterator.hasNext()) {
            ClientRequest request = iterator.next();
            if (client.ready(node, now)) {
                client.send(request, now);
                iterator.remove();
                requestsSent = true;
            }
        }
    }
    return requestsSent;
}
Also used: Node(org.apache.kafka.common.Node) ClientRequest(org.apache.kafka.clients.ClientRequest)
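
The unsent collection above is ConsumerNetworkClient's internal per-node request buffer. As a rough, hedged illustration of the same drain-when-ready loop, the sketch below substitutes a plain Map<Node, List<ClientRequest>> for that internal structure; the class and method names are made up for this example.

import java.util.Iterator;
import java.util.List;
import java.util.Map;
import org.apache.kafka.clients.ClientRequest;
import org.apache.kafka.clients.KafkaClient;
import org.apache.kafka.common.Node;

public class DrainUnsentSketch {

    // Sketch only: send every buffered request whose destination node is ready, removing it from the buffer.
    public static boolean drain(KafkaClient client, Map<Node, List<ClientRequest>> unsent, long now) {
        boolean requestsSent = false;
        for (Map.Entry<Node, List<ClientRequest>> entry : unsent.entrySet()) {
            Node node = entry.getKey();
            Iterator<ClientRequest> iterator = entry.getValue().iterator();
            while (iterator.hasNext()) {
                ClientRequest request = iterator.next();
                // ready() also initiates a connection to the node if none exists yet
                if (client.ready(node, now)) {
                    client.send(request, now);
                    iterator.remove();
                    requestsSent = true;
                }
            }
        }
        return requestsSent;
    }
}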

Example 8 with ClientRequest

Use of org.apache.kafka.clients.ClientRequest in the project apache-kafka-on-k8s by banzaicloud.

The class ConsumerNetworkClient, method checkDisconnects:

private void checkDisconnects(long now) {
    // Any disconnects affecting requests that have already been transmitted are handled by the
    // NetworkClient itself, so we only need to check whether connections for any of the unsent
    // requests have failed; if so, we complete the corresponding future
    // and set the disconnect flag in the ClientResponse.
    for (Node node : unsent.nodes()) {
        if (client.connectionFailed(node)) {
            // Remove entry before invoking request callback to avoid callbacks handling
            // coordinator failures traversing the unsent list again.
            Collection<ClientRequest> requests = unsent.remove(node);
            for (ClientRequest request : requests) {
                RequestFutureCompletionHandler handler = (RequestFutureCompletionHandler) request.callback();
                AuthenticationException authenticationException = client.authenticationException(node);
                if (authenticationException != null)
                    handler.onFailure(authenticationException);
                else
                    handler.onComplete(new ClientResponse(
                            request.makeHeader(request.requestBuilder().latestAllowedVersion()),
                            request.callback(), request.destination(), request.createdTimeMs(),
                            now, true, null, null));
            }
        }
    }
}
Also used: ClientResponse(org.apache.kafka.clients.ClientResponse) AuthenticationException(org.apache.kafka.common.errors.AuthenticationException) Node(org.apache.kafka.common.Node) ClientRequest(org.apache.kafka.clients.ClientRequest)
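
Seen from the callback side, a disconnect completed this way arrives as a ClientResponse whose disconnect flag is set and whose response body is null. A small, hypothetical handler that makes the distinction explicit:

import org.apache.kafka.clients.ClientResponse;
import org.apache.kafka.clients.RequestCompletionHandler;

public class DisconnectAwareHandler implements RequestCompletionHandler {

    @Override
    public void onComplete(ClientResponse response) {
        if (response.wasDisconnected()) {
            // The connection dropped before a response arrived; responseBody() is null in this case.
            System.out.println("Request to node " + response.destination() + " completed with a disconnect");
        } else {
            System.out.println("Response received from node " + response.destination() + ": " + response.responseBody());
        }
    }
}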

Example 9 with ClientRequest

Use of org.apache.kafka.clients.ClientRequest in the project apache-kafka-on-k8s by banzaicloud.

The class ConsumerNetworkClient, method failExpiredRequests:

private void failExpiredRequests(long now) {
    // clear all expired unsent requests and fail their corresponding futures
    Collection<ClientRequest> expiredRequests = unsent.removeExpiredRequests(now, unsentExpiryMs);
    for (ClientRequest request : expiredRequests) {
        RequestFutureCompletionHandler handler = (RequestFutureCompletionHandler) request.callback();
        handler.onFailure(new TimeoutException("Failed to send request after " + unsentExpiryMs + " ms."));
    }
}
Also used: ClientRequest(org.apache.kafka.clients.ClientRequest) TimeoutException(org.apache.kafka.common.errors.TimeoutException)
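
removeExpiredRequests drops any buffered request whose age, measured from ClientRequest.createdTimeMs(), exceeds the expiry. The standalone sketch below reproduces that idea over a plain list; the buffer shape and the ExpirySketch name are assumptions, not the actual internal implementation.

import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;
import org.apache.kafka.clients.ClientRequest;

public class ExpirySketch {

    // Sketch only: remove and return every request older than expiryMs, based on its creation timestamp.
    public static List<ClientRequest> removeExpired(List<ClientRequest> unsent, long now, long expiryMs) {
        List<ClientRequest> expired = new ArrayList<>();
        Iterator<ClientRequest> it = unsent.iterator();
        while (it.hasNext()) {
            ClientRequest request = it.next();
            if (now - request.createdTimeMs() > expiryMs) {
                it.remove();
                expired.add(request);
            }
        }
        return expired;
    }
}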

Example 10 with ClientRequest

Use of org.apache.kafka.clients.ClientRequest in the project apache-kafka-on-k8s by banzaicloud.

The class ConsumerNetworkClient, method send:

/**
 * Send a new request. Note that the request is not actually transmitted on the
 * network until one of the {@link #poll(long)} variants is invoked. At this
 * point the request will either be transmitted successfully or will fail.
 * Use the returned future to obtain the result of the send. Note that there is no
 * need to check for disconnects explicitly on the {@link ClientResponse} object;
 * instead, the future will be failed with a {@link DisconnectException}.
 *
 * @param node The destination of the request
 * @param requestBuilder A builder for the request payload
 * @return A future which indicates the result of the send.
 */
public RequestFuture<ClientResponse> send(Node node, AbstractRequest.Builder<?> requestBuilder) {
    long now = time.milliseconds();
    RequestFutureCompletionHandler completionHandler = new RequestFutureCompletionHandler();
    ClientRequest clientRequest = client.newClientRequest(node.idString(), requestBuilder, now, true, completionHandler);
    unsent.put(node, clientRequest);
    // wakeup the client in case it is blocking in poll so that we can send the queued request
    client.wakeup();
    return completionHandler.future;
}
Also used: ClientRequest(org.apache.kafka.clients.ClientRequest)
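
A typical caller queues the request with send(), drives the network I/O with one of the poll variants, and then inspects the completed future. The usage sketch below assumes an already-constructed ConsumerNetworkClient plus a target node and request builder, and is illustrative rather than a definitive recipe.

import org.apache.kafka.clients.ClientResponse;
import org.apache.kafka.clients.consumer.internals.ConsumerNetworkClient;
import org.apache.kafka.clients.consumer.internals.RequestFuture;
import org.apache.kafka.common.Node;
import org.apache.kafka.common.requests.AbstractRequest;

public class SendAndWaitSketch {

    // Sketch only: queue a request, block until it completes, then inspect the outcome.
    public static void sendAndWait(ConsumerNetworkClient networkClient, Node node,
                                   AbstractRequest.Builder<?> requestBuilder) {
        RequestFuture<ClientResponse> future = networkClient.send(node, requestBuilder);
        // poll(future) blocks until the future succeeds or fails (e.g. with a DisconnectException)
        networkClient.poll(future);
        if (future.succeeded())
            System.out.println("Response body: " + future.value().responseBody());
        else
            System.out.println("Send failed: " + future.exception());
    }
}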

Aggregations

ClientRequest (org.apache.kafka.clients.ClientRequest) 38
Node (org.apache.kafka.common.Node) 25
Test (org.junit.jupiter.api.Test) 11
HashMap (java.util.HashMap) 8
MockClient (org.apache.kafka.clients.MockClient) 8
Cluster (org.apache.kafka.common.Cluster) 8
LinkedHashMap (java.util.LinkedHashMap) 7
ClientResponse (org.apache.kafka.clients.ClientResponse) 7
Test (org.junit.Test) 7
ApiVersions (org.apache.kafka.clients.ApiVersions) 6
RecordMetadata (org.apache.kafka.clients.producer.RecordMetadata) 6
MetricName (org.apache.kafka.common.MetricName) 6
Sensor (org.apache.kafka.common.metrics.Sensor) 6
ByteBuffer (java.nio.ByteBuffer) 5
NetworkClient (org.apache.kafka.clients.NetworkClient) 5
NodeApiVersions (org.apache.kafka.clients.NodeApiVersions) 5
TopicPartition (org.apache.kafka.common.TopicPartition) 5
KafkaMetric (org.apache.kafka.common.metrics.KafkaMetric) 5
NetworkReceive (org.apache.kafka.common.network.NetworkReceive) 5
AbstractRequest (org.apache.kafka.common.requests.AbstractRequest) 5