
Example 1 with ResponseCallbackWrapper

Use of org.apache.kafka.common.requests.ResponseCallbackWrapper in project starlight-for-kafka by datastax.

From class KafkaCommandDecoder, method writeAndFlushResponseToClient.

// Write and flush completed responses back through the channel, continuously.
// This ensures responses reach the client in the same order as their requests arrived.
protected void writeAndFlushResponseToClient(Channel channel) {
    // Poll from the head of the queue: only the oldest pending request may leave.
    while (isActive.get()) {
        final ResponseAndRequest responseAndRequest = requestQueue.peek();
        if (responseAndRequest == null) {
            // requestQueue is empty
            break;
        }
        final CompletableFuture<AbstractResponse> responseFuture = responseAndRequest.getResponseFuture();
        final ApiKeys apiKey = responseAndRequest.getApiKey();
        final long nanoSecondsSinceCreated = responseAndRequest.nanoSecondsSinceCreated();
        final boolean expired = (nanoSecondsSinceCreated > TimeUnit.MILLISECONDS.toNanos(kafkaConfig.getRequestTimeoutMs()));
        if (!responseFuture.isDone() && !expired) {
            // case 1: responseFuture is neither completed nor expired; stop polling responses from requestQueue
            requestStats.getResponseBlockedTimes().inc();
            long firstBlockTimestamp = responseAndRequest.getFirstBlockedTimestamp();
            if (firstBlockTimestamp == 0) {
                responseAndRequest.setFirstBlockedTimestamp(MathUtils.nowInNano());
            }
            break;
        } else {
            if (requestQueue.remove(responseAndRequest)) {
                RequestStats.REQUEST_QUEUE_SIZE_INSTANCE.decrementAndGet();
            } else {
                // it has been removed by another thread, skip this element
                continue;
            }
        }
        if (responseAndRequest.getFirstBlockedTimestamp() != 0) {
            requestStats.getResponseBlockedLatency().registerSuccessfulEvent(MathUtils.elapsedNanos(responseAndRequest.getFirstBlockedTimestamp()), TimeUnit.NANOSECONDS);
        }
        final KafkaHeaderAndRequest request = responseAndRequest.getRequest();
        // case 2: responseFuture is completed exceptionally
        if (responseFuture.isCompletedExceptionally()) {
            responseFuture.exceptionally(e -> {
                log.error("[{}] request {} completed exceptionally", channel, request.getHeader(), e);
                channel.writeAndFlush(request.createErrorResponse(e));
                requestStats.getRequestStatsLogger(apiKey, KopServerStats.REQUEST_QUEUED_LATENCY).registerFailedEvent(nanoSecondsSinceCreated, TimeUnit.NANOSECONDS);
                return null;
            });
            // send exception to client?
            continue;
        }
        // case 3: responseFuture is completed normally
        if (responseFuture.isDone()) {
            responseFuture.thenAccept(response -> {
                if (response == null) {
                    // The response should never be null; this check is purely defensive.
                    log.error("[{}] Unexpected null completed future for request {}", ctx.channel(), request.getHeader());
                    channel.writeAndFlush(request.createErrorResponse(new ApiException("response is null")));
                    return;
                }
                if (log.isDebugEnabled()) {
                    log.debug("Write kafka cmd to client." + " request content: {}" + " responseAndRequest content: {}", request, response.toString(request.getRequest().version()));
                }
                final ByteBuf result = responseToByteBuf(response, request);
                channel.writeAndFlush(result).addListener(future -> {
                    if (response instanceof ResponseCallbackWrapper) {
                        ((ResponseCallbackWrapper) response).responseComplete();
                    }
                    if (!future.isSuccess()) {
                        log.error("[{}] Failed to write {}", channel, request.getHeader(), future.cause());
                    }
                });
                requestStats.getRequestStatsLogger(apiKey, KopServerStats.REQUEST_QUEUED_LATENCY).registerSuccessfulEvent(nanoSecondsSinceCreated, TimeUnit.NANOSECONDS);
            });
            continue;
        }
        // case 4: responseFuture is expired
        if (expired) {
            log.error("[{}] request {} is not completed for {} ns (> {} ms)", channel, request.getHeader(), nanoSecondsSinceCreated, kafkaConfig.getRequestTimeoutMs());
            responseFuture.cancel(true);
            channel.writeAndFlush(request.createErrorResponse(new ApiException("request is expired from server side")));
            requestStats.getRequestStatsLogger(apiKey, KopServerStats.REQUEST_QUEUED_LATENCY).registerFailedEvent(nanoSecondsSinceCreated, TimeUnit.NANOSECONDS);
        }
    }
}
Also used: ApiKeys(org.apache.kafka.common.protocol.ApiKeys), AbstractResponse(org.apache.kafka.common.requests.AbstractResponse), ResponseCallbackWrapper(org.apache.kafka.common.requests.ResponseCallbackWrapper), ByteBuf(io.netty.buffer.ByteBuf), ApiException(org.apache.kafka.common.errors.ApiException)
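
The write listener above is the whole contract: the channel flush completes first, then responseComplete() runs whatever cleanup the response producer deferred. A minimal sketch of that pattern follows; it is a generic stand-in, not the real ResponseCallbackWrapper (which, as the CompletableFuture<AbstractResponse> above implies, extends AbstractResponse and delegates to the wrapped response):

import java.util.Objects;

// Hypothetical, simplified stand-in for ResponseCallbackWrapper:
// pair a response with a cleanup callback that must not run until
// the response has actually been flushed to the network.
public final class CallbackWrappedResponse<R> {

    private final R response;        // the response to serialize and send
    private final Runnable callback; // deferred cleanup (e.g. buffer release)

    public CallbackWrappedResponse(R response, Runnable callback) {
        this.response = Objects.requireNonNull(response);
        this.callback = Objects.requireNonNull(callback);
    }

    public R response() {
        return response;
    }

    // Called by the I/O layer from the channel-write listener once the
    // bytes are on the wire; mirrors ResponseCallbackWrapper#responseComplete().
    public void responseComplete() {
        callback.run();
    }
}

Example 2 shows the producing side of the same contract: the fetch handler hands the network layer a response plus a recycle callback, so the decoded buffers stay alive exactly until the flush listener fires.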

Example 2 with ResponseCallbackWrapper

Use of org.apache.kafka.common.requests.ResponseCallbackWrapper in project starlight-for-kafka by datastax.

From class MessageFetchContext, method complete.

public void complete() {
    if (resultFuture == null) {
        // the context has been recycled
        return;
    }
    if (resultFuture.isCancelled()) {
        // The request was cancelled by KafkaCommandDecoder because the channel was closed or the request expired,
        // so the Netty buffers should be released.
        decodeResults.forEach(DecodeResult::recycle);
        return;
    }
    if (resultFuture.isDone()) {
        // It may be triggered again in DelayedProduceAndFetch
        return;
    }
    // Keep the order of TopicPartition
    final LinkedHashMap<TopicPartition, PartitionData<MemoryRecords>> orderedResponseData = new LinkedHashMap<>();
    // Fill in a timeout error for any topicPartition that is missing from responseData
    fetchRequest.fetchData().keySet().forEach(topicPartition -> {
        final PartitionData<MemoryRecords> partitionData = responseData.remove(topicPartition);
        if (partitionData != null) {
            orderedResponseData.put(topicPartition, partitionData);
        } else {
            orderedResponseData.put(topicPartition, new FetchResponse.PartitionData<>(Errors.REQUEST_TIMED_OUT, FetchResponse.INVALID_HIGHWATERMARK, FetchResponse.INVALID_LAST_STABLE_OFFSET, FetchResponse.INVALID_LOG_START_OFFSET, null, MemoryRecords.EMPTY));
        }
    });
    // Copy this.decodeResults so the lambda expression captures the current state,
    // because this.decodeResults will be cleared after resultFuture is completed.
    final List<DecodeResult> decodeResults = new ArrayList<>(this.decodeResults);
    resultFuture.complete(new ResponseCallbackWrapper(new FetchResponse<>(Errors.NONE, orderedResponseData, ((Integer) THROTTLE_TIME_MS.defaultValue), fetchRequest.metadata().sessionId()), () -> {
        // release the batched ByteBuf if necessary
        decodeResults.forEach(DecodeResult::recycle);
    }));
    recycle();
}
Also used: ResponseCallbackWrapper(org.apache.kafka.common.requests.ResponseCallbackWrapper), ArrayList(java.util.ArrayList), FetchResponse(org.apache.kafka.common.requests.FetchResponse), LinkedHashMap(java.util.LinkedHashMap), DecodeResult(io.streamnative.pulsar.handlers.kop.format.DecodeResult), PartitionData(org.apache.kafka.common.requests.FetchResponse.PartitionData), TopicPartition(org.apache.kafka.common.TopicPartition), MemoryRecords(org.apache.kafka.common.record.MemoryRecords)
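
One detail in complete() is worth isolating: the LinkedHashMap rebuild guarantees the response lists partitions in exactly the order the fetch request named them, back-filling REQUEST_TIMED_OUT for partitions that produced nothing. The same fill-in-request-order pattern, reduced to plain collections (the names below are illustrative, not KoP API):

import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;

public final class OrderedFill {

    // Returns results keyed in exactly the order of `requested`, substituting
    // `timeoutMarker` for any key that has no result yet. remove() both reads
    // and consumes the entry, matching responseData.remove(topicPartition) above.
    static <K, V> Map<K, V> fillInRequestOrder(List<K> requested,
                                               Map<K, V> results,
                                               V timeoutMarker) {
        Map<K, V> ordered = new LinkedHashMap<>();
        for (K key : requested) {
            V value = results.remove(key);
            ordered.put(key, value != null ? value : timeoutMarker);
        }
        return ordered;
    }
}

The local copy of this.decodeResults is the other subtlety here; see the sketch after Example 4 for why the lambda must capture a snapshot rather than the field.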

Example 3 with ResponseCallbackWrapper

Use of org.apache.kafka.common.requests.ResponseCallbackWrapper in project kop by streamnative.

From class KafkaCommandDecoder, method writeAndFlushResponseToClient.

The method body and the "Also used" imports are identical, line for line, to Example 1 above; only the project differs.
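
Stripped of the stats and error handling, the ordering discipline this method enforces in both projects is a head-of-line drain: only the oldest pending request may leave the queue, and it leaves only once done or expired. A reduced sketch of that loop, with hypothetical names standing in for the KoP types (expiry and the per-case stats are omitted):

import java.util.Queue;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ConcurrentLinkedQueue;
import java.util.function.Consumer;

public final class InOrderResponder<T> {

    private final Queue<CompletableFuture<T>> queue = new ConcurrentLinkedQueue<>();

    // Register a slot for an incoming request; the handler completes it later.
    public CompletableFuture<T> enqueue() {
        CompletableFuture<T> slot = new CompletableFuture<>();
        queue.add(slot);
        return slot;
    }

    // Drain strictly from the head so responses leave in request order.
    // A still-pending head blocks everything behind it (case 1 above).
    public void drain(Consumer<T> send) {
        while (true) {
            CompletableFuture<T> head = queue.peek();
            if (head == null || !head.isDone()) {
                break; // queue empty, or oldest response still in flight
            }
            if (!queue.remove(head)) {
                continue; // removed by another thread; re-examine the head
            }
            if (head.isCompletedExceptionally()) {
                continue; // the real method writes an error response here (case 2)
            }
            send.accept(head.join()); // safe: isDone() and not exceptional
        }
    }
}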

Example 4 with ResponseCallbackWrapper

Use of org.apache.kafka.common.requests.ResponseCallbackWrapper in project kop by streamnative.

From class MessageFetchContext, method complete.

The method body and imports are identical to Example 2 above; only the project differs.
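
One subtlety shared by both copies of complete() deserves a closer look: recycle() clears this.decodeResults, yet the cleanup callback handed to ResponseCallbackWrapper may run long after, from the channel-write listener. A toy illustration of the hazard and the fix (hypothetical names, with strings standing in for DecodeResult buffers):

import java.util.ArrayList;
import java.util.List;

public final class CaptureSnapshot {

    private final List<String> decodeResults = new ArrayList<>();

    // BUG sketch: the lambda captures the field itself, which recycle()
    // clears before the callback fires, so nothing would be released.
    Runnable brokenCleanup() {
        return () -> decodeResults.forEach(r -> System.out.println("recycle " + r));
    }

    // Fix, as complete() does: snapshot first, let the lambda capture the copy.
    Runnable safeCleanup() {
        List<String> snapshot = new ArrayList<>(decodeResults);
        return () -> snapshot.forEach(r -> System.out.println("recycle " + r));
    }

    void recycle() {
        decodeResults.clear();
    }
}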

Aggregations

ResponseCallbackWrapper (org.apache.kafka.common.requests.ResponseCallbackWrapper): 4 usages
ByteBuf (io.netty.buffer.ByteBuf): 2
DecodeResult (io.streamnative.pulsar.handlers.kop.format.DecodeResult): 2
ArrayList (java.util.ArrayList): 2
LinkedHashMap (java.util.LinkedHashMap): 2
TopicPartition (org.apache.kafka.common.TopicPartition): 2
ApiException (org.apache.kafka.common.errors.ApiException): 2
ApiKeys (org.apache.kafka.common.protocol.ApiKeys): 2
MemoryRecords (org.apache.kafka.common.record.MemoryRecords): 2
AbstractResponse (org.apache.kafka.common.requests.AbstractResponse): 2
FetchResponse (org.apache.kafka.common.requests.FetchResponse): 2
PartitionData (org.apache.kafka.common.requests.FetchResponse.PartitionData): 2