Search in sources :

Example 1 with DecodeResult

use of io.streamnative.pulsar.handlers.kop.format.DecodeResult in project starlight-for-kafka by datastax.

In the class MessageFetchContext, the method handleEntries:

/**
 * Builds the fetch response data for one partition from the entries just read
 * from BookKeeper: filters out uncommitted entries for READ_COMMITTED
 * consumers, decodes the entries into Kafka {@code MemoryRecords} on the
 * decode executor, records decode/consumer stats, and stores the result in
 * {@code responseData} before attempting to complete the fetch.
 *
 * @param topicPartition the Kafka partition these entries belong to
 * @param partitionData  the per-partition fetch request data (fetch offset)
 * @param tcm            consumer manager that the cursor is returned to
 * @param cursorOffset   the next offset to read; re-registered with the TCM
 * @param readCommitted  true when the consumer's isolation level is READ_COMMITTED
 */
private void handleEntries(final List<Entry> entries, final TopicPartition topicPartition, final FetchRequest.PartitionData partitionData, final KafkaTopicConsumerManager tcm, final ManagedCursor cursor, final AtomicLong cursorOffset, final boolean readCommitted) {
    final long highWatermark = MessageMetadataUtils.getHighWatermark(cursor.getManagedLedger());
    // Add new offset back to TCM after entries are read successfully
    tcm.add(cursorOffset.get(), Pair.of(cursor, cursorOffset.get()));
    PartitionLog partitionLog = requestHandler.getReplicaManager().getPartitionLog(topicPartition, namespacePrefix);
    // Last stable offset: READ_COMMITTED consumers must not see data at or
    // above the first still-undecided transaction offset.
    final long lso = (readCommitted ? partitionLog.firstUndecidedOffset().orElse(highWatermark) : highWatermark);
    List<Entry> committedEntries = entries;
    if (readCommitted) {
        committedEntries = getCommittedEntries(entries, lso);
        if (log.isDebugEnabled()) {
            log.debug("Request {}: read {} entries but only {} entries are committed", header, entries.size(), committedEntries.size());
        }
    } else {
        if (log.isDebugEnabled()) {
            log.debug("Request {}: read {} entries", header, entries.size());
        }
    }
    if (committedEntries.isEmpty()) {
        addErrorPartitionResponse(topicPartition, Errors.NONE);
        return;
    }
    // Effectively-final alias so the filtered list can be captured by the
    // async lambda below (committedEntries itself is reassigned above).
    final List<Entry> entriesToDecode = committedEntries;
    // use compatible magic value by apiVersion
    short apiVersion = header.apiVersion();
    final byte magic;
    if (apiVersion <= 1) {
        magic = RecordBatch.MAGIC_VALUE_V0;
    } else if (apiVersion <= 3) {
        magic = RecordBatch.MAGIC_VALUE_V1;
    } else {
        magic = RecordBatch.CURRENT_MAGIC_VALUE;
    }
    // Resolve (and cache per client host) the consumer group name; it is only
    // used to label the consumer metrics recorded below.
    CompletableFuture<String> groupNameFuture = requestHandler.getCurrentConnectedGroup().computeIfAbsent(clientHost, clientHost -> {
        CompletableFuture<String> future = new CompletableFuture<>();
        String groupIdPath = GroupIdUtils.groupIdPathFormat(clientHost, header.clientId());
        requestHandler.getMetadataStore().get(requestHandler.getGroupIdStoredPath() + groupIdPath).thenAccept(getResultOpt -> {
            if (getResultOpt.isPresent()) {
                GetResult getResult = getResultOpt.get();
                future.complete(new String(getResult.getValue() == null ? new byte[0] : getResult.getValue(), StandardCharsets.UTF_8));
            } else {
                future.complete("");
            }
        }).exceptionally(ex -> {
            future.completeExceptionally(ex);
            return null;
        });
        return future;
    });
    // this part is heavyweight, and we should not execute in the ManagedLedger Ordered executor thread
    groupNameFuture.whenCompleteAsync((groupName, ex) -> {
        if (ex != null) {
            // Metrics grouping is best-effort: fall back to an empty group name.
            log.error("Get groupId failed.", ex);
            groupName = "";
        }
        final long startDecodingEntriesNanos = MathUtils.nowInNano();
        // BUGFIX: decode only the committed entries. Previously the unfiltered
        // `entries` list was decoded here, so a READ_COMMITTED consumer could
        // receive entries at or above the last stable offset.
        final DecodeResult decodeResult = requestHandler.getEntryFormatter().decode(entriesToDecode, magic);
        requestHandler.requestStats.getFetchDecodeStats().registerSuccessfulEvent(MathUtils.elapsedNanos(startDecodingEntriesNanos), TimeUnit.NANOSECONDS);
        decodeResults.add(decodeResult);
        final MemoryRecords kafkaRecords = decodeResult.getRecords();
        // collect consumer metrics (count only the entries actually returned)
        decodeResult.updateConsumerStats(topicPartition, entriesToDecode.size(), groupName, statsLogger);
        List<FetchResponse.AbortedTransaction> abortedTransactions;
        if (readCommitted) {
            abortedTransactions = partitionLog.getAbortedIndexList(partitionData.fetchOffset);
        } else {
            abortedTransactions = null;
        }
        responseData.put(topicPartition, new PartitionData<>(Errors.NONE, highWatermark, lso, // TODO: should it be changed to the logStartOffset?
        highWatermark, abortedTransactions, kafkaRecords));
        bytesReadable.getAndAdd(kafkaRecords.sizeInBytes());
        tryComplete();
    }, requestHandler.getDecodeExecutor());
}
Also used : MathUtils(org.apache.bookkeeper.common.util.MathUtils) IsolationLevel(org.apache.kafka.common.requests.IsolationLevel) NonDurableCursorImpl(org.apache.bookkeeper.mledger.impl.NonDurableCursorImpl) ResponseCallbackWrapper(org.apache.kafka.common.requests.ResponseCallbackWrapper) OpStatsLogger(org.apache.bookkeeper.stats.OpStatsLogger) ManagedCursor(org.apache.bookkeeper.mledger.ManagedCursor) Handle(io.netty.util.Recycler.Handle) Pair(org.apache.commons.lang3.tuple.Pair) RequestHeader(org.apache.kafka.common.requests.RequestHeader) Map(java.util.Map) RecordBatch(org.apache.kafka.common.record.RecordBatch) FetchResponse(org.apache.kafka.common.requests.FetchResponse) DelayedOperationPurgatory(io.streamnative.pulsar.handlers.kop.utils.delayed.DelayedOperationPurgatory) PositionImpl(org.apache.bookkeeper.mledger.impl.PositionImpl) TopicPartition(org.apache.kafka.common.TopicPartition) PartitionLog(io.streamnative.pulsar.handlers.kop.storage.PartitionLog) ConcurrentHashMap(java.util.concurrent.ConcurrentHashMap) MessageMetadataUtils(io.streamnative.pulsar.handlers.kop.utils.MessageMetadataUtils) TransactionCoordinator(io.streamnative.pulsar.handlers.kop.coordinator.transaction.TransactionCoordinator) Collectors(java.util.stream.Collectors) StandardCharsets(java.nio.charset.StandardCharsets) Recycler(io.netty.util.Recycler) ManagedLedgerException(org.apache.bookkeeper.mledger.ManagedLedgerException) List(java.util.List) Slf4j(lombok.extern.slf4j.Slf4j) MemoryRecords(org.apache.kafka.common.record.MemoryRecords) Errors(org.apache.kafka.common.protocol.Errors) FetchRequest(org.apache.kafka.common.requests.FetchRequest) ConcurrentLinkedQueue(java.util.concurrent.ConcurrentLinkedQueue) AbstractResponse(org.apache.kafka.common.requests.AbstractResponse) Entry(org.apache.bookkeeper.mledger.Entry) AtomicBoolean(java.util.concurrent.atomic.AtomicBoolean) CompletableFuture(java.util.concurrent.CompletableFuture) ArrayList(java.util.ArrayList) 
LinkedHashMap(java.util.LinkedHashMap) Resource(io.streamnative.pulsar.handlers.kop.security.auth.Resource) Lists(com.google.common.collect.Lists) GetResult(org.apache.pulsar.metadata.api.GetResult) THROTTLE_TIME_MS(org.apache.kafka.common.protocol.CommonFields.THROTTLE_TIME_MS) GroupIdUtils(io.streamnative.pulsar.handlers.kop.utils.GroupIdUtils) ManagedLedgerImpl(org.apache.bookkeeper.mledger.impl.ManagedLedgerImpl) DelayedOperation(io.streamnative.pulsar.handlers.kop.utils.delayed.DelayedOperation) KopTopic(io.streamnative.pulsar.handlers.kop.utils.KopTopic) ResourceType(io.streamnative.pulsar.handlers.kop.security.auth.ResourceType) MetadataCorruptedException(io.streamnative.pulsar.handlers.kop.exceptions.MetadataCorruptedException) KafkaHeaderAndRequest(io.streamnative.pulsar.handlers.kop.KafkaCommandDecoder.KafkaHeaderAndRequest) AclOperation(org.apache.kafka.common.acl.AclOperation) PartitionData(org.apache.kafka.common.requests.FetchResponse.PartitionData) TimeUnit(java.util.concurrent.TimeUnit) DecodeResult(io.streamnative.pulsar.handlers.kop.format.DecodeResult) AtomicLong(java.util.concurrent.atomic.AtomicLong) MarkDeleteCallback(org.apache.bookkeeper.mledger.AsyncCallbacks.MarkDeleteCallback) DelayedOperationKey(io.streamnative.pulsar.handlers.kop.utils.delayed.DelayedOperationKey) ReadEntriesCallback(org.apache.bookkeeper.mledger.AsyncCallbacks.ReadEntriesCallback) ApiException(org.apache.kafka.common.errors.ApiException) GetResult(org.apache.pulsar.metadata.api.GetResult) DecodeResult(io.streamnative.pulsar.handlers.kop.format.DecodeResult) Entry(org.apache.bookkeeper.mledger.Entry) CompletableFuture(java.util.concurrent.CompletableFuture) PartitionLog(io.streamnative.pulsar.handlers.kop.storage.PartitionLog) MemoryRecords(org.apache.kafka.common.record.MemoryRecords)

Example 2 with DecodeResult

use of io.streamnative.pulsar.handlers.kop.format.DecodeResult in project starlight-for-kafka by datastax.

In the class MessageFetchContext, the method complete:

/**
 * Completes the pending fetch: assembles the per-partition response in the
 * original request order, completes {@code resultFuture} with a callback that
 * releases the decoded Netty buffers, and recycles this context. Safe to call
 * more than once; subsequent calls are no-ops.
 */
public void complete() {
    // A null resultFuture means this context has already been recycled.
    if (resultFuture == null) {
        return;
    }
    if (resultFuture.isCancelled()) {
        // KafkaCommandDecoder cancelled the request (channel closed or request
        // expired); release the Netty buffers held by the decode results.
        decodeResults.forEach(DecodeResult::recycle);
        return;
    }
    if (resultFuture.isDone()) {
        // DelayedProduceAndFetch may trigger complete() more than once.
        return;
    }
    // Preserve the TopicPartition order of the original fetch request; any
    // partition absent from responseData is reported as timed out.
    final LinkedHashMap<TopicPartition, PartitionData<MemoryRecords>> orderedResponseData = new LinkedHashMap<>();
    for (TopicPartition topicPartition : fetchRequest.fetchData().keySet()) {
        final PartitionData<MemoryRecords> partitionData = responseData.remove(topicPartition);
        if (partitionData == null) {
            orderedResponseData.put(topicPartition, new FetchResponse.PartitionData<>(Errors.REQUEST_TIMED_OUT, FetchResponse.INVALID_HIGHWATERMARK, FetchResponse.INVALID_LAST_STABLE_OFFSET, FetchResponse.INVALID_LOG_START_OFFSET, null, MemoryRecords.EMPTY));
        } else {
            orderedResponseData.put(topicPartition, partitionData);
        }
    }
    // Snapshot the decode results: this.decodeResults will be cleared once
    // resultFuture completes, but the release callback still needs them.
    final List<DecodeResult> pendingRecycle = new ArrayList<>(this.decodeResults);
    resultFuture.complete(new ResponseCallbackWrapper(
            new FetchResponse<>(Errors.NONE, orderedResponseData, ((Integer) THROTTLE_TIME_MS.defaultValue), fetchRequest.metadata().sessionId()),
            // release the batched ByteBuf if necessary
            () -> pendingRecycle.forEach(DecodeResult::recycle)));
    recycle();
}
Also used : ResponseCallbackWrapper(org.apache.kafka.common.requests.ResponseCallbackWrapper) ArrayList(java.util.ArrayList) FetchResponse(org.apache.kafka.common.requests.FetchResponse) LinkedHashMap(java.util.LinkedHashMap) DecodeResult(io.streamnative.pulsar.handlers.kop.format.DecodeResult) PartitionData(org.apache.kafka.common.requests.FetchResponse.PartitionData) TopicPartition(org.apache.kafka.common.TopicPartition) MemoryRecords(org.apache.kafka.common.record.MemoryRecords)

Example 3 with DecodeResult

use of io.streamnative.pulsar.handlers.kop.format.DecodeResult in project kop by streamnative.

In the class MessageFetchContext, the method handleEntries:

/**
 * Builds the fetch response data for one partition from the entries just read
 * from BookKeeper: filters out uncommitted entries for READ_COMMITTED
 * consumers, decodes the entries into Kafka {@code MemoryRecords} on the
 * decode executor, records decode/consumer stats, and stores the result in
 * {@code responseData} before attempting to complete the fetch.
 *
 * @param topicPartition the Kafka partition these entries belong to
 * @param partitionData  the per-partition fetch request data (fetch offset)
 * @param tcm            consumer manager that the cursor is returned to
 * @param cursorOffset   the next offset to read; re-registered with the TCM
 * @param readCommitted  true when the consumer's isolation level is READ_COMMITTED
 */
private void handleEntries(final List<Entry> entries, final TopicPartition topicPartition, final FetchRequest.PartitionData partitionData, final KafkaTopicConsumerManager tcm, final ManagedCursor cursor, final AtomicLong cursorOffset, final boolean readCommitted) {
    final long highWatermark = MessageMetadataUtils.getHighWatermark(cursor.getManagedLedger());
    // Add new offset back to TCM after entries are read successfully
    tcm.add(cursorOffset.get(), Pair.of(cursor, cursorOffset.get()));
    PartitionLog partitionLog = requestHandler.getReplicaManager().getPartitionLog(topicPartition, namespacePrefix);
    // Last stable offset: READ_COMMITTED consumers must not see data at or
    // above the first still-undecided transaction offset.
    final long lso = (readCommitted ? partitionLog.firstUndecidedOffset().orElse(highWatermark) : highWatermark);
    List<Entry> committedEntries = entries;
    if (readCommitted) {
        committedEntries = getCommittedEntries(entries, lso);
        if (log.isDebugEnabled()) {
            log.debug("Request {}: read {} entries but only {} entries are committed", header, entries.size(), committedEntries.size());
        }
    } else {
        if (log.isDebugEnabled()) {
            log.debug("Request {}: read {} entries", header, entries.size());
        }
    }
    if (committedEntries.isEmpty()) {
        addErrorPartitionResponse(topicPartition, Errors.NONE);
        return;
    }
    // Effectively-final alias so the filtered list can be captured by the
    // async lambda below (committedEntries itself is reassigned above).
    final List<Entry> entriesToDecode = committedEntries;
    // use compatible magic value by apiVersion
    short apiVersion = header.apiVersion();
    final byte magic;
    if (apiVersion <= 1) {
        magic = RecordBatch.MAGIC_VALUE_V0;
    } else if (apiVersion <= 3) {
        magic = RecordBatch.MAGIC_VALUE_V1;
    } else {
        magic = RecordBatch.CURRENT_MAGIC_VALUE;
    }
    // Resolve (and cache per client host) the consumer group name; it is only
    // used to label the consumer metrics recorded below.
    CompletableFuture<String> groupNameFuture = requestHandler.getCurrentConnectedGroup().computeIfAbsent(clientHost, clientHost -> {
        CompletableFuture<String> future = new CompletableFuture<>();
        String groupIdPath = GroupIdUtils.groupIdPathFormat(clientHost, header.clientId());
        requestHandler.getMetadataStore().get(requestHandler.getGroupIdStoredPath() + groupIdPath).thenAccept(getResultOpt -> {
            if (getResultOpt.isPresent()) {
                GetResult getResult = getResultOpt.get();
                future.complete(new String(getResult.getValue() == null ? new byte[0] : getResult.getValue(), StandardCharsets.UTF_8));
            } else {
                future.complete("");
            }
        }).exceptionally(ex -> {
            future.completeExceptionally(ex);
            return null;
        });
        return future;
    });
    // this part is heavyweight, and we should not execute in the ManagedLedger Ordered executor thread
    groupNameFuture.whenCompleteAsync((groupName, ex) -> {
        if (ex != null) {
            // Metrics grouping is best-effort: fall back to an empty group name.
            log.error("Get groupId failed.", ex);
            groupName = "";
        }
        final long startDecodingEntriesNanos = MathUtils.nowInNano();
        // BUGFIX: decode only the committed entries. Previously the unfiltered
        // `entries` list was decoded here, so a READ_COMMITTED consumer could
        // receive entries at or above the last stable offset.
        final DecodeResult decodeResult = requestHandler.getEntryFormatter().decode(entriesToDecode, magic);
        requestHandler.requestStats.getFetchDecodeStats().registerSuccessfulEvent(MathUtils.elapsedNanos(startDecodingEntriesNanos), TimeUnit.NANOSECONDS);
        decodeResults.add(decodeResult);
        final MemoryRecords kafkaRecords = decodeResult.getRecords();
        // collect consumer metrics (count only the entries actually returned)
        decodeResult.updateConsumerStats(topicPartition, entriesToDecode.size(), groupName, statsLogger);
        List<FetchResponse.AbortedTransaction> abortedTransactions;
        if (readCommitted) {
            abortedTransactions = partitionLog.getAbortedIndexList(partitionData.fetchOffset);
        } else {
            abortedTransactions = null;
        }
        responseData.put(topicPartition, new PartitionData<>(Errors.NONE, highWatermark, lso, // TODO: should it be changed to the logStartOffset?
        highWatermark, abortedTransactions, kafkaRecords));
        bytesReadable.getAndAdd(kafkaRecords.sizeInBytes());
        tryComplete();
    }, requestHandler.getDecodeExecutor());
}
Also used : MathUtils(org.apache.bookkeeper.common.util.MathUtils) IsolationLevel(org.apache.kafka.common.requests.IsolationLevel) NonDurableCursorImpl(org.apache.bookkeeper.mledger.impl.NonDurableCursorImpl) ResponseCallbackWrapper(org.apache.kafka.common.requests.ResponseCallbackWrapper) OpStatsLogger(org.apache.bookkeeper.stats.OpStatsLogger) ManagedCursor(org.apache.bookkeeper.mledger.ManagedCursor) Handle(io.netty.util.Recycler.Handle) Pair(org.apache.commons.lang3.tuple.Pair) RequestHeader(org.apache.kafka.common.requests.RequestHeader) Map(java.util.Map) RecordBatch(org.apache.kafka.common.record.RecordBatch) FetchResponse(org.apache.kafka.common.requests.FetchResponse) DelayedOperationPurgatory(io.streamnative.pulsar.handlers.kop.utils.delayed.DelayedOperationPurgatory) PositionImpl(org.apache.bookkeeper.mledger.impl.PositionImpl) TopicPartition(org.apache.kafka.common.TopicPartition) PartitionLog(io.streamnative.pulsar.handlers.kop.storage.PartitionLog) ConcurrentHashMap(java.util.concurrent.ConcurrentHashMap) MessageMetadataUtils(io.streamnative.pulsar.handlers.kop.utils.MessageMetadataUtils) TransactionCoordinator(io.streamnative.pulsar.handlers.kop.coordinator.transaction.TransactionCoordinator) Collectors(java.util.stream.Collectors) StandardCharsets(java.nio.charset.StandardCharsets) Recycler(io.netty.util.Recycler) ManagedLedgerException(org.apache.bookkeeper.mledger.ManagedLedgerException) List(java.util.List) Slf4j(lombok.extern.slf4j.Slf4j) MemoryRecords(org.apache.kafka.common.record.MemoryRecords) Errors(org.apache.kafka.common.protocol.Errors) FetchRequest(org.apache.kafka.common.requests.FetchRequest) ConcurrentLinkedQueue(java.util.concurrent.ConcurrentLinkedQueue) AbstractResponse(org.apache.kafka.common.requests.AbstractResponse) Entry(org.apache.bookkeeper.mledger.Entry) AtomicBoolean(java.util.concurrent.atomic.AtomicBoolean) CompletableFuture(java.util.concurrent.CompletableFuture) ArrayList(java.util.ArrayList) 
LinkedHashMap(java.util.LinkedHashMap) Resource(io.streamnative.pulsar.handlers.kop.security.auth.Resource) Lists(com.google.common.collect.Lists) GetResult(org.apache.pulsar.metadata.api.GetResult) THROTTLE_TIME_MS(org.apache.kafka.common.protocol.CommonFields.THROTTLE_TIME_MS) GroupIdUtils(io.streamnative.pulsar.handlers.kop.utils.GroupIdUtils) ManagedLedgerImpl(org.apache.bookkeeper.mledger.impl.ManagedLedgerImpl) DelayedOperation(io.streamnative.pulsar.handlers.kop.utils.delayed.DelayedOperation) KopTopic(io.streamnative.pulsar.handlers.kop.utils.KopTopic) ResourceType(io.streamnative.pulsar.handlers.kop.security.auth.ResourceType) MetadataCorruptedException(io.streamnative.pulsar.handlers.kop.exceptions.MetadataCorruptedException) KafkaHeaderAndRequest(io.streamnative.pulsar.handlers.kop.KafkaCommandDecoder.KafkaHeaderAndRequest) AclOperation(org.apache.kafka.common.acl.AclOperation) PartitionData(org.apache.kafka.common.requests.FetchResponse.PartitionData) TimeUnit(java.util.concurrent.TimeUnit) DecodeResult(io.streamnative.pulsar.handlers.kop.format.DecodeResult) AtomicLong(java.util.concurrent.atomic.AtomicLong) MarkDeleteCallback(org.apache.bookkeeper.mledger.AsyncCallbacks.MarkDeleteCallback) DelayedOperationKey(io.streamnative.pulsar.handlers.kop.utils.delayed.DelayedOperationKey) ReadEntriesCallback(org.apache.bookkeeper.mledger.AsyncCallbacks.ReadEntriesCallback) ApiException(org.apache.kafka.common.errors.ApiException) GetResult(org.apache.pulsar.metadata.api.GetResult) DecodeResult(io.streamnative.pulsar.handlers.kop.format.DecodeResult) Entry(org.apache.bookkeeper.mledger.Entry) CompletableFuture(java.util.concurrent.CompletableFuture) PartitionLog(io.streamnative.pulsar.handlers.kop.storage.PartitionLog) MemoryRecords(org.apache.kafka.common.record.MemoryRecords)

Example 4 with DecodeResult

use of io.streamnative.pulsar.handlers.kop.format.DecodeResult in project kop by streamnative.

In the class MessageFetchContext, the method complete:

/**
 * Completes the pending fetch: assembles the per-partition response in the
 * original request order, completes {@code resultFuture} with a callback that
 * releases the decoded Netty buffers, and recycles this context. Safe to call
 * more than once; subsequent calls are no-ops.
 */
public void complete() {
    // A null resultFuture means this context has already been recycled.
    if (resultFuture == null) {
        return;
    }
    if (resultFuture.isCancelled()) {
        // KafkaCommandDecoder cancelled the request (channel closed or request
        // expired); release the Netty buffers held by the decode results.
        decodeResults.forEach(DecodeResult::recycle);
        return;
    }
    if (resultFuture.isDone()) {
        // DelayedProduceAndFetch may trigger complete() more than once.
        return;
    }
    // Preserve the TopicPartition order of the original fetch request; any
    // partition absent from responseData is reported as timed out.
    final LinkedHashMap<TopicPartition, PartitionData<MemoryRecords>> orderedResponseData = new LinkedHashMap<>();
    for (TopicPartition topicPartition : fetchRequest.fetchData().keySet()) {
        final PartitionData<MemoryRecords> partitionData = responseData.remove(topicPartition);
        if (partitionData == null) {
            orderedResponseData.put(topicPartition, new FetchResponse.PartitionData<>(Errors.REQUEST_TIMED_OUT, FetchResponse.INVALID_HIGHWATERMARK, FetchResponse.INVALID_LAST_STABLE_OFFSET, FetchResponse.INVALID_LOG_START_OFFSET, null, MemoryRecords.EMPTY));
        } else {
            orderedResponseData.put(topicPartition, partitionData);
        }
    }
    // Snapshot the decode results: this.decodeResults will be cleared once
    // resultFuture completes, but the release callback still needs them.
    final List<DecodeResult> pendingRecycle = new ArrayList<>(this.decodeResults);
    resultFuture.complete(new ResponseCallbackWrapper(
            new FetchResponse<>(Errors.NONE, orderedResponseData, ((Integer) THROTTLE_TIME_MS.defaultValue), fetchRequest.metadata().sessionId()),
            // release the batched ByteBuf if necessary
            () -> pendingRecycle.forEach(DecodeResult::recycle)));
    recycle();
}
Also used : ResponseCallbackWrapper(org.apache.kafka.common.requests.ResponseCallbackWrapper) ArrayList(java.util.ArrayList) FetchResponse(org.apache.kafka.common.requests.FetchResponse) LinkedHashMap(java.util.LinkedHashMap) DecodeResult(io.streamnative.pulsar.handlers.kop.format.DecodeResult) PartitionData(org.apache.kafka.common.requests.FetchResponse.PartitionData) TopicPartition(org.apache.kafka.common.TopicPartition) MemoryRecords(org.apache.kafka.common.record.MemoryRecords)

Aggregations

DecodeResult (io.streamnative.pulsar.handlers.kop.format.DecodeResult)4 ArrayList (java.util.ArrayList)4 LinkedHashMap (java.util.LinkedHashMap)4 TopicPartition (org.apache.kafka.common.TopicPartition)3 MemoryRecords (org.apache.kafka.common.record.MemoryRecords)3 FetchResponse (org.apache.kafka.common.requests.FetchResponse)3 PartitionData (org.apache.kafka.common.requests.FetchResponse.PartitionData)3 ResponseCallbackWrapper (org.apache.kafka.common.requests.ResponseCallbackWrapper)3 Lists (com.google.common.collect.Lists)2 Recycler (io.netty.util.Recycler)2 Handle (io.netty.util.Recycler.Handle)2 KafkaHeaderAndRequest (io.streamnative.pulsar.handlers.kop.KafkaCommandDecoder.KafkaHeaderAndRequest)2 TransactionCoordinator (io.streamnative.pulsar.handlers.kop.coordinator.transaction.TransactionCoordinator)2 MetadataCorruptedException (io.streamnative.pulsar.handlers.kop.exceptions.MetadataCorruptedException)2 Resource (io.streamnative.pulsar.handlers.kop.security.auth.Resource)2 ResourceType (io.streamnative.pulsar.handlers.kop.security.auth.ResourceType)2 PartitionLog (io.streamnative.pulsar.handlers.kop.storage.PartitionLog)2 GroupIdUtils (io.streamnative.pulsar.handlers.kop.utils.GroupIdUtils)2 KopTopic (io.streamnative.pulsar.handlers.kop.utils.KopTopic)2 MessageMetadataUtils (io.streamnative.pulsar.handlers.kop.utils.MessageMetadataUtils)2