Use of io.streamnative.pulsar.handlers.kop.storage.PartitionLog in the project starlight-for-kafka by DataStax.
From the class MessageFetchContext, method handleEntries:
private void handleEntries(final List<Entry> entries,
                           final TopicPartition topicPartition,
                           final FetchRequest.PartitionData partitionData,
                           final KafkaTopicConsumerManager tcm,
                           final ManagedCursor cursor,
                           final AtomicLong cursorOffset,
                           final boolean readCommitted) {
    final long highWatermark = MessageMetadataUtils.getHighWatermark(cursor.getManagedLedger());
    // Add new offset back to TCM after entries are read successfully
    tcm.add(cursorOffset.get(), Pair.of(cursor, cursorOffset.get()));
    PartitionLog partitionLog = requestHandler.getReplicaManager()
            .getPartitionLog(topicPartition, namespacePrefix);
    // Last stable offset: for read_committed consumers, stop at the first offset whose
    // transaction outcome is still undecided; otherwise use the high watermark.
    final long lso = (readCommitted
            ? partitionLog.firstUndecidedOffset().orElse(highWatermark)
            : highWatermark);
    List<Entry> committedEntries = entries;
    if (readCommitted) {
        committedEntries = getCommittedEntries(entries, lso);
        if (log.isDebugEnabled()) {
            log.debug("Request {}: read {} entries but only {} entries are committed",
                    header, entries.size(), committedEntries.size());
        }
    } else {
        if (log.isDebugEnabled()) {
            log.debug("Request {}: read {} entries", header, entries.size());
        }
    }
    if (committedEntries.isEmpty()) {
        addErrorPartitionResponse(topicPartition, Errors.NONE);
        return;
    }
    // Use a record-batch magic value compatible with the fetch request's apiVersion
    short apiVersion = header.apiVersion();
    final byte magic;
    if (apiVersion <= 1) {
        magic = RecordBatch.MAGIC_VALUE_V0;
    } else if (apiVersion <= 3) {
        magic = RecordBatch.MAGIC_VALUE_V1;
    } else {
        magic = RecordBatch.CURRENT_MAGIC_VALUE;
    }
    // Resolve the consumer group name for this client connection, cached per client host
    CompletableFuture<String> groupNameFuture = requestHandler.getCurrentConnectedGroup()
            .computeIfAbsent(clientHost, clientHost -> {
                CompletableFuture<String> future = new CompletableFuture<>();
                String groupIdPath = GroupIdUtils.groupIdPathFormat(clientHost, header.clientId());
                requestHandler.getMetadataStore()
                        .get(requestHandler.getGroupIdStoredPath() + groupIdPath)
                        .thenAccept(getResultOpt -> {
                            if (getResultOpt.isPresent()) {
                                GetResult getResult = getResultOpt.get();
                                future.complete(new String(
                                        getResult.getValue() == null ? new byte[0] : getResult.getValue(),
                                        StandardCharsets.UTF_8));
                            } else {
                                future.complete("");
                            }
                        })
                        .exceptionally(ex -> {
                            future.completeExceptionally(ex);
                            return null;
                        });
                return future;
            });
    // Decoding is heavyweight, so it should not run on the ManagedLedger ordered executor thread
    groupNameFuture.whenCompleteAsync((groupName, ex) -> {
        if (ex != null) {
            log.error("Get groupId failed.", ex);
            groupName = "";
        }
        final long startDecodingEntriesNanos = MathUtils.nowInNano();
        final DecodeResult decodeResult = requestHandler.getEntryFormatter().decode(entries, magic);
        requestHandler.requestStats.getFetchDecodeStats()
                .registerSuccessfulEvent(MathUtils.elapsedNanos(startDecodingEntriesNanos), TimeUnit.NANOSECONDS);
        decodeResults.add(decodeResult);
        final MemoryRecords kafkaRecords = decodeResult.getRecords();
        // Collect consumer metrics
        decodeResult.updateConsumerStats(topicPartition, entries.size(), groupName, statsLogger);
        List<FetchResponse.AbortedTransaction> abortedTransactions;
        if (readCommitted) {
            abortedTransactions = partitionLog.getAbortedIndexList(partitionData.fetchOffset);
        } else {
            abortedTransactions = null;
        }
        responseData.put(topicPartition,
                new PartitionData<>(Errors.NONE, highWatermark, lso, // TODO: should it be changed to the logStartOffset?
                        highWatermark, abortedTransactions, kafkaRecords));
        bytesReadable.getAndAdd(kafkaRecords.sizeInBytes());
        tryComplete();
    }, requestHandler.getDecodeExecutor());
}
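The magic-value selection above follows the standard Kafka down-conversion rule: Fetch v0-v1 clients only understand the v0 message format, v2-v3 clients understand v1, and v4+ clients understand the current (v2) record-batch format. Below is a standalone sketch of that mapping using the record-format constants from kafka-clients; it is an illustration, not code from either project.

import org.apache.kafka.common.record.RecordBatch;

public class FetchMagicExample {

    // Map a Fetch request apiVersion to the newest record-batch magic the
    // requesting client can decode.
    static byte magicForFetchVersion(short apiVersion) {
        if (apiVersion <= 1) {
            return RecordBatch.MAGIC_VALUE_V0;
        } else if (apiVersion <= 3) {
            return RecordBatch.MAGIC_VALUE_V1;
        }
        return RecordBatch.CURRENT_MAGIC_VALUE; // v2 at the time of writing
    }

    public static void main(String[] args) {
        for (short v = 0; v <= 5; v++) {
            System.out.println("Fetch v" + v + " -> magic " + magicForFetchVersion(v));
        }
    }
}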
Use of io.streamnative.pulsar.handlers.kop.storage.PartitionLog in the project kop by StreamNative.
From the class MessageFetchContext, method handleEntries:
private void handleEntries(final List<Entry> entries,
                           final TopicPartition topicPartition,
                           final FetchRequest.PartitionData partitionData,
                           final KafkaTopicConsumerManager tcm,
                           final ManagedCursor cursor,
                           final AtomicLong cursorOffset,
                           final boolean readCommitted) {
    final long highWatermark = MessageMetadataUtils.getHighWatermark(cursor.getManagedLedger());
    // Add new offset back to TCM after entries are read successfully
    tcm.add(cursorOffset.get(), Pair.of(cursor, cursorOffset.get()));
    PartitionLog partitionLog = requestHandler.getReplicaManager()
            .getPartitionLog(topicPartition, namespacePrefix);
    // Last stable offset: for read_committed consumers, stop at the first offset whose
    // transaction outcome is still undecided; otherwise use the high watermark.
    final long lso = (readCommitted
            ? partitionLog.firstUndecidedOffset().orElse(highWatermark)
            : highWatermark);
    List<Entry> committedEntries = entries;
    if (readCommitted) {
        committedEntries = getCommittedEntries(entries, lso);
        if (log.isDebugEnabled()) {
            log.debug("Request {}: read {} entries but only {} entries are committed",
                    header, entries.size(), committedEntries.size());
        }
    } else {
        if (log.isDebugEnabled()) {
            log.debug("Request {}: read {} entries", header, entries.size());
        }
    }
    if (committedEntries.isEmpty()) {
        addErrorPartitionResponse(topicPartition, Errors.NONE);
        return;
    }
    // Use a record-batch magic value compatible with the fetch request's apiVersion
    short apiVersion = header.apiVersion();
    final byte magic;
    if (apiVersion <= 1) {
        magic = RecordBatch.MAGIC_VALUE_V0;
    } else if (apiVersion <= 3) {
        magic = RecordBatch.MAGIC_VALUE_V1;
    } else {
        magic = RecordBatch.CURRENT_MAGIC_VALUE;
    }
    // Resolve the consumer group name for this client connection, cached per client host
    CompletableFuture<String> groupNameFuture = requestHandler.getCurrentConnectedGroup()
            .computeIfAbsent(clientHost, clientHost -> {
                CompletableFuture<String> future = new CompletableFuture<>();
                String groupIdPath = GroupIdUtils.groupIdPathFormat(clientHost, header.clientId());
                requestHandler.getMetadataStore()
                        .get(requestHandler.getGroupIdStoredPath() + groupIdPath)
                        .thenAccept(getResultOpt -> {
                            if (getResultOpt.isPresent()) {
                                GetResult getResult = getResultOpt.get();
                                future.complete(new String(
                                        getResult.getValue() == null ? new byte[0] : getResult.getValue(),
                                        StandardCharsets.UTF_8));
                            } else {
                                future.complete("");
                            }
                        })
                        .exceptionally(ex -> {
                            future.completeExceptionally(ex);
                            return null;
                        });
                return future;
            });
    // Decoding is heavyweight, so it should not run on the ManagedLedger ordered executor thread
    groupNameFuture.whenCompleteAsync((groupName, ex) -> {
        if (ex != null) {
            log.error("Get groupId failed.", ex);
            groupName = "";
        }
        final long startDecodingEntriesNanos = MathUtils.nowInNano();
        final DecodeResult decodeResult = requestHandler.getEntryFormatter().decode(entries, magic);
        requestHandler.requestStats.getFetchDecodeStats()
                .registerSuccessfulEvent(MathUtils.elapsedNanos(startDecodingEntriesNanos), TimeUnit.NANOSECONDS);
        decodeResults.add(decodeResult);
        final MemoryRecords kafkaRecords = decodeResult.getRecords();
        // Collect consumer metrics
        decodeResult.updateConsumerStats(topicPartition, entries.size(), groupName, statsLogger);
        List<FetchResponse.AbortedTransaction> abortedTransactions;
        if (readCommitted) {
            abortedTransactions = partitionLog.getAbortedIndexList(partitionData.fetchOffset);
        } else {
            abortedTransactions = null;
        }
        responseData.put(topicPartition,
                new PartitionData<>(Errors.NONE, highWatermark, lso, // TODO: should it be changed to the logStartOffset?
                        highWatermark, abortedTransactions, kafkaRecords));
        bytesReadable.getAndAdd(kafkaRecords.sizeInBytes());
        tryComplete();
    }, requestHandler.getDecodeExecutor());
}
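In both projects the read_committed path relies on a getCommittedEntries helper that is not part of this snippet. Below is a minimal sketch of the filtering it implies, with the per-entry offset lookup abstracted into a ToLongFunction<Entry> parameter because the real helper reads each entry's Kafka offset from its message metadata; this is an illustration under those assumptions, not the project's implementation, and the exact boundary handling may differ.

import java.util.ArrayList;
import java.util.List;
import java.util.function.ToLongFunction;

import org.apache.bookkeeper.mledger.Entry;

public final class CommittedEntriesFilter {

    // Keep only entries whose offset lies below the last stable offset (LSO);
    // entries at or beyond the LSO may belong to a still-undecided transaction
    // and must stay hidden from a read_committed consumer. In this sketch the
    // filtered-out entries are released so their underlying buffers are not leaked.
    static List<Entry> getCommittedEntries(List<Entry> entries,
                                           long lso,
                                           ToLongFunction<Entry> offsetOf) {
        final List<Entry> committed = new ArrayList<>(entries.size());
        for (Entry entry : entries) {
            if (offsetOf.applyAsLong(entry) < lso) {
                committed.add(entry);
            } else {
                entry.release();
            }
        }
        return committed;
    }
}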