Use of org.apache.bookkeeper.mledger.impl.NonDurableCursorImpl in project starlight-for-kafka by DataStax.
The class MessageFetchContext, method readEntries.
private CompletableFuture<List<Entry>> readEntries(final ManagedCursor cursor,
                                                   final TopicPartition topicPartition,
                                                   final AtomicLong cursorOffset,
                                                   long adjustedMaxBytes) {
    final OpStatsLogger messageReadStats = statsLogger.getMessageReadStats();
    // Read up to maxReadEntriesNum entries.
    final long startReadingMessagesNanos = MathUtils.nowInNano();
    final CompletableFuture<List<Entry>> readFuture = new CompletableFuture<>();
    if (adjustedMaxBytes <= 0) {
        readFuture.complete(Lists.newArrayList());
        return readFuture;
    }
    final long originalOffset = cursorOffset.get();
    cursor.asyncReadEntries(maxReadEntriesNum, adjustedMaxBytes, new ReadEntriesCallback() {

        @Override
        public void readEntriesComplete(List<Entry> entries, Object ctx) {
            if (!entries.isEmpty()) {
                final Entry lastEntry = entries.get(entries.size() - 1);
                final PositionImpl currentPosition =
                        PositionImpl.get(lastEntry.getLedgerId(), lastEntry.getEntryId());
                try {
                    final long lastOffset = MessageMetadataUtils.peekOffsetFromEntry(lastEntry);
                    // Commit the offset so that the backlog is not affected by this cursor.
                    commitOffset((NonDurableCursorImpl) cursor, currentPosition);
                    // The next offset is added back to the TCM once all reads complete.
                    cursorOffset.set(lastOffset + 1);
                    if (log.isDebugEnabled()) {
                        log.debug("Topic {} successfully read entry: ledgerId: {}, entryId: {}, size: {},"
                                        + " ConsumerManager original offset: {}, lastEntryPosition: {},"
                                        + " nextOffset: {}",
                                topicPartition, lastEntry.getLedgerId(), lastEntry.getEntryId(),
                                lastEntry.getLength(), originalOffset, currentPosition,
                                cursorOffset.get());
                    }
                } catch (MetadataCorruptedException e) {
                    log.error("[{}] Failed to peekOffsetFromEntry from position {}: {}",
                            topicPartition, currentPosition, e.getMessage());
                    messageReadStats.registerFailedEvent(
                            MathUtils.elapsedNanos(startReadingMessagesNanos), TimeUnit.NANOSECONDS);
                    readFuture.completeExceptionally(e);
                    return;
                }
            }
            messageReadStats.registerSuccessfulEvent(
                    MathUtils.elapsedNanos(startReadingMessagesNanos), TimeUnit.NANOSECONDS);
            readFuture.complete(entries);
        }

        @Override
        public void readEntriesFailed(ManagedLedgerException exception, Object ctx) {
            String fullTopicName = KopTopic.toString(topicPartition, namespacePrefix);
            log.error("Error reading entries for topic: {}", fullTopicName);
            if (exception instanceof ManagedLedgerException.ManagedLedgerFencedException) {
                topicManager.invalidateCacheForFencedManagerLedgerOnTopic(fullTopicName);
            }
            messageReadStats.registerFailedEvent(
                    MathUtils.elapsedNanos(startReadingMessagesNanos), TimeUnit.NANOSECONDS);
            readFuture.completeExceptionally(exception);
        }
    }, null, PositionImpl.latest);
    return readFuture;
}
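The cast to NonDurableCursorImpl happens in the commitOffset call, which is where the non-durable cursor is actually advanced. A minimal sketch of what such a helper can look like, assuming it simply mark-deletes up to the position that was just read (the body below is illustrative, not the project's actual implementation):

import org.apache.bookkeeper.mledger.AsyncCallbacks.MarkDeleteCallback;
import org.apache.bookkeeper.mledger.ManagedLedgerException;
import org.apache.bookkeeper.mledger.impl.NonDurableCursorImpl;
import org.apache.bookkeeper.mledger.impl.PositionImpl;

// Hypothetical sketch: advance the non-durable cursor's mark-delete position so
// the entries just handed to the Kafka client no longer count as backlog.
private void commitOffset(NonDurableCursorImpl cursor, PositionImpl position) {
    cursor.asyncMarkDelete(position, new MarkDeleteCallback() {

        @Override
        public void markDeleteComplete(Object ctx) {
            if (log.isDebugEnabled()) {
                log.debug("Mark delete succeeded at position {}", position);
            }
        }

        @Override
        public void markDeleteFailed(ManagedLedgerException e, Object ctx) {
            // A non-durable cursor keeps no persistent state, so a failed
            // mark-delete only leaves the reported backlog higher than necessary.
            log.warn("Mark delete failed at position {}: {}", position, e.getMessage());
        }
    }, null);
}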
Use of org.apache.bookkeeper.mledger.impl.NonDurableCursorImpl in project kop by StreamNative.
The class MessageFetchContext, method readEntries.
private CompletableFuture<List<Entry>> readEntries(final ManagedCursor cursor,
                                                   final TopicPartition topicPartition,
                                                   final AtomicLong cursorOffset,
                                                   long adjustedMaxBytes) {
    final OpStatsLogger messageReadStats = statsLogger.getMessageReadStats();
    // Read up to maxReadEntriesNum entries.
    final long startReadingMessagesNanos = MathUtils.nowInNano();
    final CompletableFuture<List<Entry>> readFuture = new CompletableFuture<>();
    if (adjustedMaxBytes <= 0) {
        readFuture.complete(Lists.newArrayList());
        return readFuture;
    }
    final long originalOffset = cursorOffset.get();
    cursor.asyncReadEntries(maxReadEntriesNum, adjustedMaxBytes, new ReadEntriesCallback() {

        @Override
        public void readEntriesComplete(List<Entry> entries, Object ctx) {
            if (!entries.isEmpty()) {
                final Entry lastEntry = entries.get(entries.size() - 1);
                final PositionImpl currentPosition =
                        PositionImpl.get(lastEntry.getLedgerId(), lastEntry.getEntryId());
                try {
                    final long lastOffset = MessageMetadataUtils.peekOffsetFromEntry(lastEntry);
                    // Commit the offset so that the backlog is not affected by this cursor.
                    commitOffset((NonDurableCursorImpl) cursor, currentPosition);
                    // The next offset is added back to the TCM once all reads complete.
                    cursorOffset.set(lastOffset + 1);
                    if (log.isDebugEnabled()) {
                        log.debug("Topic {} successfully read entry: ledgerId: {}, entryId: {}, size: {},"
                                        + " ConsumerManager original offset: {}, lastEntryPosition: {},"
                                        + " nextOffset: {}",
                                topicPartition, lastEntry.getLedgerId(), lastEntry.getEntryId(),
                                lastEntry.getLength(), originalOffset, currentPosition,
                                cursorOffset.get());
                    }
                } catch (MetadataCorruptedException e) {
                    log.error("[{}] Failed to peekOffsetFromEntry from position {}: {}",
                            topicPartition, currentPosition, e.getMessage());
                    messageReadStats.registerFailedEvent(
                            MathUtils.elapsedNanos(startReadingMessagesNanos), TimeUnit.NANOSECONDS);
                    readFuture.completeExceptionally(e);
                    return;
                }
            }
            messageReadStats.registerSuccessfulEvent(
                    MathUtils.elapsedNanos(startReadingMessagesNanos), TimeUnit.NANOSECONDS);
            readFuture.complete(entries);
        }

        @Override
        public void readEntriesFailed(ManagedLedgerException exception, Object ctx) {
            String fullTopicName = KopTopic.toString(topicPartition, namespacePrefix);
            log.error("Error reading entries for topic: {}", fullTopicName);
            if (exception instanceof ManagedLedgerException.ManagedLedgerFencedException) {
                topicManager.invalidateCacheForFencedManagerLedgerOnTopic(fullTopicName);
            }
            messageReadStats.registerFailedEvent(
                    MathUtils.elapsedNanos(startReadingMessagesNanos), TimeUnit.NANOSECONDS);
            readFuture.completeExceptionally(exception);
        }
    }, null, PositionImpl.LATEST);
    return readFuture;
}
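Both projects read through a NonDurableCursorImpl because it is a lightweight, broker-local cursor: it persists no state to the metadata store, so mark-deleting on it (as commitOffset does above) cannot affect any real subscription's backlog. Such a cursor is not constructed directly but obtained from the ManagedLedger. A minimal sketch, assuming a start position has already been resolved from a Kafka offset (openFetchCursor is a hypothetical helper name; the offset-to-position lookup is out of scope here):

import org.apache.bookkeeper.mledger.ManagedCursor;
import org.apache.bookkeeper.mledger.ManagedLedger;
import org.apache.bookkeeper.mledger.ManagedLedgerException;
import org.apache.bookkeeper.mledger.impl.PositionImpl;

// Hypothetical sketch: open a non-durable cursor for a fetch at a known position.
ManagedCursor openFetchCursor(ManagedLedger managedLedger, PositionImpl startPosition)
        throws ManagedLedgerException {
    // newNonDurableCursor returns a NonDurableCursorImpl under the hood; its
    // mark-delete position starts at startPosition, so reads typically begin
    // at the entry that follows it.
    return managedLedger.newNonDurableCursor(startPosition);
}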