Use of io.streamnative.pulsar.handlers.kop.exceptions.MetadataCorruptedException in project starlight-for-kafka by datastax.
The class MessageFetchContext, method getCommittedEntries.
private List<Entry> getCommittedEntries(List<Entry> entries, long lso) {
    final List<Entry> committedEntries = new ArrayList<>();
    for (Entry entry : entries) {
        try {
            // Entries are ordered by offset, so stop at the first batch past the LSO.
            if (lso >= MessageMetadataUtils.peekBaseOffsetFromEntry(entry)) {
                committedEntries.add(entry);
            } else {
                break;
            }
        } catch (MetadataCorruptedException e) {
            log.error("[{}:{}] Failed to peek base offset from entry.",
                    entry.getLedgerId(), entry.getEntryId(), e);
        }
    }
    return committedEntries;
}
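The method scans entries in offset order and stops at the first batch whose base offset exceeds the last stable offset (LSO), so a READ_COMMITTED consumer never sees uncommitted data. A minimal standalone sketch of that filtering logic, using plain base offsets in place of managed-ledger entries (the class name and sample values are illustrative only):

import java.util.ArrayList;
import java.util.List;

public class LsoFilterSketch {

    // Keep only the leading batches whose base offset does not exceed the LSO;
    // batches are ordered by offset, so the scan stops at the first uncommitted one.
    static List<Long> committedBaseOffsets(List<Long> baseOffsets, long lso) {
        List<Long> committed = new ArrayList<>();
        for (long baseOffset : baseOffsets) {
            if (lso >= baseOffset) {
                committed.add(baseOffset);
            } else {
                break;
            }
        }
        return committed;
    }

    public static void main(String[] args) {
        // With LSO = 25, only the batches starting at 0, 10 and 20 are committed.
        System.out.println(committedBaseOffsets(List.of(0L, 10L, 20L, 30L), 25L)); // [0, 10, 20]
    }
}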
Use of io.streamnative.pulsar.handlers.kop.exceptions.MetadataCorruptedException in project starlight-for-kafka by datastax.
The class MessageFetchContext, method readEntries.
private CompletableFuture<List<Entry>> readEntries(final ManagedCursor cursor,
                                                   final TopicPartition topicPartition,
                                                   final AtomicLong cursorOffset,
                                                   long adjustedMaxBytes) {
    final OpStatsLogger messageReadStats = statsLogger.getMessageReadStats();
    // Read up to maxReadEntriesNum entries.
    final long startReadingMessagesNanos = MathUtils.nowInNano();
    final CompletableFuture<List<Entry>> readFuture = new CompletableFuture<>();
    if (adjustedMaxBytes <= 0) {
        readFuture.complete(Lists.newArrayList());
        return readFuture;
    }
    final long originalOffset = cursorOffset.get();
    cursor.asyncReadEntries(maxReadEntriesNum, adjustedMaxBytes, new ReadEntriesCallback() {

        @Override
        public void readEntriesComplete(List<Entry> entries, Object ctx) {
            if (!entries.isEmpty()) {
                final Entry lastEntry = entries.get(entries.size() - 1);
                final PositionImpl currentPosition =
                        PositionImpl.get(lastEntry.getLedgerId(), lastEntry.getEntryId());
                try {
                    final long lastOffset = MessageMetadataUtils.peekOffsetFromEntry(lastEntry);
                    // Commit the offset so the backlog is not affected by this cursor,
                    commitOffset((NonDurableCursorImpl) cursor, currentPosition);
                    // and add the next offset back to the TCM when all reads complete.
                    cursorOffset.set(lastOffset + 1);
                    if (log.isDebugEnabled()) {
                        log.debug("Topic {} successfully read entry: ledgerId: {}, entryId: {}, "
                                        + "size: {}, ConsumerManager original offset: {}, "
                                        + "lastEntryPosition: {}, nextOffset: {}",
                                topicPartition, lastEntry.getLedgerId(), lastEntry.getEntryId(),
                                lastEntry.getLength(), originalOffset, currentPosition,
                                cursorOffset.get());
                    }
                } catch (MetadataCorruptedException e) {
                    log.error("[{}] Failed to peekOffsetFromEntry from position {}: {}",
                            topicPartition, currentPosition, e.getMessage());
                    messageReadStats.registerFailedEvent(
                            MathUtils.elapsedNanos(startReadingMessagesNanos), TimeUnit.NANOSECONDS);
                    readFuture.completeExceptionally(e);
                    return;
                }
            }
            messageReadStats.registerSuccessfulEvent(
                    MathUtils.elapsedNanos(startReadingMessagesNanos), TimeUnit.NANOSECONDS);
            readFuture.complete(entries);
        }

        @Override
        public void readEntriesFailed(ManagedLedgerException exception, Object ctx) {
            String fullTopicName = KopTopic.toString(topicPartition, namespacePrefix);
            log.error("Error reading entry for topic: {}", fullTopicName, exception);
            if (exception instanceof ManagedLedgerException.ManagedLedgerFencedException) {
                topicManager.invalidateCacheForFencedManagerLedgerOnTopic(fullTopicName);
            }
            messageReadStats.registerFailedEvent(
                    MathUtils.elapsedNanos(startReadingMessagesNanos), TimeUnit.NANOSECONDS);
            readFuture.completeExceptionally(exception);
        }
    }, null, PositionImpl.latest);
    return readFuture;
}
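The returned future completes with the raw entries; a transactional fetch can then pass them through getCommittedEntries above. A hedged sketch of that composition with CompletableFuture, using plain offsets instead of entries (readCommitted and the sample data are illustrative, not the project's actual API):

import java.util.List;
import java.util.concurrent.CompletableFuture;
import java.util.stream.Collectors;

public class ReadChainSketch {

    // Illustrative only: chain an async read with a post-read LSO filter, the way a
    // READ_COMMITTED fetch could combine readEntries(...) with getCommittedEntries(...).
    static CompletableFuture<List<Long>> readCommitted(CompletableFuture<List<Long>> readFuture, long lso) {
        return readFuture.thenApply(offsets -> offsets.stream()
                .takeWhile(offset -> offset <= lso)
                .collect(Collectors.toList()));
    }

    public static void main(String[] args) throws Exception {
        CompletableFuture<List<Long>> read = CompletableFuture.completedFuture(List.of(0L, 10L, 20L, 30L));
        System.out.println(readCommitted(read, 15L).get()); // [0, 10]
    }
}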
Use of io.streamnative.pulsar.handlers.kop.exceptions.MetadataCorruptedException in project starlight-for-kafka by datastax.
The class MessageMetadataUtils, method getPublishTime.
public static long getPublishTime(final ByteBuf byteBuf) throws MetadataCorruptedException {
    final int readerIndex = byteBuf.readerIndex();
    final MessageMetadata metadata = parseMessageMetadata(byteBuf);
    // Restore the reader index so parsing the metadata does not consume the buffer.
    byteBuf.readerIndex(readerIndex);
    if (metadata.hasPublishTime()) {
        return metadata.getPublishTime();
    } else {
        throw new MetadataCorruptedException("Field 'publish_time' is not set");
    }
}
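The method peeks at the publish time without consuming the buffer: it saves the reader index, parses the metadata, and restores the index afterwards. A minimal sketch of that save/restore pattern on a plain Netty ByteBuf (the payload is arbitrary sample data):

import io.netty.buffer.ByteBuf;
import io.netty.buffer.Unpooled;

public class PeekSketch {

    // Read a value from the buffer, then restore the reader index so the buffer
    // looks untouched to later consumers -- the same pattern getPublishTime
    // uses around parseMessageMetadata.
    static long peekLong(ByteBuf buf) {
        final int readerIndex = buf.readerIndex();
        try {
            return buf.readLong();
        } finally {
            buf.readerIndex(readerIndex);
        }
    }

    public static void main(String[] args) {
        ByteBuf buf = Unpooled.buffer().writeLong(1700000000000L);
        System.out.println(peekLong(buf));     // 1700000000000
        System.out.println(buf.readerIndex()); // still 0
        buf.release();
    }
}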
Use of io.streamnative.pulsar.handlers.kop.exceptions.MetadataCorruptedException in project starlight-for-kafka by datastax.
The class AbstractEntryFormatter, method decode.
@Override
public DecodeResult decode(List<Entry> entries, byte magic) {
    int totalSize = 0;
    int conversionCount = 0;
    long conversionTimeNanos = 0L;
    // The batched ByteBuf should be released after it is sent to the client.
    ByteBuf batchedByteBuf = PulsarByteBufAllocator.DEFAULT.directBuffer(totalSize);
    for (Entry entry : entries) {
        try {
            long startOffset = MessageMetadataUtils.peekBaseOffsetFromEntry(entry);
            final ByteBuf byteBuf = entry.getDataBuffer();
            final MessageMetadata metadata = MessageMetadataUtils.parseMessageMetadata(byteBuf);
            if (isKafkaEntryFormat(metadata)) {
                byte batchMagic = byteBuf.getByte(byteBuf.readerIndex() + MAGIC_OFFSET);
                byteBuf.setLong(byteBuf.readerIndex() + OFFSET_OFFSET, startOffset);
                // Down-conversion is needed when the batch magic exceeds the client magic.
                if (batchMagic > magic) {
                    long startConversionNanos = MathUtils.nowInNano();
                    MemoryRecords memoryRecords =
                            MemoryRecords.readableRecords(ByteBufUtils.getNioBuffer(byteBuf));
                    // After down-conversion, the batch magic is set to the client magic.
                    ConvertedRecords<MemoryRecords> convertedRecords =
                            memoryRecords.downConvert(magic, startOffset, time);
                    conversionCount += convertedRecords.recordConversionStats().numRecordsConverted();
                    conversionTimeNanos += MathUtils.elapsedNanos(startConversionNanos);
                    final ByteBuf kafkaBuffer = Unpooled.wrappedBuffer(convertedRecords.records().buffer());
                    totalSize += kafkaBuffer.readableBytes();
                    batchedByteBuf.writeBytes(kafkaBuffer);
                    kafkaBuffer.release();
                    if (log.isTraceEnabled()) {
                        log.trace("[{}:{}] MemoryRecords down converted, start offset {},"
                                        + " entry magic: {}, client magic: {}",
                                entry.getLedgerId(), entry.getEntryId(), startOffset, batchMagic, magic);
                    }
                } else {
                    // No down-conversion is needed; the batch magic keeps the value written at produce time.
                    ByteBuf buf = byteBuf.slice(byteBuf.readerIndex(), byteBuf.readableBytes());
                    totalSize += buf.readableBytes();
                    batchedByteBuf.writeBytes(buf);
                }
            } else {
                final DecodeResult decodeResult =
                        ByteBufUtils.decodePulsarEntryToKafkaRecords(metadata, byteBuf, startOffset, magic);
                conversionCount += decodeResult.getConversionCount();
                conversionTimeNanos += decodeResult.getConversionTimeNanos();
                final ByteBuf kafkaBuffer = decodeResult.getOrCreateByteBuf();
                totalSize += kafkaBuffer.readableBytes();
                batchedByteBuf.writeBytes(kafkaBuffer);
                decodeResult.recycle();
            }
            // Almost all Kafka exceptions inherit from KafkaException and are caught and
            // processed in KafkaApis. Whether the failure comes from down-conversion or from
            // the IOException in builder.appendWithOffset inside decodePulsarEntryToKafkaRecords,
            // Kafka surfaces it as a KafkaException, so we need to catch KafkaException here.
        } catch (MetadataCorruptedException | IOException | KafkaException e) {
            // Skip entries that fail to decode.
            log.error("[{}:{}] Failed to decode entry.", entry.getLedgerId(), entry.getEntryId(), e);
        } finally {
            entry.release();
        }
    }
    return DecodeResult.get(MemoryRecords.readableRecords(ByteBufUtils.getNioBuffer(batchedByteBuf)),
            batchedByteBuf, conversionCount, conversionTimeNanos);
}
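The decision to down-convert hinges on a single comparison: a batch is rewritten only when its stored record-batch magic exceeds what the client supports. A trivial sketch of that check in isolation (needsDownConversion is an illustrative helper; the real code calls MemoryRecords.downConvert directly):

public class MagicCheckSketch {

    // A batch written with magic v2 must be down-converted for a v1 client,
    // while a v1 batch can be served to a v2 client unchanged.
    static boolean needsDownConversion(byte batchMagic, byte clientMagic) {
        return batchMagic > clientMagic;
    }

    public static void main(String[] args) {
        System.out.println(needsDownConversion((byte) 2, (byte) 1)); // true
        System.out.println(needsDownConversion((byte) 1, (byte) 2)); // false
    }
}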
Use of io.streamnative.pulsar.handlers.kop.exceptions.MetadataCorruptedException in project kop by streamnative.
The class MessageMetadataUtils, method getPublishTime.
public static long getPublishTime(final ByteBuf byteBuf) throws MetadataCorruptedException {
    final int readerIndex = byteBuf.readerIndex();
    final MessageMetadata metadata = parseMessageMetadata(byteBuf);
    byteBuf.readerIndex(readerIndex);
    if (metadata.hasPublishTime()) {
        return metadata.getPublishTime();
    } else {
        throw new MetadataCorruptedException("Field 'publish_time' is not set");
    }
}