Search in sources :

Example 1 with MessageSet

Use of org.graylog.shaded.kafka09.message.MessageSet in the project graylog2-server by Graylog2.

From the class LocalKafkaJournal, method flushMessages:

/**
 * Appends the given batch of messages to the on-disk Kafka journal as a single message set.
 *
 * @param messages    batch to persist; an empty list results in no write at all
 * @param payloadSize combined payload size in bytes, used for debug logging only
 * @return the journal offset of the last message written, or {@code -1L} when nothing was written
 */
private long flushMessages(List<Message> messages, long payloadSize) {
    // Nothing buffered: skip the append entirely and signal "no write" via -1.
    if (messages.isEmpty()) {
        LOG.debug("No messages to flush, not trying to write an empty message set.");
        return -1L;
    }

    // Wrap the whole batch in one Kafka message set so it is handed to the log as a unit.
    final ByteBufferMessageSet batch =
            new ByteBufferMessageSet(JavaConversions.asScalaBuffer(messages).toSeq());
    if (LOG.isDebugEnabled()) {
        LOG.debug("Trying to write ByteBufferMessageSet with size of {} bytes to journal", batch.sizeInBytes());
    }

    final LogAppendInfo info = kafkaLog.append(batch, true);
    final long lastOffset = info.lastOffset();
    if (LOG.isDebugEnabled()) {
        LOG.debug("Wrote {} messages to journal: {} bytes (payload {} bytes), log position {} to {}", messages.size(), batch.sizeInBytes(), payloadSize, info.firstOffset(), lastOffset);
    }

    // Update the write-throughput meter with the number of messages just persisted.
    writtenMessages.mark(messages.size());
    return lastOffset;
}
Also used : LogAppendInfo(org.graylog.shaded.kafka09.log.LogAppendInfo) ByteBufferMessageSet(org.graylog.shaded.kafka09.message.ByteBufferMessageSet)

Example 2 with MessageSet

use of org.graylog.shaded.kafka09.message.MessageSet in project graylog2-server by Graylog2.

From the class LocalKafkaJournal, method read:

/**
 * Read from the journal, starting at the given offset. If the underlying journal implementation returns an empty
 * list of entries, it will be returned even if we know there are more entries in the journal.
 * <p>
 * Side effect: advances {@code nextReadOffset} to one past the last message actually consumed, so a subsequent
 * call can resume where this one left off.
 *
 * @param readOffset Offset to start reading at
 * @param requestedMaximumCount Maximum number of entries to return.
 * @return A list of entries; empty when shutting down, when no data is available yet, or on a benign
 *         out-of-range read
 */
public List<JournalReadEntry> read(long readOffset, long requestedMaximumCount) {
    // Always read at least one!
    final long maximumCount = Math.max(1, requestedMaximumCount);
    long maxOffset = readOffset + maximumCount;
    // Bail out early during shutdown instead of touching the (possibly closing) log.
    if (shuttingDown) {
        return Collections.emptyList();
    }
    // Presize to the requested count; saturatedCast guards against a long that exceeds int range.
    final List<JournalReadEntry> messages = new ArrayList<>(Ints.saturatedCast(maximumCount));
    try (Timer.Context ignored = readTime.time()) {
        final long logStartOffset = getLogStartOffset();
        // If the requested offset was already truncated/retention-deleted, clamp to the
        // earliest available offset and recompute the upper bound of the read window.
        if (readOffset < logStartOffset) {
            LOG.info("Read offset {} before start of log at {}, starting to read from the beginning of the journal.", readOffset, logStartOffset);
            readOffset = logStartOffset;
            maxOffset = readOffset + maximumCount;
        }
        LOG.debug("Requesting to read a maximum of {} messages (or 5MB) from the journal, offset interval [{}, {})", maximumCount, readOffset, maxOffset);
        // TODO benchmark and make read-ahead strategy configurable for performance tuning
        // 5MB byte cap per read; maxOffset additionally bounds the read by message count.
        final MessageSet messageSet = kafkaLog.read(readOffset, 5 * 1024 * 1024, Option.<Object>apply(maxOffset)).messageSet();
        final Iterator<MessageAndOffset> iterator = messageSet.iterator();
        // firstOffset/lastOffset are only used for the debug summary below.
        long firstOffset = Long.MIN_VALUE;
        long lastOffset = Long.MIN_VALUE;
        long totalBytes = 0;
        while (iterator.hasNext()) {
            final MessageAndOffset messageAndOffset = iterator.next();
            if (firstOffset == Long.MIN_VALUE) {
                firstOffset = messageAndOffset.offset();
            }
            // always remember the last seen offset for debug purposes below
            lastOffset = messageAndOffset.offset();
            final byte[] payloadBytes = ByteBufferUtils.readBytes(messageAndOffset.message().payload());
            if (LOG.isTraceEnabled()) {
                final byte[] keyBytes = ByteBufferUtils.readBytes(messageAndOffset.message().key());
                LOG.trace("Read message {} contains {}", bytesToHex(keyBytes), bytesToHex(payloadBytes));
            }
            totalBytes += payloadBytes.length;
            messages.add(new JournalReadEntry(payloadBytes, messageAndOffset.offset()));
            // remember where to read from
            nextReadOffset = messageAndOffset.nextOffset();
        }
        if (messages.isEmpty()) {
            LOG.debug("No messages available to read for offset interval [{}, {}).", readOffset, maxOffset);
        } else {
            LOG.debug("Read {} messages, total payload size {}, from journal, offset interval [{}, {}], requested read at {}", messages.size(), totalBytes, firstOffset, lastOffset, readOffset);
        }
    } catch (OffsetOutOfRangeException e) {
        // This is fine, the reader tries to read faster than the writer committed data. Next read will get the data.
        LOG.debug("Offset out of range, no messages available starting at offset {}", readOffset);
    } catch (Exception e) {
        // sigh.
        // During shutdown the read may have been interrupted/aborted; return whatever is safe (nothing)
        // rather than propagating an exception that is an artifact of closing the log.
        if (shuttingDown) {
            LOG.debug("Caught exception during shutdown, ignoring it because we might have been blocked on a read.");
            return Collections.emptyList();
        }
        // noinspection ConstantConditions
        if (e instanceof ClosedByInterruptException) {
            LOG.debug("Interrupted while reading from journal, during shutdown this is harmless and ignored.", e);
        } else {
            // Anything else is unexpected — rethrow for the caller to handle.
            throw e;
        }
    }
    // Note: partial results collected before an exception are still counted and returned.
    readMessages.mark(messages.size());
    return messages;
}
Also used : ArrayList(java.util.ArrayList) MessageAndOffset(org.graylog.shaded.kafka09.message.MessageAndOffset) UncheckedIOException(java.io.UncheckedIOException) ClosedByInterruptException(java.nio.channels.ClosedByInterruptException) SyncFailedException(java.io.SyncFailedException) OffsetOutOfRangeException(org.graylog.shaded.kafka09.common.OffsetOutOfRangeException) AccessDeniedException(java.nio.file.AccessDeniedException) IOException(java.io.IOException) KafkaException(org.graylog.shaded.kafka09.common.KafkaException) ByteBufferMessageSet(org.graylog.shaded.kafka09.message.ByteBufferMessageSet) MessageSet(org.graylog.shaded.kafka09.message.MessageSet) ClosedByInterruptException(java.nio.channels.ClosedByInterruptException) HdrTimer(org.graylog2.shared.metrics.HdrTimer) Timer(com.codahale.metrics.Timer) OffsetOutOfRangeException(org.graylog.shaded.kafka09.common.OffsetOutOfRangeException)

Aggregations

ByteBufferMessageSet (org.graylog.shaded.kafka09.message.ByteBufferMessageSet)2 Timer (com.codahale.metrics.Timer)1 IOException (java.io.IOException)1 SyncFailedException (java.io.SyncFailedException)1 UncheckedIOException (java.io.UncheckedIOException)1 ClosedByInterruptException (java.nio.channels.ClosedByInterruptException)1 AccessDeniedException (java.nio.file.AccessDeniedException)1 ArrayList (java.util.ArrayList)1 KafkaException (org.graylog.shaded.kafka09.common.KafkaException)1 OffsetOutOfRangeException (org.graylog.shaded.kafka09.common.OffsetOutOfRangeException)1 LogAppendInfo (org.graylog.shaded.kafka09.log.LogAppendInfo)1 MessageAndOffset (org.graylog.shaded.kafka09.message.MessageAndOffset)1 MessageSet (org.graylog.shaded.kafka09.message.MessageSet)1 HdrTimer (org.graylog2.shared.metrics.HdrTimer)1