Search in sources:

Example 1 with KafkaConsumer

Use of co.cask.cdap.logging.kafka.KafkaConsumer in project cdap by caskdata.

From the class KafkaLogReader, method getLogNext:

@Override
public void getLogNext(LoggingContext loggingContext, ReadRange readRange, int maxEvents, Filter filter, Callback callback) {
    // A LATEST offset means the caller wants the newest events; delegate to the backward reader.
    if (readRange.getKafkaOffset() == ReadRange.LATEST.getKafkaOffset()) {
        getLogPrev(loggingContext, readRange, maxEvents, filter, callback);
        return;
    }
    int partition = partitioner.partition(loggingContext.getLogPartition(), -1);
    LOG.trace("Reading from kafka {}:{}", topic, partition);
    callback.init();
    KafkaConsumer kafkaConsumer = new KafkaConsumer(brokerService, topic, partition, KAFKA_FETCH_TIMEOUT_MS);
    try {
        // No valid Kafka offset was given; look up the offset corresponding to readRange.getFromMillis()
        if (readRange.getKafkaOffset() == LogOffset.INVALID_KAFKA_OFFSET) {
            readRange = new ReadRange(readRange.getFromMillis(), readRange.getToMillis(), kafkaConsumer.fetchOffsetBefore(readRange.getFromMillis()));
        }
        Filter logFilter = new AndFilter(ImmutableList.of(LoggingContextHelper.createFilter(loggingContext), filter));
        long latestOffset = kafkaConsumer.fetchLatestOffset();
        long startOffset = readRange.getKafkaOffset() + 1;
        LOG.trace("Using startOffset={}, latestOffset={}, readRange={}", startOffset, latestOffset, readRange);
        if (startOffset >= latestOffset) {
            // At end of events, nothing to return
            return;
        }
        KafkaCallback kafkaCallback = new KafkaCallback(logFilter, serializer, latestOffset, maxEvents, callback, readRange.getFromMillis());
        fetchLogEvents(kafkaConsumer, kafkaCallback, startOffset, latestOffset, maxEvents, readRange);
    } catch (Throwable e) {
        LOG.error("Got exception: ", e);
        throw Throwables.propagate(e);
    } finally {
        try {
            kafkaConsumer.close();
        } catch (IOException e) {
            LOG.error(String.format("Caught exception when closing KafkaConsumer for topic %s, partition %d", topic, partition), e);
        }
    }
}
Also used: AndFilter (co.cask.cdap.logging.filter.AndFilter), Filter (co.cask.cdap.logging.filter.Filter), KafkaConsumer (co.cask.cdap.logging.kafka.KafkaConsumer), IOException (java.io.IOException)
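
The forward read in getLogNext reduces to a window computation: resume one offset past the last event already delivered, stop at the partition's latest offset, and return nothing when the two meet. Below is a minimal, self-contained sketch of that logic; the class ForwardWindow and method forwardWindow are hypothetical names introduced for illustration and are not part of CDAP.

// Hypothetical, self-contained illustration of the forward-window logic in getLogNext.
public final class ForwardWindow {

    /** An empty window, returned when there is nothing new to read. */
    private static final long[] EMPTY = new long[0];

    /**
     * Computes the [start, stop) offset window for a forward read.
     *
     * @param lastReadOffset the Kafka offset of the last event already delivered
     * @param latestOffset   the partition's current latest offset
     * @return {start, stop} or an empty array when the reader is caught up
     */
    static long[] forwardWindow(long lastReadOffset, long latestOffset) {
        long startOffset = lastReadOffset + 1;   // resume one past the last delivered event
        if (startOffset >= latestOffset) {
            return EMPTY;                        // caught up: nothing to return
        }
        return new long[] { startOffset, latestOffset };
    }

    public static void main(String[] args) {
        // Caught up: offset 99 was the newest event, so there is no window.
        System.out.println(forwardWindow(99, 100).length);   // 0
        // Ten new events available: the window is [91, 100).
        long[] w = forwardWindow(90, 100);
        System.out.printf("[%d, %d)%n", w[0], w[1]);         // [91, 100)
    }
}

For example, forwardWindow(90, 100) yields the window [91, 100), while forwardWindow(99, 100) reports that the reader is caught up, mirroring the startOffset >= latestOffset early return above.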

Example 2 with KafkaConsumer

Use of co.cask.cdap.logging.kafka.KafkaConsumer in project cdap by caskdata.

From the class KafkaLogReader, method getLogPrev:

@Override
public void getLogPrev(LoggingContext loggingContext, ReadRange readRange, int maxEvents, Filter filter, Callback callback) {
    // An invalid offset means there is no saved read position; start from the latest offset and read backwards.
    if (readRange.getKafkaOffset() == LogOffset.INVALID_KAFKA_OFFSET) {
        readRange = new ReadRange(readRange.getFromMillis(), readRange.getToMillis(), ReadRange.LATEST.getKafkaOffset());
    }
    int partition = partitioner.partition(loggingContext.getLogPartition(), -1);
    LOG.trace("Reading from kafka partition {}", partition);
    callback.init();
    KafkaConsumer kafkaConsumer = new KafkaConsumer(brokerService, topic, partition, KAFKA_FETCH_TIMEOUT_MS);
    try {
        Filter logFilter = new AndFilter(ImmutableList.of(LoggingContextHelper.createFilter(loggingContext), filter));
        long latestOffset = kafkaConsumer.fetchLatestOffset();
        long earliestOffset = kafkaConsumer.fetchEarliestOffset();
        // The read window ends at the requested offset (or at the latest offset when none was
        // requested) and starts at most maxEvents earlier, clamped to the earliest retained offset.
        long stopOffset;
        long startOffset;
        if (readRange.getKafkaOffset() < 0) {
            stopOffset = latestOffset;
        } else {
            stopOffset = readRange.getKafkaOffset();
        }
        startOffset = stopOffset - maxEvents;
        if (startOffset < earliestOffset) {
            startOffset = earliestOffset;
        }
        LOG.trace("Using startOffset={}, latestOffset={}, readRange={}", startOffset, latestOffset, readRange);
        if (startOffset >= stopOffset || startOffset >= latestOffset) {
            // At end of kafka events, nothing to return
            return;
        }
        KafkaCallback kafkaCallback = new KafkaCallback(logFilter, serializer, stopOffset, maxEvents, callback, readRange.getFromMillis());
        // Events between startOffset and stopOffset may not contain any logs that match the filter,
        // so keep paging backwards; at least one matching offset must be returned for the next
        // getLogPrev call to have a position to resume from.
        int fetchCount = 0;
        while (fetchCount == 0 && kafkaCallback.getEventsRead() <= MAX_READ_EVENTS_KAFKA) {
            fetchCount = fetchLogEvents(kafkaConsumer, kafkaCallback, startOffset, stopOffset, maxEvents, readRange);
            stopOffset = startOffset;
            if (stopOffset <= earliestOffset) {
                // Truly no log messages found.
                break;
            }
            startOffset = stopOffset - maxEvents;
            if (startOffset < earliestOffset) {
                startOffset = earliestOffset;
            }
        }
    } catch (Throwable e) {
        LOG.error("Got exception: ", e);
        throw Throwables.propagate(e);
    } finally {
        try {
            kafkaConsumer.close();
        } catch (IOException e) {
            LOG.error(String.format("Caught exception when closing KafkaConsumer for topic %s, partition %d", topic, partition), e);
        }
    }
}
Also used: AndFilter (co.cask.cdap.logging.filter.AndFilter), Filter (co.cask.cdap.logging.filter.Filter), KafkaConsumer (co.cask.cdap.logging.kafka.KafkaConsumer), IOException (java.io.IOException)
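
The core of getLogPrev is its backward paging loop: each pass reads a window of at most maxEvents ending where the previous pass started, clamped at the earliest offset Kafka still retains, and steps back until some event matches the filter or the log is exhausted. A minimal sketch of that loop follows under assumed names: BackwardPager, pageBackwards, and fetchWindow are hypothetical, with fetchWindow standing in for fetchLogEvents and returning the number of matching events. The real method also caps the total events read via MAX_READ_EVENTS_KAFKA, which this sketch omits.

import java.util.function.LongBinaryOperator;

// Hypothetical, self-contained illustration of the backward paging loop in getLogPrev.
public final class BackwardPager {

    /**
     * Steps backwards through offset windows of at most maxEvents until a window
     * yields at least one matching event or the earliest offset is reached.
     *
     * @param fetchWindow stand-in for fetchLogEvents: given (start, stop), returns the match count
     * @return the window {start, stop} that produced matches, or null if none did
     */
    static long[] pageBackwards(long earliestOffset, long stopOffset, int maxEvents,
                                LongBinaryOperator fetchWindow) {
        long start = Math.max(stopOffset - maxEvents, earliestOffset);
        while (true) {
            if (fetchWindow.applyAsLong(start, stopOffset) > 0) {
                return new long[] { start, stopOffset };  // found matching events in this window
            }
            if (start <= earliestOffset) {
                return null;                              // truly no matching events
            }
            stopOffset = start;                           // slide the window back
            start = Math.max(stopOffset - maxEvents, earliestOffset);
        }
    }

    public static void main(String[] args) {
        // Pretend only offsets below 20 contain matching events; page back from offset 100.
        long[] window = pageBackwards(0, 100, 25, (start, stop) -> start < 20 ? 1 : 0);
        System.out.printf("matched in [%d, %d)%n", window[0], window[1]);  // matched in [0, 25)
    }
}

With earliestOffset 0, stopOffset 100, and maxEvents 25, the sketch probes the windows [75, 100), [50, 75), [25, 50), and finally [0, 25), where it finds a match and stops, just as the fetchCount == 0 loop above keeps sliding stopOffset back to the previous startOffset.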

Aggregations

AndFilter (co.cask.cdap.logging.filter.AndFilter): 2 uses
Filter (co.cask.cdap.logging.filter.Filter): 2 uses
KafkaConsumer (co.cask.cdap.logging.kafka.KafkaConsumer): 2 uses
IOException (java.io.IOException): 2 uses