Usage of co.cask.cdap.logging.read.ReadRange in project cdap by caskdata:
class MockLogReader, method getLogPrev.
@Override
public void getLogPrev(LoggingContext loggingContext, ReadRange readRange, int maxEvents, Filter filter, Callback callback) {
  // A negative Kafka offset means "unspecified"; widen the window end to MAX.
  if (readRange.getKafkaOffset() < 0) {
    readRange = new ReadRange(readRange.getFromMillis(), readRange.getToMillis(), MAX);
  }
  Filter contextFilter = LoggingContextHelper.createFilter(loggingContext);
  callback.init();
  try {
    // Offset window: the maxEvents offsets immediately preceding the requested offset.
    long windowStart = readRange.getKafkaOffset() - maxEvents;
    long windowEnd = readRange.getKafkaOffset();
    int seen = 0;
    for (LogEvent event : logEvents) {
      long eventTime = event.getLoggingEvent().getTimeStamp();
      // Skip events outside the logging context or the requested time range.
      if (!contextFilter.match(event.getLoggingEvent())
          || eventTime < readRange.getFromMillis()
          || eventTime >= readRange.getToMillis()) {
        continue;
      }
      long offset = event.getOffset().getKafkaOffset();
      if (offset < windowStart || offset >= windowEnd) {
        continue;
      }
      // Stop once more than maxEvents in-window events have been seen.
      if (++seen > maxEvents) {
        break;
      }
      // Mock filter semantics: a non-empty filter drops events at odd offsets.
      if (filter != Filter.EMPTY_FILTER && offset % 2 != 0) {
        continue;
      }
      callback.handle(event);
    }
  } catch (Throwable e) {
    LOG.error("Got exception", e);
  } finally {
    callback.close();
  }
}
Usage of co.cask.cdap.logging.read.ReadRange in project cdap by caskdata:
class AbstractLogHandler, method doPrev.
/**
 * Serves a "previous page" log request: reads up to {@code maxEvents} events ending at
 * the offset parsed from {@code fromOffsetStr} and streams them through a format-specific callback.
 */
protected void doPrev(HttpResponder responder, LoggingContext loggingContext, int maxEvents, String fromOffsetStr, boolean escape, String filterStr, @Nullable RunRecordMeta runRecord, String format, List<String> fieldsToSuppress) {
  try {
    Filter filter = FilterParser.parse(filterStr);
    Callback logCallback = getNextOrPrevLogsCallback(format, responder, fieldsToSuppress, escape);
    LogOffset logOffset = FormattedTextLogEvent.parseLogOffset(fromOffsetStr);
    // Build a to-range ending at the requested offset, then clamp it to the run's lifetime.
    ReadRange readRange = adjustReadRange(ReadRange.createToRange(logOffset), runRecord, true);
    try {
      logReader.getLogPrev(loggingContext, readRange, maxEvents, filter, logCallback);
    } catch (Exception ex) {
      LOG.debug("Exception while reading logs for logging context {}", loggingContext, ex);
    } finally {
      // The callback owns the response stream; always close it.
      logCallback.close();
    }
  } catch (SecurityException e) {
    responder.sendStatus(HttpResponseStatus.UNAUTHORIZED);
  } catch (IllegalArgumentException e) {
    responder.sendString(HttpResponseStatus.BAD_REQUEST, e.getMessage());
  }
}
Usage of co.cask.cdap.logging.read.ReadRange in project cdap by caskdata:
class AbstractLogHandler, method doGetLogs.
/**
 * Serves a time-range log request: reads all events between the given from/to seconds
 * (clamped to the run's lifetime when a run record is supplied) and streams them as a
 * chunked response in the requested format.
 *
 * Fix: if {@code getFullLogsProducer} throws after the iterator was opened, the iterator
 * previously leaked — only the (still-null) producer was closed. The iterator is now
 * closed explicitly when the producer never took ownership of it.
 */
protected void doGetLogs(HttpResponder responder, LoggingContext loggingContext, long fromTimeSecsParam, long toTimeSecsParam, boolean escape, String filterStr, @Nullable RunRecordMeta runRecord, String format, List<String> fieldsToSuppress) {
  try {
    TimeRange timeRange = parseTime(fromTimeSecsParam, toTimeSecsParam, responder);
    if (timeRange == null) {
      // parseTime already responded with an error.
      return;
    }
    Filter filter = FilterParser.parse(filterStr);
    ReadRange readRange = new ReadRange(timeRange.getFromMillis(), timeRange.getToMillis(), LogOffset.INVALID_KAFKA_OFFSET);
    // fromTimeSecsParam == -1 means "from time not specified"; use the run's start instead.
    readRange = adjustReadRange(readRange, runRecord, fromTimeSecsParam != -1);
    CloseableIterator<LogEvent> logIter = null;
    AbstractChunkedLogProducer logsProducer = null;
    try {
      // On success, the iterator is closed by the BodyProducer passed to the HttpResponder.
      logIter = logReader.getLog(loggingContext, readRange.getFromMillis(), readRange.getToMillis(), filter);
      logsProducer = getFullLogsProducer(format, logIter, fieldsToSuppress, escape);
    } catch (Exception ex) {
      LOG.debug("Exception while reading logs for logging context {}", loggingContext, ex);
      if (logsProducer != null) {
        logsProducer.close();
      } else if (logIter != null) {
        // The producer was never created, so nothing owns the iterator; close it here.
        logIter.close();
      }
      responder.sendStatus(HttpResponseStatus.INTERNAL_SERVER_ERROR);
      return;
    }
    responder.sendContent(HttpResponseStatus.OK, logsProducer, logsProducer.getResponseHeaders());
  } catch (SecurityException e) {
    responder.sendStatus(HttpResponseStatus.UNAUTHORIZED);
  } catch (IllegalArgumentException e) {
    responder.sendString(HttpResponseStatus.BAD_REQUEST, e.getMessage());
  }
}
Usage of co.cask.cdap.logging.read.ReadRange in project cdap by caskdata:
class AbstractLogHandler, method adjustReadRange.
/**
 * Clamps the given readRange to the lifetime of the run described by runRecord.
 * If the range starts before the run started (or no from-time was specified), the
 * start is moved up to the run's start time; if the range ends after the run stopped,
 * the end is pulled back to just past the run's stop time.
 */
private ReadRange adjustReadRange(ReadRange readRange, @Nullable RunRecordMeta runRecord, boolean fromTimeSpecified) {
  if (runRecord == null) {
    // No run to clamp against; keep the range as-is.
    return readRange;
  }
  long runStartMillis = TimeUnit.SECONDS.toMillis(runRecord.getStartTs());
  // Unspecified from-time defaults to the run start; an explicit one is clamped to it
  // so that we do not look into extra files before the run began.
  long fromTimeMillis = fromTimeSpecified
      ? Math.max(readRange.getFromMillis(), runStartMillis)
      : runStartMillis;
  long toTimeMillis = readRange.getToMillis();
  if (runRecord.getStopTs() != null) {
    // Add a one-second buffer to the stop time due to CDAP-3100.
    long runStopMillis = TimeUnit.SECONDS.toMillis(runRecord.getStopTs() + 1);
    toTimeMillis = Math.min(toTimeMillis, runStopMillis);
  }
  ReadRange adjusted = new ReadRange(fromTimeMillis, toTimeMillis, readRange.getKafkaOffset());
  LOG.trace("Original read range: {}. Adjusted read range: {}", readRange, adjusted);
  return adjusted;
}
Usage of co.cask.cdap.logging.read.ReadRange in project cdap by caskdata:
class TestDistributedLogReader, method testDistributedLogPrevBoth.
@Test
public void testDistributedLogPrevBoth() throws Exception {
  // Unbounded time range, no explicit Kafka offset.
  testDistributedLogPrev(new ReadRange(0, Long.MAX_VALUE, LogOffset.INVALID_KAFKA_OFFSET),
                         LOGGING_CONTEXT_BOTH, 16, 4, "TestDistributedLogReader Log message1 ", 60);
  // Same expectation over just the last 24 hours.
  testDistributedLogPrev(new ReadRange(System.currentTimeMillis() - TimeUnit.DAYS.toMillis(1),
                                       System.currentTimeMillis(), LogOffset.INVALID_KAFKA_OFFSET),
                         LOGGING_CONTEXT_BOTH, 16, 4, "TestDistributedLogReader Log message1 ", 60);
  // LATEST range uses different paging parameters.
  testDistributedLogPrev(ReadRange.LATEST, LOGGING_CONTEXT_BOTH, 9, 8,
                         "TestDistributedLogReader Log message1 ", 60);
}
Aggregations