use of co.cask.cdap.logging.read.LogEvent in project cdap by caskdata.
the class MockLogReader method generateWorkflowRunLogs.
/**
* Generate logs for Workflow run.
*/
private void generateWorkflowRunLogs(LoggingContext loggingContext) {
  Logger logger = LoggerFactory.getLogger(Logger.ROOT_LOGGER_NAME);

  String programName;
  if (loggingContext instanceof WorkflowProgramLoggingContext) {
    // Logging is being done for programs running inside Workflow
    LoggingContext.SystemTag systemTag =
      loggingContext.getSystemTagsMap().get(WorkflowProgramLoggingContext.TAG_WORKFLOW_MAP_REDUCE_ID);
    if (systemTag == null) {
      systemTag = loggingContext.getSystemTagsMap().get(WorkflowProgramLoggingContext.TAG_WORKFLOW_SPARK_ID);
    }
    programName = systemTag.getValue();
  } else {
    // Logging is done for Workflow
    programName = loggingContext.getSystemTagsMap().get(WorkflowLoggingContext.TAG_WORKFLOW_ID).getValue();
  }

  for (int i = 0; i < MAX; i++) {
    LoggingEvent event = new LoggingEvent("co.cask.Test", (ch.qos.logback.classic.Logger) logger,
                                          Level.INFO, programName + "<img>-" + i, null, null);
    Map<String, String> tagMap = Maps.newHashMap(
      Maps.transformValues(loggingContext.getSystemTagsMap(), TAG_TO_STRING_FUNCTION));
    event.setMDCPropertyMap(tagMap);
    logEvents.add(new LogEvent(event, new LogOffset(i, i)));
  }
}
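The pattern here — build a Logback LoggingEvent, copy the context's system tags into its MDC map, and wrap the result with a LogOffset — is how every mock entry in this reader is fabricated. A minimal standalone sketch of one such entry, with illustrative tag keys and values (only the constructors and setters shown above are assumed):

// Minimal sketch: fabricate one mock LogEvent the way generateWorkflowRunLogs does.
// The tag keys and values below are illustrative, not taken from a real LoggingContext.
Logger logger = LoggerFactory.getLogger(Logger.ROOT_LOGGER_NAME);
LoggingEvent event = new LoggingEvent("co.cask.Test", (ch.qos.logback.classic.Logger) logger,
                                      Level.INFO, "myProgram<img>-0", null, null);
Map<String, String> tags = new HashMap<>();
tags.put(".namespaceId", "default");
tags.put(".workflowId", "myWorkflow");
event.setMDCPropertyMap(tags);
// Constructor arguments mirror the (i, i) usage above.
LogEvent mockEvent = new LogEvent(event, new LogOffset(0, 0));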
use of co.cask.cdap.logging.read.LogEvent in project cdap by caskdata.
the class MockLogReader method generateLogs.
/**
 * This method is used to generate the logs for a program, for use in tests.
 * A single call to this method adds {@link #MAX} events.
 * The first 20 events are generated without the {@link ApplicationLoggingContext#TAG_RUN_ID} tag.
 * For the next 40 events, every alternate event is tagged with {@code ApplicationLoggingContext#TAG_RUN_ID}.
 * The last 20 events are not tagged with {@code ApplicationLoggingContext#TAG_RUN_ID}.
 * Events are alternately marked as {@link Level#ERROR} and {@link Level#WARN}.
 * Events cycle through "plugin", "program" and "system" as the value of the MDC property ".origin".
 * Every alternate event is tagged with "lifecycle" as the value of the MDC property "MDC:eventType".
 */
private void generateLogs(LoggingContext loggingContext, ProgramId programId,
                          ProgramRunStatus runStatus) throws InterruptedException {
  // All possible values of the MDC property ".origin"
  String[] origins = { "plugin", "program", "system" };
  String entityId = LoggingContextHelper.getEntityId(loggingContext).getValue();
  StackTraceElement stackTraceElementNative = new StackTraceElement("co.cask.Test", "testMethod", null, -2);
  RunId runId = null;
  Long stopTs = null;

  for (int i = 0; i < MAX; ++i) {
    // Set up the run id for events with ids >= 20
    if (i == 20) {
      runId = RunIds.generate(TimeUnit.SECONDS.toMillis(getMockTimeSecs(i)));
    } else if (i == 60 && runStatus != ProgramRunStatus.RUNNING && runStatus != ProgramRunStatus.SUSPENDED) {
      // Record the stop time of the run at the 60th event, but keep recording the run in later events.
      stopTs = getMockTimeSecs(i);
    }

    LoggingEvent event =
      new LoggingEvent("co.cask.Test",
                       (ch.qos.logback.classic.Logger) LoggerFactory.getLogger(Logger.ROOT_LOGGER_NAME),
                       i % 2 == 0 ? Level.ERROR : Level.WARN, entityId + "<img>-" + i, null, null);
    event.setTimeStamp(TimeUnit.SECONDS.toMillis(getMockTimeSecs(i)));

    // Add the run id to the logging context (only while the run has not stopped)
    Map<String, String> tagMap = Maps.newHashMap(
      Maps.transformValues(loggingContext.getSystemTagsMap(), TAG_TO_STRING_FUNCTION));
    if (runId != null && stopTs == null && i % 2 == 0) {
      tagMap.put(ApplicationLoggingContext.TAG_RUN_ID, runId.getId());
    }
    // Determine the value of the ".origin" property by (i % 3)
    tagMap.put(".origin", origins[i % 3]);
    if (i % 2 == 0) {
      tagMap.put("MDC:eventType", "lifecycle");
    }
    if (i == 30) {
      event.setCallerData(new StackTraceElement[] { stackTraceElementNative });
    }
    event.setMDCPropertyMap(tagMap);
    logEvents.add(new LogEvent(event, new LogOffset(i, i)));
  }

  long startTs = RunIds.getTime(runId, TimeUnit.SECONDS);
  if (programId != null) {
    //noinspection ConstantConditions
    runRecordMap.put(programId, RunRecord.builder()
      .setRunId(runId.getId())
      .setStartTime(startTs)
      .setRunTime(startTs + 1)
      .setStopTime(stopTs)
      .setStatus(runStatus)
      .setCluster(new ProgramRunCluster(ProgramRunClusterStatus.PROVISIONED, null, null))
      .build());
    setStartAndRunning(programId.run(runId.getId()), startTs);
    if (stopTs != null) {
      store.setStop(programId.run(runId.getId()), stopTs, runStatus,
                    AppFabricTestHelper.createSourceId(++sourceId));
    }
  }
}
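getMockTimeSecs is used above but not shown in this excerpt; its only obligations at the call sites are determinism and monotonic growth in the event index, so that event timestamps and the run's start/stop times line up. A hypothetical stand-in consistent with that usage:

// Hypothetical stand-in for the helper used above: maps an event index to a
// deterministic, strictly increasing timestamp in seconds. The base epoch and
// step size are illustrative, not taken from the real MockLogReader.
private static long getMockTimeSecs(int eventIndex) {
  long baseEpochSecs = 1_000_000_000L;   // arbitrary fixed base so runs are reproducible
  return baseEpochSecs + eventIndex * 10L;  // 10 seconds between consecutive events
}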
use of co.cask.cdap.logging.read.LogEvent in project cdap by caskdata.
the class MockLogReader method getLogPrev.
@Override
public void getLogPrev(LoggingContext loggingContext, ReadRange readRange,
                       int maxEvents, Filter filter, Callback callback) {
  if (readRange.getKafkaOffset() < 0) {
    readRange = new ReadRange(readRange.getFromMillis(), readRange.getToMillis(), MAX);
  }
  Filter contextFilter = LoggingContextHelper.createFilter(loggingContext);

  callback.init();
  try {
    int count = 0;
    long startOffset = readRange.getKafkaOffset() - maxEvents;
    for (LogEvent logLine : logEvents) {
      long logTime = logLine.getLoggingEvent().getTimeStamp();
      if (!contextFilter.match(logLine.getLoggingEvent())
          || logTime < readRange.getFromMillis() || logTime >= readRange.getToMillis()) {
        continue;
      }
      if (logLine.getOffset().getKafkaOffset() >= startOffset
          && logLine.getOffset().getKafkaOffset() < readRange.getKafkaOffset()) {
        if (++count > maxEvents) {
          break;
        }
        // The mock deliberately drops events with odd offsets whenever a non-empty filter is given.
        if (filter != Filter.EMPTY_FILTER && logLine.getOffset().getKafkaOffset() % 2 != 0) {
          continue;
        }
        callback.handle(logLine);
      }
    }
  } catch (Throwable e) {
    LOG.error("Got exception", e);
  } finally {
    callback.close();
  }
}
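Tests drive getLogPrev with a Callback that gathers the delivered events. A minimal collecting implementation, assuming Callback declares only the init/handle/close lifecycle exercised above:

// Minimal collecting Callback, assuming the interface declares only the
// init/handle/close lifecycle that getLogPrev exercises above.
final List<LogEvent> collected = new ArrayList<>();
Callback callback = new Callback() {
  @Override
  public void init() {
    collected.clear();
  }

  @Override
  public void handle(LogEvent event) {
    collected.add(event);
  }

  @Override
  public void close() {
    // nothing to release in this sketch
  }
};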
use of co.cask.cdap.logging.read.LogEvent in project cdap by caskdata.
the class AbstractLogHandler method doGetLogs.
protected void doGetLogs(HttpResponder responder, LoggingContext loggingContext,
                         long fromTimeSecsParam, long toTimeSecsParam, boolean escape,
                         String filterStr, @Nullable RunRecordMeta runRecord, String format,
                         List<String> fieldsToSuppress) {
  try {
    TimeRange timeRange = parseTime(fromTimeSecsParam, toTimeSecsParam, responder);
    if (timeRange == null) {
      return;
    }

    Filter filter = FilterParser.parse(filterStr);

    ReadRange readRange = new ReadRange(timeRange.getFromMillis(), timeRange.getToMillis(),
                                        LogOffset.INVALID_KAFKA_OFFSET);
    readRange = adjustReadRange(readRange, runRecord, fromTimeSecsParam != -1);

    AbstractChunkedLogProducer logsProducer = null;
    try {
      // the iterator is closed by the BodyProducer passed to the HttpResponder
      CloseableIterator<LogEvent> logIter =
        logReader.getLog(loggingContext, readRange.getFromMillis(), readRange.getToMillis(), filter);
      logsProducer = getFullLogsProducer(format, logIter, fieldsToSuppress, escape);
    } catch (Exception ex) {
      LOG.debug("Exception while reading logs for logging context {}", loggingContext, ex);
      if (logsProducer != null) {
        logsProducer.close();
      }
      responder.sendStatus(HttpResponseStatus.INTERNAL_SERVER_ERROR);
      return;
    }
    responder.sendContent(HttpResponseStatus.OK, logsProducer, logsProducer.getResponseHeaders());
  } catch (SecurityException e) {
    responder.sendStatus(HttpResponseStatus.UNAUTHORIZED);
  } catch (IllegalArgumentException e) {
    responder.sendString(HttpResponseStatus.BAD_REQUEST, e.getMessage());
  }
}
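parseTime is not part of this excerpt; from the call site we only know that it answers the HTTP request itself and returns null when the range is invalid. A hypothetical sketch honoring that contract (the TimeRange constructor and the default bounds are assumptions, not the real helper):

// Hypothetical sketch of the parseTime helper used above: validates the range,
// answers BAD_REQUEST and returns null on invalid input. Only the null-on-error
// contract is taken from the call site.
@Nullable
private TimeRange parseTime(long fromTimeSecsParam, long toTimeSecsParam, HttpResponder responder) {
  long fromMillis = fromTimeSecsParam < 0 ? -1 : TimeUnit.SECONDS.toMillis(fromTimeSecsParam);
  long toMillis = toTimeSecsParam < 0 ? System.currentTimeMillis()
                                      : TimeUnit.SECONDS.toMillis(toTimeSecsParam);
  if (toMillis <= fromMillis) {
    responder.sendString(HttpResponseStatus.BAD_REQUEST, "Invalid time range: 'from' must precede 'to'.");
    return null;
  }
  return new TimeRange(fromMillis, toMillis);
}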
use of co.cask.cdap.logging.read.LogEvent in project cdap by caskdata.
the class TextChunkedLogProducer method writeLogEvents.
@Override
protected ByteBuf writeLogEvents(CloseableIterator<LogEvent> logEventIter) throws IOException {
  ByteBuf buffer = Unpooled.buffer(BUFFER_BYTES);
  while (logEventIter.hasNext() && buffer.readableBytes() < BUFFER_BYTES) {
    LogEvent logEvent = logEventIter.next();
    String logLine = patternLayout.doLayout(logEvent.getLoggingEvent());
    // HTML-escape when requested; the "<img>" markers in the mock log messages exercise this path.
    logLine = escape ? StringEscapeUtils.escapeHtml(logLine) : logLine;
    buffer.writeCharSequence(logLine, StandardCharsets.UTF_8);
  }
  return buffer;
}
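Two details worth noting: the loop checks buffer.readableBytes() before appending, so a chunk can overshoot BUFFER_BYTES by at most one rendered line, and the escaping step is what neutralizes the deliberate "<img>" markers that MockLogReader plants in its messages. For instance:

// Commons Lang's StringEscapeUtils renders the planted "<img>" marker inert:
String raw = "myProgram<img>-0";
String safe = StringEscapeUtils.escapeHtml(raw);  // -> "myProgram&lt;img&gt;-0"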