Use of ch.qos.logback.classic.spi.LoggingEvent in project cdap by caskdata.
Example from class MockLogReader, method generateWorkflowRunLogs.
/**
 * Generates {@code MAX} log events for a Workflow run and appends them to {@code logEvents}.
 * <p>
 * If the context is a {@link WorkflowProgramLoggingContext}, the events are attributed to the
 * inner MapReduce or Spark program (whichever tag is present); otherwise they are attributed
 * to the Workflow itself. Each event's message embeds the program name and the event index,
 * and each event carries the context's system tags as its MDC map.
 *
 * @param loggingContext context whose system tags identify the Workflow (or inner program)
 * @throws IllegalStateException if a {@link WorkflowProgramLoggingContext} carries neither a
 *         MapReduce nor a Spark program id tag
 */
private void generateWorkflowRunLogs(LoggingContext loggingContext) {
  Logger logger = LoggerFactory.getLogger(Logger.ROOT_LOGGER_NAME);
  String programName;
  if (loggingContext instanceof WorkflowProgramLoggingContext) {
    // Logging is being done for programs running inside Workflow
    LoggingContext.SystemTag systemTag;
    systemTag = loggingContext.getSystemTagsMap().get(WorkflowProgramLoggingContext.TAG_WORKFLOW_MAP_REDUCE_ID);
    if (systemTag == null) {
      systemTag = loggingContext.getSystemTagsMap().get(WorkflowProgramLoggingContext.TAG_WORKFLOW_SPARK_ID);
    }
    if (systemTag == null) {
      // Fail fast with a clear message instead of an unexplained NPE on getValue() below.
      throw new IllegalStateException(
        "WorkflowProgramLoggingContext has neither a MapReduce nor a Spark program id tag");
    }
    programName = systemTag.getValue();
  } else {
    // Logging is done for Workflow
    programName = loggingContext.getSystemTagsMap().get(WorkflowLoggingContext.TAG_WORKFLOW_ID).getValue();
  }
  for (int i = 0; i < MAX; i++) {
    LoggingEvent event = new LoggingEvent("co.cask.Test", (ch.qos.logback.classic.Logger) logger, Level.INFO,
                                          programName + "<img>-" + i, null, null);
    // Copy the context's system tags into the event's MDC so readers can filter on them.
    Map<String, String> tagMap = Maps.newHashMap(Maps.transformValues(loggingContext.getSystemTagsMap(),
                                                                      TAG_TO_STRING_FUNCTION));
    event.setMDCPropertyMap(tagMap);
    logEvents.add(new LogEvent(event, new LogOffset(i, i)));
  }
}
Use of ch.qos.logback.classic.spi.LoggingEvent in project cdap by caskdata.
Example from class MockLogReader, method generateLogs.
/**
 * This method is used to generate the logs for program which are used for testing.
 * Single call to this method would add {@link #MAX} number of events.
 * First 20 events are generated without {@link ApplicationLoggingContext#TAG_RUN_ID} tag.
 * For next 40 events, alternate event is tagged with {@code ApplicationLoggingContext#TAG_RUN_ID}.
 * Last 20 events are not tagged with {@code ApplicationLoggingContext#TAG_RUN_ID}.
 * All events are alternately marked as {@link Level#ERROR} and {@link Level#WARN}.
 * All events are alternately tagged with "plugin", "program" and "system" as value of MDC property ".origin"
 * All events are alternately tagged with "lifecycle" as value of MDC property "MDC:eventType".
 * A run record is also registered for {@code programId} (if non-null), started at the run id's
 * creation time and stopped at the 60th event's time when {@code runStatus} is a terminal status.
 */
private void generateLogs(LoggingContext loggingContext, ProgramId programId, ProgramRunStatus runStatus) throws InterruptedException {
// All possible values of the MDC property ".origin"
String[] origins = { "plugin", "program", "system" };
String entityId = LoggingContextHelper.getEntityId(loggingContext).getValue();
// Synthetic "native" frame (line number -2) attached as caller data to event 30 only
StackTraceElement stackTraceElementNative = new StackTraceElement("co.cask.Test", "testMethod", null, -2);
RunId runId = null;
Long stopTs = null;
for (int i = 0; i < MAX; ++i) {
// Setup run id for event with ids >= 20
if (i == 20) {
runId = RunIds.generate(TimeUnit.SECONDS.toMillis(getMockTimeSecs(i)));
} else if (i == 60 && runStatus != ProgramRunStatus.RUNNING && runStatus != ProgramRunStatus.SUSPENDED) {
// Record stop time for run for 60th event, but still continue to record run in the other logging events.
stopTs = getMockTimeSecs(i);
}
// Severity alternates: even ids are ERROR, odd ids are WARN
LoggingEvent event = new LoggingEvent("co.cask.Test", (ch.qos.logback.classic.Logger) LoggerFactory.getLogger(Logger.ROOT_LOGGER_NAME), i % 2 == 0 ? Level.ERROR : Level.WARN, entityId + "<img>-" + i, null, null);
event.setTimeStamp(TimeUnit.SECONDS.toMillis(getMockTimeSecs(i)));
// Add runid to logging context: only even-id events after the run started (i >= 20)
// and before the stop time was recorded carry the run id tag
Map<String, String> tagMap = Maps.newHashMap(Maps.transformValues(loggingContext.getSystemTagsMap(), TAG_TO_STRING_FUNCTION));
if (runId != null && stopTs == null && i % 2 == 0) {
tagMap.put(ApplicationLoggingContext.TAG_RUN_ID, runId.getId());
}
// Determine the value of ".origin" property by (i % 3)
tagMap.put(".origin", origins[i % 3]);
if (i % 2 == 0) {
tagMap.put("MDC:eventType", "lifecycle");
}
if (i == 30) {
event.setCallerData(new StackTraceElement[] { stackTraceElementNative });
}
event.setMDCPropertyMap(tagMap);
logEvents.add(new LogEvent(event, new LogOffset(i, i)));
}
// NOTE(review): assumes MAX > 20 so runId was assigned in the loop — otherwise this NPEs.
long startTs = RunIds.getTime(runId, TimeUnit.SECONDS);
if (programId != null) {
//noinspection ConstantConditions
runRecordMap.put(programId, new RunRecord(runId.getId(), startTs, stopTs, runStatus, null));
store.setStart(programId, runId.getId(), startTs);
if (stopTs != null) {
store.setStop(programId, runId.getId(), stopTs, runStatus);
}
}
}
Use of ch.qos.logback.classic.spi.LoggingEvent in project cdap by caskdata.
Example from class KafkaLogProcessorPipelineTest, method createLoggingEvent.
/**
 * Builds a fresh {@link ILoggingEvent} carrying the supplied logger name, level,
 * message and timestamp.
 *
 * @param loggerName name to report as the originating logger
 * @param level severity of the event
 * @param message formatted message text
 * @param timestamp event time in epoch milliseconds
 * @return the populated event
 */
private ILoggingEvent createLoggingEvent(String loggerName, Level level, String message, long timestamp) {
  LoggingEvent loggingEvent = new LoggingEvent();
  loggingEvent.setTimeStamp(timestamp);
  loggingEvent.setMessage(message);
  loggingEvent.setLoggerName(loggerName);
  loggingEvent.setLevel(level);
  return loggingEvent;
}
Use of ch.qos.logback.classic.spi.LoggingEvent in project cdap by caskdata.
Example from class LogFileManagerTest, method testLogFileManager.
@Test
public void testLogFileManager() throws Exception {
  // A very short max lifetime so the first output stream expires during the test.
  int syncIntervalBytes = 1024 * 1024;
  long maxFileLifetimeMs = 50;
  long maxSizeBytes = 104857600;

  DatasetManager dsManager = new DefaultDatasetManager(
    injector.getInstance(DatasetFramework.class), NamespaceId.SYSTEM,
    co.cask.cdap.common.service.RetryStrategies.noRetry());
  Transactional txnl = Transactions.createTransactionalWithRetry(
    Transactions.createTransactional(
      new MultiThreadDatasetCache(
        new SystemDatasetInstantiator(injector.getInstance(DatasetFramework.class)),
        injector.getInstance(TransactionSystemClient.class), NamespaceId.SYSTEM,
        ImmutableMap.<String, String>of(), null, null)),
    RetryStrategies.retryOnConflict(20, 100));
  FileMetaDataWriter metaWriter = new FileMetaDataWriter(dsManager, txnl);
  LogFileManager fileManager = new LogFileManager(
    "700", "600", maxFileLifetimeMs, maxSizeBytes, syncIntervalBytes, metaWriter,
    injector.getInstance(LocationFactory.class));

  LogPathIdentifier pathId = new LogPathIdentifier("test", "testApp", "testFlow");
  long eventTs = System.currentTimeMillis();
  LogFileOutputStream firstStream = fileManager.getLogFileOutputStream(pathId, eventTs);
  LoggingEvent errorEvent = getLoggingEvent(
    "co.cask.Test1", (ch.qos.logback.classic.Logger) LoggerFactory.getLogger(Logger.ROOT_LOGGER_NAME),
    Level.ERROR, "test message 1");
  firstStream.append(errorEvent);

  // we are doing this, instead of calling getLogFileOutputStream to avoid race, if test machine can be slow.
  Assert.assertNotNull((fileManager.getActiveOutputStream(pathId)));
  TimeUnit.MILLISECONDS.sleep(60);
  fileManager.flush();
  // should be closed on flush, should return null
  Assert.assertNull((fileManager.getActiveOutputStream(pathId)));

  LogFileOutputStream secondStream = fileManager.getLogFileOutputStream(pathId, eventTs);
  // make sure the new location we got is different
  Assert.assertNotEquals(firstStream.getLocation(), secondStream.getLocation());
}
Use of ch.qos.logback.classic.spi.LoggingEvent in project cdap by caskdata.
Example from class KafkaOffsetResolverTest, method createLoggingEvent.
/**
 * Creates an {@link ILoggingEvent} populated with the given logger name,
 * severity level, message and event timestamp (epoch millis).
 */
private ILoggingEvent createLoggingEvent(String loggerName, Level level, String message, long timestamp) {
  LoggingEvent result = new LoggingEvent();
  result.setLoggerName(loggerName);
  result.setLevel(level);
  result.setTimeStamp(timestamp);
  result.setMessage(message);
  return result;
}
Aggregations