Use of io.cdap.cdap.logging.read.LogEvent in project cdap by cdapio.
The class TestFileLogging, method testGetLog.
@Test
public void testGetLog() throws Exception {
  // LogReader.getLog is tested in LogSaverTest for distributed mode
  LoggingContext loggingContext =
      new WorkerLoggingContext("TFL_NS_1", "APP_1", "WORKER_1", "RUN1", "INSTANCE1");
  FileLogReader logTail = injector.getInstance(FileLogReader.class);
  LoggingTester.LogCallback logCallback1 = new LoggingTester.LogCallback();
  logTail.getLogPrev(loggingContext, ReadRange.LATEST, 60, Filter.EMPTY_FILTER, logCallback1);
  List<LogEvent> allEvents = logCallback1.getEvents();
  Assert.assertEquals(60, allEvents.size());

  // getLog's time range is half-open, [fromMillis, toMillis): the event at
  // index 15 is excluded, so only events 10..14 are returned.
  List<LogEvent> events = Lists.newArrayList(
      logTail.getLog(loggingContext,
                     allEvents.get(10).getLoggingEvent().getTimeStamp(),
                     allEvents.get(15).getLoggingEvent().getTimeStamp(),
                     Filter.EMPTY_FILTER));
  Assert.assertEquals(5, events.size());
  Assert.assertEquals(allEvents.get(10).getLoggingEvent().getFormattedMessage(),
                      events.get(0).getLoggingEvent().getFormattedMessage());
  Assert.assertEquals(allEvents.get(14).getLoggingEvent().getFormattedMessage(),
                      events.get(4).getLoggingEvent().getFormattedMessage());

  events = Lists.newArrayList(
      logTail.getLog(loggingContext,
                     allEvents.get(0).getLoggingEvent().getTimeStamp(),
                     allEvents.get(59).getLoggingEvent().getTimeStamp(),
                     Filter.EMPTY_FILTER));
  Assert.assertEquals(59, events.size());
  Assert.assertEquals(allEvents.get(0).getLoggingEvent().getFormattedMessage(),
                      events.get(0).getLoggingEvent().getFormattedMessage());
  Assert.assertEquals(allEvents.get(58).getLoggingEvent().getFormattedMessage(),
                      events.get(58).getLoggingEvent().getFormattedMessage());

  events = Lists.newArrayList(
      logTail.getLog(loggingContext,
                     allEvents.get(12).getLoggingEvent().getTimeStamp(),
                     allEvents.get(41).getLoggingEvent().getTimeStamp(),
                     Filter.EMPTY_FILTER));
  Assert.assertEquals(29, events.size());
  Assert.assertEquals(allEvents.get(12).getLoggingEvent().getFormattedMessage(),
                      events.get(0).getLoggingEvent().getFormattedMessage());
  Assert.assertEquals(allEvents.get(40).getLoggingEvent().getFormattedMessage(),
                      events.get(28).getLoggingEvent().getFormattedMessage());

  events = Lists.newArrayList(
      logTail.getLog(loggingContext,
                     allEvents.get(22).getLoggingEvent().getTimeStamp(),
                     allEvents.get(38).getLoggingEvent().getTimeStamp(),
                     Filter.EMPTY_FILTER));
  Assert.assertEquals(16, events.size());
  Assert.assertEquals(allEvents.get(22).getLoggingEvent().getFormattedMessage(),
                      events.get(0).getLoggingEvent().getFormattedMessage());
  Assert.assertEquals(allEvents.get(37).getLoggingEvent().getFormattedMessage(),
                      events.get(15).getLoggingEvent().getFormattedMessage());

  events = Lists.newArrayList(
      logTail.getLog(loggingContext,
                     allEvents.get(41).getLoggingEvent().getTimeStamp(),
                     allEvents.get(59).getLoggingEvent().getTimeStamp(),
                     Filter.EMPTY_FILTER));
  Assert.assertEquals(18, events.size());
  Assert.assertEquals(allEvents.get(41).getLoggingEvent().getFormattedMessage(),
                      events.get(0).getLoggingEvent().getFormattedMessage());
  Assert.assertEquals(allEvents.get(58).getLoggingEvent().getFormattedMessage(),
                      events.get(17).getLoggingEvent().getFormattedMessage());

  // Try with a null run id; should get all logs for WORKER_1.
  LoggingContext loggingContext1 =
      new WorkerLoggingContext("TFL_NS_1", "APP_1", "WORKER_1", null, "INSTANCE1");
  events = Lists.newArrayList(logTail.getLog(loggingContext1, 0, Long.MAX_VALUE, Filter.EMPTY_FILTER));
  Assert.assertEquals(120, events.size());
}
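For readers who just want to pull events out of the reader, here is a minimal consumption sketch. It assumes an injector and logging context set up as in the test above, and that FileLogReader.getLog returns a CloseableIterator<LogEvent>, as the AbstractLogHttpHandler snippet below relies on:

FileLogReader reader = injector.getInstance(FileLogReader.class);
LoggingContext context =
    new WorkerLoggingContext("TFL_NS_1", "APP_1", "WORKER_1", "RUN1", "INSTANCE1");
// CloseableIterator is Closeable, so try-with-resources releases the
// underlying log files even if iteration stops early.
try (CloseableIterator<LogEvent> iter =
       reader.getLog(context, 0, Long.MAX_VALUE, Filter.EMPTY_FILTER)) {
  while (iter.hasNext()) {
    LogEvent event = iter.next();
    // Each LogEvent wraps an ILoggingEvent plus a LogOffset.
    System.out.println(event.getLoggingEvent().getFormattedMessage());
  }
}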
Use of io.cdap.cdap.logging.read.LogEvent in project cdap by cdapio.
The class AbstractLogHttpHandler, method doGetLogs.
protected void doGetLogs(LogReader logReader, HttpResponder responder,
                         LoggingContext loggingContext, long fromTimeSecsParam,
                         long toTimeSecsParam, boolean escape, String filterStr,
                         @Nullable RunRecordDetail runRecord, String format,
                         List<String> fieldsToSuppress) {
  try {
    TimeRange timeRange = parseTime(fromTimeSecsParam, toTimeSecsParam, responder);
    if (timeRange == null) {
      return;
    }
    Filter filter = FilterParser.parse(filterStr);
    ReadRange readRange = new ReadRange(timeRange.getFromMillis(), timeRange.getToMillis(),
                                        LogOffset.INVALID_KAFKA_OFFSET);
    readRange = adjustReadRange(readRange, runRecord, fromTimeSecsParam != -1);
    try {
      // The iterator is closed by the BodyProducer passed to the HttpResponder.
      CloseableIterator<LogEvent> logIter =
          logReader.getLog(loggingContext, readRange.getFromMillis(),
                           readRange.getToMillis(), filter);
      AbstractChunkedLogProducer logsProducer =
          getFullLogsProducer(format, logIter, fieldsToSuppress, escape);
      responder.sendContent(HttpResponseStatus.OK, logsProducer, logsProducer.getResponseHeaders());
    } catch (Exception ex) {
      LOG.debug("Exception while reading logs for logging context {}", loggingContext, ex);
      responder.sendStatus(HttpResponseStatus.INTERNAL_SERVER_ERROR);
    }
  } catch (SecurityException e) {
    responder.sendStatus(HttpResponseStatus.UNAUTHORIZED);
  } catch (IllegalArgumentException e) {
    responder.sendString(HttpResponseStatus.BAD_REQUEST, e.getMessage());
  }
}
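To make the parse-then-read core of this handler concrete, here is a hedged sketch. The filter string "loglevel=ERROR" is an assumption about FilterParser's accepted syntax, and fromMillis/toMillis are placeholder bounds rather than values from the handler:

Filter filter = FilterParser.parse("loglevel=ERROR"); // assumed filter syntax
ReadRange readRange = new ReadRange(fromMillis, toMillis, LogOffset.INVALID_KAFKA_OFFSET);
try (CloseableIterator<LogEvent> iter =
       logReader.getLog(loggingContext, readRange.getFromMillis(),
                        readRange.getToMillis(), filter)) {
  // In the handler the iterator is streamed out by a chunked producer;
  // here we just drain it in-process.
  iter.forEachRemaining(event ->
      System.out.println(event.getLoggingEvent().getFormattedMessage()));
}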
Use of io.cdap.cdap.logging.read.LogEvent in project cdap by cdapio.
The class LocalLogAppenderResilientTest, method testResilientLogging.
@Test
public void testResilientLogging() throws Exception {
  Configuration hConf = new Configuration();
  CConfiguration cConf = CConfiguration.create();
  File datasetDir = new File(tmpFolder.newFolder(), "datasetUser");
  //noinspection ResultOfMethodCallIgnored
  datasetDir.mkdirs();
  cConf.set(Constants.Dataset.Manager.OUTPUT_DIR, datasetDir.getAbsolutePath());
  cConf.set(Constants.Service.MASTER_SERVICES_BIND_ADDRESS, "localhost");
  cConf.set(Constants.Dataset.Executor.ADDRESS, "localhost");
  cConf.setInt(Constants.Dataset.Executor.PORT, Networks.getRandomPort());
  cConf.set(Constants.CFG_LOCAL_DATA_DIR, tmpFolder.newFolder().getAbsolutePath());
  Injector injector = Guice.createInjector(
      new ConfigModule(cConf, hConf),
      RemoteAuthenticatorModules.getNoOpModule(),
      new IOModule(),
      new ZKClientModule(),
      new KafkaClientModule(),
      new InMemoryDiscoveryModule(),
      new NonCustomLocationUnitTestModule(),
      new DataFabricModules().getInMemoryModules(),
      new DataSetsModules().getStandaloneModules(),
      new DataSetServiceModules().getInMemoryModules(),
      new TransactionMetricsModule(),
      new ExploreClientModule(),
      new LocalLogAppenderModule(),
      new NamespaceAdminTestModule(),
      new AuthorizationTestModule(),
      new AuthorizationEnforcementModule().getInMemoryModules(),
      new AuthenticationContextModules().getMasterModule(),
      new AbstractModule() {
        @Override
        protected void configure() {
          bind(UGIProvider.class).to(UnsupportedUGIProvider.class);
          bind(OwnerAdmin.class).to(NoOpOwnerAdmin.class);
          bind(MetadataServiceClient.class).to(NoOpMetadataServiceClient.class);
        }
      });
  TransactionManager txManager = injector.getInstance(TransactionManager.class);
  txManager.startAndWait();
  StoreDefinition.createAllTables(injector.getInstance(StructuredTableAdmin.class));
  DatasetOpExecutorService opExecutorService = injector.getInstance(DatasetOpExecutorService.class);
  opExecutorService.startAndWait();

  // Start the logging before starting the dataset service.
  LoggingContextAccessor.setLoggingContext(
      new WorkerLoggingContext("TRL_ACCT_1", "APP_1", "WORKER_1", "RUN", "INSTANCE"));
  String logBaseDir = "trl-log/log_files_" + new Random(System.currentTimeMillis()).nextLong();
  cConf.set(LoggingConfiguration.LOG_BASE_DIR, logBaseDir);
  cConf.setInt(LoggingConfiguration.LOG_MAX_FILE_SIZE_BYTES, 20 * 1024);
  final LogAppender appender = injector.getInstance(LocalLogAppender.class);
  new LogAppenderInitializer(appender).initialize("TestResilientLogging");

  int failureMsgCount = 3;
  final CountDownLatch failureLatch = new CountDownLatch(failureMsgCount);
  LoggerContext loggerContext = (LoggerContext) LoggerFactory.getILoggerFactory();
  loggerContext.getStatusManager().add(new StatusListener() {
    @Override
    public void addStatusEvent(Status status) {
      if (status.getLevel() != Status.ERROR || status.getOrigin() != appender) {
        return;
      }
      Throwable cause = status.getThrowable();
      if (cause != null) {
        Throwable rootCause = Throwables.getRootCause(cause);
        if (rootCause instanceof ServiceUnavailableException) {
          String serviceName = ((ServiceUnavailableException) rootCause).getServiceName();
          if (Constants.Service.DATASET_MANAGER.equals(serviceName)) {
            failureLatch.countDown();
          }
        }
      }
    }
  });

  Logger logger = LoggerFactory.getLogger("TestResilientLogging");
  for (int i = 0; i < failureMsgCount; ++i) {
    Exception e1 = new Exception("Test Exception1");
    Exception e2 = new Exception("Test Exception2", e1);
    logger.warn("Test log message " + i + " {} {}", "arg1", "arg2", e2);
  }
  // Wait for the three append failures to happen. The wait time has to be
  // > 3 seconds because DatasetServiceClient has a 1-second timeout on discovery.
  failureLatch.await(5, TimeUnit.SECONDS);

  // Start the dataset service and wait for it to become discoverable.
  DatasetService dsService = injector.getInstance(DatasetService.class);
  dsService.startAndWait();
  final CountDownLatch startLatch = new CountDownLatch(1);
  DiscoveryServiceClient discoveryClient = injector.getInstance(DiscoveryServiceClient.class);
  discoveryClient.discover(Constants.Service.DATASET_MANAGER)
      .watchChanges(new ServiceDiscovered.ChangeListener() {
        @Override
        public void onChange(ServiceDiscovered serviceDiscovered) {
          if (!Iterables.isEmpty(serviceDiscovered)) {
            startLatch.countDown();
          }
        }
      }, Threads.SAME_THREAD_EXECUTOR);
  startLatch.await(5, TimeUnit.SECONDS);

  // Do some more logging after the service is started.
  for (int i = 5; i < 10; ++i) {
    Exception e1 = new Exception("Test Exception1");
    Exception e2 = new Exception("Test Exception2", e1);
    logger.warn("Test log message " + i + " {} {}", "arg1", "arg2", e2);
  }
  appender.stop();

  // Verify: we should have at least 5 events.
  LoggingContext loggingContext =
      new WorkerLoggingContext("TRL_ACCT_1", "APP_1", "WORKER_1", "RUN", "INSTANCE");
  FileLogReader logTail = injector.getInstance(FileLogReader.class);
  LoggingTester.LogCallback logCallback1 = new LoggingTester.LogCallback();
  logTail.getLogPrev(loggingContext, ReadRange.LATEST, 10, Filter.EMPTY_FILTER, logCallback1);
  List<LogEvent> allEvents = logCallback1.getEvents();
  Assert.assertTrue(allEvents.toString(), allEvents.size() >= 5);

  // Finally, stop all services.
  Services.chainStop(dsService, opExecutorService, txManager);
}
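The failure detection above hinges on Logback's status API rather than on thrown exceptions, since appenders swallow errors. A minimal standalone sketch of that pattern (ch.qos.logback.classic.LoggerContext and ch.qos.logback.core.status.Status; the printed message is illustrative):

LoggerContext context = (LoggerContext) LoggerFactory.getILoggerFactory();
// StatusListener has a single method, addStatusEvent, so a lambda suffices;
// appender errors surface here as Status objects with level ERROR.
context.getStatusManager().add(status -> {
  if (status.getLevel() == Status.ERROR) {
    System.err.println("Appender error: " + status.getMessage());
  }
});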
Use of io.cdap.cdap.logging.read.LogEvent in project cdap by cdapio.
The class LogLocation, method readToEndSyncPosition.
/**
 * Reads the current block in the Avro file, from the current block sync marker
 * to the next block sync marker.
 */
private List<LogEvent> readToEndSyncPosition(DataFileReader<GenericRecord> dataFileReader,
                                             Filter logFilter, long fromTimeMs,
                                             long endSyncPosition) throws IOException {
  List<LogEvent> logSegment = new ArrayList<>();
  long currentSyncPosition = dataFileReader.previousSync();
  // Read until the file is exhausted, or until endSyncPosition has been
  // reached (an endSyncPosition of -1 means no position limit).
  while (dataFileReader.hasNext() && (endSyncPosition == -1 || currentSyncPosition < endSyncPosition)) {
    ILoggingEvent loggingEvent = new LoggingEvent(dataFileReader.next());
    loggingEvent.prepareForDeferredProcessing();
    // Stop once the event timestamps pass fromTimeMs.
    if (loggingEvent.getTimeStamp() > fromTimeMs) {
      break;
    }
    if (logFilter.match(loggingEvent)) {
      logSegment.add(new LogEvent(loggingEvent,
          new LogOffset(LogOffset.INVALID_KAFKA_OFFSET, loggingEvent.getTimeStamp())));
    }
    currentSyncPosition = dataFileReader.previousSync();
  }
  return logSegment;
}
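For context on the sync-marker mechanics this method leans on, here is a hedged sketch using plain Avro APIs (org.apache.avro.file.DataFileReader). The file path, byte offsets, and the decode step are illustrative assumptions, not values from LogLocation:

File avroFile = new File("/tmp/logs.avro"); // hypothetical path
try (DataFileReader<GenericRecord> reader =
       new DataFileReader<>(avroFile, new GenericDatumReader<GenericRecord>())) {
  long start = 0;      // illustrative block-range boundaries, in bytes
  long end = 16_384;
  reader.sync(start);  // jump to the first sync marker at or after 'start'
  // pastSync(end) turns true once the reader crosses the first sync marker
  // after 'end', which bounds the scan to the blocks overlapping the range.
  while (reader.hasNext() && !reader.pastSync(end)) {
    GenericRecord record = reader.next();
    // decode 'record' into a log event here
  }
}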
Use of io.cdap.cdap.logging.read.LogEvent in project cdap by cdapio.
The class MockLogReader, method getLogPrev.
@Override
public void getLogPrev(LoggingContext loggingContext, ReadRange readRange,
                       int maxEvents, Filter filter, Callback callback) {
  if (readRange.getKafkaOffset() < 0) {
    readRange = new ReadRange(readRange.getFromMillis(), readRange.getToMillis(), MAX);
  }
  Filter contextFilter = LoggingContextHelper.createFilter(loggingContext);
  callback.init();
  try {
    int count = 0;
    long startOffset = readRange.getKafkaOffset() - maxEvents;
    for (LogEvent logLine : logEvents) {
      long logTime = logLine.getLoggingEvent().getTimeStamp();
      if (!contextFilter.match(logLine.getLoggingEvent())
          || logTime < readRange.getFromMillis()
          || logTime >= readRange.getToMillis()) {
        continue;
      }
      if (logLine.getOffset().getKafkaOffset() >= startOffset
          && logLine.getOffset().getKafkaOffset() < readRange.getKafkaOffset()) {
        if (++count > maxEvents) {
          break;
        }
        // The mock stands in for real filtering: any non-empty filter drops
        // events with odd Kafka offsets.
        if (filter != Filter.EMPTY_FILTER && logLine.getOffset().getKafkaOffset() % 2 != 0) {
          continue;
        }
        callback.handle(logLine);
      }
    }
  } catch (Throwable e) {
    LOG.error("Got exception", e);
  } finally {
    callback.close();
  }
}
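The Callback contract that getLogPrev drives is visible in the calls above: init once, handle per event, close in a finally block. A minimal collecting implementation, assuming the io.cdap.cdap.logging.read.Callback interface declares exactly those three methods (the mockLogReader and loggingContext variables are hypothetical):

List<LogEvent> collected = new ArrayList<>();
Callback collectingCallback = new Callback() {
  @Override
  public void init() {
    collected.clear(); // called once before any events are delivered
  }

  @Override
  public void handle(LogEvent event) {
    collected.add(event); // called once per matching event
  }

  @Override
  public void close() {
    // called exactly once, even if reading fails (see the finally block above)
  }
};
mockLogReader.getLogPrev(loggingContext, ReadRange.LATEST, 10, Filter.EMPTY_FILTER, collectingCallback);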