Use of co.cask.cdap.logging.read.LogEvent in project cdap by caskdata.
The class CDAPLogAppenderTest, method testCDAPLogAppender:
@Test
public void testCDAPLogAppender() throws Exception {
  int syncInterval = 1024 * 1024;
  CDAPLogAppender cdapLogAppender = new CDAPLogAppender();
  cdapLogAppender.setSyncIntervalBytes(syncInterval);
  cdapLogAppender.setMaxFileLifetimeMs(TimeUnit.DAYS.toMillis(1));
  cdapLogAppender.setMaxFileSizeInBytes(104857600);
  cdapLogAppender.setDirPermissions("700");
  cdapLogAppender.setFilePermissions("600");
  cdapLogAppender.setFileRetentionDurationDays(1);
  cdapLogAppender.setLogCleanupIntervalMins(10);
  cdapLogAppender.setFileCleanupTransactionTimeout(30);
  AppenderContext context = new LocalAppenderContext(injector.getInstance(DatasetFramework.class),
                                                     injector.getInstance(TransactionSystemClient.class),
                                                     injector.getInstance(LocationFactory.class),
                                                     new NoOpMetricsCollectionService());
  context.start();
  cdapLogAppender.setContext(context);
  cdapLogAppender.start();
  FileMetaDataReader fileMetaDataReader = injector.getInstance(FileMetaDataReader.class);
  LoggingEvent event =
    new LoggingEvent("co.cask.Test",
                     (ch.qos.logback.classic.Logger) LoggerFactory.getLogger(Logger.ROOT_LOGGER_NAME),
                     Level.ERROR, "test message", null, null);
  Map<String, String> properties = new HashMap<>();
  properties.put(NamespaceLoggingContext.TAG_NAMESPACE_ID, "default");
  properties.put(ApplicationLoggingContext.TAG_APPLICATION_ID, "testApp");
  properties.put(FlowletLoggingContext.TAG_FLOW_ID, "testFlow");
  properties.put(FlowletLoggingContext.TAG_FLOWLET_ID, "testFlowlet");
  event.setMDCPropertyMap(properties);
  cdapLogAppender.doAppend(event);
  cdapLogAppender.stop();
  context.stop();
  try {
    List<LogLocation> files =
      fileMetaDataReader.listFiles(cdapLogAppender.getLoggingPath(properties), 0, Long.MAX_VALUE);
    Assert.assertEquals(1, files.size());
    LogLocation logLocation = files.get(0);
    Assert.assertEquals(LogLocation.VERSION_1, logLocation.getFrameworkVersion());
    Assert.assertTrue(logLocation.getLocation().exists());
    CloseableIterator<LogEvent> logEventCloseableIterator =
      logLocation.readLog(Filter.EMPTY_FILTER, 0, Long.MAX_VALUE, Integer.MAX_VALUE);
    int logCount = 0;
    while (logEventCloseableIterator.hasNext()) {
      logCount++;
      LogEvent logEvent = logEventCloseableIterator.next();
      Assert.assertEquals(event.getMessage(), logEvent.getLoggingEvent().getMessage());
    }
    logEventCloseableIterator.close();
    Assert.assertEquals(1, logCount);
    // checking permission
    String expectedPermissions = "rw-------";
    for (LogLocation file : files) {
      Location location = file.getLocation();
      Assert.assertEquals(expectedPermissions, location.getPermissions());
    }
  } catch (Exception e) {
    Assert.fail();
  }
}
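Note on the read-back above: the CloseableIterator is closed only on the success path, so an assertion failure inside the loop would leak the underlying file handle. Below is a minimal sketch of the same verification using try-with-resources, assuming (as in the CDAP API) that CloseableIterator implements java.io.Closeable; the helper name countAndVerifyEvents is illustrative, not part of the test class:

// Hypothetical helper for the same test class (reuses its imports);
// closes the iterator on every path, including assertion failures.
private int countAndVerifyEvents(LogLocation logLocation, String expectedMessage) throws IOException {
  try (CloseableIterator<LogEvent> events =
         logLocation.readLog(Filter.EMPTY_FILTER, 0, Long.MAX_VALUE, Integer.MAX_VALUE)) {
    int count = 0;
    while (events.hasNext()) {
      Assert.assertEquals(expectedMessage, events.next().getLoggingEvent().getMessage());
      count++;
    }
    return count;
  }
}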
Use of co.cask.cdap.logging.read.LogEvent in project cdap by caskdata.
The class CDAPLogAppenderTest, method assertLogEventDetails:
private void assertLogEventDetails(LoggingEvent expectedLoggingEvent, LogLocation logLocation) throws IOException {
  Assert.assertEquals(LogLocation.VERSION_1, logLocation.getFrameworkVersion());
  Assert.assertTrue(logLocation.getLocation().exists());
  CloseableIterator<LogEvent> logEventCloseableIterator =
    logLocation.readLog(Filter.EMPTY_FILTER, 0, Long.MAX_VALUE, Integer.MAX_VALUE);
  int logCount = 0;
  while (logEventCloseableIterator.hasNext()) {
    logCount++;
    LogEvent logEvent = logEventCloseableIterator.next();
    Assert.assertEquals(expectedLoggingEvent.getMessage(), logEvent.getLoggingEvent().getMessage());
  }
  logEventCloseableIterator.close();
  Assert.assertEquals(1, logCount);
}
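For context, a hedged sketch of how this helper is presumably invoked from sibling tests, mirroring the listing step in testCDAPLogAppender above (fileMetaDataReader, cdapLogAppender, properties, and event are assumed to be in scope exactly as in that test):

List<LogLocation> files =
  fileMetaDataReader.listFiles(cdapLogAppender.getLoggingPath(properties), 0, Long.MAX_VALUE);
Assert.assertEquals(1, files.size());
assertLogEventDetails(event, files.get(0));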
Use of co.cask.cdap.logging.read.LogEvent in project cdap by caskdata.
The class LocalLogAppenderResilientTest, method testResilientLogging:
@Test
public void testResilientLogging() throws Exception {
  Configuration hConf = new Configuration();
  CConfiguration cConf = CConfiguration.create();
  File datasetDir = new File(tmpFolder.newFolder(), "datasetUser");
  //noinspection ResultOfMethodCallIgnored
  datasetDir.mkdirs();
  cConf.set(Constants.Dataset.Manager.OUTPUT_DIR, datasetDir.getAbsolutePath());
  cConf.set(Constants.Service.MASTER_SERVICES_BIND_ADDRESS, "localhost");
  cConf.set(Constants.Dataset.Executor.ADDRESS, "localhost");
  cConf.setInt(Constants.Dataset.Executor.PORT, Networks.getRandomPort());
  cConf.set(Constants.CFG_LOCAL_DATA_DIR, tmpFolder.newFolder().getAbsolutePath());
  Injector injector = Guice.createInjector(
    new ConfigModule(cConf, hConf),
    new IOModule(),
    new ZKClientModule(),
    new KafkaClientModule(),
    new DiscoveryRuntimeModule().getInMemoryModules(),
    new NonCustomLocationUnitTestModule().getModule(),
    new DataFabricModules().getInMemoryModules(),
    new DataSetsModules().getStandaloneModules(),
    new DataSetServiceModules().getInMemoryModules(),
    new TransactionMetricsModule(),
    new ExploreClientModule(),
    new LoggingModules().getInMemoryModules(),
    new NamespaceClientRuntimeModule().getInMemoryModules(),
    new AuthorizationTestModule(),
    new AuthorizationEnforcementModule().getInMemoryModules(),
    new AuthenticationContextModules().getMasterModule(),
    new AbstractModule() {
      @Override
      protected void configure() {
        bind(UGIProvider.class).to(UnsupportedUGIProvider.class);
        bind(OwnerAdmin.class).to(NoOpOwnerAdmin.class);
      }
    });
  TransactionManager txManager = injector.getInstance(TransactionManager.class);
  txManager.startAndWait();
  DatasetOpExecutorService opExecutorService = injector.getInstance(DatasetOpExecutorService.class);
  opExecutorService.startAndWait();
  // Start the logging before starting the service.
  LoggingContextAccessor.setLoggingContext(
    new FlowletLoggingContext("TRL_ACCT_1", "APP_1", "FLOW_1", "FLOWLET_1", "RUN", "INSTANCE"));
  String logBaseDir = "trl-log/log_files_" + new Random(System.currentTimeMillis()).nextLong();
  cConf.set(LoggingConfiguration.LOG_BASE_DIR, logBaseDir);
  cConf.setInt(LoggingConfiguration.LOG_MAX_FILE_SIZE_BYTES, 20 * 1024);
  final LogAppender appender = injector.getInstance(LocalLogAppender.class);
  new LogAppenderInitializer(appender).initialize("TestResilientLogging");
  int failureMsgCount = 3;
  final CountDownLatch failureLatch = new CountDownLatch(failureMsgCount);
  LoggerContext loggerContext = (LoggerContext) LoggerFactory.getILoggerFactory();
  loggerContext.getStatusManager().add(new StatusListener() {
    @Override
    public void addStatusEvent(Status status) {
      if (status.getLevel() != Status.ERROR || status.getOrigin() != appender) {
        return;
      }
      Throwable cause = status.getThrowable();
      if (cause != null) {
        Throwable rootCause = Throwables.getRootCause(cause);
        if (rootCause instanceof ServiceUnavailableException) {
          String serviceName = ((ServiceUnavailableException) rootCause).getServiceName();
          if (Constants.Service.DATASET_MANAGER.equals(serviceName)) {
            failureLatch.countDown();
          }
        }
      }
    }
  });
  Logger logger = LoggerFactory.getLogger("TestResilientLogging");
  for (int i = 0; i < failureMsgCount; ++i) {
    Exception e1 = new Exception("Test Exception1");
    Exception e2 = new Exception("Test Exception2", e1);
    logger.warn("Test log message " + i + " {} {}", "arg1", "arg2", e2);
  }
  // Wait for the three failed appends to happen.
  // The wait time has to be > 3 seconds because DatasetServiceClient has a 1-second timeout on discovery.
  failureLatch.await(5, TimeUnit.SECONDS);
  // Start the dataset service and wait for it to become discoverable.
  DatasetService dsService = injector.getInstance(DatasetService.class);
  dsService.startAndWait();
  final CountDownLatch startLatch = new CountDownLatch(1);
  DiscoveryServiceClient discoveryClient = injector.getInstance(DiscoveryServiceClient.class);
  discoveryClient.discover(Constants.Service.DATASET_MANAGER).watchChanges(new ServiceDiscovered.ChangeListener() {
    @Override
    public void onChange(ServiceDiscovered serviceDiscovered) {
      if (!Iterables.isEmpty(serviceDiscovered)) {
        startLatch.countDown();
      }
    }
  }, Threads.SAME_THREAD_EXECUTOR);
  startLatch.await(5, TimeUnit.SECONDS);
  // Do some more logging after the service is started.
  for (int i = 5; i < 10; ++i) {
    Exception e1 = new Exception("Test Exception1");
    Exception e2 = new Exception("Test Exception2", e1);
    logger.warn("Test log message " + i + " {} {}", "arg1", "arg2", e2);
  }
  appender.stop();
  // Verify - we should have at least 5 events.
  LoggingContext loggingContext = new FlowletLoggingContext("TRL_ACCT_1", "APP_1", "FLOW_1", "", "RUN", "INSTANCE");
  FileLogReader logTail = injector.getInstance(FileLogReader.class);
  LoggingTester.LogCallback logCallback1 = new LoggingTester.LogCallback();
  logTail.getLogPrev(loggingContext, ReadRange.LATEST, 10, Filter.EMPTY_FILTER, logCallback1);
  List<LogEvent> allEvents = logCallback1.getEvents();
  Assert.assertTrue(allEvents.toString(), allEvents.size() >= 5);
  // Finally, stop all services.
  Services.chainStop(dsService, opExecutorService, txManager);
}
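One detail worth noting in the test above: both failureLatch.await(...) and startLatch.await(...) discard the boolean result, so a latch that never releases only surfaces later as the vague size assertion at the end. A small sketch of a stricter wait, assuming failing fast is acceptable here; awaitOrFail is an illustrative helper, not part of the test:

// Hypothetical helper: fail the test immediately, with a descriptive message,
// if the latch does not release within the timeout.
private static void awaitOrFail(CountDownLatch latch, long timeout, TimeUnit unit, String what)
  throws InterruptedException {
  Assert.assertTrue("Timed out waiting for " + what, latch.await(timeout, unit));
}

Used as awaitOrFail(failureLatch, 5, TimeUnit.SECONDS, "append failures") and awaitOrFail(startLatch, 5, TimeUnit.SECONDS, "dataset service discovery").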
Use of co.cask.cdap.logging.read.LogEvent in project cdap by caskdata.
The class LogLocation, method readToEndSyncPosition:
/**
 * Reads the current block in the Avro file, from the current block sync marker to the next block sync marker.
 */
private List<LogEvent> readToEndSyncPosition(DataFileReader<GenericRecord> dataFileReader, Filter logFilter,
                                             long fromTimeMs, long endSyncPosition) throws IOException {
  List<LogEvent> logSegment = new ArrayList<>();
  long currentSyncPosition = dataFileReader.previousSync();
  // Read events until endSyncPosition is reached; an endSyncPosition of -1 means no end limit.
  while (dataFileReader.hasNext() && (endSyncPosition == -1 || (currentSyncPosition < endSyncPosition))) {
    ILoggingEvent loggingEvent = new LoggingEvent(dataFileReader.next());
    loggingEvent.prepareForDeferredProcessing();
    // Stop once events newer than fromTimeMs are reached.
    if (loggingEvent.getTimeStamp() > fromTimeMs) {
      break;
    }
    if (logFilter.match(loggingEvent)) {
      logSegment.add(new LogEvent(loggingEvent,
                                  new LogOffset(LogOffset.INVALID_KAFKA_OFFSET, loggingEvent.getTimeStamp())));
    }
    currentSyncPosition = dataFileReader.previousSync();
  }
  return logSegment;
}
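To make the sync-marker bookkeeping concrete, here is a minimal sketch of how a caller could position an Avro DataFileReader on a block boundary before invoking readToEndSyncPosition. Only the sync(long) and previousSync() calls are the point; the FsInput-based setup and the path and hConf variables are assumptions for illustration, not necessarily how LogLocation opens its files:

// Open the Avro container file and seek to the first sync marker at or after `position`.
DataFileReader<GenericRecord> dataFileReader =
  new DataFileReader<>(new FsInput(path, hConf), new GenericDatumReader<GenericRecord>());
dataFileReader.sync(position);
// previousSync() now reports the block boundary we landed on; readToEndSyncPosition
// compares it against endSyncPosition to detect when iteration crosses into the next block.
long currentSyncPosition = dataFileReader.previousSync();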
Use of co.cask.cdap.logging.read.LogEvent in project cdap by caskdata.
The class MockLogReader, method getLog:
@Override
public CloseableIterator<LogEvent> getLog(LoggingContext loggingContext, long fromTimeMs, long toTimeMs, Filter filter) {
  CollectingCallback collectingCallback = new CollectingCallback();
  // Since this is only used in tests, there is no need to read logs lazily
  // (which is the purpose of returning an Iterator).
  long fromOffset = getOffset(fromTimeMs / 1000);
  long toOffset = getOffset(toTimeMs / 1000);
  getLogNext(loggingContext, new ReadRange(fromTimeMs, toTimeMs, fromOffset),
             (int) (toOffset - fromOffset), filter, collectingCallback);
  final Iterator<LogEvent> iterator = collectingCallback.getLogEvents().iterator();
  return new CloseableIterator<LogEvent>() {
    @Override
    public boolean hasNext() {
      return iterator.hasNext();
    }

    @Override
    public LogEvent next() {
      return iterator.next();
    }

    @Override
    public void remove() {
      iterator.remove();
    }

    @Override
    public void close() {
      // no-op
    }
  };
}
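The anonymous class above is pure delegation plus a no-op close(). A hedged sketch of a reusable adapter the mock could use instead; closeableFrom is an illustrative name, and CDAP may already ship an equivalent convenience:

// Hypothetical adapter: wrap a plain Iterator in a CloseableIterator whose close() releases nothing.
private static <T> CloseableIterator<T> closeableFrom(final Iterator<T> iterator) {
  return new CloseableIterator<T>() {
    @Override
    public boolean hasNext() {
      return iterator.hasNext();
    }

    @Override
    public T next() {
      return iterator.next();
    }

    @Override
    public void remove() {
      iterator.remove();
    }

    @Override
    public void close() {
      // no-op: the backing in-memory list needs no cleanup
    }
  };
}

With this helper, getLog reduces to return closeableFrom(collectingCallback.getLogEvents().iterator());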