Use of io.cdap.cdap.logging.write.LogLocation in project cdap by caskdata.
The class FileMetaDataReader, method getFiles:
private List<LogLocation> getFiles(StructuredTable metaTable, LogPathIdentifier logPathIdentifier,
                                   long endTimestampMs) throws IOException {
  // create scanner with
  //   start rowkey prefix: context : event-time(0)            : create-time(0)
  //   end rowkey prefix:   context : event-time(endTimestamp) : 0 (create-time doesn't matter for getFiles)
  // and add these files to the list
  List<LogLocation> files = new ArrayList<>();
  Range scanRange = Range.create(getKeyFields(logPathIdentifier.getRowkey(), 0L, 0L), Range.Bound.INCLUSIVE,
                                 getPartialKey(logPathIdentifier.getRowkey(), endTimestampMs), Range.Bound.INCLUSIVE);
  try (CloseableIterator<StructuredRow> iter = metaTable.scan(scanRange, Integer.MAX_VALUE)) {
    while (iter.hasNext()) {
      StructuredRow row = iter.next();
      files.add(fromRow(row, logPathIdentifier.getNamespaceId()));
    }
  }
  return files;
}
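For context, this private helper is presumably reached through the public FileMetaDataReader.listFiles API used elsewhere on this page. The following is a minimal caller-side sketch only: the Guice injector wiring and the identifier values are illustrative assumptions, and the CDAP imports are elided.

// Sketch only: assumes an injector wired as in the CDAP tests below; CDAP imports elided.
FileMetaDataReader reader = injector.getInstance(FileMetaDataReader.class);
LogPathIdentifier identifier = new LogPathIdentifier("default", "testApp", "testService");
// listFiles() resolves the metadata rows and returns LogLocation handles for every
// log file whose metadata falls in the requested time range.
List<LogLocation> files = reader.listFiles(identifier, 0L, System.currentTimeMillis());
for (LogLocation file : files) {
  System.out.println(file.getLocation() + " (framework version " + file.getFrameworkVersion() + ")");
}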
Use of io.cdap.cdap.logging.write.LogLocation in project cdap by caskdata.
The class FileLogReader, method getLogPrev:
@Override
public void getLogPrev(final LoggingContext loggingContext, final ReadRange readRange, final int maxEvents,
                       final Filter filter, final Callback callback) {
  callback.init();
  try {
    Filter logFilter = new AndFilter(ImmutableList.of(LoggingContextHelper.createFilter(loggingContext), filter));
    List<LogLocation> sortedFilesInRange =
      fileMetadataReader.listFiles(LoggingContextHelper.getLogPathIdentifier(loggingContext),
                                   readRange.getFromMillis(), readRange.getToMillis());
    if (sortedFilesInRange.isEmpty()) {
      return;
    }
    long fromTimeMs = readRange.getToMillis() - 1;
    LOG.trace("Using fromTimeMs={}, readRange={}", fromTimeMs, readRange);
    List<Collection<LogEvent>> logSegments = Lists.newLinkedList();
    int count = 0;
    for (LogLocation file : Lists.reverse(sortedFilesInRange)) {
      try {
        LOG.trace("Reading file {}", file);
        Collection<LogEvent> events = file.readLogPrev(logFilter, fromTimeMs, maxEvents - count);
        logSegments.add(events);
        count += events.size();
        if (count >= maxEvents) {
          break;
        }
      } catch (IOException e) {
        LOG.warn("Got exception reading log file {}", file, e);
      }
    }
    for (LogEvent event : Iterables.concat(Lists.reverse(logSegments))) {
      callback.handle(event);
    }
  } catch (Throwable e) {
    LOG.error("Got exception: ", e);
    throw Throwables.propagate(e);
  }
}
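The reverse-accumulation pattern above (read the newest file first, stop once maxEvents have been collected, then reverse the segment list so output is chronological) can be illustrated with a self-contained sketch that uses plain collections in place of readLogPrev; the integers stand in for log events and are illustrative only.

import com.google.common.collect.Iterables;
import com.google.common.collect.Lists;
import java.util.Collection;
import java.util.List;

public class ReverseReadSketch {
  public static void main(String[] args) {
    // Three "files" in chronological order; integers stand in for LogEvents.
    List<List<Integer>> sortedFilesInRange = List.of(List.of(1, 2), List.of(3, 4), List.of(5, 6));
    int maxEvents = 3;
    List<Collection<Integer>> logSegments = Lists.newLinkedList();
    int count = 0;
    for (List<Integer> file : Lists.reverse(sortedFilesInRange)) {
      // Stand-in for file.readLogPrev(filter, fromTimeMs, maxEvents - count):
      // take at most (maxEvents - count) of the newest events in this file.
      int take = Math.min(maxEvents - count, file.size());
      Collection<Integer> events = file.subList(file.size() - take, file.size());
      logSegments.add(events);
      count += events.size();
      if (count >= maxEvents) {
        break;
      }
    }
    // Reversing the segment list restores chronological order across files.
    System.out.println(Lists.newArrayList(Iterables.concat(Lists.reverse(logSegments))));
    // prints [4, 5, 6]: the latest maxEvents events, oldest first
  }
}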
Use of io.cdap.cdap.logging.write.LogLocation in project cdap by caskdata.
The class CDAPLogAppenderTest, method testCDAPLogAppender:
@Test
public void testCDAPLogAppender() {
  int syncInterval = 1024 * 1024;
  CDAPLogAppender cdapLogAppender = new CDAPLogAppender();
  cdapLogAppender.setSyncIntervalBytes(syncInterval);
  cdapLogAppender.setMaxFileLifetimeMs(TimeUnit.DAYS.toMillis(1));
  cdapLogAppender.setMaxFileSizeInBytes(104857600);
  cdapLogAppender.setDirPermissions("700");
  cdapLogAppender.setFilePermissions("600");
  cdapLogAppender.setFileRetentionDurationDays(1);
  cdapLogAppender.setLogCleanupIntervalMins(10);
  cdapLogAppender.setFileCleanupBatchSize(100);
  AppenderContext context = new LocalAppenderContext(injector.getInstance(TransactionRunner.class),
                                                     injector.getInstance(LocationFactory.class),
                                                     new NoOpMetricsCollectionService());
  context.start();
  cdapLogAppender.setContext(context);
  cdapLogAppender.start();
  FileMetaDataReader fileMetaDataReader = injector.getInstance(FileMetaDataReader.class);
  LoggingEvent event = new LoggingEvent("io.cdap.Test",
                                        (ch.qos.logback.classic.Logger) LoggerFactory.getLogger(Logger.ROOT_LOGGER_NAME),
                                        Level.ERROR, "test message", null, null);
  Map<String, String> properties = new HashMap<>();
  properties.put(NamespaceLoggingContext.TAG_NAMESPACE_ID, "default");
  properties.put(ApplicationLoggingContext.TAG_APPLICATION_ID, "testApp");
  properties.put(UserServiceLoggingContext.TAG_USER_SERVICE_ID, "testService");
  event.setMDCPropertyMap(properties);
  cdapLogAppender.doAppend(event);
  cdapLogAppender.stop();
  context.stop();
  try {
    List<LogLocation> files = fileMetaDataReader.listFiles(cdapLogAppender.getLoggingPath(properties), 0, Long.MAX_VALUE);
    Assert.assertEquals(1, files.size());
    LogLocation logLocation = files.get(0);
    Assert.assertEquals(LogLocation.VERSION_1, logLocation.getFrameworkVersion());
    Assert.assertTrue(logLocation.getLocation().exists());
    CloseableIterator<LogEvent> logEventCloseableIterator =
      logLocation.readLog(Filter.EMPTY_FILTER, 0, Long.MAX_VALUE, Integer.MAX_VALUE);
    int logCount = 0;
    while (logEventCloseableIterator.hasNext()) {
      logCount++;
      LogEvent logEvent = logEventCloseableIterator.next();
      Assert.assertEquals(event.getMessage(), logEvent.getLoggingEvent().getMessage());
    }
    logEventCloseableIterator.close();
    Assert.assertEquals(1, logCount);
    // check file permissions: "600" set on the appender maps to rw-------
    String expectedPermissions = "rw-------";
    for (LogLocation file : files) {
      Location location = file.getLocation();
      Assert.assertEquals(expectedPermissions, location.getPermissions());
    }
  } catch (Exception e) {
    Assert.fail();
  }
}
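One small hardening worth noting: the iterator above is only closed when no assertion fails. A try-with-resources variant releases it unconditionally; this is a sketch that reuses the exact readLog call from the test and assumes the CloseableIterator in play is AutoCloseable.

// Sketch: identical readLog call, but the iterator is closed even if an assertion throws.
try (CloseableIterator<LogEvent> iterator =
       logLocation.readLog(Filter.EMPTY_FILTER, 0, Long.MAX_VALUE, Integer.MAX_VALUE)) {
  while (iterator.hasNext()) {
    Assert.assertEquals(event.getMessage(), iterator.next().getLoggingEvent().getMessage());
  }
}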
Use of io.cdap.cdap.logging.write.LogLocation in project cdap by caskdata.
The class FileMetadataTest, method testFileMetadataReadWriteAcrossFormats:
@Test
public void testFileMetadataReadWriteAcrossFormats() throws Exception {
  TransactionRunner transactionRunner = injector.getInstance(TransactionRunner.class);
  FileMetaDataWriter fileMetaDataWriter = new FileMetaDataWriter(transactionRunner);
  LogPathIdentifier logPathIdentifier = new LogPathIdentifier(NamespaceId.DEFAULT.getNamespace(), "testApp", "testFlow");
  LocationFactory locationFactory = injector.getInstance(LocationFactory.class);
  Location location = locationFactory.create(TMP_FOLDER.newFolder().getPath()).append("/logs");
  long currentTime = System.currentTimeMillis();
  long eventTime = currentTime + 20;
  long newCurrentTime = currentTime + 100;
  // 10 files in the new format, with event times eventTime + 1 .. eventTime + 10
  for (int i = 1; i <= 10; i++) {
    fileMetaDataWriter.writeMetaData(logPathIdentifier, eventTime + i, newCurrentTime + i,
                                     location.append("testFileNew" + Integer.toString(i)));
  }
  // reader test
  FileMetaDataReader fileMetadataReader = injector.getInstance(FileMetaDataReader.class);
  // scan only within the new files' time range
  List<LogLocation> locations = fileMetadataReader.listFiles(logPathIdentifier, eventTime + 2, eventTime + 6);
  // should include 6 files (testFileNew1..testFileNew6): the file starting just before the range is also returned
  Assert.assertEquals(6, locations.size());
  for (LogLocation logLocation : locations) {
    Assert.assertEquals(LogLocation.VERSION_1, logLocation.getFrameworkVersion());
  }
  // scan a time range that starts before the first event time
  locations = fileMetadataReader.listFiles(logPathIdentifier, currentTime + 2, eventTime + 6);
  // should again include files 1..6
  Assert.assertEquals(6, locations.size());
  for (int i = 0; i < locations.size(); i++) {
    Assert.assertEquals(LogLocation.VERSION_1, locations.get(i).getFrameworkVersion());
    Assert.assertEquals(location.append("testFileNew" + Integer.toString(i + 1)), locations.get(i).getLocation());
  }
}
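As a smaller companion sketch (reusing the fileMetaDataWriter, fileMetadataReader, and location set up in the test above; the identifier, file name, and timestamps are illustrative), a single write followed by a read over an enclosing range returns exactly that one entry:

// Sketch: single-entry round trip with the writer/reader created above.
LogPathIdentifier sketchId = new LogPathIdentifier(NamespaceId.DEFAULT.getNamespace(), "sketchApp", "sketchService");
long ts = System.currentTimeMillis();
fileMetaDataWriter.writeMetaData(sketchId, ts, ts, location.append("sketchFile"));
List<LogLocation> single = fileMetadataReader.listFiles(sketchId, ts - 1, ts + 1);
Assert.assertEquals(1, single.size());
Assert.assertEquals(LogLocation.VERSION_1, single.get(0).getFrameworkVersion());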
Use of io.cdap.cdap.logging.write.LogLocation in project cdap by caskdata.
The class FileMetadataCleanerTest, method testWithBatchSizeLargerThanNumOfFiles:
@Test
public void testWithBatchSizeLargerThanNumOfFiles() throws Exception {
  TransactionRunner transactionRunner = injector.getInstance(TransactionRunner.class);
  FileMetaDataWriter fileMetaDataWriter = new FileMetaDataWriter(transactionRunner);
  FileMetaDataReader fileMetadataReader = injector.getInstance(FileMetaDataReader.class);
  FileMetadataCleaner fileMetadataCleaner = new FileMetadataCleaner(transactionRunner);
  try {
    LogPathIdentifier identifier = new LogPathIdentifier(NamespaceId.DEFAULT.getNamespace(), "testApp",
                                                         String.format("testFlow%s", 0));
    LocationFactory locationFactory = injector.getInstance(LocationFactory.class);
    Location location = locationFactory.create(TMP_FOLDER.newFolder().getPath()).append("/logs");
    long currentTime = System.currentTimeMillis();
    long newCurrentTime = currentTime + 100;
    for (int j = 0; j < 10; j++) {
      fileMetaDataWriter.writeMetaData(identifier, newCurrentTime + j, newCurrentTime + j,
                                       location.append("testFileNew" + Integer.toString(j)));
    }
    List<LogLocation> locations = fileMetadataReader.listFiles(identifier, newCurrentTime, newCurrentTime + 10);
    // should include all 10 files (0..9)
    Assert.assertEquals(10, locations.size());
    long tillTime = newCurrentTime + 4;
    List<FileMetadataCleaner.DeletedEntry> deleteEntries = fileMetadataCleaner.scanAndGetFilesToDelete(tillTime, 1000);
    Assert.assertEquals(5, deleteEntries.size());
  } finally {
    // clean up the metadata written by this test
    deleteAllMetaEntries(transactionRunner);
  }
}
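Why 5 entries: the ten metadata entries carry event times newCurrentTime + 0 through newCurrentTime + 9, and, judging by the assertion, scanAndGetFilesToDelete treats tillTime = newCurrentTime + 4 as an inclusive cutoff, so offsets 0 through 4 qualify. A tiny self-contained check of that arithmetic (illustrative only; the inclusive cutoff is inferred from the test, not from the cleaner's documentation):

// Illustrative arithmetic only.
long newCurrentTime = System.currentTimeMillis() + 100;
long tillTime = newCurrentTime + 4;
int expected = 0;
for (int j = 0; j < 10; j++) {
  if (newCurrentTime + j <= tillTime) {
    expected++;
  }
}
System.out.println(expected); // 5, matching Assert.assertEquals(5, deleteEntries.size())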