
Example 1 with LogLocation

Use of co.cask.cdap.logging.write.LogLocation in project cdap by caskdata.

From the class FileMetadataTest, method testFileMetadataReadWrite:

@Test
public void testFileMetadataReadWrite() throws Exception {
    DatasetFramework datasetFramework = injector.getInstance(DatasetFramework.class);
    DatasetManager datasetManager = new DefaultDatasetManager(datasetFramework, NamespaceId.SYSTEM, co.cask.cdap.common.service.RetryStrategies.noRetry());
    Transactional transactional = Transactions.createTransactionalWithRetry(
        Transactions.createTransactional(new MultiThreadDatasetCache(
            new SystemDatasetInstantiator(datasetFramework), injector.getInstance(TransactionSystemClient.class),
            NamespaceId.SYSTEM, ImmutableMap.<String, String>of(), null, null)),
        RetryStrategies.retryOnConflict(20, 100));
    FileMetaDataWriter fileMetaDataWriter = new FileMetaDataWriter(datasetManager, transactional);
    LogPathIdentifier logPathIdentifier = new LogPathIdentifier(NamespaceId.DEFAULT.getNamespace(), "testApp", "testFlow");
    LocationFactory locationFactory = injector.getInstance(LocationFactory.class);
    Location location = locationFactory.create(TMP_FOLDER.newFolder().getPath()).append("/logs");
    long currentTime = System.currentTimeMillis();
    for (int i = 10; i <= 100; i += 10) {
        // i is the event time
        fileMetaDataWriter.writeMetaData(logPathIdentifier, i, currentTime, location.append(Integer.toString(i)));
    }
    // for event timestamp 80, add two more metadata entries with different creation times
    fileMetaDataWriter.writeMetaData(logPathIdentifier, 80, currentTime + 1, location.append("81"));
    fileMetaDataWriter.writeMetaData(logPathIdentifier, 80, currentTime + 2, location.append("82"));
    // reader test: the full range sees all 10 files plus the 2 extra entries written for event time 80
    FileMetaDataReader fileMetadataReader = injector.getInstance(FileMetaDataReader.class);
    Assert.assertEquals(12, fileMetadataReader.listFiles(logPathIdentifier, 0, 100).size());
    Assert.assertEquals(5, fileMetadataReader.listFiles(logPathIdentifier, 20, 50).size());
    Assert.assertEquals(2, fileMetadataReader.listFiles(logPathIdentifier, 100, 150).size());
    // should include the latest file with event start time 80.
    List<LogLocation> locationList = fileMetadataReader.listFiles(logPathIdentifier, 81, 85);
    Assert.assertEquals(1, locationList.size());
    Assert.assertEquals(80, locationList.get(0).getEventTimeMs());
    Assert.assertEquals(location.append("82"), locationList.get(0).getLocation());
    Assert.assertEquals(1, fileMetadataReader.listFiles(logPathIdentifier, 150, 1000).size());
}
Also used : FileMetaDataWriter(co.cask.cdap.logging.meta.FileMetaDataWriter) DefaultDatasetManager(co.cask.cdap.data2.datafabric.dataset.DefaultDatasetManager) DatasetManager(co.cask.cdap.api.dataset.DatasetManager) LocationFactory(org.apache.twill.filesystem.LocationFactory) DatasetFramework(co.cask.cdap.data2.dataset2.DatasetFramework) TransactionSystemClient(org.apache.tephra.TransactionSystemClient) MultiThreadDatasetCache(co.cask.cdap.data2.dataset2.MultiThreadDatasetCache) SystemDatasetInstantiator(co.cask.cdap.data.dataset.SystemDatasetInstantiator) LogLocation(co.cask.cdap.logging.write.LogLocation) LogPathIdentifier(co.cask.cdap.logging.appender.system.LogPathIdentifier) FileMetaDataReader(co.cask.cdap.logging.meta.FileMetaDataReader) Transactional(co.cask.cdap.api.Transactional) Location(org.apache.twill.filesystem.Location) Test(org.junit.Test)
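
Distilled from the test above, the metadata round trip is two calls: FileMetaDataWriter.writeMetaData records an event time, a creation time and a file location for a LogPathIdentifier, and FileMetaDataReader.listFiles returns the matching LogLocation entries for a time range. A minimal sketch under the same setup as the test (fileMetaDataWriter, fileMetadataReader and location are assumed to be wired exactly as above; eventTimeMs, startMs and endMs are placeholder values):

// Sketch only: the write/read pairing exercised by the test above.
LogPathIdentifier id = new LogPathIdentifier(NamespaceId.DEFAULT.getNamespace(), "testApp", "testFlow");
fileMetaDataWriter.writeMetaData(id, eventTimeMs, System.currentTimeMillis(), location.append("log-file"));

// later: list every file whose metadata falls in the range [startMs, endMs]
for (LogLocation file : fileMetadataReader.listFiles(id, startMs, endMs)) {
    // getEventTimeMs() is the event start time stored with the file's metadata
    System.out.println(file.getEventTimeMs() + " -> " + file.getLocation());
}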

Example 2 with LogLocation

Use of co.cask.cdap.logging.write.LogLocation in project cdap by caskdata.

From the class CDAPLogAppenderTest, method testCDAPLogAppender:

@Test
public void testCDAPLogAppender() throws Exception {
    int syncInterval = 1024 * 1024;
    CDAPLogAppender cdapLogAppender = new CDAPLogAppender();
    cdapLogAppender.setSyncIntervalBytes(syncInterval);
    cdapLogAppender.setMaxFileLifetimeMs(TimeUnit.DAYS.toMillis(1));
    cdapLogAppender.setMaxFileSizeInBytes(104857600);
    cdapLogAppender.setDirPermissions("700");
    cdapLogAppender.setFilePermissions("600");
    cdapLogAppender.setFileRetentionDurationDays(1);
    cdapLogAppender.setLogCleanupIntervalMins(10);
    cdapLogAppender.setFileCleanupTransactionTimeout(30);
    AppenderContext context = new LocalAppenderContext(injector.getInstance(DatasetFramework.class),
                                                       injector.getInstance(TransactionSystemClient.class),
                                                       injector.getInstance(LocationFactory.class),
                                                       new NoOpMetricsCollectionService());
    context.start();
    cdapLogAppender.setContext(context);
    cdapLogAppender.start();
    FileMetaDataReader fileMetaDataReader = injector.getInstance(FileMetaDataReader.class);
    LoggingEvent event = new LoggingEvent("co.cask.Test", (ch.qos.logback.classic.Logger) LoggerFactory.getLogger(Logger.ROOT_LOGGER_NAME), Level.ERROR, "test message", null, null);
    Map<String, String> properties = new HashMap<>();
    properties.put(NamespaceLoggingContext.TAG_NAMESPACE_ID, "default");
    properties.put(ApplicationLoggingContext.TAG_APPLICATION_ID, "testApp");
    properties.put(FlowletLoggingContext.TAG_FLOW_ID, "testFlow");
    properties.put(FlowletLoggingContext.TAG_FLOWLET_ID, "testFlowlet");
    event.setMDCPropertyMap(properties);
    cdapLogAppender.doAppend(event);
    cdapLogAppender.stop();
    context.stop();
    try {
        List<LogLocation> files = fileMetaDataReader.listFiles(cdapLogAppender.getLoggingPath(properties), 0, Long.MAX_VALUE);
        Assert.assertEquals(1, files.size());
        LogLocation logLocation = files.get(0);
        Assert.assertEquals(LogLocation.VERSION_1, logLocation.getFrameworkVersion());
        Assert.assertTrue(logLocation.getLocation().exists());
        CloseableIterator<LogEvent> logEventCloseableIterator = logLocation.readLog(Filter.EMPTY_FILTER, 0, Long.MAX_VALUE, Integer.MAX_VALUE);
        int logCount = 0;
        while (logEventCloseableIterator.hasNext()) {
            logCount++;
            LogEvent logEvent = logEventCloseableIterator.next();
            Assert.assertEquals(event.getMessage(), logEvent.getLoggingEvent().getMessage());
        }
        logEventCloseableIterator.close();
        Assert.assertEquals(1, logCount);
        // checking permission
        String expectedPermissions = "rw-------";
        for (LogLocation file : files) {
            Location location = file.getLocation();
            Assert.assertEquals(expectedPermissions, location.getPermissions());
        }
    } catch (Exception e) {
        Assert.fail();
    }
}
Also used : HashMap(java.util.HashMap) LogEvent(co.cask.cdap.logging.read.LogEvent) NoOpMetricsCollectionService(co.cask.cdap.common.metrics.NoOpMetricsCollectionService) IOException(java.io.IOException) LocationFactory(org.apache.twill.filesystem.LocationFactory) DatasetFramework(co.cask.cdap.data2.dataset2.DatasetFramework) LoggingEvent(ch.qos.logback.classic.spi.LoggingEvent) TransactionSystemClient(org.apache.tephra.TransactionSystemClient) LocalAppenderContext(co.cask.cdap.logging.framework.LocalAppenderContext) LogLocation(co.cask.cdap.logging.write.LogLocation) AppenderContext(co.cask.cdap.api.logging.AppenderContext) FileMetaDataReader(co.cask.cdap.logging.meta.FileMetaDataReader) Location(org.apache.twill.filesystem.Location) Test(org.junit.Test)
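
The read-back half of this test is the part most callers reuse: each LogLocation returned by FileMetaDataReader.listFiles exposes readLog, which yields a CloseableIterator<LogEvent> bounded by a filter, a time range and an event limit. A minimal sketch of that loop, using try-with-resources instead of the explicit close() in the test (names as in the example above; the iterator is Closeable, so the two forms are equivalent):

// Sketch only: read every stored event for the logging context used by the test.
List<LogLocation> files = fileMetaDataReader.listFiles(cdapLogAppender.getLoggingPath(properties), 0, Long.MAX_VALUE);
for (LogLocation logLocation : files) {
    try (CloseableIterator<LogEvent> events =
             logLocation.readLog(Filter.EMPTY_FILTER, 0, Long.MAX_VALUE, Integer.MAX_VALUE)) {
        while (events.hasNext()) {
            // each LogEvent wraps the original logback ILoggingEvent
            System.out.println(events.next().getLoggingEvent().getFormattedMessage());
        }
    }
}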

Example 3 with LogLocation

Use of co.cask.cdap.logging.write.LogLocation in project cdap by caskdata.

From the class FileMetaDataReader, method getFilesInOldFormat:

private List<LogLocation> getFilesInOldFormat(Table metaTable, LogPathIdentifier logPathIdentifier, long endTimestampMs) throws Exception {
    List<LogLocation> files = new ArrayList<>();
    final Row cols = metaTable.get(getOldRowKey(logPathIdentifier));
    for (final Map.Entry<byte[], byte[]> entry : cols.getColumns().entrySet()) {
        // in the old format the column key is just the 8-byte event timestamp
        if (entry.getKey().length == 8) {
            long eventTimestamp = Bytes.toLong(entry.getKey());
            if (eventTimestamp <= endTimestampMs) {
                Location fileLocation = impersonator.doAs(new NamespaceId(logPathIdentifier.getNamespaceId()), new Callable<Location>() {

                    @Override
                    public Location call() throws Exception {
                        // we stored uri in old format
                        return Locations.getLocationFromAbsolutePath(locationFactory, new URI(Bytes.toString(entry.getValue())).getPath());
                    }
                });
                // old format; use 0 as the creation time since that information is not available
                files.add(new LogLocation(LogLocation.VERSION_0, eventTimestamp, 0,
                                          fileLocation, logPathIdentifier.getNamespaceId(), impersonator));
            }
        } else {
            LOG.warn("For row-key {}, got column entry with unexpected key length {}", logPathIdentifier.getOldRowkey(), entry.getKey().length);
        }
    }
    return files;
}
Also used : ArrayList(java.util.ArrayList) URI(java.net.URI) TransactionFailureException(org.apache.tephra.TransactionFailureException) URISyntaxException(java.net.URISyntaxException) LogLocation(co.cask.cdap.logging.write.LogLocation) Row(co.cask.cdap.api.dataset.table.Row) NamespaceId(co.cask.cdap.proto.id.NamespaceId) Map(java.util.Map) ImmutableMap(com.google.common.collect.ImmutableMap) Location(org.apache.twill.filesystem.Location)
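
For reference, the VERSION_0 layout this method parses keeps all files of one logging context in a single row: the column qualifier is the 8-byte event timestamp and the column value is the file URI. A sketch of decoding a single column entry, mirroring the loop above (entry, locationFactory, impersonator and logPathIdentifier are the names used in the surrounding method):

// Sketch only: one column entry of an old-format metadata row.
long eventTimestamp = Bytes.toLong(entry.getKey());    // 8-byte column key = event start time
String storedUri = Bytes.toString(entry.getValue());   // column value = file URI
Location fileLocation = Locations.getLocationFromAbsolutePath(locationFactory, new URI(storedUri).getPath());
// the creation time is unknown in the old format, so 0 is passed
LogLocation logLocation = new LogLocation(LogLocation.VERSION_0, eventTimestamp, 0,
                                          fileLocation, logPathIdentifier.getNamespaceId(), impersonator);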

Example 4 with LogLocation

Use of co.cask.cdap.logging.write.LogLocation in project cdap by caskdata.

From the class FileMetaDataReader, method getFilesInNewFormat:

private List<LogLocation> getFilesInNewFormat(Table metaTable, LogPathIdentifier logPathIdentifier, long endTimestampMs) throws URISyntaxException {
    // create scanner with
    // start rowkey prefix:context:event-time(0):create-time(0)
    // end rowkey  prefix:context:event-time(endTimestamp):0(create-time doesn't matter for get files)
    // add these files to the list
    List<LogLocation> files = new ArrayList<>();
    byte[] logPathIdBytes = Bytes.toBytes(logPathIdentifier.getRowkey());
    byte[] startRowKey = Bytes.concat(LoggingStoreTableUtil.NEW_FILE_META_ROW_KEY_PREFIX, logPathIdBytes, Bytes.toBytes(0L), Bytes.toBytes(0L));
    // the end row-key is exclusive, so use endTimestamp + 1
    byte[] endRowKey = Bytes.concat(LoggingStoreTableUtil.NEW_FILE_META_ROW_KEY_PREFIX, logPathIdBytes,
                                    Bytes.toBytes(endTimestampMs + 1), Bytes.toBytes(0L));
    int prefixLength = LoggingStoreTableUtil.NEW_FILE_META_ROW_KEY_PREFIX.length + logPathIdBytes.length;
    try (Scanner scanner = metaTable.scan(startRowKey, endRowKey)) {
        Row row;
        while ((row = scanner.next()) != null) {
            // column value is the file location
            byte[] value = row.get(LoggingStoreTableUtil.META_TABLE_COLUMN_KEY);
            // event time and create time are encoded in the row key; the path is stored in the new (absolute path) format
            files.add(new LogLocation(LogLocation.VERSION_1,
                                      Bytes.toLong(row.getRow(), prefixLength, Bytes.SIZEOF_LONG),
                                      Bytes.toLong(row.getRow(), prefixLength + Bytes.SIZEOF_LONG, Bytes.SIZEOF_LONG),
                                      Locations.getLocationFromAbsolutePath(locationFactory, Bytes.toString(value)),
                                      logPathIdentifier.getNamespaceId(), impersonator));
        }
    }
    return files;
}
Also used : Scanner(co.cask.cdap.api.dataset.table.Scanner) LogLocation(co.cask.cdap.logging.write.LogLocation) ArrayList(java.util.ArrayList) Row(co.cask.cdap.api.dataset.table.Row)
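
The VERSION_1 layout read here spreads files across rows instead of columns: each row key is the concatenation prefix | context | event-time | create-time, and the file's absolute path sits in a single column. A sketch of how one such row key is laid out, matching the scan bounds built above (eventTimeMs and createTimeMs are placeholder values; the prefix, column constant and logPathIdentifier are the ones used in the method):

// Sketch only: layout of a new-format metadata row key, matching the scan bounds above.
byte[] contextBytes = Bytes.toBytes(logPathIdentifier.getRowkey());
byte[] rowKey = Bytes.concat(
    LoggingStoreTableUtil.NEW_FILE_META_ROW_KEY_PREFIX, // fixed table prefix
    contextBytes,                                       // logging context (row key of the LogPathIdentifier)
    Bytes.toBytes(eventTimeMs),                         // 8-byte event start time
    Bytes.toBytes(createTimeMs));                       // 8-byte file creation time
// the LoggingStoreTableUtil.META_TABLE_COLUMN_KEY column of this row holds the file's absolute path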

Example 5 with LogLocation

Use of co.cask.cdap.logging.write.LogLocation in project cdap by caskdata.

From the class FileLogReader, method getLogPrev:

@Override
public void getLogPrev(final LoggingContext loggingContext, final ReadRange readRange, final int maxEvents, final Filter filter, final Callback callback) {
    callback.init();
    try {
        Filter logFilter = new AndFilter(ImmutableList.of(LoggingContextHelper.createFilter(loggingContext), filter));
        List<LogLocation> sortedFilesInRange = fileMetadataReader.listFiles(LoggingContextHelper.getLogPathIdentifier(loggingContext), readRange.getFromMillis(), readRange.getToMillis());
        if (sortedFilesInRange.isEmpty()) {
            return;
        }
        long fromTimeMs = readRange.getToMillis() - 1;
        LOG.trace("Using fromTimeMs={}, readRange={}", fromTimeMs, readRange);
        List<Collection<LogEvent>> logSegments = Lists.newLinkedList();
        int count = 0;
        for (LogLocation file : Lists.reverse(sortedFilesInRange)) {
            try {
                LOG.trace("Reading file {}", file);
                Collection<LogEvent> events = file.readLogPrev(logFilter, fromTimeMs, maxEvents - count);
                logSegments.add(events);
                count += events.size();
                if (count >= maxEvents) {
                    break;
                }
            } catch (IOException e) {
                LOG.warn("Got exception reading log file {}", file, e);
            }
        }
        for (LogEvent event : Iterables.concat(Lists.reverse(logSegments))) {
            callback.handle(event);
        }
    } catch (Throwable e) {
        LOG.error("Got exception: ", e);
        throw Throwables.propagate(e);
    }
}
Also used : AndFilter(co.cask.cdap.logging.filter.AndFilter) Filter(co.cask.cdap.logging.filter.Filter) LogLocation(co.cask.cdap.logging.write.LogLocation) Collection(java.util.Collection) IOException(java.io.IOException)
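
The double reversal is the subtle part of getLogPrev: files are visited newest-first (Lists.reverse(sortedFilesInRange)) so that at most maxEvents of the most recent events are collected, and the collected segments are reversed again before Iterables.concat so the callback still receives events in chronological order. A stripped-down sketch of that pattern with plain strings, independent of the CDAP types (sortedFiles, collectFromFile and handle are placeholders):

// Sketch only: collect the most recent items walking backwards, then emit them in order.
List<List<String>> segments = new LinkedList<>();
int count = 0;
for (String file : Lists.reverse(sortedFiles)) {                      // newest file first
    List<String> events = collectFromFile(file, maxEvents - count);   // stand-in for file.readLogPrev(...)
    segments.add(events);
    count += events.size();
    if (count >= maxEvents) {
        break;
    }
}
for (String event : Iterables.concat(Lists.reverse(segments))) {      // back to chronological order
    handle(event);
}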

Aggregations

LogLocation (co.cask.cdap.logging.write.LogLocation)13 FileMetaDataReader (co.cask.cdap.logging.meta.FileMetaDataReader)7 Test (org.junit.Test)7 DatasetFramework (co.cask.cdap.data2.dataset2.DatasetFramework)6 TransactionSystemClient (org.apache.tephra.TransactionSystemClient)6 Location (org.apache.twill.filesystem.Location)6 LocationFactory (org.apache.twill.filesystem.LocationFactory)6 IOException (java.io.IOException)5 LogPathIdentifier (co.cask.cdap.logging.appender.system.LogPathIdentifier)4 ArrayList (java.util.ArrayList)4 LoggingEvent (ch.qos.logback.classic.spi.LoggingEvent)3 Transactional (co.cask.cdap.api.Transactional)3 DatasetManager (co.cask.cdap.api.dataset.DatasetManager)3 AppenderContext (co.cask.cdap.api.logging.AppenderContext)3 NoOpMetricsCollectionService (co.cask.cdap.common.metrics.NoOpMetricsCollectionService)3 SystemDatasetInstantiator (co.cask.cdap.data.dataset.SystemDatasetInstantiator)3 DefaultDatasetManager (co.cask.cdap.data2.datafabric.dataset.DefaultDatasetManager)3 MultiThreadDatasetCache (co.cask.cdap.data2.dataset2.MultiThreadDatasetCache)3 AndFilter (co.cask.cdap.logging.filter.AndFilter)3 Filter (co.cask.cdap.logging.filter.Filter)3