Example 21 with LoggingContext

use of co.cask.cdap.common.logging.LoggingContext in project cdap by caskdata.

the class LogAppender method append.

public final void append(ILoggingEvent eventObject) {
    LoggingContext loggingContext;
    // If the event already carries a context (async mode), use it.
    if (eventObject instanceof LogMessage) {
        loggingContext = ((LogMessage) eventObject).getLoggingContext();
    } else {
        // Otherwise fall back to the context bound to the current thread;
        // drop the event if no context has been set.
        loggingContext = LoggingContextAccessor.getLoggingContext();
        if (loggingContext == null) {
            return;
        }
    }
    LogMessage logMessage = new LogMessage(eventObject, loggingContext);
    addExtraTags(logMessage);
    appendEvent(logMessage);
}
Also used : LoggingContext(co.cask.cdap.common.logging.LoggingContext)
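Note that append() returns without doing anything when no context is available, so callers must bind a LoggingContext to the current thread first. A minimal caller-side sketch (the namespace/component/service constructor arguments are placeholders; ServiceLoggingContext and LoggingContextAccessor are used the same way in the examples below):

import co.cask.cdap.common.logging.LoggingContext;
import co.cask.cdap.common.logging.LoggingContextAccessor;
import co.cask.cdap.common.logging.ServiceLoggingContext;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class LoggingContextUsage {
    public static void main(String[] args) {
        // Bind a context to the current thread; without this, append() above
        // returns early and the event is dropped. The three constructor
        // arguments are placeholder namespace/component/service names.
        LoggingContext context = new ServiceLoggingContext("system", "services", "example");
        LoggingContextAccessor.setLoggingContext(context);
        Logger logger = LoggerFactory.getLogger(LoggingContextUsage.class);
        // This event now reaches appendEvent() tagged with the context above.
        logger.info("event with logging context attached");
    }
}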

Example 22 with LoggingContext

use of co.cask.cdap.common.logging.LoggingContext in project cdap by caskdata.

the class DistributedLogFrameworkTest method testFramework.

@Test
public void testFramework() throws Exception {
    DistributedLogFramework framework = injector.getInstance(DistributedLogFramework.class);
    CConfiguration cConf = injector.getInstance(CConfiguration.class);
    framework.startAndWait();
    // Send some logs to Kafka.
    LoggingContext context = new ServiceLoggingContext(NamespaceId.SYSTEM.getNamespace(), Constants.Logging.COMPONENT_NAME, "test");
    // Make sure all events get flushed in the same batch
    long eventTimeBase = System.currentTimeMillis() + cConf.getInt(Constants.Logging.PIPELINE_EVENT_DELAY_MS);
    final int msgCount = 50;
    for (int i = 0; i < msgCount; i++) {
        // Publish logs in descending timestamp order
        publishLog(cConf.get(Constants.Logging.KAFKA_TOPIC), context, ImmutableList.of(createLoggingEvent("co.cask.test." + i, Level.INFO, "Testing " + i, eventTimeBase - i)));
    }
    // Read the logs back. They should come back sorted by timestamp.
    final FileMetaDataReader metaDataReader = injector.getInstance(FileMetaDataReader.class);
    Tasks.waitFor(true, new Callable<Boolean>() {

        @Override
        public Boolean call() throws Exception {
            List<LogLocation> locations = metaDataReader.listFiles(new LogPathIdentifier(NamespaceId.SYSTEM.getNamespace(), Constants.Logging.COMPONENT_NAME, "test"), 0, Long.MAX_VALUE);
            if (locations.size() != 1) {
                return false;
            }
            LogLocation location = locations.get(0);
            int i = 0;
            try {
                try (CloseableIterator<LogEvent> iter = location.readLog(Filter.EMPTY_FILTER, 0, Long.MAX_VALUE, msgCount)) {
                    while (iter.hasNext()) {
                        String expectedMsg = "Testing " + (msgCount - i - 1);
                        LogEvent event = iter.next();
                        if (!expectedMsg.equals(event.getLoggingEvent().getMessage())) {
                            return false;
                        }
                        i++;
                    }
                    return i == msgCount;
                }
            } catch (Exception e) {
                // Reading can fail because of the delay between when the log file is
                // created and when its contents are actually flushed to the file;
                // return false so Tasks.waitFor retries.
                return false;
            }
        }
    }, 10, TimeUnit.SECONDS, msgCount, TimeUnit.MILLISECONDS);
    framework.stopAndWait();
    // Check that the checkpoint is persisted correctly. Since all messages were
    // processed, the checkpoint's next offset should equal the message count.
    Checkpoint checkpoint = injector.getInstance(CheckpointManagerFactory.class).create(cConf.get(Constants.Logging.KAFKA_TOPIC), Bytes.toBytes(100)).getCheckpoint(0);
    Assert.assertEquals(msgCount, checkpoint.getNextOffset());
}
Also used : CloseableIterator(co.cask.cdap.api.dataset.lib.CloseableIterator) LoggingContext(co.cask.cdap.common.logging.LoggingContext) ServiceLoggingContext(co.cask.cdap.common.logging.ServiceLoggingContext) LogEvent(co.cask.cdap.logging.read.LogEvent) CConfiguration(co.cask.cdap.common.conf.CConfiguration) Checkpoint(co.cask.cdap.logging.meta.Checkpoint) IOException(java.io.IOException) LogLocation(co.cask.cdap.logging.write.LogLocation) FileMetaDataReader(co.cask.cdap.logging.meta.FileMetaDataReader) LogPathIdentifier(co.cask.cdap.logging.appender.system.LogPathIdentifier) List(java.util.List) ImmutableList(com.google.common.collect.ImmutableList) Test(org.junit.Test)
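The createLoggingEvent and publishLog helpers are elided from this listing. A hypothetical sketch of what createLoggingEvent might look like using plain Logback (the actual helper in DistributedLogFrameworkTest is not shown and may differ):

import ch.qos.logback.classic.Level;
import ch.qos.logback.classic.spi.ILoggingEvent;
import ch.qos.logback.classic.spi.LoggingEvent;

// Hypothetical version of the test's createLoggingEvent helper: builds a bare
// Logback event with the given logger name, level, message, and timestamp.
private ILoggingEvent createLoggingEvent(String loggerName, Level level, String message, long timestamp) {
    LoggingEvent event = new LoggingEvent();
    event.setLoggerName(loggerName);
    event.setLevel(level);
    event.setMessage(message);
    event.setTimeStamp(timestamp);
    return event;
}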

Example 23 with LoggingContext

use of co.cask.cdap.common.logging.LoggingContext in project cdap by caskdata.

the class FileMetadataTest method testFileMetadataReadWriteAcrossFormats.

@Test
public void testFileMetadataReadWriteAcrossFormats() throws Exception {
    DatasetFramework datasetFramework = injector.getInstance(DatasetFramework.class);
    DatasetManager datasetManager = new DefaultDatasetManager(datasetFramework, NamespaceId.SYSTEM, co.cask.cdap.common.service.RetryStrategies.noRetry());
    Transactional transactional = Transactions.createTransactionalWithRetry(Transactions.createTransactional(new MultiThreadDatasetCache(new SystemDatasetInstantiator(datasetFramework), injector.getInstance(TransactionSystemClient.class), NamespaceId.SYSTEM, ImmutableMap.<String, String>of(), null, null)), RetryStrategies.retryOnConflict(20, 100));
    FileMetaDataManager fileMetaDataManager = injector.getInstance(FileMetaDataManager.class);
    FileMetaDataWriter fileMetaDataWriter = new FileMetaDataWriter(datasetManager, transactional);
    LogPathIdentifier logPathIdentifier = new LogPathIdentifier(NamespaceId.DEFAULT.getNamespace(), "testApp", "testFlow");
    LocationFactory locationFactory = injector.getInstance(LocationFactory.class);
    Location location = locationFactory.create(TMP_FOLDER.newFolder().getPath()).append("/logs");
    long currentTime = System.currentTimeMillis();
    LoggingContext loggingContext = LoggingContextHelper.getLoggingContext(NamespaceId.DEFAULT.getNamespace(), "testApp", "testFlow");
    // 10 files in old format
    for (int i = 1; i <= 10; i++) {
        // i is the event time
        fileMetaDataManager.writeMetaData(loggingContext, currentTime + i, location.append("testFile" + Integer.toString(i)));
    }
    long eventTime = currentTime + 20;
    long newCurrentTime = currentTime + 100;
    // 10 files in new format
    for (int i = 1; i <= 10; i++) {
        fileMetaDataWriter.writeMetaData(logPathIdentifier, eventTime + i, newCurrentTime + i, location.append("testFileNew" + Integer.toString(i)));
    }
    // reader test
    FileMetaDataReader fileMetadataReader = injector.getInstance(FileMetaDataReader.class);
    // scan only within the old files' time range
    List<LogLocation> locations = fileMetadataReader.listFiles(logPathIdentifier, currentTime + 2, currentTime + 6);
    // should include old-format files 1..6: files 2..6 fall inside the range, plus
    // the file immediately preceding the start time, which may still hold events in range
    Assert.assertEquals(6, locations.size());
    for (LogLocation logLocation : locations) {
        Assert.assertEquals(LogLocation.VERSION_0, logLocation.getFrameworkVersion());
    }
    // scan only within the new files' time range
    locations = fileMetadataReader.listFiles(logPathIdentifier, eventTime + 2, eventTime + 6);
    // should include new-format files 1..6 (the eventTime range), by the same logic as above
    Assert.assertEquals(6, locations.size());
    for (LogLocation logLocation : locations) {
        Assert.assertEquals(LogLocation.VERSION_1, logLocation.getFrameworkVersion());
    }
    // scan time range across formats
    locations = fileMetadataReader.listFiles(logPathIdentifier, currentTime + 2, eventTime + 6);
    // should include files from old range (1..10) and new range (1..6)
    Assert.assertEquals(16, locations.size());
    for (int i = 0; i < locations.size(); i++) {
        if (i < 10) {
            Assert.assertEquals(LogLocation.VERSION_0, locations.get(i).getFrameworkVersion());
            Assert.assertEquals(location.append("testFile" + Integer.toString(i + 1)), locations.get(i).getLocation());
        } else {
            Assert.assertEquals(LogLocation.VERSION_1, locations.get(i).getFrameworkVersion());
            Assert.assertEquals(location.append("testFileNew" + Integer.toString(i - 9)), locations.get(i).getLocation());
        }
    }
}
Also used : FileMetaDataWriter(co.cask.cdap.logging.meta.FileMetaDataWriter) LoggingContext(co.cask.cdap.common.logging.LoggingContext) DatasetManager(co.cask.cdap.api.dataset.DatasetManager) DefaultDatasetManager(co.cask.cdap.data2.datafabric.dataset.DefaultDatasetManager) LocationFactory(org.apache.twill.filesystem.LocationFactory) DatasetFramework(co.cask.cdap.data2.dataset2.DatasetFramework) TransactionSystemClient(org.apache.tephra.TransactionSystemClient) MultiThreadDatasetCache(co.cask.cdap.data2.dataset2.MultiThreadDatasetCache) SystemDatasetInstantiator(co.cask.cdap.data.dataset.SystemDatasetInstantiator) LogLocation(co.cask.cdap.logging.write.LogLocation) FileMetaDataManager(co.cask.cdap.logging.write.FileMetaDataManager) LogPathIdentifier(co.cask.cdap.logging.appender.system.LogPathIdentifier) FileMetaDataReader(co.cask.cdap.logging.meta.FileMetaDataReader) Transactional(co.cask.cdap.api.Transactional) Location(org.apache.twill.filesystem.Location) Test(org.junit.Test)
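A small sketch of how a caller might separate the two formats returned by listFiles, assuming LogLocation.VERSION_0 and VERSION_1 are constants comparable with equals, as the assertions above suggest (the helper name and parameters are hypothetical):

// Hypothetical helper: counts how many of the returned files were written in the
// old (VERSION_0) metadata format within the given time range.
private static int countOldFormatFiles(FileMetaDataReader reader, LogPathIdentifier id,
                                       long startTs, long stopTs) throws Exception {
    int oldFormat = 0;
    for (LogLocation logLocation : reader.listFiles(id, startTs, stopTs)) {
        // VERSION_0 entries came through FileMetaDataManager.writeMetaData(context, eventTs, location);
        // VERSION_1 entries came through FileMetaDataWriter.writeMetaData(id, eventTs, currentTs, location).
        if (LogLocation.VERSION_0.equals(logLocation.getFrameworkVersion())) {
            oldFormat++;
        }
    }
    return oldFormat;
}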

Example 24 with LoggingContext

use of co.cask.cdap.common.logging.LoggingContext in project cdap by caskdata.

the class TestKafkaLogging method testPartitionKey.

// Note: LogReader.getLog is tested in LogSaverTest for distributed mode
@Test
public void testPartitionKey() throws Exception {
    CConfiguration cConf = KAFKA_TESTER.getCConf();
    // set the Kafka partition key to "application" so all logs from one application go to the same partition
    cConf.set(Constants.Logging.LOG_PUBLISH_PARTITION_KEY, "application");
    Logger logger = LoggerFactory.getLogger("TestKafkaLogging");
    LoggingContext loggingContext = new FlowletLoggingContext("TKL_NS_2", "APP_2", "FLOW_2", "FLOWLET_2", "RUN2", "INSTANCE2");
    LoggingContextAccessor.setLoggingContext(loggingContext);
    for (int i = 0; i < 40; ++i) {
        logger.warn("TKL_NS_2 Test log message {} {} {}", i, "arg1", "arg2", new Exception("test exception"));
    }
    loggingContext = new FlowletLoggingContext("TKL_NS_2", "APP_2", "FLOW_3", "FLOWLET_3", "RUN3", "INSTANCE3");
    LoggingContextAccessor.setLoggingContext(loggingContext);
    for (int i = 0; i < 40; ++i) {
        logger.warn("TKL_NS_2 Test log message {} {} {}", i, "arg1", "arg2", new Exception("test exception"));
    }
    final Multimap<Integer, String> actual = ArrayListMultimap.create();
    KAFKA_TESTER.getPublishedMessages(KAFKA_TESTER.getCConf().get(Constants.Logging.KAFKA_TOPIC), ImmutableSet.of(0, 1), 40, new Function<FetchedMessage, String>() {

        @Override
        public String apply(final FetchedMessage input) {
            try {
                Map.Entry<Integer, String> entry = convertFetchedMessage(input);
                actual.put(entry.getKey(), entry.getValue());
            } catch (IOException e) {
                // should never happen
            }
            return "";
        }
    });
    boolean isPresent = false;
    // check if all the logs from same app went to same partition
    for (Map.Entry<Integer, Collection<String>> entry : actual.asMap().entrySet()) {
        if (entry.getValue().contains("TKL_NS_2:APP_2")) {
            if (isPresent) {
                // we already found another partition with the application context, so fail
                Assert.assertFalse("Only one partition should have application logging context", isPresent);
            }
            isPresent = true;
        }
    }
    // reset the Kafka partition key
    cConf.set(Constants.Logging.LOG_PUBLISH_PARTITION_KEY, "program");
}
Also used : LoggingContext(co.cask.cdap.common.logging.LoggingContext) FlowletLoggingContext(co.cask.cdap.logging.context.FlowletLoggingContext) IOException(java.io.IOException) Logger(org.slf4j.Logger) CConfiguration(co.cask.cdap.common.conf.CConfiguration) Collection(java.util.Collection) FetchedMessage(org.apache.twill.kafka.client.FetchedMessage) HashMap(java.util.HashMap) Map(java.util.Map) Test(org.junit.Test)
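Note that the isPresent flag only guards against more than one matching partition; the loop never asserts that the application context was found at all. An equivalent, tighter check over the same actual multimap (a sketch using Java 8 streams):

// Count partitions whose messages include the application logging context,
// then assert exactly one; unlike the flag-based loop, this also fails if
// the context was never published to any partition.
long partitionsWithAppContext = actual.asMap().values().stream()
    .filter(messages -> messages.contains("TKL_NS_2:APP_2"))
    .count();
Assert.assertEquals("Exactly one partition should have the application logging context",
    1, partitionsWithAppContext);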

Example 25 with LoggingContext

use of co.cask.cdap.common.logging.LoggingContext in project cdap by caskdata.

the class FileMetadataCleanerTest method testScanAndDeleteOldMetadata.

@Test
public void testScanAndDeleteOldMetadata() throws Exception {
    // use file meta data manager to write meta data in old format
    // use file meta writer to write meta data in new format
    // scan for old files and make sure we only get the old meta data entries.
    DatasetFramework datasetFramework = injector.getInstance(DatasetFramework.class);
    DatasetManager datasetManager = new DefaultDatasetManager(datasetFramework, NamespaceId.SYSTEM, co.cask.cdap.common.service.RetryStrategies.noRetry());
    Transactional transactional = Transactions.createTransactionalWithRetry(Transactions.createTransactional(new MultiThreadDatasetCache(new SystemDatasetInstantiator(datasetFramework), injector.getInstance(TransactionSystemClient.class), NamespaceId.SYSTEM, ImmutableMap.<String, String>of(), null, null)), RetryStrategies.retryOnConflict(20, 100));
    FileMetaDataWriter fileMetaDataWriter = new FileMetaDataWriter(datasetManager, transactional);
    FileMetaDataManager fileMetaDataManager = injector.getInstance(FileMetaDataManager.class);
    LoggingContext flowContext = LoggingContextHelper.getLoggingContext("testNs", "testApp", "testFlow", ProgramType.FLOW);
    long eventTimestamp = System.currentTimeMillis();
    LocationFactory locationFactory = injector.getInstance(LocationFactory.class);
    Location testLocation = locationFactory.create("testFile");
    try {
        // write 50 entries in old format
        for (int i = 0; i < 50; i++) {
            fileMetaDataManager.writeMetaData(flowContext, eventTimestamp + i, testLocation);
        }
        LoggingContext wflowContext = LoggingContextHelper.getLoggingContext("testNs", "testApp", "testWflow", ProgramType.WORKFLOW);
        fileMetaDataManager.writeMetaData(wflowContext, eventTimestamp, testLocation);
        LoggingContext mrContext = LoggingContextHelper.getLoggingContext("testNs", "testApp", "testMR", ProgramType.MAPREDUCE);
        fileMetaDataManager.writeMetaData(mrContext, eventTimestamp, testLocation);
        LoggingContext sparkContext = LoggingContextHelper.getLoggingContext("testNs", "testApp", "testSpark", ProgramType.SPARK);
        fileMetaDataManager.writeMetaData(sparkContext, eventTimestamp, testLocation);
        // write 50 entries in new format
        long newEventTime = eventTimestamp + 1000;
        long currentTime = newEventTime + 1000;
        LogPathIdentifier logPathIdentifier = new LogPathIdentifier("testNs", "testApp", "testFlow");
        for (int i = 50; i < 100; i++) {
            fileMetaDataWriter.writeMetaData(logPathIdentifier, newEventTime + i, currentTime + i, testLocation);
        }
        FileMetaDataReader fileMetaDataReader = injector.getInstance(FileMetaDataReader.class);
        Assert.assertEquals(50, fileMetaDataReader.listFiles(LoggingContextHelper.getLogPathIdentifier(flowContext), eventTimestamp - 1, eventTimestamp + 100).size());
        Assert.assertEquals(1, fileMetaDataReader.listFiles(LoggingContextHelper.getLogPathIdentifier(wflowContext), eventTimestamp - 1, eventTimestamp + 100).size());
        Assert.assertEquals(1, fileMetaDataReader.listFiles(LoggingContextHelper.getLogPathIdentifier(mrContext), eventTimestamp - 1, eventTimestamp + 100).size());
        Assert.assertEquals(1, fileMetaDataReader.listFiles(LoggingContextHelper.getLogPathIdentifier(sparkContext), eventTimestamp - 1, eventTimestamp + 100).size());
        FileMetadataCleaner fileMetadataCleaner = new FileMetadataCleaner(datasetManager, transactional);
        fileMetadataCleaner.scanAndDeleteOldMetaData(TRANSACTION_TIMEOUT, CUTOFF_TIME_TRANSACTION);
        // all old-format metadata should now be deleted
        Assert.assertEquals(0, fileMetaDataReader.listFiles(logPathIdentifier, eventTimestamp - 1, eventTimestamp + 100).size());
        Assert.assertEquals(0, fileMetaDataReader.listFiles(LoggingContextHelper.getLogPathIdentifier(wflowContext), eventTimestamp - 1, eventTimestamp + 100).size());
        Assert.assertEquals(0, fileMetaDataReader.listFiles(LoggingContextHelper.getLogPathIdentifier(mrContext), eventTimestamp - 1, eventTimestamp + 100).size());
        Assert.assertEquals(0, fileMetaDataReader.listFiles(LoggingContextHelper.getLogPathIdentifier(sparkContext), eventTimestamp - 1, eventTimestamp + 100).size());
    } finally {
        // cleanup meta
        cleanupMetadata(transactional, datasetManager);
    }
}
Also used : FileMetaDataWriter(co.cask.cdap.logging.meta.FileMetaDataWriter) LoggingContext(co.cask.cdap.common.logging.LoggingContext) DatasetManager(co.cask.cdap.api.dataset.DatasetManager) DefaultDatasetManager(co.cask.cdap.data2.datafabric.dataset.DefaultDatasetManager) LocationFactory(org.apache.twill.filesystem.LocationFactory) DatasetFramework(co.cask.cdap.data2.dataset2.DatasetFramework) TransactionSystemClient(org.apache.tephra.TransactionSystemClient) MultiThreadDatasetCache(co.cask.cdap.data2.dataset2.MultiThreadDatasetCache) SystemDatasetInstantiator(co.cask.cdap.data.dataset.SystemDatasetInstantiator) FileMetaDataManager(co.cask.cdap.logging.write.FileMetaDataManager) LogPathIdentifier(co.cask.cdap.logging.appender.system.LogPathIdentifier) FileMetaDataReader(co.cask.cdap.logging.meta.FileMetaDataReader) Transactional(co.cask.cdap.api.Transactional) Location(org.apache.twill.filesystem.Location) LogLocation(co.cask.cdap.logging.write.LogLocation) Test(org.junit.Test)

Aggregations

LoggingContext (co.cask.cdap.common.logging.LoggingContext): 30
GET (javax.ws.rs.GET): 12
Path (javax.ws.rs.Path): 12
Test (org.junit.Test): 11
FlowletLoggingContext (co.cask.cdap.logging.context.FlowletLoggingContext): 9
RunRecordMeta (co.cask.cdap.internal.app.store.RunRecordMeta): 6
LoggingTester (co.cask.cdap.logging.appender.LoggingTester): 5
FileLogReader (co.cask.cdap.logging.read.FileLogReader): 4
LogEvent (co.cask.cdap.logging.read.LogEvent): 4
CConfiguration (co.cask.cdap.common.conf.CConfiguration): 3
ApplicationLoggingContext (co.cask.cdap.common.logging.ApplicationLoggingContext): 3
LogPathIdentifier (co.cask.cdap.logging.appender.system.LogPathIdentifier): 3
FileMetaDataReader (co.cask.cdap.logging.meta.FileMetaDataReader): 3
LogLocation (co.cask.cdap.logging.write.LogLocation): 3
ProgramType (co.cask.cdap.proto.ProgramType): 3
Transactional (co.cask.cdap.api.Transactional): 2
DatasetManager (co.cask.cdap.api.dataset.DatasetManager): 2
NamespaceLoggingContext (co.cask.cdap.common.logging.NamespaceLoggingContext): 2
ServiceLoggingContext (co.cask.cdap.common.logging.ServiceLoggingContext): 2
SystemDatasetInstantiator (co.cask.cdap.data.dataset.SystemDatasetInstantiator): 2