Example 6 with AppenderContext

use of co.cask.cdap.api.logging.AppenderContext in project cdap by caskdata.

the class RollingLocationLogAppender method start.

@Override
public void start() {
    // These should all be set. The settings come from custom-log-pipeline.xml and
    // the context must be an AppenderContext.
    Preconditions.checkState(basePath != null, "Property basePath must be set to the base directory.");
    Preconditions.checkState(filePath != null, "Property filePath must be specified, including the filename.");
    Preconditions.checkState(triggeringPolicy != null, "Property triggeringPolicy must be specified.");
    Preconditions.checkState(rollingPolicy != null, "Property rollingPolicy must be specified.");
    Preconditions.checkState(encoder != null, "Property encoder must be specified.");
    Preconditions.checkState(dirPermissions != null, "Property dirPermissions cannot be null.");
    Preconditions.checkState(filePermissions != null, "Property filePermissions cannot be null.");
    if (context instanceof AppenderContext) {
        AppenderContext context = (AppenderContext) this.context;
        locationManager = new LocationManager(context.getLocationFactory(), basePath, dirPermissions, filePermissions, fileMaxInactiveTimeMs);
        filePath = filePath.replace("instanceId", Integer.toString(context.getInstanceId()));
    } else if (!Boolean.TRUE.equals(context.getObject(Constants.Logging.PIPELINE_VALIDATION))) {
        throw new IllegalStateException("Expected logger context instance of " + AppenderContext.class.getName() + " but got " + context.getClass().getName());
    }
    started = true;
}
Also used : AppenderContext(co.cask.cdap.api.logging.AppenderContext)
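
The final else-if branch lets the appender start under a plain logback LoggerContext during pipeline validation. A minimal sketch (not from the CDAP sources; the flag key below is an assumption, the real constant lives in Constants.Logging) of how a validation run could set that object before calling start():

import ch.qos.logback.classic.LoggerContext;

public class PipelineValidationSketch {

    // Hypothetical key; the actual value is defined by Constants.Logging.PIPELINE_VALIDATION in CDAP.
    private static final String PIPELINE_VALIDATION = "pipeline.validation";

    public static void markForValidation(LoggerContext loggerContext) {
        // The appender's start() reads this flag via context.getObject(...); Boolean.TRUE
        // skips the AppenderContext requirement so the pipeline XML can be validated standalone.
        loggerContext.putObject(PIPELINE_VALIDATION, Boolean.TRUE);
    }
}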

Example 7 with AppenderContext

use of co.cask.cdap.api.logging.AppenderContext in project cdap by caskdata.

the class DistributedLogFramework method createService.

@Override
protected Service createService(Set<Integer> partitions) {
    Map<String, LogPipelineSpecification<AppenderContext>> specs = new LogPipelineLoader(cConf).load(contextProvider);
    int pipelineCount = specs.size();
    // Create one KafkaLogProcessorPipeline per spec
    final List<Service> pipelines = new ArrayList<>();
    for (final LogPipelineSpecification<AppenderContext> pipelineSpec : specs.values()) {
        final CConfiguration cConf = pipelineSpec.getConf();
        final AppenderContext context = pipelineSpec.getContext();
        long bufferSize = getBufferSize(pipelineCount, cConf, partitions.size());
        final String topic = cConf.get(Constants.Logging.KAFKA_TOPIC);
        final KafkaPipelineConfig config = new KafkaPipelineConfig(topic, partitions, bufferSize, cConf.getLong(Constants.Logging.PIPELINE_EVENT_DELAY_MS), cConf.getInt(Constants.Logging.PIPELINE_KAFKA_FETCH_SIZE), cConf.getLong(Constants.Logging.PIPELINE_CHECKPOINT_INTERVAL_MS));
        RetryStrategy retryStrategy = RetryStrategies.fromConfiguration(cConf, "system.log.process.");
        pipelines.add(new RetryOnStartFailureService(new Supplier<Service>() {

            @Override
            public Service get() {
                return new KafkaLogProcessorPipeline(new LogProcessorPipelineContext(cConf, context.getName(), context, context.getMetricsContext(), context.getInstanceId()), checkpointManagerFactory.create(topic, pipelineSpec.getCheckpointPrefix()), brokerService, config);
            }
        }, retryStrategy));
    }
    // Returns a Service that starts/stops all pipelines.
    return new AbstractIdleService() {

        @Override
        protected void startUp() throws Exception {
            // Starts all pipelines
            validateAllFutures(Iterables.transform(pipelines, new Function<Service, ListenableFuture<State>>() {

                @Override
                public ListenableFuture<State> apply(Service service) {
                    return service.start();
                }
            }));
        }

        @Override
        protected void shutDown() throws Exception {
            // Stops all pipelines
            validateAllFutures(Iterables.transform(pipelines, new Function<Service, ListenableFuture<State>>() {

                @Override
                public ListenableFuture<State> apply(Service service) {
                    return service.stop();
                }
            }));
        }
    };
}
Also used : LogPipelineSpecification(co.cask.cdap.logging.framework.LogPipelineSpecification) ArrayList(java.util.ArrayList) KafkaPipelineConfig(co.cask.cdap.logging.pipeline.kafka.KafkaPipelineConfig) ResourceBalancerService(co.cask.cdap.common.resource.ResourceBalancerService) AbstractIdleService(com.google.common.util.concurrent.AbstractIdleService) RetryOnStartFailureService(co.cask.cdap.common.service.RetryOnStartFailureService) DiscoveryService(org.apache.twill.discovery.DiscoveryService) Service(com.google.common.util.concurrent.Service) BrokerService(org.apache.twill.kafka.client.BrokerService) LogPipelineLoader(co.cask.cdap.logging.framework.LogPipelineLoader) LogProcessorPipelineContext(co.cask.cdap.logging.pipeline.LogProcessorPipelineContext) CConfiguration(co.cask.cdap.common.conf.CConfiguration) Function(com.google.common.base.Function) KafkaLogProcessorPipeline(co.cask.cdap.logging.pipeline.kafka.KafkaLogProcessorPipeline) AppenderContext(co.cask.cdap.api.logging.AppenderContext) RetryOnStartFailureService(co.cask.cdap.common.service.RetryOnStartFailureService) Supplier(com.google.common.base.Supplier) AbstractIdleService(com.google.common.util.concurrent.AbstractIdleService) RetryStrategy(co.cask.cdap.common.service.RetryStrategy)
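
The validateAllFutures helper is referenced above but not shown in this snippet. A plausible sketch, assuming it simply waits on every state-transition future and propagates the first failure, could be built on Guava's Futures:

import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.Service;

final class FuturesValidationSketch {

    // Hypothetical stand-in for validateAllFutures; the real CDAP implementation is not shown here.
    static void validateAllFutures(Iterable<ListenableFuture<Service.State>> futures) throws Exception {
        // allAsList fails fast if any input future fails; get() rethrows that failure,
        // which aborts startUp()/shutDown() of the wrapping AbstractIdleService.
        Futures.allAsList(futures).get();
    }
}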

Example 8 with AppenderContext

use of co.cask.cdap.api.logging.AppenderContext in project cdap by caskdata.

the class CDAPLogAppender method start.

@Override
public void start() {
    // These should all be set. The settings come from cdap-log-pipeline.xml and the context must be an AppenderContext.
    Preconditions.checkState(dirPermissions != null, "Property dirPermissions cannot be null.");
    Preconditions.checkState(filePermissions != null, "Property filePermissions cannot be null.");
    Preconditions.checkState(syncIntervalBytes > 0, "Property syncIntervalBytes must be > 0.");
    Preconditions.checkState(maxFileLifetimeMs > 0, "Property maxFileLifetimeMs must be > 0.");
    Preconditions.checkState(maxFileSizeInBytes > 0, "Property maxFileSizeInBytes must be > 0.");
    Preconditions.checkState(fileRetentionDurationDays > 0, "Property fileRetentionDurationDays must be > 0.");
    Preconditions.checkState(logCleanupIntervalMins > 0, "Property logCleanupIntervalMins must be > 0.");
    Preconditions.checkState(fileCleanupTransactionTimeout > Constants.Logging.TX_TIMEOUT_DISCOUNT_SECS, String.format("Property fileCleanupTransactionTimeout must be greater than %s seconds", Constants.Logging.TX_TIMEOUT_DISCOUNT_SECS));
    if (context instanceof AppenderContext) {
        AppenderContext context = (AppenderContext) this.context;
        logFileManager = new LogFileManager(dirPermissions, filePermissions, maxFileLifetimeMs, maxFileSizeInBytes, syncIntervalBytes, new FileMetaDataWriter(context.getDatasetManager(), context), context.getLocationFactory());
        if (context.getInstanceId() == 0) {
            scheduledExecutorService = Executors.newSingleThreadScheduledExecutor(Threads.createDaemonThreadFactory("log-clean-up"));
            FileMetadataCleaner fileMetadataCleaner = new FileMetadataCleaner(context.getDatasetManager(), context);
            LogCleaner logCleaner = new LogCleaner(fileMetadataCleaner, context.getLocationFactory(), TimeUnit.DAYS.toMillis(fileRetentionDurationDays), fileCleanupTransactionTimeout);
            scheduledExecutorService.scheduleAtFixedRate(logCleaner, 10, logCleanupIntervalMins, TimeUnit.MINUTES);
        }
    } else if (!Boolean.TRUE.equals(context.getObject(Constants.Logging.PIPELINE_VALIDATION))) {
        throw new IllegalStateException("Expected logger context instance of " + AppenderContext.class.getName() + " but got " + context.getClass().getName());
    }
    super.start();
}
Also used : FileMetadataCleaner(co.cask.cdap.logging.clean.FileMetadataCleaner) FileMetaDataWriter(co.cask.cdap.logging.meta.FileMetaDataWriter) AppenderContext(co.cask.cdap.api.logging.AppenderContext) LogCleaner(co.cask.cdap.logging.clean.LogCleaner)
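
Note that only instance 0 schedules the cleanup task, so at most one cleaner runs across a multi-instance deployment. A minimal sketch of the same daemon-scheduler pattern, substituting a plain JDK ThreadFactory for Twill's Threads.createDaemonThreadFactory:

import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.ThreadFactory;
import java.util.concurrent.TimeUnit;

final class DaemonCleanupSketch {

    static ScheduledExecutorService scheduleCleanup(Runnable cleaner, long intervalMinutes) {
        // Daemon threads let the JVM exit even while the cleaner is still scheduled,
        // mirroring what Twill's createDaemonThreadFactory provides.
        ThreadFactory daemonFactory = runnable -> {
            Thread thread = new Thread(runnable, "log-clean-up");
            thread.setDaemon(true);
            return thread;
        };
        ScheduledExecutorService executor = Executors.newSingleThreadScheduledExecutor(daemonFactory);
        // Initial delay of 10 minutes, then repeat at the configured interval, as in the appender above.
        executor.scheduleAtFixedRate(cleaner, 10, intervalMinutes, TimeUnit.MINUTES);
        return executor;
    }
}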

Example 9 with AppenderContext

use of co.cask.cdap.api.logging.AppenderContext in project cdap by caskdata.

the class CDAPLogAppenderTest method testCDAPLogAppenderRotation.

@Test
public void testCDAPLogAppenderRotation() throws Exception {
    int syncInterval = 1024 * 1024;
    FileMetaDataReader fileMetaDataReader = injector.getInstance(FileMetaDataReader.class);
    CDAPLogAppender cdapLogAppender = new CDAPLogAppender();
    AppenderContext context = new LocalAppenderContext(injector.getInstance(DatasetFramework.class), injector.getInstance(TransactionSystemClient.class), injector.getInstance(LocationFactory.class), new NoOpMetricsCollectionService());
    context.start();
    cdapLogAppender.setSyncIntervalBytes(syncInterval);
    cdapLogAppender.setMaxFileLifetimeMs(500);
    cdapLogAppender.setMaxFileSizeInBytes(104857600);
    cdapLogAppender.setDirPermissions("750");
    cdapLogAppender.setFilePermissions("640");
    cdapLogAppender.setFileRetentionDurationDays(1);
    cdapLogAppender.setLogCleanupIntervalMins(10);
    cdapLogAppender.setFileCleanupTransactionTimeout(30);
    cdapLogAppender.setContext(context);
    cdapLogAppender.start();
    Map<String, String> properties = new HashMap<>();
    properties.put(NamespaceLoggingContext.TAG_NAMESPACE_ID, "testTimeRotation");
    properties.put(ApplicationLoggingContext.TAG_APPLICATION_ID, "testApp");
    properties.put(FlowletLoggingContext.TAG_FLOW_ID, "testFlow");
    properties.put(FlowletLoggingContext.TAG_FLOWLET_ID, "testFlowlet");
    long currentTimeMillisEvent1 = System.currentTimeMillis();
    LoggingEvent event1 = getLoggingEvent("co.cask.Test1", (ch.qos.logback.classic.Logger) LoggerFactory.getLogger(Logger.ROOT_LOGGER_NAME), Level.ERROR, "test message 1", properties);
    event1.setTimeStamp(currentTimeMillisEvent1);
    cdapLogAppender.doAppend(event1);
    // Pause long enough to pass the max file lifetime (500 ms)
    TimeUnit.MILLISECONDS.sleep(500);
    long currentTimeMillisEvent2 = System.currentTimeMillis();
    LoggingEvent event2 = getLoggingEvent("co.cask.Test2", (ch.qos.logback.classic.Logger) LoggerFactory.getLogger(Logger.ROOT_LOGGER_NAME), Level.ERROR, "test message 2", properties);
    event2.setTimeStamp(currentTimeMillisEvent1 + 1000);
    cdapLogAppender.doAppend(event2);
    cdapLogAppender.stop();
    context.stop();
    try {
        List<LogLocation> files = fileMetaDataReader.listFiles(cdapLogAppender.getLoggingPath(properties), 0, Long.MAX_VALUE);
        Assert.assertEquals(2, files.size());
        assertLogEventDetails(event1, files.get(0));
        assertLogEventDetails(event2, files.get(1));
        Assert.assertEquals(currentTimeMillisEvent1, files.get(0).getEventTimeMs());
        Assert.assertEquals(currentTimeMillisEvent1 + 1000, files.get(1).getEventTimeMs());
        Assert.assertTrue(files.get(0).getFileCreationTimeMs() >= currentTimeMillisEvent1);
        Assert.assertTrue(files.get(1).getFileCreationTimeMs() >= currentTimeMillisEvent2);
        // check that the file permissions match the configured "640"
        String expectedPermissions = "rw-r-----";
        for (LogLocation file : files) {
            Location location = file.getLocation();
            Assert.assertEquals(expectedPermissions, location.getPermissions());
        }
    } catch (Exception e) {
        Assert.fail(e.getMessage());
    }
}
Also used : HashMap(java.util.HashMap) NoOpMetricsCollectionService(co.cask.cdap.common.metrics.NoOpMetricsCollectionService) IOException(java.io.IOException) LocationFactory(org.apache.twill.filesystem.LocationFactory) DatasetFramework(co.cask.cdap.data2.dataset2.DatasetFramework) LoggingEvent(ch.qos.logback.classic.spi.LoggingEvent) TransactionSystemClient(org.apache.tephra.TransactionSystemClient) LocalAppenderContext(co.cask.cdap.logging.framework.LocalAppenderContext) LogLocation(co.cask.cdap.logging.write.LogLocation) FileMetaDataReader(co.cask.cdap.logging.meta.FileMetaDataReader) LocalAppenderContext(co.cask.cdap.logging.framework.LocalAppenderContext) AppenderContext(co.cask.cdap.api.logging.AppenderContext) Location(org.apache.twill.filesystem.Location) LogLocation(co.cask.cdap.logging.write.LogLocation) Test(org.junit.Test)
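
The getLoggingEvent helper used by this test is not shown. A plausible sketch, assuming it builds a logback LoggingEvent and attaches the logging-context tags through the MDC map:

import java.util.Map;
import ch.qos.logback.classic.Level;
import ch.qos.logback.classic.Logger;
import ch.qos.logback.classic.spi.LoggingEvent;

final class LoggingEventSketch {

    // Hypothetical stand-in for the test helper; the real CDAP version is not shown in this example.
    static LoggingEvent getLoggingEvent(String fqcn, Logger logger, Level level,
                                        String message, Map<String, String> mdc) {
        LoggingEvent event = new LoggingEvent(fqcn, logger, level, message, null, null);
        // The logging-context tags (namespace, application, flow, flowlet) travel in the MDC map.
        event.setMDCPropertyMap(mdc);
        return event;
    }
}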

Example 10 with AppenderContext

use of co.cask.cdap.api.logging.AppenderContext in project cdap by caskdata.

the class CDAPLogAppenderTest method testCDAPLogAppenderSizeBasedRotation.

@Test
public void testCDAPLogAppenderSizeBasedRotation() throws Exception {
    int syncInterval = 1024 * 1024;
    FileMetaDataReader fileMetaDataReader = injector.getInstance(FileMetaDataReader.class);
    CDAPLogAppender cdapLogAppender = new CDAPLogAppender();
    AppenderContext context = new LocalAppenderContext(injector.getInstance(DatasetFramework.class), injector.getInstance(TransactionSystemClient.class), injector.getInstance(LocationFactory.class), new NoOpMetricsCollectionService());
    context.start();
    cdapLogAppender.setSyncIntervalBytes(syncInterval);
    cdapLogAppender.setMaxFileLifetimeMs(TimeUnit.DAYS.toMillis(1));
    cdapLogAppender.setMaxFileSizeInBytes(500);
    cdapLogAppender.setDirPermissions("750");
    cdapLogAppender.setFilePermissions("640");
    cdapLogAppender.setFileRetentionDurationDays(1);
    cdapLogAppender.setLogCleanupIntervalMins(10);
    cdapLogAppender.setFileCleanupTransactionTimeout(30);
    cdapLogAppender.setContext(context);
    cdapLogAppender.start();
    Map<String, String> properties = new HashMap<>();
    properties.put(NamespaceLoggingContext.TAG_NAMESPACE_ID, "testSizeRotation");
    properties.put(ApplicationLoggingContext.TAG_APPLICATION_ID, "testApp");
    properties.put(FlowletLoggingContext.TAG_FLOW_ID, "testFlow");
    properties.put(FlowletLoggingContext.TAG_FLOWLET_ID, "testFlowlet");
    long currentTimeMillisEvent1 = System.currentTimeMillis();
    LoggingEvent event1 = getLoggingEvent("co.cask.Test1", (ch.qos.logback.classic.Logger) LoggerFactory.getLogger(Logger.ROOT_LOGGER_NAME), Level.ERROR, "test message 1", properties);
    event1.setTimeStamp(currentTimeMillisEvent1);
    cdapLogAppender.doAppend(event1);
    // sync updates the file size
    cdapLogAppender.sync();
    long currentTimeMillisEvent2 = System.currentTimeMillis();
    LoggingEvent event2 = getLoggingEvent("co.cask.Test2", (ch.qos.logback.classic.Logger) LoggerFactory.getLogger(Logger.ROOT_LOGGER_NAME), Level.ERROR, "test message 2", properties);
    event2.setTimeStamp(currentTimeMillisEvent2);
    // One more append rotates to a new file, since the file size limit is very low and the last append already exceeded it.
    cdapLogAppender.doAppend(event2);
    cdapLogAppender.stop();
    context.stop();
    try {
        List<LogLocation> files = fileMetaDataReader.listFiles(cdapLogAppender.getLoggingPath(properties), 0, Long.MAX_VALUE);
        Assert.assertEquals(2, files.size());
        assertLogEventDetails(event1, files.get(0));
        assertLogEventDetails(event2, files.get(1));
        Assert.assertEquals(currentTimeMillisEvent1, files.get(0).getEventTimeMs());
        Assert.assertEquals(currentTimeMillisEvent2, files.get(1).getEventTimeMs());
        Assert.assertTrue(files.get(0).getFileCreationTimeMs() >= currentTimeMillisEvent1);
        Assert.assertTrue(files.get(1).getFileCreationTimeMs() >= currentTimeMillisEvent2);
    } catch (Exception e) {
        Assert.fail(e.getMessage());
    }
}
Also used : HashMap(java.util.HashMap) NoOpMetricsCollectionService(co.cask.cdap.common.metrics.NoOpMetricsCollectionService) IOException(java.io.IOException) LocationFactory(org.apache.twill.filesystem.LocationFactory) DatasetFramework(co.cask.cdap.data2.dataset2.DatasetFramework) LoggingEvent(ch.qos.logback.classic.spi.LoggingEvent) TransactionSystemClient(org.apache.tephra.TransactionSystemClient) LocalAppenderContext(co.cask.cdap.logging.framework.LocalAppenderContext) LogLocation(co.cask.cdap.logging.write.LogLocation) FileMetaDataReader(co.cask.cdap.logging.meta.FileMetaDataReader) LocalAppenderContext(co.cask.cdap.logging.framework.LocalAppenderContext) AppenderContext(co.cask.cdap.api.logging.AppenderContext) Test(org.junit.Test)

Aggregations

AppenderContext (co.cask.cdap.api.logging.AppenderContext)11 LocalAppenderContext (co.cask.cdap.logging.framework.LocalAppenderContext)7 NoOpMetricsCollectionService (co.cask.cdap.common.metrics.NoOpMetricsCollectionService)6 DatasetFramework (co.cask.cdap.data2.dataset2.DatasetFramework)6 TransactionSystemClient (org.apache.tephra.TransactionSystemClient)6 LocationFactory (org.apache.twill.filesystem.LocationFactory)6 Test (org.junit.Test)6 JoranConfigurator (ch.qos.logback.classic.joran.JoranConfigurator)3 LoggingEvent (ch.qos.logback.classic.spi.LoggingEvent)3 FileMetaDataReader (co.cask.cdap.logging.meta.FileMetaDataReader)3 LogLocation (co.cask.cdap.logging.write.LogLocation)3 IOException (java.io.IOException)3 HashMap (java.util.HashMap)3 Location (org.apache.twill.filesystem.Location)3 Logger (org.slf4j.Logger)3 LogPipelineLoader (co.cask.cdap.logging.framework.LogPipelineLoader)2 LogPipelineSpecification (co.cask.cdap.logging.framework.LogPipelineSpecification)2 LogProcessorPipelineContext (co.cask.cdap.logging.pipeline.LogProcessorPipelineContext)2 FileNamePattern (ch.qos.logback.core.rolling.helper.FileNamePattern)1 IntegerTokenConverter (ch.qos.logback.core.rolling.helper.IntegerTokenConverter)1