Use of co.cask.cdap.api.logging.AppenderContext in project cdap by caskdata.
The class RollingLocationLogAppender, method start().
@Override
public void start() {
  // These should all be set. The settings come from the custom-log-pipeline.xml
  // and the context must be an AppenderContext.
  Preconditions.checkState(basePath != null, "Property basePath must be a base directory.");
  Preconditions.checkState(filePath != null, "Property filePath must be a file path along with the file name.");
  Preconditions.checkState(triggeringPolicy != null, "Property triggeringPolicy must be specified.");
  Preconditions.checkState(rollingPolicy != null, "Property rollingPolicy must be specified.");
  Preconditions.checkState(encoder != null, "Property encoder must be specified.");
  Preconditions.checkState(dirPermissions != null, "Property dirPermissions cannot be null.");
  Preconditions.checkState(filePermissions != null, "Property filePermissions cannot be null.");
  if (context instanceof AppenderContext) {
    AppenderContext context = (AppenderContext) this.context;
    locationManager = new LocationManager(context.getLocationFactory(), basePath, dirPermissions,
                                          filePermissions, fileMaxInactiveTimeMs);
    filePath = filePath.replace("instanceId", Integer.toString(context.getInstanceId()));
  } else if (!Boolean.TRUE.equals(context.getObject(Constants.Logging.PIPELINE_VALIDATION))) {
    throw new IllegalStateException("Expected logger context instance of " + AppenderContext.class.getName()
                                      + " but got " + context.getClass().getName());
  }
  started = true;
}
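For context, a minimal sketch of configuring this appender programmatically. The setter names (setBasePath, setFilePath, and so on) are assumptions inferred from the properties validated in start(), not confirmed API:

// Hypothetical wiring; setter names are inferred from the Preconditions in
// start() and may not match the real class exactly.
RollingLocationLogAppender appender = new RollingLocationLogAppender();
appender.setBasePath("logs");                   // base directory (assumed setter)
appender.setFilePath("instanceId/app.log");     // "instanceId" is substituted in start() (assumed setter)
appender.setDirPermissions("750");
appender.setFilePermissions("640");
appender.setEncoder(encoder);                   // a configured logback encoder
appender.setRollingPolicy(rollingPolicy);       // e.g. a fixed-window policy
appender.setTriggeringPolicy(triggeringPolicy);
appender.setContext(appenderContext);           // must be an AppenderContext
appender.start();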
Use of co.cask.cdap.api.logging.AppenderContext in project cdap by caskdata.
The class DistributedLogFramework, method createService().
@Override
protected Service createService(Set<Integer> partitions) {
  Map<String, LogPipelineSpecification<AppenderContext>> specs = new LogPipelineLoader(cConf).load(contextProvider);
  int pipelineCount = specs.size();
  // Create one KafkaLogProcessorPipeline per spec
  final List<Service> pipelines = new ArrayList<>();
  for (final LogPipelineSpecification<AppenderContext> pipelineSpec : specs.values()) {
    final CConfiguration cConf = pipelineSpec.getConf();
    final AppenderContext context = pipelineSpec.getContext();
    long bufferSize = getBufferSize(pipelineCount, cConf, partitions.size());
    final String topic = cConf.get(Constants.Logging.KAFKA_TOPIC);
    final KafkaPipelineConfig config = new KafkaPipelineConfig(
      topic, partitions, bufferSize,
      cConf.getLong(Constants.Logging.PIPELINE_EVENT_DELAY_MS),
      cConf.getInt(Constants.Logging.PIPELINE_KAFKA_FETCH_SIZE),
      cConf.getLong(Constants.Logging.PIPELINE_CHECKPOINT_INTERVAL_MS));
    RetryStrategy retryStrategy = RetryStrategies.fromConfiguration(cConf, "system.log.process.");
    pipelines.add(new RetryOnStartFailureService(new Supplier<Service>() {
      @Override
      public Service get() {
        return new KafkaLogProcessorPipeline(
          new LogProcessorPipelineContext(cConf, context.getName(), context,
                                          context.getMetricsContext(), context.getInstanceId()),
          checkpointManagerFactory.create(topic, pipelineSpec.getCheckpointPrefix()),
          brokerService, config);
      }
    }, retryStrategy));
  }
  // Return a Service that starts/stops all pipelines
  return new AbstractIdleService() {
    @Override
    protected void startUp() throws Exception {
      // Start all pipelines
      validateAllFutures(Iterables.transform(pipelines, new Function<Service, ListenableFuture<State>>() {
        @Override
        public ListenableFuture<State> apply(Service service) {
          return service.start();
        }
      }));
    }

    @Override
    protected void shutDown() throws Exception {
      // Stop all pipelines
      validateAllFutures(Iterables.transform(pipelines, new Function<Service, ListenableFuture<State>>() {
        @Override
        public ListenableFuture<State> apply(Service service) {
          return service.stop();
        }
      }));
    }
  };
}
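The validateAllFutures helper is not shown in this snippet. A minimal sketch of what such a helper could look like with Guava (an assumption; the actual CDAP implementation may differ):

// Hypothetical helper: wait for every future and surface the first failure.
private static <T> void validateAllFutures(Iterable<? extends ListenableFuture<T>> futures) throws Exception {
  // Futures.allAsList fails fast if any input future fails
  Futures.allAsList(ImmutableList.copyOf(futures)).get();
}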
Use of co.cask.cdap.api.logging.AppenderContext in project cdap by caskdata.
The class CDAPLogAppender, method start().
@Override
public void start() {
  // These should all be set. The settings come from the cdap-log-pipeline.xml and the context must be an AppenderContext.
  Preconditions.checkState(dirPermissions != null, "Property dirPermissions cannot be null.");
  Preconditions.checkState(filePermissions != null, "Property filePermissions cannot be null.");
  Preconditions.checkState(syncIntervalBytes > 0, "Property syncIntervalBytes must be > 0.");
  Preconditions.checkState(maxFileLifetimeMs > 0, "Property maxFileLifetimeMs must be > 0.");
  Preconditions.checkState(maxFileSizeInBytes > 0, "Property maxFileSizeInBytes must be > 0.");
  Preconditions.checkState(fileRetentionDurationDays > 0, "Property fileRetentionDurationDays must be > 0.");
  Preconditions.checkState(logCleanupIntervalMins > 0, "Property logCleanupIntervalMins must be > 0.");
  Preconditions.checkState(fileCleanupTransactionTimeout > Constants.Logging.TX_TIMEOUT_DISCOUNT_SECS,
                           String.format("Property fileCleanupTransactionTimeout must be greater than %s seconds",
                                         Constants.Logging.TX_TIMEOUT_DISCOUNT_SECS));
  if (context instanceof AppenderContext) {
    AppenderContext context = (AppenderContext) this.context;
    logFileManager = new LogFileManager(dirPermissions, filePermissions, maxFileLifetimeMs, maxFileSizeInBytes,
                                        syncIntervalBytes,
                                        new FileMetaDataWriter(context.getDatasetManager(), context),
                                        context.getLocationFactory());
    if (context.getInstanceId() == 0) {
      scheduledExecutorService =
        Executors.newSingleThreadScheduledExecutor(Threads.createDaemonThreadFactory("log-clean-up"));
      FileMetadataCleaner fileMetadataCleaner = new FileMetadataCleaner(context.getDatasetManager(), context);
      LogCleaner logCleaner = new LogCleaner(fileMetadataCleaner, context.getLocationFactory(),
                                             TimeUnit.DAYS.toMillis(fileRetentionDurationDays),
                                             fileCleanupTransactionTimeout);
      scheduledExecutorService.scheduleAtFixedRate(logCleaner, 10, logCleanupIntervalMins, TimeUnit.MINUTES);
    }
  } else if (!Boolean.TRUE.equals(context.getObject(Constants.Logging.PIPELINE_VALIDATION))) {
    throw new IllegalStateException("Expected logger context instance of " + AppenderContext.class.getName()
                                      + " but got " + context.getClass().getName());
  }
  super.start();
}
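The else-if branch lets the appender start against a plain logger context during pipeline validation. A minimal sketch of that bypass, assuming a standard logback LoggerContext:

// Sketch (assumed usage): a validator marks a plain LoggerContext so that
// start() tolerates the missing AppenderContext instead of throwing.
LoggerContext validationContext = new LoggerContext();
validationContext.putObject(Constants.Logging.PIPELINE_VALIDATION, Boolean.TRUE);
cdapLogAppender.setContext(validationContext);
// The Preconditions above still apply, so the usual properties must be set
// before calling start().
cdapLogAppender.start();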
Use of co.cask.cdap.api.logging.AppenderContext in project cdap by caskdata.
The class CDAPLogAppenderTest, method testCDAPLogAppenderRotation().
@Test
public void testCDAPLogAppenderRotation() throws Exception {
  int syncInterval = 1024 * 1024;
  FileMetaDataReader fileMetaDataReader = injector.getInstance(FileMetaDataReader.class);
  CDAPLogAppender cdapLogAppender = new CDAPLogAppender();
  AppenderContext context = new LocalAppenderContext(injector.getInstance(DatasetFramework.class),
                                                     injector.getInstance(TransactionSystemClient.class),
                                                     injector.getInstance(LocationFactory.class),
                                                     new NoOpMetricsCollectionService());
  context.start();
  cdapLogAppender.setSyncIntervalBytes(syncInterval);
  cdapLogAppender.setMaxFileLifetimeMs(500);
  cdapLogAppender.setMaxFileSizeInBytes(104857600);
  cdapLogAppender.setDirPermissions("750");
  cdapLogAppender.setFilePermissions("640");
  cdapLogAppender.setFileRetentionDurationDays(1);
  cdapLogAppender.setLogCleanupIntervalMins(10);
  cdapLogAppender.setFileCleanupTransactionTimeout(30);
  cdapLogAppender.setContext(context);
  cdapLogAppender.start();

  Map<String, String> properties = new HashMap<>();
  properties.put(NamespaceLoggingContext.TAG_NAMESPACE_ID, "testTimeRotation");
  properties.put(ApplicationLoggingContext.TAG_APPLICATION_ID, "testApp");
  properties.put(FlowletLoggingContext.TAG_FLOW_ID, "testFlow");
  properties.put(FlowletLoggingContext.TAG_FLOWLET_ID, "testFlowlet");

  long currentTimeMillisEvent1 = System.currentTimeMillis();
  LoggingEvent event1 = getLoggingEvent("co.cask.Test1",
                                        (ch.qos.logback.classic.Logger) LoggerFactory.getLogger(Logger.ROOT_LOGGER_NAME),
                                        Level.ERROR, "test message 1", properties);
  event1.setTimeStamp(currentTimeMillisEvent1);
  cdapLogAppender.doAppend(event1);

  // Pause past the max file lifetime (500 ms) so the next append rotates to a new file
  TimeUnit.MILLISECONDS.sleep(500);

  long currentTimeMillisEvent2 = System.currentTimeMillis();
  LoggingEvent event2 = getLoggingEvent("co.cask.Test2",
                                        (ch.qos.logback.classic.Logger) LoggerFactory.getLogger(Logger.ROOT_LOGGER_NAME),
                                        Level.ERROR, "test message 2", properties);
  event2.setTimeStamp(currentTimeMillisEvent1 + 1000);
  cdapLogAppender.doAppend(event2);
  cdapLogAppender.stop();
  context.stop();

  try {
    List<LogLocation> files = fileMetaDataReader.listFiles(cdapLogAppender.getLoggingPath(properties), 0, Long.MAX_VALUE);
    Assert.assertEquals(2, files.size());
    assertLogEventDetails(event1, files.get(0));
    assertLogEventDetails(event2, files.get(1));
    Assert.assertEquals(currentTimeMillisEvent1, files.get(0).getEventTimeMs());
    Assert.assertEquals(currentTimeMillisEvent1 + 1000, files.get(1).getEventTimeMs());
    Assert.assertTrue(files.get(0).getFileCreationTimeMs() >= currentTimeMillisEvent1);
    Assert.assertTrue(files.get(1).getFileCreationTimeMs() >= currentTimeMillisEvent2);
    // Check file permissions ("640" -> "rw-r-----")
    String expectedPermissions = "rw-r-----";
    for (LogLocation file : files) {
      Location location = file.getLocation();
      Assert.assertEquals(expectedPermissions, location.getPermissions());
    }
  } catch (Exception e) {
    Assert.fail(e.getMessage());
  }
}
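The getLoggingEvent helper used by these tests is not shown. A plausible implementation with plain logback (assumed; the real test utility may attach the tags differently):

// Hypothetical helper: build a logback LoggingEvent and attach the
// logging-context tags as its MDC property map.
private LoggingEvent getLoggingEvent(String fqcn, ch.qos.logback.classic.Logger logger,
                                     Level level, String message, Map<String, String> mdc) {
  LoggingEvent event = new LoggingEvent(fqcn, logger, level, message, null, null);
  event.setMDCPropertyMap(mdc);
  return event;
}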
Use of co.cask.cdap.api.logging.AppenderContext in project cdap by caskdata.
The class CDAPLogAppenderTest, method testCDAPLogAppenderSizeBasedRotation().
@Test
public void testCDAPLogAppenderSizeBasedRotation() throws Exception {
  int syncInterval = 1024 * 1024;
  FileMetaDataReader fileMetaDataReader = injector.getInstance(FileMetaDataReader.class);
  CDAPLogAppender cdapLogAppender = new CDAPLogAppender();
  AppenderContext context = new LocalAppenderContext(injector.getInstance(DatasetFramework.class),
                                                     injector.getInstance(TransactionSystemClient.class),
                                                     injector.getInstance(LocationFactory.class),
                                                     new NoOpMetricsCollectionService());
  context.start();
  cdapLogAppender.setSyncIntervalBytes(syncInterval);
  cdapLogAppender.setMaxFileLifetimeMs(TimeUnit.DAYS.toMillis(1));
  cdapLogAppender.setMaxFileSizeInBytes(500);
  cdapLogAppender.setDirPermissions("750");
  cdapLogAppender.setFilePermissions("640");
  cdapLogAppender.setFileRetentionDurationDays(1);
  cdapLogAppender.setLogCleanupIntervalMins(10);
  cdapLogAppender.setFileCleanupTransactionTimeout(30);
  cdapLogAppender.setContext(context);
  cdapLogAppender.start();

  Map<String, String> properties = new HashMap<>();
  properties.put(NamespaceLoggingContext.TAG_NAMESPACE_ID, "testSizeRotation");
  properties.put(ApplicationLoggingContext.TAG_APPLICATION_ID, "testApp");
  properties.put(FlowletLoggingContext.TAG_FLOW_ID, "testFlow");
  properties.put(FlowletLoggingContext.TAG_FLOWLET_ID, "testFlowlet");

  long currentTimeMillisEvent1 = System.currentTimeMillis();
  LoggingEvent event1 = getLoggingEvent("co.cask.Test1",
                                        (ch.qos.logback.classic.Logger) LoggerFactory.getLogger(Logger.ROOT_LOGGER_NAME),
                                        Level.ERROR, "test message 1", properties);
  event1.setTimeStamp(currentTimeMillisEvent1);
  cdapLogAppender.doAppend(event1);
  // sync() flushes the append and updates the recorded file size
  cdapLogAppender.sync();

  long currentTimeMillisEvent2 = System.currentTimeMillis();
  LoggingEvent event2 = getLoggingEvent("co.cask.Test2",
                                        (ch.qos.logback.classic.Logger) LoggerFactory.getLogger(Logger.ROOT_LOGGER_NAME),
                                        Level.ERROR, "test message 2", properties);
  event2.setTimeStamp(currentTimeMillisEvent2);
  // This append rotates to a new file: the size limit is very low (500 bytes)
  // and the previous append already exceeded it.
  cdapLogAppender.doAppend(event2);
  cdapLogAppender.stop();
  context.stop();

  try {
    List<LogLocation> files = fileMetaDataReader.listFiles(cdapLogAppender.getLoggingPath(properties), 0, Long.MAX_VALUE);
    Assert.assertEquals(2, files.size());
    assertLogEventDetails(event1, files.get(0));
    assertLogEventDetails(event2, files.get(1));
    Assert.assertEquals(currentTimeMillisEvent1, files.get(0).getEventTimeMs());
    Assert.assertEquals(currentTimeMillisEvent2, files.get(1).getEventTimeMs());
    Assert.assertTrue(files.get(0).getFileCreationTimeMs() >= currentTimeMillisEvent1);
    Assert.assertTrue(files.get(1).getFileCreationTimeMs() >= currentTimeMillisEvent2);
  } catch (Exception e) {
    Assert.fail(e.getMessage());
  }
}
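Both tests set permissions as octal strings ("750" for directories, "640" for files), while the rotation test asserts the symbolic form. A small self-contained sketch of that correspondence:

// Convert a three-digit octal permission string to its symbolic form.
static String toSymbolic(String octal) {
  StringBuilder sb = new StringBuilder();
  for (char c : octal.toCharArray()) {
    int bits = c - '0';
    sb.append((bits & 4) != 0 ? 'r' : '-');
    sb.append((bits & 2) != 0 ? 'w' : '-');
    sb.append((bits & 1) != 0 ? 'x' : '-');
  }
  return sb.toString();
}
// toSymbolic("640") -> "rw-r-----", the expectedPermissions asserted above;
// toSymbolic("750") -> "rwxr-x---" for directories.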