use of co.cask.cdap.logging.framework.LogPipelineSpecification in project cdap by caskdata.
the class LocalLogAppender method start.
@Override
public void start() {
  if (!started.compareAndSet(false, true)) {
    return;
  }
  // Load and start all configured log processing pipelines
  LogPipelineLoader pipelineLoader = new LogPipelineLoader(cConf);
  Map<String, LogPipelineSpecification<AppenderContext>> specs =
    pipelineLoader.load(new Provider<AppenderContext>() {
      @Override
      public AppenderContext get() {
        return new LocalAppenderContext(datasetFramework, txClient, locationFactory, metricsCollectionService);
      }
    });
  // Use the event delay as the sync interval
  long syncIntervalMillis = cConf.getLong(Constants.Logging.PIPELINE_EVENT_DELAY_MS);
  for (LogPipelineSpecification<AppenderContext> spec : specs.values()) {
    LogProcessorPipelineContext context =
      new LogProcessorPipelineContext(cConf, spec.getName(), spec.getContext(),
                                      spec.getContext().getMetricsContext(),
                                      spec.getContext().getInstanceId());
    LocalLogProcessorPipeline pipeline = new LocalLogProcessorPipeline(context, syncIntervalMillis);
    pipeline.startAndWait();
    pipelineThreads.add(pipeline.getAppenderThread());
    pipelines.add(pipeline);
  }
  super.start();
}
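The compareAndSet guard ensures the appender starts at most once; a matching stop() would normally reverse the work done here. The following is a minimal sketch of what that counterpart could look like, assuming the same started flag and pipelines list shown above (hypothetical; CDAP's actual LocalLogAppender may differ):

@Override
public void stop() {
  // Only stop once, mirroring the compareAndSet guard in start()
  if (!started.compareAndSet(true, false)) {
    return;
  }
  // Stop every pipeline that start() created; stopAndWait() blocks
  // until the Guava Service has terminated
  for (LocalLogProcessorPipeline pipeline : pipelines) {
    pipeline.stopAndWait();
  }
  super.stop();
}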
use of co.cask.cdap.logging.framework.LogPipelineSpecification in project cdap by caskdata.
the class DistributedLogFramework method createService.
@Override
protected Service createService(Set<Integer> partitions) {
  Map<String, LogPipelineSpecification<AppenderContext>> specs =
    new LogPipelineLoader(cConf).load(contextProvider);
  int pipelineCount = specs.size();
  // Create one KafkaLogProcessorPipeline per spec
  final List<Service> pipelines = new ArrayList<>();
  for (final LogPipelineSpecification<AppenderContext> pipelineSpec : specs.values()) {
    final CConfiguration cConf = pipelineSpec.getConf();
    final AppenderContext context = pipelineSpec.getContext();
    long bufferSize = getBufferSize(pipelineCount, cConf, partitions.size());
    final String topic = cConf.get(Constants.Logging.KAFKA_TOPIC);
    final KafkaPipelineConfig config = new KafkaPipelineConfig(
      topic, partitions, bufferSize,
      cConf.getLong(Constants.Logging.PIPELINE_EVENT_DELAY_MS),
      cConf.getInt(Constants.Logging.PIPELINE_KAFKA_FETCH_SIZE),
      cConf.getLong(Constants.Logging.PIPELINE_CHECKPOINT_INTERVAL_MS));
    RetryStrategy retryStrategy = RetryStrategies.fromConfiguration(cConf, "system.log.process.");
    pipelines.add(new RetryOnStartFailureService(new Supplier<Service>() {
      @Override
      public Service get() {
        return new KafkaLogProcessorPipeline(
          new LogProcessorPipelineContext(cConf, context.getName(), context,
                                          context.getMetricsContext(), context.getInstanceId()),
          checkpointManagerFactory.create(topic, pipelineSpec.getCheckpointPrefix()),
          brokerService, config);
      }
    }, retryStrategy));
  }
  // Returns a Service that starts/stops all pipelines
  return new AbstractIdleService() {
    @Override
    protected void startUp() throws Exception {
      // Start all pipelines
      validateAllFutures(Iterables.transform(pipelines, new Function<Service, ListenableFuture<State>>() {
        @Override
        public ListenableFuture<State> apply(Service service) {
          return service.start();
        }
      }));
    }
    @Override
    protected void shutDown() throws Exception {
      // Stop all pipelines
      validateAllFutures(Iterables.transform(pipelines, new Function<Service, ListenableFuture<State>>() {
        @Override
        public ListenableFuture<State> apply(Service service) {
          return service.stop();
        }
      }));
    }
  };
}
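Both startUp() and shutDown() above funnel their Service futures through validateAllFutures, which is defined elsewhere in DistributedLogFramework and not shown in this snippet. A minimal sketch of what such a helper could look like, built on Guava's Futures.allAsList (hypothetical; the actual CDAP helper may differ):

private static <T> void validateAllFutures(Iterable<? extends ListenableFuture<T>> futures) throws Exception {
  // allAsList yields a future that fails as soon as any input future fails,
  // so a blocking get() either waits for every pipeline to finish its state
  // transition or propagates the first failure as an ExecutionException
  Futures.allAsList(futures).get();
}

The getBufferSize call is likewise defined outside this snippet; judging from its arguments it presumably divides a configured total buffer among the pipelines and partitions, but that is an inference from the call site rather than shown code.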