Use of com.google.common.util.concurrent.Service in project cdap by caskdata.
From the class ResourceBalancerService, method createResourceHandler:
private ResourceHandler createResourceHandler(Discoverable discoverable) {
  return new ResourceHandler(discoverable) {

    private Service service;

    @Override
    public void onChange(Collection<PartitionReplica> partitionReplicas) {
      Set<Integer> partitions = Sets.newHashSet();
      for (PartitionReplica replica : partitionReplicas) {
        partitions.add(Integer.valueOf(replica.getName()));
      }
      LOG.info("Partitions changed {}, service: {}", partitions, serviceName);
      try {
        if (service != null) {
          service.stopAndWait();
        }
        if (partitions.isEmpty() || !election.isRunning()) {
          service = null;
        } else {
          service = createService(partitions);
          service.startAndWait();
        }
      } catch (Throwable t) {
        LOG.error("Failed to change partitions, service: {}.", serviceName, t);
        completion.setException(t);
        stop();
      }
    }

    @Override
    public void finished(Throwable failureCause) {
      try {
        if (service != null) {
          service.stopAndWait();
          service = null;
        }
        completion.set(null);
      } catch (Throwable t) {
        LOG.error("Exception when stopping service {}", service, t);
        Throwable cause = failureCause == null ? t : failureCause;
        if (cause != t) {
          cause.addSuppressed(t);
        }
        // Report the primary cause (with t suppressed), not just t.
        completion.setException(cause);
        // No need to call stop, as this callback only happens during shutdown of this service.
      }
    }
  };
}
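The handler above swaps the running Service whenever the partition assignment changes: stop the old one, then start a replacement built for the new partition set. Below is a minimal, self-contained sketch of that swap pattern; the PartitionedWorker name is hypothetical, and it uses the current Guava API (startAsync()/awaitRunning()) where CDAP's older Guava used startAndWait()/stopAndWait().

import com.google.common.collect.ImmutableSet;
import com.google.common.util.concurrent.AbstractIdleService;
import com.google.common.util.concurrent.Service;

import java.util.Set;

public class ServiceSwapExample {

  // Hypothetical worker Service that serves a fixed partition assignment.
  static class PartitionedWorker extends AbstractIdleService {
    private final Set<Integer> partitions;

    PartitionedWorker(Set<Integer> partitions) {
      this.partitions = partitions;
    }

    @Override
    protected void startUp() {
      System.out.println("Serving partitions " + partitions);
    }

    @Override
    protected void shutDown() {
      System.out.println("Released partitions " + partitions);
    }
  }

  private Service service;

  // Equivalent of onChange above: replace the running Service with one for the new assignment.
  synchronized void onAssignmentChange(Set<Integer> partitions) {
    if (service != null) {
      service.stopAsync().awaitTerminated();   // stopAndWait() in pre-15 Guava
      service = null;
    }
    if (!partitions.isEmpty()) {
      service = new PartitionedWorker(partitions);
      service.startAsync().awaitRunning();     // startAndWait() in pre-15 Guava
    }
  }

  public static void main(String[] args) {
    ServiceSwapExample balancer = new ServiceSwapExample();
    balancer.onAssignmentChange(ImmutableSet.of(1, 2));
    balancer.onAssignmentChange(ImmutableSet.of(3));
    balancer.onAssignmentChange(ImmutableSet.<Integer>of());
  }
}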
Use of com.google.common.util.concurrent.Service in project cdap by caskdata.
From the class ServiceProgramRunner, method run:
@Override
public ProgramController run(Program program, ProgramOptions options) {
  int instanceId = Integer.parseInt(options.getArguments().getOption(ProgramOptionConstants.INSTANCE_ID, "-1"));
  Preconditions.checkArgument(instanceId >= 0, "Missing instance Id");
  int instanceCount = Integer.parseInt(options.getArguments().getOption(ProgramOptionConstants.INSTANCES, "0"));
  Preconditions.checkArgument(instanceCount > 0, "Invalid or missing instance count");
  RunId runId = ProgramRunners.getRunId(options);
  ApplicationSpecification appSpec = program.getApplicationSpecification();
  Preconditions.checkNotNull(appSpec, "Missing application specification.");
  ProgramType programType = program.getType();
  Preconditions.checkNotNull(programType, "Missing processor type.");
  Preconditions.checkArgument(programType == ProgramType.SERVICE, "Only Service process type is supported.");
  ServiceSpecification spec = appSpec.getServices().get(program.getName());
  String host = options.getArguments().getOption(ProgramOptionConstants.HOST);
  Preconditions.checkArgument(host != null, "No hostname is provided");

  // Set up the dataset framework context, if required.
  if (datasetFramework instanceof ProgramContextAware) {
    ProgramId programId = program.getId();
    ((ProgramContextAware) datasetFramework).setContext(new BasicProgramContext(programId.run(runId)));
  }

  final PluginInstantiator pluginInstantiator = createPluginInstantiator(options, program.getClassLoader());
  try {
    ServiceHttpServer component = new ServiceHttpServer(host, program, options, cConf, spec, instanceId,
                                                        instanceCount, serviceAnnouncer, metricsCollectionService,
                                                        datasetFramework, txClient, discoveryServiceClient,
                                                        pluginInstantiator, secureStore, secureStoreManager,
                                                        messagingService, defaultArtifactManager);

    // Add a service listener to make sure the plugin instantiator is closed when the HTTP server is finished.
    component.addListener(new ServiceListenerAdapter() {
      @Override
      public void terminated(Service.State from) {
        Closeables.closeQuietly(pluginInstantiator);
      }

      @Override
      public void failed(Service.State from, Throwable failure) {
        Closeables.closeQuietly(pluginInstantiator);
      }
    }, Threads.SAME_THREAD_EXECUTOR);

    ProgramController controller = new ServiceProgramControllerAdapter(component, program.getId(), runId,
                                                                       spec.getName() + "-" + instanceId);
    component.start();
    return controller;
  } catch (Throwable t) {
    Closeables.closeQuietly(pluginInstantiator);
    throw t;
  }
}
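The listener registered above guarantees cleanup on both normal termination and failure of the service. A minimal sketch of the same idea in plain Guava follows; Service.Listener, whose callbacks default to no-ops, plays the role of CDAP's ServiceListenerAdapter, and MoreExecutors.directExecutor() the role of Threads.SAME_THREAD_EXECUTOR.

import com.google.common.util.concurrent.AbstractIdleService;
import com.google.common.util.concurrent.MoreExecutors;
import com.google.common.util.concurrent.Service;

public class CleanupListenerExample {

  public static void main(String[] args) {
    // Stand-in for the ServiceHttpServer in the excerpt.
    Service server = new AbstractIdleService() {
      @Override
      protected void startUp() {
        System.out.println("server started");
      }

      @Override
      protected void shutDown() {
        System.out.println("server stopped");
      }
    };

    // Close resources on both normal termination and failure, as the excerpt
    // does for its PluginInstantiator.
    server.addListener(new Service.Listener() {
      @Override
      public void terminated(Service.State from) {
        System.out.println("cleanup after normal stop from " + from);
      }

      @Override
      public void failed(Service.State from, Throwable failure) {
        System.out.println("cleanup after failure in state " + from);
      }
    }, MoreExecutors.directExecutor());

    server.startAsync().awaitRunning();
    server.stopAsync().awaitTerminated();
  }
}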
Use of com.google.common.util.concurrent.Service in project cdap by caskdata.
From the class AppFabricTestHelper, method getInjector:
public static synchronized Injector getInjector(CConfiguration conf, @Nullable SConfiguration sConf, Module overrides) {
  if (injector == null) {
    configuration = conf;
    try {
      configuration.set(Constants.CFG_LOCAL_DATA_DIR, TEMP_FOLDER.newFolder("data").getAbsolutePath());
    } catch (IOException e) {
      // TemporaryFolder.newFolder declares IOException, which this method cannot throw.
      throw Throwables.propagate(e);
    }
    configuration.set(Constants.AppFabric.REST_PORT, Integer.toString(Networks.getRandomPort()));
    configuration.setBoolean(Constants.Dangerous.UNRECOVERABLE_RESET, true);
    injector = Guice.createInjector(Modules.override(new AppFabricTestModule(configuration, sConf)).with(overrides));
    if (configuration.getBoolean(Constants.Security.ENABLED)
        && configuration.getBoolean(Constants.Security.Authorization.ENABLED)) {
      injector.getInstance(AuthorizationBootstrapper.class).run();
    }
    MessagingService messagingService = injector.getInstance(MessagingService.class);
    if (messagingService instanceof Service) {
      ((Service) messagingService).startAndWait();
    }
    injector.getInstance(TransactionManager.class).startAndWait();
    injector.getInstance(DatasetOpExecutor.class).startAndWait();
    injector.getInstance(DatasetService.class).startAndWait();
    injector.getInstance(StreamCoordinatorClient.class).startAndWait();
    injector.getInstance(NotificationService.class).startAndWait();
    injector.getInstance(MetricsCollectionService.class).startAndWait();
    Scheduler programScheduler = injector.getInstance(Scheduler.class);
    if (programScheduler instanceof Service) {
      ((Service) programScheduler).startAndWait();
    }
  }
  return injector;
}
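Some bindings here (MessagingService, Scheduler) are interfaces whose implementations may or may not also be Guava Services, so the helper checks instanceof Service before managing their lifecycle. Below is a sketch of that defensive pattern with a simplified, hypothetical Messaging interface standing in for CDAP's.

import com.google.common.util.concurrent.AbstractIdleService;
import com.google.common.util.concurrent.Service;

public class ConditionalStartExample {

  // Simplified stand-in for an interface like CDAP's MessagingService, whose
  // implementations may or may not also be Guava Services.
  interface Messaging {
    void publish(String message);
  }

  static class LocalMessaging extends AbstractIdleService implements Messaging {
    @Override
    public void publish(String message) {
      System.out.println("publish: " + message);
    }

    @Override
    protected void startUp() {
      System.out.println("messaging started");
    }

    @Override
    protected void shutDown() {
      System.out.println("messaging stopped");
    }
  }

  public static void main(String[] args) {
    Messaging messaging = new LocalMessaging();

    // Only lifecycle-manage the binding if it is actually a Service.
    if (messaging instanceof Service) {
      ((Service) messaging).startAsync().awaitRunning();
    }
    messaging.publish("hello");
    if (messaging instanceof Service) {
      ((Service) messaging).stopAsync().awaitTerminated();
    }
  }
}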
Use of com.google.common.util.concurrent.Service in project cdap by caskdata.
From the class SparkRuntimeContextProvider, method createIfNotExists:
/**
 * Creates a singleton {@link SparkRuntimeContext}.
 * It assumes the file locations that were localized by the SparkRuntimeService.
 */
private static synchronized SparkRuntimeContext createIfNotExists() {
  if (sparkRuntimeContext != null) {
    return sparkRuntimeContext;
  }
  try {
    CConfiguration cConf = createCConf();
    Configuration hConf = createHConf();
    SparkRuntimeContextConfig contextConfig = new SparkRuntimeContextConfig(hConf);

    // This should only run in YARN mode, and only on an executor node, never on the driver node.
    Preconditions.checkState(!contextConfig.isLocal() && Boolean.parseBoolean(System.getenv("SPARK_YARN_MODE")),
                             "SparkContextProvider.getSparkContext should only be called in Spark executor process.");

    // Create the program
    Program program = createProgram(cConf, contextConfig);
    Injector injector = createInjector(cConf, hConf, contextConfig.getProgramId(), contextConfig.getProgramOptions());
    final Service logAppenderService =
      new LogAppenderService(injector.getInstance(LogAppenderInitializer.class), contextConfig.getProgramOptions());
    final ZKClientService zkClientService = injector.getInstance(ZKClientService.class);
    final KafkaClientService kafkaClientService = injector.getInstance(KafkaClientService.class);
    final MetricsCollectionService metricsCollectionService = injector.getInstance(MetricsCollectionService.class);
    final StreamCoordinatorClient streamCoordinatorClient = injector.getInstance(StreamCoordinatorClient.class);

    // Use a shutdown hook to stop the services. This class should only be loaded from the system classloader
    // of the Spark executor, so there is exactly one instance. Without an orderly shutdown,
    // some logs and metrics might be lost.
    Services.chainStart(logAppenderService, zkClientService, kafkaClientService,
                        metricsCollectionService, streamCoordinatorClient);
    Runtime.getRuntime().addShutdownHook(new Thread() {
      @Override
      public void run() {
        // The logger may already have been shut down; use System.out/err instead.
        System.out.println("Shutting down SparkClassLoader services");
        Future<List<ListenableFuture<Service.State>>> future =
          Services.chainStop(logAppenderService, streamCoordinatorClient, metricsCollectionService,
                             kafkaClientService, zkClientService);
        try {
          List<ListenableFuture<Service.State>> futures = future.get(5, TimeUnit.SECONDS);
          System.out.println("SparkClassLoader services shutdown completed: " + futures);
        } catch (Exception e) {
          System.err.println("Exception when shutting down services");
          e.printStackTrace(System.err);
        }
      }
    });

    // Construct the DatasetFramework
    DatasetFramework datasetFramework = injector.getInstance(DatasetFramework.class);
    WorkflowProgramInfo workflowInfo = contextConfig.getWorkflowProgramInfo();
    DatasetFramework programDatasetFramework = workflowInfo == null
      ? datasetFramework
      : NameMappedDatasetFramework.createFromWorkflowProgramInfo(datasetFramework, workflowInfo,
                                                                 contextConfig.getApplicationSpecification());

    // Set up the dataset framework context, if required.
    if (programDatasetFramework instanceof ProgramContextAware) {
      ProgramRunId programRunId = program.getId().run(ProgramRunners.getRunId(contextConfig.getProgramOptions()));
      ((ProgramContextAware) programDatasetFramework).setContext(new BasicProgramContext(programRunId));
    }

    PluginInstantiator pluginInstantiator = createPluginInstantiator(cConf, contextConfig, program.getClassLoader());

    // Create the context object
    sparkRuntimeContext = new SparkRuntimeContext(
      contextConfig.getConfiguration(), program, contextConfig.getProgramOptions(), cConf, getHostname(),
      injector.getInstance(TransactionSystemClient.class), programDatasetFramework,
      injector.getInstance(DiscoveryServiceClient.class), metricsCollectionService,
      injector.getInstance(StreamAdmin.class), contextConfig.getWorkflowProgramInfo(), pluginInstantiator,
      injector.getInstance(SecureStore.class), injector.getInstance(SecureStoreManager.class),
      injector.getInstance(AuthorizationEnforcer.class), injector.getInstance(AuthenticationContext.class),
      injector.getInstance(MessagingService.class));
    LoggingContextAccessor.setLoggingContext(sparkRuntimeContext.getLoggingContext());
    return sparkRuntimeContext;
  } catch (Exception e) {
    throw Throwables.propagate(e);
  }
}
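Services.chainStart/chainStop here are helpers from the Twill/CDAP stack rather than Guava itself: they start (and stop) a list of services in order and return futures over their states. Plain Guava covers the same need with ServiceManager. The sketch below shows the same start-everything-then-stop-on-shutdown pattern under that assumption; the service names are illustrative only.

import com.google.common.collect.ImmutableList;
import com.google.common.util.concurrent.AbstractIdleService;
import com.google.common.util.concurrent.Service;
import com.google.common.util.concurrent.ServiceManager;

import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;

public class ChainedServicesExample {

  // Builds a trivial named Service for demonstration.
  static Service named(final String name) {
    return new AbstractIdleService() {
      @Override
      protected void startUp() {
        System.out.println(name + " started");
      }

      @Override
      protected void shutDown() {
        System.out.println(name + " stopped");
      }

      @Override
      protected String serviceName() {
        return name;
      }
    };
  }

  public static void main(String[] args) {
    // Start everything up front, as the excerpt does with Services.chainStart.
    final ServiceManager manager = new ServiceManager(
        ImmutableList.of(named("zk"), named("kafka"), named("metrics")));
    manager.startAsync().awaitHealthy();

    // Stop everything from a shutdown hook, bounded by a timeout, as the
    // excerpt does with Services.chainStop and future.get(5, SECONDS).
    Runtime.getRuntime().addShutdownHook(new Thread() {
      @Override
      public void run() {
        try {
          manager.stopAsync().awaitStopped(5, TimeUnit.SECONDS);
          System.out.println("services shutdown completed");
        } catch (TimeoutException e) {
          // The logger may already be gone in a shutdown hook; use stderr.
          System.err.println("Timed out shutting down services");
        }
      }
    });
  }
}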
Use of com.google.common.util.concurrent.Service in project cdap by caskdata.
From the class SparkProgramRunner, method run:
@Override
public ProgramController run(Program program, ProgramOptions options) {
  // Get the RunId first; it is used when creating the ClassLoader-closing thread.
  Arguments arguments = options.getArguments();
  RunId runId = ProgramRunners.getRunId(options);
  Deque<Closeable> closeables = new LinkedList<>();
  try {
    // Extract and verify parameters
    ApplicationSpecification appSpec = program.getApplicationSpecification();
    Preconditions.checkNotNull(appSpec, "Missing application specification.");
    ProgramType processorType = program.getType();
    Preconditions.checkNotNull(processorType, "Missing processor type.");
    Preconditions.checkArgument(processorType == ProgramType.SPARK, "Only Spark process type is supported.");
    SparkSpecification spec = appSpec.getSpark().get(program.getName());
    Preconditions.checkNotNull(spec, "Missing SparkSpecification for %s", program.getName());
    String host = options.getArguments().getOption(ProgramOptionConstants.HOST);
    Preconditions.checkArgument(host != null, "No hostname is provided");

    // Get the WorkflowProgramInfo if the program was started by a Workflow.
    WorkflowProgramInfo workflowInfo = WorkflowProgramInfo.create(arguments);
    DatasetFramework programDatasetFramework = workflowInfo == null
      ? datasetFramework
      : NameMappedDatasetFramework.createFromWorkflowProgramInfo(datasetFramework, workflowInfo, appSpec);

    // Set up the dataset framework context, if required.
    if (programDatasetFramework instanceof ProgramContextAware) {
      ProgramId programId = program.getId();
      ((ProgramContextAware) programDatasetFramework).setContext(new BasicProgramContext(programId.run(runId)));
    }

    PluginInstantiator pluginInstantiator = createPluginInstantiator(options, program.getClassLoader());
    if (pluginInstantiator != null) {
      closeables.addFirst(pluginInstantiator);
    }

    SparkRuntimeContext runtimeContext = new SparkRuntimeContext(
      new Configuration(hConf), program, options, cConf, host, txClient, programDatasetFramework,
      discoveryServiceClient, metricsCollectionService, streamAdmin, workflowInfo, pluginInstantiator,
      secureStore, secureStoreManager, authorizationEnforcer, authenticationContext, messagingService);
    closeables.addFirst(runtimeContext);

    Spark spark;
    try {
      spark = new InstantiatorFactory(false).get(TypeToken.of(program.<Spark>getMainClass())).create();
    } catch (Exception e) {
      LOG.error("Failed to instantiate Spark class for {}", spec.getClassName(), e);
      throw Throwables.propagate(e);
    }

    SparkSubmitter submitter = SparkRuntimeContextConfig.isLocal(hConf)
      ? new LocalSparkSubmitter()
      : new DistributedSparkSubmitter(hConf, locationFactory, host, runtimeContext,
                                      options.getArguments().getOption(Constants.AppFabric.APP_SCHEDULER_QUEUE));

    Service sparkRuntimeService = new SparkRuntimeService(cConf, spark, getPluginArchive(options),
                                                          runtimeContext, submitter);
    sparkRuntimeService.addListener(createRuntimeServiceListener(program.getId(), runId, arguments,
                                                                 options.getUserArguments(), closeables,
                                                                 runtimeStore),
                                    Threads.SAME_THREAD_EXECUTOR);
    ProgramController controller = new SparkProgramController(sparkRuntimeService, runtimeContext);

    LOG.debug("Starting Spark Job. Context: {}", runtimeContext);
    if (SparkRuntimeContextConfig.isLocal(hConf) || UserGroupInformation.isSecurityEnabled()) {
      sparkRuntimeService.start();
    } else {
      ProgramRunners.startAsUser(cConf.get(Constants.CFG_HDFS_USER), sparkRuntimeService);
    }
    return controller;
  } catch (Throwable t) {
    closeAll(closeables);
    throw Throwables.propagate(t);
  }
}
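The Deque<Closeable> with addFirst gives LIFO cleanup: resources acquired later are closed first when startup fails. A small self-contained sketch of that pattern follows; the closeAll helper below is a hypothetical stand-in for the method the excerpt calls.

import java.io.Closeable;
import java.io.IOException;
import java.util.Deque;
import java.util.LinkedList;

public class LifoCleanupExample {

  public static void main(String[] args) {
    Deque<Closeable> closeables = new LinkedList<>();
    try {
      // Each acquired resource is pushed to the front of the deque...
      closeables.addFirst(() -> System.out.println("closing A"));
      closeables.addFirst(() -> System.out.println("closing B"));
      // ...so when startup fails, B closes before A (reverse acquisition order).
      throw new IllegalStateException("startup failed");
    } catch (Throwable t) {
      closeAll(closeables);
      throw t;
    }
  }

  // Hypothetical equivalent of the excerpt's closeAll helper:
  // best-effort close, iterating in deque (LIFO) order.
  private static void closeAll(Deque<Closeable> closeables) {
    for (Closeable closeable : closeables) {
      try {
        closeable.close();
      } catch (IOException e) {
        System.err.println("Failed to close " + closeable + ": " + e);
      }
    }
  }
}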