Example use of com.google.common.util.concurrent.ListenableFuture in the CDAP project (caskdata).
Class: ProgramLifecycleService — method: issueStop.
/**
 * Issues a stop command for the given run (or all runs) of the given {@link ProgramId} and
 * returns the {@link ListenableFuture}s carrying the {@link ProgramController} for each
 * stopped run. Clients can block on the futures to wait for the stops to complete.
 *
 * @param programId the {@link ProgramId program} to issue a stop for
 * @param runId the runId of the program run to stop. If null, all runs of the program as returned by
 * {@link ProgramRuntimeService} are stopped.
 * @return a list of {@link ListenableFuture} with a {@link ProgramController} that clients can wait on for stop
 * to complete.
 * @throws NotFoundException if the app, program or run was not found
 * @throws BadRequestException if an attempt is made to stop a program that is either not running or
 * was started by a workflow
 * @throws UnauthorizedException if the user issuing the command is not authorized to stop the program. To stop a
 * program, a user requires {@link Action#EXECUTE} permission on the program.
 */
public List<ListenableFuture<ProgramController>> issueStop(ProgramId programId, @Nullable String runId) throws Exception {
  authorizationEnforcer.enforce(programId, authenticationContext.getPrincipal(), Action.EXECUTE);
  List<ProgramRuntimeService.RuntimeInfo> activeRuns = findRuntimeInfo(programId, runId);
  if (activeRuns.isEmpty()) {
    // Nothing to stop: report the most specific cause we can determine, from the
    // broadest (missing app) down to the narrowest (specific run not found).
    if (!store.applicationExists(programId.getParent())) {
      throw new ApplicationNotFoundException(programId.getParent());
    }
    if (!store.programExists(programId)) {
      throw new ProgramNotFoundException(programId);
    }
    if (runId == null) {
      throw new BadRequestException(String.format("Program '%s' is not running.", programId));
    }
    ProgramRunId programRunId = programId.run(runId);
    // A run launched by a Workflow must be stopped via the Workflow itself.
    RunRecordMeta runRecord = store.getRun(programId, runId);
    if (runRecord != null
        && ProgramRunStatus.RUNNING.equals(runRecord.getStatus())
        && runRecord.getProperties().containsKey("workflowrunid")) {
      String workflowRunId = runRecord.getProperties().get("workflowrunid");
      throw new BadRequestException(String.format("Cannot stop the program '%s' started by the Workflow " + "run '%s'. Please stop the Workflow.", programRunId, workflowRunId));
    }
    throw new NotFoundException(programRunId);
  }
  // Issue a stop on every matching run; callers wait on the returned futures.
  List<ListenableFuture<ProgramController>> stopFutures = new ArrayList<>();
  for (ProgramRuntimeService.RuntimeInfo runInfo : activeRuns) {
    stopFutures.add(runInfo.getController().stop());
  }
  return stopFutures;
}
Example use of com.google.common.util.concurrent.ListenableFuture in the CDAP project (caskdata).
Class: ProgramLifecycleService — method: stop.
/**
 * Stops the specified run (or all runs) of the specified program and blocks until the
 * stop requests have completed, aggregating any failures into a single exception.
 *
 * @param programId the {@link ProgramId program} to stop
 * @param runId the runId of the program run to stop. If null, all runs of the program as returned by
 * {@link ProgramRuntimeService} are stopped.
 * @throws NotFoundException if the app, program or run was not found
 * @throws BadRequestException if an attempt is made to stop a program that is either not running or
 * was started by a workflow
 * @throws InterruptedException if there was a problem while waiting for the stop call to complete
 * @throws ExecutionException if one or more runs failed to stop
 */
public void stop(ProgramId programId, @Nullable String runId) throws Exception {
  List<ListenableFuture<ProgramController>> stopFutures = issueStop(programId, runId);
  // Wait for every stop request to finish first; successfulAsList never throws
  // ExecutionException, so individual failures are inspected afterwards.
  Futures.successfulAsList(stopFutures).get();
  Throwable firstFailure = null;
  for (ListenableFuture<ProgramController> stopFuture : stopFutures) {
    try {
      stopFuture.get();
    } catch (ExecutionException e) {
      Throwable cause = e.getCause();
      // An IllegalStateException may be thrown here, which can be safely ignored.
      if (cause instanceof IllegalStateException) {
        continue;
      }
      // Keep the first real failure and attach the rest as suppressed exceptions.
      if (firstFailure == null) {
        firstFailure = cause;
      } else {
        firstFailure.addSuppressed(cause);
      }
    }
  }
  if (firstFailure != null) {
    throw new ExecutionException(String.format("%d out of %d runs of the program %s failed to stop", firstFailure.getSuppressed().length + 1, stopFutures.size(), programId), firstFailure);
  }
}
Example use of com.google.common.util.concurrent.ListenableFuture in the CDAP project (caskdata).
Class: AbstractHBaseTableUtilTest — method: testListAllInNamespace.
@Test
public void testListAllInNamespace() throws Exception {
  HBaseTableUtil tableUtil = getTableUtil();
  String hBaseFooNamespace = String.format("%s_foo", getPrefix());
  // CDAP-level table ids in the 'foo' namespace, and their HBase-level counterparts.
  Set<TableId> cdapFooTableIds = ImmutableSet.of(
      TableId.from("foo", "some.table1"),
      TableId.from("foo", "other.table"),
      TableId.from("foo", "some.table2"));
  Set<TableId> hBaseFooTableIds = ImmutableSet.of(
      TableId.from(hBaseFooNamespace, "some.table1"),
      TableId.from(hBaseFooNamespace, "other.table"),
      TableId.from(hBaseFooNamespace, "some.table2"));
  createNamespace("foo");
  createNamespace("foo_bar");
  // One extra table in a *different* namespace whose name shares the 'foo' prefix,
  // to verify that per-namespace listing does not match by prefix.
  TableId otherNamespaceTableId = TableId.from("foo_bar", "my.dataset");
  TableId otherNamespaceHTableId = TableId.from(String.format("%s_foo_bar", getPrefix()), "my.dataset");
  // Kick off all table creations concurrently and wait for them to finish.
  List<ListenableFuture<TableId>> pendingCreates = new ArrayList<>();
  for (TableId tableId : cdapFooTableIds) {
    pendingCreates.add(createAsync(tableId));
  }
  pendingCreates.add(createAsync(otherNamespaceTableId));
  Futures.allAsList(pendingCreates).get(60, TimeUnit.SECONDS);
  // Listing within 'foo' must return exactly the three 'foo' tables.
  Set<TableId> listedFooTableIds = ImmutableSet.copyOf(tableUtil.listTablesInNamespace(hAdmin, tableUtil.getHBaseNamespace(new NamespaceId("foo"))));
  Assert.assertEquals(hBaseFooTableIds, listedFooTableIds);
  // Listing everything must also include the 'foo_bar' table.
  Set<TableId> expectedAllTableIds = ImmutableSet.<TableId>builder().addAll(hBaseFooTableIds).add(otherNamespaceHTableId).build();
  Assert.assertEquals(expectedAllTableIds, ImmutableSet.copyOf(tableUtil.listTables(hAdmin)));
  Assert.assertEquals(4, hAdmin.listTables().length);
  // Deleting everything in 'foo' must leave only the 'foo_bar' table behind.
  tableUtil.deleteAllInNamespace(ddlExecutor, tableUtil.getHBaseNamespace(new NamespaceId("foo")), hAdmin.getConfiguration());
  Assert.assertEquals(1, hAdmin.listTables().length);
  drop(otherNamespaceTableId);
  Assert.assertEquals(0, hAdmin.listTables().length);
  deleteNamespace("foo_bar");
}
Example use of com.google.common.util.concurrent.ListenableFuture in the CDAP project (caskdata).
Class: InMemoryProgramRuntimeService — method: stopAllPrograms.
/**
 * Stops every running program managed by this service, blocking up to 60 seconds for
 * all stop requests to complete. Failures and timeouts are logged but never propagated,
 * since this runs during shutdown and the instance will exit regardless.
 */
private void stopAllPrograms() {
  LOG.info("Stopping all running programs.");
  // ArrayList instead of LinkedList: the list is only appended to and then iterated,
  // so a linked structure buys nothing and costs extra allocation per element.
  List<ListenableFuture<ProgramController>> futures = Lists.newArrayList();
  for (ProgramType type : ProgramType.values()) {
    for (Map.Entry<RunId, RuntimeInfo> entry : list(type).entrySet()) {
      RuntimeInfo runtimeInfo = entry.getValue();
      if (isRunning(runtimeInfo.getProgramId())) {
        futures.add(runtimeInfo.getController().stop());
      }
    }
  }
  // unchecked because we cannot do much if it fails. We will still shutdown the standalone CDAP instance.
  try {
    Futures.successfulAsList(futures).get(60, TimeUnit.SECONDS);
    LOG.info("All programs have been stopped.");
  } catch (ExecutionException e) {
    // note this should not happen because we wait on a successfulAsList
    LOG.warn("Got exception while waiting for all programs to stop", e.getCause());
  } catch (InterruptedException e) {
    LOG.warn("Got interrupted exception while waiting for all programs to stop", e);
    // Preserve the interrupt status for callers further up the shutdown path.
    Thread.currentThread().interrupt();
  } catch (TimeoutException e) {
    // can't do much more than log it. We still want to exit.
    LOG.warn("Timeout while waiting for all programs to stop.");
  }
}
Example use of com.google.common.util.concurrent.ListenableFuture in the CDAP project (caskdata).
Class: SparkRuntimeContextProvider — method: createIfNotExists.
/**
 * Creates the singleton {@link SparkRuntimeContext}, or returns the existing one.
 * It assumes the configuration and program files are at the locations localized by the
 * SparkRuntimeService, and must only be called inside a Spark executor process (enforced below).
 * Thread-safe via the {@code synchronized} on this static method.
 */
private static synchronized SparkRuntimeContext createIfNotExists() {
// Fast path: already initialized. Safe because the whole method is synchronized.
if (sparkRuntimeContext != null) {
return sparkRuntimeContext;
}
try {
// Build the CDAP and Hadoop configurations from the localized files.
CConfiguration cConf = createCConf();
Configuration hConf = createHConf();
SparkRuntimeContextConfig contextConfig = new SparkRuntimeContextConfig(hConf);
// Should be yarn only and only for executor node, not the driver node.
Preconditions.checkState(!contextConfig.isLocal() && Boolean.parseBoolean(System.getenv("SPARK_YARN_MODE")), "SparkContextProvider.getSparkContext should only be called in Spark executor process.");
// Create the program
Program program = createProgram(cConf, contextConfig);
Injector injector = createInjector(cConf, hConf, contextConfig.getProgramId(), contextConfig.getProgramOptions());
// Services the runtime context depends on; started below, stopped from the shutdown hook.
final Service logAppenderService = new LogAppenderService(injector.getInstance(LogAppenderInitializer.class), contextConfig.getProgramOptions());
final ZKClientService zkClientService = injector.getInstance(ZKClientService.class);
final KafkaClientService kafkaClientService = injector.getInstance(KafkaClientService.class);
final MetricsCollectionService metricsCollectionService = injector.getInstance(MetricsCollectionService.class);
final StreamCoordinatorClient streamCoordinatorClient = injector.getInstance(StreamCoordinatorClient.class);
// Use the shutdown hook to shutdown services, since this class should only be loaded from System classloader
// of the spark executor, hence there should be exactly one instance only.
// The problem with not shutting down nicely is that some logs/metrics might be lost
Services.chainStart(logAppenderService, zkClientService, kafkaClientService, metricsCollectionService, streamCoordinatorClient);
Runtime.getRuntime().addShutdownHook(new Thread() {
@Override
public void run() {
// The logger may already been shutdown. Use System.out/err instead
System.out.println("Shutting SparkClassLoader services");
// Stop in reverse order of start; give each chain at most 5 seconds total.
Future<List<ListenableFuture<Service.State>>> future = Services.chainStop(logAppenderService, streamCoordinatorClient, metricsCollectionService, kafkaClientService, zkClientService);
try {
List<ListenableFuture<Service.State>> futures = future.get(5, TimeUnit.SECONDS);
System.out.println("SparkClassLoader services shutdown completed: " + futures);
} catch (Exception e) {
// Best effort: swallow and report, since the JVM is exiting anyway.
System.err.println("Exception when shutting down services");
e.printStackTrace(System.err);
}
}
});
// Construct the DatasetFramework
DatasetFramework datasetFramework = injector.getInstance(DatasetFramework.class);
WorkflowProgramInfo workflowInfo = contextConfig.getWorkflowProgramInfo();
// When running inside a Workflow, wrap the framework so dataset names are mapped
// to the Workflow's local datasets; otherwise use the plain framework.
DatasetFramework programDatasetFramework = workflowInfo == null ? datasetFramework : NameMappedDatasetFramework.createFromWorkflowProgramInfo(datasetFramework, workflowInfo, contextConfig.getApplicationSpecification());
// Setup dataset framework context, if required
if (programDatasetFramework instanceof ProgramContextAware) {
ProgramRunId programRunId = program.getId().run(ProgramRunners.getRunId(contextConfig.getProgramOptions()));
((ProgramContextAware) programDatasetFramework).setContext(new BasicProgramContext(programRunId));
}
PluginInstantiator pluginInstantiator = createPluginInstantiator(cConf, contextConfig, program.getClassLoader());
// Create the context object
sparkRuntimeContext = new SparkRuntimeContext(contextConfig.getConfiguration(), program, contextConfig.getProgramOptions(), cConf, getHostname(), injector.getInstance(TransactionSystemClient.class), programDatasetFramework, injector.getInstance(DiscoveryServiceClient.class), metricsCollectionService, injector.getInstance(StreamAdmin.class), contextConfig.getWorkflowProgramInfo(), pluginInstantiator, injector.getInstance(SecureStore.class), injector.getInstance(SecureStoreManager.class), injector.getInstance(AuthorizationEnforcer.class), injector.getInstance(AuthenticationContext.class), injector.getInstance(MessagingService.class));
// Make the context's logging context current so subsequent logs are attributed to this run.
LoggingContextAccessor.setLoggingContext(sparkRuntimeContext.getLoggingContext());
return sparkRuntimeContext;
} catch (Exception e) {
// Rethrow as unchecked: callers cannot recover from a failed executor-side bootstrap.
throw Throwables.propagate(e);
}
}
End of aggregated ListenableFuture usage examples.