Use of io.cdap.cdap.runtime.spi.runtimejob.RuntimeJobManager in the project cdap by caskdata:
the createPreparer method of the RemoteExecutionTwillRunnerService class.
/**
 * Creates a {@link TwillPreparer} for launching the given program run remotely.
 * <p>
 * Secret files (SSH proxy password, internal-auth runtime token) are generated under the
 * run's keys directory and handed to the preparer for localization. If the provisioner
 * supplies a {@link RuntimeJobManager}, a {@link RuntimeJobTwillPreparer} is returned;
 * otherwise launching falls back to SSH via {@link RemoteExecutionTwillPreparer}.
 */
private TwillPreparer createPreparer(CConfiguration cConf, Configuration hConf, ProgramRunId programRunId,
                                     ProgramOptions programOpts, TwillSpecification twillSpec,
                                     LocationCache locationCache, TwillControllerFactory controllerFactory) {
  // Per-run directory holding generated secret key files.
  Location keysDir = getKeysDirLocation(programOpts, locationFactory);

  // Secret files that must be shipped to the remote runtime, keyed by localized name.
  Map<String, Location> secrets = new HashMap<>();
  if (SystemArguments.getRuntimeMonitorType(cConf, programOpts) == RuntimeMonitorType.SSH) {
    secrets.put(Constants.RuntimeMonitor.SERVICE_PROXY_PASSWORD_FILE,
                generateAndSaveServiceProxySecret(programRunId, keysDir));
  }
  if (SecurityUtil.isInternalAuthEnabled(cConf)) {
    secrets.put(Constants.Security.Authentication.RUNTIME_TOKEN_FILE,
                generateAndSaveRuntimeToken(programRunId, keysDir));
  }

  // Prefer the provisioner-supplied RuntimeJobManager when one is available.
  if (provisioningService.getRuntimeJobManager(programRunId, programOpts).isPresent()) {
    return new RuntimeJobTwillPreparer(
        cConf, hConf, twillSpec, programRunId, programOpts, secrets, locationCache,
        locationFactory, controllerFactory,
        () -> provisioningService.getRuntimeJobManager(programRunId, programOpts)
            .orElseThrow(IllegalStateException::new));
  }

  // No RuntimeJobManager — launch over SSH instead.
  ClusterKeyInfo keyInfo = new ClusterKeyInfo(cConf, programOpts, locationFactory);
  return new RemoteExecutionTwillPreparer(cConf, hConf, keyInfo.getCluster(), keyInfo.getSSHConfig(),
                                          secrets, twillSpec, programRunId, programOpts,
                                          locationCache, locationFactory, controllerFactory);
}
Use of io.cdap.cdap.runtime.spi.runtimejob.RuntimeJobManager in the project cdap by caskdata:
the launch method of the RuntimeJobTwillPreparer class.
@Override
protected void launch(TwillRuntimeSpecification twillRuntimeSpec, RuntimeSpecification runtimeSpec,
                      JvmOptions jvmOptions, Map<String, String> environments,
                      Map<String, LocalFile> localFiles, TimeoutChecker timeoutChecker) throws Exception {
  // The RuntimeJobManager is AutoCloseable; try-with-resources guarantees release
  // whether the launch succeeds or throws.
  try (RuntimeJobManager jobManager = jobManagerSupplier.get()) {
    timeoutChecker.throwIfTimeout();

    // Start from the files Twill already wants localized, then add each secret file.
    Map<String, LocalFile> filesToLocalize = new HashMap<>(localFiles);
    for (Map.Entry<String, Location> entry : secretFiles.entrySet()) {
      String name = entry.getKey();
      Location location = entry.getValue();
      filesToLocalize.put(name,
          new DefaultLocalFile(name, location.toURI(), location.lastModified(),
                               location.length(), false, null));
    }

    RuntimeJobInfo jobInfo = createRuntimeJobInfo(runtimeSpec, filesToLocalize,
        jvmOptions.getRunnableExtraOptions(runtimeSpec.getName()));
    LOG.info("Starting runnable {} for runId {} with job manager.", runtimeSpec.getName(), getProgramRunId());
    // Hand the prepared job off to the job manager for the actual remote launch.
    jobManager.launch(jobInfo);
  }
}
Use of io.cdap.cdap.runtime.spi.runtimejob.RuntimeJobManager in the project cdap by caskdata:
the deleteClusterWithStatus method of the AbstractDataprocProvisioner class.
/**
 * Deletes the given cluster unless its program run is still active.
 * <p>
 * When a {@link RuntimeJobManager} is available, the job's status is consulted first:
 * if the job has not reached a terminal state, the cluster is left alone and
 * {@link ClusterStatus#RUNNING} is returned. Otherwise the run's files under the GCS
 * bucket are removed before the cluster deletion is issued.
 *
 * @return {@link ClusterStatus#RUNNING} if the job is still active,
 *         {@link ClusterStatus#DELETING} once deletion has been initiated
 * @throws Exception if status lookup, GCS cleanup, or cluster deletion fails
 */
@Override
public final ClusterStatus deleteClusterWithStatus(ProvisionerContext context, Cluster cluster) throws Exception {
  Map<String, String> properties = createContextProperties(context);
  DataprocConf conf = DataprocConf.create(properties);
  RuntimeJobManager jobManager = getRuntimeJobManager(context).orElse(null);
  if (jobManager != null) {
    // try-with-resources replaces the manual try/finally: the manager is closed
    // even if getDetail throws or we return early because the job is still running.
    try (RuntimeJobManager manager = jobManager) {
      RuntimeJobDetail jobDetail = manager.getDetail(context.getProgramRunInfo()).orElse(null);
      if (jobDetail != null && !jobDetail.getStatus().isTerminated()) {
        // Job is still active — do not tear the cluster down yet.
        return ClusterStatus.RUNNING;
      }
    }
    // Also cleanup files created by the job run.
    Storage storageClient = StorageOptions.newBuilder()
        .setProjectId(conf.getProjectId())
        .setCredentials(conf.getDataprocCredentials())
        .build()
        .getService();
    DataprocUtils.deleteGCSPath(storageClient, properties.get(BUCKET),
        DataprocUtils.CDAP_GCS_ROOT + "/" + context.getProgramRunInfo().getRun());
  }
  doDeleteCluster(context, cluster, conf);
  return ClusterStatus.DELETING;
}
Aggregations