Example usage of io.cdap.cdap.runtime.spi.ssh.SSHKeyPair in the project cdap (by caskdata):
the createCluster method of the ExistingDataprocProvisioner class.
@Override
public Cluster createCluster(ProvisionerContext context) throws Exception {
  // Resolve provisioner properties for this run and build the Dataproc configuration.
  Map<String, String> props = createContextProperties(context);
  DataprocConf conf = DataprocConf.create(props);

  // Runtime monitoring over SSH requires both a user and a private key to be configured.
  if (context.getRuntimeMonitorType() == RuntimeMonitorType.SSH) {
    String sshUser = props.get(SSH_USER);
    String sshKey = props.get(SSH_KEY);
    if (Strings.isNullOrEmpty(sshUser) || Strings.isNullOrEmpty(sshKey)) {
      throw new DataprocRuntimeException("SSH User and key are required for monitoring through SSH.");
    }
    // Only the user matters for the public half here; the private key bytes back the pair.
    SSHKeyPair keyPair = new SSHKeyPair(new SSHPublicKey(sshUser, ""),
                                        () -> sshKey.getBytes(StandardCharsets.UTF_8));
    // The ssh context shouldn't be null, but protect it in case there is platform bug
    Optional.ofNullable(context.getSSHContext())
      .ifPresent(sshContext -> sshContext.setSSHKeyPair(keyPair));
  }

  String clusterName = props.get(CLUSTER_NAME);
  try (DataprocClient client = DataprocClient.fromConf(conf, false)) {
    // Best-effort: label the existing cluster with system labels; a failure here is non-fatal.
    try {
      client.updateClusterLabels(clusterName, getSystemLabels());
    } catch (DataprocRuntimeException e) {
      // Only log the stacktrace if trace log level is enabled
      if (LOG.isTraceEnabled()) {
        LOG.trace("Cannot update cluster labels due to {}", e.getMessage(), e);
      } else {
        LOG.debug("Cannot update cluster labels due to {}", e.getMessage());
      }
    }

    // The named cluster must exist and be RUNNING, since this provisioner never creates one.
    Optional<Cluster> running = client.getCluster(clusterName)
      .filter(c -> c.getStatus() == ClusterStatus.RUNNING);
    return running.orElseThrow(() -> new DataprocRuntimeException(
      "Dataproc cluster " + clusterName + " does not exist or not in running state."));
  }
}
Example usage of io.cdap.cdap.runtime.spi.ssh.SSHKeyPair in the project cdap (by caskdata):
the createCluster method of the ElasticMapReduceProvisioner class.
@Override
public Cluster createCluster(ProvisionerContext context) throws Exception {
  // Generate an ssh key pair for the 'ec2-user' user (EMR AMIs may alternatively use
  // 'hadoop') and register it on the context so later phases can connect to the cluster.
  SSHKeyPair sshKeyPair = context.getSSHContext().generate("ec2-user");
  context.getSSHContext().setSSHKeyPair(sshKeyPair);

  EMRConf conf = EMRConf.fromProvisionerContext(context);
  String clusterName = getClusterName(context.getProgramRunInfo());
  try (EMRClient client = EMRClient.fromConf(conf)) {
    // if it already exists, it means this is a retry. We can skip actually making the request
    Optional<ClusterSummary> existing = client.getUnterminatedClusterByName(clusterName);
    if (existing.isPresent()) {
      // Avoid a bare Optional.get(): the cluster could terminate between the lookup above
      // and this fetch, which would otherwise surface as an opaque NoSuchElementException.
      return client.getCluster(existing.get().getId())
        .orElseThrow(() -> new IllegalStateException(
          "Cluster " + clusterName + " no longer exists."));
    }

    // First attempt: request a brand new cluster; it starts in the CREATING state with
    // no nodes or properties known yet.
    String clusterId = client.createCluster(clusterName);
    return new Cluster(clusterId, ClusterStatus.CREATING, Collections.emptyList(), Collections.emptyMap());
  }
}
Example usage of io.cdap.cdap.runtime.spi.ssh.SSHKeyPair in the project cdap (by caskdata):
the fromProperties method of the RemoteHadoopConf class.
/**
 * Create the conf from a property map while also performing validation.
 *
 * @param properties the raw provisioner properties; 'host', 'user' and 'sshKey' are required
 * @return a validated {@link RemoteHadoopConf} holding the ssh credentials and optional settings
 */
public static RemoteHadoopConf fromProperties(Map<String, String> properties) {
  String host = getString(properties, "host");
  String user = getString(properties, "user");
  String privateKey = getString(properties, "sshKey");
  // The public-key content is not needed to connect to an existing cluster, so only
  // the user is carried in the public half; the private key bytes back the pair lazily.
  SSHPublicKey publicKey = new SSHPublicKey(user, "");
  SSHKeyPair keyPair = new SSHKeyPair(publicKey, () -> privateKey.getBytes(StandardCharsets.UTF_8));
  return new RemoteHadoopConf(keyPair, host,
                              properties.get("initializationAction"),
                              properties.get("kerberosPrincipal"),
                              properties.get("kerberosKeytabPath"));
}
Example usage of io.cdap.cdap.runtime.spi.ssh.SSHKeyPair in the project cdap (by caskdata):
the createDeprovisionTask method of the ProvisioningService class.
/**
 * Builds the runnable that tears down the cluster for a finished program run.
 *
 * @param taskInfo persisted state of the provisioning task, including provisioner properties
 *   and the program run it belongs to
 * @param provisioner the provisioner that created the cluster and will deprovision it
 * @param taskCleanup callback invoked with the program run id once the task is done
 *   (or has failed) so its persisted state can be removed
 * @return a runnable that submits the deprovision work to the task executor
 */
private Runnable createDeprovisionTask(ProvisioningTaskInfo taskInfo, Provisioner provisioner, Consumer<ProgramRunId> taskCleanup) {
Map<String, String> properties = taskInfo.getProvisionerProperties();
ProvisionerContext context;
// Best-effort: the ssh key pair is loaded up front so the provisioner context can use it;
// if loading fails we proceed without ssh rather than aborting the deprovision.
SSHKeyPair sshKeyPair = null;
try {
sshKeyPair = createSSHKeyPair(taskInfo);
} catch (IOException e) {
LOG.warn("Failed to load ssh key. No SSH key will be available for the deprovision task", e);
}
ProgramRunId programRunId = taskInfo.getProgramRunId();
Map<String, String> systemArgs = taskInfo.getProgramOptions().getArguments().asMap();
try {
SSHContext sshContext = new DefaultSSHContext(Networks.getAddress(cConf, Constants.NETWORK_PROXY_ADDRESS), null, sshKeyPair);
context = createContext(cConf, taskInfo.getProgramOptions(), programRunId, taskInfo.getUser(), properties, sshContext);
} catch (InvalidMacroException e) {
// If macros in the provisioner properties can no longer be evaluated, the cluster cannot
// be deprovisioned; mark the run as orphaned and return a no-op task.
runWithProgramLogging(programRunId, systemArgs, () -> LOG.error("Could not evaluate macros while deprovisoning. " + "The cluster will be marked as orphaned.", e));
provisionerNotifier.orphaned(programRunId);
return () -> {
};
}
// 300 is the retry-delay cap (seconds) for the deprovision task - TODO confirm against
// the DeprovisionTask constructor; consider extracting it to a named constant.
DeprovisionTask task = new DeprovisionTask(taskInfo, transactionRunner, 300, provisioner, context, provisionerNotifier, locationFactory);
ProvisioningTaskKey taskKey = new ProvisioningTaskKey(programRunId, ProvisioningOp.Type.DEPROVISION);
// The returned runnable submits a self-rescheduling callable: a non-negative return value
// is the delay before the next attempt, a negative value means the task is complete.
return () -> taskExecutor.submit(taskKey, () -> callWithProgramLogging(programRunId, systemArgs, () -> {
try {
long delay = task.executeOnce();
if (delay < 0) {
taskCleanup.accept(programRunId);
}
return delay;
} catch (InterruptedException e) {
// We can get interrupted if the task is cancelled or CDAP is stopped. In either case, just return.
// If it was cancelled, state cleanup is left to the caller. If it was CDAP master stopping, the task
// will be resumed on master startup
LOG.debug("Deprovision task for program run {} interrupted.", programRunId);
throw e;
} catch (Exception e) {
// Otherwise, if there was an error deprovisioning, run the cleanup
LOG.info("Deprovision task for program run {} failed.", programRunId, e);
taskCleanup.accept(programRunId);
throw e;
}
}));
}
Example usage of io.cdap.cdap.runtime.spi.ssh.SSHKeyPair in the project cdap (by caskdata):
the createSSHKeyPair method of the ProvisioningService class.
/**
 * Creates a {@link SSHKeyPair} based on the given {@link ProvisioningTaskInfo}.
 *
 * @param taskInfo the task info containing information about the ssh keys
 * @return a {@link SSHKeyPair} or {@code null} if ssh key information are not present in the task info
 */
@Nullable
private SSHKeyPair createSSHKeyPair(ProvisioningTaskInfo taskInfo) throws IOException {
  // Check if there is ssh user property in the Cluster
  Cluster cluster = taskInfo.getCluster();
  Map<String, String> clusterProps = cluster == null ? null : cluster.getProperties();
  String sshUser = clusterProps == null ? null : clusterProps.get(Constants.RuntimeMonitor.SSH_USER);
  if (sshUser == null) {
    return null;
  }
  // Both halves of the key pair must exist under the secure keys directory.
  Location keysDir = locationFactory.create(taskInfo.getSecureKeysDir());
  Location publicKey = keysDir.append(Constants.RuntimeMonitor.PUBLIC_KEY);
  Location privateKey = keysDir.append(Constants.RuntimeMonitor.PRIVATE_KEY);
  if (publicKey.exists() && privateKey.exists()) {
    return new LocationBasedSSHKeyPair(keysDir, sshUser);
  }
  return null;
}
Aggregations