Use of org.apache.whirr.Cluster.Instance in project whirr by apache.
The class HadoopClusterActionHandler, method getDeviceMappings:
protected Map<String, String> getDeviceMappings(ClusterActionEvent event) {
  Set<Instance> instances = event.getCluster().getInstancesMatching(RolePredicates.role(getRole()));
  Instance prototype = Iterables.getFirst(instances, null);
  if (prototype == null) {
    throw new IllegalStateException("No instances found in role " + getRole());
  }
  VolumeManager volumeManager = new VolumeManager();
  return volumeManager.getDeviceMappings(event.getClusterSpec(), prototype);
}
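For illustration, a minimal sketch of how a caller might turn those mappings into mount commands. The method name asMountScript is hypothetical, and the assumption that map keys are mount points and values are device names is not confirmed by the snippet above:

// Hypothetical helper, not Whirr code: renders the device mappings as mount commands.
// Assumption: map keys are mount points and map values are device names.
static String asMountScript(Map<String, String> deviceMappings) {
  StringBuilder script = new StringBuilder();
  for (Map.Entry<String, String> mapping : deviceMappings.entrySet()) {
    script.append("mount ")
        .append(mapping.getValue())  // device, e.g. /dev/sdb (assumed)
        .append(' ')
        .append(mapping.getKey())    // mount point, e.g. /data0 (assumed)
        .append('\n');
  }
  return script.toString();
}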
Use of org.apache.whirr.Cluster.Instance in project whirr by apache.
The class HadoopJobTrackerClusterActionHandler, method doBeforeConfigure:
@Override
protected void doBeforeConfigure(ClusterActionEvent event) throws IOException {
  Cluster cluster = event.getCluster();
  Instance jobtracker = cluster.getInstanceMatching(role(ROLE));
  event.getFirewallManager().addRules(
      Rule.create()
          .destination(jobtracker)
          .ports(HadoopCluster.JOBTRACKER_WEB_UI_PORT),
      Rule.create()
          .source(HadoopCluster.getNamenodePublicAddress(cluster).getHostAddress())
          .destination(jobtracker)
          .ports(HadoopCluster.JOBTRACKER_PORT));
}
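As a simpler variant of the same FirewallManager pattern, the sketch below opens only the jobtracker web UI port to any source. The method name doOpenWebUiPort is illustrative; the types and constants are the ones already used in the handler above:

// Illustrative sketch only: a single firewall rule built with the same Rule builder.
protected void doOpenWebUiPort(ClusterActionEvent event) throws IOException {
  Instance jobtracker = event.getCluster().getInstanceMatching(role(ROLE));
  event.getFirewallManager().addRules(
      Rule.create()
          .destination(jobtracker)
          .ports(HadoopCluster.JOBTRACKER_WEB_UI_PORT));
}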
Use of org.apache.whirr.Cluster.Instance in project whirr by apache.
The class BootstrapClusterAction, method doAction:
@Override
protected void doAction(Map<InstanceTemplate, ClusterActionEvent> eventMap)
    throws IOException, InterruptedException {
  LOG.info("Bootstrapping cluster");
  ExecutorService executorService = Executors.newCachedThreadPool();
  Map<InstanceTemplate, Future<Set<? extends NodeMetadata>>> futures = Maps.newHashMap();
  // initialize one startup process per InstanceTemplate
  for (Entry<InstanceTemplate, ClusterActionEvent> entry : eventMap.entrySet()) {
    final InstanceTemplate instanceTemplate = entry.getKey();
    final ClusterSpec clusterSpec = entry.getValue().getClusterSpec();
    final int maxNumberOfRetries = clusterSpec.getMaxStartupRetries();
    StatementBuilder statementBuilder = entry.getValue().getStatementBuilder();
    ComputeServiceContext computeServiceContext = getCompute().apply(clusterSpec);
    final ComputeService computeService = computeServiceContext.getComputeService();
    final Template template = BootstrapTemplate.build(clusterSpec, computeService,
        statementBuilder, entry.getKey());
    Future<Set<? extends NodeMetadata>> nodesFuture = executorService.submit(
        new StartupProcess(clusterSpec.getClusterName(),
            instanceTemplate.getNumberOfInstances(),
            instanceTemplate.getMinNumberOfInstances(),
            maxNumberOfRetries,
            instanceTemplate.getRoles(),
            computeService, template, executorService, nodeStarterFactory));
    futures.put(instanceTemplate, nodesFuture);
  }
  Set<Instance> instances = Sets.newLinkedHashSet();
  for (Entry<InstanceTemplate, Future<Set<? extends NodeMetadata>>> entry : futures.entrySet()) {
    Set<? extends NodeMetadata> nodes;
    try {
      nodes = entry.getValue().get();
    } catch (ExecutionException e) {
      // the startup process could not provide the nodes, even after retries
      throw new IOException(e);
    }
    Set<String> roles = entry.getKey().getRoles();
    instances.addAll(getInstances(roles, nodes));
  }
  Cluster cluster = new Cluster(instances);
  for (ClusterActionEvent event : eventMap.values()) {
    event.setCluster(cluster);
  }
}
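The bootstrap action above follows a plain submit-then-join pattern: one startup task per instance template is submitted to the executor, and the resulting futures are collected afterwards. Below is a self-contained, JDK-only sketch of that pattern; the Callable body is a placeholder, not Whirr's StartupProcess:

// Plain-JDK sketch of the submit-then-join pattern used above.
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;

public class FanOutJoinSketch {
  public static void main(String[] args) throws Exception {
    ExecutorService executor = Executors.newCachedThreadPool();
    List<Future<String>> futures = new ArrayList<Future<String>>();
    for (int template = 0; template < 3; template++) {
      final int id = template;
      // Submit one task per template; in the action above this is a StartupProcess.
      futures.add(executor.submit(new Callable<String>() {
        @Override
        public String call() {
          return "nodes for template " + id;
        }
      }));
    }
    for (Future<String> future : futures) {
      try {
        System.out.println(future.get()); // block until each template finishes
      } catch (ExecutionException e) {
        // Mirror the action above: surface startup failures to the caller.
        throw new RuntimeException(e.getCause());
      }
    }
    executor.shutdown();
  }
}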
Use of org.apache.whirr.Cluster.Instance in project whirr by apache.
The class ByonClusterAction, method doAction:
@Override
protected void doAction(Map<InstanceTemplate, ClusterActionEvent> eventMap)
    throws IOException, InterruptedException {
  final Collection<Future<ExecResponse>> futures = Sets.newHashSet();
  List<NodeMetadata> nodes = Lists.newArrayList();
  List<NodeMetadata> usedNodes = Lists.newArrayList();
  int numberAllocated = 0;
  Set<Instance> allInstances = Sets.newLinkedHashSet();
  for (Entry<InstanceTemplate, ClusterActionEvent> entry : eventMap.entrySet()) {
    final ClusterSpec clusterSpec = entry.getValue().getClusterSpec();
    final StatementBuilder statementBuilder = entry.getValue().getStatementBuilder();
    if (statementBuilder.isEmpty()) {
      // nothing to execute for this template; skip it
      continue;
    }
    final ComputeServiceContext computeServiceContext = getCompute().apply(clusterSpec);
    final ComputeService computeService = computeServiceContext.getComputeService();
    LoginCredentials credentials = LoginCredentials.builder()
        .user(clusterSpec.getClusterUser())
        .privateKey(clusterSpec.getPrivateKey())
        .build();
    final RunScriptOptions options = overrideLoginCredentials(credentials);
    if (numberAllocated == 0) {
      for (ComputeMetadata compute : computeService.listNodes()) {
        if (!(compute instanceof NodeMetadata)) {
          throw new IllegalArgumentException("Not an instance of NodeMetadata: " + compute);
        }
        nodes.add((NodeMetadata) compute);
      }
    }
    int num = entry.getKey().getNumberOfInstances();
    Predicate<NodeMetadata> unused = not(in(usedNodes));
    // TODO: This seems very fragile and a bug. It is not required that someone passes a hardware id,
    // so this is likely to break badly. Even if there was, why do we assume it is splittable?!
    // This logic should be refactored or removed ASAP.
    Predicate<NodeMetadata> instancePredicate = Predicates.alwaysTrue();
    if (entry.getKey().getTemplate() != null) {
      String hardwareId = entry.getKey().getTemplate().getHardwareId();
      if (hardwareId != null) {
        instancePredicate = new TagsPredicate(StringUtils.split(hardwareId));
      }
    }
    List<NodeMetadata> templateNodes = Lists.newArrayList(filter(nodes, and(unused, instancePredicate)));
    if (templateNodes.size() < num) {
      LOG.warn("Not enough nodes available for template "
          + StringUtils.join(entry.getKey().getRoles(), "+"));
    }
    templateNodes = templateNodes.subList(0, num);
    usedNodes.addAll(templateNodes);
    numberAllocated = usedNodes.size();
    Set<Instance> templateInstances = getInstances(credentials, entry.getKey().getRoles(), templateNodes);
    allInstances.addAll(templateInstances);
    for (final Instance instance : templateInstances) {
      futures.add(runStatementOnInstanceInCluster(statementBuilder, instance, clusterSpec, options));
    }
  }
  for (Future<ExecResponse> future : futures) {
    try {
      future.get();
    } catch (ExecutionException e) {
      throw new IOException(e.getCause());
    }
  }
  if (action.equals(ClusterActionHandler.BOOTSTRAP_ACTION)) {
    Cluster cluster = new Cluster(allInstances);
    for (ClusterActionEvent event : eventMap.values()) {
      event.setCluster(cluster);
    }
  }
}
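The node-selection step in the middle of this action is ordinary Guava predicate composition: nodes already claimed by a previous template are excluded, and an optional tag predicate narrows the rest. A self-contained sketch of that filtering, using String stand-ins for NodeMetadata:

// Guava sketch of the "unused and matching" filter used above.
import static com.google.common.base.Predicates.and;
import static com.google.common.base.Predicates.in;
import static com.google.common.base.Predicates.not;
import static com.google.common.collect.Iterables.filter;

import com.google.common.base.Predicate;
import com.google.common.base.Predicates;
import com.google.common.collect.Lists;

import java.util.List;

public class NodeFilterSketch {
  public static void main(String[] args) {
    List<String> nodes = Lists.newArrayList("node-1", "node-2", "node-3");
    List<String> usedNodes = Lists.newArrayList("node-1");
    // Stand-in for TagsPredicate: accept everything, as the action does when no hardware id is set.
    Predicate<String> instancePredicate = Predicates.alwaysTrue();
    Predicate<String> unused = not(in(usedNodes));
    List<String> candidates = Lists.newArrayList(filter(nodes, and(unused, instancePredicate)));
    System.out.println(candidates); // prints [node-2, node-3]
  }
}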
Use of org.apache.whirr.Cluster.Instance in project whirr by apache.
The class Utils, method printSSHConnectionDetails:
/**
 * Prints the ssh commands that can be used to log in to the cluster nodes.
 *
 * @param out the stream to print to
 * @param clusterSpec the cluster specification
 * @param cluster the running cluster
 * @param maxPrint the maximum number of instances to print before truncating
 */
public static void printSSHConnectionDetails(PrintStream out, ClusterSpec clusterSpec,
    Cluster cluster, int maxPrint) {
  out.println("\nYou can log into instances using the following ssh commands:");
  String user = clusterSpec.getClusterUser() != null
      ? clusterSpec.getClusterUser()
      : clusterSpec.getTemplate().getLoginUser();
  String pkFile = clusterSpec.getPrivateKeyFile().getAbsolutePath();
  int counter = 0;
  for (Instance instance : cluster.getInstances()) {
    StringBuilder roles = new StringBuilder();
    for (String role : instance.getRoles()) {
      if (roles.length() != 0) {
        roles.append('+');
      }
      roles.append(role);
    }
    out.printf("[%s]: ssh -i %s -o \"UserKnownHostsFile /dev/null\" -o StrictHostKeyChecking=no %s@%s\n",
        roles.toString(), pkFile, user, instance.getPublicIp());
    if (counter > maxPrint) {
      out.println("... Too many instances, truncating.");
      break;
    }
    counter++;
  }
}
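A short, hypothetical call site for the helper above; spec and cluster are assumed to be an already-loaded ClusterSpec and a running Cluster, and the maxPrint value of 20 is arbitrary:

// Hypothetical usage; spec and cluster come from elsewhere in the application.
Utils.printSSHConnectionDetails(System.out, spec, cluster, 20);
// Prints one line per instance, for example (values illustrative):
// [hadoop-namenode+hadoop-jobtracker]: ssh -i /home/user/.ssh/id_rsa -o "UserKnownHostsFile /dev/null" -o StrictHostKeyChecking=no whirr@203.0.113.10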