Use of org.jclouds.compute.ComputeServiceContext in the project Whirr by Apache.
The class HadoopService, method launchCluster.
@Override
public HadoopCluster launchCluster(ClusterSpec clusterSpec) throws IOException {
  ComputeServiceContext computeServiceContext = ComputeServiceContextBuilder.build(clusterSpec);
  ComputeService computeService = computeServiceContext.getComputeService();

  // Launch Hadoop "master" (NN and JT)
  // TODO: deal with user packages and autoshutdown with extra runurls
  String hadoopInstallRunUrl = clusterSpec.getConfiguration().getString(
      "whirr.hadoop-install-runurl", "apache/hadoop/install");
  byte[] nnjtBootScript = RunUrlBuilder.runUrls(
      "sun/java/install",
      String.format("%s nn,jt -c %s", hadoopInstallRunUrl, clusterSpec.getProvider()));

  TemplateBuilder masterTemplateBuilder = computeService.templateBuilder()
      .osFamily(UBUNTU)
      .options(runScript(nnjtBootScript)
          .installPrivateKey(clusterSpec.readPrivateKey())
          .authorizePublicKey(clusterSpec.readPublicKey()));
  applyProviderSpecificTemplateOptions(clusterSpec, masterTemplateBuilder);
  Template masterTemplate = masterTemplateBuilder.build();

  InstanceTemplate instanceTemplate = clusterSpec.getInstanceTemplate(MASTER_ROLE);
  checkNotNull(instanceTemplate, "No instance template defined for role %s", MASTER_ROLE);
  checkArgument(instanceTemplate.getNumberOfInstances() == 1,
      "Exactly one master instance expected, but %s configured",
      instanceTemplate.getNumberOfInstances());

  Set<? extends NodeMetadata> nodes;
  try {
    nodes = computeService.runNodesWithTag(clusterSpec.getClusterName(), 1, masterTemplate);
  } catch (RunNodesException e) {
    // TODO: can we do better here (retry?)
    throw new IOException(e);
  }
  NodeMetadata node = Iterables.getOnlyElement(nodes);

  // NOTE(review): NN and JT currently run on the same (single) master node,
  // so both addresses are resolved from the same node and the inequality
  // guard below is effectively dead code. It is kept for a future split of
  // the namenode and jobtracker onto separate nodes.
  InetAddress namenodePublicAddress =
      InetAddress.getByName(Iterables.get(node.getPublicAddresses(), 0));
  InetAddress jobtrackerPublicAddress =
      InetAddress.getByName(Iterables.get(node.getPublicAddresses(), 0));

  // Open the web UIs to the world and the NN/JT service ports to the
  // master's own public address(es).
  FirewallSettings.authorizeIngress(computeServiceContext, node, clusterSpec, WEB_PORT);
  FirewallSettings.authorizeIngress(computeServiceContext, node, clusterSpec, NAMENODE_WEB_UI_PORT);
  FirewallSettings.authorizeIngress(computeServiceContext, node, clusterSpec, JOBTRACKER_WEB_UI_PORT);
  FirewallSettings.authorizeIngress(computeServiceContext, node, clusterSpec,
      namenodePublicAddress.getHostAddress(), NAMENODE_PORT);
  FirewallSettings.authorizeIngress(computeServiceContext, node, clusterSpec,
      namenodePublicAddress.getHostAddress(), JOBTRACKER_PORT);
  if (!namenodePublicAddress.equals(jobtrackerPublicAddress)) {
    FirewallSettings.authorizeIngress(computeServiceContext, node, clusterSpec,
        jobtrackerPublicAddress.getHostAddress(), NAMENODE_PORT);
    FirewallSettings.authorizeIngress(computeServiceContext, node, clusterSpec,
        jobtrackerPublicAddress.getHostAddress(), JOBTRACKER_PORT);
  }

  // Launch slaves (DN and TT), pointing them at the master's hostnames.
  byte[] slaveBootScript = RunUrlBuilder.runUrls(
      "sun/java/install",
      String.format("%s dn,tt -n %s -j %s", hadoopInstallRunUrl,
          namenodePublicAddress.getHostName(), jobtrackerPublicAddress.getHostName()));

  TemplateBuilder slaveTemplateBuilder = computeService.templateBuilder()
      .osFamily(UBUNTU)
      .options(runScript(slaveBootScript)
          .installPrivateKey(clusterSpec.readPrivateKey())
          .authorizePublicKey(clusterSpec.readPublicKey()));
  applyProviderSpecificTemplateOptions(clusterSpec, slaveTemplateBuilder);
  Template slaveTemplate = slaveTemplateBuilder.build();

  instanceTemplate = clusterSpec.getInstanceTemplate(WORKER_ROLE);
  checkNotNull(instanceTemplate, "No instance template defined for role %s", WORKER_ROLE);

  Set<? extends NodeMetadata> workerNodes;
  try {
    workerNodes = computeService.runNodesWithTag(clusterSpec.getClusterName(),
        instanceTemplate.getNumberOfInstances(), slaveTemplate);
  } catch (RunNodesException e) {
    // TODO: don't bail out if only a few have failed to start
    throw new IOException(e);
  }

  // TODO: wait for TTs to come up (done in test for the moment)
  Set<Instance> instances = Sets.union(
      getInstances(MASTER_ROLE, Collections.singleton(node)),
      getInstances(WORKER_ROLE, workerNodes));
  Properties config = createClientSideProperties(namenodePublicAddress, jobtrackerPublicAddress);
  return new HadoopCluster(instances, config);
}

/**
 * Applies provider-specific tweaks to a template builder. On EC2 this pins
 * a 32-bit Ubuntu 10.04 image from the official ubuntu-images bucket.
 *
 * TODO extract this logic elsewhere (shared with other services).
 */
private static void applyProviderSpecificTemplateOptions(ClusterSpec clusterSpec,
    TemplateBuilder templateBuilder) {
  if (clusterSpec.getProvider().equals("ec2")) {
    templateBuilder.imageNameMatches(".*10\\.?04.*")
        .osDescriptionMatches("^ubuntu-images.*")
        .architecture(Architecture.X86_32);
  }
}
Use of org.jclouds.compute.ComputeServiceContext in the project Whirr by Apache.
The class ZooKeeperService, method launchCluster.
@Override
public ZooKeeperCluster launchCluster(ClusterSpec clusterSpec) throws IOException {
  ComputeServiceContext context = ComputeServiceContextBuilder.build(clusterSpec);
  ComputeService compute = context.getComputeService();

  // Boot script: install Java, then ZooKeeper.
  byte[] bootScript = RunUrlBuilder.runUrls("sun/java/install", "apache/zookeeper/install");

  TemplateBuilder templateBuilder = compute.templateBuilder()
      .osFamily(UBUNTU)
      .options(runScript(bootScript)
          .installPrivateKey(clusterSpec.readPrivateKey())
          .authorizePublicKey(clusterSpec.readPublicKey()));
  // TODO extract this logic elsewhere
  if (clusterSpec.getProvider().equals("ec2")) {
    templateBuilder.imageNameMatches(".*10\\.?04.*")
        .osDescriptionMatches("^ubuntu-images.*")
        .architecture(Architecture.X86_32);
  }
  Template template = templateBuilder.build();

  InstanceTemplate instanceTemplate = clusterSpec.getInstanceTemplate(ZOOKEEPER_ROLE);
  checkNotNull(instanceTemplate);
  int ensembleSize = instanceTemplate.getNumberOfInstances();

  Set<? extends NodeMetadata> launched;
  try {
    launched = compute.runNodesWithTag(clusterSpec.getClusterName(), ensembleSize, template);
  } catch (RunNodesException e) {
    // TODO: can we do better here - proceed if ensemble is big enough?
    throw new IOException(e);
  }

  // Clients connect on CLIENT_PORT; open it on every ensemble member.
  FirewallSettings.authorizeIngress(context, launched, clusterSpec, CLIENT_PORT);

  List<NodeMetadata> nodes = Lists.newArrayList(launched);

  // Pass list of all servers in ensemble to configure script.
  // Position is significant: i-th server has id i.
  String servers = Joiner.on(' ').join(getPrivateIps(nodes));
  byte[] configureScript = RunUrlBuilder.runUrls("apache/zookeeper/post-configure " + servers);
  try {
    compute.runScriptOnNodesMatching(runningWithTag(clusterSpec.getClusterName()), configureScript);
  } catch (RunScriptOnNodesException e) {
    // TODO: retry
    throw new IOException(e);
  }

  String hosts = Joiner.on(',').join(getHosts(nodes));
  return new ZooKeeperCluster(getInstances(nodes), hosts);
}
Use of org.jclouds.compute.ComputeServiceContext in the project legacy-jclouds-examples by jclouds.
The class MainApp, method main.
/**
 * Prints CloudWatch metrics (past 24 hours) for every EC2 node visible to
 * the given AWS credentials.
 *
 * Usage: args[0] = AWS access key id, args[1] = AWS secret key.
 *
 * @throws IllegalArgumentException if fewer than {@code PARAMETERS} args are given
 */
public static void main(String[] args) {
  if (args.length < PARAMETERS) {
    throw new IllegalArgumentException(INVALID_SYNTAX);
  }
  // Arguments
  String accessKeyId = args[0];
  String secretKey = args[1];

  ComputeServiceContext awsEC2Context = null;
  RestContext<CloudWatchClient, CloudWatchAsyncClient> cloudWatchContext = null;
  try {
    cloudWatchContext = ContextBuilder.newBuilder(new AWSCloudWatchProviderMetadata())
        .credentials(accessKeyId, secretKey).build();
    awsEC2Context = ContextBuilder.newBuilder(new AWSEC2ProviderMetadata())
        .credentials(accessKeyId, secretKey).build(ComputeServiceContext.class);

    // Loop-invariant formatting helpers: create once, not once per node.
    DecimalFormat df = new DecimalFormat("#.##");
    String cpuUtilizationHeader = " CPU utilization statistics: ";

    // Get all nodes
    Set<? extends ComputeMetadata> allNodes = awsEC2Context.getComputeService().listNodes();
    for (ComputeMetadata node : allNodes) {
      String nodeId = node.getProviderId();
      String region = getRegion(node.getLocation());
      MetricClient metricClient = cloudWatchContext.getApi().getMetricClientForRegion(region);
      int metricsCount = getMetricsCountForInstance(cloudWatchContext.getApi(), region, nodeId);
      double[] cpuUtilization = getCPUUtilizationStatsForInstanceOverTheLast24Hours(metricClient, nodeId);
      System.out.println(nodeId + " CloudWatch Metrics (Past 24 hours)");
      System.out.println(" Total metrics stored: " + metricsCount);
      if (cpuUtilization == null) {
        System.out.println(cpuUtilizationHeader + "Unable to compute as there are no CPU utilization " + "metrics stored.");
      } else {
        System.out.println(cpuUtilizationHeader + df.format(cpuUtilization[0]) + "% (avg), " + df.format(cpuUtilization[1]) + "% (max), " + df.format(cpuUtilization[2]) + "% (min)");
      }
    }
  } finally {
    // Close whichever contexts were successfully opened above.
    if (awsEC2Context != null) {
      awsEC2Context.close();
    }
    if (cloudWatchContext != null) {
      cloudWatchContext.close();
    }
  }
}
Use of org.jclouds.compute.ComputeServiceContext in the project legacy-jclouds-examples by jclouds.
The class MainApp, method initController.
private static MinecraftController initController(String provider, String identity, String credential, String group) {
  // Minecraft daemon configuration, passed through as jclouds overrides.
  Properties props = new Properties();
  props.setProperty("minecraft.port", "25565");
  props.setProperty("minecraft.group", group);
  props.setProperty("minecraft.ms", "1024");
  props.setProperty("minecraft.mx", "1024");
  props.setProperty("minecraft.url", "https://s3.amazonaws.com/MinecraftDownload/launcher/minecraft_server.jar");

  if ("aws-ec2".equals(provider)) {
    // The minecraft download lives in S3 in us-east, so that region gives
    // the lowest latency.
    props.setProperty(PROPERTY_REGIONS, "us-east-1");
    props.setProperty("jclouds.ec2.ami-query", "owner-id=137112412989;state=available;image-type=machine");
    props.setProperty("jclouds.ec2.cc-ami-query", "");
  }

  // Example of injecting an ssh implementation, plus logging, enterprise
  // configuration, and our own daemon wiring module.
  Iterable<Module> modules = ImmutableSet.<Module>of(
      new SshjSshClientModule(),
      new SLF4JLoggingModule(),
      new EnterpriseConfigurationModule(),
      new ConfigureMinecraftDaemon());

  ContextBuilder contextBuilder = ContextBuilder.newBuilder(provider)
      .credentials(identity, credential)
      .modules(modules)
      .overrides(props);
  System.out.printf(">> initializing %s%n", contextBuilder.getApiMetadata());

  ComputeServiceContext context = contextBuilder.buildView(ComputeServiceContext.class);
  context.utils().eventBus().register(ScriptLogger.INSTANCE);
  return context.utils().injector().getInstance(MinecraftController.class);
}
Use of org.jclouds.compute.ComputeServiceContext in the project legacy-jclouds-examples by jclouds.
The class CloudServersPublish, method init.
private void init(List<String> args) {
  // The provider configures jclouds to use the Rackspace Cloud (US).
  // To use the Rackspace Cloud (UK) set the provider to
  // "rackspace-cloudservers-uk".
  String provider = "rackspace-cloudservers-us";

  String username = args.get(0);
  String apiKey = args.get(1);
  if (args.size() == 3) {
    numServers = Integer.valueOf(args.get(2));
  } else {
    numServers = 1;
  }

  Iterable<Module> modules = ImmutableSet.<Module>of(new SshjSshClientModule());

  // These properties control how often jclouds polls for a status update.
  Properties overrides = new Properties();
  overrides.setProperty(ComputeServiceProperties.POLL_INITIAL_PERIOD, Constants.POLL_PERIOD_TWENTY_SECONDS);
  overrides.setProperty(ComputeServiceProperties.POLL_MAX_PERIOD, Constants.POLL_PERIOD_TWENTY_SECONDS);

  ComputeServiceContext context = ContextBuilder.newBuilder(provider)
      .credentials(username, apiKey)
      .overrides(overrides)
      .modules(modules)
      .buildView(ComputeServiceContext.class);
  compute = context.getComputeService();
}
Aggregations