
Example 6 with RunNodesException

use of org.jclouds.compute.RunNodesException in project legacy-jclouds-examples by jclouds.

the class NodeManager method createNodeWithAdminUserAndJDKInGroupOpeningPortAndMinRam.

public NodeMetadata createNodeWithAdminUserAndJDKInGroupOpeningPortAndMinRam(String group, int port, int minRam) {
    ImmutableMap<String, String> userMetadata = ImmutableMap.<String, String>of("Name", group);
    // we want everything as defaults except ram
    Template defaultTemplate = compute.templateBuilder().build();
    Template minecraft = compute.templateBuilder().fromTemplate(defaultTemplate).minRam(minRam).build();
    // set up the template to customize the node with the JDK, etc., also opening ports
    Statement bootstrap = newStatementList(AdminAccess.standard(), InstallJDK.fromOpenJDK());
    minecraft.getOptions().inboundPorts(22, port).userMetadata(userMetadata).runScript(bootstrap);
    // example of using a cloud-specific hook
    if (minecraft.getOptions() instanceof AWSEC2TemplateOptions)
        minecraft.getOptions().as(AWSEC2TemplateOptions.class).enableMonitoring();
    logger.info(">> creating node type(%s) in group %s, opening ports 22, %s with admin user and jdk", minecraft.getHardware().getId(), group, port);
    try {
        NodeMetadata node = getOnlyElement(compute.createNodesInGroup(group, 1, minecraft));
        logger.info("<< available node(%s) os(%s) publicAddresses%s", node.getId(), node.getOperatingSystem(), node.getPublicAddresses());
        return node;
    } catch (RunNodesException e) {
        throw destroyBadNodesAndPropagate(e);
    }
}
Also used : NodeMetadata(org.jclouds.compute.domain.NodeMetadata) RunNodesException(org.jclouds.compute.RunNodesException) Statement(org.jclouds.scriptbuilder.domain.Statement) AWSEC2TemplateOptions(org.jclouds.aws.ec2.compute.AWSEC2TemplateOptions) Template(org.jclouds.compute.domain.Template)
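
The catch block above delegates to destroyBadNodesAndPropagate, which is not shown in this excerpt. A minimal sketch of what such a helper might look like, assuming the enclosing NodeManager keeps the same ComputeService field named compute used above and that Guava's Throwables and java.util.Map are imported; the actual helper in legacy-jclouds-examples may differ:

private RuntimeException destroyBadNodesAndPropagate(RunNodesException e) {
    // RunNodesException maps each node that failed to start to the cause of its failure,
    // so tear those nodes down before surfacing the error to the caller
    for (Map.Entry<? extends NodeMetadata, ? extends Throwable> nodeError : e.getNodeErrors().entrySet())
        compute.destroyNode(nodeError.getKey().getId());
    // Throwables.propagate rethrows the original exception wrapped as a RuntimeException
    throw Throwables.propagate(e);
}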

Example 7 with RunNodesException

use of org.jclouds.compute.RunNodesException in project gora by apache.

the class ChefSoftwareProvisioning method performChefComputeServiceBootstrapping.

private static void performChefComputeServiceBootstrapping(Properties properties) throws IOException, InstantiationException, IllegalAccessException {
    // Get the credentials that will be used to authenticate to the Chef server
    String rsContinent = DataStoreFactory.findProperty(properties, MemStore.class.newInstance(), RS_CONTINENT, "rackspace-cloudservers-us");
    String rsUser = DataStoreFactory.findProperty(properties, MemStore.class.newInstance(), RS_USERNAME, "asf-gora");
    String rsApiKey = DataStoreFactory.findProperty(properties, MemStore.class.newInstance(), RS_APIKEY, null);
    String rsRegion = DataStoreFactory.findProperty(properties, MemStore.class.newInstance(), RS_REGION, "DFW");
    String client = DataStoreFactory.findProperty(properties, MemStore.class.newInstance(), CHEF_CLIENT, System.getProperty("user.name"));
    String organization = DataStoreFactory.findProperty(properties, MemStore.class.newInstance(), CHEF_ORGANIZATION, null);
    String pemFile = System.getProperty("user.home") + "/.chef/" + client + ".pem";
    String credential = Files.toString(new File(pemFile), Charsets.UTF_8);
    // Provide the validator information so the nodes can auto-register themselves
    // with the Chef server during bootstrap
    String validator = organization + "-validator";
    String validatorPemFile = System.getProperty("user.home") + "/.chef/" + validator + ".pem";
    String validatorCredential = Files.toString(new File(validatorPemFile), Charsets.UTF_8);
    Properties chefConfig = new Properties();
    chefConfig.put(ChefProperties.CHEF_VALIDATOR_NAME, validator);
    chefConfig.put(ChefProperties.CHEF_VALIDATOR_CREDENTIAL, validatorCredential);
    // Create the connection to the Chef server
    ChefContext chefContext = ContextBuilder.newBuilder("chef").endpoint("https://api.opscode.com/organizations/" + organization).credentials(client, credential).overrides(chefConfig).buildView(ChefContext.class);
    // Create the connection to the compute provider. Note that ssh will be used to bootstrap chef
    ComputeServiceContext computeContext = ContextBuilder.newBuilder(rsContinent).endpoint(rsRegion).credentials(rsUser, rsApiKey).modules(ImmutableSet.<Module>of(new SshjSshClientModule())).buildView(ComputeServiceContext.class);
    // Group all nodes in both Chef and the compute provider by this group
    String group = "jclouds-chef-goraci";
    // Set the recipe to install and the configuration values to override
    String recipe = "apache2";
    JsonBall attributes = new JsonBall("{\"apache\": {\"listen_ports\": \"8080\"}}");
    // Check to see if the recipe you want exists
    List<String> runlist = null;
    Iterable<? extends CookbookVersion> cookbookVersions = chefContext.getChefService().listCookbookVersions();
    if (any(cookbookVersions, CookbookVersionPredicates.containsRecipe(recipe))) {
        runlist = new RunListBuilder().addRecipe(recipe).build();
    }
    for (Iterator<String> iterator = runlist.iterator(); iterator.hasNext(); ) {
        String string = (String) iterator.next();
        LOG.info(string);
    }
    // Update the chef service with the run list you wish to apply to all nodes in the group
    // and also provide the json configuration used to customize the desired values
    BootstrapConfig config = BootstrapConfig.builder().runList(runlist).attributes(attributes).build();
    chefContext.getChefService().updateBootstrapConfigForGroup(group, config);
    // Build the script that will bootstrap the node
    Statement bootstrap = chefContext.getChefService().createBootstrapScriptForGroup(group);
    TemplateBuilder templateBuilder = computeContext.getComputeService().templateBuilder();
    templateBuilder.options(runScript(bootstrap));
    // Run a node on the compute provider that bootstraps chef
    try {
        Set<? extends NodeMetadata> nodes = computeContext.getComputeService().createNodesInGroup(group, 1, templateBuilder.build());
        for (NodeMetadata nodeMetadata : nodes) {
            LOG.info("<< node %s: %s%n", nodeMetadata.getId(), concat(nodeMetadata.getPrivateAddresses(), nodeMetadata.getPublicAddresses()));
        }
    } catch (RunNodesException e) {
        throw new RuntimeException(e.getMessage());
    }
    // Release resources
    chefContext.close();
    computeContext.close();
}
Also used : Statement(org.jclouds.scriptbuilder.domain.Statement) BootstrapConfig(org.jclouds.chef.domain.BootstrapConfig) TemplateBuilder(org.jclouds.compute.domain.TemplateBuilder) ComputeServiceContext(org.jclouds.compute.ComputeServiceContext) Properties(java.util.Properties) ChefProperties(org.jclouds.chef.config.ChefProperties) NodeMetadata(org.jclouds.compute.domain.NodeMetadata) ChefContext(org.jclouds.chef.ChefContext) MemStore(org.apache.gora.memory.store.MemStore) RunNodesException(org.jclouds.compute.RunNodesException) SshjSshClientModule(org.jclouds.sshj.config.SshjSshClientModule) RunListBuilder(org.jclouds.chef.util.RunListBuilder) Module(com.google.inject.Module) SshjSshClientModule(org.jclouds.sshj.config.SshjSshClientModule) JsonBall(org.jclouds.domain.JsonBall) File(java.io.File)
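
Two details of this example are worth calling out. First, if the requested recipe is not found, runlist stays null and the iteration over it throws a NullPointerException, so the lookup result should really be checked. Second, the catch block rethrows only e.getMessage(), discarding both the stack trace and the per-node failure details that RunNodesException carries. A hedged alternative for the catch block, reusing the LOG and group variables from the method above and assuming a java.util.Map import; this is an illustration, not the code that ships with Gora:

    } catch (RunNodesException e) {
        // Report what actually happened: the nodes that did start and the cause of each failure
        LOG.info("Nodes that started in group " + group + ": " + e.getSuccessfulNodes());
        for (Map.Entry<? extends NodeMetadata, ? extends Throwable> error : e.getNodeErrors().entrySet()) {
            LOG.info("Node " + error.getKey().getId() + " failed: " + error.getValue());
        }
        // Keep the original exception as the cause so the stack trace is preserved
        throw new RuntimeException("Bootstrapping group " + group + " failed", e);
    }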

Example 8 with RunNodesException

use of org.jclouds.compute.RunNodesException in project whirr by apache.

the class HadoopService method launchCluster.

@Override
public HadoopCluster launchCluster(ClusterSpec clusterSpec) throws IOException {
    ComputeServiceContext computeServiceContext = ComputeServiceContextBuilder.build(clusterSpec);
    ComputeService computeService = computeServiceContext.getComputeService();
    // Launch Hadoop "master" (NN and JT)
    // deal with user packages and autoshutdown with extra runurls
    String hadoopInstallRunUrl = clusterSpec.getConfiguration().getString("whirr.hadoop-install-runurl", "apache/hadoop/install");
    byte[] nnjtBootScript = RunUrlBuilder.runUrls("sun/java/install", String.format("%s nn,jt -c %s", hadoopInstallRunUrl, clusterSpec.getProvider()));
    TemplateBuilder masterTemplateBuilder = computeService.templateBuilder().osFamily(UBUNTU).options(runScript(nnjtBootScript).installPrivateKey(clusterSpec.readPrivateKey()).authorizePublicKey(clusterSpec.readPublicKey()));
    // TODO extract this logic elsewhere
    if (clusterSpec.getProvider().equals("ec2"))
        masterTemplateBuilder.imageNameMatches(".*10\\.?04.*").osDescriptionMatches("^ubuntu-images.*").architecture(Architecture.X86_32);
    Template masterTemplate = masterTemplateBuilder.build();
    InstanceTemplate instanceTemplate = clusterSpec.getInstanceTemplate(MASTER_ROLE);
    checkNotNull(instanceTemplate);
    checkArgument(instanceTemplate.getNumberOfInstances() == 1);
    Set<? extends NodeMetadata> nodes;
    try {
        nodes = computeService.runNodesWithTag(clusterSpec.getClusterName(), 1, masterTemplate);
    } catch (RunNodesException e) {
        // TODO: can we do better here (retry?)
        throw new IOException(e);
    }
    NodeMetadata node = Iterables.getOnlyElement(nodes);
    InetAddress namenodePublicAddress = InetAddress.getByName(Iterables.get(node.getPublicAddresses(), 0));
    InetAddress jobtrackerPublicAddress = InetAddress.getByName(Iterables.get(node.getPublicAddresses(), 0));
    FirewallSettings.authorizeIngress(computeServiceContext, node, clusterSpec, WEB_PORT);
    FirewallSettings.authorizeIngress(computeServiceContext, node, clusterSpec, NAMENODE_WEB_UI_PORT);
    FirewallSettings.authorizeIngress(computeServiceContext, node, clusterSpec, JOBTRACKER_WEB_UI_PORT);
    FirewallSettings.authorizeIngress(computeServiceContext, node, clusterSpec, namenodePublicAddress.getHostAddress(), NAMENODE_PORT);
    FirewallSettings.authorizeIngress(computeServiceContext, node, clusterSpec, namenodePublicAddress.getHostAddress(), JOBTRACKER_PORT);
    if (!namenodePublicAddress.equals(jobtrackerPublicAddress)) {
        FirewallSettings.authorizeIngress(computeServiceContext, node, clusterSpec, jobtrackerPublicAddress.getHostAddress(), NAMENODE_PORT);
        FirewallSettings.authorizeIngress(computeServiceContext, node, clusterSpec, jobtrackerPublicAddress.getHostAddress(), JOBTRACKER_PORT);
    }
    // Launch slaves (DN and TT)
    byte[] slaveBootScript = RunUrlBuilder.runUrls("sun/java/install", String.format("%s dn,tt -n %s -j %s", hadoopInstallRunUrl, namenodePublicAddress.getHostName(), jobtrackerPublicAddress.getHostName()));
    TemplateBuilder slaveTemplateBuilder = computeService.templateBuilder().osFamily(UBUNTU).options(runScript(slaveBootScript).installPrivateKey(clusterSpec.readPrivateKey()).authorizePublicKey(clusterSpec.readPublicKey()));
    // TODO extract this logic elsewhere
    if (clusterSpec.getProvider().equals("ec2"))
        slaveTemplateBuilder.imageNameMatches(".*10\\.?04.*").osDescriptionMatches("^ubuntu-images.*").architecture(Architecture.X86_32);
    Template slaveTemplate = slaveTemplateBuilder.build();
    instanceTemplate = clusterSpec.getInstanceTemplate(WORKER_ROLE);
    checkNotNull(instanceTemplate);
    Set<? extends NodeMetadata> workerNodes;
    try {
        workerNodes = computeService.runNodesWithTag(clusterSpec.getClusterName(), instanceTemplate.getNumberOfInstances(), slaveTemplate);
    } catch (RunNodesException e) {
        // TODO: don't bail out if only a few have failed to start
        throw new IOException(e);
    }
    // TODO: wait for TTs to come up (done in test for the moment)
    Set<Instance> instances = Sets.union(getInstances(MASTER_ROLE, Collections.singleton(node)), getInstances(WORKER_ROLE, workerNodes));
    Properties config = createClientSideProperties(namenodePublicAddress, jobtrackerPublicAddress);
    return new HadoopCluster(instances, config);
}
Also used : Instance(org.apache.whirr.service.Cluster.Instance) TemplateBuilder(org.jclouds.compute.domain.TemplateBuilder) ComputeServiceContext(org.jclouds.compute.ComputeServiceContext) IOException(java.io.IOException) Properties(java.util.Properties) ComputeService(org.jclouds.compute.ComputeService) Template(org.jclouds.compute.domain.Template) InstanceTemplate(org.apache.whirr.service.ClusterSpec.InstanceTemplate) NodeMetadata(org.jclouds.compute.domain.NodeMetadata) RunNodesException(org.jclouds.compute.RunNodesException) InetAddress(java.net.InetAddress) InstanceTemplate(org.apache.whirr.service.ClusterSpec.InstanceTemplate)
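
Both catch blocks above wrap the RunNodesException in an IOException and give up, even though some instances may already be running and will keep accruing cost. A sketch of a cleanup-before-rethrow variant for the worker launch, assuming the jclouds version in use exposes RunNodesException.getSuccessfulNodes() (current versions do) and reusing the computeService, clusterSpec, instanceTemplate and slaveTemplate variables from the method above:

    try {
        workerNodes = computeService.runNodesWithTag(clusterSpec.getClusterName(), instanceTemplate.getNumberOfInstances(), slaveTemplate);
    } catch (RunNodesException e) {
        // Destroy the workers that did start so a failed launch does not leave orphaned instances,
        // then surface the failure exactly as before
        for (NodeMetadata started : e.getSuccessfulNodes()) {
            computeService.destroyNode(started.getId());
        }
        throw new IOException(e);
    }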

Example 9 with RunNodesException

use of org.jclouds.compute.RunNodesException in project whirr by apache.

the class ZooKeeperService method launchCluster.

@Override
public ZooKeeperCluster launchCluster(ClusterSpec clusterSpec) throws IOException {
    ComputeServiceContext computeServiceContext = ComputeServiceContextBuilder.build(clusterSpec);
    ComputeService computeService = computeServiceContext.getComputeService();
    byte[] bootScript = RunUrlBuilder.runUrls("sun/java/install", "apache/zookeeper/install");
    TemplateBuilder templateBuilder = computeService.templateBuilder().osFamily(UBUNTU).options(runScript(bootScript).installPrivateKey(clusterSpec.readPrivateKey()).authorizePublicKey(clusterSpec.readPublicKey()));
    // TODO extract this logic elsewhere
    if (clusterSpec.getProvider().equals("ec2"))
        templateBuilder.imageNameMatches(".*10\\.?04.*").osDescriptionMatches("^ubuntu-images.*").architecture(Architecture.X86_32);
    Template template = templateBuilder.build();
    InstanceTemplate instanceTemplate = clusterSpec.getInstanceTemplate(ZOOKEEPER_ROLE);
    checkNotNull(instanceTemplate);
    int ensembleSize = instanceTemplate.getNumberOfInstances();
    Set<? extends NodeMetadata> nodeMap;
    try {
        nodeMap = computeService.runNodesWithTag(clusterSpec.getClusterName(), ensembleSize, template);
    } catch (RunNodesException e) {
        // TODO: can we do better here - proceed if ensemble is big enough?
        throw new IOException(e);
    }
    FirewallSettings.authorizeIngress(computeServiceContext, nodeMap, clusterSpec, CLIENT_PORT);
    List<NodeMetadata> nodes = Lists.newArrayList(nodeMap);
    // Pass list of all servers in ensemble to configure script.
    // Position is significant: i-th server has id i.
    String servers = Joiner.on(' ').join(getPrivateIps(nodes));
    byte[] configureScript = RunUrlBuilder.runUrls("apache/zookeeper/post-configure " + servers);
    try {
        computeService.runScriptOnNodesMatching(runningWithTag(clusterSpec.getClusterName()), configureScript);
    } catch (RunScriptOnNodesException e) {
        // TODO: retry
        throw new IOException(e);
    }
    String hosts = Joiner.on(',').join(getHosts(nodes));
    return new ZooKeeperCluster(getInstances(nodes), hosts);
}
Also used : TemplateBuilder(org.jclouds.compute.domain.TemplateBuilder) ComputeServiceContext(org.jclouds.compute.ComputeServiceContext) IOException(java.io.IOException) ComputeService(org.jclouds.compute.ComputeService) Template(org.jclouds.compute.domain.Template) InstanceTemplate(org.apache.whirr.service.ClusterSpec.InstanceTemplate) NodeMetadata(org.jclouds.compute.domain.NodeMetadata) RunNodesException(org.jclouds.compute.RunNodesException) RunScriptOnNodesException(org.jclouds.compute.RunScriptOnNodesException) InstanceTemplate(org.apache.whirr.service.ClusterSpec.InstanceTemplate)
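
The TODO in the catch block asks whether the launch could proceed when the ensemble is merely "big enough". One possible reading, sketched below: a ZooKeeper ensemble stays available as long as a majority of its servers are up, so the failure could be tolerated whenever more than half of the requested servers started. The majority threshold and the reassignment of nodeMap are illustrative choices, not Whirr's actual policy, and they assume getSuccessfulNodes() is available on the jclouds version in use:

    } catch (RunNodesException e) {
        // An ensemble of ensembleSize servers tolerates the loss of a minority,
        // so continue with the nodes that did start as long as they form a majority
        if (e.getSuccessfulNodes().size() > ensembleSize / 2) {
            nodeMap = e.getSuccessfulNodes();
        } else {
            throw new IOException(e);
        }
    }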

Example 10 with RunNodesException

use of org.jclouds.compute.RunNodesException in project hive by apache.

the class TestCloudExecutionContextProvider method setup.

@Before
public void setup() throws Exception {
    dataDir = baseDir.newFolder().getAbsolutePath();
    workingDir = baseDir.newFolder().getAbsolutePath();
    cloudComputeService = mock(CloudComputeService.class);
    sshCommandExecutor = new MockSSHCommandExecutor(LOG);
    node1 = mock(NodeMetadata.class);
    node2 = mock(NodeMetadata.class);
    node3 = mock(NodeMetadata.class);
    template = mock(Template.class);
    when(template.getLocation()).thenReturn(mock(Location.class));
    when(template.getImage()).thenReturn(mock(Image.class));
    when(template.getHardware()).thenReturn(mock(Hardware.class));
    when(node1.getHostname()).thenReturn("node1");
    when(node1.getPublicAddresses()).thenReturn(Collections.singleton("1.1.1.1"));
    when(node2.getHostname()).thenReturn("node2");
    when(node2.getPublicAddresses()).thenReturn(Collections.singleton("1.1.1.2"));
    when(node3.getHostname()).thenReturn("node3");
    when(node3.getPublicAddresses()).thenReturn(Collections.singleton("1.1.1.3"));
    runNodesException = new RunNodesException("", 2, template, Collections.singleton(node1), Collections.<String, Exception>emptyMap(), Collections.singletonMap(node2, new Exception("For testing")));
}
Also used : NodeMetadata(org.jclouds.compute.domain.NodeMetadata) MockSSHCommandExecutor(org.apache.hive.ptest.execution.MockSSHCommandExecutor) RunNodesException(org.jclouds.compute.RunNodesException) Hardware(org.jclouds.compute.domain.Hardware) Image(org.jclouds.compute.domain.Image) RunNodesException(org.jclouds.compute.RunNodesException) Template(org.jclouds.compute.domain.Template) Location(org.jclouds.domain.Location) Before(org.junit.Before)
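
The fixture above hands the constructor one successful node (node1), an empty map of execution errors, and a single failed node (node2) with a test exception, so later tests can exercise partial-failure handling. A small illustrative JUnit 4 check (not part of the Hive test class) of what that fixture exposes through the exception's getters, assuming org.junit.Test, org.junit.Assert and java.util.Collections are imported:

@Test
public void runNodesExceptionExposesPartialResults() {
    // node1 was passed in as the node that started successfully
    Assert.assertEquals(Collections.singleton(node1), runNodesException.getSuccessfulNodes());
    // node2 was passed in as the node that failed, keyed to its cause
    Assert.assertTrue(runNodesException.getNodeErrors().containsKey(node2));
    // no script execution errors were supplied
    Assert.assertTrue(runNodesException.getExecutionErrors().isEmpty());
}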

Aggregations

RunNodesException (org.jclouds.compute.RunNodesException) 12
NodeMetadata (org.jclouds.compute.domain.NodeMetadata) 11
Template (org.jclouds.compute.domain.Template) 7
TemplateBuilder (org.jclouds.compute.domain.TemplateBuilder) 7
ComputeService (org.jclouds.compute.ComputeService) 6
IOException (java.io.IOException) 5
ComputeServiceContext (org.jclouds.compute.ComputeServiceContext) 4
RunScriptOnNodesException (org.jclouds.compute.RunScriptOnNodesException) 4
Statement (org.jclouds.scriptbuilder.domain.Statement) 4
File (java.io.File) 3
InstanceTemplate (org.apache.whirr.service.ClusterSpec.InstanceTemplate) 3
LoginCredentials (org.jclouds.domain.LoginCredentials) 3
Properties (java.util.Properties) 2
RunListBuilder (org.jclouds.chef.util.RunListBuilder) 2
Builder.overrideLoginCredentials (org.jclouds.compute.options.TemplateOptions.Builder.overrideLoginCredentials) 2
RetryablePredicate (org.jclouds.predicates.RetryablePredicate) 2
Predicate (com.google.common.base.Predicate) 1
ImmutableList (com.google.common.collect.ImmutableList) 1
Module (com.google.inject.Module) 1
BufferedReader (java.io.BufferedReader) 1