Use of org.jclouds.compute.domain.NodeMetadata in project legacy-jclouds-examples by jclouds.
The class MinecraftController, method createNodeWithMinecraft.
private NodeMetadata createNodeWithMinecraft() {
   // reserve the requested heap plus 256 MB of headroom for the JVM itself
   int javaPlusOverhead = maxHeap + 256;
   NodeMetadata node = nodeManager.createNodeWithAdminUserAndJDKInGroupOpeningPortAndMinRam(group, port, javaPlusOverhead);
   nodeManager.startDaemonOnNode(daemonFactory.get(), node.getId());
   return node;
}
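A hypothetical call site, not part of the jclouds example, showing how the returned NodeMetadata might be used to report where the server is reachable; Iterables is Guava, and port is assumed to be the same controller field passed above:

NodeMetadata node = createNodeWithMinecraft();
// prefer a public address, falling back to the node's hostname
String address = Iterables.getFirst(node.getPublicAddresses(), node.getHostname());
System.out.printf("Minecraft node %s reachable at %s:%d%n", node.getId(), address, port);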
Use of org.jclouds.compute.domain.NodeMetadata in project legacy-jclouds-examples by jclouds.
The class NodeManager, method createNodeWithAdminUserAndJDKInGroupOpeningPortAndMinRam.
public NodeMetadata createNodeWithAdminUserAndJDKInGroupOpeningPortAndMinRam(String group, int port, int minRam) {
   ImmutableMap<String, String> userMetadata = ImmutableMap.<String, String>of("Name", group);

   // we want everything as defaults except ram
   Template defaultTemplate = compute.templateBuilder().build();
   Template minecraft = compute.templateBuilder().fromTemplate(defaultTemplate).minRam(minRam).build();

   // setup the template to customize the node with jdk, etc. also opening ports.
   Statement bootstrap = newStatementList(AdminAccess.standard(), InstallJDK.fromOpenJDK());
   minecraft.getOptions().inboundPorts(22, port).userMetadata(userMetadata).runScript(bootstrap);

   // example of using a cloud-specific hook
   if (minecraft.getOptions() instanceof AWSEC2TemplateOptions)
      minecraft.getOptions().as(AWSEC2TemplateOptions.class).enableMonitoring();

   logger.info(">> creating node type(%s) in group %s, opening ports 22, %s with admin user and jdk", minecraft.getHardware().getId(), group, port);
   try {
      NodeMetadata node = getOnlyElement(compute.createNodesInGroup(group, 1, minecraft));
      logger.info("<< available node(%s) os(%s) publicAddresses%s", node.getId(), node.getOperatingSystem(), node.getPublicAddresses());
      return node;
   } catch (RunNodesException e) {
      throw destroyBadNodesAndPropagate(e);
   }
}
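The compute field used above comes from a jclouds ComputeServiceContext. A minimal sketch of how it might be built, assuming an "aws-ec2" provider and placeholder credentials (the wiring in the example project may differ):

import org.jclouds.ContextBuilder;
import org.jclouds.compute.ComputeService;
import org.jclouds.compute.ComputeServiceContext;
import org.jclouds.sshj.config.SshjSshClientModule;
import com.google.common.collect.ImmutableSet;
import com.google.inject.Module;

// placeholder provider id and credentials
ComputeServiceContext context = ContextBuilder.newBuilder("aws-ec2")
      .credentials("identity", "credential")
      .modules(ImmutableSet.<Module>of(new SshjSshClientModule()))
      .buildView(ComputeServiceContext.class);
ComputeService compute = context.getComputeService();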
Use of org.jclouds.compute.domain.NodeMetadata in project gora by apache.
The class ChefSoftwareProvisioning, method performChefComputeServiceBootstrapping.
private static void performChefComputeServiceBootstrapping(Properties properties) throws IOException, InstantiationException, IllegalAccessException {
   // Get the credentials that will be used to authenticate to the Chef server
   String rsContinent = DataStoreFactory.findProperty(properties, MemStore.class.newInstance(), RS_CONTINENT, "rackspace-cloudservers-us");
   String rsUser = DataStoreFactory.findProperty(properties, MemStore.class.newInstance(), RS_USERNAME, "asf-gora");
   String rsApiKey = DataStoreFactory.findProperty(properties, MemStore.class.newInstance(), RS_APIKEY, null);
   String rsRegion = DataStoreFactory.findProperty(properties, MemStore.class.newInstance(), RS_REGION, "DFW");
   String client = DataStoreFactory.findProperty(properties, MemStore.class.newInstance(), CHEF_CLIENT, System.getProperty("user.name"));
   String organization = DataStoreFactory.findProperty(properties, MemStore.class.newInstance(), CHEF_ORGANIZATION, null);
   String pemFile = System.getProperty("user.home") + "/.chef/" + client + ".pem";
   String credential = Files.toString(new File(pemFile), Charsets.UTF_8);

   // Provide the validator information to let the nodes auto-register themselves
   // with the Chef server during bootstrap
   String validator = organization + "-validator";
   String validatorPemFile = System.getProperty("user.home") + "/.chef/" + validator + ".pem";
   String validatorCredential = Files.toString(new File(validatorPemFile), Charsets.UTF_8);

   Properties chefConfig = new Properties();
   chefConfig.put(ChefProperties.CHEF_VALIDATOR_NAME, validator);
   chefConfig.put(ChefProperties.CHEF_VALIDATOR_CREDENTIAL, validatorCredential);

   // Create the connection to the Chef server
   ChefContext chefContext = ContextBuilder.newBuilder("chef")
         .endpoint("https://api.opscode.com/organizations/" + organization)
         .credentials(client, credential)
         .overrides(chefConfig)
         .buildView(ChefContext.class);

   // Create the connection to the compute provider. Note that ssh will be used to bootstrap chef
   ComputeServiceContext computeContext = ContextBuilder.newBuilder(rsContinent)
         .endpoint(rsRegion)
         .credentials(rsUser, rsApiKey)
         .modules(ImmutableSet.<Module>of(new SshjSshClientModule()))
         .buildView(ComputeServiceContext.class);

   // Group all nodes in both Chef and the compute provider by this group
   String group = "jclouds-chef-goraci";

   // Set the recipe to install and the configuration values to override
   String recipe = "apache2";
   JsonBall attributes = new JsonBall("{\"apache\": {\"listen_ports\": \"8080\"}}");

   // Check to see if the recipe you want exists
   List<String> runlist = null;
   Iterable<? extends CookbookVersion> cookbookVersions = chefContext.getChefService().listCookbookVersions();
   if (any(cookbookVersions, CookbookVersionPredicates.containsRecipe(recipe))) {
      runlist = new RunListBuilder().addRecipe(recipe).build();
   }
   for (String entry : runlist) {
      LOG.info(entry);
   }

   // Update the chef service with the run list you wish to apply to all nodes in the group
   // and also provide the json configuration used to customize the desired values
   BootstrapConfig config = BootstrapConfig.builder().runList(runlist).attributes(attributes).build();
   chefContext.getChefService().updateBootstrapConfigForGroup(group, config);

   // Build the script that will bootstrap the node
   Statement bootstrap = chefContext.getChefService().createBootstrapScriptForGroup(group);
   TemplateBuilder templateBuilder = computeContext.getComputeService().templateBuilder();
   templateBuilder.options(runScript(bootstrap));

   // Run a node on the compute provider that bootstraps chef
   try {
      Set<? extends NodeMetadata> nodes = computeContext.getComputeService().createNodesInGroup(group, 1, templateBuilder.build());
      for (NodeMetadata nodeMetadata : nodes) {
         LOG.info("<< node %s: %s%n", nodeMetadata.getId(), concat(nodeMetadata.getPrivateAddresses(), nodeMetadata.getPublicAddresses()));
      }
   } catch (RunNodesException e) {
      throw new RuntimeException(e.getMessage());
   }

   // Release resources
   chefContext.close();
   computeContext.close();
}
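A hedged follow-up, not part of the gora code: once the run is finished, the nodes created for the group can be released through the same ComputeService (before computeContext is closed), using jclouds' NodePredicates helper:

// tear down every node that was created in this group
computeContext.getComputeService().destroyNodesMatching(NodePredicates.inGroup(group));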
Use of org.jclouds.compute.domain.NodeMetadata in project hive by apache.
The class CloudComputeService, method createFilterPTestPredicate.
static Predicate<ComputeMetadata> createFilterPTestPredicate(final String groupName, final String groupTag) {
   return new Predicate<ComputeMetadata>() {
      @Override
      public boolean apply(ComputeMetadata computeMetadata) {
         NodeMetadata nodeMetadata = (NodeMetadata) computeMetadata;
         return nodeMetadata.getStatus() == Status.RUNNING && isPTestHost(nodeMetadata);
      }

      private boolean isPTestHost(NodeMetadata node) {
         String result = "false non-ptest host";
         if (groupName.equalsIgnoreCase(node.getGroup())) {
            result = "true due to group " + groupName;
            return true;
         }
         if (Strings.nullToEmpty(node.getName()).startsWith(groupName)) {
            result = "true due to name " + groupName;
            return true;
         }
         if (node.getTags().contains(groupTag)) {
            result = "true due to tag " + groupName;
            return true;
         }
         LOG.debug("Found node: " + node + ", Result: " + result);
         return false;
      }
   };
}
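A possible call site, assumed rather than shown in the hive source: the predicate can be handed to a jclouds ComputeService (here a hypothetical computeService reference) to list only the matching ptest hosts:

Set<? extends NodeMetadata> ptestHosts =
      computeService.listNodesDetailsMatching(createFilterPTestPredicate(groupName, groupTag));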
Use of org.jclouds.compute.domain.NodeMetadata in project hive by apache.
The class CloudExecutionContextProvider, method performBackgroundWork.
private synchronized void performBackgroundWork() {
   LOG.info("Performing background work");
   // snapshot the terminated-host map so its lock is not held while iterating running nodes
   Map<String, Long> terminatedHosts = Maps.newHashMap();
   synchronized (mTerminatedHosts) {
      terminatedHosts.putAll(mTerminatedHosts);
   }
   for (NodeMetadata node : getRunningNodes()) {
      String ip = publicIpOrHostname(node);
      if (terminatedHosts.containsKey(ip)) {
         terminateInternal(node);
         LOG.warn("Found zombie node: " + node + " previously terminated at " + new Date(terminatedHosts.get(ip)));
      } else if (!mLiveHosts.containsKey(ip)) {
         LOG.warn("Found zombie node: " + node + " previously unknown to ptest");
      }
   }
}
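The publicIpOrHostname helper is not shown in this excerpt; a minimal stand-in consistent with how it is used here (the real hive implementation may differ) would prefer the first public address and fall back to the node's hostname:

// hypothetical stand-in for the helper referenced above
private static String publicIpOrHostname(NodeMetadata node) {
   Set<String> addresses = node.getPublicAddresses();
   return addresses.isEmpty() ? node.getHostname() : addresses.iterator().next();
}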