Use of org.apache.whirr.service.Cluster.Instance in project whirr by apache.
In class CassandraServiceTest, method waitForCassandra:
private void waitForCassandra() {
  for (Instance instance : cluster.getInstances()) {
    // Poll each node's Thrift port until it answers a simple RPC.
    while (true) {
      try {
        TSocket socket = new TSocket(instance.getPublicAddress().getHostAddress(), CassandraService.CLIENT_PORT);
        socket.open();
        TBinaryProtocol protocol = new TBinaryProtocol(socket);
        Cassandra.Client client = new Cassandra.Client(protocol);
        client.describe_cluster_name();
        socket.close();
        break; // this node is up; move on to the next instance
      } catch (TException e) {
        System.out.print(".");
        try {
          Thread.sleep(1000); // wait a second before retrying
        } catch (InterruptedException e1) {
          break;
        }
      }
    }
  }
}
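The loop above retries forever, so a cluster that never comes up will hang the test. Below is a minimal sketch of the same readiness probe with a bounded deadline; it reuses the test class's imports plus java.util.concurrent.TimeoutException. The timeoutMillis parameter is an assumed addition, not part of the project:

private void waitForCassandra(long timeoutMillis) throws TimeoutException {
  long deadline = System.currentTimeMillis() + timeoutMillis;
  for (Instance instance : cluster.getInstances()) {
    String host = instance.getPublicAddress().getHostAddress();
    while (true) {
      if (System.currentTimeMillis() > deadline) {
        throw new TimeoutException(host + " did not answer before the deadline");
      }
      TSocket socket = new TSocket(host, CassandraService.CLIENT_PORT);
      try {
        socket.open();
        // Any successful RPC proves the Thrift server is serving requests.
        new Cassandra.Client(new TBinaryProtocol(socket)).describe_cluster_name();
        break; // this node answered; move on to the next instance
      } catch (TException e) {
        try {
          Thread.sleep(1000); // same one-second retry interval as the original
        } catch (InterruptedException ie) {
          Thread.currentThread().interrupt(); // preserve the interrupt status
          return;
        }
      } finally {
        socket.close(); // safe even if open() failed
      }
    }
  }
}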
Use of org.apache.whirr.service.Cluster.Instance in project whirr by apache.
In class CassandraServiceTest, method testInstances:
@Test
public void testInstances() throws Exception {
  // Collect every endpoint the Cassandra ring reports for the keyspace.
  Set<String> endPoints = new HashSet<String>();
  for (Instance instance : cluster.getInstances()) {
    TSocket socket = new TSocket(instance.getPublicAddress().getHostAddress(), CassandraService.CLIENT_PORT);
    socket.open();
    TBinaryProtocol protocol = new TBinaryProtocol(socket);
    Cassandra.Client client = new Cassandra.Client(protocol);
    List<TokenRange> tr = client.describe_ring(KEYSPACE);
    for (TokenRange tokenRange : tr) {
      endPoints.addAll(tokenRange.endpoints);
    }
    socket.close();
  }
  // Every instance launched by Whirr must appear in the ring, and vice versa.
  for (Instance instance : cluster.getInstances()) {
    String address = instance.getPrivateAddress().getHostAddress();
    assertTrue(address + " not in cluster!", endPoints.remove(address));
  }
  assertTrue("Unknown node returned: " + endPoints.toString(), endPoints.isEmpty());
}
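Note the address asymmetry: the test connects over each node's public address, but describe_ring reports the addresses the Cassandra daemons themselves bound to, which inside EC2 are the private ones; hence the second loop compares against getPrivateAddress(). Both tests also repeat the same connection boilerplate, which a small helper could factor out. A sketch (openThriftSocket is a hypothetical name, not part of the project):

// Hypothetical helper shared by the two tests above: opens a Thrift
// socket to an instance's public address on the Cassandra client port.
private TSocket openThriftSocket(Instance instance) throws TTransportException {
  TSocket socket = new TSocket(instance.getPublicAddress().getHostAddress(), CassandraService.CLIENT_PORT);
  socket.open();
  return socket;
}

Callers would then wrap the returned socket in a TBinaryProtocol and close it when done, exactly as in the tests above.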
Use of org.apache.whirr.service.Cluster.Instance in project whirr by apache.
In class HadoopService, method launchCluster:
@Override
public HadoopCluster launchCluster(ClusterSpec clusterSpec) throws IOException {
  ComputeServiceContext computeServiceContext = ComputeServiceContextBuilder.build(clusterSpec);
  ComputeService computeService = computeServiceContext.getComputeService();
  // Launch Hadoop "master" (NN and JT)
  // deal with user packages and autoshutdown with extra runurls
  String hadoopInstallRunUrl = clusterSpec.getConfiguration().getString("whirr.hadoop-install-runurl", "apache/hadoop/install");
  byte[] nnjtBootScript = RunUrlBuilder.runUrls("sun/java/install",
      String.format("%s nn,jt -c %s", hadoopInstallRunUrl, clusterSpec.getProvider()));
  TemplateBuilder masterTemplateBuilder = computeService.templateBuilder()
      .osFamily(UBUNTU)
      .options(runScript(nnjtBootScript)
          .installPrivateKey(clusterSpec.readPrivateKey())
          .authorizePublicKey(clusterSpec.readPublicKey()));
  // TODO extract this logic elsewhere
  if (clusterSpec.getProvider().equals("ec2")) {
    masterTemplateBuilder.imageNameMatches(".*10\\.?04.*")
        .osDescriptionMatches("^ubuntu-images.*")
        .architecture(Architecture.X86_32);
  }
  Template masterTemplate = masterTemplateBuilder.build();
  InstanceTemplate instanceTemplate = clusterSpec.getInstanceTemplate(MASTER_ROLE);
  checkNotNull(instanceTemplate);
  checkArgument(instanceTemplate.getNumberOfInstances() == 1);
  Set<? extends NodeMetadata> nodes;
  try {
    nodes = computeService.runNodesWithTag(clusterSpec.getClusterName(), 1, masterTemplate);
  } catch (RunNodesException e) {
    // TODO: can we do better here (retry?)
    throw new IOException(e);
  }
  NodeMetadata node = Iterables.getOnlyElement(nodes);
  // The namenode and jobtracker run on the same master node, so both
  // addresses come from it and the equality check below never fires.
  InetAddress namenodePublicAddress = InetAddress.getByName(Iterables.get(node.getPublicAddresses(), 0));
  InetAddress jobtrackerPublicAddress = InetAddress.getByName(Iterables.get(node.getPublicAddresses(), 0));
  FirewallSettings.authorizeIngress(computeServiceContext, node, clusterSpec, WEB_PORT);
  FirewallSettings.authorizeIngress(computeServiceContext, node, clusterSpec, NAMENODE_WEB_UI_PORT);
  FirewallSettings.authorizeIngress(computeServiceContext, node, clusterSpec, JOBTRACKER_WEB_UI_PORT);
  FirewallSettings.authorizeIngress(computeServiceContext, node, clusterSpec, namenodePublicAddress.getHostAddress(), NAMENODE_PORT);
  FirewallSettings.authorizeIngress(computeServiceContext, node, clusterSpec, namenodePublicAddress.getHostAddress(), JOBTRACKER_PORT);
  if (!namenodePublicAddress.equals(jobtrackerPublicAddress)) {
    FirewallSettings.authorizeIngress(computeServiceContext, node, clusterSpec, jobtrackerPublicAddress.getHostAddress(), NAMENODE_PORT);
    FirewallSettings.authorizeIngress(computeServiceContext, node, clusterSpec, jobtrackerPublicAddress.getHostAddress(), JOBTRACKER_PORT);
  }
  // Launch slaves (DN and TT)
  byte[] slaveBootScript = RunUrlBuilder.runUrls("sun/java/install",
      String.format("%s dn,tt -n %s -j %s", hadoopInstallRunUrl, namenodePublicAddress.getHostName(), jobtrackerPublicAddress.getHostName()));
  TemplateBuilder slaveTemplateBuilder = computeService.templateBuilder()
      .osFamily(UBUNTU)
      .options(runScript(slaveBootScript)
          .installPrivateKey(clusterSpec.readPrivateKey())
          .authorizePublicKey(clusterSpec.readPublicKey()));
  // TODO extract this logic elsewhere
  if (clusterSpec.getProvider().equals("ec2")) {
    slaveTemplateBuilder.imageNameMatches(".*10\\.?04.*")
        .osDescriptionMatches("^ubuntu-images.*")
        .architecture(Architecture.X86_32);
  }
  Template slaveTemplate = slaveTemplateBuilder.build();
  instanceTemplate = clusterSpec.getInstanceTemplate(WORKER_ROLE);
  checkNotNull(instanceTemplate);
  Set<? extends NodeMetadata> workerNodes;
  try {
    workerNodes = computeService.runNodesWithTag(clusterSpec.getClusterName(), instanceTemplate.getNumberOfInstances(), slaveTemplate);
  } catch (RunNodesException e) {
    // TODO: don't bail out if only a few have failed to start
    throw new IOException(e);
  }
  // TODO: wait for TTs to come up (done in test for the moment)
  Set<Instance> instances = Sets.union(getInstances(MASTER_ROLE, Collections.singleton(node)),
      getInstances(WORKER_ROLE, workerNodes));
  Properties config = createClientSideProperties(namenodePublicAddress, jobtrackerPublicAddress);
  return new HadoopCluster(instances, config);
}
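For orientation, a sketch of how this service might be driven from client code. Only launchCluster(ClusterSpec) is taken from the method above; ClusterSpec construction and service wiring differ between Whirr versions, so loadClusterSpec is a hypothetical helper, the no-argument constructor is shown for brevity, and getConfiguration() on HadoopCluster is assumed to expose the properties built by createClientSideProperties:

// Hypothetical driver code, not part of the project.
ClusterSpec clusterSpec = loadClusterSpec("whirr-hadoop.properties"); // hypothetical loader
HadoopService service = new HadoopService(); // actual factory wiring may differ
HadoopCluster cluster = service.launchCluster(clusterSpec); // returns once all nodes are launched
// The returned cluster carries client-side Hadoop settings such as the
// namenode and jobtracker endpoints (assumed accessor).
Properties clientConfig = cluster.getConfiguration();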