Use of org.apache.whirr.Cluster in project whirr by apache.
Class GangliaMetadClusterActionHandler, method afterConfigure.
/**
 * Post-configure hook: logs that cluster configuration has completed and
 * prints the ganglia metad host(s) together with the URL where the ganglia
 * web frontend should be reachable.
 */
@Override
protected void afterConfigure(ClusterActionEvent event) {
  ClusterSpec spec = event.getClusterSpec();
  LOG.info("Completed configuration of {}", spec.getClusterName());
  String metadHosts = Joiner.on(',')
      .join(getHosts(event.getCluster().getInstancesMatching(role(GANGLIA_METAD_ROLE))));
  LOG.info("Meta host: {}. You should be able to connect on http://{}/ganglia", metadHosts, metadHosts);
}
Use of org.apache.whirr.Cluster in project whirr by apache.
Class DruidServiceTest, method getQueryInfo.
/**
 * Polls the druid realtime node with a segmentMetadata query, retrying up to
 * 20 times with a 5 second pause between attempts while the service starts.
 *
 * @return the raw response body returned by the druid query endpoint
 * @throws Exception if no attempt succeeds, or the wait is interrupted
 */
private String getQueryInfo() throws Exception {
  for (int i = 0; i < 20; i++) {
    try {
      Cluster.Instance instance = Iterables.get(
          cluster.getInstancesMatching(role(DruidRealtimeClusterActionHandler.ROLE)), 0);
      String address = instance.getPublicAddress().getHostAddress();
      String port = DruidRealtimeClusterActionHandler.PORT.toString();
      URL url = new URL(String.format("http://%s:%s/druid/v2", address, port));
      // Trailing comma after the intervals entry removed: the original
      // payload was not well-formed JSON.
      String query = "{\n"
          + " \"queryType\":\"segmentMetadata\",\n"
          + " \"dataSource\":\"sample_datasource\",\n"
          + " \"intervals\":[\"2013-01-01/2014-01-01\"]\n"
          + "}";
      URLConnection conn = url.openConnection();
      conn.setDoOutput(true);
      // Close the writer before reading the response (signals end of the
      // request body) and release it even if the write itself fails.
      OutputStreamWriter writer = new OutputStreamWriter(conn.getOutputStream());
      try {
        writer.write(query);
        writer.flush();
      } finally {
        writer.close();
      }
      BufferedReader reader = new BufferedReader(new InputStreamReader(conn.getInputStream()));
      StringBuilder builder = new StringBuilder();
      try {
        String line;
        while ((line = reader.readLine()) != null) {
          builder.append(line);
        }
      } finally {
        reader.close();
      }
      return builder.toString();
    } catch (IOException e) {
      // Node not up yet; wait and retry.
      try {
        Thread.sleep(5000);
      } catch (InterruptedException e1) {
        // Restore the interrupt status and stop retrying instead of
        // silently swallowing the interruption.
        Thread.currentThread().interrupt();
        throw new Exception("Interrupted while waiting for druid realtime node.", e1);
      }
    }
  }
  throw new Exception("Unable to get cluster metadata info.");
}
Use of org.apache.whirr.Cluster in project whirr by apache.
Class ElasticSearchConfigurationBuilderTest, method testDefaultUnicastConfig.
/**
 * Verifies that the generated elasticsearch YAML disables multicast
 * discovery and lists every instance's private IP (with port 9300) in the
 * zen unicast host list.
 */
@Test
public void testDefaultUnicastConfig() throws Exception {
  Configuration props = new PropertiesConfiguration();
  props.addProperty("whirr.provider", "cloudservers-us");
  ClusterSpec spec = ClusterSpec.withTemporaryKeys(props);

  // Mock a two-node cluster; LinkedHashSet keeps host order deterministic
  // so the expected YAML below is stable.
  Set<Cluster.Instance> mockedInstances = Sets.newLinkedHashSet();
  for (String privateIp : new String[] { "10.0.0.1", "10.0.0.2" }) {
    Cluster.Instance mockedInstance = mock(Cluster.Instance.class);
    when(mockedInstance.getPrivateIp()).thenReturn(privateIp);
    mockedInstances.add(mockedInstance);
  }
  Cluster cluster = mock(Cluster.class);
  when(cluster.getInstancesMatching((Predicate<Cluster.Instance>) any())).thenReturn(mockedInstances);

  Configuration config = ElasticSearchConfigurationBuilder.buildConfig(spec, cluster);
  String content = StringUtils.join(
      ElasticSearchConfigurationBuilder.asYamlLines(
          config.subset(ElasticSearchConfigurationBuilder.ES_PREFIX)), "\n");
  assertThat(content, is("index:\n" + " store:\n" + " type: memory\n" + "gateway:\n" + " type: none\n" + "discovery:\n" + " zen:\n" + " ping:\n" + " multicast:\n" + " enabled: false\n" + " unicast:\n" + " hosts: [\"10.0.0.1:9300\", \"10.0.0.2:9300\"]"));
}
Use of org.apache.whirr.Cluster in project whirr by apache.
Class HamaGroomServerClusterActionHandler, method beforeConfigure.
/**
 * Pre-configure hook for the groomserver role: opens the groomserver port
 * toward the Hama master instance, then queues the configure and start
 * script invocations with the master host, ZooKeeper quorum and tarball
 * URL as script parameters.
 *
 * @throws IOException if cluster configuration cannot be read
 * @throws InterruptedException if firewall rule handling is interrupted
 */
@Override
protected void beforeConfigure(ClusterActionEvent event) throws IOException, InterruptedException {
  ClusterSpec clusterSpec = event.getClusterSpec();
  Cluster cluster = event.getCluster();

  // Firewall: allow traffic to the groomserver port on the master instance.
  Instance master = cluster.getInstanceMatching(role(HamaMasterClusterActionHandler.ROLE));
  event.getFirewallManager().addRules(Rule.create().destination(master).ports(GROOMSERVER_PORT));
  handleFirewallRules(event);

  // Script parameters: master hostname, ZooKeeper quorum, tarball location.
  String masterHost = master.getPublicAddress().getHostName();
  String zkQuorum = ZooKeeperCluster.getHosts(cluster);
  String tarballUrl = prepareRemoteFileUrl(event,
      getConfiguration(clusterSpec).getString(HamaConstants.KEY_TARBALL_URL));

  addStatement(event, call("retry_helpers"));
  String configureFunction = getConfiguration(clusterSpec)
      .getString(HamaConstants.KEY_CONFIGURE_FUNCTION, HamaConstants.FUNCTION_POST_CONFIGURE);
  addStatement(event, call(configureFunction, ROLE,
      HamaConstants.PARAM_MASTER, masterHost,
      HamaConstants.PARAM_QUORUM, zkQuorum,
      HamaConstants.PARAM_TARBALL_URL, tarballUrl));
  String startFunction = getConfiguration(clusterSpec)
      .getString(HamaConstants.KEY_START_FUNCTION, HamaConstants.FUNCTION_START);
  addStatement(event, call(startFunction, ROLE, HamaConstants.PARAM_TARBALL_URL, tarballUrl));
}
Use of org.apache.whirr.Cluster in project whirr by apache.
Class DruidClusterActionHandler, method beforeConfigure.
/**
 * Pre-configure hook for a druid node: opens the role's service port, then
 * queues the configure_druid script call with the ZooKeeper quorum, MySQL
 * address, blobstore credentials and S3 bucket. Realtime nodes additionally
 * have their realtime spec pushed out via configure_realtime.
 *
 * @throws IOException if the realtime spec file cannot be read or the
 *         bundled default spec is missing from the classpath
 */
// Always over-ridden in subclass
@Override
protected void beforeConfigure(ClusterActionEvent event) throws IOException {
  ClusterSpec clusterSpec = event.getClusterSpec();
  Cluster cluster = event.getCluster();
  Configuration conf = getConfiguration(clusterSpec);
  // Parameterized SLF4J logging instead of string concatenation throughout.
  LOG.info("Role: [{}] Port: [{}]", getRole(), getPort());
  // Open a port for the service
  event.getFirewallManager().addRule(FirewallManager.Rule.create().destination(role(getRole())).port(getPort()));
  handleFirewallRules(event);
  // Zookeeper quorum
  String quorum = ZooKeeperCluster.getHosts(cluster, true);
  LOG.info("ZookeeperCluster.getHosts(cluster): {}", quorum);
  // Get MySQL Server address
  String mysqlAddress = DruidCluster.getMySQLPublicAddress(cluster);
  LOG.info("DruidCluster.getMySQLPublicAddress(cluster).getHostAddress(): {}", mysqlAddress);
  // Blobstore credentials and target bucket for segment pushing.
  // (Removed an unused System.getenv() lookup that was never read.)
  String identity = clusterSpec.getBlobStoreIdentity();
  String credential = clusterSpec.getBlobStoreCredential();
  String s3Bucket = conf.getString("whirr.druid.pusher.s3.bucket");
  LOG.info("whirr.druid.pusher.s3.bucket: {}", s3Bucket);
  addStatement(event, call("retry_helpers"));
  addStatement(event, call("configure_hostnames"));
  addStatement(event, call("configure_druid", getRole(), quorum, getPort().toString(),
      mysqlAddress, identity, credential, s3Bucket));
  // Configure the realtime spec for realtime nodes
  if (getRole().equals("druid-realtime")) {
    String specPath = (String) conf.getProperty("whirr.druid.realtime.spec.path");
    // Separator added to the log message; the key and value used to run together.
    LOG.info("whirr.druid.realtime.spec.path: {}", specPath);
    if (specPath == null || specPath.isEmpty()) {
      // Default to the bundled realtime.spec; fail fast with a clear message
      // if it is missing from the classpath instead of throwing an NPE.
      java.net.URL specResource = DruidClusterActionHandler.class.getResource("/" + "realtime.spec");
      if (specResource == null) {
        throw new IOException("Default realtime.spec not found on the classpath.");
      }
      specPath = specResource.getPath();
      // prepareRemoteFileUrl(event, specPath);
    }
    // Quorum is a variable in the realtime.spec
    String realtimeSpec = "'" + readFile(specPath) + "'";
    addStatement(event, call("configure_realtime", quorum, realtimeSpec));
  }
}
Aggregations