Example usage of org.apache.whirr.ClusterControllerFactory in the Apache Whirr project.
From class ZooKeeperServiceTest, method setUp:
@Before
public void setUp() throws Exception {
  // Layer the configuration: an optional user-supplied file (passed via
  // -Dconfig=<path>) takes precedence over the bundled test defaults.
  CompositeConfiguration config = new CompositeConfiguration();
  String userConfigPath = System.getProperty("config");
  if (userConfigPath != null) {
    config.addConfiguration(new PropertiesConfiguration(userConfigPath));
  }
  config.addConfiguration(new PropertiesConfiguration("whirr-zookeeper-test.properties"));

  // Build the spec with throwaway SSH keys, launch the cluster, and record
  // the ZooKeeper ensemble's host list for the tests to connect to.
  clusterSpec = ClusterSpec.withTemporaryKeys(config);
  controller = new ClusterControllerFactory().create(clusterSpec.getServiceName());
  cluster = controller.launchCluster(clusterSpec);
  hosts = ZooKeeperCluster.getHosts(cluster);
}
Example usage of org.apache.whirr.ClusterControllerFactory in the Apache Whirr project.
From class HadoopClusterExample, method main:
@Override
public int main(String[] args) throws Exception {
  // Both AWS credentials must be present before we attempt anything.
  if (!System.getenv().containsKey("AWS_ACCESS_KEY_ID")) {
    LOG.error("AWS_ACCESS_KEY_ID is undefined in the current environment");
    return -1;
  }
  if (!System.getenv().containsKey("AWS_SECRET_ACCESS_KEY")) {
    LOG.error("AWS_SECRET_ACCESS_KEY is undefined in the current environment");
    return -2;
  }
  /**
   * Start by loading cluster configuration file and creating a ClusterSpec object
   *
   * You can find the file in the resources folder.
   */
  ClusterSpec spec = new ClusterSpec(new PropertiesConfiguration("whirr-hadoop-example.properties"));
  /**
   * Create an instance of the generic cluster controller
   */
  ClusterControllerFactory factory = new ClusterControllerFactory();
  ClusterController controller = factory.create(spec.getServiceName());
  /**
   * Start the cluster as defined in the configuration file
   */
  HadoopProxy proxy = null;
  try {
    LOG.info("Starting cluster {}", spec.getClusterName());
    Cluster cluster = controller.launchCluster(spec);
    LOG.info("Starting local SOCKS proxy");
    proxy = new HadoopProxy(spec, cluster);
    proxy.start();
    /**
     * Obtain a Hadoop configuration object and wait for services to start
     */
    Configuration config = getHadoopConfiguration(cluster);
    JobConf job = new JobConf(config, HadoopClusterExample.class);
    JobClient client = new JobClient(job);
    waitToExitSafeMode(client);
    waitForTaskTrackers(client);
    /**
     * Run a simple job to show that the cluster is available for work.
     */
    runWordCountingJob(config);
    // Success exit code is returned from the try body, NOT from finally.
    // A `return` inside a finally block silently discards any exception
    // thrown above (launch failure, job failure, ...), making this example
    // report success even when it failed.
    return 0;
  } finally {
    /**
     * Stop the proxy and terminate all the cluster instances.
     */
    if (proxy != null) {
      proxy.stop();
    }
    controller.destroyCluster(spec);
  }
}
Example usage of org.apache.whirr.ClusterControllerFactory in the Apache Whirr project.
From class PhaseExecutionBarrierTest, method testNoRemoteExecutionOverlap:
@Test(timeout = TestConstants.ITEST_TIMEOUT)
public void testNoRemoteExecutionOverlap() throws Exception {
  ClusterSpec spec = getTestClusterSpec();
  ClusterController controller = new ClusterControllerFactory().create(spec.getServiceName());
  try {
    controller.launchCluster(spec);
    // Collect the timestamp files written during the bootstrap and configure
    // phases from every node, then inspect the first response.
    Map<? extends NodeMetadata, ExecResponse> responses =
        controller.runScriptOnNodesMatching(
            spec,
            Predicates.<NodeMetadata>alwaysTrue(),
            exec("cat /tmp/bootstrap-start /tmp/bootstrap-end /tmp/configure-start"));
    ExecResponse firstResponse = Iterables.get(responses.values(), 0);
    LOG.info("Got response: {}", firstResponse);

    String[] timestamps = Strings.split(firstResponse.getOutput(), '\n');
    int bootstrapStart = parseInt(deleteWhitespace(timestamps[0]));
    int bootstrapEnd = parseInt(deleteWhitespace(timestamps[1]));
    int configureStart = parseInt(deleteWhitespace(timestamps[2]));

    // Phases must be strictly ordered: bootstrap finishes before configure begins.
    assertTrue(bootstrapStart < bootstrapEnd);
    assertTrue(bootstrapEnd < configureStart);
  } finally {
    controller.destroyCluster(spec);
  }
  assertNoOverlapOnLocalMachine();
}
Example usage of org.apache.whirr.ClusterControllerFactory in the Apache Whirr project.
From class AbstractClusterCommandTest, method testCreateServerWithInvalidClusterControllerName:
/**
 * Ensure that an invalid service name uses the default (after logging a
 * warning).
 */
@Test
public void testCreateServerWithInvalidClusterControllerName() throws Exception {
  ClusterControllerFactory factory = new ClusterControllerFactory();
  AbstractClusterCommand clusterCommand =
      new AbstractClusterCommand("name", "description", factory) {
        // Minimal no-op command implementation; only controller creation is under test.
        @Override
        public int run(InputStream in, PrintStream out, PrintStream err,
            List<String> args) throws Exception {
          return 0;
        }
      };
  // "bar" is not a registered service name; creation must fall back to the
  // default controller rather than throw.
  // following should not fail
  clusterCommand.createClusterController("bar");
}
Aggregations