Use of org.apache.hadoop.yarn.client.api.YarnClient in project cdap by caskdata.
The class AbstractYarnStats, method createYARNClient.
protected YarnClient createYARNClient() {
  YarnClient yarnClient = YarnClient.createYarnClient();
  yarnClient.init(conf);
  yarnClient.start();
  return yarnClient;
}
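The snippet above starts the client but leaves shutdown to the caller. The sketch below is not part of the cdap code; it shows one way a caller might use such a factory method and stop the client afterwards. The YarnStatsProbe class name and the listing of applications are illustrative assumptions.

import java.util.List;
import org.apache.hadoop.yarn.api.records.ApplicationReport;
import org.apache.hadoop.yarn.client.api.YarnClient;
import org.apache.hadoop.yarn.conf.YarnConfiguration;

// Illustrative sketch, not cdap code.
public class YarnStatsProbe {
  public static void main(String[] args) throws Exception {
    // create, init, and start the client, mirroring createYARNClient() above
    YarnClient yarnClient = YarnClient.createYarnClient();
    yarnClient.init(new YarnConfiguration());
    yarnClient.start();
    try {
      // any read-only query works once the client is started
      List<ApplicationReport> apps = yarnClient.getApplications();
      System.out.println("Applications known to the ResourceManager: " + apps.size());
    } finally {
      // release the RPC connection when the client is no longer needed
      yarnClient.stop();
    }
  }
}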
Use of org.apache.hadoop.yarn.client.api.YarnClient in project incubator-systemml by apache.
The class DMLYarnClient, method launchDMLYarnAppmaster.
/**
 * Method to launch the DML YARN app master and execute the given DML script
 * with the given configuration and jar file.
 *
 * NOTE: on launching the YARN app master, we do not explicitly probe whether we
 * are running on a YARN or an MR1 cluster. On MR1 the class YarnConfiguration is
 * not on the classpath, so loading it raises a ClassNotFoundException. On any
 * exception we fall back to running CP directly in the client process.
 *
 * @return true if the DML program executed successfully as a YARN app master
 * @throws IOException if an IOException occurs
 * @throws DMLScriptException if a DMLScriptException occurs
 */
protected boolean launchDMLYarnAppmaster() throws IOException, DMLScriptException {
  boolean ret = false;
  String hdfsWD = null;
  try {
    Timing time = new Timing(true);
    // load yarn configuration
    YarnConfiguration yconf = new YarnConfiguration();
    // create yarn client
    YarnClient yarnClient = YarnClient.createYarnClient();
    yarnClient.init(yconf);
    yarnClient.start();
    // create application and get the ApplicationId
    YarnClientApplication app = yarnClient.createApplication();
    ApplicationSubmissionContext appContext = app.getApplicationSubmissionContext();
    ApplicationId appId = appContext.getApplicationId();
    LOG.debug("Created application (applicationID: " + appId + ")");
    // prepare hdfs working directory via ApplicationId;
    // copy script, config, and jar file to hdfs
    hdfsWD = DMLAppMasterUtils.constructHDFSWorkingDir(_dmlConfig, appId);
    copyResourcesToHdfsWorkingDir(yconf, hdfsWD);
    // construct command line arguments
    String command = constructAMCommand(_args, _dmlConfig);
    LOG.debug("Constructed application master command: \n" + command);
    // set up the container launch context for the application master
    ContainerLaunchContext amContainer = Records.newRecord(ContainerLaunchContext.class);
    amContainer.setCommands(Collections.singletonList(command));
    amContainer.setLocalResources(constructLocalResourceMap(yconf));
    amContainer.setEnvironment(constructEnvionmentMap(yconf));
    // set up resource type requirements for the ApplicationMaster
    int memHeap = _dmlConfig.getIntValue(DMLConfig.YARN_APPMASTERMEM);
    int memAlloc = (int) computeMemoryAllocation(memHeap);
    Resource capability = Records.newRecord(Resource.class);
    capability.setMemory(memAlloc);
    capability.setVirtualCores(NUM_CORES);
    LOG.debug("Requested application resources: memory=" + memAlloc + ", vcores=" + NUM_CORES);
    // finally, set up the ApplicationSubmissionContext for the application
    String qname = _dmlConfig.getTextValue(DMLConfig.YARN_APPQUEUE);
    // application name
    appContext.setApplicationName(APPMASTER_NAME);
    appContext.setAMContainerSpec(amContainer);
    appContext.setResource(capability);
    // queue
    appContext.setQueue(qname);
    LOG.debug("Configured application meta data: name=" + APPMASTER_NAME + ", queue=" + qname);
    // submit application (non-blocking)
    yarnClient.submitApplication(appContext);
    // check application status periodically (and output the web ui address)
    ApplicationReport appReport = yarnClient.getApplicationReport(appId);
    LOG.info("Application tracking-URL: " + appReport.getTrackingUrl());
    YarnApplicationState appState = appReport.getYarnApplicationState();
    YarnApplicationState oldState = appState;
    LOG.info("Application state: " + appState);
    while (appState != YarnApplicationState.FINISHED && appState != YarnApplicationState.KILLED && appState != YarnApplicationState.FAILED) {
      // wait for 200ms
      Thread.sleep(APP_STATE_INTERVAL);
      appReport = yarnClient.getApplicationReport(appId);
      appState = appReport.getYarnApplicationState();
      if (appState != oldState) {
        oldState = appState;
        LOG.info("Application state: " + appState);
      }
    }
    // check final status (failed or succeeded)
    FinalApplicationStatus finalState = appReport.getFinalApplicationStatus();
    LOG.info("Application final status: " + finalState);
    // show application and total runtime
    double appRuntime = (double) (appReport.getFinishTime() - appReport.getStartTime()) / 1000;
    LOG.info("Application runtime: " + appRuntime + " sec.");
    LOG.info("Total runtime: " + String.format("%.3f", time.stop() / 1000) + " sec.");
    // raise a script-level error in case of failed final status
    if (finalState != FinalApplicationStatus.SUCCEEDED) {
      // propagate script-level stop call message
      String stop_msg = readMessageToHDFSWorkingDir(_dmlConfig, yconf, appId);
      if (stop_msg != null)
        throw new DMLScriptException(stop_msg);
      // generic failure message
      throw new DMLRuntimeException("DML yarn app master finished with final status: " + finalState + ".");
    }
    ret = true;
  } catch (DMLScriptException ex) {
    // rethrow DMLScriptException to propagate stop call
    throw ex;
  } catch (Exception ex) {
    LOG.error("Failed to run DML yarn app master.", ex);
    ret = false;
  } finally {
    // cleanup working directory
    if (hdfsWD != null)
      MapReduceTool.deleteFileIfExistOnHDFS(hdfsWD);
  }
  return ret;
}
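launchDMLYarnAppmaster polls the ApplicationReport until the application reaches a terminal state, but it will wait indefinitely. The following sketch is not part of SystemML; it shows how the same polling loop could be bounded by a deadline and the application killed on overrun. The waitForCompletion helper, its class, and the timeout parameter are illustrative assumptions.

import java.util.EnumSet;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.ApplicationReport;
import org.apache.hadoop.yarn.api.records.FinalApplicationStatus;
import org.apache.hadoop.yarn.api.records.YarnApplicationState;
import org.apache.hadoop.yarn.client.api.YarnClient;

// Illustrative helper, not part of the SystemML code base.
public class AppMasterWait {
  private static final EnumSet<YarnApplicationState> TERMINAL = EnumSet.of(
      YarnApplicationState.FINISHED, YarnApplicationState.KILLED, YarnApplicationState.FAILED);

  /** Polls the application state and kills the application if it exceeds the timeout. */
  public static FinalApplicationStatus waitForCompletion(YarnClient yarnClient, ApplicationId appId,
      long pollIntervalMs, long timeoutMs) throws Exception {
    long deadline = System.currentTimeMillis() + timeoutMs;
    ApplicationReport report = yarnClient.getApplicationReport(appId);
    while (!TERMINAL.contains(report.getYarnApplicationState())) {
      if (System.currentTimeMillis() > deadline) {
        // best-effort cleanup: ask the ResourceManager to kill the application
        yarnClient.killApplication(appId);
        throw new RuntimeException("Application " + appId + " timed out after " + timeoutMs + " ms");
      }
      Thread.sleep(pollIntervalMs);
      report = yarnClient.getApplicationReport(appId);
    }
    return report.getFinalApplicationStatus();
  }
}

A caller wanting such a bound could substitute this helper for the open-ended while loop above, passing APP_STATE_INTERVAL as the poll interval.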
Use of org.apache.hadoop.yarn.client.api.YarnClient in project incubator-systemml by apache.
The class YarnClusterAnalyzer, method analyzeYarnCluster.
public static void analyzeYarnCluster(boolean verbose) {
  YarnConfiguration conf = new YarnConfiguration();
  YarnClient yarnClient = YarnClient.createYarnClient();
  yarnClient.init(conf);
  yarnClient.start();
  analyzeYarnCluster(yarnClient, conf, verbose);
}
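The analysis itself happens in the overloaded analyzeYarnCluster(yarnClient, conf, verbose), which is not shown here. As a rough illustration of the kind of information a started client exposes for cluster analysis, the sketch below aggregates node reports using the Hadoop 2.x Resource API; it is an assumption about what such an analysis might look at, not the SystemML implementation, and the class and method names are invented.

import java.util.List;
import org.apache.hadoop.yarn.api.records.NodeReport;
import org.apache.hadoop.yarn.api.records.NodeState;
import org.apache.hadoop.yarn.client.api.YarnClient;

// Illustrative sketch, not the SystemML YarnClusterAnalyzer implementation.
public class ClusterCapacitySketch {
  /** Sums the memory and vcore capacity of all running NodeManagers. */
  public static void printClusterCapacity(YarnClient yarnClient) throws Exception {
    List<NodeReport> nodes = yarnClient.getNodeReports(NodeState.RUNNING);
    long totalMemMB = 0;
    int totalVCores = 0;
    for (NodeReport node : nodes) {
      totalMemMB += node.getCapability().getMemory();
      totalVCores += node.getCapability().getVirtualCores();
    }
    System.out.println("Running nodes: " + nodes.size()
        + ", total memory (MB): " + totalMemMB + ", total vcores: " + totalVCores);
  }
}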
Use of org.apache.hadoop.yarn.client.api.YarnClient in project incubator-systemml by apache.
The class YarnClusterAnalyzer, method createYarnClient.
private static YarnClient createYarnClient() {
  YarnConfiguration conf = new YarnConfiguration();
  YarnClient yarnClient = YarnClient.createYarnClient();
  yarnClient.init(conf);
  yarnClient.start();
  return yarnClient;
}
Use of org.apache.hadoop.yarn.client.api.YarnClient in project metron by apache.
The class MaasIntegrationTest, method testDSShell.
public void testDSShell(boolean haveDomain) throws Exception {
  MaaSConfig config = new MaaSConfig() {
    {
      setServiceRoot("/maas/service");
      setQueueConfig(new HashMap<String, Object>() {
        {
          put(ZKQueue.ZK_PATH, "/maas/queue");
        }
      });
    }
  };
  String configRoot = "/maas/config";
  byte[] configData = ConfigUtil.INSTANCE.toBytes(config);
  try {
    client.setData().forPath(configRoot, configData);
  } catch (KeeperException.NoNodeException e) {
    client.create().creatingParentsIfNeeded().forPath(configRoot, configData);
  }
  String[] args = { "--jar", yarnComponent.getAppMasterJar(), "--zk_quorum", zkServerComponent.getConnectionString(), "--zk_root", configRoot, "--master_memory", "512", "--master_vcores", "2" };
  if (haveDomain) {
    String[] domainArgs = { "--domain", "TEST_DOMAIN", "--view_acls", "reader_user reader_group", "--modify_acls", "writer_user writer_group", "--create" };
    List<String> argsList = new ArrayList<String>(Arrays.asList(args));
    argsList.addAll(Arrays.asList(domainArgs));
    args = argsList.toArray(new String[argsList.size()]);
  }
  YarnConfiguration conf = yarnComponent.getConfig();
  LOG.info("Initializing DS Client");
  final Client client = new Client(new Configuration(conf));
  boolean initSuccess = client.init(args);
  Assert.assertTrue(initSuccess);
  LOG.info("Running DS Client");
  final AtomicBoolean result = new AtomicBoolean(false);
  Thread t = new Thread() {
    @Override
    public void run() {
      try {
        result.set(client.run());
      } catch (Exception e) {
        throw new RuntimeException(e);
      }
    }
  };
  t.start();
  YarnClient yarnClient = YarnClient.createYarnClient();
  yarnClient.init(new Configuration(conf));
  yarnClient.start();
  String hostName = NetUtils.getHostname();
  boolean verified = false;
  String errorMessage = "";
  while (!verified) {
    List<ApplicationReport> apps = yarnClient.getApplications();
    if (apps.size() == 0) {
      Thread.sleep(10);
      continue;
    }
    ApplicationReport appReport = apps.get(0);
    if (appReport.getHost().equals("N/A")) {
      Thread.sleep(10);
      continue;
    }
    errorMessage = "Expected host name to start with '" + hostName + "', was '" + appReport.getHost() + "'. Expected rpc port to be '-1', was '" + appReport.getRpcPort() + "'.";
    if (checkHostname(appReport.getHost()) && appReport.getRpcPort() == -1) {
      verified = true;
    }
    if (appReport.getYarnApplicationState() == YarnApplicationState.FINISHED) {
      break;
    }
  }
  Assert.assertTrue(errorMessage, verified);
  FileSystem fs = FileSystem.get(conf);
  try {
    new ModelSubmission().execute(FileSystem.get(conf), new String[] { "--name", "dummy", "--version", "1.0", "--zk_quorum", zkServerComponent.getConnectionString(), "--zk_root", configRoot, "--local_model_path", "src/test/resources/maas", "--hdfs_model_path", new Path(fs.getHomeDirectory(), "maas/dummy").toString(), "--num_instances", "1", "--memory", "100", "--mode", "ADD", "--log4j", "src/test/resources/log4j.properties" });
    ServiceDiscoverer discoverer = new ServiceDiscoverer(this.client, config.getServiceRoot());
    discoverer.start();
    {
      boolean passed = false;
      for (int i = 0; i < 100; ++i) {
        try {
          List<ModelEndpoint> endpoints = discoverer.getEndpoints(new Model("dummy", "1.0"));
          if (endpoints != null && endpoints.size() == 1) {
            LOG.trace("Found endpoints: " + endpoints.get(0));
            String output = makeRESTcall(new URL(endpoints.get(0).getEndpoint().getUrl() + "/echo/casey"));
            if (output.contains("casey")) {
              passed = true;
              break;
            }
          }
        } catch (Exception e) {
          // endpoint not yet available; retry
        }
        Thread.sleep(2000);
      }
      Assert.assertTrue(passed);
    }
    {
      List<ModelEndpoint> endpoints = discoverer.getEndpoints(new Model("dummy", "1.0"));
      Assert.assertNotNull(endpoints);
      Assert.assertEquals(1, endpoints.size());
    }
    new ModelSubmission().execute(FileSystem.get(conf), new String[] { "--name", "dummy", "--version", "1.0", "--zk_quorum", zkServerComponent.getConnectionString(), "--zk_root", configRoot, "--num_instances", "1", "--mode", "REMOVE" });
    {
      boolean passed = false;
      for (int i = 0; i < 100; ++i) {
        try {
          List<ModelEndpoint> endpoints = discoverer.getEndpoints(new Model("dummy", "1.0"));
          // ensure that the endpoint is dead.
          if (endpoints == null || endpoints.size() == 0) {
            passed = true;
            break;
          }
        } catch (Exception e) {
          // endpoint still registered; retry
        }
        Thread.sleep(2000);
      }
      Assert.assertTrue(passed);
    }
  } finally {
    cleanup();
  }
}
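Because the test polls yarnClient.getApplications() and takes the first report, it implicitly assumes that only the freshly submitted application is present. Where the Hadoop version in use provides the filtered getApplications overloads, the query can be narrowed by application state, as the short sketch below illustrates; the helper class and method names are invented and not part of the Metron test.

import java.util.EnumSet;
import java.util.List;
import org.apache.hadoop.yarn.api.records.ApplicationReport;
import org.apache.hadoop.yarn.api.records.YarnApplicationState;
import org.apache.hadoop.yarn.client.api.YarnClient;

// Illustrative helper, not part of MaasIntegrationTest.
public class ApplicationLookupSketch {
  /** Returns reports for applications that have been accepted or are currently running. */
  public static List<ApplicationReport> activeApplications(YarnClient yarnClient) throws Exception {
    return yarnClient.getApplications(
        EnumSet.of(YarnApplicationState.ACCEPTED, YarnApplicationState.RUNNING));
  }
}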