Use of org.apache.hadoop.yarn.api.records.ApplicationReport in project asterixdb by apache:
the class AsterixYARNClient, method killApplication.
/**
 * Asks YARN to kill a given application by appId.
 *
 * @param appId
 *            The application to kill. Must not be {@code null}.
 * @param yarnClient
 *            The YARN client object that is connected to the RM.
 * @throws YarnException
 *             if no application id is given, or the RM rejects the kill request
 * @throws IOException
 *             if communication with the RM fails
 */
public static void killApplication(ApplicationId appId, YarnClient yarnClient) throws YarnException, IOException {
    if (appId == null) {
        throw new YarnException("No Application given to kill");
    }
    // Lazily start the client if the caller handed us one that was only initialized.
    if (yarnClient.isInState(STATE.INITED)) {
        yarnClient.start();
    }
    ApplicationReport rep = yarnClient.getApplicationReport(appId);
    YarnApplicationState st = rep.getYarnApplicationState();
    // Nothing to kill if the application already reached a terminal state.
    if (st == YarnApplicationState.FINISHED || st == YarnApplicationState.KILLED || st == YarnApplicationState.FAILED) {
        LOG.info("Application " + appId + " already exited.");
        return;
    }
    // fix: log message previously misspelled "applicaiton"
    LOG.info("Killing application with ID: " + appId);
    yarnClient.killApplication(appId);
}
Use of org.apache.hadoop.yarn.api.records.ApplicationReport in project apex-core by apache:
the class StramMiniClusterTest, method testWebService.
/**
 * Verify the web service deployment and lifecycle functionality
 *
 * @throws Exception
 */
//disabled due to web service init delay issue
@Ignore
@Test
public void testWebService() throws Exception {
    // single container topology of inline input and module
    Properties launchProps = new Properties();
    launchProps.put(StreamingApplication.APEX_PREFIX + "stream.input.classname", TestGeneratorInputOperator.class.getName());
    launchProps.put(StreamingApplication.APEX_PREFIX + "stream.input.outputNode", "module1");
    launchProps.put(StreamingApplication.APEX_PREFIX + "module.module1.classname", GenericTestOperator.class.getName());
    LOG.info("Initializing Client");
    LogicalPlanConfiguration planConfig = new LogicalPlanConfiguration(new Configuration(false));
    planConfig.addFromProperties(launchProps, null);
    StramClient stramClient = new StramClient(new Configuration(yarnCluster.getConfig()), createDAG(planConfig));
    if (StringUtils.isBlank(System.getenv("JAVA_HOME"))) {
        // JAVA_HOME not set in the yarn mini cluster
        stramClient.javaCmd = "java";
    }
    try {
        stramClient.start();
        stramClient.startApplication();
        // attempt web service connection
        ApplicationReport report = stramClient.getApplicationReport();
        // delay to give web service time to fully initialize
        Thread.sleep(5000);
        Client jersey = Client.create();
        jersey.setFollowRedirects(true);
        // probe the info endpoint first and validate its single "id" field
        WebResource resource = jersey.resource("http://" + report.getTrackingUrl()).path(StramWebServices.PATH).path(StramWebServices.PATH_INFO);
        LOG.info("Requesting: " + resource.getURI());
        ClientResponse resp = resource.accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
        assertEquals(MediaType.APPLICATION_JSON_TYPE, resp.getType());
        JSONObject body = resp.getEntity(JSONObject.class);
        LOG.info("Got response: " + body.toString());
        assertEquals("incorrect number of elements", 1, body.length());
        assertEquals("appId", report.getApplicationId().toString(), body.get("id"));
        // then verify the physical-plan operators endpoint also serves JSON
        resource = jersey.resource("http://" + report.getTrackingUrl()).path(StramWebServices.PATH).path(StramWebServices.PATH_PHYSICAL_PLAN_OPERATORS);
        LOG.info("Requesting: " + resource.getURI());
        resp = resource.accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
        assertEquals(MediaType.APPLICATION_JSON_TYPE, resp.getType());
        body = resp.getEntity(JSONObject.class);
        LOG.info("Got response: " + body.toString());
    } finally {
        // always tear the application down, even when an assertion fails above
        stramClient.killApplication();
        stramClient.stop();
    }
}
Use of org.apache.hadoop.yarn.api.records.ApplicationReport in project cdap by caskdata:
the class YarnApps, method collect.
/**
 * Fetches all application reports from the RM and tallies them into the
 * per-state counters, plus a grand total. Counters are reset first, so each
 * call reflects a fresh snapshot.
 */
@Override
public synchronized void collect() throws Exception {
    reset();
    YarnClient yarnClient = createYARNClient();
    List<ApplicationReport> reports;
    try {
        reports = yarnClient.getApplications();
    } finally {
        // release the RM connection regardless of fetch outcome
        yarnClient.stop();
    }
    for (ApplicationReport report : reports) {
        YarnApplicationState state = report.getYarnApplicationState();
        // NEW and NEW_SAVING are counted together as "new" applications
        if (state == YarnApplicationState.NEW || state == YarnApplicationState.NEW_SAVING) {
            newApps++;
        } else if (state == YarnApplicationState.ACCEPTED) {
            accepted++;
        } else if (state == YarnApplicationState.SUBMITTED) {
            submitted++;
        } else if (state == YarnApplicationState.RUNNING) {
            running++;
        } else if (state == YarnApplicationState.FINISHED) {
            finished++;
        } else if (state == YarnApplicationState.FAILED) {
            failed++;
        } else if (state == YarnApplicationState.KILLED) {
            killed++;
        }
    }
    total = reports.size();
}
Use of org.apache.hadoop.yarn.api.records.ApplicationReport in project incubator-systemml by apache:
the class DMLYarnClient, method launchDMLYarnAppmaster.
/**
 * Method to launch the dml yarn app master and execute the given dml script
 * with the given configuration and jar file.
 *
 * NOTE: on launching the yarn app master, we do not explicitly probe if we
 * are running on a yarn or MR1 cluster. In case of MR1, already the class
 * YarnConfiguration will not be found and raise a classnotfound. In case of any
 * exception we fall back to run CP directly in the client process.
 *
 * @return true if dml program successfully executed as yarn app master
 * @throws IOException if IOException occurs
 * @throws DMLScriptException if DMLScriptException occurs
 */
protected boolean launchDMLYarnAppmaster() throws IOException, DMLScriptException {
    boolean ret = false;
    String hdfsWD = null;
    // fix: declared outside the try so the client can be stopped in finally
    // (it was previously started but never stopped, leaking RM resources)
    YarnClient yarnClient = null;
    try {
        Timing time = new Timing(true);
        // load yarn configuration
        YarnConfiguration yconf = new YarnConfiguration();
        // create yarn client
        yarnClient = YarnClient.createYarnClient();
        yarnClient.init(yconf);
        yarnClient.start();
        // create application and get the ApplicationID
        YarnClientApplication app = yarnClient.createApplication();
        ApplicationSubmissionContext appContext = app.getApplicationSubmissionContext();
        ApplicationId appId = appContext.getApplicationId();
        LOG.debug("Created application (applicationID: " + appId + ")");
        // prepare hdfs working directory via ApplicationID
        // copy script, config, jar file to hdfs
        hdfsWD = DMLAppMasterUtils.constructHDFSWorkingDir(_dmlConfig, appId);
        copyResourcesToHdfsWorkingDir(yconf, hdfsWD);
        // construct command line argument
        String command = constructAMCommand(_args, _dmlConfig);
        LOG.debug("Constructed application master command: \n" + command);
        // set up the container launch context for the application master
        ContainerLaunchContext amContainer = Records.newRecord(ContainerLaunchContext.class);
        amContainer.setCommands(Collections.singletonList(command));
        amContainer.setLocalResources(constructLocalResourceMap(yconf));
        amContainer.setEnvironment(constructEnvionmentMap(yconf));
        // Set up resource type requirements for ApplicationMaster
        int memHeap = _dmlConfig.getIntValue(DMLConfig.YARN_APPMASTERMEM);
        int memAlloc = (int) computeMemoryAllocation(memHeap);
        Resource capability = Records.newRecord(Resource.class);
        capability.setMemory(memAlloc);
        capability.setVirtualCores(NUM_CORES);
        LOG.debug("Requested application resources: memory=" + memAlloc + ", vcores=" + NUM_CORES);
        // Finally, set-up ApplicationSubmissionContext for the application
        String qname = _dmlConfig.getTextValue(DMLConfig.YARN_APPQUEUE);
        // application name
        appContext.setApplicationName(APPMASTER_NAME);
        appContext.setAMContainerSpec(amContainer);
        appContext.setResource(capability);
        // queue
        appContext.setQueue(qname);
        LOG.debug("Configured application meta data: name=" + APPMASTER_NAME + ", queue=" + qname);
        // submit application (non-blocking)
        yarnClient.submitApplication(appContext);
        // Check application status periodically (and output web ui address)
        ApplicationReport appReport = yarnClient.getApplicationReport(appId);
        LOG.info("Application tracking-URL: " + appReport.getTrackingUrl());
        YarnApplicationState appState = appReport.getYarnApplicationState();
        YarnApplicationState oldState = appState;
        LOG.info("Application state: " + appState);
        // poll until the app master reaches a terminal state
        while (appState != YarnApplicationState.FINISHED && appState != YarnApplicationState.KILLED && appState != YarnApplicationState.FAILED) {
            // wait for 200ms
            Thread.sleep(APP_STATE_INTERVAL);
            appReport = yarnClient.getApplicationReport(appId);
            appState = appReport.getYarnApplicationState();
            if (appState != oldState) {
                oldState = appState;
                LOG.info("Application state: " + appState);
            }
        }
        // check final status (failed or succeeded)
        FinalApplicationStatus finalState = appReport.getFinalApplicationStatus();
        LOG.info("Application final status: " + finalState);
        // show application and total runtime
        double appRuntime = (double) (appReport.getFinishTime() - appReport.getStartTime()) / 1000;
        LOG.info("Application runtime: " + appRuntime + " sec.");
        LOG.info("Total runtime: " + String.format("%.3f", time.stop() / 1000) + " sec.");
        // raised script-level error in case of failed final status
        if (finalState != FinalApplicationStatus.SUCCEEDED) {
            // propagate script-level stop call message
            String stop_msg = readMessageToHDFSWorkingDir(_dmlConfig, yconf, appId);
            if (stop_msg != null)
                throw new DMLScriptException(stop_msg);
            // generic failure message
            throw new DMLRuntimeException("DML yarn app master finished with final status: " + finalState + ".");
        }
        ret = true;
    } catch (DMLScriptException ex) {
        // rethrow DMLScriptException to propagate stop call
        throw ex;
    } catch (Exception ex) {
        LOG.error("Failed to run DML yarn app master.", ex);
        ret = false;
    } finally {
        // release the RM connection (previously never stopped)
        if (yarnClient != null)
            yarnClient.stop();
        // cleanup working directory
        if (hdfsWD != null)
            MapReduceTool.deleteFileIfExistOnHDFS(hdfsWD);
    }
    return ret;
}
Use of org.apache.hadoop.yarn.api.records.ApplicationReport in project metron by apache:
the class Client, method monitorApplication.
/**
 * Monitor the submitted application for completion.
 * Kill application if time expires.
 *
 * @param appId Application Id of application to be monitored
 * @return true if application is running or completed successfully
 * @throws YarnException if the RM rejects the report request
 * @throws IOException if communication with the RM fails
 */
private boolean monitorApplication(ApplicationId appId) throws YarnException, IOException {
    while (true) {
        // Check app status every 1 second.
        try {
            Thread.sleep(1000);
        } catch (InterruptedException e) {
            // fix: restore the interrupt flag so callers can observe the interruption
            Thread.currentThread().interrupt();
            LOG.debug("Thread sleep in monitoring loop interrupted");
        }
        // Get application report for the appId we are interested in
        ApplicationReport report = yarnClient.getApplicationReport(appId);
        LOG.info("Got application report from ASM for" + ", appId=" + appId.getId() + ", clientToAMToken=" + report.getClientToAMToken() + ", appDiagnostics=" + report.getDiagnostics() + ", appMasterHost=" + report.getHost() + ", appQueue=" + report.getQueue() + ", appMasterRpcPort=" + report.getRpcPort() + ", appStartTime=" + report.getStartTime() + ", yarnAppState=" + report.getYarnApplicationState().toString() + ", distributedFinalState=" + report.getFinalApplicationStatus().toString() + ", appTrackingUrl=" + report.getTrackingUrl() + ", appUser=" + report.getUser());
        YarnApplicationState state = report.getYarnApplicationState();
        FinalApplicationStatus dsStatus = report.getFinalApplicationStatus();
        // a RUNNING app is considered successfully launched; stop monitoring
        if (YarnApplicationState.RUNNING == state) {
            LOG.info("Application is running...");
            return true;
        }
        if (YarnApplicationState.FINISHED == state) {
            if (FinalApplicationStatus.SUCCEEDED == dsStatus) {
                LOG.info("Application has completed successfully. Breaking monitoring loop");
                return true;
            } else {
                // fix: log message previously read "Application did finished unsuccessfully."
                LOG.info("Application finished unsuccessfully." + " YarnState=" + state.toString() + ", DSFinalStatus=" + dsStatus.toString() + ". Breaking monitoring loop");
                return false;
            }
        } else if (YarnApplicationState.KILLED == state || YarnApplicationState.FAILED == state) {
            LOG.info("Application did not finish." + " YarnState=" + state.toString() + ", DSFinalStatus=" + dsStatus.toString() + ". Breaking monitoring loop");
            return false;
        }
        // enforce the client-side deadline: kill the app once the timeout elapses
        if (System.currentTimeMillis() > (clientStartTime + clientTimeout)) {
            LOG.info("Reached client specified timeout for application. Killing application");
            forceKillApplication(appId);
            return false;
        }
    }
}
Aggregations