use of org.apache.hadoop.yarn.api.records.YarnApplicationState in project apex-core by apache.
the class InlineAM method run.
public boolean run() throws Exception {
  LOG.info("Starting Client");
  // Connect to ResourceManager
  rmClient.start();
  try {
    // Get a new application id
    YarnClientApplication newApp = rmClient.createApplication();
    ApplicationId appId = newApp.getNewApplicationResponse().getApplicationId();
    // Create launch context for app master
    LOG.info("Setting up application submission context for ASM");
    ApplicationSubmissionContext appContext = Records.newRecord(ApplicationSubmissionContext.class);
    // set the application id
    appContext.setApplicationId(appId);
    // set the application name
    appContext.setApplicationName(appName);
    // Set the priority for the application master
    Priority pri = Records.newRecord(Priority.class);
    pri.setPriority(amPriority);
    appContext.setPriority(pri);
    // Set the queue to which this application is to be submitted in the RM
    appContext.setQueue(amQueue);
    // Set up the container launch context for the application master
    ContainerLaunchContext amContainer = Records.newRecord(ContainerLaunchContext.class);
    appContext.setAMContainerSpec(amContainer);
    // unmanaged AM
    appContext.setUnmanagedAM(true);
    LOG.info("Setting unmanaged AM");
    // Submit the application to the applications manager
    LOG.info("Submitting application to ASM");
    rmClient.submitApplication(appContext);
    // Monitor the application to wait for launch state
    ApplicationReport appReport = monitorApplication(appId, EnumSet.of(YarnApplicationState.ACCEPTED));
    ApplicationAttemptId attemptId = appReport.getCurrentApplicationAttemptId();
    LOG.info("Launching application with id: " + attemptId);
    // launch AM
    runAM(attemptId);
    // Monitor the application for end state
    appReport = monitorApplication(appId, EnumSet.of(YarnApplicationState.KILLED, YarnApplicationState.FAILED, YarnApplicationState.FINISHED));
    YarnApplicationState appState = appReport.getYarnApplicationState();
    FinalApplicationStatus appStatus = appReport.getFinalApplicationStatus();
    LOG.info("App ended with state: " + appReport.getYarnApplicationState() + " and status: " + appStatus);
    boolean success;
    if (YarnApplicationState.FINISHED == appState && FinalApplicationStatus.SUCCEEDED == appStatus) {
      LOG.info("Application has completed successfully.");
      success = true;
    } else {
      LOG.info("Application finished unsuccessfully." + " YarnState=" + appState.toString() + ", FinalStatus=" + appStatus.toString());
      success = false;
    }
    return success;
  } finally {
    rmClient.stop();
  }
}
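The snippet above delegates state polling to a monitorApplication helper that is not included here. A minimal polling sketch, assuming rmClient is the same YarnClient instance used above; the actual InlineAM helper may differ:

// Hypothetical helper, written only to match the calls above: poll the ResourceManager
// until the application reaches one of the requested states, then return its report.
private ApplicationReport monitorApplication(ApplicationId appId, Set<YarnApplicationState> targetStates)
    throws YarnException, IOException, InterruptedException {
  while (true) {
    ApplicationReport report = rmClient.getApplicationReport(appId);
    YarnApplicationState state = report.getYarnApplicationState();
    if (targetStates.contains(state)) {
      return report;
    }
    LOG.info("Waiting for application " + appId + ", current state: " + state);
    // polling interval chosen arbitrarily for this sketch
    Thread.sleep(1000);
  }
}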
use of org.apache.hadoop.yarn.api.records.YarnApplicationState in project asterixdb by apache.
the class AsterixYARNClient method killApplication.
/**
 * Asks YARN to kill a given application by appId
 *
 * @param appId
 *            The application to kill.
 * @param yarnClient
 *            The YARN client object that is connected to the RM.
 * @throws YarnException
 * @throws IOException
 */
public static void killApplication(ApplicationId appId, YarnClient yarnClient) throws YarnException, IOException {
  if (appId == null) {
    throw new YarnException("No Application given to kill");
  }
  if (yarnClient.isInState(STATE.INITED)) {
    yarnClient.start();
  }
  YarnApplicationState st;
  ApplicationReport rep = yarnClient.getApplicationReport(appId);
  st = rep.getYarnApplicationState();
  if (st == YarnApplicationState.FINISHED || st == YarnApplicationState.KILLED || st == YarnApplicationState.FAILED) {
    LOG.info("Application " + appId + " already exited.");
    return;
  }
  LOG.info("Killing application with ID: " + appId);
  yarnClient.killApplication(appId);
}
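A hypothetical call site for the method above; the application id string and the stand-alone client set-up are illustrative only (ApplicationId.fromString requires Hadoop 2.8+, older releases use ConverterUtils.toApplicationId):

// Illustrative usage only: create a client, kill a known application, shut down.
YarnConfiguration conf = new YarnConfiguration();
YarnClient yarnClient = YarnClient.createYarnClient();
yarnClient.init(conf);
try {
  // placeholder id for illustration
  ApplicationId appId = ApplicationId.fromString("application_1500000000000_0001");
  AsterixYARNClient.killApplication(appId, yarnClient);
} finally {
  yarnClient.stop();
}

Note that killApplication itself starts the client if it is still in the INITED state, so the caller only needs to init() it.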
use of org.apache.hadoop.yarn.api.records.YarnApplicationState in project incubator-systemml by apache.
the class DMLYarnClient method launchDMLYarnAppmaster.
/**
 * Method to launch the dml yarn app master and execute the given dml script
 * with the given configuration and jar file.
 *
 * NOTE: on launching the yarn app master, we do not explicitly probe if we
 * are running on a YARN or MR1 cluster. In case of MR1, the class
 * YarnConfiguration will not be found and a ClassNotFoundException is raised.
 * In case of any exception we fall back to running CP directly in the
 * client process.
 *
 * @return true if dml program successfully executed as yarn app master
 * @throws IOException if IOException occurs
 * @throws DMLScriptException if DMLScriptException occurs
 */
protected boolean launchDMLYarnAppmaster() throws IOException, DMLScriptException {
  boolean ret = false;
  String hdfsWD = null;
  try {
    Timing time = new Timing(true);
    // load yarn configuration
    YarnConfiguration yconf = new YarnConfiguration();
    // create yarn client
    YarnClient yarnClient = YarnClient.createYarnClient();
    yarnClient.init(yconf);
    yarnClient.start();
    // create application and get the ApplicationID
    YarnClientApplication app = yarnClient.createApplication();
    ApplicationSubmissionContext appContext = app.getApplicationSubmissionContext();
    ApplicationId appId = appContext.getApplicationId();
    LOG.debug("Created application (applicationID: " + appId + ")");
    // prepare hdfs working directory via ApplicationID
    // copy script, config, jar file to hdfs
    hdfsWD = DMLAppMasterUtils.constructHDFSWorkingDir(_dmlConfig, appId);
    copyResourcesToHdfsWorkingDir(yconf, hdfsWD);
    // construct command line argument
    String command = constructAMCommand(_args, _dmlConfig);
    LOG.debug("Constructed application master command: \n" + command);
    // set up the container launch context for the application master
    ContainerLaunchContext amContainer = Records.newRecord(ContainerLaunchContext.class);
    amContainer.setCommands(Collections.singletonList(command));
    amContainer.setLocalResources(constructLocalResourceMap(yconf));
    amContainer.setEnvironment(constructEnvionmentMap(yconf));
    // Set up resource type requirements for ApplicationMaster
    int memHeap = _dmlConfig.getIntValue(DMLConfig.YARN_APPMASTERMEM);
    int memAlloc = (int) computeMemoryAllocation(memHeap);
    Resource capability = Records.newRecord(Resource.class);
    capability.setMemory(memAlloc);
    capability.setVirtualCores(NUM_CORES);
    LOG.debug("Requested application resources: memory=" + memAlloc + ", vcores=" + NUM_CORES);
    // Finally, set up the ApplicationSubmissionContext for the application
    String qname = _dmlConfig.getTextValue(DMLConfig.YARN_APPQUEUE);
    // application name
    appContext.setApplicationName(APPMASTER_NAME);
    appContext.setAMContainerSpec(amContainer);
    appContext.setResource(capability);
    // queue
    appContext.setQueue(qname);
    LOG.debug("Configured application meta data: name=" + APPMASTER_NAME + ", queue=" + qname);
    // submit application (non-blocking)
    yarnClient.submitApplication(appContext);
    // Check application status periodically (and output web ui address)
    ApplicationReport appReport = yarnClient.getApplicationReport(appId);
    LOG.info("Application tracking-URL: " + appReport.getTrackingUrl());
    YarnApplicationState appState = appReport.getYarnApplicationState();
    YarnApplicationState oldState = appState;
    LOG.info("Application state: " + appState);
    while (appState != YarnApplicationState.FINISHED && appState != YarnApplicationState.KILLED && appState != YarnApplicationState.FAILED) {
      // wait for 200ms
      Thread.sleep(APP_STATE_INTERVAL);
      appReport = yarnClient.getApplicationReport(appId);
      appState = appReport.getYarnApplicationState();
      if (appState != oldState) {
        oldState = appState;
        LOG.info("Application state: " + appState);
      }
    }
    // check final status (failed or succeeded)
    FinalApplicationStatus finalState = appReport.getFinalApplicationStatus();
    LOG.info("Application final status: " + finalState);
    // show application and total runtime
    double appRuntime = (double) (appReport.getFinishTime() - appReport.getStartTime()) / 1000;
    LOG.info("Application runtime: " + appRuntime + " sec.");
    LOG.info("Total runtime: " + String.format("%.3f", time.stop() / 1000) + " sec.");
    // raise script-level error in case of failed final status
    if (finalState != FinalApplicationStatus.SUCCEEDED) {
      // propagate script-level stop call message
      String stop_msg = readMessageToHDFSWorkingDir(_dmlConfig, yconf, appId);
      if (stop_msg != null)
        throw new DMLScriptException(stop_msg);
      // generic failure message
      throw new DMLRuntimeException("DML yarn app master finished with final status: " + finalState + ".");
    }
    ret = true;
  } catch (DMLScriptException ex) {
    // rethrow DMLScriptException to propagate stop call
    throw ex;
  } catch (Exception ex) {
    LOG.error("Failed to run DML yarn app master.", ex);
    ret = false;
  } finally {
    // cleanup working directory
    if (hdfsWD != null)
      MapReduceTool.deleteFileIfExistOnHDFS(hdfsWD);
  }
  return ret;
}
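A hypothetical caller sketching the fallback behaviour described in the Javadoc above; runLocalCP() is a placeholder name, not a SystemML API:

// Illustrative only: try the YARN app master first, otherwise run in-process (CP).
// Script-level stop() errors (DMLScriptException) are deliberately not caught here,
// so they propagate to the user as the method above intends.
boolean launched;
try {
  launched = launchDMLYarnAppmaster();
} catch (IOException ex) {
  // e.g., resources could not be copied to the HDFS working directory
  launched = false;
}
if (!launched) {
  runLocalCP(); // placeholder for executing the DML script in the client process
}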
use of org.apache.hadoop.yarn.api.records.YarnApplicationState in project hadoop by apache.
the class RMWebServices method getAppStatistics.
@GET
@Path("/appstatistics")
@Produces({ MediaType.APPLICATION_JSON + "; " + JettyUtils.UTF_8, MediaType.APPLICATION_XML + "; " + JettyUtils.UTF_8 })
public ApplicationStatisticsInfo getAppStatistics(@Context HttpServletRequest hsr, @QueryParam("states") Set<String> stateQueries, @QueryParam("applicationTypes") Set<String> typeQueries) {
  init();
  // parse the params and build the scoreboard
  // converting state/type name to lowercase
  Set<String> states = parseQueries(stateQueries, true);
  Set<String> types = parseQueries(typeQueries, false);
  // if no types, counts the applications of any types
  if (types.size() == 0) {
    types.add(ANY);
  } else if (types.size() != 1) {
    throw new BadRequestException("# of applicationTypes = " + types.size() + ", we temporarily support at most one applicationType");
  }
  // if no states, returns the counts of all RMAppStates
  if (states.size() == 0) {
    for (YarnApplicationState state : YarnApplicationState.values()) {
      states.add(StringUtils.toLowerCase(state.toString()));
    }
  }
  // in case we extend to multiple applicationTypes in the future
  Map<YarnApplicationState, Map<String, Long>> scoreboard = buildScoreboard(states, types);
  // go through the apps in RM to count the numbers, ignoring the case of
  // the state/type name
  ConcurrentMap<ApplicationId, RMApp> apps = rm.getRMContext().getRMApps();
  for (RMApp rmapp : apps.values()) {
    YarnApplicationState state = rmapp.createApplicationState();
    String type = StringUtils.toLowerCase(rmapp.getApplicationType().trim());
    if (states.contains(StringUtils.toLowerCase(state.toString()))) {
      if (types.contains(ANY)) {
        countApp(scoreboard, state, ANY);
      } else if (types.contains(type)) {
        countApp(scoreboard, state, type);
      }
    }
  }
  // fill the response object
  ApplicationStatisticsInfo appStatInfo = new ApplicationStatisticsInfo();
  for (Map.Entry<YarnApplicationState, Map<String, Long>> partScoreboard : scoreboard.entrySet()) {
    for (Map.Entry<String, Long> statEntry : partScoreboard.getValue().entrySet()) {
      StatisticsItemInfo statItem = new StatisticsItemInfo(partScoreboard.getKey(), statEntry.getKey(), statEntry.getValue());
      appStatInfo.add(statItem);
    }
  }
  return appStatInfo;
}
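The buildScoreboard and countApp helpers are not part of this snippet; the sketches below are written only to match how they are used above, and the actual private implementations in RMWebServices may differ:

// Sketches only: initialize a zeroed state/type scoreboard, and bump one cell of it.
private static Map<YarnApplicationState, Map<String, Long>> buildScoreboard(
    Set<String> states, Set<String> types) {
  Map<YarnApplicationState, Map<String, Long>> scoreboard = new HashMap<>();
  // states and types arrive lower-cased from parseQueries above
  for (String state : states) {
    Map<String, Long> partScoreboard = new HashMap<>();
    for (String type : types) {
      partScoreboard.put(type, 0L);
    }
    scoreboard.put(YarnApplicationState.valueOf(StringUtils.toUpperCase(state)), partScoreboard);
  }
  return scoreboard;
}

private static void countApp(Map<YarnApplicationState, Map<String, Long>> scoreboard,
    YarnApplicationState state, String type) {
  Map<String, Long> partScoreboard = scoreboard.get(state);
  partScoreboard.put(type, partScoreboard.get(type) + 1L);
}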
use of org.apache.hadoop.yarn.api.records.YarnApplicationState in project hadoop by apache.
the class FairSchedulerAppsBlock method render.
@Override
public void render(Block html) {
  TBODY<TABLE<Hamlet>> tbody = html.table("#apps").thead().tr()
      .th(".id", "ID")
      .th(".user", "User")
      .th(".name", "Name")
      .th(".type", "Application Type")
      .th(".queue", "Queue")
      .th(".fairshare", "Fair Share")
      .th(".starttime", "StartTime")
      .th(".finishtime", "FinishTime")
      .th(".state", "State")
      .th(".finalstatus", "FinalStatus")
      .th(".runningcontainer", "Running Containers")
      .th(".allocatedCpu", "Allocated CPU VCores")
      .th(".allocatedMemory", "Allocated Memory MB")
      .th(".progress", "Progress")
      .th(".ui", "Tracking UI")._()._().tbody();
  Collection<YarnApplicationState> reqAppStates = null;
  String reqStateString = $(APP_STATE);
  if (reqStateString != null && !reqStateString.isEmpty()) {
    String[] appStateStrings = reqStateString.split(",");
    reqAppStates = new HashSet<YarnApplicationState>(appStateStrings.length);
    for (String stateString : appStateStrings) {
      reqAppStates.add(YarnApplicationState.valueOf(stateString));
    }
  }
  StringBuilder appsTableData = new StringBuilder("[\n");
  for (RMApp app : apps.values()) {
    if (reqAppStates != null && !reqAppStates.contains(app.createApplicationState())) {
      continue;
    }
    AppInfo appInfo = new AppInfo(rm, app, true, WebAppUtils.getHttpSchemePrefix(conf));
    String percent = StringUtils.format("%.1f", appInfo.getProgress());
    ApplicationAttemptId attemptId = app.getCurrentAppAttempt().getAppAttemptId();
    long fairShare = fsinfo.getAppFairShare(attemptId);
    if (fairShare == FairSchedulerInfo.INVALID_FAIR_SHARE) {
      // FairScheduler#applications don't have the entry. Skip it.
      continue;
    }
    appsTableData.append("[\"<a href='").append(url("app", appInfo.getAppId())).append("'>")
        .append(appInfo.getAppId()).append("</a>\",\"")
        .append(StringEscapeUtils.escapeJavaScript(StringEscapeUtils.escapeHtml(appInfo.getUser()))).append("\",\"")
        .append(StringEscapeUtils.escapeJavaScript(StringEscapeUtils.escapeHtml(appInfo.getName()))).append("\",\"")
        .append(StringEscapeUtils.escapeJavaScript(StringEscapeUtils.escapeHtml(appInfo.getApplicationType()))).append("\",\"")
        .append(StringEscapeUtils.escapeJavaScript(StringEscapeUtils.escapeHtml(appInfo.getQueue()))).append("\",\"")
        .append(fairShare).append("\",\"")
        .append(appInfo.getStartTime()).append("\",\"")
        .append(appInfo.getFinishTime()).append("\",\"")
        .append(appInfo.getState()).append("\",\"")
        .append(appInfo.getFinalStatus()).append("\",\"")
        .append(appInfo.getRunningContainers() == -1 ? "N/A" : String.valueOf(appInfo.getRunningContainers())).append("\",\"")
        .append(appInfo.getAllocatedVCores() == -1 ? "N/A" : String.valueOf(appInfo.getAllocatedVCores())).append("\",\"")
        .append(appInfo.getAllocatedMB() == -1 ? "N/A" : String.valueOf(appInfo.getAllocatedMB())).append("\",\"")
        .append("<br title='").append(percent).append("'> <div class='").append(C_PROGRESSBAR)
        .append("' title='").append(join(percent, '%')).append("'> ")
        .append("<div class='").append(C_PROGRESSBAR_VALUE).append("' style='")
        .append(join("width:", percent, '%')).append("'> </div> </div>")
        .append("\",\"<a href='");
    String trackingURL = !appInfo.isTrackingUrlReady() ? "#" : appInfo.getTrackingUrlPretty();
    appsTableData.append(trackingURL).append("'>").append(appInfo.getTrackingUI()).append("</a>\"],\n");
  }
  if (appsTableData.charAt(appsTableData.length() - 2) == ',') {
    appsTableData.delete(appsTableData.length() - 2, appsTableData.length() - 1);
  }
  appsTableData.append("]");
  html.script().$type("text/javascript")._("var appsTableData=" + appsTableData)._();
  tbody._()._();
}
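The comma-separated APP_STATE filter handling above generalizes into a small utility; a minimal sketch (parseStates is an illustrative name, not a Hadoop API), using EnumSet rather than HashSet:

// Illustrative helper: turn a filter such as "RUNNING,ACCEPTED" into a set of states.
// An unknown state name raises IllegalArgumentException, just as valueOf does above.
static EnumSet<YarnApplicationState> parseStates(String csv) {
  EnumSet<YarnApplicationState> states = EnumSet.noneOf(YarnApplicationState.class);
  if (csv != null && !csv.isEmpty()) {
    for (String name : csv.split(",")) {
      states.add(YarnApplicationState.valueOf(name.trim().toUpperCase(Locale.ENGLISH)));
    }
  }
  return states;
}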
Aggregations