Example 76 with ApplicationReport

use of org.apache.hadoop.yarn.api.records.ApplicationReport in project asterixdb by apache.

the class AsterixYARNClient method killApplication.

/**
     * Asks YARN to kill a given application by appId
     *
     * @param appId
     *            The application to kill.
     * @param yarnClient
     *            The YARN client object that is connected to the RM.
     * @throws YarnException
     * @throws IOException
     */
public static void killApplication(ApplicationId appId, YarnClient yarnClient) throws YarnException, IOException {
    if (appId == null) {
        throw new YarnException("No Application given to kill");
    }
    if (yarnClient.isInState(STATE.INITED)) {
        yarnClient.start();
    }
    YarnApplicationState st;
    ApplicationReport rep = yarnClient.getApplicationReport(appId);
    st = rep.getYarnApplicationState();
    if (st == YarnApplicationState.FINISHED || st == YarnApplicationState.KILLED || st == YarnApplicationState.FAILED) {
        LOG.info("Application " + appId + " already exited.");
        return;
    }
    LOG.info("Killing applicaiton with ID: " + appId);
    yarnClient.killApplication(appId);
}
Also used : ApplicationReport(org.apache.hadoop.yarn.api.records.ApplicationReport) YarnApplicationState(org.apache.hadoop.yarn.api.records.YarnApplicationState) YarnException(org.apache.hadoop.yarn.exceptions.YarnException)
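A minimal usage sketch (not from the AsterixDB source; the cluster timestamp and id below are hypothetical) showing how the helper above might be invoked against a running ResourceManager:

// Sketch only: assumes a reachable ResourceManager and a valid application id.
YarnConfiguration conf = new YarnConfiguration();
YarnClient yarnClient = YarnClient.createYarnClient();
yarnClient.init(conf);
yarnClient.start();
try {
    // hypothetical application id, for illustration only
    ApplicationId appId = ApplicationId.newInstance(1500000000000L, 1);
    AsterixYARNClient.killApplication(appId, yarnClient);
} finally {
    yarnClient.stop();
}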

Example 77 with ApplicationReport

use of org.apache.hadoop.yarn.api.records.ApplicationReport in project apex-core by apache.

the class StramMiniClusterTest method testWebService.

/**
   * Verify the web service deployment and lifecycle functionality
   *
   * @throws Exception
   */
//disabled due to web service init delay issue
@Ignore
@Test
public void testWebService() throws Exception {
    // single container topology of inline input and module
    Properties props = new Properties();
    props.put(StreamingApplication.APEX_PREFIX + "stream.input.classname", TestGeneratorInputOperator.class.getName());
    props.put(StreamingApplication.APEX_PREFIX + "stream.input.outputNode", "module1");
    props.put(StreamingApplication.APEX_PREFIX + "module.module1.classname", GenericTestOperator.class.getName());
    LOG.info("Initializing Client");
    LogicalPlanConfiguration tb = new LogicalPlanConfiguration(new Configuration(false));
    tb.addFromProperties(props, null);
    StramClient client = new StramClient(new Configuration(yarnCluster.getConfig()), createDAG(tb));
    if (StringUtils.isBlank(System.getenv("JAVA_HOME"))) {
        // JAVA_HOME not set in the yarn mini cluster
        client.javaCmd = "java";
    }
    try {
        client.start();
        client.startApplication();
        // attempt web service connection
        ApplicationReport appReport = client.getApplicationReport();
        // delay to give web service time to fully initialize
        Thread.sleep(5000);
        Client wsClient = Client.create();
        wsClient.setFollowRedirects(true);
        WebResource r = wsClient.resource("http://" + appReport.getTrackingUrl()).path(StramWebServices.PATH).path(StramWebServices.PATH_INFO);
        LOG.info("Requesting: " + r.getURI());
        ClientResponse response = r.accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
        assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
        JSONObject json = response.getEntity(JSONObject.class);
        LOG.info("Got response: " + json.toString());
        assertEquals("incorrect number of elements", 1, json.length());
        assertEquals("appId", appReport.getApplicationId().toString(), json.get("id"));
        r = wsClient.resource("http://" + appReport.getTrackingUrl()).path(StramWebServices.PATH).path(StramWebServices.PATH_PHYSICAL_PLAN_OPERATORS);
        LOG.info("Requesting: " + r.getURI());
        response = r.accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
        assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
        json = response.getEntity(JSONObject.class);
        LOG.info("Got response: " + json.toString());
    } finally {
        //LOG.info("waiting...");
        //synchronized (this) {
        //  this.wait();
        //}
        //boolean result = client.monitorApplication();
        client.killApplication();
        client.stop();
    }
}
Also used : LogicalPlanConfiguration(com.datatorrent.stram.plan.logical.LogicalPlanConfiguration) ApplicationReport(org.apache.hadoop.yarn.api.records.ApplicationReport) ClientResponse(com.sun.jersey.api.client.ClientResponse) Configuration(org.apache.hadoop.conf.Configuration) LogicalPlanConfiguration(com.datatorrent.stram.plan.logical.LogicalPlanConfiguration) YarnConfiguration(org.apache.hadoop.yarn.conf.YarnConfiguration) JSONObject(org.codehaus.jettison.json.JSONObject) GenericTestOperator(com.datatorrent.stram.engine.GenericTestOperator) WebResource(com.sun.jersey.api.client.WebResource) TestGeneratorInputOperator(com.datatorrent.stram.engine.TestGeneratorInputOperator) Properties(java.util.Properties) AMRMClient(org.apache.hadoop.yarn.client.api.AMRMClient) Client(com.sun.jersey.api.client.Client) Ignore(org.junit.Ignore) Test(org.junit.Test)
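The fixed Thread.sleep(5000) is the likely source of the init-delay flakiness noted in the @Ignore comment. A hedged alternative (a sketch, not part of the apex-core test) is to poll the Stram info endpoint until it answers or a retry budget is exhausted, reusing the same Jersey 1.x client and the appReport obtained above:

// Sketch: retry until the web service responds, instead of a fixed sleep.
WebResource info = wsClient.resource("http://" + appReport.getTrackingUrl())
        .path(StramWebServices.PATH).path(StramWebServices.PATH_INFO);
boolean serviceUp = false;
for (int i = 0; i < 30 && !serviceUp; i++) {      // poll for up to ~30 seconds
    try {
        ClientResponse probe = info.accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
        serviceUp = (probe.getStatus() == 200);   // web service answered
    } catch (RuntimeException e) {
        // connection refused while the app master web service is still starting
    }
    if (!serviceUp) {
        Thread.sleep(1000);
    }
}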

Example 78 with ApplicationReport

use of org.apache.hadoop.yarn.api.records.ApplicationReport in project cdap by caskdata.

the class YarnApps method collect.

@Override
public synchronized void collect() throws Exception {
    reset();
    YarnClient yarnClient = createYARNClient();
    List<ApplicationReport> applications;
    try {
        applications = yarnClient.getApplications();
    } finally {
        yarnClient.stop();
    }
    for (ApplicationReport application : applications) {
        switch(application.getYarnApplicationState()) {
            case NEW:
            case NEW_SAVING:
                newApps++;
                break;
            case ACCEPTED:
                accepted++;
                break;
            case SUBMITTED:
                submitted++;
                break;
            case RUNNING:
                running++;
                break;
            case FINISHED:
                finished++;
                break;
            case FAILED:
                failed++;
                break;
            case KILLED:
                killed++;
                break;
        }
    }
    total = applications.size();
}
Also used : ApplicationReport(org.apache.hadoop.yarn.api.records.ApplicationReport) YarnClient(org.apache.hadoop.yarn.client.api.YarnClient)
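The createYARNClient() factory is not shown in this snippet; it is provided by the surrounding CDAP class. A typical implementation (a sketch under that assumption, not the actual CDAP code) simply initializes and starts a client from the cluster configuration:

// Sketch only: the real CDAP factory may wire in additional configuration.
private YarnClient createYARNClient() {
    YarnClient client = YarnClient.createYarnClient();
    client.init(new YarnConfiguration());
    client.start();
    return client;
}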

Example 79 with ApplicationReport

use of org.apache.hadoop.yarn.api.records.ApplicationReport in project incubator-systemml by apache.

the class DMLYarnClient method launchDMLYarnAppmaster.

/**
 * Method to launch the dml yarn app master and execute the given dml script
 * with the given configuration and jar file.
 *
 * NOTE: on launching the yarn app master, we do not explicitly probe whether we
 *	  are running on a YARN or MR1 cluster. In the MR1 case, the class
 *	  YarnConfiguration is not found and a ClassNotFoundException is raised. On any
 *	  exception we fall back to running CP directly in the client process.
 *
 * @return true if dml program successfully executed as yarn app master
 * @throws IOException if IOException occurs
 * @throws DMLScriptException if DMLScriptException occurs
 */
protected boolean launchDMLYarnAppmaster() throws IOException, DMLScriptException {
    boolean ret = false;
    String hdfsWD = null;
    try {
        Timing time = new Timing(true);
        // load yarn configuration
        YarnConfiguration yconf = new YarnConfiguration();
        // create yarn client
        YarnClient yarnClient = YarnClient.createYarnClient();
        yarnClient.init(yconf);
        yarnClient.start();
        // create application and get the ApplicationID
        YarnClientApplication app = yarnClient.createApplication();
        ApplicationSubmissionContext appContext = app.getApplicationSubmissionContext();
        ApplicationId appId = appContext.getApplicationId();
        LOG.debug("Created application (applicationID: " + appId + ")");
        // prepare hdfs working directory via ApplicationID
        // copy script, config, jar file to hdfs
        hdfsWD = DMLAppMasterUtils.constructHDFSWorkingDir(_dmlConfig, appId);
        copyResourcesToHdfsWorkingDir(yconf, hdfsWD);
        // construct command line argument
        String command = constructAMCommand(_args, _dmlConfig);
        LOG.debug("Constructed application master command: \n" + command);
        // set up the container launch context for the application master
        ContainerLaunchContext amContainer = Records.newRecord(ContainerLaunchContext.class);
        amContainer.setCommands(Collections.singletonList(command));
        amContainer.setLocalResources(constructLocalResourceMap(yconf));
        amContainer.setEnvironment(constructEnvionmentMap(yconf));
        // Set up resource type requirements for ApplicationMaster
        int memHeap = _dmlConfig.getIntValue(DMLConfig.YARN_APPMASTERMEM);
        int memAlloc = (int) computeMemoryAllocation(memHeap);
        Resource capability = Records.newRecord(Resource.class);
        capability.setMemory(memAlloc);
        capability.setVirtualCores(NUM_CORES);
        LOG.debug("Requested application resources: memory=" + memAlloc + ", vcores=" + NUM_CORES);
        // Finally, set-up ApplicationSubmissionContext for the application
        String qname = _dmlConfig.getTextValue(DMLConfig.YARN_APPQUEUE);
        // application name
        appContext.setApplicationName(APPMASTER_NAME);
        appContext.setAMContainerSpec(amContainer);
        appContext.setResource(capability);
        // queue
        appContext.setQueue(qname);
        LOG.debug("Configured application meta data: name=" + APPMASTER_NAME + ", queue=" + qname);
        // submit application (non-blocking)
        yarnClient.submitApplication(appContext);
        // Check application status periodically (and output web ui address)
        ApplicationReport appReport = yarnClient.getApplicationReport(appId);
        LOG.info("Application tracking-URL: " + appReport.getTrackingUrl());
        YarnApplicationState appState = appReport.getYarnApplicationState();
        YarnApplicationState oldState = appState;
        LOG.info("Application state: " + appState);
        while (appState != YarnApplicationState.FINISHED && appState != YarnApplicationState.KILLED && appState != YarnApplicationState.FAILED) {
            // wait for 200ms
            Thread.sleep(APP_STATE_INTERVAL);
            appReport = yarnClient.getApplicationReport(appId);
            appState = appReport.getYarnApplicationState();
            if (appState != oldState) {
                oldState = appState;
                LOG.info("Application state: " + appState);
            }
        }
        // check final status (failed or succeeded)
        FinalApplicationStatus finalState = appReport.getFinalApplicationStatus();
        LOG.info("Application final status: " + finalState);
        // show application and total runtime
        double appRuntime = (double) (appReport.getFinishTime() - appReport.getStartTime()) / 1000;
        LOG.info("Application runtime: " + appRuntime + " sec.");
        LOG.info("Total runtime: " + String.format("%.3f", time.stop() / 1000) + " sec.");
        // raise a script-level error in case of failed final status
        if (finalState != FinalApplicationStatus.SUCCEEDED) {
            // propagate script-level stop call message
            String stop_msg = readMessageToHDFSWorkingDir(_dmlConfig, yconf, appId);
            if (stop_msg != null)
                throw new DMLScriptException(stop_msg);
            // generic failure message
            throw new DMLRuntimeException("DML yarn app master finished with final status: " + finalState + ".");
        }
        ret = true;
    } catch (DMLScriptException ex) {
        // rethrow DMLScriptException to propagate stop call
        throw ex;
    } catch (Exception ex) {
        LOG.error("Failed to run DML yarn app master.", ex);
        ret = false;
    } finally {
        // cleanup working directory
        if (hdfsWD != null)
            MapReduceTool.deleteFileIfExistOnHDFS(hdfsWD);
    }
    return ret;
}
Also used : YarnClientApplication(org.apache.hadoop.yarn.client.api.YarnClientApplication) FinalApplicationStatus(org.apache.hadoop.yarn.api.records.FinalApplicationStatus) Resource(org.apache.hadoop.yarn.api.records.Resource) LocalResource(org.apache.hadoop.yarn.api.records.LocalResource) YarnApplicationState(org.apache.hadoop.yarn.api.records.YarnApplicationState) ContainerLaunchContext(org.apache.hadoop.yarn.api.records.ContainerLaunchContext) YarnClient(org.apache.hadoop.yarn.client.api.YarnClient) DMLRuntimeException(org.apache.sysml.runtime.DMLRuntimeException) IOException(java.io.IOException) DMLScriptException(org.apache.sysml.runtime.DMLScriptException) DMLRuntimeException(org.apache.sysml.runtime.DMLRuntimeException) ApplicationReport(org.apache.hadoop.yarn.api.records.ApplicationReport) YarnConfiguration(org.apache.hadoop.yarn.conf.YarnConfiguration) ApplicationSubmissionContext(org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext) DMLScriptException(org.apache.sysml.runtime.DMLScriptException) Timing(org.apache.sysml.runtime.controlprogram.parfor.stat.Timing) ApplicationId(org.apache.hadoop.yarn.api.records.ApplicationId)
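The state-polling loop above is the standard pattern for tracking a submitted YARN application. A condensed, reusable form of the same idea (a sketch, not part of the SystemML source) looks like this:

// Sketch: block until the application reaches a terminal state and return its final status.
static FinalApplicationStatus waitForCompletion(YarnClient yarnClient, ApplicationId appId, long pollMs)
        throws YarnException, IOException, InterruptedException {
    ApplicationReport report = yarnClient.getApplicationReport(appId);
    YarnApplicationState state = report.getYarnApplicationState();
    while (state != YarnApplicationState.FINISHED
            && state != YarnApplicationState.KILLED
            && state != YarnApplicationState.FAILED) {
        Thread.sleep(pollMs);
        report = yarnClient.getApplicationReport(appId);
        state = report.getYarnApplicationState();
    }
    return report.getFinalApplicationStatus();
}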

Example 80 with ApplicationReport

use of org.apache.hadoop.yarn.api.records.ApplicationReport in project metron by apache.

the class Client method monitorApplication.

/**
 * Monitor the submitted application for completion.
 * Kill application if time expires.
 * @param appId Application Id of application to be monitored
 * @return true if application completed successfully
 * @throws YarnException
 * @throws IOException
 */
private boolean monitorApplication(ApplicationId appId) throws YarnException, IOException {
    while (true) {
        // Check app status every 1 second.
        try {
            Thread.sleep(1000);
        } catch (InterruptedException e) {
            LOG.debug("Thread sleep in monitoring loop interrupted");
        }
        // Get application report for the appId we are interested in
        ApplicationReport report = yarnClient.getApplicationReport(appId);
        LOG.info("Got application report from ASM for" + ", appId=" + appId.getId() + ", clientToAMToken=" + report.getClientToAMToken() + ", appDiagnostics=" + report.getDiagnostics() + ", appMasterHost=" + report.getHost() + ", appQueue=" + report.getQueue() + ", appMasterRpcPort=" + report.getRpcPort() + ", appStartTime=" + report.getStartTime() + ", yarnAppState=" + report.getYarnApplicationState().toString() + ", distributedFinalState=" + report.getFinalApplicationStatus().toString() + ", appTrackingUrl=" + report.getTrackingUrl() + ", appUser=" + report.getUser());
        YarnApplicationState state = report.getYarnApplicationState();
        FinalApplicationStatus dsStatus = report.getFinalApplicationStatus();
        if (YarnApplicationState.RUNNING == state) {
            LOG.info("Application is running...");
            return true;
        }
        if (YarnApplicationState.FINISHED == state) {
            if (FinalApplicationStatus.SUCCEEDED == dsStatus) {
                LOG.info("Application has completed successfully. Breaking monitoring loop");
                return true;
            } else {
                LOG.info("Application did finished unsuccessfully." + " YarnState=" + state.toString() + ", DSFinalStatus=" + dsStatus.toString() + ". Breaking monitoring loop");
                return false;
            }
        } else if (YarnApplicationState.KILLED == state || YarnApplicationState.FAILED == state) {
            LOG.info("Application did not finish." + " YarnState=" + state.toString() + ", DSFinalStatus=" + dsStatus.toString() + ". Breaking monitoring loop");
            return false;
        }
        if (System.currentTimeMillis() > (clientStartTime + clientTimeout)) {
            LOG.info("Reached client specified timeout for application. Killing application");
            forceKillApplication(appId);
            return false;
        }
    }
}
Also used : ApplicationReport(org.apache.hadoop.yarn.api.records.ApplicationReport) FinalApplicationStatus(org.apache.hadoop.yarn.api.records.FinalApplicationStatus) YarnApplicationState(org.apache.hadoop.yarn.api.records.YarnApplicationState)
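forceKillApplication(appId) is referenced above but not included in this snippet. In the distributed-shell style clients this pattern is based on, it typically delegates to the client's kill RPC (a sketch under that assumption, not necessarily the Metron implementation):

// Sketch: ask the ResourceManager to kill the application.
private void forceKillApplication(ApplicationId appId) throws YarnException, IOException {
    // The RM transitions the app to KILLED; the monitoring loop above then observes the terminal state.
    yarnClient.killApplication(appId);
}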

Aggregations

ApplicationReport (org.apache.hadoop.yarn.api.records.ApplicationReport): 143 usages
ApplicationId (org.apache.hadoop.yarn.api.records.ApplicationId): 65 usages
Test (org.junit.Test): 52 usages
IOException (java.io.IOException): 41 usages
YarnException (org.apache.hadoop.yarn.exceptions.YarnException): 32 usages
YarnApplicationState (org.apache.hadoop.yarn.api.records.YarnApplicationState): 29 usages
YarnClient (org.apache.hadoop.yarn.client.api.YarnClient): 21 usages
ApplicationNotFoundException (org.apache.hadoop.yarn.exceptions.ApplicationNotFoundException): 19 usages
ApplicationAttemptId (org.apache.hadoop.yarn.api.records.ApplicationAttemptId): 18 usages
ApplicationSubmissionContext (org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext): 16 usages
RMApp (org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp): 16 usages
Configuration (org.apache.hadoop.conf.Configuration): 14 usages
UserGroupInformation (org.apache.hadoop.security.UserGroupInformation): 14 usages
ArrayList (java.util.ArrayList): 13 usages
YarnConfiguration (org.apache.hadoop.yarn.conf.YarnConfiguration): 13 usages
GetApplicationReportRequest (org.apache.hadoop.yarn.api.protocolrecords.GetApplicationReportRequest): 11 usages
GetApplicationsRequest (org.apache.hadoop.yarn.api.protocolrecords.GetApplicationsRequest): 11 usages
GetApplicationReportResponse (org.apache.hadoop.yarn.api.protocolrecords.GetApplicationReportResponse): 10 usages
ApplicationResourceUsageReport (org.apache.hadoop.yarn.api.records.ApplicationResourceUsageReport): 10 usages
ContainerLaunchContext (org.apache.hadoop.yarn.api.records.ContainerLaunchContext): 10 usages