
Example 71 with ApplicationSubmissionContext

use of org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext in project hadoop by apache.

the class AMSimulator method submitApp.

private void submitApp() throws YarnException, InterruptedException, IOException {
    // ask for new application
    GetNewApplicationRequest newAppRequest = Records.newRecord(GetNewApplicationRequest.class);
    GetNewApplicationResponse newAppResponse = rm.getClientRMService().getNewApplication(newAppRequest);
    appId = newAppResponse.getApplicationId();
    // submit the application
    final SubmitApplicationRequest subAppRequest = Records.newRecord(SubmitApplicationRequest.class);
    ApplicationSubmissionContext appSubContext = Records.newRecord(ApplicationSubmissionContext.class);
    appSubContext.setApplicationId(appId);
    appSubContext.setMaxAppAttempts(1);
    appSubContext.setQueue(queue);
    appSubContext.setPriority(Priority.newInstance(0));
    ContainerLaunchContext conLauContext = Records.newRecord(ContainerLaunchContext.class);
    conLauContext.setApplicationACLs(new HashMap<ApplicationAccessType, String>());
    conLauContext.setCommands(new ArrayList<String>());
    conLauContext.setEnvironment(new HashMap<String, String>());
    conLauContext.setLocalResources(new HashMap<String, LocalResource>());
    conLauContext.setServiceData(new HashMap<String, ByteBuffer>());
    appSubContext.setAMContainerSpec(conLauContext);
    appSubContext.setResource(Resources.createResource(MR_AM_CONTAINER_RESOURCE_MEMORY_MB, MR_AM_CONTAINER_RESOURCE_VCORES));
    subAppRequest.setApplicationSubmissionContext(appSubContext);
    UserGroupInformation ugi = UserGroupInformation.createRemoteUser(user);
    ugi.doAs(new PrivilegedExceptionAction<Object>() {

        @Override
        public Object run() throws YarnException, IOException {
            rm.getClientRMService().submitApplication(subAppRequest);
            return null;
        }
    });
    LOG.info(MessageFormat.format("Submit a new application {0}", appId));
}
Also used: GetNewApplicationResponse (org.apache.hadoop.yarn.api.protocolrecords.GetNewApplicationResponse), ContainerLaunchContext (org.apache.hadoop.yarn.api.records.ContainerLaunchContext), IOException (java.io.IOException), ByteBuffer (java.nio.ByteBuffer), SubmitApplicationRequest (org.apache.hadoop.yarn.api.protocolrecords.SubmitApplicationRequest), YarnException (org.apache.hadoop.yarn.exceptions.YarnException), GetNewApplicationRequest (org.apache.hadoop.yarn.api.protocolrecords.GetNewApplicationRequest), LocalResource (org.apache.hadoop.yarn.api.records.LocalResource), ApplicationAccessType (org.apache.hadoop.yarn.api.records.ApplicationAccessType), ApplicationSubmissionContext (org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext), UserGroupInformation (org.apache.hadoop.security.UserGroupInformation)
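
A note on this example: the simulator can call rm.getClientRMService() directly because it runs in the same process as the ResourceManager. A standalone client would drive the same flow (new application, submission context, submit) through the public YarnClient API. The following is a minimal sketch, not taken from the Hadoop sources; the class name, application name, queue, resource sizes, and the placeholder "sleep 60" AM command are illustrative assumptions.

import java.nio.ByteBuffer;
import java.util.Collections;

import org.apache.hadoop.yarn.api.records.ApplicationAccessType;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext;
import org.apache.hadoop.yarn.api.records.ContainerLaunchContext;
import org.apache.hadoop.yarn.api.records.LocalResource;
import org.apache.hadoop.yarn.api.records.Priority;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.client.api.YarnClient;
import org.apache.hadoop.yarn.client.api.YarnClientApplication;
import org.apache.hadoop.yarn.conf.YarnConfiguration;

public class MinimalSubmitter {

    public static void main(String[] args) throws Exception {
        YarnClient yarnClient = YarnClient.createYarnClient();
        yarnClient.init(new YarnConfiguration());
        yarnClient.start();
        try {
            // Equivalent of getNewApplication(): allocates the ApplicationId.
            YarnClientApplication app = yarnClient.createApplication();
            ApplicationSubmissionContext ctx = app.getApplicationSubmissionContext();
            ApplicationId appId = ctx.getApplicationId();
            ctx.setApplicationName("minimal-app");            // illustrative name
            ctx.setQueue("default");                          // illustrative queue
            ctx.setPriority(Priority.newInstance(0));
            ctx.setMaxAppAttempts(1);
            ctx.setResource(Resource.newInstance(1024, 1));   // 1 GB, 1 vcore for the AM
            // A managed AM needs a launch command; "sleep 60" is only a stand-in.
            ContainerLaunchContext amSpec = ContainerLaunchContext.newInstance(
                    Collections.<String, LocalResource>emptyMap(),
                    Collections.<String, String>emptyMap(),
                    Collections.singletonList("sleep 60"),
                    Collections.<String, ByteBuffer>emptyMap(),
                    null,
                    Collections.<ApplicationAccessType, String>emptyMap());
            ctx.setAMContainerSpec(amSpec);
            yarnClient.submitApplication(ctx);
            System.out.println("Submitted application " + appId);
        } finally {
            yarnClient.stop();
        }
    }
}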

Example 72 with ApplicationSubmissionContext

use of org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext in project hadoop by apache.

the class YarnClientImpl method createApplication.

@Override
public YarnClientApplication createApplication() throws YarnException, IOException {
    ApplicationSubmissionContext context = Records.newRecord(ApplicationSubmissionContext.class);
    GetNewApplicationResponse newApp = getNewApplication();
    ApplicationId appId = newApp.getApplicationId();
    context.setApplicationId(appId);
    return new YarnClientApplication(newApp, context);
}
Also used: GetNewApplicationResponse (org.apache.hadoop.yarn.api.protocolrecords.GetNewApplicationResponse), YarnClientApplication (org.apache.hadoop.yarn.client.api.YarnClientApplication), ApplicationSubmissionContext (org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext), ApplicationId (org.apache.hadoop.yarn.api.records.ApplicationId)
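
createApplication() only allocates an ApplicationId and pairs it with an empty ApplicationSubmissionContext; the caller still fills in the context and submits it. A hedged usage sketch follows: the class and method names are illustrative, not part of Hadoop, and getMemorySize() assumes a release that has the long-valued accessor. It shows how the GetNewApplicationResponse carried by the YarnClientApplication is typically used to cap the requested AM resource.

import org.apache.hadoop.yarn.api.protocolrecords.GetNewApplicationResponse;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.client.api.YarnClient;
import org.apache.hadoop.yarn.client.api.YarnClientApplication;

public final class CreateApplicationUsage {

    private CreateApplicationUsage() {
    }

    // Illustrative caller of createApplication(): clamps the requested AM memory
    // to the cluster maximum reported by the RM before submitting.
    public static ApplicationId submitWithClampedAmMemory(YarnClient client,
            int requestedAmMemoryMb) throws Exception {
        YarnClientApplication app = client.createApplication();
        GetNewApplicationResponse newApp = app.getNewApplicationResponse();
        ApplicationSubmissionContext ctx = app.getApplicationSubmissionContext();
        Resource max = newApp.getMaximumResourceCapability();
        int amMemoryMb = (int) Math.min(requestedAmMemoryMb, max.getMemorySize());
        ctx.setResource(Resource.newInstance(amMemoryMb, 1));
        ctx.setApplicationName("example");
        // Queue, priority and the AM ContainerLaunchContext would be set here,
        // exactly as in the surrounding examples, before submitting.
        return client.submitApplication(ctx);
    }
}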

Example 73 with ApplicationSubmissionContext

use of org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext in project hadoop by apache.

the class UnmanagedAMLauncher method run.

public boolean run() throws IOException, YarnException {
    LOG.info("Starting Client");
    // Connect to ResourceManager
    rmClient.start();
    try {
        // Create launch context for app master
        LOG.info("Setting up application submission context for ASM");
        ApplicationSubmissionContext appContext = rmClient.createApplication().getApplicationSubmissionContext();
        ApplicationId appId = appContext.getApplicationId();
        // set the application name
        appContext.setApplicationName(appName);
        // Set the priority for the application master
        Priority pri = Records.newRecord(Priority.class);
        pri.setPriority(amPriority);
        appContext.setPriority(pri);
        // Set the queue to which this application is to be submitted in the RM
        appContext.setQueue(amQueue);
        // Set up the container launch context for the application master
        ContainerLaunchContext amContainer = Records.newRecord(ContainerLaunchContext.class);
        appContext.setAMContainerSpec(amContainer);
        // unmanaged AM
        appContext.setUnmanagedAM(true);
        LOG.info("Setting unmanaged AM");
        // Submit the application to the applications manager
        LOG.info("Submitting application to ASM");
        rmClient.submitApplication(appContext);
        ApplicationReport appReport = monitorApplication(appId, EnumSet.of(YarnApplicationState.ACCEPTED, YarnApplicationState.KILLED, YarnApplicationState.FAILED, YarnApplicationState.FINISHED));
        if (appReport.getYarnApplicationState() == YarnApplicationState.ACCEPTED) {
            // Monitor the application attempt to wait for launch state
            ApplicationAttemptReport attemptReport = monitorCurrentAppAttempt(appId, YarnApplicationAttemptState.LAUNCHED);
            ApplicationAttemptId attemptId = attemptReport.getApplicationAttemptId();
            LOG.info("Launching AM with application attempt id " + attemptId);
            // launch AM
            launchAM(attemptId);
            // Monitor the application for end state
            appReport = monitorApplication(appId, EnumSet.of(YarnApplicationState.KILLED, YarnApplicationState.FAILED, YarnApplicationState.FINISHED));
        }
        YarnApplicationState appState = appReport.getYarnApplicationState();
        FinalApplicationStatus appStatus = appReport.getFinalApplicationStatus();
        LOG.info("App ended with state: " + appReport.getYarnApplicationState() + " and status: " + appStatus);
        boolean success;
        if (YarnApplicationState.FINISHED == appState && FinalApplicationStatus.SUCCEEDED == appStatus) {
            LOG.info("Application has completed successfully.");
            success = true;
        } else {
            LOG.info("Application did finished unsuccessfully." + " YarnState=" + appState.toString() + ", FinalStatus=" + appStatus.toString());
            success = false;
        }
        return success;
    } finally {
        rmClient.stop();
    }
}
Also used: ApplicationReport (org.apache.hadoop.yarn.api.records.ApplicationReport), ApplicationAttemptReport (org.apache.hadoop.yarn.api.records.ApplicationAttemptReport), FinalApplicationStatus (org.apache.hadoop.yarn.api.records.FinalApplicationStatus), Priority (org.apache.hadoop.yarn.api.records.Priority), ApplicationSubmissionContext (org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext), YarnApplicationState (org.apache.hadoop.yarn.api.records.YarnApplicationState), ContainerLaunchContext (org.apache.hadoop.yarn.api.records.ContainerLaunchContext), ApplicationAttemptId (org.apache.hadoop.yarn.api.records.ApplicationAttemptId), ApplicationId (org.apache.hadoop.yarn.api.records.ApplicationId)
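
monitorApplication() and monitorCurrentAppAttempt() are not shown in this excerpt. A plausible shape for such a helper, assuming a started YarnClient, is a simple polling loop over getApplicationReport(); the real implementation in the Hadoop sources may differ, and the class and method names below are illustrative.

import java.util.EnumSet;

import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.ApplicationReport;
import org.apache.hadoop.yarn.api.records.YarnApplicationState;
import org.apache.hadoop.yarn.client.api.YarnClient;

public final class AppStatePoller {

    private AppStatePoller() {
    }

    // Polls the ResourceManager until the application reaches one of the target
    // states. The one-second interval is an arbitrary choice for this sketch.
    public static ApplicationReport awaitState(YarnClient client, ApplicationId appId,
            EnumSet<YarnApplicationState> targetStates) throws Exception {
        while (true) {
            ApplicationReport report = client.getApplicationReport(appId);
            if (targetStates.contains(report.getYarnApplicationState())) {
                return report;
            }
            Thread.sleep(1000);
        }
    }
}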

Example 74 with ApplicationSubmissionContext

use of org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext in project hadoop by apache.

the class BaseAMRMProxyE2ETest method createApp.

protected ApplicationAttemptId createApp(YarnClient yarnClient, MiniYARNCluster yarnCluster, Configuration conf) throws Exception {
    ApplicationSubmissionContext appContext = yarnClient.createApplication().getApplicationSubmissionContext();
    ApplicationId appId = appContext.getApplicationId();
    appContext.setApplicationName("Test");
    Priority pri = Records.newRecord(Priority.class);
    pri.setPriority(0);
    appContext.setPriority(pri);
    appContext.setQueue("default");
    ContainerLaunchContext amContainer = BuilderUtils.newContainerLaunchContext(Collections.<String, LocalResource>emptyMap(), new HashMap<String, String>(), Arrays.asList("sleep", "10000"), new HashMap<String, ByteBuffer>(), null, new HashMap<ApplicationAccessType, String>());
    appContext.setAMContainerSpec(amContainer);
    appContext.setResource(Resource.newInstance(1024, 1));
    SubmitApplicationRequest appRequest = Records.newRecord(SubmitApplicationRequest.class);
    appRequest.setApplicationSubmissionContext(appContext);
    yarnClient.submitApplication(appContext);
    RMAppAttempt appAttempt = null;
    ApplicationAttemptId attemptId = null;
    while (true) {
        ApplicationReport appReport = yarnClient.getApplicationReport(appId);
        if (appReport.getYarnApplicationState() == YarnApplicationState.ACCEPTED) {
            attemptId = appReport.getCurrentApplicationAttemptId();
            appAttempt = yarnCluster.getResourceManager().getRMContext().getRMApps().get(attemptId.getApplicationId()).getCurrentAppAttempt();
            while (true) {
                if (appAttempt.getAppAttemptState() == RMAppAttemptState.LAUNCHED) {
                    break;
                }
            }
            break;
        }
    }
    Thread.sleep(1000);
    // Just dig into the ResourceManager and get the AMRMToken just for the sake
    // of testing.
    UserGroupInformation.setLoginUser(UserGroupInformation.createRemoteUser(UserGroupInformation.getCurrentUser().getUserName()));
    // emulate RM setup of AMRM token in credentials by adding the token
    // *before* setting the token service
    UserGroupInformation.getCurrentUser().addToken(appAttempt.getAMRMToken());
    appAttempt.getAMRMToken().setService(ClientRMProxy.getAMRMTokenService(conf));
    return attemptId;
}
Also used: RMAppAttempt (org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttempt), Priority (org.apache.hadoop.yarn.api.records.Priority), ContainerLaunchContext (org.apache.hadoop.yarn.api.records.ContainerLaunchContext), ApplicationAttemptId (org.apache.hadoop.yarn.api.records.ApplicationAttemptId), ByteBuffer (java.nio.ByteBuffer), SubmitApplicationRequest (org.apache.hadoop.yarn.api.protocolrecords.SubmitApplicationRequest), ApplicationReport (org.apache.hadoop.yarn.api.records.ApplicationReport), ApplicationAccessType (org.apache.hadoop.yarn.api.records.ApplicationAccessType), ApplicationSubmissionContext (org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext), ApplicationId (org.apache.hadoop.yarn.api.records.ApplicationId)
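
After createApp() returns, the AMRM token is already on the current UserGroupInformation, so a test can act as the application master through AMRMClient. A minimal sketch under that assumption; the class and method names are illustrative, not part of the Hadoop test code, and the host, port and tracking URL are placeholders.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.api.records.FinalApplicationStatus;
import org.apache.hadoop.yarn.client.api.AMRMClient;

public final class UnmanagedAmRegistration {

    private UnmanagedAmRegistration() {
    }

    // Registers and immediately unregisters an application master. Assumes the
    // AMRM token is already on the current UGI, as createApp() above arranges.
    public static void registerAndFinish(Configuration conf) throws Exception {
        AMRMClient<AMRMClient.ContainerRequest> amrmClient = AMRMClient.createAMRMClient();
        amrmClient.init(conf);
        amrmClient.start();
        try {
            amrmClient.registerApplicationMaster("localhost", -1, "");
            // ContainerRequests and allocate() calls would go here.
            amrmClient.unregisterApplicationMaster(FinalApplicationStatus.SUCCEEDED, "done", "");
        } finally {
            amrmClient.stop();
        }
    }
}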

Example 75 with ApplicationSubmissionContext

use of org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext in project asterixdb by apache.

the class AsterixYARNClient method deployAM.

/**
     * Submits the request to start the AsterixApplicationMaster to the YARN ResourceManager.
     *
     * @param app
     *            The YarnClientApplication handle for the new instance.
     * @param resources
     *            Resources to be distributed as part of the container launch
     * @param mode
     *            The mode of the ApplicationMaster
     * @return The application ID of the new Asterix instance.
     * @throws IOException
     * @throws YarnException
     */
public ApplicationId deployAM(YarnClientApplication app, List<DFSResourceCoordinate> resources, Mode mode) throws IOException, YarnException {
    // Set up the container launch context for the application master
    ContainerLaunchContext amContainer = Records.newRecord(ContainerLaunchContext.class);
    ApplicationSubmissionContext appContext = app.getApplicationSubmissionContext();
    // Set local resource info into app master container launch context
    Map<String, LocalResource> localResources = new HashMap<String, LocalResource>();
    for (DFSResourceCoordinate res : resources) {
        localResources.put(res.name, res.res);
    }
    amContainer.setLocalResources(localResources);
    // Set the env variables to be setup in the env where the application
    // master will be run
    LOG.info("Set the environment for the application master");
    Map<String, String> env = new HashMap<String, String>();
    // scripts
    for (DFSResourceCoordinate res : resources) {
        if (res.envs == null) {
            //some entries may not have environment variables.
            continue;
        }
        for (Map.Entry<String, String> e : res.envs.entrySet()) {
            env.put(e.getValue(), e.getKey());
        }
    }
    //this is needed for when the RM address isn't known from the environment of the AM
    env.put(AConstants.RMADDRESS, conf.get("yarn.resourcemanager.address"));
    env.put(AConstants.RMSCHEDULERADDRESS, conf.get("yarn.resourcemanager.scheduler.address"));
    ///add miscellaneous environment variables.
    env.put(AConstants.INSTANCESTORE, CONF_DIR_REL + instanceFolder);
    env.put(AConstants.DFS_BASE, FileSystem.get(conf).getHomeDirectory().toUri().toString());
    env.put(AConstants.CC_JAVA_OPTS, ccJavaOpts);
    env.put(AConstants.NC_JAVA_OPTS, ncJavaOpts);
    // Add AppMaster.jar location to classpath
    // At some point we should not be required to add
    // the hadoop specific classpaths to the env.
    // It should be provided out of the box.
    // For now setting all required classpaths including
    // the classpath to "." for the application jar
    StringBuilder classPathEnv = new StringBuilder("").append("./*");
    for (String c : conf.getStrings(YarnConfiguration.YARN_APPLICATION_CLASSPATH, YarnConfiguration.DEFAULT_YARN_APPLICATION_CLASSPATH)) {
        classPathEnv.append(File.pathSeparatorChar);
        classPathEnv.append(c.trim());
    }
    classPathEnv.append(File.pathSeparatorChar).append("." + File.separator + "log4j.properties");
    // add the runtime classpath needed for tests to work
    if (conf.getBoolean(YarnConfiguration.IS_MINI_YARN_CLUSTER, false)) {
        LOG.info("In YARN MiniCluster");
        classPathEnv.append(System.getProperty("path.separator"));
        classPathEnv.append(System.getProperty("java.class.path"));
        env.put("HADOOP_CONF_DIR", System.getProperty("user.dir") + File.separator + "target" + File.separator);
    }
    LOG.info("AM Classpath:" + classPathEnv.toString());
    env.put("CLASSPATH", classPathEnv.toString());
    amContainer.setEnvironment(env);
    // Set the necessary command to execute the application master
    Vector<CharSequence> vargs = new Vector<CharSequence>(30);
    // Set java executable command
    LOG.info("Setting up app master command");
    vargs.add(JAVA_HOME + File.separator + "bin" + File.separator + "java");
    // Set class name
    vargs.add(appMasterMainClass);
    //Set params for Application Master
    if (debugFlag) {
        vargs.add("-debug");
    }
    if (mode == Mode.DESTROY) {
        vargs.add("-obliterate");
    } else if (mode == Mode.BACKUP) {
        vargs.add("-backup");
    } else if (mode == Mode.RESTORE) {
        vargs.add("-restore " + snapName);
    } else if (mode == Mode.INSTALL) {
        vargs.add("-initial ");
    }
    if (refresh) {
        vargs.add("-refresh");
    }
    //vargs.add("/bin/ls -alh asterix-server.zip/repo");
    vargs.add("1>" + ApplicationConstants.LOG_DIR_EXPANSION_VAR + File.separator + "AppMaster.stdout");
    vargs.add("2>" + ApplicationConstants.LOG_DIR_EXPANSION_VAR + File.separator + "AppMaster.stderr");
    // Get final command
    StringBuilder command = new StringBuilder();
    for (CharSequence str : vargs) {
        command.append(str).append(" ");
    }
    LOG.info("Completed setting up app master command " + command.toString());
    List<String> commands = new ArrayList<String>();
    commands.add(command.toString());
    amContainer.setCommands(commands);
    // Set up resource type requirements
    // For now, only memory is supported so we set memory requirements
    Resource capability = Records.newRecord(Resource.class);
    capability.setMemory(amMemory);
    appContext.setResource(capability);
    // Service data is a binary blob that can be passed to the application
    // Not needed in this scenario
    // amContainer.setServiceData(serviceData);
    // The following are not required for launching an application master
    // amContainer.setContainerId(containerId);
    appContext.setAMContainerSpec(amContainer);
    // Set the priority for the application master
    Priority pri = Records.newRecord(Priority.class);
    // TODO - what is the range for priority? how to decide?
    pri.setPriority(amPriority);
    appContext.setPriority(pri);
    // Set the queue to which this application is to be submitted in the RM
    appContext.setQueue(amQueue);
    // Submit the application to the applications manager
    // SubmitApplicationResponse submitResp =
    // applicationsManager.submitApplication(appRequest);
    // Ignore the response as either a valid response object is returned on
    // success
    // or an exception thrown to denote some form of a failure
    LOG.info("Submitting application to ASM");
    yarnClient.submitApplication(appContext);
    //now write the instance lock
    if (mode == Mode.INSTALL || mode == Mode.START) {
        FileSystem fs = FileSystem.get(conf);
        Path lock = new Path(fs.getHomeDirectory(), CONF_DIR_REL + instanceFolder + instanceLock);
        if (fs.exists(lock)) {
            throw new IllegalStateException("Somehow, this instance has been launched twice. ");
        }
        BufferedWriter out = new BufferedWriter(new OutputStreamWriter(fs.create(lock, true)));
        try {
            out.write(app.getApplicationSubmissionContext().getApplicationId().toString());
            out.close();
        } finally {
            out.close();
        }
    }
    return app.getApplicationSubmissionContext().getApplicationId();
}
Also used: Path (org.apache.hadoop.fs.Path), HashMap (java.util.HashMap), Priority (org.apache.hadoop.yarn.api.records.Priority), ArrayList (java.util.ArrayList), Resource (org.apache.hadoop.yarn.api.records.Resource), LocalResource (org.apache.hadoop.yarn.api.records.LocalResource), ContainerLaunchContext (org.apache.hadoop.yarn.api.records.ContainerLaunchContext), BufferedWriter (java.io.BufferedWriter), FileSystem (org.apache.hadoop.fs.FileSystem), ApplicationSubmissionContext (org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext), OutputStreamWriter (java.io.OutputStreamWriter), Map (java.util.Map), ImmutableMap (com.google.common.collect.ImmutableMap), Vector (java.util.Vector)
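
One small editorial note on the tail of deployAM(): the lock-file write closes the writer both in the try block and in the finally block. A sketch of the same step using try-with-resources, so the stream is closed exactly once; the helper class name is illustrative and the explicit UTF-8 charset is an assumption not present in the original.

import java.io.BufferedWriter;
import java.io.IOException;
import java.io.OutputStreamWriter;
import java.nio.charset.StandardCharsets;

import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.yarn.api.records.ApplicationId;

public final class InstanceLockWriter {

    private InstanceLockWriter() {
    }

    // Writes the application id into the instance lock file, closing the writer
    // exactly once via try-with-resources.
    public static void writeLock(FileSystem fs, Path lock, ApplicationId appId)
            throws IOException {
        if (fs.exists(lock)) {
            throw new IllegalStateException("Somehow, this instance has been launched twice.");
        }
        try (BufferedWriter out = new BufferedWriter(
                new OutputStreamWriter(fs.create(lock, true), StandardCharsets.UTF_8))) {
            out.write(appId.toString());
        }
    }
}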

Aggregations

ApplicationSubmissionContext (org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext) 86
ApplicationId (org.apache.hadoop.yarn.api.records.ApplicationId) 46
ContainerLaunchContext (org.apache.hadoop.yarn.api.records.ContainerLaunchContext) 42
Test (org.junit.Test) 29
YarnConfiguration (org.apache.hadoop.yarn.conf.YarnConfiguration) 22
Resource (org.apache.hadoop.yarn.api.records.Resource) 21
IOException (java.io.IOException) 18
ApplicationAttemptId (org.apache.hadoop.yarn.api.records.ApplicationAttemptId) 18
Configuration (org.apache.hadoop.conf.Configuration) 15
ApplicationReport (org.apache.hadoop.yarn.api.records.ApplicationReport) 15
Priority (org.apache.hadoop.yarn.api.records.Priority) 15
YarnException (org.apache.hadoop.yarn.exceptions.YarnException) 15
ByteBuffer (java.nio.ByteBuffer) 14
RMApp (org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp) 14
SubmitApplicationRequest (org.apache.hadoop.yarn.api.protocolrecords.SubmitApplicationRequest) 13
YarnClientApplication (org.apache.hadoop.yarn.client.api.YarnClientApplication) 13
RMAppAttemptMetrics (org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptMetrics) 13
LocalResource (org.apache.hadoop.yarn.api.records.LocalResource) 12
RMAppAttemptImpl (org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptImpl) 11
ArrayList (java.util.ArrayList) 10