Example 41 with ContainerLaunchContext

Use of org.apache.hadoop.yarn.api.records.ContainerLaunchContext in project hadoop by apache.

The class TestLinuxContainerExecutor, method runAndBlock.

private int runAndBlock(ContainerId cId, String... cmd) throws IOException {
    String appId = "APP_" + getNextId();
    Container container = mock(Container.class);
    ContainerLaunchContext context = mock(ContainerLaunchContext.class);
    HashMap<String, String> env = new HashMap<String, String>();
    when(container.getContainerId()).thenReturn(cId);
    when(container.getLaunchContext()).thenReturn(context);
    when(context.getEnvironment()).thenReturn(env);
    String script = writeScriptFile(cmd);
    Path scriptPath = new Path(script);
    Path tokensPath = new Path("/dev/null");
    Path workDir = new Path(workSpace.getAbsolutePath());
    Path pidFile = new Path(workDir, "pid.txt");
    exec.activateContainer(cId, pidFile);
    return exec.launchContainer(new ContainerStartContext.Builder()
        .setContainer(container)
        .setNmPrivateContainerScriptPath(scriptPath)
        .setNmPrivateTokensPath(tokensPath)
        .setUser(appSubmitter)
        .setAppId(appId)
        .setContainerWorkDir(workDir)
        .setLocalDirs(dirsHandler.getLocalDirs())
        .setLogDirs(dirsHandler.getLogDirs())
        .build());
}
Also used : Path(org.apache.hadoop.fs.Path) Container(org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container) HashMap(java.util.HashMap) ContainerLaunchContext(org.apache.hadoop.yarn.api.records.ContainerLaunchContext)
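
For quick reference, here is a self-contained sketch of the Mockito stubbing pattern these executor tests rely on: a mocked Container returns a mocked ContainerLaunchContext whose environment map the test controls. The class and method names below are illustrative, not part of the Hadoop test suite; only the Hadoop/YARN and Mockito types are real.

import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;

import java.util.HashMap;
import java.util.Map;

import org.apache.hadoop.yarn.api.records.ContainerLaunchContext;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container;

public class LaunchContextMockSketch {

    // Build a mocked Container whose launch context exposes the given environment.
    public static Container containerWithEnv(Map<String, String> env) {
        Container container = mock(Container.class);
        ContainerLaunchContext context = mock(ContainerLaunchContext.class);
        when(container.getLaunchContext()).thenReturn(context);
        when(context.getEnvironment()).thenReturn(env);
        return container;
    }

    public static void main(String[] args) {
        Map<String, String> env = new HashMap<String, String>();
        env.put("FOO", "bar");
        Container container = containerWithEnv(env);
        // Prints {FOO=bar}: the stubbed chain resolves to the map we supplied.
        System.out.println(container.getLaunchContext().getEnvironment());
    }
}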

Example 42 with ContainerLaunchContext

Use of org.apache.hadoop.yarn.api.records.ContainerLaunchContext in project hadoop by apache.

The class TestYarnClient, method testAutomaticTimelineDelegationTokenLoading.

@Test
public void testAutomaticTimelineDelegationTokenLoading() throws Exception {
    Configuration conf = new YarnConfiguration();
    conf.setBoolean(YarnConfiguration.TIMELINE_SERVICE_ENABLED, true);
    SecurityUtil.setAuthenticationMethod(AuthenticationMethod.KERBEROS, conf);
    TimelineDelegationTokenIdentifier timelineDT = new TimelineDelegationTokenIdentifier();
    final Token<TimelineDelegationTokenIdentifier> dToken = new Token<TimelineDelegationTokenIdentifier>(timelineDT.getBytes(), new byte[0], timelineDT.getKind(), new Text());
    // create a mock client
    YarnClientImpl client = spy(new YarnClientImpl() {

        @Override
        TimelineClient createTimelineClient() throws IOException, YarnException {
            timelineClient = mock(TimelineClient.class);
            when(timelineClient.getDelegationToken(any(String.class))).thenReturn(dToken);
            return timelineClient;
        }

        @Override
        protected void serviceStart() throws Exception {
            rmClient = mock(ApplicationClientProtocol.class);
        }

        @Override
        protected void serviceStop() throws Exception {
        }

        @Override
        public ApplicationReport getApplicationReport(ApplicationId appId) {
            ApplicationReport report = mock(ApplicationReport.class);
            when(report.getYarnApplicationState()).thenReturn(YarnApplicationState.RUNNING);
            return report;
        }

        @Override
        public boolean isSecurityEnabled() {
            return true;
        }
    });
    client.init(conf);
    client.start();
    try {
        // when i == 1, the timeline DT doesn't exist yet, so one more must be fetched
        for (int i = 0; i < 2; ++i) {
            ApplicationSubmissionContext context = mock(ApplicationSubmissionContext.class);
            ApplicationId applicationId = ApplicationId.newInstance(0, i + 1);
            when(context.getApplicationId()).thenReturn(applicationId);
            DataOutputBuffer dob = new DataOutputBuffer();
            Credentials credentials = new Credentials();
            if (i == 0) {
                credentials.addToken(client.timelineService, dToken);
            }
            credentials.writeTokenStorageToStream(dob);
            ByteBuffer tokens = ByteBuffer.wrap(dob.getData(), 0, dob.getLength());
            ContainerLaunchContext clc = ContainerLaunchContext.newInstance(null, null, null, null, tokens, null);
            when(context.getAMContainerSpec()).thenReturn(clc);
            client.submitApplication(context);
            if (i == 0) {
                // GetTimelineDelegationToken shouldn't be called
                verify(client, never()).getTimelineDelegationToken();
            }
            // Either way, the token should be present
            credentials = new Credentials();
            DataInputByteBuffer dibb = new DataInputByteBuffer();
            tokens = clc.getTokens();
            if (tokens != null) {
                dibb.reset(tokens);
                credentials.readTokenStorageStream(dibb);
                tokens.rewind();
            }
            Collection<Token<? extends TokenIdentifier>> dTokens = credentials.getAllTokens();
            Assert.assertEquals(1, dTokens.size());
            Assert.assertEquals(dToken, dTokens.iterator().next());
        }
    } finally {
        client.stop();
    }
}
Also used : TokenIdentifier(org.apache.hadoop.security.token.TokenIdentifier) TimelineDelegationTokenIdentifier(org.apache.hadoop.yarn.security.client.TimelineDelegationTokenIdentifier) CapacitySchedulerConfiguration(org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacitySchedulerConfiguration) Configuration(org.apache.hadoop.conf.Configuration) YarnConfiguration(org.apache.hadoop.yarn.conf.YarnConfiguration) DataInputByteBuffer(org.apache.hadoop.io.DataInputByteBuffer) Token(org.apache.hadoop.security.token.Token) Text(org.apache.hadoop.io.Text) IOException(java.io.IOException) ContainerLaunchContext(org.apache.hadoop.yarn.api.records.ContainerLaunchContext) ByteBuffer(java.nio.ByteBuffer) YarnException(org.apache.hadoop.yarn.exceptions.YarnException) ApplicationNotFoundException(org.apache.hadoop.yarn.exceptions.ApplicationNotFoundException) ApplicationIdNotProvidedException(org.apache.hadoop.yarn.exceptions.ApplicationIdNotProvidedException) ContainerNotFoundException(org.apache.hadoop.yarn.exceptions.ContainerNotFoundException) ApplicationReport(org.apache.hadoop.yarn.api.records.ApplicationReport) TimelineClient(org.apache.hadoop.yarn.client.api.TimelineClient) ApplicationSubmissionContext(org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext) DataOutputBuffer(org.apache.hadoop.io.DataOutputBuffer) ApplicationId(org.apache.hadoop.yarn.api.records.ApplicationId) Credentials(org.apache.hadoop.security.Credentials) Test(org.junit.Test)
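
The token plumbing in this test is the part worth isolating: Credentials are serialized through a DataOutputBuffer into the ByteBuffer that ContainerLaunchContext.newInstance accepts, and read back with a DataInputByteBuffer. A minimal round-trip sketch follows; the token bytes, kind, and service name are placeholders (the real test uses a TimelineDelegationTokenIdentifier).

import java.nio.ByteBuffer;

import org.apache.hadoop.io.DataInputByteBuffer;
import org.apache.hadoop.io.DataOutputBuffer;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.security.Credentials;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.security.token.TokenIdentifier;

public class TokenBufferSketch {

    public static void main(String[] args) throws Exception {
        // Placeholder token: empty identifier/password, hypothetical kind and service.
        Token<TokenIdentifier> token = new Token<TokenIdentifier>(
            new byte[0], new byte[0], new Text("SKETCH_TOKEN"), new Text("sketch-service"));
        Credentials credentials = new Credentials();
        credentials.addToken(new Text("sketch-service"), token);

        // Serialize: Credentials -> DataOutputBuffer -> ByteBuffer (the form passed
        // to ContainerLaunchContext.newInstance(..., tokens, ...) in the test above).
        DataOutputBuffer dob = new DataOutputBuffer();
        credentials.writeTokenStorageToStream(dob);
        ByteBuffer tokens = ByteBuffer.wrap(dob.getData(), 0, dob.getLength());

        // Deserialize: ByteBuffer -> DataInputByteBuffer -> Credentials.
        Credentials readBack = new Credentials();
        DataInputByteBuffer dibb = new DataInputByteBuffer();
        dibb.reset(tokens);
        readBack.readTokenStorageStream(dibb);
        tokens.rewind(); // restore the buffer position for any later reader

        System.out.println("tokens recovered: " + readBack.getAllTokens().size()); // 1
    }
}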

Example 43 with ContainerLaunchContext

Use of org.apache.hadoop.yarn.api.records.ContainerLaunchContext in project hadoop by apache.

The class TestJavaSandboxLinuxContainerRuntime, method createRuntimeContext.

public ContainerRuntimeContext.Builder createRuntimeContext() {
    Container container = mock(Container.class);
    ContainerLaunchContext ctx = mock(ContainerLaunchContext.class);
    when(container.getLaunchContext()).thenReturn(ctx);
    when(ctx.getEnvironment()).thenReturn(env);
    ContainerRuntimeContext.Builder builder = new ContainerRuntimeContext.Builder(container);
    List<String> localDirs = new ArrayList<>();
    builder.setExecutionAttribute(LOCALIZED_RESOURCES, resources)
        .setExecutionAttribute(RUN_AS_USER, NORMAL_USER)
        .setExecutionAttribute(CONTAINER_ID_STR, CONTAINER_ID)
        .setExecutionAttribute(APPID, APPLICATION_ID)
        .setExecutionAttribute(CONTAINER_WORK_DIR, new Path(containerDir.toString()))
        .setExecutionAttribute(LOCAL_DIRS, localDirs)
        .setExecutionAttribute(LOG_DIRS, localDirs)
        .setExecutionAttribute(FILECACHE_DIRS, localDirs)
        .setExecutionAttribute(USER_LOCAL_DIRS, localDirs)
        .setExecutionAttribute(CONTAINER_LOCAL_DIRS, localDirs)
        .setExecutionAttribute(CONTAINER_RUN_CMDS, localDirs);
    return builder;
}
Also used : Path(org.apache.hadoop.fs.Path) Container(org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container) ArrayList(java.util.ArrayList) ContainerLaunchContext(org.apache.hadoop.yarn.api.records.ContainerLaunchContext) ContainerRuntimeContext(org.apache.hadoop.yarn.server.nodemanager.containermanager.runtime.ContainerRuntimeContext)
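
A condensed sketch of this fixture pattern: the builder takes the (mocked) container, and everything else travels as typed execution attributes. The attribute keys in the test (RUN_AS_USER, CONTAINER_ID_STR, and so on) are assumed to be static imports from LinuxContainerRuntimeConstants, which the sandbox runtime reads back; the helper below is illustrative only.

import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;

import java.util.Map;

import org.apache.hadoop.yarn.api.records.ContainerLaunchContext;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.runtime.ContainerRuntimeContext;

public class RuntimeContextSketch {

    // Builder seeded with a mocked container; callers then chain
    // setExecutionAttribute(KEY, value) for whatever the runtime under test reads,
    // e.g. builder.setExecutionAttribute(RUN_AS_USER, "nobody") with the
    // (assumed) LinuxContainerRuntimeConstants keys.
    public static ContainerRuntimeContext.Builder baseBuilder(Map<String, String> env) {
        Container container = mock(Container.class);
        ContainerLaunchContext ctx = mock(ContainerLaunchContext.class);
        when(container.getLaunchContext()).thenReturn(ctx);
        when(ctx.getEnvironment()).thenReturn(env);
        return new ContainerRuntimeContext.Builder(container);
    }
}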

Example 44 with ContainerLaunchContext

Use of org.apache.hadoop.yarn.api.records.ContainerLaunchContext in project hadoop by apache.

The class TestContainerLaunch, method testContainerEnvVariables.

/**
   * Verify that user-set environment variables are forwarded by sanitizeEnv.
   * @throws Exception
   */
@Test(timeout = 60000)
public void testContainerEnvVariables() throws Exception {
    containerManager.start();
    ContainerLaunchContext containerLaunchContext = recordFactory.newRecordInstance(ContainerLaunchContext.class);
    // Construct the container ID
    ApplicationId appId = ApplicationId.newInstance(0, 0);
    ApplicationAttemptId appAttemptId = ApplicationAttemptId.newInstance(appId, 1);
    ContainerId cId = ContainerId.newContainerId(appAttemptId, 0);
    Map<String, String> userSetEnv = new HashMap<String, String>();
    userSetEnv.put(Environment.CONTAINER_ID.name(), "user_set_container_id");
    userSetEnv.put(Environment.NM_HOST.name(), "user_set_NM_HOST");
    userSetEnv.put(Environment.NM_PORT.name(), "user_set_NM_PORT");
    userSetEnv.put(Environment.NM_HTTP_PORT.name(), "user_set_NM_HTTP_PORT");
    userSetEnv.put(Environment.LOCAL_DIRS.name(), "user_set_LOCAL_DIR");
    userSetEnv.put(Environment.USER.key(), "user_set_" + Environment.USER.key());
    userSetEnv.put(Environment.LOGNAME.name(), "user_set_LOGNAME");
    userSetEnv.put(Environment.PWD.name(), "user_set_PWD");
    userSetEnv.put(Environment.HOME.name(), "user_set_HOME");
    containerLaunchContext.setEnvironment(userSetEnv);
    File scriptFile = Shell.appendScriptExtension(tmpDir, "scriptFile");
    PrintWriter fileWriter = new PrintWriter(scriptFile);
    File processStartFile = new File(tmpDir, "env_vars.txt").getAbsoluteFile();
    if (Shell.WINDOWS) {
        fileWriter.println("@echo " + Environment.CONTAINER_ID.$() + "> " + processStartFile);
        fileWriter.println("@echo " + Environment.NM_HOST.$() + ">> " + processStartFile);
        fileWriter.println("@echo " + Environment.NM_PORT.$() + ">> " + processStartFile);
        fileWriter.println("@echo " + Environment.NM_HTTP_PORT.$() + ">> " + processStartFile);
        fileWriter.println("@echo " + Environment.LOCAL_DIRS.$() + ">> " + processStartFile);
        fileWriter.println("@echo " + Environment.USER.$() + ">> " + processStartFile);
        fileWriter.println("@echo " + Environment.LOGNAME.$() + ">> " + processStartFile);
        fileWriter.println("@echo " + Environment.PWD.$() + ">> " + processStartFile);
        fileWriter.println("@echo " + Environment.HOME.$() + ">> " + processStartFile);
        for (String serviceName : containerManager.getAuxServiceMetaData().keySet()) {
            fileWriter.println("@echo %" + AuxiliaryServiceHelper.NM_AUX_SERVICE + serviceName + "%>> " + processStartFile);
        }
        fileWriter.println("@echo " + cId + ">> " + processStartFile);
        fileWriter.println("@ping -n 100 127.0.0.1 >nul");
    } else {
        // So that start file is readable by the test
        fileWriter.write("\numask 0");
        fileWriter.write("\necho $" + Environment.CONTAINER_ID.name() + " > " + processStartFile);
        fileWriter.write("\necho $" + Environment.NM_HOST.name() + " >> " + processStartFile);
        fileWriter.write("\necho $" + Environment.NM_PORT.name() + " >> " + processStartFile);
        fileWriter.write("\necho $" + Environment.NM_HTTP_PORT.name() + " >> " + processStartFile);
        fileWriter.write("\necho $" + Environment.LOCAL_DIRS.name() + " >> " + processStartFile);
        fileWriter.write("\necho $" + Environment.USER.name() + " >> " + processStartFile);
        fileWriter.write("\necho $" + Environment.LOGNAME.name() + " >> " + processStartFile);
        fileWriter.write("\necho $" + Environment.PWD.name() + " >> " + processStartFile);
        fileWriter.write("\necho $" + Environment.HOME.name() + " >> " + processStartFile);
        for (String serviceName : containerManager.getAuxServiceMetaData().keySet()) {
            fileWriter.write("\necho $" + AuxiliaryServiceHelper.NM_AUX_SERVICE + serviceName + " >> " + processStartFile);
        }
        fileWriter.write("\necho $$ >> " + processStartFile);
        fileWriter.write("\nexec sleep 100");
    }
    fileWriter.close();
    // upload the script file so that the container can run it
    URL resource_alpha = URL.fromPath(localFS.makeQualified(new Path(scriptFile.getAbsolutePath())));
    LocalResource rsrc_alpha = recordFactory.newRecordInstance(LocalResource.class);
    rsrc_alpha.setResource(resource_alpha);
    rsrc_alpha.setSize(-1);
    rsrc_alpha.setVisibility(LocalResourceVisibility.APPLICATION);
    rsrc_alpha.setType(LocalResourceType.FILE);
    rsrc_alpha.setTimestamp(scriptFile.lastModified());
    String destinationFile = "dest_file";
    Map<String, LocalResource> localResources = new HashMap<String, LocalResource>();
    localResources.put(destinationFile, rsrc_alpha);
    containerLaunchContext.setLocalResources(localResources);
    // set up the rest of the container
    List<String> commands = Arrays.asList(Shell.getRunScriptCommand(scriptFile));
    containerLaunchContext.setCommands(commands);
    StartContainerRequest scRequest = StartContainerRequest.newInstance(containerLaunchContext, createContainerToken(cId, Priority.newInstance(0), 0));
    List<StartContainerRequest> list = new ArrayList<StartContainerRequest>();
    list.add(scRequest);
    StartContainersRequest allRequests = StartContainersRequest.newInstance(list);
    containerManager.startContainers(allRequests);
    int timeoutSecs = 0;
    while (!processStartFile.exists() && timeoutSecs++ < 20) {
        Thread.sleep(1000);
        LOG.info("Waiting for process start-file to be created");
    }
    Assert.assertTrue("ProcessStartFile doesn't exist!", processStartFile.exists());
    // Now verify the contents of the file
    List<String> localDirs = dirsHandler.getLocalDirs();
    List<String> logDirs = dirsHandler.getLogDirs();
    List<Path> appDirs = new ArrayList<Path>(localDirs.size());
    for (String localDir : localDirs) {
        Path usersdir = new Path(localDir, ContainerLocalizer.USERCACHE);
        Path userdir = new Path(usersdir, user);
        Path appsdir = new Path(userdir, ContainerLocalizer.APPCACHE);
        appDirs.add(new Path(appsdir, appId.toString()));
    }
    List<String> containerLogDirs = new ArrayList<String>();
    String relativeContainerLogDir = ContainerLaunch.getRelativeContainerLogDir(appId.toString(), cId.toString());
    for (String logDir : logDirs) {
        containerLogDirs.add(logDir + Path.SEPARATOR + relativeContainerLogDir);
    }
    BufferedReader reader = new BufferedReader(new FileReader(processStartFile));
    Assert.assertEquals(cId.toString(), reader.readLine());
    Assert.assertEquals(context.getNodeId().getHost(), reader.readLine());
    Assert.assertEquals(String.valueOf(context.getNodeId().getPort()), reader.readLine());
    Assert.assertEquals(String.valueOf(HTTP_PORT), reader.readLine());
    Assert.assertEquals(StringUtils.join(",", appDirs), reader.readLine());
    Assert.assertEquals(user, reader.readLine());
    Assert.assertEquals(user, reader.readLine());
    String obtainedPWD = reader.readLine();
    boolean found = false;
    for (Path localDir : appDirs) {
        if (new Path(localDir, cId.toString()).toString().equals(obtainedPWD)) {
            found = true;
            break;
        }
    }
    Assert.assertTrue("Wrong local-dir found : " + obtainedPWD, found);
    Assert.assertEquals(conf.get(YarnConfiguration.NM_USER_HOME_DIR, YarnConfiguration.DEFAULT_NM_USER_HOME_DIR), reader.readLine());
    for (String serviceName : containerManager.getAuxServiceMetaData().keySet()) {
        Assert.assertEquals(containerManager.getAuxServiceMetaData().get(serviceName), ByteBuffer.wrap(Base64.decodeBase64(reader.readLine().getBytes())));
    }
    Assert.assertEquals(cId.toString(), containerLaunchContext.getEnvironment().get(Environment.CONTAINER_ID.name()));
    Assert.assertEquals(context.getNodeId().getHost(), containerLaunchContext.getEnvironment().get(Environment.NM_HOST.name()));
    Assert.assertEquals(String.valueOf(context.getNodeId().getPort()), containerLaunchContext.getEnvironment().get(Environment.NM_PORT.name()));
    Assert.assertEquals(String.valueOf(HTTP_PORT), containerLaunchContext.getEnvironment().get(Environment.NM_HTTP_PORT.name()));
    Assert.assertEquals(StringUtils.join(",", appDirs), containerLaunchContext.getEnvironment().get(Environment.LOCAL_DIRS.name()));
    Assert.assertEquals(StringUtils.join(",", containerLogDirs), containerLaunchContext.getEnvironment().get(Environment.LOG_DIRS.name()));
    Assert.assertEquals(user, containerLaunchContext.getEnvironment().get(Environment.USER.name()));
    Assert.assertEquals(user, containerLaunchContext.getEnvironment().get(Environment.LOGNAME.name()));
    found = false;
    obtainedPWD = containerLaunchContext.getEnvironment().get(Environment.PWD.name());
    for (Path localDir : appDirs) {
        if (new Path(localDir, cId.toString()).toString().equals(obtainedPWD)) {
            found = true;
            break;
        }
    }
    Assert.assertTrue("Wrong local-dir found : " + obtainedPWD, found);
    Assert.assertEquals(conf.get(YarnConfiguration.NM_USER_HOME_DIR, YarnConfiguration.DEFAULT_NM_USER_HOME_DIR), containerLaunchContext.getEnvironment().get(Environment.HOME.name()));
    // Get the pid of the process
    String pid = reader.readLine().trim();
    // No more lines
    Assert.assertEquals(null, reader.readLine());
    // Assert that the process is alive
    Assert.assertTrue("Process is not alive!", DefaultContainerExecutor.containerIsAlive(pid));
    // Once more
    Assert.assertTrue("Process is not alive!", DefaultContainerExecutor.containerIsAlive(pid));
    // Now test the stop functionality.
    List<ContainerId> containerIds = new ArrayList<ContainerId>();
    containerIds.add(cId);
    StopContainersRequest stopRequest = StopContainersRequest.newInstance(containerIds);
    containerManager.stopContainers(stopRequest);
    BaseContainerManagerTest.waitForContainerState(containerManager, cId, ContainerState.COMPLETE);
    GetContainerStatusesRequest gcsRequest = GetContainerStatusesRequest.newInstance(containerIds);
    ContainerStatus containerStatus = containerManager.getContainerStatuses(gcsRequest).getContainerStatuses().get(0);
    int expectedExitCode = ContainerExitStatus.KILLED_BY_APPMASTER;
    Assert.assertEquals(expectedExitCode, containerStatus.getExitStatus());
    // Assert that the process is not alive anymore
    Assert.assertFalse("Process is still alive!", DefaultContainerExecutor.containerIsAlive(pid));
}
Also used : HashMap(java.util.HashMap) GetContainerStatusesRequest(org.apache.hadoop.yarn.api.protocolrecords.GetContainerStatusesRequest) ArrayList(java.util.ArrayList) URL(org.apache.hadoop.yarn.api.records.URL) NMContainerStatus(org.apache.hadoop.yarn.server.api.protocolrecords.NMContainerStatus) ContainerStatus(org.apache.hadoop.yarn.api.records.ContainerStatus) ContainerId(org.apache.hadoop.yarn.api.records.ContainerId) FileReader(java.io.FileReader) PrintWriter(java.io.PrintWriter) StopContainersRequest(org.apache.hadoop.yarn.api.protocolrecords.StopContainersRequest) Path(org.apache.hadoop.fs.Path) StartContainersRequest(org.apache.hadoop.yarn.api.protocolrecords.StartContainersRequest) ContainerLaunchContext(org.apache.hadoop.yarn.api.records.ContainerLaunchContext) ApplicationAttemptId(org.apache.hadoop.yarn.api.records.ApplicationAttemptId) LocalResource(org.apache.hadoop.yarn.api.records.LocalResource) StartContainerRequest(org.apache.hadoop.yarn.api.protocolrecords.StartContainerRequest) BufferedReader(java.io.BufferedReader) ApplicationId(org.apache.hadoop.yarn.api.records.ApplicationId) JarFile(java.util.jar.JarFile) File(java.io.File) BaseContainerManagerTest(org.apache.hadoop.yarn.server.nodemanager.containermanager.BaseContainerManagerTest) Test(org.junit.Test)
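
The scaffolding around all the environment checks reduces to one pattern: package a ContainerLaunchContext (local resources, environment, commands) into a StartContainersRequest for the NodeManager. A minimal sketch of that assembly, under stated assumptions: the resource name and command are placeholders, and the null container token stands in for where the test supplies createContainerToken(...).

import java.util.Collections;
import java.util.Map;

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.yarn.api.protocolrecords.StartContainerRequest;
import org.apache.hadoop.yarn.api.protocolrecords.StartContainersRequest;
import org.apache.hadoop.yarn.api.records.ContainerLaunchContext;
import org.apache.hadoop.yarn.api.records.LocalResource;
import org.apache.hadoop.yarn.api.records.LocalResourceType;
import org.apache.hadoop.yarn.api.records.LocalResourceVisibility;
import org.apache.hadoop.yarn.api.records.URL;

public class StartRequestSketch {

    public static StartContainersRequest buildRequest(Path script, long len, long timestamp) {
        // One localized file, visible only to this application.
        LocalResource scriptResource = LocalResource.newInstance(
            URL.fromPath(script), LocalResourceType.FILE,
            LocalResourceVisibility.APPLICATION, len, timestamp);
        Map<String, LocalResource> localResources =
            Collections.singletonMap("dest_file", scriptResource);

        ContainerLaunchContext clc = ContainerLaunchContext.newInstance(
            localResources,
            Collections.<String, String>emptyMap(),        // environment
            Collections.singletonList("./dest_file"),      // commands
            null /* service data */, null /* tokens */, null /* ACLs */);

        // A real NodeManager call requires a valid container token in place of null.
        StartContainerRequest one = StartContainerRequest.newInstance(clc, null);
        return StartContainersRequest.newInstance(Collections.singletonList(one));
    }
}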

Example 45 with ContainerLaunchContext

Use of org.apache.hadoop.yarn.api.records.ContainerLaunchContext in project hadoop by apache.

The class Client, method run.

/**
   * Main run function for the client
   * @return true if application completed successfully
   * @throws IOException
   * @throws YarnException
   */
public boolean run() throws IOException, YarnException {
    LOG.info("Running Client");
    yarnClient.start();
    YarnClusterMetrics clusterMetrics = yarnClient.getYarnClusterMetrics();
    LOG.info("Got Cluster metric info from ASM" + ", numNodeManagers=" + clusterMetrics.getNumNodeManagers());
    List<NodeReport> clusterNodeReports = yarnClient.getNodeReports(NodeState.RUNNING);
    LOG.info("Got Cluster node info from ASM");
    for (NodeReport node : clusterNodeReports) {
        LOG.info("Got node report from ASM for" + ", nodeId=" + node.getNodeId() + ", nodeAddress=" + node.getHttpAddress() + ", nodeRackName=" + node.getRackName() + ", nodeNumContainers=" + node.getNumContainers());
    }
    QueueInfo queueInfo = yarnClient.getQueueInfo(this.amQueue);
    LOG.info("Queue info" + ", queueName=" + queueInfo.getQueueName() + ", queueCurrentCapacity=" + queueInfo.getCurrentCapacity() + ", queueMaxCapacity=" + queueInfo.getMaximumCapacity() + ", queueApplicationCount=" + queueInfo.getApplications().size() + ", queueChildQueueCount=" + queueInfo.getChildQueues().size());
    List<QueueUserACLInfo> listAclInfo = yarnClient.getQueueAclsInfo();
    for (QueueUserACLInfo aclInfo : listAclInfo) {
        for (QueueACL userAcl : aclInfo.getUserAcls()) {
            LOG.info("User ACL Info for Queue" + ", queueName=" + aclInfo.getQueueName() + ", userAcl=" + userAcl.name());
        }
    }
    if (domainId != null && domainId.length() > 0 && toCreateDomain) {
        prepareTimelineDomain();
    }
    // Get a new application id
    YarnClientApplication app = yarnClient.createApplication();
    GetNewApplicationResponse appResponse = app.getNewApplicationResponse();
    // TODO get min/max resource capabilities from RM and change memory ask if needed
    // If we do not have min/max, we may not be able to correctly request 
    // the required resources from the RM for the app master
    // Memory ask has to be a multiple of min and less than max. 
    // Dump out information about cluster capability as seen by the resource manager
    long maxMem = appResponse.getMaximumResourceCapability().getMemorySize();
    LOG.info("Max mem capability of resources in this cluster " + maxMem);
    // A resource ask cannot exceed the max. 
    if (amMemory > maxMem) {
        LOG.info("AM memory specified above max threshold of cluster. Using max value." + ", specified=" + amMemory + ", max=" + maxMem);
        amMemory = maxMem;
    }
    int maxVCores = appResponse.getMaximumResourceCapability().getVirtualCores();
    LOG.info("Max virtual cores capability of resources in this cluster " + maxVCores);
    if (amVCores > maxVCores) {
        LOG.info("AM virtual cores specified above max threshold of cluster. " + "Using max value." + ", specified=" + amVCores + ", max=" + maxVCores);
        amVCores = maxVCores;
    }
    // set the application name
    ApplicationSubmissionContext appContext = app.getApplicationSubmissionContext();
    ApplicationId appId = appContext.getApplicationId();
    appContext.setKeepContainersAcrossApplicationAttempts(keepContainers);
    appContext.setApplicationName(appName);
    if (attemptFailuresValidityInterval >= 0) {
        appContext.setAttemptFailuresValidityInterval(attemptFailuresValidityInterval);
    }
    Set<String> tags = new HashSet<String>();
    if (flowName != null) {
        tags.add(TimelineUtils.generateFlowNameTag(flowName));
    }
    if (flowVersion != null) {
        tags.add(TimelineUtils.generateFlowVersionTag(flowVersion));
    }
    if (flowRunId != 0) {
        tags.add(TimelineUtils.generateFlowRunIdTag(flowRunId));
    }
    appContext.setApplicationTags(tags);
    // set local resources for the application master
    // local files or archives as needed
    // In this scenario, the jar file for the application master is part of the local resources			
    Map<String, LocalResource> localResources = new HashMap<String, LocalResource>();
    LOG.info("Copy App Master jar from local filesystem and add to local environment");
    // Copy the application master jar to the filesystem 
    // Create a local resource to point to the destination jar path 
    FileSystem fs = FileSystem.get(conf);
    addToLocalResources(fs, appMasterJar, appMasterJarPath, appId.toString(), localResources, null);
    // Set the log4j properties if needed 
    if (!log4jPropFile.isEmpty()) {
        addToLocalResources(fs, log4jPropFile, log4jPath, appId.toString(), localResources, null);
    }
    // The shell script has to be made available on the final container(s)
    // where it will be executed. 
    // To do this, we need to first copy into the filesystem that is visible 
    // to the yarn framework. 
    // We do not need to set this as a local resource for the application 
    // master as the application master does not need it. 		
    String hdfsShellScriptLocation = "";
    long hdfsShellScriptLen = 0;
    long hdfsShellScriptTimestamp = 0;
    if (!shellScriptPath.isEmpty()) {
        Path shellSrc = new Path(shellScriptPath);
        String shellPathSuffix = appName + "/" + appId.toString() + "/" + SCRIPT_PATH;
        Path shellDst = new Path(fs.getHomeDirectory(), shellPathSuffix);
        fs.copyFromLocalFile(false, true, shellSrc, shellDst);
        hdfsShellScriptLocation = shellDst.toUri().toString();
        FileStatus shellFileStatus = fs.getFileStatus(shellDst);
        hdfsShellScriptLen = shellFileStatus.getLen();
        hdfsShellScriptTimestamp = shellFileStatus.getModificationTime();
    }
    if (!shellCommand.isEmpty()) {
        addToLocalResources(fs, null, shellCommandPath, appId.toString(), localResources, shellCommand);
    }
    if (shellArgs.length > 0) {
        addToLocalResources(fs, null, shellArgsPath, appId.toString(), localResources, StringUtils.join(shellArgs, " "));
    }
    // Set the necessary security tokens as needed
    //amContainer.setContainerTokens(containerToken);
    // Set the env variables to be setup in the env where the application master will be run
    LOG.info("Set the environment for the application master");
    Map<String, String> env = new HashMap<String, String>();
    // put location of shell script into env
    // using the env info, the application master will create the correct local resource for the 
    // eventual containers that will be launched to execute the shell scripts
    env.put(DSConstants.DISTRIBUTEDSHELLSCRIPTLOCATION, hdfsShellScriptLocation);
    env.put(DSConstants.DISTRIBUTEDSHELLSCRIPTTIMESTAMP, Long.toString(hdfsShellScriptTimestamp));
    env.put(DSConstants.DISTRIBUTEDSHELLSCRIPTLEN, Long.toString(hdfsShellScriptLen));
    if (domainId != null && domainId.length() > 0) {
        env.put(DSConstants.DISTRIBUTEDSHELLTIMELINEDOMAIN, domainId);
    }
    // Add AppMaster.jar location to classpath 		
    // At some point we should not be required to add 
    // the hadoop specific classpaths to the env. 
    // It should be provided out of the box. 
    // For now setting all required classpaths including
    // the classpath to "." for the application jar
    StringBuilder classPathEnv = new StringBuilder(Environment.CLASSPATH.$$()).append(ApplicationConstants.CLASS_PATH_SEPARATOR).append("./*");
    for (String c : conf.getStrings(YarnConfiguration.YARN_APPLICATION_CLASSPATH, YarnConfiguration.DEFAULT_YARN_CROSS_PLATFORM_APPLICATION_CLASSPATH)) {
        classPathEnv.append(ApplicationConstants.CLASS_PATH_SEPARATOR);
        classPathEnv.append(c.trim());
    }
    classPathEnv.append(ApplicationConstants.CLASS_PATH_SEPARATOR).append("./log4j.properties");
    // add the runtime classpath needed for tests to work
    if (conf.getBoolean(YarnConfiguration.IS_MINI_YARN_CLUSTER, false)) {
        classPathEnv.append(':');
        classPathEnv.append(System.getProperty("java.class.path"));
    }
    env.put("CLASSPATH", classPathEnv.toString());
    // Set the necessary command to execute the application master 
    Vector<CharSequence> vargs = new Vector<CharSequence>(30);
    // Set java executable command 
    LOG.info("Setting up app master command");
    vargs.add(Environment.JAVA_HOME.$$() + "/bin/java");
    // Set Xmx based on am memory size
    vargs.add("-Xmx" + amMemory + "m");
    // Set class name 
    vargs.add(appMasterMainClass);
    // Set params for Application Master
    vargs.add("--container_memory " + String.valueOf(containerMemory));
    vargs.add("--container_vcores " + String.valueOf(containerVirtualCores));
    vargs.add("--num_containers " + String.valueOf(numContainers));
    if (null != nodeLabelExpression) {
        appContext.setNodeLabelExpression(nodeLabelExpression);
    }
    vargs.add("--priority " + String.valueOf(shellCmdPriority));
    for (Map.Entry<String, String> entry : shellEnv.entrySet()) {
        vargs.add("--shell_env " + entry.getKey() + "=" + entry.getValue());
    }
    if (debugFlag) {
        vargs.add("--debug");
    }
    vargs.addAll(containerRetryOptions);
    vargs.add("1>" + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/AppMaster.stdout");
    vargs.add("2>" + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/AppMaster.stderr");
    // Get the final command
    StringBuilder command = new StringBuilder();
    for (CharSequence str : vargs) {
        command.append(str).append(" ");
    }
    LOG.info("Completed setting up app master command " + command.toString());
    List<String> commands = new ArrayList<String>();
    commands.add(command.toString());
    // Set up the container launch context for the application master
    ContainerLaunchContext amContainer = ContainerLaunchContext.newInstance(localResources, env, commands, null, null, null);
    // Set up resource type requirements
    // For now, both memory and vcores are supported, so we set memory and 
    // vcores requirements
    Resource capability = Resource.newInstance(amMemory, amVCores);
    appContext.setResource(capability);
    // Setup security tokens
    if (UserGroupInformation.isSecurityEnabled()) {
        // Note: Credentials class is marked as LimitedPrivate for HDFS and MapReduce
        Credentials credentials = new Credentials();
        String tokenRenewer = YarnClientUtils.getRmPrincipal(conf);
        if (tokenRenewer == null || tokenRenewer.length() == 0) {
            throw new IOException("Can't get Master Kerberos principal for the RM to use as renewer");
        }
        // For now, only getting tokens for the default file-system.
        final Token<?>[] tokens = fs.addDelegationTokens(tokenRenewer, credentials);
        if (tokens != null) {
            for (Token<?> token : tokens) {
                LOG.info("Got dt for " + fs.getUri() + "; " + token);
            }
        }
        DataOutputBuffer dob = new DataOutputBuffer();
        credentials.writeTokenStorageToStream(dob);
        ByteBuffer fsTokens = ByteBuffer.wrap(dob.getData(), 0, dob.getLength());
        amContainer.setTokens(fsTokens);
    }
    appContext.setAMContainerSpec(amContainer);
    // Set the priority for the application master
    // TODO - what is the range for priority? how to decide? 
    Priority pri = Priority.newInstance(amPriority);
    appContext.setPriority(pri);
    // Set the queue to which this application is to be submitted in the RM
    appContext.setQueue(amQueue);
    // Submit the application to the applications manager
    // SubmitApplicationResponse submitResp = applicationsManager.submitApplication(appRequest);
    // Ignore the response as either a valid response object is returned on success 
    // or an exception thrown to denote some form of a failure
    LOG.info("Submitting application to ASM");
    yarnClient.submitApplication(appContext);
    // Monitor the application
    return monitorApplication(appId);
}
Also used : QueueInfo(org.apache.hadoop.yarn.api.records.QueueInfo) GetNewApplicationResponse(org.apache.hadoop.yarn.api.protocolrecords.GetNewApplicationResponse) FileStatus(org.apache.hadoop.fs.FileStatus) HashMap(java.util.HashMap) ArrayList(java.util.ArrayList) Token(org.apache.hadoop.security.token.Token) FileSystem(org.apache.hadoop.fs.FileSystem) ApplicationSubmissionContext(org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext) DataOutputBuffer(org.apache.hadoop.io.DataOutputBuffer) Vector(java.util.Vector) HashSet(java.util.HashSet) Path(org.apache.hadoop.fs.Path) YarnClusterMetrics(org.apache.hadoop.yarn.api.records.YarnClusterMetrics) YarnClientApplication(org.apache.hadoop.yarn.client.api.YarnClientApplication) Priority(org.apache.hadoop.yarn.api.records.Priority) QueueACL(org.apache.hadoop.yarn.api.records.QueueACL) Resource(org.apache.hadoop.yarn.api.records.Resource) LocalResource(org.apache.hadoop.yarn.api.records.LocalResource) QueueUserACLInfo(org.apache.hadoop.yarn.api.records.QueueUserACLInfo) ContainerLaunchContext(org.apache.hadoop.yarn.api.records.ContainerLaunchContext) IOException(java.io.IOException) ByteBuffer(java.nio.ByteBuffer) ApplicationId(org.apache.hadoop.yarn.api.records.ApplicationId) Map(java.util.Map) NodeReport(org.apache.hadoop.yarn.api.records.NodeReport) Credentials(org.apache.hadoop.security.Credentials)
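
Stripped of the distributed-shell specifics, the submission path above is: create the application, attach an AM ContainerLaunchContext and a resource ask to the submission context, and submit. A bare-bones sketch under those assumptions; the memory, vcores, queue, and AM command values are placeholders, and real clients localize the AM jar and set up security tokens as shown in the full example.

import java.util.Collections;

import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext;
import org.apache.hadoop.yarn.api.records.ContainerLaunchContext;
import org.apache.hadoop.yarn.api.records.LocalResource;
import org.apache.hadoop.yarn.api.records.Priority;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.client.api.YarnClient;
import org.apache.hadoop.yarn.client.api.YarnClientApplication;
import org.apache.hadoop.yarn.conf.YarnConfiguration;

public class SubmitSketch {

    public static ApplicationId submit(String amCommand) throws Exception {
        YarnClient yarnClient = YarnClient.createYarnClient();
        yarnClient.init(new YarnConfiguration());
        yarnClient.start();
        try {
            YarnClientApplication app = yarnClient.createApplication();
            ApplicationSubmissionContext appContext = app.getApplicationSubmissionContext();

            // AM container spec: no local resources or env in this sketch, one command.
            ContainerLaunchContext amContainer = ContainerLaunchContext.newInstance(
                Collections.<String, LocalResource>emptyMap(),
                Collections.<String, String>emptyMap(),
                Collections.singletonList(amCommand),
                null, null, null);
            appContext.setAMContainerSpec(amContainer);
            appContext.setResource(Resource.newInstance(1024 /* MB */, 1 /* vcores */));
            appContext.setPriority(Priority.newInstance(0));
            appContext.setQueue("default");

            yarnClient.submitApplication(appContext);
            return appContext.getApplicationId();
        } finally {
            yarnClient.stop();
        }
    }
}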

Aggregations

ContainerLaunchContext (org.apache.hadoop.yarn.api.records.ContainerLaunchContext): 130
Test (org.junit.Test): 57
ArrayList (java.util.ArrayList): 54
ContainerId (org.apache.hadoop.yarn.api.records.ContainerId): 52
HashMap (java.util.HashMap): 50
ApplicationId (org.apache.hadoop.yarn.api.records.ApplicationId): 50
ApplicationSubmissionContext (org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext): 42
LocalResource (org.apache.hadoop.yarn.api.records.LocalResource): 41
StartContainerRequest (org.apache.hadoop.yarn.api.protocolrecords.StartContainerRequest): 40
Path (org.apache.hadoop.fs.Path): 37
StartContainersRequest (org.apache.hadoop.yarn.api.protocolrecords.StartContainersRequest): 37
ByteBuffer (java.nio.ByteBuffer): 29
Resource (org.apache.hadoop.yarn.api.records.Resource): 25
YarnConfiguration (org.apache.hadoop.yarn.conf.YarnConfiguration): 25
IOException (java.io.IOException): 24
Credentials (org.apache.hadoop.security.Credentials): 23
File (java.io.File): 22
ApplicationAttemptId (org.apache.hadoop.yarn.api.records.ApplicationAttemptId): 22
GetContainerStatusesRequest (org.apache.hadoop.yarn.api.protocolrecords.GetContainerStatusesRequest): 20
YarnException (org.apache.hadoop.yarn.exceptions.YarnException): 20