Example 61 with ContainerLaunchContext

Use of org.apache.hadoop.yarn.api.records.ContainerLaunchContext in project hadoop by apache.

From the class TestSystemMetricsPublisherForV2, method createRMApp:

private static RMApp createRMApp(ApplicationId appId) {
    RMApp app = mock(RMAppImpl.class);
    when(app.getApplicationId()).thenReturn(appId);
    when(app.getName()).thenReturn("test app");
    when(app.getApplicationType()).thenReturn("test app type");
    when(app.getUser()).thenReturn("testUser");
    when(app.getQueue()).thenReturn("test queue");
    when(app.getSubmitTime()).thenReturn(Integer.MAX_VALUE + 1L);
    when(app.getStartTime()).thenReturn(Integer.MAX_VALUE + 2L);
    when(app.getFinishTime()).thenReturn(Integer.MAX_VALUE + 3L);
    when(app.getDiagnostics()).thenReturn(new StringBuilder("test diagnostics info"));
    RMAppAttempt appAttempt = mock(RMAppAttempt.class);
    when(appAttempt.getAppAttemptId()).thenReturn(ApplicationAttemptId.newInstance(appId, 1));
    when(app.getCurrentAppAttempt()).thenReturn(appAttempt);
    when(app.getFinalApplicationStatus()).thenReturn(FinalApplicationStatus.UNDEFINED);
    when(app.getRMAppMetrics()).thenReturn(new RMAppMetrics(Resource.newInstance(0, 0), 0, 0, Integer.MAX_VALUE, Long.MAX_VALUE, 0, 0));
    when(app.getApplicationTags()).thenReturn(Collections.<String>emptySet());
    ApplicationSubmissionContext appSubmissionContext = mock(ApplicationSubmissionContext.class);
    when(appSubmissionContext.getPriority()).thenReturn(Priority.newInstance(0));
    when(app.getApplicationPriority()).thenReturn(Priority.newInstance(10));
    ContainerLaunchContext containerLaunchContext = mock(ContainerLaunchContext.class);
    when(containerLaunchContext.getCommands()).thenReturn(Collections.singletonList("java -Xmx1024m"));
    when(appSubmissionContext.getAMContainerSpec()).thenReturn(containerLaunchContext);
    when(app.getApplicationSubmissionContext()).thenReturn(appSubmissionContext);
    return app;
}
Also used: RMApp (org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp), RMAppAttempt (org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttempt), RMAppMetrics (org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppMetrics), ApplicationSubmissionContext (org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext), ContainerLaunchContext (org.apache.hadoop.yarn.api.records.ContainerLaunchContext)
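
The test builds the whole RMApp graph from Mockito stubs, so no ResourceManager has to run; the ContainerLaunchContext itself is a mock whose only stubbed behavior is getCommands(). A minimal sketch of that part of the technique (the helper name stubSubmissionContext is hypothetical, not part of the test above):

import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;

import java.util.Collections;
import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext;
import org.apache.hadoop.yarn.api.records.ContainerLaunchContext;

// Hypothetical helper: builds a submission context whose AM container
// spec reports a single launch command, mirroring the stubbing above.
static ApplicationSubmissionContext stubSubmissionContext(String amCommand) {
    ContainerLaunchContext amSpec = mock(ContainerLaunchContext.class);
    when(amSpec.getCommands()).thenReturn(Collections.singletonList(amCommand));
    ApplicationSubmissionContext submission = mock(ApplicationSubmissionContext.class);
    when(submission.getAMContainerSpec()).thenReturn(amSpec);
    return submission;
}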

Example 62 with ContainerLaunchContext

Use of org.apache.hadoop.yarn.api.records.ContainerLaunchContext in project hadoop by apache.

From the class TaskAttemptImpl, method createCommonContainerLaunchContext:

/**
   * Create the common {@link ContainerLaunchContext} for all attempts.
   * @param applicationACLs the application ACLs to set on the launch context
   */
private static ContainerLaunchContext createCommonContainerLaunchContext(Map<ApplicationAccessType, String> applicationACLs, Configuration conf, Token<JobTokenIdentifier> jobToken, final org.apache.hadoop.mapred.JobID oldJobId, Credentials credentials) {
    // Application resources
    Map<String, LocalResource> localResources = new HashMap<String, LocalResource>();
    // Application environment
    Map<String, String> environment = new HashMap<String, String>();
    // Service data
    Map<String, ByteBuffer> serviceData = new HashMap<String, ByteBuffer>();
    // Tokens
    ByteBuffer taskCredentialsBuffer = ByteBuffer.wrap(new byte[] {});
    try {
        FileSystem remoteFS = FileSystem.get(conf);
        // //////////// Set up JobJar to be localized properly on the remote NM.
        String jobJar = conf.get(MRJobConfig.JAR);
        if (jobJar != null) {
            final Path jobJarPath = new Path(jobJar);
            final FileSystem jobJarFs = FileSystem.get(jobJarPath.toUri(), conf);
            Path remoteJobJar = jobJarPath.makeQualified(jobJarFs.getUri(), jobJarFs.getWorkingDirectory());
            LocalResource rc = createLocalResource(jobJarFs, remoteJobJar, LocalResourceType.PATTERN, LocalResourceVisibility.APPLICATION);
            String pattern = conf.getPattern(JobContext.JAR_UNPACK_PATTERN, JobConf.UNPACK_JAR_PATTERN_DEFAULT).pattern();
            rc.setPattern(pattern);
            localResources.put(MRJobConfig.JOB_JAR, rc);
            LOG.info("The job-jar file on the remote FS is " + remoteJobJar.toUri().toASCIIString());
        } else {
            // Job jar may be null. For e.g, for pipes, the job jar is the hadoop
            // mapreduce jar itself which is already on the classpath.
            LOG.info("Job jar is not present. " + "Not adding any jar to the list of resources.");
        }
        // //////////// End of JobJar setup
        // //////////// Set up JobConf to be localized properly on the remote NM.
        Path path = MRApps.getStagingAreaDir(conf, UserGroupInformation.getCurrentUser().getShortUserName());
        Path remoteJobSubmitDir = new Path(path, oldJobId.toString());
        Path remoteJobConfPath = new Path(remoteJobSubmitDir, MRJobConfig.JOB_CONF_FILE);
        localResources.put(MRJobConfig.JOB_CONF_FILE, createLocalResource(remoteFS, remoteJobConfPath, LocalResourceType.FILE, LocalResourceVisibility.APPLICATION));
        LOG.info("The job-conf file on the remote FS is " + remoteJobConfPath.toUri().toASCIIString());
        // //////////// End of JobConf setup
        // Setup DistributedCache
        MRApps.setupDistributedCache(conf, localResources);
        // Setup up task credentials buffer
        LOG.info("Adding #" + credentials.numberOfTokens() + " tokens and #" + credentials.numberOfSecretKeys() + " secret keys for NM use for launching container");
        Credentials taskCredentials = new Credentials(credentials);
        // LocalStorageToken is needed irrespective of whether security is enabled
        // or not.
        TokenCache.setJobToken(jobToken, taskCredentials);
        DataOutputBuffer containerTokens_dob = new DataOutputBuffer();
        LOG.info("Size of containertokens_dob is " + taskCredentials.numberOfTokens());
        taskCredentials.writeTokenStorageToStream(containerTokens_dob);
        taskCredentialsBuffer = ByteBuffer.wrap(containerTokens_dob.getData(), 0, containerTokens_dob.getLength());
        // Add shuffle secret key
        // The secret key is converted to a JobToken to preserve backwards
        // compatibility with an older ShuffleHandler running on an NM.
        LOG.info("Putting shuffle token in serviceData");
        byte[] shuffleSecret = TokenCache.getShuffleSecretKey(credentials);
        if (shuffleSecret == null) {
            LOG.warn("Cannot locate shuffle secret in credentials." + " Using job token as shuffle secret.");
            shuffleSecret = jobToken.getPassword();
        }
        Token<JobTokenIdentifier> shuffleToken = new Token<JobTokenIdentifier>(jobToken.getIdentifier(), shuffleSecret, jobToken.getKind(), jobToken.getService());
        serviceData.put(ShuffleHandler.MAPREDUCE_SHUFFLE_SERVICEID, ShuffleHandler.serializeServiceData(shuffleToken));
        // add external shuffle-providers - if any
        Collection<String> shuffleProviders = conf.getStringCollection(MRJobConfig.MAPREDUCE_JOB_SHUFFLE_PROVIDER_SERVICES);
        if (!shuffleProviders.isEmpty()) {
            Collection<String> auxNames = conf.getStringCollection(YarnConfiguration.NM_AUX_SERVICES);
            for (final String shuffleProvider : shuffleProviders) {
                if (shuffleProvider.equals(ShuffleHandler.MAPREDUCE_SHUFFLE_SERVICEID)) {
                    // skip built-in shuffle-provider that was already inserted with shuffle secret key
                    continue;
                }
                if (auxNames.contains(shuffleProvider)) {
                    LOG.info("Adding ShuffleProvider Service: " + shuffleProvider + " to serviceData");
                    // This only serves for INIT_APP notifications
                    // The shuffle service needs to be able to work with the host:port information provided by the AM
                    // (i.e. shuffle services which require custom location / other configuration are not supported)
                    serviceData.put(shuffleProvider, ByteBuffer.allocate(0));
                } else {
                    throw new YarnRuntimeException("ShuffleProvider Service: " + shuffleProvider + " was NOT found in the list of aux-services that are available in this NM." + " You may need to specify this ShuffleProvider as an aux-service in your yarn-site.xml");
                }
            }
        }
        MRApps.addToEnvironment(environment, Environment.CLASSPATH.name(), getInitialClasspath(conf), conf);
        if (initialAppClasspath != null) {
            MRApps.addToEnvironment(environment, Environment.APP_CLASSPATH.name(), initialAppClasspath, conf);
        }
    } catch (IOException e) {
        throw new YarnRuntimeException(e);
    }
    // Shell
    environment.put(Environment.SHELL.name(), conf.get(MRJobConfig.MAPRED_ADMIN_USER_SHELL, MRJobConfig.DEFAULT_SHELL));
    // Add pwd to LD_LIBRARY_PATH, add this before adding anything else
    MRApps.addToEnvironment(environment, Environment.LD_LIBRARY_PATH.name(), MRApps.crossPlatformifyMREnv(conf, Environment.PWD), conf);
    // Add the env variables passed by the admin
    MRApps.setEnvFromInputString(environment, conf.get(MRJobConfig.MAPRED_ADMIN_USER_ENV, MRJobConfig.DEFAULT_MAPRED_ADMIN_USER_ENV), conf);
    // Construct the actual Container
    // The null fields are per-container and will be constructed for each
    // container separately.
    ContainerLaunchContext container = ContainerLaunchContext.newInstance(localResources, environment, null, serviceData, taskCredentialsBuffer, applicationACLs);
    return container;
}
Also used: Path (org.apache.hadoop.fs.Path), HashMap (java.util.HashMap), JobTokenIdentifier (org.apache.hadoop.mapreduce.security.token.JobTokenIdentifier), Token (org.apache.hadoop.security.token.Token), IOException (java.io.IOException), ContainerLaunchContext (org.apache.hadoop.yarn.api.records.ContainerLaunchContext), ByteBuffer (java.nio.ByteBuffer), LocalResource (org.apache.hadoop.yarn.api.records.LocalResource), YarnRuntimeException (org.apache.hadoop.yarn.exceptions.YarnRuntimeException), FileSystem (org.apache.hadoop.fs.FileSystem), DataOutputBuffer (org.apache.hadoop.io.DataOutputBuffer), Credentials (org.apache.hadoop.security.Credentials)
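
The commands argument in the newInstance call above is deliberately null: the common context is shared by all attempts, and the per-container fields are filled in later. A plausible sketch of that specialization step, reusing the same factory (the helper name withCommands is hypothetical; the getters are the standard ContainerLaunchContext accessors):

import java.util.List;
import org.apache.hadoop.yarn.api.records.ContainerLaunchContext;

// Hypothetical sketch: derive a per-container context from the common one
// by supplying the container-specific command list; everything else is
// copied from the shared context built above.
static ContainerLaunchContext withCommands(ContainerLaunchContext common,
        List<String> commands) {
    return ContainerLaunchContext.newInstance(
        common.getLocalResources(),    // job jar, job.xml, dist-cache entries
        common.getEnvironment(),       // classpath, shell, admin env
        commands,                      // per-container launch command
        common.getServiceData(),       // shuffle token and providers
        common.getTokens(),            // serialized task credentials
        common.getApplicationACLs());  // view/modify ACLs
}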

Example 63 with ContainerLaunchContext

Use of org.apache.hadoop.yarn.api.records.ContainerLaunchContext in project alluxio by Alluxio.

From the class ApplicationMaster, method launchMasterContainer:

private void launchMasterContainer(Container container) {
    String command = YarnUtils.buildCommand(YarnContainerType.ALLUXIO_MASTER);
    try {
        ContainerLaunchContext ctx = Records.newRecord(ContainerLaunchContext.class);
        ctx.setCommands(Lists.newArrayList(command));
        ctx.setLocalResources(setupLocalResources(mResourcePath));
        ctx.setEnvironment(setupMasterEnvironment());
        if (UserGroupInformation.isSecurityEnabled()) {
            ctx.setTokens(mAllTokens.duplicate());
        }
        LOG.info("Launching container {} for Alluxio master on {} with master command: {}", container.getId(), container.getNodeHttpAddress(), command);
        mNMClient.startContainer(container, ctx);
        // in the form of 1.2.3.4:8042
        String containerUri = container.getNodeHttpAddress();
        mMasterContainerNetAddress = containerUri.split(":")[0];
        LOG.info("Master address: {}", mMasterContainerNetAddress);
        return;
    } catch (Exception e) {
        LOG.error("Error launching container {}", container.getId(), e);
    }
}
Also used: ContainerLaunchContext (org.apache.hadoop.yarn.api.records.ContainerLaunchContext), YarnException (org.apache.hadoop.yarn.exceptions.YarnException), MalformedURLException (java.net.MalformedURLException), IOException (java.io.IOException)
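
This example builds the context with Records.newRecord plus setters, whereas Example 62 used the static newInstance factory; the two styles are interchangeable. The same launch context via the factory might look like this (values taken from the method above; unset fields are passed as null):

// Equivalent construction via the static factory; argument order is
// resources, environment, commands, service data, tokens, ACLs.
ContainerLaunchContext ctx = ContainerLaunchContext.newInstance(
    setupLocalResources(mResourcePath),        // local resources
    setupMasterEnvironment(),                  // environment
    Lists.newArrayList(command),               // commands
    null,                                      // no auxiliary service data
    UserGroupInformation.isSecurityEnabled()   // tokens only in secure mode
        ? mAllTokens.duplicate() : null,
    null);                                     // no application ACLs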

Example 64 with ContainerLaunchContext

Use of org.apache.hadoop.yarn.api.records.ContainerLaunchContext in project alluxio by Alluxio.

From the class ApplicationMaster, method launchWorkerContainer:

private void launchWorkerContainer(Container container) {
    String command = YarnUtils.buildCommand(YarnContainerType.ALLUXIO_WORKER);
    ContainerLaunchContext ctx = Records.newRecord(ContainerLaunchContext.class);
    ctx.setCommands(Lists.newArrayList(command));
    ctx.setLocalResources(setupLocalResources(mResourcePath));
    ctx.setEnvironment(setupWorkerEnvironment(mMasterContainerNetAddress, mRamdiskMemInMB));
    if (UserGroupInformation.isSecurityEnabled()) {
        ctx.setTokens(mAllTokens.duplicate());
    }
    try {
        LOG.info("Launching container {} for Alluxio worker on {} with worker command: {}", container.getId(), container.getNodeHttpAddress(), command);
        mNMClient.startContainer(container, ctx);
    } catch (Exception e) {
        LOG.error("Error launching container {}", container.getId(), e);
    }
}
Also used: ContainerLaunchContext (org.apache.hadoop.yarn.api.records.ContainerLaunchContext), YarnException (org.apache.hadoop.yarn.exceptions.YarnException), MalformedURLException (java.net.MalformedURLException), IOException (java.io.IOException)
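
launchWorkerContainer mirrors launchMasterContainer almost line for line; only the container type, the environment, and the post-launch bookkeeping differ. A hypothetical refactoring (launchContainer does not exist in the Alluxio source) could factor the shared setup into one helper:

// Hypothetical shared helper; the container type and environment are the
// only inputs that differ between the master and worker paths above.
private void launchContainer(Container container, YarnContainerType type,
        Map<String, String> env) {
    String command = YarnUtils.buildCommand(type);
    ContainerLaunchContext ctx = Records.newRecord(ContainerLaunchContext.class);
    ctx.setCommands(Lists.newArrayList(command));
    ctx.setLocalResources(setupLocalResources(mResourcePath));
    ctx.setEnvironment(env);
    if (UserGroupInformation.isSecurityEnabled()) {
        ctx.setTokens(mAllTokens.duplicate());
    }
    try {
        LOG.info("Launching container {} ({}) on {} with command: {}",
            container.getId(), type, container.getNodeHttpAddress(), command);
        mNMClient.startContainer(container, ctx);
    } catch (Exception e) {
        LOG.error("Error launching container {}", container.getId(), e);
    }
}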

Example 65 with ContainerLaunchContext

Use of org.apache.hadoop.yarn.api.records.ContainerLaunchContext in project apex-core by apache.

From the class LaunchContainerRunnable, method run:

/**
   * Connects to CM, sets up container launch context and eventually dispatches the container start request to the CM.
   */
@Override
public void run() {
    LOG.info("Setting up container launch context for containerid={}", container.getId());
    ContainerLaunchContext ctx = Records.newRecord(ContainerLaunchContext.class);
    setClasspath(containerEnv);
    // Setup ACLs for the impersonating user
    try {
        String launchPrincipal = System.getenv("HADOOP_USER_NAME");
        LOG.debug("Launch principal {}", launchPrincipal);
        if (launchPrincipal != null) {
            String launchUserName = launchPrincipal;
            if (UserGroupInformation.isSecurityEnabled()) {
                try {
                    launchUserName = new HadoopKerberosName(launchPrincipal).getShortName();
                } catch (Exception ex) {
                    LOG.warn("Error resolving kerberos principal {}", launchPrincipal, ex);
                }
            }
            LOG.debug("ACL launch user {} current user {}", launchUserName, UserGroupInformation.getCurrentUser().getShortUserName());
            if (!UserGroupInformation.getCurrentUser().getShortUserName().equals(launchUserName)) {
                ACLManager.setupUserACLs(ctx, launchUserName, nmClient.getConfig());
            }
        }
    } catch (IOException e) {
        LOG.warn("Unable to setup user acls for container {}", container.getId(), e);
    }
    try {
        // propagate to replace node managers user name (effective in non-secure mode)
        containerEnv.put("HADOOP_USER_NAME", UserGroupInformation.getLoginUser().getUserName());
    } catch (Exception e) {
        LOG.error("Failed to retrieve principal name", e);
    }
    // Set the environment
    ctx.setEnvironment(containerEnv);
    ctx.setTokens(tokens);
    // Set the local resources
    Map<String, LocalResource> localResources = new HashMap<>();
    // add resources for child VM
    try {
        // child VM dependencies
        try (FileSystem fs = StramClientUtils.newFileSystemInstance(nmClient.getConfig())) {
            addFilesToLocalResources(LocalResourceType.FILE, dag.getAttributes().get(Context.DAGContext.LIBRARY_JARS), localResources, fs);
            String archives = dag.getAttributes().get(LogicalPlan.ARCHIVES);
            if (archives != null) {
                addFilesToLocalResources(LocalResourceType.ARCHIVE, archives, localResources, fs);
            }
            ctx.setLocalResources(localResources);
        }
    } catch (IOException e) {
        LOG.error("Failed to prepare local resources.", e);
        return;
    }
    // Set the necessary command to execute on the allocated container
    List<CharSequence> vargs = getChildVMCommand(container.getId().toString());
    // Get final command
    StringBuilder command = new StringBuilder(1024);
    for (CharSequence str : vargs) {
        command.append(str).append(" ");
    }
    LOG.info("Launching on node: {} command: {}", container.getNodeId(), command);
    List<String> commands = new ArrayList<>();
    commands.add(command.toString());
    ctx.setCommands(commands);
    nmClient.startContainerAsync(container, ctx);
}
Also used: HashMap (java.util.HashMap), HadoopKerberosName (org.apache.hadoop.security.HadoopKerberosName), ArrayList (java.util.ArrayList), ContainerLaunchContext (org.apache.hadoop.yarn.api.records.ContainerLaunchContext), IOException (java.io.IOException), LocalResource (org.apache.hadoop.yarn.api.records.LocalResource), FileSystem (org.apache.hadoop.fs.FileSystem)
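
Unlike Examples 63 and 64, which call the blocking NMClient.startContainer, this runnable hands the request to NMClientAsync.startContainerAsync and returns immediately; success or failure is reported later through the async client's callback handler. One small simplification worth noting: the append loop above leaves a trailing space in the command, which String.join avoids (an equivalent sketch, assuming the same vargs list and ctx):

// Equivalent command assembly without the trailing space left by the
// append loop; String.join accepts the List<CharSequence> directly.
List<String> commands = new ArrayList<>();
commands.add(String.join(" ", vargs));
ctx.setCommands(commands);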

Aggregations

ContainerLaunchContext (org.apache.hadoop.yarn.api.records.ContainerLaunchContext): 130 uses
Test (org.junit.Test): 57 uses
ArrayList (java.util.ArrayList): 54 uses
ContainerId (org.apache.hadoop.yarn.api.records.ContainerId): 52 uses
HashMap (java.util.HashMap): 50 uses
ApplicationId (org.apache.hadoop.yarn.api.records.ApplicationId): 50 uses
ApplicationSubmissionContext (org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext): 42 uses
LocalResource (org.apache.hadoop.yarn.api.records.LocalResource): 41 uses
StartContainerRequest (org.apache.hadoop.yarn.api.protocolrecords.StartContainerRequest): 40 uses
Path (org.apache.hadoop.fs.Path): 37 uses
StartContainersRequest (org.apache.hadoop.yarn.api.protocolrecords.StartContainersRequest): 37 uses
ByteBuffer (java.nio.ByteBuffer): 29 uses
Resource (org.apache.hadoop.yarn.api.records.Resource): 25 uses
YarnConfiguration (org.apache.hadoop.yarn.conf.YarnConfiguration): 25 uses
IOException (java.io.IOException): 24 uses
Credentials (org.apache.hadoop.security.Credentials): 23 uses
File (java.io.File): 22 uses
ApplicationAttemptId (org.apache.hadoop.yarn.api.records.ApplicationAttemptId): 22 uses
GetContainerStatusesRequest (org.apache.hadoop.yarn.api.protocolrecords.GetContainerStatusesRequest): 20 uses
YarnException (org.apache.hadoop.yarn.exceptions.YarnException): 20 uses