
Example 61 with DataOutputBuffer

Use of org.apache.hadoop.io.DataOutputBuffer in project hadoop by apache.

Class TaskAttemptImpl, method createCommonContainerLaunchContext.

/**
   * Create the common {@link ContainerLaunchContext} for all attempts.
   * @param applicationACLs 
   */
private static ContainerLaunchContext createCommonContainerLaunchContext(Map<ApplicationAccessType, String> applicationACLs, Configuration conf, Token<JobTokenIdentifier> jobToken, final org.apache.hadoop.mapred.JobID oldJobId, Credentials credentials) {
    // Application resources
    Map<String, LocalResource> localResources = new HashMap<String, LocalResource>();
    // Application environment
    Map<String, String> environment = new HashMap<String, String>();
    // Service data
    Map<String, ByteBuffer> serviceData = new HashMap<String, ByteBuffer>();
    // Tokens
    ByteBuffer taskCredentialsBuffer = ByteBuffer.wrap(new byte[] {});
    try {
        FileSystem remoteFS = FileSystem.get(conf);
        // //////////// Set up JobJar to be localized properly on the remote NM.
        String jobJar = conf.get(MRJobConfig.JAR);
        if (jobJar != null) {
            final Path jobJarPath = new Path(jobJar);
            final FileSystem jobJarFs = FileSystem.get(jobJarPath.toUri(), conf);
            Path remoteJobJar = jobJarPath.makeQualified(jobJarFs.getUri(), jobJarFs.getWorkingDirectory());
            LocalResource rc = createLocalResource(jobJarFs, remoteJobJar, LocalResourceType.PATTERN, LocalResourceVisibility.APPLICATION);
            String pattern = conf.getPattern(JobContext.JAR_UNPACK_PATTERN, JobConf.UNPACK_JAR_PATTERN_DEFAULT).pattern();
            rc.setPattern(pattern);
            localResources.put(MRJobConfig.JOB_JAR, rc);
            LOG.info("The job-jar file on the remote FS is " + remoteJobJar.toUri().toASCIIString());
        } else {
            // Job jar may be null. For e.g, for pipes, the job jar is the hadoop
            // mapreduce jar itself which is already on the classpath.
            LOG.info("Job jar is not present. " + "Not adding any jar to the list of resources.");
        }
        // //////////// End of JobJar setup
        // //////////// Set up JobConf to be localized properly on the remote NM.
        Path path = MRApps.getStagingAreaDir(conf, UserGroupInformation.getCurrentUser().getShortUserName());
        Path remoteJobSubmitDir = new Path(path, oldJobId.toString());
        Path remoteJobConfPath = new Path(remoteJobSubmitDir, MRJobConfig.JOB_CONF_FILE);
        localResources.put(MRJobConfig.JOB_CONF_FILE, createLocalResource(remoteFS, remoteJobConfPath, LocalResourceType.FILE, LocalResourceVisibility.APPLICATION));
        LOG.info("The job-conf file on the remote FS is " + remoteJobConfPath.toUri().toASCIIString());
        // //////////// End of JobConf setup
        // Setup DistributedCache
        MRApps.setupDistributedCache(conf, localResources);
        // Setup up task credentials buffer
        LOG.info("Adding #" + credentials.numberOfTokens() + " tokens and #" + credentials.numberOfSecretKeys() + " secret keys for NM use for launching container");
        Credentials taskCredentials = new Credentials(credentials);
        // LocalStorageToken is needed irrespective of whether security is enabled
        // or not.
        TokenCache.setJobToken(jobToken, taskCredentials);
        DataOutputBuffer containerTokens_dob = new DataOutputBuffer();
        LOG.info("Size of containertokens_dob is " + taskCredentials.numberOfTokens());
        taskCredentials.writeTokenStorageToStream(containerTokens_dob);
        taskCredentialsBuffer = ByteBuffer.wrap(containerTokens_dob.getData(), 0, containerTokens_dob.getLength());
        // Add shuffle secret key
        // The secret key is converted to a JobToken to preserve backwards
        // compatibility with an older ShuffleHandler running on an NM.
        LOG.info("Putting shuffle token in serviceData");
        byte[] shuffleSecret = TokenCache.getShuffleSecretKey(credentials);
        if (shuffleSecret == null) {
            LOG.warn("Cannot locate shuffle secret in credentials." + " Using job token as shuffle secret.");
            shuffleSecret = jobToken.getPassword();
        }
        Token<JobTokenIdentifier> shuffleToken = new Token<JobTokenIdentifier>(jobToken.getIdentifier(), shuffleSecret, jobToken.getKind(), jobToken.getService());
        serviceData.put(ShuffleHandler.MAPREDUCE_SHUFFLE_SERVICEID, ShuffleHandler.serializeServiceData(shuffleToken));
        // add external shuffle-providers - if any
        Collection<String> shuffleProviders = conf.getStringCollection(MRJobConfig.MAPREDUCE_JOB_SHUFFLE_PROVIDER_SERVICES);
        if (!shuffleProviders.isEmpty()) {
            Collection<String> auxNames = conf.getStringCollection(YarnConfiguration.NM_AUX_SERVICES);
            for (final String shuffleProvider : shuffleProviders) {
                if (shuffleProvider.equals(ShuffleHandler.MAPREDUCE_SHUFFLE_SERVICEID)) {
                    // skip built-in shuffle-provider that was already inserted with shuffle secret key
                    continue;
                }
                if (auxNames.contains(shuffleProvider)) {
                    LOG.info("Adding ShuffleProvider Service: " + shuffleProvider + " to serviceData");
                    // This only serves for INIT_APP notifications
                    // The shuffle service needs to be able to work with the host:port information provided by the AM
                    // (i.e. shuffle services which require custom location / other configuration are not supported)
                    serviceData.put(shuffleProvider, ByteBuffer.allocate(0));
                } else {
                    throw new YarnRuntimeException("ShuffleProvider Service: " + shuffleProvider + " was NOT found in the list of aux-services that are available in this NM." + " You may need to specify this ShuffleProvider as an aux-service in your yarn-site.xml");
                }
            }
        }
        MRApps.addToEnvironment(environment, Environment.CLASSPATH.name(), getInitialClasspath(conf), conf);
        if (initialAppClasspath != null) {
            MRApps.addToEnvironment(environment, Environment.APP_CLASSPATH.name(), initialAppClasspath, conf);
        }
    } catch (IOException e) {
        throw new YarnRuntimeException(e);
    }
    // Shell
    environment.put(Environment.SHELL.name(), conf.get(MRJobConfig.MAPRED_ADMIN_USER_SHELL, MRJobConfig.DEFAULT_SHELL));
    // Add pwd to LD_LIBRARY_PATH, add this before adding anything else
    MRApps.addToEnvironment(environment, Environment.LD_LIBRARY_PATH.name(), MRApps.crossPlatformifyMREnv(conf, Environment.PWD), conf);
    // Add the env variables passed by the admin
    MRApps.setEnvFromInputString(environment, conf.get(MRJobConfig.MAPRED_ADMIN_USER_ENV, MRJobConfig.DEFAULT_MAPRED_ADMIN_USER_ENV), conf);
    // Construct the actual Container
    // The null fields are per-container and will be constructed for each
    // container separately.
    ContainerLaunchContext container = ContainerLaunchContext.newInstance(localResources, environment, null, serviceData, taskCredentialsBuffer, applicationACLs);
    return container;
}
Also used: Path (org.apache.hadoop.fs.Path), HashMap (java.util.HashMap), JobTokenIdentifier (org.apache.hadoop.mapreduce.security.token.JobTokenIdentifier), Token (org.apache.hadoop.security.token.Token), IOException (java.io.IOException), ContainerLaunchContext (org.apache.hadoop.yarn.api.records.ContainerLaunchContext), ByteBuffer (java.nio.ByteBuffer), LocalResource (org.apache.hadoop.yarn.api.records.LocalResource), YarnRuntimeException (org.apache.hadoop.yarn.exceptions.YarnRuntimeException), FileSystem (org.apache.hadoop.fs.FileSystem), DataOutputBuffer (org.apache.hadoop.io.DataOutputBuffer), Credentials (org.apache.hadoop.security.Credentials)
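
The core DataOutputBuffer pattern in this example, serializing a Credentials object into the ByteBuffer that ContainerLaunchContext expects for its tokens field, reduces to a few lines. The sketch below is a minimal, standalone version (the class and method names are illustrative, not taken from the Hadoop source); note that the buffer is wrapped over getData() with an explicit getLength(), because DataOutputBuffer exposes its whole backing array, which may be larger than the bytes actually written.

import java.io.IOException;
import java.nio.ByteBuffer;
import org.apache.hadoop.io.DataOutputBuffer;
import org.apache.hadoop.security.Credentials;

public final class TokenBufferSketch {
    // Serialize credentials into the token-buffer format used above.
    public static ByteBuffer toTokenBuffer(Credentials credentials) throws IOException {
        DataOutputBuffer dob = new DataOutputBuffer();
        credentials.writeTokenStorageToStream(dob);
        // Wrap only the bytes actually written, not the full backing array.
        return ByteBuffer.wrap(dob.getData(), 0, dob.getLength());
    }
}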

Example 62 with DataOutputBuffer

Use of org.apache.hadoop.io.DataOutputBuffer in project alluxio by Alluxio.

Class ApplicationMaster, method start.

/**
   * Starts the application master.
   *
   * @throws IOException if registering the application master fails due to an IO error
   * @throws YarnException if registering the application master fails due to an internal Yarn error
   */
public void start() throws IOException, YarnException {
    if (UserGroupInformation.isSecurityEnabled()) {
        Credentials credentials = UserGroupInformation.getCurrentUser().getCredentials();
        DataOutputBuffer credentialsBuffer = new DataOutputBuffer();
        credentials.writeTokenStorageToStream(credentialsBuffer);
        // Now remove the AM -> RM token so that containers cannot access it.
        Iterator<Token<?>> iter = credentials.getAllTokens().iterator();
        while (iter.hasNext()) {
            Token<?> token = iter.next();
            if (token.getKind().equals(AMRMTokenIdentifier.KIND_NAME)) {
                iter.remove();
            }
        }
        mAllTokens = ByteBuffer.wrap(credentialsBuffer.getData(), 0, credentialsBuffer.getLength());
    }
    mNMClient.init(mYarnConf);
    mNMClient.start();
    mRMClient.init(mYarnConf);
    mRMClient.start();
    mYarnClient.init(mYarnConf);
    mYarnClient.start();
    // Register with ResourceManager
    String hostname = NetworkAddressUtils.getLocalHostName();
    mRMClient.registerApplicationMaster(hostname, 0 /* port */, "");
    LOG.info("ApplicationMaster registered");
}
Also used: DataOutputBuffer (org.apache.hadoop.io.DataOutputBuffer), Token (org.apache.hadoop.security.token.Token), Credentials (org.apache.hadoop.security.Credentials)
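
The reverse direction, turning such a ByteBuffer back into a Credentials object (for example inside a launched container, or in a test that checks the AM-RM token really was removed), uses DataInputBuffer. A minimal sketch, assuming the buffer was produced as in the example above:

import java.io.IOException;
import java.nio.ByteBuffer;
import org.apache.hadoop.io.DataInputBuffer;
import org.apache.hadoop.security.Credentials;

public static Credentials fromTokenBuffer(ByteBuffer buffer) throws IOException {
    // Copy the remaining bytes out of the ByteBuffer; duplicate() leaves the caller's position untouched.
    byte[] bytes = new byte[buffer.remaining()];
    buffer.duplicate().get(bytes);
    DataInputBuffer dib = new DataInputBuffer();
    dib.reset(bytes, bytes.length);
    Credentials credentials = new Credentials();
    // DataInputBuffer extends DataInputStream, so it can be passed to readTokenStorageStream directly.
    credentials.readTokenStorageStream(dib);
    return credentials;
}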

Example 63 with DataOutputBuffer

Use of org.apache.hadoop.io.DataOutputBuffer in project alluxio by Alluxio.

Class Client, method setupContainerLaunchContext.

private void setupContainerLaunchContext() throws IOException, YarnException {
    Map<String, String> applicationMasterArgs = ImmutableMap.<String, String>of("-num_workers", Integer.toString(mNumWorkers), "-master_address", mMasterAddress, "-resource_path", mResourcePath);
    final String amCommand = YarnUtils.buildCommand(YarnContainerType.APPLICATION_MASTER, applicationMasterArgs);
    System.out.println("ApplicationMaster command: " + amCommand);
    mAmContainer.setCommands(Collections.singletonList(amCommand));
    // Setup local resources
    Map<String, LocalResource> localResources = new HashMap<String, LocalResource>();
    localResources.put("alluxio.tar.gz", YarnUtils.createLocalResourceOfFile(mYarnConf, mResourcePath + "/alluxio.tar.gz"));
    localResources.put("alluxio-yarn-setup.sh", YarnUtils.createLocalResourceOfFile(mYarnConf, mResourcePath + "/alluxio-yarn-setup.sh"));
    localResources.put("alluxio.jar", YarnUtils.createLocalResourceOfFile(mYarnConf, mResourcePath + "/alluxio.jar"));
    mAmContainer.setLocalResources(localResources);
    // Setup CLASSPATH for ApplicationMaster
    Map<String, String> appMasterEnv = new HashMap<String, String>();
    setupAppMasterEnv(appMasterEnv);
    mAmContainer.setEnvironment(appMasterEnv);
    // Set up security tokens for launching our ApplicationMaster container.
    if (UserGroupInformation.isSecurityEnabled()) {
        Credentials credentials = new Credentials();
        String tokenRenewer = mYarnConf.get(YarnConfiguration.RM_PRINCIPAL);
        if (tokenRenewer == null || tokenRenewer.length() == 0) {
            throw new IOException("Can't get Master Kerberos principal for the RM to use as renewer");
        }
        org.apache.hadoop.fs.FileSystem fs = org.apache.hadoop.fs.FileSystem.get(mYarnConf);
        // getting tokens for the default file-system.
        final Token<?>[] tokens = fs.addDelegationTokens(tokenRenewer, credentials);
        if (tokens != null) {
            for (Token<?> token : tokens) {
                LOG.info("Got dt for " + fs.getUri() + "; " + token);
            }
        }
        // getting yarn resource manager token
        org.apache.hadoop.conf.Configuration config = mYarnClient.getConfig();
        Token<TokenIdentifier> token = ConverterUtils.convertFromYarn(mYarnClient.getRMDelegationToken(new org.apache.hadoop.io.Text(tokenRenewer)), ClientRMProxy.getRMDelegationTokenService(config));
        LOG.info("Added RM delegation token: " + token);
        credentials.addToken(token.getService(), token);
        DataOutputBuffer dob = new DataOutputBuffer();
        credentials.writeTokenStorageToStream(dob);
        ByteBuffer buffer = ByteBuffer.wrap(dob.getData(), 0, dob.getLength());
        mAmContainer.setTokens(buffer);
    }
}
Also used: TokenIdentifier (org.apache.hadoop.security.token.TokenIdentifier), HashMap (java.util.HashMap), Token (org.apache.hadoop.security.token.Token), IOException (java.io.IOException), ByteBuffer (java.nio.ByteBuffer), LocalResource (org.apache.hadoop.yarn.api.records.LocalResource), DataOutputBuffer (org.apache.hadoop.io.DataOutputBuffer), Credentials (org.apache.hadoop.security.Credentials)
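
If the application also needs to read from a second file system, the same pattern extends by collecting additional delegation tokens into the same Credentials object before it is serialized with DataOutputBuffer. A sketch, reusing the tokenRenewer and credentials variables from the example and a purely hypothetical cluster URI:

// Hypothetical second cluster; not part of the Alluxio example above.
org.apache.hadoop.fs.FileSystem otherFs =
        org.apache.hadoop.fs.FileSystem.get(java.net.URI.create("hdfs://backup-cluster:8020"), mYarnConf);
// Appends that file system's delegation token(s) into the same Credentials object.
otherFs.addDelegationTokens(tokenRenewer, credentials);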

Example 64 with DataOutputBuffer

Use of org.apache.hadoop.io.DataOutputBuffer in project apex-core by apache.

Class LaunchContainerRunnable, method getTokens.

public static ByteBuffer getTokens(UserGroupInformation ugi, Token<StramDelegationTokenIdentifier> delegationToken) {
    try {
        Collection<Token<? extends TokenIdentifier>> tokens = ugi.getCredentials().getAllTokens();
        Credentials credentials = new Credentials();
        for (Token<? extends TokenIdentifier> token : tokens) {
            if (!token.getKind().equals(AMRMTokenIdentifier.KIND_NAME)) {
                credentials.addToken(token.getService(), token);
                LOG.debug("Passing container token {}", token);
            }
        }
        credentials.addToken(delegationToken.getService(), delegationToken);
        DataOutputBuffer dataOutput = new DataOutputBuffer();
        credentials.writeTokenStorageToStream(dataOutput);
        byte[] tokenBytes = dataOutput.getData();
        ByteBuffer cTokenBuf = ByteBuffer.wrap(tokenBytes);
        return cTokenBuf.duplicate();
    } catch (IOException e) {
        throw new RuntimeException("Error generating delegation token", e);
    }
}
Also used: AMRMTokenIdentifier (org.apache.hadoop.yarn.security.AMRMTokenIdentifier), TokenIdentifier (org.apache.hadoop.security.token.TokenIdentifier), StramDelegationTokenIdentifier (com.datatorrent.stram.security.StramDelegationTokenIdentifier), DataOutputBuffer (org.apache.hadoop.io.DataOutputBuffer), Token (org.apache.hadoop.security.token.Token), IOException (java.io.IOException), ByteBuffer (java.nio.ByteBuffer), Credentials (org.apache.hadoop.security.Credentials)
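
One detail worth flagging here: ByteBuffer.wrap(tokenBytes) wraps the entire backing array returned by getData(), which may be longer than the number of bytes actually written. The Hadoop and Alluxio examples above avoid that trailing slack by passing an explicit length; a sketch of the equivalent call in this method would be:

// Wrap only dataOutput.getLength() bytes rather than the whole backing array.
ByteBuffer cTokenBuf = ByteBuffer.wrap(dataOutput.getData(), 0, dataOutput.getLength());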

Example 65 with DataOutputBuffer

Use of org.apache.hadoop.io.DataOutputBuffer in project gora by apache.

Class TestIOUtils, method testNullFieldsWith.

private void testNullFieldsWith(Object... values) throws IOException {
    DataOutputBuffer out = new DataOutputBuffer();
    DataInputBuffer in = new DataInputBuffer();
    IOUtils.writeNullFieldsInfo(out, values);
    in.reset(out.getData(), out.getLength());
    boolean[] ret = IOUtils.readNullFieldsInfo(in);
    //assert
    assertEquals(values.length, ret.length);
    for (int i = 0; i < values.length; i++) {
        assertEquals(values[i] == null, ret[i]);
    }
}
Also used: DataInputBuffer (org.apache.hadoop.io.DataInputBuffer), DataOutputBuffer (org.apache.hadoop.io.DataOutputBuffer)
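
The same write-then-reset round trip works for any Writable, not just the null-fields helper used in the Gora test. A minimal, self-contained sketch (not taken from the Gora sources):

import org.apache.hadoop.io.DataInputBuffer;
import org.apache.hadoop.io.DataOutputBuffer;
import org.apache.hadoop.io.Text;

DataOutputBuffer out = new DataOutputBuffer();
new Text("hello").write(out);

DataInputBuffer in = new DataInputBuffer();
// reset() points the input buffer at the bytes written so far.
in.reset(out.getData(), out.getLength());

Text roundTripped = new Text();
roundTripped.readFields(in);
// roundTripped now holds "hello".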

Aggregations

DataOutputBuffer (org.apache.hadoop.io.DataOutputBuffer): 132
Test (org.junit.Test): 48
Credentials (org.apache.hadoop.security.Credentials): 37
ByteBuffer (java.nio.ByteBuffer): 36
DataInputBuffer (org.apache.hadoop.io.DataInputBuffer): 36
IOException (java.io.IOException): 34
Configuration (org.apache.hadoop.conf.Configuration): 25
Token (org.apache.hadoop.security.token.Token): 25
Path (org.apache.hadoop.fs.Path): 21
HashMap (java.util.HashMap): 20
ApplicationId (org.apache.hadoop.yarn.api.records.ApplicationId): 20
ContainerLaunchContext (org.apache.hadoop.yarn.api.records.ContainerLaunchContext): 18
LocalResource (org.apache.hadoop.yarn.api.records.LocalResource): 16
YarnConfiguration (org.apache.hadoop.yarn.conf.YarnConfiguration): 16
Random (java.util.Random): 15
DataInputStream (java.io.DataInputStream): 14
Text (org.apache.hadoop.io.Text): 14
ArrayList (java.util.ArrayList): 13
Map (java.util.Map): 10
FileSystem (org.apache.hadoop.fs.FileSystem): 10