
Example 46 with AccessControlList

Use of org.apache.hadoop.security.authorize.AccessControlList in project hadoop by apache.

The class AdminService, method getAdminAclList.

private AccessControlList getAdminAclList(Configuration conf) {
    AccessControlList aclList = new AccessControlList(conf.get(YarnConfiguration.YARN_ADMIN_ACL, YarnConfiguration.DEFAULT_YARN_ADMIN_ACL));
    aclList.addUser(daemonUser.getShortUserName());
    return aclList;
}
Also used : AccessControlList(org.apache.hadoop.security.authorize.AccessControlList)
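
As a quick companion to the snippet above, the sketch below (not part of the Hadoop source; the user and group names are made up, and "yarn" stands in for daemonUser.getShortUserName()) shows how an AccessControlList built this way behaves once the daemon user has been added.

// Minimal standalone sketch, assuming illustrative user and group names.
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.authorize.AccessControlList;

public class AdminAclSketch {
    public static void main(String[] args) {
        // ACL string format is "users groups"; "yarn" plays the daemon user.
        AccessControlList acl = new AccessControlList("alice,bob admins");
        acl.addUser("yarn");
        UserGroupInformation alice = UserGroupInformation.createRemoteUser("alice");
        UserGroupInformation mallory = UserGroupInformation.createRemoteUser("mallory");
        System.out.println(acl.isUserAllowed(alice));   // true: listed explicitly
        System.out.println(acl.isUserAllowed(mallory)); // false: neither a listed user nor in a listed group
        System.out.println(acl.getAclString());         // serialized "users groups" form
    }
}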

Example 47 with AccessControlList

Use of org.apache.hadoop.security.authorize.AccessControlList in project hadoop by apache.

The class TestGetImageServlet, method testIsValidRequestor.

@Test
public void testIsValidRequestor() throws IOException {
    Configuration conf = new HdfsConfiguration();
    KerberosName.setRules("RULE:[1:$1]\nRULE:[2:$1]");
    // Set up generic HA configs.
    conf.set(DFSConfigKeys.DFS_NAMESERVICES, "ns1");
    conf.set(DFSUtil.addKeySuffixes(DFSConfigKeys.DFS_HA_NAMENODES_KEY_PREFIX, "ns1"), "nn1,nn2");
    // Set up NN1 HA configs.
    conf.set(DFSUtil.addKeySuffixes(DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY, "ns1", "nn1"), "host1:1234");
    conf.set(DFSUtil.addKeySuffixes(DFSConfigKeys.DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY, "ns1", "nn1"), "hdfs/_HOST@TEST-REALM.COM");
    // Set up NN2 HA configs.
    conf.set(DFSUtil.addKeySuffixes(DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY, "ns1", "nn2"), "host2:1234");
    conf.set(DFSUtil.addKeySuffixes(DFSConfigKeys.DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY, "ns1", "nn2"), "hdfs/_HOST@TEST-REALM.COM");
    // Initialize this conf object as though we're running on NN1.
    NameNode.initializeGenericKeys(conf, "ns1", "nn1");
    AccessControlList acls = Mockito.mock(AccessControlList.class);
    Mockito.when(acls.isUserAllowed(Mockito.<UserGroupInformation>any())).thenReturn(false);
    ServletContext context = Mockito.mock(ServletContext.class);
    Mockito.when(context.getAttribute(HttpServer2.ADMINS_ACL)).thenReturn(acls);
    // Make sure that NN2 is considered a valid fsimage/edits requestor.
    assertTrue(ImageServlet.isValidRequestor(context, "hdfs/host2@TEST-REALM.COM", conf));
    // Mark atm as an admin.
    Mockito.when(acls.isUserAllowed(Mockito.argThat(new ArgumentMatcher<UserGroupInformation>() {

        @Override
        public boolean matches(Object argument) {
            return ((UserGroupInformation) argument).getShortUserName().equals("atm");
        }
    }))).thenReturn(true);
    // Make sure that NN2 is still considered a valid requestor.
    assertTrue(ImageServlet.isValidRequestor(context, "hdfs/host2@TEST-REALM.COM", conf));
    // Make sure an admin is considered a valid requestor.
    assertTrue(ImageServlet.isValidRequestor(context, "atm@TEST-REALM.COM", conf));
    // Make sure other users are *not* considered valid requestors.
    assertFalse(ImageServlet.isValidRequestor(context, "todd@TEST-REALM.COM", conf));
}
Also used : AccessControlList(org.apache.hadoop.security.authorize.AccessControlList) HdfsConfiguration(org.apache.hadoop.hdfs.HdfsConfiguration) Configuration(org.apache.hadoop.conf.Configuration) ArgumentMatcher(org.mockito.ArgumentMatcher) ServletContext(javax.servlet.ServletContext) Test(org.junit.Test)
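
One detail the test relies on is the auth_to_local mapping configured via KerberosName.setRules. The short standalone sketch below (an illustration, not Hadoop source) shows how those rules reduce the Kerberos principals used in the assertions to short names such as "hdfs" and "atm", which is what the admin-ACL matcher on getShortUserName() sees.

// Minimal sketch of the principal-to-short-name mapping assumed by the test above.
import org.apache.hadoop.security.authentication.util.KerberosName;

public class PrincipalMappingSketch {
    public static void main(String[] args) throws Exception {
        // Same rules as in the test: keep the first component of one- and two-part principals.
        KerberosName.setRules("RULE:[1:$1]\nRULE:[2:$1]");
        System.out.println(new KerberosName("hdfs/host2@TEST-REALM.COM").getShortName()); // hdfs
        System.out.println(new KerberosName("atm@TEST-REALM.COM").getShortName());        // atm
    }
}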

Example 48 with AccessControlList

Use of org.apache.hadoop.security.authorize.AccessControlList in project hadoop by apache.

The class MockJobs, method newJob.

public static Job newJob(ApplicationId appID, int i, int n, int m, Path confFile, boolean hasFailedTasks) {
    final JobId id = newJobID(appID, i);
    final String name = newJobName();
    final JobReport report = newJobReport(id);
    final Map<TaskId, Task> tasks = newTasks(id, n, m, hasFailedTasks);
    final TaskCount taskCount = getTaskCount(tasks.values());
    final Counters counters = getCounters(tasks.values());
    final Path configFile = confFile;
    Map<JobACL, AccessControlList> tmpJobACLs = new HashMap<JobACL, AccessControlList>();
    final Configuration conf = new Configuration();
    conf.set(JobACL.VIEW_JOB.getAclName(), "testuser");
    conf.setBoolean(MRConfig.MR_ACLS_ENABLED, true);
    JobACLsManager aclsManager = new JobACLsManager(conf);
    tmpJobACLs = aclsManager.constructJobACLs(conf);
    final Map<JobACL, AccessControlList> jobACLs = tmpJobACLs;
    return new Job() {

        @Override
        public JobId getID() {
            return id;
        }

        @Override
        public String getName() {
            return name;
        }

        @Override
        public JobState getState() {
            return report.getJobState();
        }

        @Override
        public JobReport getReport() {
            return report;
        }

        @Override
        public float getProgress() {
            return 0;
        }

        @Override
        public Counters getAllCounters() {
            return counters;
        }

        @Override
        public Map<TaskId, Task> getTasks() {
            return tasks;
        }

        @Override
        public Task getTask(TaskId taskID) {
            return tasks.get(taskID);
        }

        @Override
        public int getTotalMaps() {
            return taskCount.maps;
        }

        @Override
        public int getTotalReduces() {
            return taskCount.reduces;
        }

        @Override
        public int getCompletedMaps() {
            return taskCount.completedMaps;
        }

        @Override
        public int getCompletedReduces() {
            return taskCount.completedReduces;
        }

        @Override
        public boolean isUber() {
            return false;
        }

        @Override
        public TaskAttemptCompletionEvent[] getTaskAttemptCompletionEvents(int fromEventId, int maxEvents) {
            return null;
        }

        @Override
        public TaskCompletionEvent[] getMapAttemptCompletionEvents(int startIndex, int maxEvents) {
            return null;
        }

        @Override
        public Map<TaskId, Task> getTasks(TaskType taskType) {
            throw new UnsupportedOperationException("Not supported yet.");
        }

        @Override
        public List<String> getDiagnostics() {
            return Collections.<String>emptyList();
        }

        @Override
        public boolean checkAccess(UserGroupInformation callerUGI, JobACL jobOperation) {
            return true;
        }

        @Override
        public String getUserName() {
            return "mock";
        }

        @Override
        public String getQueueName() {
            return "mockqueue";
        }

        @Override
        public Path getConfFile() {
            return configFile;
        }

        @Override
        public Map<JobACL, AccessControlList> getJobACLs() {
            return jobACLs;
        }

        @Override
        public List<AMInfo> getAMInfos() {
            List<AMInfo> amInfoList = new LinkedList<AMInfo>();
            amInfoList.add(createAMInfo(1));
            amInfoList.add(createAMInfo(2));
            return amInfoList;
        }

        @Override
        public Configuration loadConfFile() throws IOException {
            FileContext fc = FileContext.getFileContext(configFile.toUri(), conf);
            Configuration jobConf = new Configuration(false);
            jobConf.addResource(fc.open(configFile), configFile.toString());
            return jobConf;
        }

        @Override
        public void setQueueName(String queueName) {
        // do nothing
        }

        @Override
        public void setJobPriority(Priority priority) {
        // do nothing
        }
    };
}
Also used : AccessControlList(org.apache.hadoop.security.authorize.AccessControlList) Task(org.apache.hadoop.mapreduce.v2.app.job.Task) TaskId(org.apache.hadoop.mapreduce.v2.api.records.TaskId) Configuration(org.apache.hadoop.conf.Configuration) HashMap(java.util.HashMap) TaskAttemptCompletionEvent(org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptCompletionEvent) JobReport(org.apache.hadoop.mapreduce.v2.api.records.JobReport) TaskCompletionEvent(org.apache.hadoop.mapred.TaskCompletionEvent) JobACLsManager(org.apache.hadoop.mapred.JobACLsManager) TaskType(org.apache.hadoop.mapreduce.v2.api.records.TaskType) Job(org.apache.hadoop.mapreduce.v2.app.job.Job) JobId(org.apache.hadoop.mapreduce.v2.api.records.JobId) UserGroupInformation(org.apache.hadoop.security.UserGroupInformation) Path(org.apache.hadoop.fs.Path) Priority(org.apache.hadoop.yarn.api.records.Priority) LinkedList(java.util.LinkedList) AMInfo(org.apache.hadoop.mapreduce.v2.api.records.AMInfo) Counters(org.apache.hadoop.mapreduce.Counters) JobACL(org.apache.hadoop.mapreduce.JobACL) FileContext(org.apache.hadoop.fs.FileContext)
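
For context on the ACL wiring above: constructJobACLs turns job configuration entries such as mapreduce.job.acl-view-job into one AccessControlList per JobACL operation. The sketch below (standalone, with an illustrative caller and job owner) shows roughly how such a map would then be consulted; the mock's checkAccess simply short-circuits to true instead.

// Minimal standalone sketch; caller and owner names are illustrative.
import java.util.Map;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapred.JobACLsManager;
import org.apache.hadoop.mapreduce.JobACL;
import org.apache.hadoop.mapreduce.MRConfig;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.authorize.AccessControlList;

public class JobAclSketch {
    public static void main(String[] args) {
        Configuration conf = new Configuration();
        conf.setBoolean(MRConfig.MR_ACLS_ENABLED, true);
        conf.set(JobACL.VIEW_JOB.getAclName(), "testuser");

        JobACLsManager aclsManager = new JobACLsManager(conf);
        Map<JobACL, AccessControlList> jobACLs = aclsManager.constructJobACLs(conf);

        UserGroupInformation caller = UserGroupInformation.createRemoteUser("testuser");
        // The job owner always passes; other callers must match the VIEW_JOB ACL.
        boolean allowed = aclsManager.checkAccess(caller, JobACL.VIEW_JOB,
                "mock" /* job owner, as in the mock above */, jobACLs.get(JobACL.VIEW_JOB));
        System.out.println("view allowed: " + allowed);
    }
}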

Example 49 with AccessControlList

Use of org.apache.hadoop.security.authorize.AccessControlList in project hadoop by apache.

The class JHEventHandlerForSigtermTest, method testTimelineEventHandling.

// Have JobHistoryEventHandler handle some events and make sure they get
// stored to the Timeline store
@Test(timeout = 50000)
public void testTimelineEventHandling() throws Exception {
    TestParams t = new TestParams(RunningAppContext.class, false);
    Configuration conf = new YarnConfiguration();
    conf.setBoolean(YarnConfiguration.TIMELINE_SERVICE_ENABLED, true);
    long currentTime = System.currentTimeMillis();
    try (MiniYARNCluster yarnCluster = new MiniYARNCluster(TestJobHistoryEventHandler.class.getSimpleName(), 1, 1, 1, 1)) {
        yarnCluster.init(conf);
        yarnCluster.start();
        Configuration confJHEH = new YarnConfiguration(conf);
        confJHEH.setBoolean(MRJobConfig.MAPREDUCE_JOB_EMIT_TIMELINE_DATA, true);
        confJHEH.set(YarnConfiguration.TIMELINE_SERVICE_WEBAPP_ADDRESS, MiniYARNCluster.getHostname() + ":" + yarnCluster.getApplicationHistoryServer().getPort());
        JHEvenHandlerForTest jheh = new JHEvenHandlerForTest(t.mockAppContext, 0);
        jheh.init(confJHEH);
        jheh.start();
        TimelineStore ts = yarnCluster.getApplicationHistoryServer().getTimelineStore();
        handleEvent(jheh, new JobHistoryEvent(t.jobId, new AMStartedEvent(t.appAttemptId, 200, t.containerId, "nmhost", 3000, 4000, -1), currentTime - 10));
        TimelineEntities entities = ts.getEntities("MAPREDUCE_JOB", null, null, null, null, null, null, null, null, null);
        Assert.assertEquals(1, entities.getEntities().size());
        TimelineEntity tEntity = entities.getEntities().get(0);
        Assert.assertEquals(t.jobId.toString(), tEntity.getEntityId());
        Assert.assertEquals(1, tEntity.getEvents().size());
        Assert.assertEquals(EventType.AM_STARTED.toString(), tEntity.getEvents().get(0).getEventType());
        Assert.assertEquals(currentTime - 10, tEntity.getEvents().get(0).getTimestamp());
        handleEvent(jheh, new JobHistoryEvent(t.jobId, new JobSubmittedEvent(TypeConverter.fromYarn(t.jobId), "name", "user", 200, "/foo/job.xml", new HashMap<JobACL, AccessControlList>(), "default"), currentTime + 10));
        entities = ts.getEntities("MAPREDUCE_JOB", null, null, null, null, null, null, null, null, null);
        Assert.assertEquals(1, entities.getEntities().size());
        tEntity = entities.getEntities().get(0);
        Assert.assertEquals(t.jobId.toString(), tEntity.getEntityId());
        Assert.assertEquals(2, tEntity.getEvents().size());
        Assert.assertEquals(EventType.JOB_SUBMITTED.toString(), tEntity.getEvents().get(0).getEventType());
        Assert.assertEquals(EventType.AM_STARTED.toString(), tEntity.getEvents().get(1).getEventType());
        Assert.assertEquals(currentTime + 10, tEntity.getEvents().get(0).getTimestamp());
        Assert.assertEquals(currentTime - 10, tEntity.getEvents().get(1).getTimestamp());
        handleEvent(jheh, new JobHistoryEvent(t.jobId, new JobQueueChangeEvent(TypeConverter.fromYarn(t.jobId), "q2"), currentTime - 20));
        entities = ts.getEntities("MAPREDUCE_JOB", null, null, null, null, null, null, null, null, null);
        Assert.assertEquals(1, entities.getEntities().size());
        tEntity = entities.getEntities().get(0);
        Assert.assertEquals(t.jobId.toString(), tEntity.getEntityId());
        Assert.assertEquals(3, tEntity.getEvents().size());
        Assert.assertEquals(EventType.JOB_SUBMITTED.toString(), tEntity.getEvents().get(0).getEventType());
        Assert.assertEquals(EventType.AM_STARTED.toString(), tEntity.getEvents().get(1).getEventType());
        Assert.assertEquals(EventType.JOB_QUEUE_CHANGED.toString(), tEntity.getEvents().get(2).getEventType());
        Assert.assertEquals(currentTime + 10, tEntity.getEvents().get(0).getTimestamp());
        Assert.assertEquals(currentTime - 10, tEntity.getEvents().get(1).getTimestamp());
        Assert.assertEquals(currentTime - 20, tEntity.getEvents().get(2).getTimestamp());
        handleEvent(jheh, new JobHistoryEvent(t.jobId, new JobFinishedEvent(TypeConverter.fromYarn(t.jobId), 0, 0, 0, 0, 0, new Counters(), new Counters(), new Counters()), currentTime));
        entities = ts.getEntities("MAPREDUCE_JOB", null, null, null, null, null, null, null, null, null);
        Assert.assertEquals(1, entities.getEntities().size());
        tEntity = entities.getEntities().get(0);
        Assert.assertEquals(t.jobId.toString(), tEntity.getEntityId());
        Assert.assertEquals(4, tEntity.getEvents().size());
        Assert.assertEquals(EventType.JOB_SUBMITTED.toString(), tEntity.getEvents().get(0).getEventType());
        Assert.assertEquals(EventType.JOB_FINISHED.toString(), tEntity.getEvents().get(1).getEventType());
        Assert.assertEquals(EventType.AM_STARTED.toString(), tEntity.getEvents().get(2).getEventType());
        Assert.assertEquals(EventType.JOB_QUEUE_CHANGED.toString(), tEntity.getEvents().get(3).getEventType());
        Assert.assertEquals(currentTime + 10, tEntity.getEvents().get(0).getTimestamp());
        Assert.assertEquals(currentTime, tEntity.getEvents().get(1).getTimestamp());
        Assert.assertEquals(currentTime - 10, tEntity.getEvents().get(2).getTimestamp());
        Assert.assertEquals(currentTime - 20, tEntity.getEvents().get(3).getTimestamp());
        handleEvent(jheh, new JobHistoryEvent(t.jobId, new JobUnsuccessfulCompletionEvent(TypeConverter.fromYarn(t.jobId), 0, 0, 0, JobStateInternal.KILLED.toString()), currentTime + 20));
        entities = ts.getEntities("MAPREDUCE_JOB", null, null, null, null, null, null, null, null, null);
        Assert.assertEquals(1, entities.getEntities().size());
        tEntity = entities.getEntities().get(0);
        Assert.assertEquals(t.jobId.toString(), tEntity.getEntityId());
        Assert.assertEquals(5, tEntity.getEvents().size());
        Assert.assertEquals(EventType.JOB_KILLED.toString(), tEntity.getEvents().get(0).getEventType());
        Assert.assertEquals(EventType.JOB_SUBMITTED.toString(), tEntity.getEvents().get(1).getEventType());
        Assert.assertEquals(EventType.JOB_FINISHED.toString(), tEntity.getEvents().get(2).getEventType());
        Assert.assertEquals(EventType.AM_STARTED.toString(), tEntity.getEvents().get(3).getEventType());
        Assert.assertEquals(EventType.JOB_QUEUE_CHANGED.toString(), tEntity.getEvents().get(4).getEventType());
        Assert.assertEquals(currentTime + 20, tEntity.getEvents().get(0).getTimestamp());
        Assert.assertEquals(currentTime + 10, tEntity.getEvents().get(1).getTimestamp());
        Assert.assertEquals(currentTime, tEntity.getEvents().get(2).getTimestamp());
        Assert.assertEquals(currentTime - 10, tEntity.getEvents().get(3).getTimestamp());
        Assert.assertEquals(currentTime - 20, tEntity.getEvents().get(4).getTimestamp());
        handleEvent(jheh, new JobHistoryEvent(t.jobId, new TaskStartedEvent(t.taskID, 0, TaskType.MAP, "")));
        entities = ts.getEntities("MAPREDUCE_TASK", null, null, null, null, null, null, null, null, null);
        Assert.assertEquals(1, entities.getEntities().size());
        tEntity = entities.getEntities().get(0);
        Assert.assertEquals(t.taskID.toString(), tEntity.getEntityId());
        Assert.assertEquals(1, tEntity.getEvents().size());
        Assert.assertEquals(EventType.TASK_STARTED.toString(), tEntity.getEvents().get(0).getEventType());
        Assert.assertEquals(TaskType.MAP.toString(), tEntity.getEvents().get(0).getEventInfo().get("TASK_TYPE"));
        handleEvent(jheh, new JobHistoryEvent(t.jobId, new TaskStartedEvent(t.taskID, 0, TaskType.REDUCE, "")));
        entities = ts.getEntities("MAPREDUCE_TASK", null, null, null, null, null, null, null, null, null);
        Assert.assertEquals(1, entities.getEntities().size());
        tEntity = entities.getEntities().get(0);
        Assert.assertEquals(t.taskID.toString(), tEntity.getEntityId());
        Assert.assertEquals(2, tEntity.getEvents().size());
        Assert.assertEquals(EventType.TASK_STARTED.toString(), tEntity.getEvents().get(1).getEventType());
        Assert.assertEquals(TaskType.REDUCE.toString(), tEntity.getEvents().get(0).getEventInfo().get("TASK_TYPE"));
        Assert.assertEquals(TaskType.MAP.toString(), tEntity.getEvents().get(1).getEventInfo().get("TASK_TYPE"));
    }
}
Also used : AccessControlList(org.apache.hadoop.security.authorize.AccessControlList) Configuration(org.apache.hadoop.conf.Configuration) HdfsConfiguration(org.apache.hadoop.hdfs.HdfsConfiguration) YarnConfiguration(org.apache.hadoop.yarn.conf.YarnConfiguration) TimelineEntity(org.apache.hadoop.yarn.api.records.timeline.TimelineEntity) TimelineEntities(org.apache.hadoop.yarn.api.records.timeline.TimelineEntities) Counters(org.apache.hadoop.mapreduce.Counters) MiniYARNCluster(org.apache.hadoop.yarn.server.MiniYARNCluster) TimelineStore(org.apache.hadoop.yarn.server.timeline.TimelineStore) JobACL(org.apache.hadoop.mapreduce.JobACL) Test(org.junit.Test)
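
The JobSubmittedEvent handled above is where the JobACL map enters the job-history stream; the test passes an empty map. The sketch below (standalone; the job ID, names, and ACL contents are illustrative) shows the same constructor with a non-empty ACL map.

// Minimal standalone sketch; all values are illustrative.
import java.util.HashMap;
import java.util.Map;
import org.apache.hadoop.mapreduce.JobACL;
import org.apache.hadoop.mapreduce.JobID;
import org.apache.hadoop.mapreduce.jobhistory.JobSubmittedEvent;
import org.apache.hadoop.security.authorize.AccessControlList;

public class JobSubmittedEventSketch {
    public static void main(String[] args) {
        Map<JobACL, AccessControlList> jobACLs = new HashMap<JobACL, AccessControlList>();
        jobACLs.put(JobACL.VIEW_JOB, new AccessControlList("user1,user2 viewers"));

        JobSubmittedEvent event = new JobSubmittedEvent(
                new JobID("jt", 1), "name", "user", System.currentTimeMillis(),
                "/foo/job.xml", jobACLs, "default");
        System.out.println(event.getEventType()); // JOB_SUBMITTED
    }
}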

Example 50 with AccessControlList

Use of org.apache.hadoop.security.authorize.AccessControlList in project hadoop by apache.

The class JobSubmitter, method submitJobInternal.

/**
   * Internal method for submitting jobs to the system.
   * 
   * <p>The job submission process involves:
   * <ol>
   *   <li>
   *   Checking the input and output specifications of the job.
   *   </li>
   *   <li>
   *   Computing the {@link InputSplit}s for the job.
   *   </li>
   *   <li>
   *   Setting up the requisite accounting information for the 
   *   {@link DistributedCache} of the job, if necessary.
   *   </li>
   *   <li>
   *   Copying the job's jar and configuration to the map-reduce system
   *   directory on the distributed file-system. 
   *   </li>
   *   <li>
   *   Submitting the job to the <code>JobTracker</code> and optionally
   *   monitoring its status.
   *   </li>
   * </ol></p>
   * @param job the configuration to submit
   * @param cluster the handle to the Cluster
   * @throws ClassNotFoundException
   * @throws InterruptedException
   * @throws IOException
   */
JobStatus submitJobInternal(Job job, Cluster cluster) throws ClassNotFoundException, InterruptedException, IOException {
    //validate the jobs output specs 
    checkSpecs(job);
    Configuration conf = job.getConfiguration();
    addMRFrameworkToDistributedCache(conf);
    Path jobStagingArea = JobSubmissionFiles.getStagingDir(cluster, conf);
    //configure the command line options correctly on the submitting dfs
    InetAddress ip = InetAddress.getLocalHost();
    if (ip != null) {
        submitHostAddress = ip.getHostAddress();
        submitHostName = ip.getHostName();
        conf.set(MRJobConfig.JOB_SUBMITHOST, submitHostName);
        conf.set(MRJobConfig.JOB_SUBMITHOSTADDR, submitHostAddress);
    }
    JobID jobId = submitClient.getNewJobID();
    job.setJobID(jobId);
    Path submitJobDir = new Path(jobStagingArea, jobId.toString());
    JobStatus status = null;
    try {
        conf.set(MRJobConfig.USER_NAME, UserGroupInformation.getCurrentUser().getShortUserName());
        conf.set("hadoop.http.filter.initializers", "org.apache.hadoop.yarn.server.webproxy.amfilter.AmFilterInitializer");
        conf.set(MRJobConfig.MAPREDUCE_JOB_DIR, submitJobDir.toString());
        LOG.debug("Configuring job " + jobId + " with " + submitJobDir + " as the submit dir");
        // get delegation token for the dir
        TokenCache.obtainTokensForNamenodes(job.getCredentials(), new Path[] { submitJobDir }, conf);
        populateTokenCache(conf, job.getCredentials());
        // generate a secret to authenticate shuffle transfers
        if (TokenCache.getShuffleSecretKey(job.getCredentials()) == null) {
            KeyGenerator keyGen;
            try {
                keyGen = KeyGenerator.getInstance(SHUFFLE_KEYGEN_ALGORITHM);
                keyGen.init(SHUFFLE_KEY_LENGTH);
            } catch (NoSuchAlgorithmException e) {
                throw new IOException("Error generating shuffle secret key", e);
            }
            SecretKey shuffleKey = keyGen.generateKey();
            TokenCache.setShuffleSecretKey(shuffleKey.getEncoded(), job.getCredentials());
        }
        if (CryptoUtils.isEncryptedSpillEnabled(conf)) {
            conf.setInt(MRJobConfig.MR_AM_MAX_ATTEMPTS, 1);
            LOG.warn("Max job attempts set to 1 since encrypted intermediate" + "data spill is enabled");
        }
        copyAndConfigureFiles(job, submitJobDir);
        Path submitJobFile = JobSubmissionFiles.getJobConfPath(submitJobDir);
        // Create the splits for the job
        LOG.debug("Creating splits at " + jtFs.makeQualified(submitJobDir));
        int maps = writeSplits(job, submitJobDir);
        conf.setInt(MRJobConfig.NUM_MAPS, maps);
        LOG.info("number of splits:" + maps);
        int maxMaps = conf.getInt(MRJobConfig.JOB_MAX_MAP, MRJobConfig.DEFAULT_JOB_MAX_MAP);
        if (maxMaps >= 0 && maxMaps < maps) {
            throw new IllegalArgumentException("The number of map tasks " + maps + " exceeded limit " + maxMaps);
        }
        // write "queue admins of the queue to which job is being submitted"
        // to job file.
        String queue = conf.get(MRJobConfig.QUEUE_NAME, JobConf.DEFAULT_QUEUE_NAME);
        AccessControlList acl = submitClient.getQueueAdmins(queue);
        conf.set(toFullPropertyName(queue, QueueACL.ADMINISTER_JOBS.getAclName()), acl.getAclString());
        // removing jobtoken referrals before copying the jobconf to HDFS
        // as the tasks don't need this setting, actually they may break
        // because of it if present as the referral will point to a
        // different job.
        TokenCache.cleanUpTokenReferral(conf);
        if (conf.getBoolean(MRJobConfig.JOB_TOKEN_TRACKING_IDS_ENABLED, MRJobConfig.DEFAULT_JOB_TOKEN_TRACKING_IDS_ENABLED)) {
            // Add HDFS tracking ids
            ArrayList<String> trackingIds = new ArrayList<String>();
            for (Token<? extends TokenIdentifier> t : job.getCredentials().getAllTokens()) {
                trackingIds.add(t.decodeIdentifier().getTrackingId());
            }
            conf.setStrings(MRJobConfig.JOB_TOKEN_TRACKING_IDS, trackingIds.toArray(new String[trackingIds.size()]));
        }
        // Set reservation info if it exists
        ReservationId reservationId = job.getReservationId();
        if (reservationId != null) {
            conf.set(MRJobConfig.RESERVATION_ID, reservationId.toString());
        }
        // Write job file to submit dir
        writeConf(conf, submitJobFile);
        Limits.reset(conf);
        //
        // Now, actually submit the job (using the submit name)
        //
        printTokens(jobId, job.getCredentials());
        status = submitClient.submitJob(jobId, submitJobDir.toString(), job.getCredentials());
        if (status != null) {
            return status;
        } else {
            throw new IOException("Could not launch job");
        }
    } finally {
        if (status == null) {
            LOG.info("Cleaning up the staging area " + submitJobDir);
            if (jtFs != null && submitJobDir != null)
                jtFs.delete(submitJobDir, true);
        }
    }
}
Also used : Path(org.apache.hadoop.fs.Path) AccessControlList(org.apache.hadoop.security.authorize.AccessControlList) Configuration(org.apache.hadoop.conf.Configuration) ArrayList(java.util.ArrayList) NoSuchAlgorithmException(java.security.NoSuchAlgorithmException) IOException(java.io.IOException) SecretKey(javax.crypto.SecretKey) ReservationId(org.apache.hadoop.yarn.api.records.ReservationId) InetAddress(java.net.InetAddress) KeyGenerator(javax.crypto.KeyGenerator)
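
The queue-admin step above boils down to serializing an AccessControlList into the job configuration. A minimal sketch of that round trip (standalone; the queue name, property key, and admin list are illustrative stand-ins, since the real code derives the key via toFullPropertyName and obtains the ACL from submitClient.getQueueAdmins):

// Minimal standalone sketch; key and ACL contents are illustrative.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.authorize.AccessControlList;

public class QueueAdminAclSketch {
    public static void main(String[] args) {
        // Stand-in for the ACL returned by submitClient.getQueueAdmins(queue).
        AccessControlList queueAdmins = new AccessControlList("admin1 hadoop-admins");

        Configuration conf = new Configuration(false);
        // A literal key is used here purely for illustration.
        conf.set("mapred.queue.default.acl-administer-jobs", queueAdmins.getAclString());

        // Whatever reads the job file can rebuild the ACL from the serialized string.
        AccessControlList restored =
            new AccessControlList(conf.get("mapred.queue.default.acl-administer-jobs"));
        UserGroupInformation admin = UserGroupInformation.createRemoteUser("admin1");
        System.out.println(restored.isUserAllowed(admin)); // true
    }
}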

Aggregations

AccessControlList (org.apache.hadoop.security.authorize.AccessControlList) 78
Configuration (org.apache.hadoop.conf.Configuration) 24
HashMap (java.util.HashMap) 22
Test (org.junit.Test) 17
JobACL (org.apache.hadoop.mapreduce.JobACL) 10
UserGroupInformation (org.apache.hadoop.security.UserGroupInformation) 10
Map (java.util.Map) 6
KeyOpType (org.apache.hadoop.crypto.key.kms.server.KeyAuthorizationKeyProvider.KeyOpType) 6
URI (java.net.URI) 5
ServletContext (javax.servlet.ServletContext) 5
ApplicationClientProtocol (org.apache.hadoop.yarn.api.ApplicationClientProtocol) 5
GetApplicationReportRequest (org.apache.hadoop.yarn.api.protocolrecords.GetApplicationReportRequest) 5
KillApplicationRequest (org.apache.hadoop.yarn.api.protocolrecords.KillApplicationRequest) 5
ApplicationId (org.apache.hadoop.yarn.api.records.ApplicationId) 5
IOException (java.io.IOException) 4
ArrayList (java.util.ArrayList) 4
HttpServletRequest (javax.servlet.http.HttpServletRequest) 4
HttpServletResponse (javax.servlet.http.HttpServletResponse) 4
KMSConfiguration (org.apache.hadoop.crypto.key.kms.server.KMSConfiguration) 4
ApplicationAccessType (org.apache.hadoop.yarn.api.records.ApplicationAccessType) 4