Example 41 with AccessControlList

Use of org.apache.hadoop.security.authorize.AccessControlList in the Apache Hadoop project.

From the class MockJobs, method newJob.

public static Job newJob(ApplicationId appID, int i, int n, int m, Path confFile, boolean hasFailedTasks) {
    final JobId id = newJobID(appID, i);
    final String name = newJobName();
    final JobReport report = newJobReport(id);
    final Map<TaskId, Task> tasks = newTasks(id, n, m, hasFailedTasks);
    final TaskCount taskCount = getTaskCount(tasks.values());
    final Counters counters = getCounters(tasks.values());
    final Path configFile = confFile;
    final Configuration conf = new Configuration();
    conf.set(JobACL.VIEW_JOB.getAclName(), "testuser");
    conf.setBoolean(MRConfig.MR_ACLS_ENABLED, true);
    JobACLsManager aclsManager = new JobACLsManager(conf);
    // constructJobACLs reads the mapreduce.job.acl-* keys set on conf above
    final Map<JobACL, AccessControlList> jobACLs = aclsManager.constructJobACLs(conf);
    return new Job() {

        @Override
        public JobId getID() {
            return id;
        }

        @Override
        public String getName() {
            return name;
        }

        @Override
        public JobState getState() {
            return report.getJobState();
        }

        @Override
        public JobReport getReport() {
            return report;
        }

        @Override
        public float getProgress() {
            return 0;
        }

        @Override
        public Counters getAllCounters() {
            return counters;
        }

        @Override
        public Map<TaskId, Task> getTasks() {
            return tasks;
        }

        @Override
        public Task getTask(TaskId taskID) {
            return tasks.get(taskID);
        }

        @Override
        public int getTotalMaps() {
            return taskCount.maps;
        }

        @Override
        public int getTotalReduces() {
            return taskCount.reduces;
        }

        @Override
        public int getCompletedMaps() {
            return taskCount.completedMaps;
        }

        @Override
        public int getCompletedReduces() {
            return taskCount.completedReduces;
        }

        @Override
        public boolean isUber() {
            return false;
        }

        @Override
        public TaskAttemptCompletionEvent[] getTaskAttemptCompletionEvents(int fromEventId, int maxEvents) {
            return null;
        }

        @Override
        public TaskCompletionEvent[] getMapAttemptCompletionEvents(int startIndex, int maxEvents) {
            return null;
        }

        @Override
        public Map<TaskId, Task> getTasks(TaskType taskType) {
            throw new UnsupportedOperationException("Not supported yet.");
        }

        @Override
        public List<String> getDiagnostics() {
            return Collections.<String>emptyList();
        }

        @Override
        public boolean checkAccess(UserGroupInformation callerUGI, JobACL jobOperation) {
            return true;
        }

        @Override
        public String getUserName() {
            return "mock";
        }

        @Override
        public String getQueueName() {
            return "mockqueue";
        }

        @Override
        public Path getConfFile() {
            return configFile;
        }

        @Override
        public Map<JobACL, AccessControlList> getJobACLs() {
            return jobACLs;
        }

        @Override
        public List<AMInfo> getAMInfos() {
            List<AMInfo> amInfoList = new LinkedList<AMInfo>();
            amInfoList.add(createAMInfo(1));
            amInfoList.add(createAMInfo(2));
            return amInfoList;
        }

        @Override
        public Configuration loadConfFile() throws IOException {
            FileContext fc = FileContext.getFileContext(configFile.toUri(), conf);
            Configuration jobConf = new Configuration(false);
            jobConf.addResource(fc.open(configFile), configFile.toString());
            return jobConf;
        }

        @Override
        public void setQueueName(String queueName) {
        // do nothing
        }

        @Override
        public void setJobPriority(Priority priority) {
        // do nothing
        }
    };
}
Also used: AccessControlList (org.apache.hadoop.security.authorize.AccessControlList), Task (org.apache.hadoop.mapreduce.v2.app.job.Task), TaskId (org.apache.hadoop.mapreduce.v2.api.records.TaskId), Configuration (org.apache.hadoop.conf.Configuration), HashMap (java.util.HashMap), TaskAttemptCompletionEvent (org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptCompletionEvent), JobReport (org.apache.hadoop.mapreduce.v2.api.records.JobReport), TaskCompletionEvent (org.apache.hadoop.mapred.TaskCompletionEvent), JobACLsManager (org.apache.hadoop.mapred.JobACLsManager), TaskType (org.apache.hadoop.mapreduce.v2.api.records.TaskType), Job (org.apache.hadoop.mapreduce.v2.app.job.Job), JobId (org.apache.hadoop.mapreduce.v2.api.records.JobId), UserGroupInformation (org.apache.hadoop.security.UserGroupInformation), Path (org.apache.hadoop.fs.Path), Priority (org.apache.hadoop.yarn.api.records.Priority), LinkedList (java.util.LinkedList), AMInfo (org.apache.hadoop.mapreduce.v2.api.records.AMInfo), Counters (org.apache.hadoop.mapreduce.Counters), JobACL (org.apache.hadoop.mapreduce.JobACL), FileContext (org.apache.hadoop.fs.FileContext)
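
For reference, a minimal sketch of how a jobACLs map built this way is typically consumed. This is not part of the excerpt above; the class name and the "testuser"/"owner" values are illustrative. JobACLsManager.checkAccess compares the caller's UserGroupInformation against the AccessControlList for the requested operation:

import java.util.Map;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapred.JobACLsManager;
import org.apache.hadoop.mapreduce.JobACL;
import org.apache.hadoop.mapreduce.MRConfig;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.authorize.AccessControlList;

public class JobAclCheckSketch {
    public static void main(String[] args) {
        // Build the same ACL map the mock job above constructs.
        Configuration conf = new Configuration();
        conf.set(JobACL.VIEW_JOB.getAclName(), "testuser");
        conf.setBoolean(MRConfig.MR_ACLS_ENABLED, true);
        JobACLsManager aclsManager = new JobACLsManager(conf);
        Map<JobACL, AccessControlList> jobACLs = aclsManager.constructJobACLs(conf);

        // The manager grants access when ACLs are disabled, when the caller is
        // the job owner, or when the caller matches the operation's ACL.
        UserGroupInformation caller = UserGroupInformation.createRemoteUser("testuser");
        boolean allowed = aclsManager.checkAccess(caller, JobACL.VIEW_JOB,
                "owner", jobACLs.get(JobACL.VIEW_JOB));
        System.out.println("view allowed for testuser: " + allowed);
    }
}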

Example 42 with AccessControlList

Use of org.apache.hadoop.security.authorize.AccessControlList in the Apache Hadoop project.

From the class JHEventHandlerForSigtermTest, method testTimelineEventHandling.

// Have JobHistoryEventHandler handle some events and make sure they get
// stored to the Timeline store
@Test(timeout = 50000)
public void testTimelineEventHandling() throws Exception {
    TestParams t = new TestParams(RunningAppContext.class, false);
    Configuration conf = new YarnConfiguration();
    conf.setBoolean(YarnConfiguration.TIMELINE_SERVICE_ENABLED, true);
    long currentTime = System.currentTimeMillis();
    try (MiniYARNCluster yarnCluster = new MiniYARNCluster(TestJobHistoryEventHandler.class.getSimpleName(), 1, 1, 1, 1)) {
        yarnCluster.init(conf);
        yarnCluster.start();
        Configuration confJHEH = new YarnConfiguration(conf);
        confJHEH.setBoolean(MRJobConfig.MAPREDUCE_JOB_EMIT_TIMELINE_DATA, true);
        confJHEH.set(YarnConfiguration.TIMELINE_SERVICE_WEBAPP_ADDRESS, MiniYARNCluster.getHostname() + ":" + yarnCluster.getApplicationHistoryServer().getPort());
        JHEvenHandlerForTest jheh = new JHEvenHandlerForTest(t.mockAppContext, 0);
        jheh.init(confJHEH);
        jheh.start();
        TimelineStore ts = yarnCluster.getApplicationHistoryServer().getTimelineStore();
        handleEvent(jheh, new JobHistoryEvent(t.jobId, new AMStartedEvent(t.appAttemptId, 200, t.containerId, "nmhost", 3000, 4000, -1), currentTime - 10));
        TimelineEntities entities = ts.getEntities("MAPREDUCE_JOB", null, null, null, null, null, null, null, null, null);
        Assert.assertEquals(1, entities.getEntities().size());
        TimelineEntity tEntity = entities.getEntities().get(0);
        Assert.assertEquals(t.jobId.toString(), tEntity.getEntityId());
        Assert.assertEquals(1, tEntity.getEvents().size());
        Assert.assertEquals(EventType.AM_STARTED.toString(), tEntity.getEvents().get(0).getEventType());
        Assert.assertEquals(currentTime - 10, tEntity.getEvents().get(0).getTimestamp());
        handleEvent(jheh, new JobHistoryEvent(t.jobId, new JobSubmittedEvent(TypeConverter.fromYarn(t.jobId), "name", "user", 200, "/foo/job.xml", new HashMap<JobACL, AccessControlList>(), "default"), currentTime + 10));
        entities = ts.getEntities("MAPREDUCE_JOB", null, null, null, null, null, null, null, null, null);
        Assert.assertEquals(1, entities.getEntities().size());
        tEntity = entities.getEntities().get(0);
        Assert.assertEquals(t.jobId.toString(), tEntity.getEntityId());
        Assert.assertEquals(2, tEntity.getEvents().size());
        Assert.assertEquals(EventType.JOB_SUBMITTED.toString(), tEntity.getEvents().get(0).getEventType());
        Assert.assertEquals(EventType.AM_STARTED.toString(), tEntity.getEvents().get(1).getEventType());
        Assert.assertEquals(currentTime + 10, tEntity.getEvents().get(0).getTimestamp());
        Assert.assertEquals(currentTime - 10, tEntity.getEvents().get(1).getTimestamp());
        handleEvent(jheh, new JobHistoryEvent(t.jobId, new JobQueueChangeEvent(TypeConverter.fromYarn(t.jobId), "q2"), currentTime - 20));
        entities = ts.getEntities("MAPREDUCE_JOB", null, null, null, null, null, null, null, null, null);
        Assert.assertEquals(1, entities.getEntities().size());
        tEntity = entities.getEntities().get(0);
        Assert.assertEquals(t.jobId.toString(), tEntity.getEntityId());
        Assert.assertEquals(3, tEntity.getEvents().size());
        Assert.assertEquals(EventType.JOB_SUBMITTED.toString(), tEntity.getEvents().get(0).getEventType());
        Assert.assertEquals(EventType.AM_STARTED.toString(), tEntity.getEvents().get(1).getEventType());
        Assert.assertEquals(EventType.JOB_QUEUE_CHANGED.toString(), tEntity.getEvents().get(2).getEventType());
        Assert.assertEquals(currentTime + 10, tEntity.getEvents().get(0).getTimestamp());
        Assert.assertEquals(currentTime - 10, tEntity.getEvents().get(1).getTimestamp());
        Assert.assertEquals(currentTime - 20, tEntity.getEvents().get(2).getTimestamp());
        handleEvent(jheh, new JobHistoryEvent(t.jobId, new JobFinishedEvent(TypeConverter.fromYarn(t.jobId), 0, 0, 0, 0, 0, new Counters(), new Counters(), new Counters()), currentTime));
        entities = ts.getEntities("MAPREDUCE_JOB", null, null, null, null, null, null, null, null, null);
        Assert.assertEquals(1, entities.getEntities().size());
        tEntity = entities.getEntities().get(0);
        Assert.assertEquals(t.jobId.toString(), tEntity.getEntityId());
        Assert.assertEquals(4, tEntity.getEvents().size());
        Assert.assertEquals(EventType.JOB_SUBMITTED.toString(), tEntity.getEvents().get(0).getEventType());
        Assert.assertEquals(EventType.JOB_FINISHED.toString(), tEntity.getEvents().get(1).getEventType());
        Assert.assertEquals(EventType.AM_STARTED.toString(), tEntity.getEvents().get(2).getEventType());
        Assert.assertEquals(EventType.JOB_QUEUE_CHANGED.toString(), tEntity.getEvents().get(3).getEventType());
        Assert.assertEquals(currentTime + 10, tEntity.getEvents().get(0).getTimestamp());
        Assert.assertEquals(currentTime, tEntity.getEvents().get(1).getTimestamp());
        Assert.assertEquals(currentTime - 10, tEntity.getEvents().get(2).getTimestamp());
        Assert.assertEquals(currentTime - 20, tEntity.getEvents().get(3).getTimestamp());
        handleEvent(jheh, new JobHistoryEvent(t.jobId, new JobUnsuccessfulCompletionEvent(TypeConverter.fromYarn(t.jobId), 0, 0, 0, JobStateInternal.KILLED.toString()), currentTime + 20));
        entities = ts.getEntities("MAPREDUCE_JOB", null, null, null, null, null, null, null, null, null);
        Assert.assertEquals(1, entities.getEntities().size());
        tEntity = entities.getEntities().get(0);
        Assert.assertEquals(t.jobId.toString(), tEntity.getEntityId());
        Assert.assertEquals(5, tEntity.getEvents().size());
        Assert.assertEquals(EventType.JOB_KILLED.toString(), tEntity.getEvents().get(0).getEventType());
        Assert.assertEquals(EventType.JOB_SUBMITTED.toString(), tEntity.getEvents().get(1).getEventType());
        Assert.assertEquals(EventType.JOB_FINISHED.toString(), tEntity.getEvents().get(2).getEventType());
        Assert.assertEquals(EventType.AM_STARTED.toString(), tEntity.getEvents().get(3).getEventType());
        Assert.assertEquals(EventType.JOB_QUEUE_CHANGED.toString(), tEntity.getEvents().get(4).getEventType());
        Assert.assertEquals(currentTime + 20, tEntity.getEvents().get(0).getTimestamp());
        Assert.assertEquals(currentTime + 10, tEntity.getEvents().get(1).getTimestamp());
        Assert.assertEquals(currentTime, tEntity.getEvents().get(2).getTimestamp());
        Assert.assertEquals(currentTime - 10, tEntity.getEvents().get(3).getTimestamp());
        Assert.assertEquals(currentTime - 20, tEntity.getEvents().get(4).getTimestamp());
        handleEvent(jheh, new JobHistoryEvent(t.jobId, new TaskStartedEvent(t.taskID, 0, TaskType.MAP, "")));
        entities = ts.getEntities("MAPREDUCE_TASK", null, null, null, null, null, null, null, null, null);
        Assert.assertEquals(1, entities.getEntities().size());
        tEntity = entities.getEntities().get(0);
        Assert.assertEquals(t.taskID.toString(), tEntity.getEntityId());
        Assert.assertEquals(1, tEntity.getEvents().size());
        Assert.assertEquals(EventType.TASK_STARTED.toString(), tEntity.getEvents().get(0).getEventType());
        Assert.assertEquals(TaskType.MAP.toString(), tEntity.getEvents().get(0).getEventInfo().get("TASK_TYPE"));
        handleEvent(jheh, new JobHistoryEvent(t.jobId, new TaskStartedEvent(t.taskID, 0, TaskType.REDUCE, "")));
        entities = ts.getEntities("MAPREDUCE_TASK", null, null, null, null, null, null, null, null, null);
        Assert.assertEquals(1, entities.getEntities().size());
        tEntity = entities.getEntities().get(0);
        Assert.assertEquals(t.taskID.toString(), tEntity.getEntityId());
        Assert.assertEquals(2, tEntity.getEvents().size());
        Assert.assertEquals(EventType.TASK_STARTED.toString(), tEntity.getEvents().get(1).getEventType());
        Assert.assertEquals(TaskType.REDUCE.toString(), tEntity.getEvents().get(0).getEventInfo().get("TASK_TYPE"));
        Assert.assertEquals(TaskType.MAP.toString(), tEntity.getEvents().get(1).getEventInfo().get("TASK_TYPE"));
    }
}
Also used: AccessControlList (org.apache.hadoop.security.authorize.AccessControlList), Configuration (org.apache.hadoop.conf.Configuration), HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration), YarnConfiguration (org.apache.hadoop.yarn.conf.YarnConfiguration), TimelineEntity (org.apache.hadoop.yarn.api.records.timeline.TimelineEntity), TimelineEntities (org.apache.hadoop.yarn.api.records.timeline.TimelineEntities), Counters (org.apache.hadoop.mapreduce.Counters), MiniYARNCluster (org.apache.hadoop.yarn.server.MiniYARNCluster), TimelineStore (org.apache.hadoop.yarn.server.timeline.TimelineStore), JobACL (org.apache.hadoop.mapreduce.JobACL), Test (org.junit.Test)
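
The JobSubmittedEvent above is handed an empty ACL map. As a sketch of what a populated map would look like (the user and group names are invented): the AccessControlList constructor takes a string of the form "user1,user2 group1,group2", with "*" meaning everyone.

import java.util.HashMap;
import java.util.Map;

import org.apache.hadoop.mapreduce.JobACL;
import org.apache.hadoop.security.authorize.AccessControlList;

public class JobAclMapSketch {
    public static Map<JobACL, AccessControlList> buildAcls() {
        Map<JobACL, AccessControlList> acls = new HashMap<JobACL, AccessControlList>();
        // "alice,bob analysts": users alice and bob, plus the analysts group
        acls.put(JobACL.VIEW_JOB, new AccessControlList("alice,bob analysts"));
        // Only alice may modify the job.
        acls.put(JobACL.MODIFY_JOB, new AccessControlList("alice"));
        return acls;
    }
}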

Example 43 with AccessControlList

Use of org.apache.hadoop.security.authorize.AccessControlList in the Apache Hadoop project.

From the class JobSubmitter, method submitJobInternal.

/**
   * Internal method for submitting jobs to the system.
   * 
   * <p>The job submission process involves:
   * <ol>
   *   <li>
   *   Checking the input and output specifications of the job.
   *   </li>
   *   <li>
   *   Computing the {@link InputSplit}s for the job.
   *   </li>
   *   <li>
   *   Set up the requisite accounting information for the
   *   {@link DistributedCache} of the job, if necessary.
   *   </li>
   *   <li>
   *   Copying the job's jar and configuration to the map-reduce system
   *   directory on the distributed file-system. 
   *   </li>
   *   <li>
   *   Submitting the job to the <code>JobTracker</code> and optionally
   *   monitoring its status.
   *   </li>
   * </ol></p>
   * @param job the configuration to submit
   * @param cluster the handle to the Cluster
   * @throws ClassNotFoundException
   * @throws InterruptedException
   * @throws IOException
   */
JobStatus submitJobInternal(Job job, Cluster cluster) throws ClassNotFoundException, InterruptedException, IOException {
    //validate the jobs output specs 
    checkSpecs(job);
    Configuration conf = job.getConfiguration();
    addMRFrameworkToDistributedCache(conf);
    Path jobStagingArea = JobSubmissionFiles.getStagingDir(cluster, conf);
    //configure the command line options correctly on the submitting dfs
    InetAddress ip = InetAddress.getLocalHost();
    if (ip != null) {
        submitHostAddress = ip.getHostAddress();
        submitHostName = ip.getHostName();
        conf.set(MRJobConfig.JOB_SUBMITHOST, submitHostName);
        conf.set(MRJobConfig.JOB_SUBMITHOSTADDR, submitHostAddress);
    }
    JobID jobId = submitClient.getNewJobID();
    job.setJobID(jobId);
    Path submitJobDir = new Path(jobStagingArea, jobId.toString());
    JobStatus status = null;
    try {
        conf.set(MRJobConfig.USER_NAME, UserGroupInformation.getCurrentUser().getShortUserName());
        conf.set("hadoop.http.filter.initializers", "org.apache.hadoop.yarn.server.webproxy.amfilter.AmFilterInitializer");
        conf.set(MRJobConfig.MAPREDUCE_JOB_DIR, submitJobDir.toString());
        LOG.debug("Configuring job " + jobId + " with " + submitJobDir + " as the submit dir");
        // get delegation token for the dir
        TokenCache.obtainTokensForNamenodes(job.getCredentials(), new Path[] { submitJobDir }, conf);
        populateTokenCache(conf, job.getCredentials());
        // generate a secret to authenticate shuffle transfers
        if (TokenCache.getShuffleSecretKey(job.getCredentials()) == null) {
            KeyGenerator keyGen;
            try {
                keyGen = KeyGenerator.getInstance(SHUFFLE_KEYGEN_ALGORITHM);
                keyGen.init(SHUFFLE_KEY_LENGTH);
            } catch (NoSuchAlgorithmException e) {
                throw new IOException("Error generating shuffle secret key", e);
            }
            SecretKey shuffleKey = keyGen.generateKey();
            TokenCache.setShuffleSecretKey(shuffleKey.getEncoded(), job.getCredentials());
        }
        if (CryptoUtils.isEncryptedSpillEnabled(conf)) {
            conf.setInt(MRJobConfig.MR_AM_MAX_ATTEMPTS, 1);
            LOG.warn("Max job attempts set to 1 since encrypted intermediate" + "data spill is enabled");
        }
        copyAndConfigureFiles(job, submitJobDir);
        Path submitJobFile = JobSubmissionFiles.getJobConfPath(submitJobDir);
        // Create the splits for the job
        LOG.debug("Creating splits at " + jtFs.makeQualified(submitJobDir));
        int maps = writeSplits(job, submitJobDir);
        conf.setInt(MRJobConfig.NUM_MAPS, maps);
        LOG.info("number of splits:" + maps);
        int maxMaps = conf.getInt(MRJobConfig.JOB_MAX_MAP, MRJobConfig.DEFAULT_JOB_MAX_MAP);
        if (maxMaps >= 0 && maxMaps < maps) {
            throw new IllegalArgumentException("The number of map tasks " + maps + " exceeded limit " + maxMaps);
        }
        // write "queue admins of the queue to which job is being submitted"
        // to job file.
        String queue = conf.get(MRJobConfig.QUEUE_NAME, JobConf.DEFAULT_QUEUE_NAME);
        AccessControlList acl = submitClient.getQueueAdmins(queue);
        conf.set(toFullPropertyName(queue, QueueACL.ADMINISTER_JOBS.getAclName()), acl.getAclString());
        // removing jobtoken referrals before copying the jobconf to HDFS
        // as the tasks don't need this setting, actually they may break
        // because of it if present as the referral will point to a
        // different job.
        TokenCache.cleanUpTokenReferral(conf);
        if (conf.getBoolean(MRJobConfig.JOB_TOKEN_TRACKING_IDS_ENABLED, MRJobConfig.DEFAULT_JOB_TOKEN_TRACKING_IDS_ENABLED)) {
            // Add HDFS tracking ids
            ArrayList<String> trackingIds = new ArrayList<String>();
            for (Token<? extends TokenIdentifier> t : job.getCredentials().getAllTokens()) {
                trackingIds.add(t.decodeIdentifier().getTrackingId());
            }
            conf.setStrings(MRJobConfig.JOB_TOKEN_TRACKING_IDS, trackingIds.toArray(new String[trackingIds.size()]));
        }
        // Set reservation info if it exists
        ReservationId reservationId = job.getReservationId();
        if (reservationId != null) {
            conf.set(MRJobConfig.RESERVATION_ID, reservationId.toString());
        }
        // Write job file to submit dir
        writeConf(conf, submitJobFile);
        Limits.reset(conf);
        //
        // Now, actually submit the job (using the submit name)
        //
        printTokens(jobId, job.getCredentials());
        status = submitClient.submitJob(jobId, submitJobDir.toString(), job.getCredentials());
        if (status != null) {
            return status;
        } else {
            throw new IOException("Could not launch job");
        }
    } finally {
        if (status == null) {
            LOG.info("Cleaning up the staging area " + submitJobDir);
            if (jtFs != null && submitJobDir != null)
                jtFs.delete(submitJobDir, true);
        }
    }
}
Also used: Path (org.apache.hadoop.fs.Path), AccessControlList (org.apache.hadoop.security.authorize.AccessControlList), Configuration (org.apache.hadoop.conf.Configuration), ArrayList (java.util.ArrayList), NoSuchAlgorithmException (java.security.NoSuchAlgorithmException), IOException (java.io.IOException), SecretKey (javax.crypto.SecretKey), ReservationId (org.apache.hadoop.yarn.api.records.ReservationId), InetAddress (java.net.InetAddress), KeyGenerator (javax.crypto.KeyGenerator)
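
On the consumer side, the queue-admin ACL string written to the job conf above can be turned back into an AccessControlList and checked against a caller. A minimal sketch, assuming the "mapred.queue.<queue>.acl-administer-jobs" key shape that toFullPropertyName produces for the "default" queue (the user and group names are invented):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.authorize.AccessControlList;

public class QueueAdminAclSketch {
    public static void main(String[] args) {
        // submitJobInternal stores acl.getAclString() under this per-queue key.
        Configuration conf = new Configuration();
        conf.set("mapred.queue.default.acl-administer-jobs", "admin ops");

        // Rebuild the ACL from its string form and test a caller against it.
        AccessControlList acl = new AccessControlList(
                conf.get("mapred.queue.default.acl-administer-jobs"));
        UserGroupInformation ugi = UserGroupInformation.createRemoteUser("admin");
        System.out.println("admin may administer jobs: " + acl.isUserAllowed(ugi));
    }
}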

Example 44 with AccessControlList

Use of org.apache.hadoop.security.authorize.AccessControlList in the Apache Hadoop project.

From the class JobStatus, method readFields.

public synchronized void readFields(DataInput in) throws IOException {
    this.jobid = new JobID();
    this.jobid.readFields(in);
    this.setupProgress = in.readFloat();
    this.mapProgress = in.readFloat();
    this.reduceProgress = in.readFloat();
    this.cleanupProgress = in.readFloat();
    this.runState = WritableUtils.readEnum(in, State.class);
    this.startTime = in.readLong();
    this.user = StringInterner.weakIntern(Text.readString(in));
    this.priority = WritableUtils.readEnum(in, JobPriority.class);
    this.schedulingInfo = StringInterner.weakIntern(Text.readString(in));
    this.finishTime = in.readLong();
    this.isRetired = in.readBoolean();
    this.historyFile = StringInterner.weakIntern(Text.readString(in));
    this.jobName = StringInterner.weakIntern(Text.readString(in));
    this.trackingUrl = StringInterner.weakIntern(Text.readString(in));
    this.jobFile = StringInterner.weakIntern(Text.readString(in));
    this.isUber = in.readBoolean();
    // De-serialize the job's ACLs
    int numACLs = in.readInt();
    for (int i = 0; i < numACLs; i++) {
        JobACL aclType = WritableUtils.readEnum(in, JobACL.class);
        // " " constructs an empty ACL; readFields then fills in the real entries
        AccessControlList acl = new AccessControlList(" ");
        acl.readFields(in);
        this.jobACLs.put(aclType, acl);
    }
}
Also used: AccessControlList (org.apache.hadoop.security.authorize.AccessControlList)
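
Since AccessControlList is a Writable, the readFields call above pairs with a symmetric write path. A self-contained round-trip sketch (the ACL contents are invented), using Hadoop's in-memory data buffers:

import org.apache.hadoop.io.DataInputBuffer;
import org.apache.hadoop.io.DataOutputBuffer;
import org.apache.hadoop.security.authorize.AccessControlList;

public class AclWritableRoundTrip {
    public static void main(String[] args) throws Exception {
        // Serialize an ACL the same way JobStatus.write would.
        AccessControlList original = new AccessControlList("alice,bob analysts");
        DataOutputBuffer out = new DataOutputBuffer();
        original.write(out);

        // Mirror readFields above: new AccessControlList(" ") creates an
        // empty placeholder that readFields then overwrites.
        AccessControlList copy = new AccessControlList(" ");
        DataInputBuffer in = new DataInputBuffer();
        in.reset(out.getData(), out.getLength());
        copy.readFields(in);

        // Prints the round-tripped users/groups string.
        System.out.println(copy.getAclString());
    }
}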

Example 45 with AccessControlList

Use of org.apache.hadoop.security.authorize.AccessControlList in the Apache Hadoop project.

From the class TestLogLevel, method createServer.

/**
   * Creates and starts a Jetty server binding at an ephemeral port to run
   * LogLevel servlet.
   * @param protocol "http" or "https"
   * @param isSpnego true if SPNEGO is enabled
   * @return a created HttpServer2 object
   * @throws Exception if unable to create or start a Jetty server
   */
private HttpServer2 createServer(String protocol, boolean isSpnego) throws Exception {
    HttpServer2.Builder builder = new HttpServer2.Builder()
        .setName("..")
        .addEndpoint(new URI(protocol + "://localhost:0"))
        .setFindPort(true)
        .setConf(conf);
    if (isSpnego) {
        // Set up server Kerberos credentials.
        // Since the server may fall back to simple authentication,
        // use ACL to make sure the connection is Kerberos/SPNEGO authenticated.
        builder.setSecurityEnabled(true).setUsernameConfKey(PRINCIPAL).setKeytabConfKey(KEYTAB).setACL(new AccessControlList(clientPrincipal));
    }
    // if using HTTPS, configure keystore/truststore properties.
    if (protocol.equals(LogLevel.PROTOCOL_HTTPS)) {
        builder = builder
            .keyPassword(sslConf.get("ssl.server.keystore.keypassword"))
            .keyStore(sslConf.get("ssl.server.keystore.location"),
                sslConf.get("ssl.server.keystore.password"),
                sslConf.get("ssl.server.keystore.type", "jks"))
            .trustStore(sslConf.get("ssl.server.truststore.location"),
                sslConf.get("ssl.server.truststore.password"),
                sslConf.get("ssl.server.truststore.type", "jks"));
    }
    HttpServer2 server = builder.build();
    // Enable SPNEGO for LogLevel servlet
    if (isSpnego) {
        server.addInternalServlet("logLevel", "/logLevel", LogLevel.Servlet.class, true);
    }
    server.start();
    return server;
}
Also used: AccessControlList (org.apache.hadoop.security.authorize.AccessControlList), HttpServer2 (org.apache.hadoop.http.HttpServer2), URI (java.net.URI)
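
For clarity on what the ACL handed to setACL matches: a caller passes if their short user name appears in the ACL's user list or one of their groups appears in its group list, and "*" admits everyone. A small sketch of these semantics (all names invented), using createUserForTesting to pin down group membership locally:

import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.authorize.AccessControlList;

public class AclSemanticsSketch {
    public static void main(String[] args) {
        // "alice devs": user alice, plus anyone in the devs group.
        AccessControlList acl = new AccessControlList("alice devs");

        UserGroupInformation alice = UserGroupInformation.createUserForTesting(
                "alice", new String[] { "staff" });
        UserGroupInformation carol = UserGroupInformation.createUserForTesting(
                "carol", new String[] { "devs" });
        UserGroupInformation mallory = UserGroupInformation.createUserForTesting(
                "mallory", new String[] { "guests" });

        System.out.println(acl.isUserAllowed(alice));   // true: named user
        System.out.println(acl.isUserAllowed(carol));   // true: devs group
        System.out.println(acl.isUserAllowed(mallory)); // false

        // The wildcard ACL allows any caller.
        System.out.println(new AccessControlList("*").isUserAllowed(mallory)); // true
    }
}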

Aggregations

AccessControlList (org.apache.hadoop.security.authorize.AccessControlList) - 62 usages
Configuration (org.apache.hadoop.conf.Configuration) - 20 usages
HashMap (java.util.HashMap) - 18 usages
Test (org.junit.Test) - 15 usages
JobACL (org.apache.hadoop.mapreduce.JobACL) - 10 usages
UserGroupInformation (org.apache.hadoop.security.UserGroupInformation) - 10 usages
ServletContext (javax.servlet.ServletContext) - 5 usages
ApplicationClientProtocol (org.apache.hadoop.yarn.api.ApplicationClientProtocol) - 5 usages
GetApplicationReportRequest (org.apache.hadoop.yarn.api.protocolrecords.GetApplicationReportRequest) - 5 usages
KillApplicationRequest (org.apache.hadoop.yarn.api.protocolrecords.KillApplicationRequest) - 5 usages
ApplicationId (org.apache.hadoop.yarn.api.records.ApplicationId) - 5 usages
IOException (java.io.IOException) - 4 usages
URI (java.net.URI) - 4 usages
ArrayList (java.util.ArrayList) - 4 usages
Map (java.util.Map) - 4 usages
HttpServletRequest (javax.servlet.http.HttpServletRequest) - 4 usages
HttpServletResponse (javax.servlet.http.HttpServletResponse) - 4 usages
ApplicationAccessType (org.apache.hadoop.yarn.api.records.ApplicationAccessType) - 4 usages
QueueACL (org.apache.hadoop.yarn.api.records.QueueACL) - 3 usages
AccessType (org.apache.hadoop.yarn.security.AccessType) - 3 usages