
Example 16 with AccessControlList

Use of org.apache.hadoop.security.authorize.AccessControlList in project hadoop by apache.

The class HSAdminServer, method serviceInit.

@Override
public void serviceInit(Configuration conf) throws Exception {
    RPC.setProtocolEngine(conf, RefreshUserMappingsProtocolPB.class, ProtobufRpcEngine.class);
    RefreshUserMappingsProtocolServerSideTranslatorPB refreshUserMappingXlator = new RefreshUserMappingsProtocolServerSideTranslatorPB(this);
    BlockingService refreshUserMappingService = RefreshUserMappingsProtocolService.newReflectiveBlockingService(refreshUserMappingXlator);
    GetUserMappingsProtocolServerSideTranslatorPB getUserMappingXlator = new GetUserMappingsProtocolServerSideTranslatorPB(this);
    BlockingService getUserMappingService = GetUserMappingsProtocolService.newReflectiveBlockingService(getUserMappingXlator);
    HSAdminRefreshProtocolServerSideTranslatorPB refreshHSAdminProtocolXlator = new HSAdminRefreshProtocolServerSideTranslatorPB(this);
    BlockingService refreshHSAdminProtocolService = HSAdminRefreshProtocolService.newReflectiveBlockingService(refreshHSAdminProtocolXlator);
    clientRpcAddress = conf.getSocketAddr(JHAdminConfig.MR_HISTORY_BIND_HOST, JHAdminConfig.JHS_ADMIN_ADDRESS, JHAdminConfig.DEFAULT_JHS_ADMIN_ADDRESS, JHAdminConfig.DEFAULT_JHS_ADMIN_PORT);
    clientRpcServer = new RPC.Builder(conf).setProtocol(RefreshUserMappingsProtocolPB.class).setInstance(refreshUserMappingService).setBindAddress(clientRpcAddress.getHostName()).setPort(clientRpcAddress.getPort()).setVerbose(false).build();
    addProtocol(conf, GetUserMappingsProtocolPB.class, getUserMappingService);
    addProtocol(conf, HSAdminRefreshProtocolPB.class, refreshHSAdminProtocolService);
    // Enable service authorization?
    if (conf.getBoolean(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHORIZATION, false)) {
        clientRpcServer.refreshServiceAcl(conf, new ClientHSPolicyProvider());
    }
    adminAcl = new AccessControlList(conf.get(JHAdminConfig.JHS_ADMIN_ACL, JHAdminConfig.DEFAULT_JHS_ADMIN_ACL));
}
Also used : AccessControlList(org.apache.hadoop.security.authorize.AccessControlList) HSAdminRefreshProtocolServerSideTranslatorPB(org.apache.hadoop.mapreduce.v2.hs.protocolPB.HSAdminRefreshProtocolServerSideTranslatorPB) RefreshUserMappingsProtocolServerSideTranslatorPB(org.apache.hadoop.security.protocolPB.RefreshUserMappingsProtocolServerSideTranslatorPB) RPC(org.apache.hadoop.ipc.RPC) ClientHSPolicyProvider(org.apache.hadoop.mapreduce.v2.app.security.authorize.ClientHSPolicyProvider) BlockingService(com.google.protobuf.BlockingService) GetUserMappingsProtocolServerSideTranslatorPB(org.apache.hadoop.tools.protocolPB.GetUserMappingsProtocolServerSideTranslatorPB)
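
The adminAcl built at the end of serviceInit is the list consulted before privileged refresh operations are carried out. Below is a minimal sketch of that kind of check, assuming a hypothetical helper name and exception message; isUserAllowed and getCurrentUser are the real AccessControlList and UserGroupInformation APIs.

import java.io.IOException;

import org.apache.hadoop.security.AccessControlException;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.authorize.AccessControlList;

public class AdminAclCheckSketch {

    private final AccessControlList adminAcl;

    public AdminAclCheckSketch(AccessControlList adminAcl) {
        this.adminAcl = adminAcl;
    }

    // Hypothetical helper: resolve the calling user and test it against the admin ACL.
    public void checkAdminAccess(String operation) throws IOException {
        UserGroupInformation caller = UserGroupInformation.getCurrentUser();
        if (!adminAcl.isUserAllowed(caller)) {
            throw new AccessControlException("User " + caller.getShortUserName()
                + " is not authorized to perform " + operation);
        }
    }
}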

Example 17 with AccessControlList

Use of org.apache.hadoop.security.authorize.AccessControlList in project hadoop by apache.

The class CompletedJob, method checkAccess.

@Override
public boolean checkAccess(UserGroupInformation callerUGI, JobACL jobOperation) {
    Map<JobACL, AccessControlList> jobACLs = jobInfo.getJobACLs();
    AccessControlList jobACL = jobACLs.get(jobOperation);
    if (jobACL == null) {
        return true;
    }
    return aclsMgr.checkAccess(callerUGI, jobOperation, jobInfo.getUsername(), jobACL);
}
Also used : AccessControlList(org.apache.hadoop.security.authorize.AccessControlList) JobACL(org.apache.hadoop.mapreduce.JobACL)
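
The aclsMgr.checkAccess call above is handled by JobACLsManager. The sketch below approximates that style of check with an illustrative standalone class; it is not the actual Hadoop implementation, but isUserAllowed and getShortUserName are real APIs: with ACLs disabled everyone passes, otherwise the job owner or any ACL member is allowed.

import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.authorize.AccessControlList;

public class JobAccessCheckSketch {

    private final boolean aclsEnabled;

    public JobAccessCheckSketch(boolean aclsEnabled) {
        this.aclsEnabled = aclsEnabled;
    }

    // Simplified check: with ACLs disabled everything is allowed; otherwise the
    // job owner and any user matched by the ACL pass.
    public boolean checkAccess(UserGroupInformation callerUGI, String jobOwner,
            AccessControlList jobACL) {
        if (!aclsEnabled) {
            return true;
        }
        String user = callerUGI.getShortUserName();
        return user.equals(jobOwner) || jobACL.isUserAllowed(callerUGI);
    }
}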

Example 18 with AccessControlList

Use of org.apache.hadoop.security.authorize.AccessControlList in project hadoop by apache.

The class AllocationFileLoaderService, method loadQueue.

/**
   * Loads a queue from a queue element in the configuration file
   */
private void loadQueue(String parentName, Element element, Map<String, Resource> minQueueResources, Map<String, Resource> maxQueueResources, Map<String, Resource> maxChildQueueResources, Map<String, Integer> queueMaxApps, Map<String, Integer> userMaxApps, Map<String, Float> queueMaxAMShares, Map<String, ResourceWeights> queueWeights, Map<String, SchedulingPolicy> queuePolicies, Map<String, Long> minSharePreemptionTimeouts, Map<String, Long> fairSharePreemptionTimeouts, Map<String, Float> fairSharePreemptionThresholds, Map<String, Map<AccessType, AccessControlList>> queueAcls, Map<String, Map<ReservationACL, AccessControlList>> resAcls, Map<FSQueueType, Set<String>> configuredQueues, Set<String> reservableQueues, Set<String> nonPreemptableQueues) throws AllocationConfigurationException {
    String queueName = CharMatcher.WHITESPACE.trimFrom(element.getAttribute("name"));
    if (queueName.contains(".")) {
        throw new AllocationConfigurationException("Bad fair scheduler config " + "file: queue name (" + queueName + ") shouldn't contain period.");
    }
    if (queueName.isEmpty()) {
        throw new AllocationConfigurationException("Bad fair scheduler config " + "file: queue name shouldn't be empty or " + "consist only of whitespace.");
    }
    if (parentName != null) {
        queueName = parentName + "." + queueName;
    }
    Map<AccessType, AccessControlList> acls = new HashMap<>();
    Map<ReservationACL, AccessControlList> racls = new HashMap<>();
    NodeList fields = element.getChildNodes();
    boolean isLeaf = true;
    boolean isReservable = false;
    for (int j = 0; j < fields.getLength(); j++) {
        Node fieldNode = fields.item(j);
        if (!(fieldNode instanceof Element))
            continue;
        Element field = (Element) fieldNode;
        if ("minResources".equals(field.getTagName())) {
            String text = ((Text) field.getFirstChild()).getData().trim();
            Resource val = FairSchedulerConfiguration.parseResourceConfigValue(text);
            minQueueResources.put(queueName, val);
        } else if ("maxResources".equals(field.getTagName())) {
            String text = ((Text) field.getFirstChild()).getData().trim();
            Resource val = FairSchedulerConfiguration.parseResourceConfigValue(text);
            maxQueueResources.put(queueName, val);
        } else if ("maxChildResources".equals(field.getTagName())) {
            String text = ((Text) field.getFirstChild()).getData().trim();
            Resource val = FairSchedulerConfiguration.parseResourceConfigValue(text);
            maxChildQueueResources.put(queueName, val);
        } else if ("maxRunningApps".equals(field.getTagName())) {
            String text = ((Text) field.getFirstChild()).getData().trim();
            int val = Integer.parseInt(text);
            queueMaxApps.put(queueName, val);
        } else if ("maxAMShare".equals(field.getTagName())) {
            String text = ((Text) field.getFirstChild()).getData().trim();
            float val = Float.parseFloat(text);
            val = Math.min(val, 1.0f);
            queueMaxAMShares.put(queueName, val);
        } else if ("weight".equals(field.getTagName())) {
            String text = ((Text) field.getFirstChild()).getData().trim();
            double val = Double.parseDouble(text);
            queueWeights.put(queueName, new ResourceWeights((float) val));
        } else if ("minSharePreemptionTimeout".equals(field.getTagName())) {
            String text = ((Text) field.getFirstChild()).getData().trim();
            long val = Long.parseLong(text) * 1000L;
            minSharePreemptionTimeouts.put(queueName, val);
        } else if ("fairSharePreemptionTimeout".equals(field.getTagName())) {
            String text = ((Text) field.getFirstChild()).getData().trim();
            long val = Long.parseLong(text) * 1000L;
            fairSharePreemptionTimeouts.put(queueName, val);
        } else if ("fairSharePreemptionThreshold".equals(field.getTagName())) {
            String text = ((Text) field.getFirstChild()).getData().trim();
            float val = Float.parseFloat(text);
            val = Math.max(Math.min(val, 1.0f), 0.0f);
            fairSharePreemptionThresholds.put(queueName, val);
        } else if ("schedulingPolicy".equals(field.getTagName()) || "schedulingMode".equals(field.getTagName())) {
            String text = ((Text) field.getFirstChild()).getData().trim();
            SchedulingPolicy policy = SchedulingPolicy.parse(text);
            queuePolicies.put(queueName, policy);
        } else if ("aclSubmitApps".equals(field.getTagName())) {
            String text = ((Text) field.getFirstChild()).getData();
            acls.put(AccessType.SUBMIT_APP, new AccessControlList(text));
        } else if ("aclAdministerApps".equals(field.getTagName())) {
            String text = ((Text) field.getFirstChild()).getData();
            acls.put(AccessType.ADMINISTER_QUEUE, new AccessControlList(text));
        } else if ("aclAdministerReservations".equals(field.getTagName())) {
            String text = ((Text) field.getFirstChild()).getData();
            racls.put(ReservationACL.ADMINISTER_RESERVATIONS, new AccessControlList(text));
        } else if ("aclListReservations".equals(field.getTagName())) {
            String text = ((Text) field.getFirstChild()).getData();
            racls.put(ReservationACL.LIST_RESERVATIONS, new AccessControlList(text));
        } else if ("aclSubmitReservations".equals(field.getTagName())) {
            String text = ((Text) field.getFirstChild()).getData();
            racls.put(ReservationACL.SUBMIT_RESERVATIONS, new AccessControlList(text));
        } else if ("reservation".equals(field.getTagName())) {
            isReservable = true;
            reservableQueues.add(queueName);
            configuredQueues.get(FSQueueType.PARENT).add(queueName);
        } else if ("allowPreemptionFrom".equals(field.getTagName())) {
            String text = ((Text) field.getFirstChild()).getData().trim();
            if (!Boolean.parseBoolean(text)) {
                nonPreemptableQueues.add(queueName);
            }
        } else if ("queue".endsWith(field.getTagName()) || "pool".equals(field.getTagName())) {
            loadQueue(queueName, field, minQueueResources, maxQueueResources, maxChildQueueResources, queueMaxApps, userMaxApps, queueMaxAMShares, queueWeights, queuePolicies, minSharePreemptionTimeouts, fairSharePreemptionTimeouts, fairSharePreemptionThresholds, queueAcls, resAcls, configuredQueues, reservableQueues, nonPreemptableQueues);
            isLeaf = false;
        }
    }
    // A queue with no child queue elements is a leaf, unless it is explicitly
    // marked type='parent'; in that case store it as a parent queue.
    if (isLeaf && !"parent".equals(element.getAttribute("type"))) {
        configuredQueues.get(FSQueueType.LEAF).add(queueName);
    } else {
        if (isReservable) {
            throw new AllocationConfigurationException("The configuration settings" + " for " + queueName + " are invalid. A queue element that " + "contains child queue elements or that has the type='parent' " + "attribute cannot also include a reservation element.");
        }
        configuredQueues.get(FSQueueType.PARENT).add(queueName);
    }
    // Fill in defaults for any ACL not set explicitly: the root queue defaults
    // to everybody, all other queues to nobody.
    for (QueueACL acl : QueueACL.values()) {
        AccessType accessType = SchedulerUtils.toAccessType(acl);
        if (acls.get(accessType) == null) {
            AccessControlList defaultAcl = queueName.equals(ROOT) ? EVERYBODY_ACL : NOBODY_ACL;
            acls.put(accessType, defaultAcl);
        }
    }
    queueAcls.put(queueName, acls);
    resAcls.put(queueName, racls);
    if (maxQueueResources.containsKey(queueName) && minQueueResources.containsKey(queueName) && !Resources.fitsIn(minQueueResources.get(queueName), maxQueueResources.get(queueName))) {
        LOG.warn(String.format("Queue %s has max resources %s less than " + "min resources %s", queueName, maxQueueResources.get(queueName), minQueueResources.get(queueName)));
    }
}
Also used : AccessControlList(org.apache.hadoop.security.authorize.AccessControlList) HashMap(java.util.HashMap) NodeList(org.w3c.dom.NodeList) Node(org.w3c.dom.Node) Element(org.w3c.dom.Element) Resource(org.apache.hadoop.yarn.api.records.Resource) QueueACL(org.apache.hadoop.yarn.api.records.QueueACL) Text(org.w3c.dom.Text) ResourceWeights(org.apache.hadoop.yarn.server.resourcemanager.resource.ResourceWeights) ReservationACL(org.apache.hadoop.yarn.api.records.ReservationACL) AccessType(org.apache.hadoop.yarn.security.AccessType)
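
The EVERYBODY_ACL and NOBODY_ACL defaults applied above are effectively the wildcard ACL string "*" and the single-space ACL " ". A small standalone sketch of those semantics (the user name is a placeholder):

import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.authorize.AccessControlList;

public class QueueAclDefaultsSketch {
    public static void main(String[] args) {
        // "*" is the wildcard ACL string: all users are allowed.
        AccessControlList everybody = new AccessControlList("*");
        // A single space yields empty user and group lists: no user is allowed.
        AccessControlList nobody = new AccessControlList(" ");

        // "alice" is a placeholder user, not taken from the configuration above.
        UserGroupInformation alice = UserGroupInformation.createRemoteUser("alice");

        System.out.println(everybody.isAllAllowed());        // true
        System.out.println(everybody.isUserAllowed(alice));  // true
        System.out.println(nobody.isUserAllowed(alice));     // false
    }
}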

Example 19 with AccessControlList

Use of org.apache.hadoop.security.authorize.AccessControlList in project hadoop by apache.

The class JobACLsManager, method constructJobACLs.

/**
   * Construct the jobACLs from the configuration so that they can be kept in
   * memory. If authorization is disabled on the JobTracker, nothing is
   * constructed and an empty map is returned.
   * 
   * @return JobACL to AccessControlList map.
   */
public Map<JobACL, AccessControlList> constructJobACLs(Configuration conf) {
    Map<JobACL, AccessControlList> acls = new HashMap<JobACL, AccessControlList>();
    // Don't construct anything if authorization is disabled.
    if (!areACLsEnabled()) {
        return acls;
    }
    for (JobACL aclName : JobACL.values()) {
        String aclConfigName = aclName.getAclName();
        String aclConfigured = conf.get(aclConfigName);
        if (aclConfigured == null) {
            // If no ACL is configured, grant access to no one else: only the job
            // owner and cluster administrators may perform the operation.
            aclConfigured = " ";
        }
        acls.put(aclName, new AccessControlList(aclConfigured));
    }
    return acls;
}
Also used : AccessControlList(org.apache.hadoop.security.authorize.AccessControlList) HashMap(java.util.HashMap) JobACL(org.apache.hadoop.mapreduce.JobACL)
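
A short usage sketch of constructJobACLs, assuming areACLsEnabled is driven by MRConfig.MR_ACLS_ENABLED (mapreduce.cluster.acls.enabled); the user and group names are placeholders:

import java.util.Map;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapred.JobACLsManager;
import org.apache.hadoop.mapreduce.JobACL;
import org.apache.hadoop.mapreduce.MRConfig;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.authorize.AccessControlList;

public class ConstructJobAclsSketch {
    public static void main(String[] args) {
        Configuration conf = new Configuration();
        conf.setBoolean(MRConfig.MR_ACLS_ENABLED, true);
        // ACL strings use the "user1,user2 group1,group2" format; names are placeholders.
        conf.set(JobACL.VIEW_JOB.getAclName(), "alice admins");

        Map<JobACL, AccessControlList> acls =
            new JobACLsManager(conf).constructJobACLs(conf);

        UserGroupInformation alice = UserGroupInformation.createRemoteUser("alice");
        // alice is listed explicitly, so the view-job ACL admits her.
        System.out.println(acls.get(JobACL.VIEW_JOB).isUserAllowed(alice));    // true
        // MODIFY_JOB was not configured, so it defaulted to " " (no one besides owner/admins).
        System.out.println(acls.get(JobACL.MODIFY_JOB).isUserAllowed(alice));  // false
    }
}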

Example 20 with AccessControlList

Use of org.apache.hadoop.security.authorize.AccessControlList in project hadoop by apache.

The class DeprecatedQueueConfigurationParser, method createQueues.

private List<Queue> createQueues(Configuration conf) {
    String[] queueNameValues = conf.getStrings(MAPRED_QUEUE_NAMES_KEY);
    List<Queue> list = new ArrayList<Queue>();
    for (String name : queueNameValues) {
        try {
            Map<String, AccessControlList> acls = getQueueAcls(name, conf);
            QueueState state = getQueueState(name, conf);
            Queue q = new Queue(name, acls, state);
            list.add(q);
        } catch (Throwable t) {
            LOG.warn("Not able to initialize queue " + name);
        }
    }
    return list;
}
Also used : AccessControlList(org.apache.hadoop.security.authorize.AccessControlList) QueueState(org.apache.hadoop.mapreduce.QueueState) ArrayList(java.util.ArrayList)
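
For reference, this parser reads the deprecated comma-separated mapred.queue.names list together with per-queue ACL properties. The sketch below iterates such a configuration directly; the property names of the form mapred.queue.<name>.acl-submit-job, and the queue and user names, are assumptions for illustration rather than values taken from the snippet above.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.security.authorize.AccessControlList;

public class DeprecatedQueueConfigSketch {
    public static void main(String[] args) {
        Configuration conf = new Configuration();
        // Assumed deprecated property names; "default", "research" and the ACL are placeholders.
        conf.setStrings("mapred.queue.names", "default", "research");
        conf.set("mapred.queue.research.acl-submit-job", "alice,bob analysts");

        for (String name : conf.getStrings("mapred.queue.names")) {
            // Missing ACLs fall back to "*" here purely for illustration.
            AccessControlList submitAcl = new AccessControlList(
                conf.get("mapred.queue." + name + ".acl-submit-job", "*"));
            System.out.println(name + " submit ACL: " + submitAcl);
        }
    }
}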

Aggregations

AccessControlList (org.apache.hadoop.security.authorize.AccessControlList): 62
Configuration (org.apache.hadoop.conf.Configuration): 20
HashMap (java.util.HashMap): 18
Test (org.junit.Test): 15
JobACL (org.apache.hadoop.mapreduce.JobACL): 10
UserGroupInformation (org.apache.hadoop.security.UserGroupInformation): 10
ServletContext (javax.servlet.ServletContext): 5
ApplicationClientProtocol (org.apache.hadoop.yarn.api.ApplicationClientProtocol): 5
GetApplicationReportRequest (org.apache.hadoop.yarn.api.protocolrecords.GetApplicationReportRequest): 5
KillApplicationRequest (org.apache.hadoop.yarn.api.protocolrecords.KillApplicationRequest): 5
ApplicationId (org.apache.hadoop.yarn.api.records.ApplicationId): 5
IOException (java.io.IOException): 4
URI (java.net.URI): 4
ArrayList (java.util.ArrayList): 4
Map (java.util.Map): 4
HttpServletRequest (javax.servlet.http.HttpServletRequest): 4
HttpServletResponse (javax.servlet.http.HttpServletResponse): 4
ApplicationAccessType (org.apache.hadoop.yarn.api.records.ApplicationAccessType): 4
QueueACL (org.apache.hadoop.yarn.api.records.QueueACL): 3
AccessType (org.apache.hadoop.yarn.security.AccessType): 3