
Example 76 with Set

use of java.util.Set in project hbase by apache.

the class AccessController method checkPermissions.

@Override
public void checkPermissions(RpcController controller, AccessControlProtos.CheckPermissionsRequest request, RpcCallback<AccessControlProtos.CheckPermissionsResponse> done) {
    Permission[] permissions = new Permission[request.getPermissionCount()];
    for (int i = 0; i < request.getPermissionCount(); i++) {
        permissions[i] = AccessControlUtil.toPermission(request.getPermission(i));
    }
    AccessControlProtos.CheckPermissionsResponse response = null;
    try {
        User user = RpcServer.getRequestUser();
        TableName tableName = regionEnv.getRegion().getTableDesc().getTableName();
        for (Permission permission : permissions) {
            if (permission instanceof TablePermission) {
                // Check table permissions
                TablePermission tperm = (TablePermission) permission;
                for (Action action : permission.getActions()) {
                    if (!tperm.getTableName().equals(tableName)) {
                        throw new CoprocessorException(AccessController.class, String.format("This method " + "can only execute at the table specified in TablePermission. " + "Table of the region:%s , requested table:%s", tableName, tperm.getTableName()));
                    }
                    Map<byte[], Set<byte[]>> familyMap = new TreeMap<>(Bytes.BYTES_COMPARATOR);
                    if (tperm.getFamily() != null) {
                        if (tperm.getQualifier() != null) {
                            Set<byte[]> qualifiers = Sets.newTreeSet(Bytes.BYTES_COMPARATOR);
                            qualifiers.add(tperm.getQualifier());
                            familyMap.put(tperm.getFamily(), qualifiers);
                        } else {
                            familyMap.put(tperm.getFamily(), null);
                        }
                    }
                    AuthResult result = permissionGranted("checkPermissions", user, action, regionEnv, familyMap);
                    logResult(result);
                    if (!result.isAllowed()) {
                        // Even if passive we need to throw an exception here; we support checking
                        // effective permissions, so throw unconditionally
                        throw new AccessDeniedException("Insufficient permissions (table=" + tableName + (familyMap.size() > 0 ? ", family: " + result.toFamilyString() : "") + ", action=" + action.toString() + ")");
                    }
                }
            } else {
                for (Action action : permission.getActions()) {
                    AuthResult result;
                    if (authManager.authorize(user, action)) {
                        result = AuthResult.allow("checkPermissions", "Global action allowed", user, action, null, null);
                    } else {
                        result = AuthResult.deny("checkPermissions", "Global action denied", user, action, null, null);
                    }
                    logResult(result);
                    if (!result.isAllowed()) {
                        // Even if passive we need to throw an exception here; we support checking
                        // effective permissions, so throw unconditionally
                        throw new AccessDeniedException("Insufficient permissions (action=" + action.toString() + ")");
                    }
                }
            }
        }
        response = AccessControlProtos.CheckPermissionsResponse.getDefaultInstance();
    } catch (IOException ioe) {
        CoprocessorRpcUtils.setControllerException(controller, ioe);
    }
    done.run(response);
}
Also used : PrivilegedExceptionAction(java.security.PrivilegedExceptionAction) Action(org.apache.hadoop.hbase.security.access.Permission.Action) AccessDeniedException(org.apache.hadoop.hbase.security.AccessDeniedException) User(org.apache.hadoop.hbase.security.User) Set(java.util.Set) TreeSet(java.util.TreeSet) ImmutableSet(com.google.common.collect.ImmutableSet) IOException(java.io.IOException) DoNotRetryIOException(org.apache.hadoop.hbase.DoNotRetryIOException) TreeMap(java.util.TreeMap) ReplicationEndpoint(org.apache.hadoop.hbase.replication.ReplicationEndpoint) AccessControlProtos(org.apache.hadoop.hbase.protobuf.generated.AccessControlProtos) TableName(org.apache.hadoop.hbase.TableName) CoprocessorException(org.apache.hadoop.hbase.coprocessor.CoprocessorException)
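
One detail worth calling out in this example: byte[] keys and elements. Arrays inherit identity-based equals/hashCode from Object, so hash-based collections would treat two arrays with the same contents as different entries; the snippet therefore uses TreeMap and Sets.newTreeSet ordered by Bytes.BYTES_COMPARATOR. Below is a minimal sketch of the idiom, assuming HBase's Bytes and Guava's Sets on the classpath (the class name is illustrative):

import java.util.Map;
import java.util.Set;
import java.util.TreeMap;

import com.google.common.collect.Sets;
import org.apache.hadoop.hbase.util.Bytes;

public class ByteKeyedSetDemo {
    public static void main(String[] args) {
        // byte[] has identity-based equals/hashCode, so a HashSet<byte[]>
        // would keep two arrays with identical contents as two distinct
        // elements. Tree-based collections ordered by Bytes.BYTES_COMPARATOR
        // compare array contents instead.
        Set<byte[]> qualifiers = Sets.newTreeSet(Bytes.BYTES_COMPARATOR);
        qualifiers.add(Bytes.toBytes("q1"));
        qualifiers.add(Bytes.toBytes("q1")); // same contents: deduplicated
        System.out.println(qualifiers.size()); // 1

        Map<byte[], Set<byte[]>> familyMap = new TreeMap<>(Bytes.BYTES_COMPARATOR);
        familyMap.put(Bytes.toBytes("cf"), qualifiers);
        System.out.println(familyMap.containsKey(Bytes.toBytes("cf"))); // true
    }
}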

Example 77 with Set

use of java.util.Set in project hadoop by apache.

the class TestRMAppAttemptTransitions method testScheduleTransitionReplaceAMContainerRequestWithDefaults.

@SuppressWarnings("unchecked")
@Test
public void testScheduleTransitionReplaceAMContainerRequestWithDefaults() {
    YarnScheduler mockScheduler = mock(YarnScheduler.class);
    when(mockScheduler.allocate(any(ApplicationAttemptId.class), any(List.class), any(List.class), any(List.class), any(List.class), any(ContainerUpdates.class))).thenAnswer(new Answer<Allocation>() {

        @SuppressWarnings("rawtypes")
        @Override
        public Allocation answer(InvocationOnMock invocation) throws Throwable {
            ResourceRequest rr = (ResourceRequest) ((List) invocation.getArguments()[1]).get(0);
            // capacity shouldn't have changed
            assertEquals(Resource.newInstance(3333, 1), rr.getCapability());
            assertEquals("label-expression", rr.getNodeLabelExpression());
            // priority, #containers, and relax-locality will be changed
            assertEquals(RMAppAttemptImpl.AM_CONTAINER_PRIORITY, rr.getPriority());
            assertEquals(1, rr.getNumContainers());
            assertEquals(ResourceRequest.ANY, rr.getResourceName());
            // just return an empty allocation
            List l = new ArrayList();
            Set s = new HashSet();
            return new Allocation(l, Resources.none(), s, s, l);
        }
    });
    // create an attempt.
    applicationAttempt = new RMAppAttemptImpl(applicationAttempt.getAppAttemptId(), spyRMContext, scheduler, masterService, submissionContext, new Configuration(), ResourceRequest.newInstance(Priority.UNDEFINED, "host1", Resource.newInstance(3333, 1), 3, false, "label-expression"), application);
    new RMAppAttemptImpl.ScheduleTransition().transition((RMAppAttemptImpl) applicationAttempt, null);
}
Also used : Set(java.util.Set) HashSet(java.util.HashSet) Configuration(org.apache.hadoop.conf.Configuration) YarnConfiguration(org.apache.hadoop.yarn.conf.YarnConfiguration) ContainerUpdates(org.apache.hadoop.yarn.server.resourcemanager.scheduler.ContainerUpdates) ArrayList(java.util.ArrayList) ApplicationAttemptId(org.apache.hadoop.yarn.api.records.ApplicationAttemptId) Allocation(org.apache.hadoop.yarn.server.resourcemanager.scheduler.Allocation) YarnScheduler(org.apache.hadoop.yarn.server.resourcemanager.scheduler.YarnScheduler) InvocationOnMock(org.mockito.invocation.InvocationOnMock) ArrayList(java.util.ArrayList) List(java.util.List) ResourceRequest(org.apache.hadoop.yarn.api.records.ResourceRequest) HashSet(java.util.HashSet) Test(org.junit.Test)
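
The core of this test is Mockito's thenAnswer: the stub inspects the live call arguments (here the ResourceRequest built by ScheduleTransition) before returning a canned value (an empty Allocation). A stripped-down sketch of the same stubbing pattern, using a hypothetical Scheduler interface in place of YarnScheduler:

import static org.mockito.Mockito.anyInt;
import static org.mockito.Mockito.anyString;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;

import org.mockito.invocation.InvocationOnMock;
import org.mockito.stubbing.Answer;

public class AnswerDemo {
    // Hypothetical collaborator; stands in for YarnScheduler.allocate(...).
    interface Scheduler {
        int allocate(String host, int numContainers);
    }

    public static void main(String[] args) {
        Scheduler scheduler = mock(Scheduler.class);
        // thenAnswer runs this callback on every stubbed call, with access to
        // the live arguments, exactly how the test above asserts on the
        // ResourceRequest before returning an empty Allocation.
        when(scheduler.allocate(anyString(), anyInt())).thenAnswer(new Answer<Integer>() {
            @Override
            public Integer answer(InvocationOnMock invocation) {
                int requested = (Integer) invocation.getArguments()[1];
                return Math.min(requested, 1); // grant at most one container
            }
        });
        System.out.println(scheduler.allocate("host1", 3)); // prints 1
    }
}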

Example 78 with Set

use of java.util.Set in project hadoop by apache.

the class ApplicationEntityReader method createFilterListForColsOfInfoFamily.

/**
   * Creates a filter list which indicates that only some of the column
   * qualifiers in the info column family will be returned in the result.
   *
   * @return filter list.
   * @throws IOException if any problem occurs while creating filter list.
   */
private FilterList createFilterListForColsOfInfoFamily() throws IOException {
    FilterList infoFamilyColsFilter = new FilterList(Operator.MUST_PASS_ONE);
    // Add filters for each fixed column in the application table.
    updateFixedColumns(infoFamilyColsFilter);
    EnumSet<Field> fieldsToRetrieve = getDataToRetrieve().getFieldsToRetrieve();
    // If INFO field has to be retrieved, add a filter for fetching columns
    // with INFO column prefix.
    if (hasField(fieldsToRetrieve, Field.INFO)) {
        infoFamilyColsFilter.addFilter(TimelineFilterUtils.createHBaseQualifierFilter(CompareOp.EQUAL, ApplicationColumnPrefix.INFO));
    }
    TimelineFilterList relatesTo = getFilters().getRelatesTo();
    if (hasField(fieldsToRetrieve, Field.RELATES_TO)) {
        // If RELATES_TO field has to be retrieved, add a filter for fetching
        // columns with RELATES_TO column prefix.
        infoFamilyColsFilter.addFilter(TimelineFilterUtils.createHBaseQualifierFilter(CompareOp.EQUAL, ApplicationColumnPrefix.RELATES_TO));
    } else if (relatesTo != null && !relatesTo.getFilterList().isEmpty()) {
        // Even if fieldsToRetrieve does not contain RELATES_TO, we still
        // need to have a filter to fetch some of the column qualifiers if
        // relatesTo filters are specified. relatesTo filters will then be
        // matched after fetching rows from HBase.
        Set<String> relatesToCols = TimelineFilterUtils.fetchColumnsFromFilterList(relatesTo);
        infoFamilyColsFilter.addFilter(createFiltersFromColumnQualifiers(ApplicationColumnPrefix.RELATES_TO, relatesToCols));
    }
    TimelineFilterList isRelatedTo = getFilters().getIsRelatedTo();
    if (hasField(fieldsToRetrieve, Field.IS_RELATED_TO)) {
        // If IS_RELATED_TO field has to be retrieved, add a filter for fetching
        // columns with IS_RELATED_TO column prefix.
        infoFamilyColsFilter.addFilter(TimelineFilterUtils.createHBaseQualifierFilter(CompareOp.EQUAL, ApplicationColumnPrefix.IS_RELATED_TO));
    } else if (isRelatedTo != null && !isRelatedTo.getFilterList().isEmpty()) {
        // Even if fieldsToRetrieve does not contain IS_RELATED_TO, we still
        // need to have a filter to fetch some of the column qualifiers if
        // isRelatedTo filters are specified. isRelatedTo filters will then be
        // matched after fetching rows from HBase.
        Set<String> isRelatedToCols = TimelineFilterUtils.fetchColumnsFromFilterList(isRelatedTo);
        infoFamilyColsFilter.addFilter(createFiltersFromColumnQualifiers(ApplicationColumnPrefix.IS_RELATED_TO, isRelatedToCols));
    }
    TimelineFilterList eventFilters = getFilters().getEventFilters();
    if (hasField(fieldsToRetrieve, Field.EVENTS)) {
        // If EVENTS field has to be retrieved, add a filter for fetching columns
        // with EVENT column prefix.
        infoFamilyColsFilter.addFilter(TimelineFilterUtils.createHBaseQualifierFilter(CompareOp.EQUAL, ApplicationColumnPrefix.EVENT));
    } else if (eventFilters != null && !eventFilters.getFilterList().isEmpty()) {
        // Even if fieldsToRetrieve does not contain EVENTS, we still need to
        // have a filter to fetch some of the column qualifiers on the basis of
        // event filters specified. Event filters will then be matched after
        // fetching rows from HBase.
        Set<String> eventCols = TimelineFilterUtils.fetchColumnsFromFilterList(eventFilters);
        infoFamilyColsFilter.addFilter(createFiltersFromColumnQualifiers(ApplicationColumnPrefix.EVENT, eventCols));
    }
    return infoFamilyColsFilter;
}
Also used : Field(org.apache.hadoop.yarn.server.timelineservice.storage.TimelineReader.Field) EnumSet(java.util.EnumSet) Set(java.util.Set) TimelineFilterList(org.apache.hadoop.yarn.server.timelineservice.reader.filter.TimelineFilterList) FilterList(org.apache.hadoop.hbase.filter.FilterList) TimelineFilterList(org.apache.hadoop.yarn.server.timelineservice.reader.filter.TimelineFilterList)
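
For readers less familiar with the HBase filter API: Operator.MUST_PASS_ONE gives a FilterList OR semantics, so the method builds a union of per-prefix qualifier filters. Here is a hedged sketch of the underlying client calls; the class name, the "info" family, and the "e!"/"i!" qualifier prefixes are illustrative, not taken from the timeline service code:

import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.filter.BinaryPrefixComparator;
import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp;
import org.apache.hadoop.hbase.filter.FilterList;
import org.apache.hadoop.hbase.filter.FilterList.Operator;
import org.apache.hadoop.hbase.filter.QualifierFilter;
import org.apache.hadoop.hbase.util.Bytes;

public class FilterListDemo {
    public static Scan buildInfoScan() {
        // MUST_PASS_ONE = OR semantics: a cell is returned if any child
        // filter accepts it. Each child keeps qualifiers with one prefix.
        FilterList infoColsFilter = new FilterList(Operator.MUST_PASS_ONE);
        infoColsFilter.addFilter(new QualifierFilter(CompareOp.EQUAL,
            new BinaryPrefixComparator(Bytes.toBytes("e!"))));
        infoColsFilter.addFilter(new QualifierFilter(CompareOp.EQUAL,
            new BinaryPrefixComparator(Bytes.toBytes("i!"))));

        Scan scan = new Scan();
        scan.addFamily(Bytes.toBytes("info"));
        scan.setFilter(infoColsFilter);
        return scan;
    }
}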

Example 79 with Set

use of java.util.Set in project hadoop by apache.

the class GenericEntityReader method createFilterListForColsOfInfoFamily.

/**
   * Creates a filter list which indicates that only some of the column
   * qualifiers in the info column family will be returned in the result.
   *
   * @return filter list.
   * @throws IOException if any problem occurs while creating filter list.
   */
private FilterList createFilterListForColsOfInfoFamily() throws IOException {
    FilterList infoFamilyColsFilter = new FilterList(Operator.MUST_PASS_ONE);
    // Add filters for each column in entity table.
    updateFixedColumns(infoFamilyColsFilter);
    EnumSet<Field> fieldsToRetrieve = getDataToRetrieve().getFieldsToRetrieve();
    // If INFO field has to be retrieved, add a filter for fetching columns
    // with INFO column prefix.
    if (hasField(fieldsToRetrieve, Field.INFO)) {
        infoFamilyColsFilter.addFilter(TimelineFilterUtils.createHBaseQualifierFilter(CompareOp.EQUAL, EntityColumnPrefix.INFO));
    }
    TimelineFilterList relatesTo = getFilters().getRelatesTo();
    if (hasField(fieldsToRetrieve, Field.RELATES_TO)) {
        // If RELATES_TO field has to be retrieved, add a filter for fetching
        // columns with RELATES_TO column prefix.
        infoFamilyColsFilter.addFilter(TimelineFilterUtils.createHBaseQualifierFilter(CompareOp.EQUAL, EntityColumnPrefix.RELATES_TO));
    } else if (relatesTo != null && !relatesTo.getFilterList().isEmpty()) {
        // Even if fieldsToRetrieve does not contain RELATES_TO, we still
        // need to have a filter to fetch some of the column qualifiers if
        // relatesTo filters are specified. relatesTo filters will then be
        // matched after fetching rows from HBase.
        Set<String> relatesToCols = TimelineFilterUtils.fetchColumnsFromFilterList(relatesTo);
        infoFamilyColsFilter.addFilter(createFiltersFromColumnQualifiers(EntityColumnPrefix.RELATES_TO, relatesToCols));
    }
    TimelineFilterList isRelatedTo = getFilters().getIsRelatedTo();
    if (hasField(fieldsToRetrieve, Field.IS_RELATED_TO)) {
        // If IS_RELATED_TO field has to be retrieved, add a filter for fetching
        // columns with IS_RELATED_TO column prefix.
        infoFamilyColsFilter.addFilter(TimelineFilterUtils.createHBaseQualifierFilter(CompareOp.EQUAL, EntityColumnPrefix.IS_RELATED_TO));
    } else if (isRelatedTo != null && !isRelatedTo.getFilterList().isEmpty()) {
        // Even if fieldsToRetrieve does not contain IS_RELATED_TO, we still
        // need to have a filter to fetch some of the column qualifiers if
        // isRelatedTo filters are specified. isRelatedTo filters will then be
        // matched after fetching rows from HBase.
        Set<String> isRelatedToCols = TimelineFilterUtils.fetchColumnsFromFilterList(isRelatedTo);
        infoFamilyColsFilter.addFilter(createFiltersFromColumnQualifiers(EntityColumnPrefix.IS_RELATED_TO, isRelatedToCols));
    }
    TimelineFilterList eventFilters = getFilters().getEventFilters();
    if (hasField(fieldsToRetrieve, Field.EVENTS)) {
        // If EVENTS field has to be retrieved, add a filter for fetching columns
        // with EVENT column prefix.
        infoFamilyColsFilter.addFilter(TimelineFilterUtils.createHBaseQualifierFilter(CompareOp.EQUAL, EntityColumnPrefix.EVENT));
    } else if (eventFilters != null && !eventFilters.getFilterList().isEmpty()) {
        // Even if fieldsToRetrieve does not contain EVENTS, we still need to
        // have a filter to fetch some of the column qualifiers on the basis of
        // event filters specified. Event filters will then be matched after
        // fetching rows from HBase.
        Set<String> eventCols = TimelineFilterUtils.fetchColumnsFromFilterList(eventFilters);
        infoFamilyColsFilter.addFilter(createFiltersFromColumnQualifiers(EntityColumnPrefix.EVENT, eventCols));
    }
    return infoFamilyColsFilter;
}
Also used : Field(org.apache.hadoop.yarn.server.timelineservice.storage.TimelineReader.Field) EnumSet(java.util.EnumSet) Set(java.util.Set) TimelineFilterList(org.apache.hadoop.yarn.server.timelineservice.reader.filter.TimelineFilterList) FilterList(org.apache.hadoop.hbase.filter.FilterList) TimelineFilterList(org.apache.hadoop.yarn.server.timelineservice.reader.filter.TimelineFilterList)
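
This method is the entity-table twin of Example 78, with EntityColumnPrefix in place of ApplicationColumnPrefix. The Set worth noting here is EnumSet<Field>, which backs the repeated hasField checks: EnumSet stores its members in a bit vector, so contains() is effectively a single bit test. A small self-contained sketch; the Field enum and hasField helper below are hypothetical mirrors of the ones used above:

import java.util.EnumSet;

public class EnumSetDemo {
    // Hypothetical mirror of the timeline reader's Field enum.
    enum Field { ALL, INFO, RELATES_TO, IS_RELATED_TO, EVENTS, METRICS }

    // Hypothetical mirror of the hasField helper used above.
    static boolean hasField(EnumSet<Field> fields, Field field) {
        return fields.contains(Field.ALL) || fields.contains(field);
    }

    public static void main(String[] args) {
        // EnumSet membership checks are cheap bit tests, a good fit for the
        // repeated hasField calls in the methods above.
        EnumSet<Field> toRetrieve = EnumSet.of(Field.INFO, Field.EVENTS);
        System.out.println(hasField(toRetrieve, Field.INFO));       // true
        System.out.println(hasField(toRetrieve, Field.RELATES_TO)); // false
    }
}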

Example 80 with Set

use of java.util.Set in project hbase by apache.

the class FavoredNodeAssignmentHelper method generateMissingFavoredNodeMultiRack.

/*
   * Generates a missing favored node (FN) based on the input favoredNodes and the nodes to be
   * skipped.
   *
   * Examine the current rack layout of the favored nodes and the nodes to be excluded, then pick
   * a random node that is consistent with HDFS block placement. E.g.: if the existing nodes are
   * all on one rack, generate one from another rack. We exclude as many candidates as possible
   * so the random selection is likely to find a valid node within a few iterations, ideally one.
   */
private ServerName generateMissingFavoredNodeMultiRack(List<ServerName> favoredNodes, List<ServerName> excludeNodes) throws IOException {
    Set<String> racks = Sets.newHashSet();
    Map<String, Set<ServerName>> rackToFNMapping = new HashMap<>();
    // Let's understand the current rack distribution of the FNs
    for (ServerName sn : favoredNodes) {
        String rack = getRackOfServer(sn);
        racks.add(rack);
        Set<ServerName> serversInRack = rackToFNMapping.get(rack);
        if (serversInRack == null) {
            serversInRack = Sets.newHashSet();
            rackToFNMapping.put(rack, serversInRack);
        }
        serversInRack.add(sn);
    }
    // What racks should be skipped while getting a FN?
    Set<String> skipRackSet = Sets.newHashSet();
    /*
     * If all the existing FNs are on the same rack, we don't want to generate another FN on that
     * rack: if the rack fails, the region would be unavailable.
     */
    if (racks.size() == 1 && favoredNodes.size() > 1) {
        skipRackSet.add(racks.iterator().next());
    }
    /*
     * If there are no free nodes on an existing rack, we should skip that rack too. This reduces
     * the number of iterations needed for FN selection.
     */
    for (String rack : racks) {
        if (getServersFromRack(rack) != null && rackToFNMapping.get(rack).size() == getServersFromRack(rack).size()) {
            skipRackSet.add(rack);
        }
    }
    Set<ServerName> favoredNodeSet = Sets.newHashSet(favoredNodes);
    if (excludeNodes != null && excludeNodes.size() > 0) {
        favoredNodeSet.addAll(excludeNodes);
    }
    /*
     * Let's get a random rack, excluding those in skipRackSet, and generate a random FN from that rack.
     */
    int i = 0;
    Set<String> randomRacks = Sets.newHashSet();
    ServerName newServer = null;
    do {
        String randomRack = this.getOneRandomRack(skipRackSet);
        newServer = this.getOneRandomServer(randomRack, favoredNodeSet);
        randomRacks.add(randomRack);
        i++;
    } while ((i < MAX_ATTEMPTS_FN_GENERATION) && (newServer == null));
    if (newServer == null) {
        if (LOG.isTraceEnabled()) {
            LOG.trace(String.format("Unable to generate additional favored nodes for %s after " + "considering racks %s and skip rack %s with a unique rack list of %s and rack " + "to RS map of %s and RS to rack map of %s", StringUtils.join(favoredNodes, ","), randomRacks, skipRackSet, uniqueRackList, rackToRegionServerMap, regionServerToRackMap));
        }
        throw new IOException(" Unable to generate additional favored nodes for " + StringUtils.join(favoredNodes, ","));
    }
    return newServer;
}
Also used : HashSet(java.util.HashSet) Set(java.util.Set) HashMap(java.util.HashMap) ServerName(org.apache.hadoop.hbase.ServerName) IOException(java.io.IOException) HBaseIOException(org.apache.hadoop.hbase.HBaseIOException)
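
The rackToFNMapping loop above uses the pre-Java-8 get / null-check / put pattern to group servers by rack. Map.computeIfAbsent expresses the same grouping in one call, sketched below with plain strings standing in for ServerName and getRackOfServer (both stand-ins are hypothetical):

import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;

public class RackGroupingDemo {
    // Hypothetical stand-in for getRackOfServer(ServerName).
    static String rackOf(String server) {
        return server.substring(0, server.indexOf('/'));
    }

    public static void main(String[] args) {
        String[] favoredNodes = {"rack1/s1", "rack1/s2", "rack2/s3"};
        Map<String, Set<String>> rackToFNMapping = new HashMap<>();
        for (String sn : favoredNodes) {
            // computeIfAbsent collapses the get / null-check / put sequence
            // from the loop above into a single call.
            rackToFNMapping.computeIfAbsent(rackOf(sn), r -> new HashSet<>()).add(sn);
        }
        System.out.println(rackToFNMapping); // e.g. {rack1=[s1, s2], rack2=[s3]}
    }
}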

Aggregations

Set (java.util.Set): 6789 usages
HashSet (java.util.HashSet): 4372 usages
HashMap (java.util.HashMap): 2090 usages
Map (java.util.Map): 1865 usages
Iterator (java.util.Iterator): 1774 usages
ArrayList (java.util.ArrayList): 1113 usages
List (java.util.List): 980 usages
Test (org.junit.Test): 920 usages
TreeSet (java.util.TreeSet): 536 usages
IOException (java.io.IOException): 501 usages
SSOException (com.iplanet.sso.SSOException): 467 usages
LinkedHashSet (java.util.LinkedHashSet): 418 usages
SMSException (com.sun.identity.sm.SMSException): 347 usages
IdRepoException (com.sun.identity.idm.IdRepoException): 268 usages
Collection (java.util.Collection): 259 usages
ImmutableSet (com.google.common.collect.ImmutableSet): 256 usages
File (java.io.File): 245 usages
SSOToken (com.iplanet.sso.SSOToken): 226 usages
Collectors (java.util.stream.Collectors): 219 usages
Test (org.testng.annotations.Test): 209 usages