
Example 16 with AccessDeniedException

Use of org.apache.hadoop.hbase.security.AccessDeniedException in project hbase by apache.

From class TestJMXConnectorServer, method testHMConnectorServerWhenShutdownCluster:

/**
   * Validates the HMaster's ConnectorServer after an unauthorized shutdown call.
   */
@Test(timeout = 180000)
public void testHMConnectorServerWhenShutdownCluster() throws Exception {
    conf.set(CoprocessorHost.MASTER_COPROCESSOR_CONF_KEY, JMXListener.class.getName() + "," + MyAccessController.class.getName());
    conf.setInt("master.rmi.registry.port", rmiRegistryPort);
    UTIL.startMiniCluster();
    admin = UTIL.getConnection().getAdmin();
    boolean accessDenied = false;
    try {
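        // hasAccess is the test fixture's flag (presumably consulted by MyAccessController);
        // clearing it should cause the shutdown call below to be denied.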
        hasAccess = false;
        LOG.info("Stopping HMaster...");
        admin.shutdown();
    } catch (AccessDeniedException e) {
        LOG.error("Exception occured while stopping HMaster. ", e);
        accessDenied = true;
    }
    Assert.assertTrue(accessDenied);
    // Check whether HMaster JMX Connector server can be connected
    JMXConnector connector = null;
    try {
        connector = JMXConnectorFactory.connect(JMXListener.buildJMXServiceURL(rmiRegistryPort, rmiRegistryPort));
    } catch (IOException e) {
        if (e.getCause() instanceof ServiceUnavailableException) {
            Assert.fail("Can't connect to HMaster ConnectorServer.");
        }
    }
    Assert.assertNotNull("JMXConnector should not be null.", connector);
    connector.close();
}
Also used: AccessDeniedException (org.apache.hadoop.hbase.security.AccessDeniedException), JMXConnector (javax.management.remote.JMXConnector), IOException (java.io.IOException), ServiceUnavailableException (javax.naming.ServiceUnavailableException), Test (org.junit.Test)
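For context, the MyAccessController registered above is what turns the shutdown() call into an AccessDeniedException. A minimal sketch of that idea, assuming the HBase 2.x MasterCoprocessor/MasterObserver interfaces (on older releases one would extend BaseMasterObserver instead); the class name is illustrative, not the test's actual coprocessor:

import java.io.IOException;
import java.util.Optional;
import org.apache.hadoop.hbase.coprocessor.MasterCoprocessor;
import org.apache.hadoop.hbase.coprocessor.MasterCoprocessorEnvironment;
import org.apache.hadoop.hbase.coprocessor.MasterObserver;
import org.apache.hadoop.hbase.coprocessor.ObserverContext;
import org.apache.hadoop.hbase.security.AccessDeniedException;

/** Hypothetical coprocessor that vetoes every cluster shutdown request. */
public class DenyShutdownObserver implements MasterCoprocessor, MasterObserver {

    @Override
    public Optional<MasterObserver> getMasterObserver() {
        return Optional.of(this);
    }

    @Override
    public void preShutdown(ObserverContext<MasterCoprocessorEnvironment> ctx) throws IOException {
        // The exception thrown here propagates back to the client as the
        // AccessDeniedException the test asserts on.
        throw new AccessDeniedException("shutdown is not permitted");
    }
}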

Example 17 with AccessDeniedException

Use of org.apache.hadoop.hbase.security.AccessDeniedException in project hbase by apache.

From class HBaseFsck, method preCheckPermission:

private void preCheckPermission() throws IOException, AccessDeniedException {
    if (shouldIgnorePreCheckPermission()) {
        return;
    }
    Path hbaseDir = FSUtils.getRootDir(getConf());
    FileSystem fs = hbaseDir.getFileSystem(getConf());
    UserProvider userProvider = UserProvider.instantiate(getConf());
    UserGroupInformation ugi = userProvider.getCurrent().getUGI();
    FileStatus[] files = fs.listStatus(hbaseDir);
    for (FileStatus file : files) {
        try {
            FSUtils.checkAccess(ugi, file, FsAction.WRITE);
        } catch (AccessDeniedException ace) {
            LOG.warn("Got AccessDeniedException when preCheckPermission ", ace);
            errors.reportError(ERROR_CODE.WRONG_USAGE, "Current user " + ugi.getUserName() + " does not have write perms to " + file.getPath() + ". Please rerun hbck as hdfs user " + file.getOwner());
            throw ace;
        }
    }
}
Also used: Path (org.apache.hadoop.fs.Path), AccessDeniedException (org.apache.hadoop.hbase.security.AccessDeniedException), FileStatus (org.apache.hadoop.fs.FileStatus), UserProvider (org.apache.hadoop.hbase.security.UserProvider), FileSystem (org.apache.hadoop.fs.FileSystem), MasterFileSystem (org.apache.hadoop.hbase.master.MasterFileSystem), HRegionFileSystem (org.apache.hadoop.hbase.regionserver.HRegionFileSystem), UserGroupInformation (org.apache.hadoop.security.UserGroupInformation)
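FSUtils.checkAccess is an HBase-internal helper. Plain Hadoop (2.6+) exposes the same pre-check via FileSystem.access, which throws org.apache.hadoop.security.AccessControlException rather than HBase's AccessDeniedException. A minimal, self-contained sketch of the same write pre-check; the class and method names here are illustrative:

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.security.AccessControlException;

public final class WriteAccessPreCheck {

    /** Fails fast if the current user cannot write to every entry under rootDir. */
    public static void checkWritable(Configuration conf, Path rootDir) throws IOException {
        FileSystem fs = rootDir.getFileSystem(conf);
        for (FileStatus status : fs.listStatus(rootDir)) {
            try {
                // Throws AccessControlException when the caller lacks WRITE permission.
                fs.access(status.getPath(), FsAction.WRITE);
            } catch (AccessControlException ace) {
                throw new IOException("Current user has no write access to " + status.getPath()
                    + "; rerun as " + status.getOwner(), ace);
            }
        }
    }
}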

Example 18 with AccessDeniedException

Use of org.apache.hadoop.hbase.security.AccessDeniedException in project hbase by apache.

From class HBaseFsck, method exec:

public HBaseFsck exec(ExecutorService exec, String[] args) throws KeeperException, IOException, ServiceException, InterruptedException {
    long sleepBeforeRerun = DEFAULT_SLEEP_BEFORE_RERUN;
    boolean checkCorruptHFiles = false;
    boolean sidelineCorruptHFiles = false;
    // Process command-line args.
    for (int i = 0; i < args.length; i++) {
        String cmd = args[i];
        if (cmd.equals("-help") || cmd.equals("-h")) {
            return printUsageAndExit();
        } else if (cmd.equals("-details")) {
            setDisplayFullReport();
        } else if (cmd.equals("-exclusive")) {
            setForceExclusive();
        } else if (cmd.equals("-timelag")) {
            if (i == args.length - 1) {
                errors.reportError(ERROR_CODE.WRONG_USAGE, "HBaseFsck: -timelag needs a value.");
                return printUsageAndExit();
            }
            try {
                long timelag = Long.parseLong(args[i + 1]);
                setTimeLag(timelag);
            } catch (NumberFormatException e) {
                errors.reportError(ERROR_CODE.WRONG_USAGE, "-timelag needs a numeric value.");
                return printUsageAndExit();
            }
            i++;
        } else if (cmd.equals("-sleepBeforeRerun")) {
            if (i == args.length - 1) {
                errors.reportError(ERROR_CODE.WRONG_USAGE, "HBaseFsck: -sleepBeforeRerun needs a value.");
                return printUsageAndExit();
            }
            try {
                sleepBeforeRerun = Long.parseLong(args[i + 1]);
            } catch (NumberFormatException e) {
                errors.reportError(ERROR_CODE.WRONG_USAGE, "-sleepBeforeRerun needs a numeric value.");
                return printUsageAndExit();
            }
            i++;
        } else if (cmd.equals("-sidelineDir")) {
            if (i == args.length - 1) {
                errors.reportError(ERROR_CODE.WRONG_USAGE, "HBaseFsck: -sidelineDir needs a value.");
                return printUsageAndExit();
            }
            i++;
            setSidelineDir(args[i]);
        } else if (cmd.equals("-fix")) {
            errors.reportError(ERROR_CODE.WRONG_USAGE, "This option is deprecated, please use  -fixAssignments instead.");
            setFixAssignments(true);
        } else if (cmd.equals("-fixAssignments")) {
            setFixAssignments(true);
        } else if (cmd.equals("-fixMeta")) {
            setFixMeta(true);
        } else if (cmd.equals("-noHdfsChecking")) {
            setCheckHdfs(false);
        } else if (cmd.equals("-fixHdfsHoles")) {
            setFixHdfsHoles(true);
        } else if (cmd.equals("-fixHdfsOrphans")) {
            setFixHdfsOrphans(true);
        } else if (cmd.equals("-fixTableOrphans")) {
            setFixTableOrphans(true);
        } else if (cmd.equals("-fixHdfsOverlaps")) {
            setFixHdfsOverlaps(true);
        } else if (cmd.equals("-fixVersionFile")) {
            setFixVersionFile(true);
        } else if (cmd.equals("-sidelineBigOverlaps")) {
            setSidelineBigOverlaps(true);
        } else if (cmd.equals("-fixSplitParents")) {
            setFixSplitParents(true);
        } else if (cmd.equals("-removeParents")) {
            setRemoveParents(true);
        } else if (cmd.equals("-ignorePreCheckPermission")) {
            setIgnorePreCheckPermission(true);
        } else if (cmd.equals("-checkCorruptHFiles")) {
            checkCorruptHFiles = true;
        } else if (cmd.equals("-sidelineCorruptHFiles")) {
            sidelineCorruptHFiles = true;
        } else if (cmd.equals("-fixReferenceFiles")) {
            setFixReferenceFiles(true);
        } else if (cmd.equals("-fixHFileLinks")) {
            setFixHFileLinks(true);
        } else if (cmd.equals("-fixEmptyMetaCells")) {
            setFixEmptyMetaCells(true);
        } else if (cmd.equals("-repair")) {
            // this attempts to merge overlapping hdfs regions, needs testing
            // under load
            setFixHdfsHoles(true);
            setFixHdfsOrphans(true);
            setFixMeta(true);
            setFixAssignments(true);
            setFixHdfsOverlaps(true);
            setFixVersionFile(true);
            setSidelineBigOverlaps(true);
            setFixSplitParents(false);
            setCheckHdfs(true);
            setFixReferenceFiles(true);
            setFixHFileLinks(true);
        } else if (cmd.equals("-repairHoles")) {
            // this will make all missing hdfs regions available but may lose data
            setFixHdfsHoles(true);
            setFixHdfsOrphans(false);
            setFixMeta(true);
            setFixAssignments(true);
            setFixHdfsOverlaps(false);
            setSidelineBigOverlaps(false);
            setFixSplitParents(false);
            setCheckHdfs(true);
        } else if (cmd.equals("-maxOverlapsToSideline")) {
            if (i == args.length - 1) {
                errors.reportError(ERROR_CODE.WRONG_USAGE, "-maxOverlapsToSideline needs a numeric value argument.");
                return printUsageAndExit();
            }
            try {
                int maxOverlapsToSideline = Integer.parseInt(args[i + 1]);
                setMaxOverlapsToSideline(maxOverlapsToSideline);
            } catch (NumberFormatException e) {
                errors.reportError(ERROR_CODE.WRONG_USAGE, "-maxOverlapsToSideline needs a numeric value argument.");
                return printUsageAndExit();
            }
            i++;
        } else if (cmd.equals("-maxMerge")) {
            if (i == args.length - 1) {
                errors.reportError(ERROR_CODE.WRONG_USAGE, "-maxMerge needs a numeric value argument.");
                return printUsageAndExit();
            }
            try {
                int maxMerge = Integer.parseInt(args[i + 1]);
                setMaxMerge(maxMerge);
            } catch (NumberFormatException e) {
                errors.reportError(ERROR_CODE.WRONG_USAGE, "-maxMerge needs a numeric value argument.");
                return printUsageAndExit();
            }
            i++;
        } else if (cmd.equals("-summary")) {
            setSummary();
        } else if (cmd.equals("-metaonly")) {
            setCheckMetaOnly();
        } else if (cmd.equals("-boundaries")) {
            setRegionBoundariesCheck();
        } else if (cmd.equals("-fixReplication")) {
            setFixReplication(true);
        } else if (cmd.startsWith("-")) {
            errors.reportError(ERROR_CODE.WRONG_USAGE, "Unrecognized option:" + cmd);
            return printUsageAndExit();
        } else {
            includeTable(TableName.valueOf(cmd));
            errors.print("Allow checking/fixes for table: " + cmd);
        }
    }
    errors.print("HBaseFsck command line options: " + StringUtils.join(args, " "));
    // pre-check current user has FS write permission or not
    try {
        preCheckPermission();
    } catch (AccessDeniedException ace) {
        Runtime.getRuntime().exit(-1);
    } catch (IOException ioe) {
        Runtime.getRuntime().exit(-1);
    }
    // do the real work of hbck
    connect();
    try {
        // if corrupt file mode is on, first fix them since they may be opened later
        if (checkCorruptHFiles || sidelineCorruptHFiles) {
            LOG.info("Checking all hfiles for corruption");
            HFileCorruptionChecker hfcc = createHFileCorruptionChecker(sidelineCorruptHFiles);
            // so we can get result
            setHFileCorruptionChecker(hfcc);
            Collection<TableName> tables = getIncludedTables();
            Collection<Path> tableDirs = new ArrayList<>();
            Path rootdir = FSUtils.getRootDir(getConf());
            if (tables.size() > 0) {
                for (TableName t : tables) {
                    tableDirs.add(FSUtils.getTableDir(rootdir, t));
                }
            } else {
                tableDirs = FSUtils.getTableDirs(FSUtils.getCurrentFileSystem(getConf()), rootdir);
            }
            hfcc.checkTables(tableDirs);
            hfcc.report(errors);
        }
        // check and fix table integrity, region consistency.
        int code = onlineHbck();
        setRetCode(code);
    // If fixes were applied, re-check once in report-only mode; the fix flags are
    // cleared below so this cannot become an infinite loop.
        if (shouldRerun()) {
            try {
                LOG.info("Sleeping " + sleepBeforeRerun + "ms before re-checking after fix...");
                Thread.sleep(sleepBeforeRerun);
            } catch (InterruptedException ie) {
                LOG.warn("Interrupted while sleeping");
                return this;
            }
            // Just report
            setFixAssignments(false);
            setFixMeta(false);
            setFixHdfsHoles(false);
            setFixHdfsOverlaps(false);
            setFixVersionFile(false);
            setFixTableOrphans(false);
            errors.resetErrors();
            code = onlineHbck();
            setRetCode(code);
        }
    } finally {
        IOUtils.closeQuietly(this);
    }
    return this;
}
Also used: Path (org.apache.hadoop.fs.Path), AccessDeniedException (org.apache.hadoop.hbase.security.AccessDeniedException), ArrayList (java.util.ArrayList), InterruptedIOException (java.io.InterruptedIOException), IOException (java.io.IOException), TableName (org.apache.hadoop.hbase.TableName), HFileCorruptionChecker (org.apache.hadoop.hbase.util.hbck.HFileCorruptionChecker)
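Most of exec() is one parsing idiom repeated: a flag that requires a numeric argument, with a usage error for both the missing-value and the non-numeric case. A self-contained sketch of that idiom (the names are illustrative, not HBase API):

public final class NumericFlagParser {

    /** Returns the flag's numeric argument, or null when it is missing or malformed. */
    static Long parseNumericFlag(String[] args, int flagIndex, String flag) {
        // The value must exist: the flag cannot be the last token.
        if (flagIndex == args.length - 1) {
            System.err.println(flag + " needs a numeric value.");
            return null;
        }
        try {
            return Long.parseLong(args[flagIndex + 1]);
        } catch (NumberFormatException e) {
            System.err.println(flag + " needs a numeric value.");
            return null;
        }
    }

    public static void main(String[] args) {
        // Prints 30: the flag at index 0 is followed by a valid number.
        System.out.println(parseNumericFlag(new String[] { "-timelag", "30" }, 0, "-timelag"));
        // Prints null twice: first a missing value, then a non-numeric one.
        System.out.println(parseNumericFlag(new String[] { "-timelag" }, 0, "-timelag"));
        System.out.println(parseNumericFlag(new String[] { "-timelag", "soon" }, 0, "-timelag"));
    }
}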

Example 19 with AccessDeniedException

Use of org.apache.hadoop.hbase.security.AccessDeniedException in project hbase by apache.

From class TestGenerateDelegationToken, method testTokenAuth:

private void testTokenAuth(Class<? extends RpcClient> rpcImplClass) throws IOException, ServiceException {
    TEST_UTIL.getConfiguration().set(RpcClientFactory.CUSTOM_RPC_CLIENT_IMPL_CONF_KEY, rpcImplClass.getName());
    try (Connection conn = ConnectionFactory.createConnection(TEST_UTIL.getConfiguration());
        Table table = conn.getTable(TableName.META_TABLE_NAME)) {
        CoprocessorRpcChannel rpcChannel = table.coprocessorService(HConstants.EMPTY_START_ROW);
        AuthenticationProtos.AuthenticationService.BlockingInterface service = AuthenticationProtos.AuthenticationService.newBlockingStub(rpcChannel);
        WhoAmIResponse response = service.whoAmI(null, WhoAmIRequest.getDefaultInstance());
        assertEquals(USERNAME, response.getUsername());
        assertEquals(AuthenticationMethod.TOKEN.name(), response.getAuthMethod());
        try {
            service.getAuthenticationToken(null, GetAuthenticationTokenRequest.getDefaultInstance());
        } catch (ServiceException e) {
            AccessDeniedException exc = (AccessDeniedException) ProtobufUtil.handleRemoteException(e);
            assertTrue(exc.getMessage().contains("Token generation only allowed for Kerberos authenticated clients"));
        }
    }
}
Also used: AccessDeniedException (org.apache.hadoop.hbase.security.AccessDeniedException), Table (org.apache.hadoop.hbase.client.Table), ServiceException (com.google.protobuf.ServiceException), CoprocessorRpcChannel (org.apache.hadoop.hbase.ipc.CoprocessorRpcChannel), Connection (org.apache.hadoop.hbase.client.Connection), WhoAmIResponse (org.apache.hadoop.hbase.protobuf.generated.AuthenticationProtos.WhoAmIResponse)
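The ProtobufUtil.handleRemoteException call above unwraps the IOException that a protobuf ServiceException carries as its cause. A minimal sketch of that unwrap idiom, assuming only protobuf's ServiceException (the helper name is illustrative):

import java.io.IOException;
import com.google.protobuf.ServiceException;

public final class RemoteExceptions {

    /** Returns the wrapped remote IOException, or wraps the ServiceException itself. */
    public static IOException unwrap(ServiceException se) {
        Throwable cause = se.getCause();
        if (cause instanceof IOException) {
            // For HBase RPC this is where a remote AccessDeniedException surfaces,
            // since AccessDeniedException extends IOException.
            return (IOException) cause;
        }
        return new IOException(se);
    }
}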

Example 20 with AccessDeniedException

Use of org.apache.hadoop.hbase.security.AccessDeniedException in project hbase by apache.

From class AccessController, method checkCoveringPermission:

/**
   * Determine if cell ACLs covered by the operation grant access. This is expensive.
   * @return false if cell ACLs failed to grant access, true otherwise
   * @throws IOException
   */
private boolean checkCoveringPermission(User user, OpType request, RegionCoprocessorEnvironment e, byte[] row, Map<byte[], ? extends Collection<?>> familyMap, long opTs, Action... actions) throws IOException {
    if (!cellFeaturesEnabled) {
        return false;
    }
    long cellGrants = 0;
    long latestCellTs = 0;
    Get get = new Get(row);
    // Only in case of Put/Delete op, consider TS within cell (if set for individual cells).
    // When every cell within a Mutation can be linked with a diff TS, we can not rely on only one
    // version. We have to get every cell version and check its TS against the TS asked for in the
    // Mutation, and skip those Cells which are outside this Mutation's TS. In case of Put, we have
    // to consider only one such passing cell. In case of Delete we have to consider all the cell
    // versions under this passing version. When the Delete Mutation contains columns which are a
    // version delete, just consider one version for those column cells.
    boolean considerCellTs = (request == OpType.PUT || request == OpType.DELETE);
    if (considerCellTs) {
        get.setMaxVersions();
    } else {
        get.setMaxVersions(1);
    }
    boolean diffCellTsFromOpTs = false;
    for (Map.Entry<byte[], ? extends Collection<?>> entry : familyMap.entrySet()) {
        byte[] col = entry.getKey();
        // The family map's values may arrive as a Set of qualifiers or a List of Cells;
        // handle both shapes below.
        if (entry.getValue() instanceof Set) {
            Set<byte[]> set = (Set<byte[]>) entry.getValue();
            if (set == null || set.isEmpty()) {
                get.addFamily(col);
            } else {
                for (byte[] qual : set) {
                    get.addColumn(col, qual);
                }
            }
        } else if (entry.getValue() instanceof List) {
            List<Cell> list = (List<Cell>) entry.getValue();
            if (list == null || list.isEmpty()) {
                get.addFamily(col);
            } else {
                // In case of family delete, a Cell will be added into the list with Qualifier as null.
                for (Cell cell : list) {
                    if (cell.getQualifierLength() == 0 && (cell.getTypeByte() == Type.DeleteFamily.getCode() || cell.getTypeByte() == Type.DeleteFamilyVersion.getCode())) {
                        get.addFamily(col);
                    } else {
                        get.addColumn(col, CellUtil.cloneQualifier(cell));
                    }
                    if (considerCellTs) {
                        long cellTs = cell.getTimestamp();
                        latestCellTs = Math.max(latestCellTs, cellTs);
                        diffCellTsFromOpTs = diffCellTsFromOpTs || (opTs != cellTs);
                    }
                }
            }
        } else if (entry.getValue() == null) {
            get.addFamily(col);
        } else {
            throw new RuntimeException("Unhandled collection type " + entry.getValue().getClass().getName());
        }
    }
    // We want to avoid looking into the future. So, if the cells of the
    // operation specify a timestamp, or the operation itself specifies a
    // timestamp, then we use the maximum ts found. Otherwise, we bound
    // the Get to the current server time. We add 1 to the timerange since
    // the upper bound of a timerange is exclusive yet we need to examine
    // any cells found there inclusively.
    long latestTs = Math.max(opTs, latestCellTs);
    if (latestTs == 0 || latestTs == HConstants.LATEST_TIMESTAMP) {
        latestTs = EnvironmentEdgeManager.currentTime();
    }
    get.setTimeRange(0, latestTs + 1);
    // Normally a Put's cell TS matches the op TS; in that case there is no need to get all
    // versions, the latest version is enough.
    if (!diffCellTsFromOpTs && request == OpType.PUT) {
        get.setMaxVersions(1);
    }
    if (LOG.isTraceEnabled()) {
        LOG.trace("Scanning for cells with " + get);
    }
    // This Map mirrors familyMap, but its key is a ByteRange rather than byte[]. That makes
    // per-Cell lookups easy: the key can be built from a Cell's family via
    // new SimpleMutableByteRange(cell.getFamilyArray(), cell.getFamilyOffset(), cell.getFamilyLength())
    Map<ByteRange, List<Cell>> familyMap1 = new HashMap<>();
    for (Entry<byte[], ? extends Collection<?>> entry : familyMap.entrySet()) {
        if (entry.getValue() instanceof List) {
            familyMap1.put(new SimpleMutableByteRange(entry.getKey()), (List<Cell>) entry.getValue());
        }
    }
    RegionScanner scanner = getRegion(e).getScanner(new Scan(get));
    List<Cell> cells = Lists.newArrayList();
    Cell prevCell = null;
    ByteRange curFam = new SimpleMutableByteRange();
    boolean curColAllVersions = (request == OpType.DELETE);
    long curColCheckTs = opTs;
    boolean foundColumn = false;
    try {
        boolean more = false;
        ScannerContext scannerContext = ScannerContext.newBuilder().setBatchLimit(1).build();
        do {
            cells.clear();
            // scan with limit as 1 to hold down memory use on wide rows
            more = scanner.next(cells, scannerContext);
            for (Cell cell : cells) {
                if (LOG.isTraceEnabled()) {
                    LOG.trace("Found cell " + cell);
                }
                boolean colChange = prevCell == null || !CellUtil.matchingColumn(prevCell, cell);
                if (colChange)
                    foundColumn = false;
                prevCell = cell;
                if (!curColAllVersions && foundColumn) {
                    continue;
                }
                if (colChange && considerCellTs) {
                    curFam.set(cell.getFamilyArray(), cell.getFamilyOffset(), cell.getFamilyLength());
                    List<Cell> cols = familyMap1.get(curFam);
                    for (Cell col : cols) {
                        // A family delete adds a Cell with an empty qualifier, hence the
                        // (col.getQualifierLength() == 0) check below.
                        if ((col.getQualifierLength() == 0 && request == OpType.DELETE) || CellUtil.matchingQualifier(cell, col)) {
                            byte type = col.getTypeByte();
                            if (considerCellTs) {
                                curColCheckTs = col.getTimestamp();
                            }
                            // For a Delete op we pass allVersions as true. When a Delete Mutation contains
                            // a version delete for a column no need to check all the covering cells within
                            // that column. Check all versions when Type is DeleteColumn or DeleteFamily
                            // Single-version delete types are Delete and DeleteFamilyVersion.
                            curColAllVersions = (KeyValue.Type.DeleteColumn.getCode() == type) || (KeyValue.Type.DeleteFamily.getCode() == type);
                            break;
                        }
                    }
                }
                if (cell.getTimestamp() > curColCheckTs) {
                    // Just ignore this cell. This is not a covering cell.
                    continue;
                }
                foundColumn = true;
                for (Action action : actions) {
                    // Are there permissions for this user for the cell?
                    if (!authManager.authorize(user, getTableName(e), cell, action)) {
                        // We can stop if the cell ACL denies access
                        return false;
                    }
                }
                cellGrants++;
            }
        } while (more);
    } catch (AccessDeniedException ex) {
        throw ex;
    } catch (IOException ex) {
        LOG.error("Exception while getting cells to calculate covering permission", ex);
    } finally {
        scanner.close();
    }
    // Grant access based on cell ACLs only as a fallback, after no table or CF grants are found.
    return cellGrants > 0;
}
Also used: PrivilegedExceptionAction (java.security.PrivilegedExceptionAction), Action (org.apache.hadoop.hbase.security.access.Permission.Action), AccessDeniedException (org.apache.hadoop.hbase.security.AccessDeniedException), Set (java.util.Set), TreeSet (java.util.TreeSet), ImmutableSet (com.google.common.collect.ImmutableSet), HashMap (java.util.HashMap), ByteRange (org.apache.hadoop.hbase.util.ByteRange), SimpleMutableByteRange (org.apache.hadoop.hbase.util.SimpleMutableByteRange), IOException (java.io.IOException), DoNotRetryIOException (org.apache.hadoop.hbase.DoNotRetryIOException), RegionScanner (org.apache.hadoop.hbase.regionserver.RegionScanner), Get (org.apache.hadoop.hbase.client.Get), FilterList (org.apache.hadoop.hbase.filter.FilterList), ArrayList (java.util.ArrayList), List (java.util.List), Scan (org.apache.hadoop.hbase.client.Scan), Map (java.util.Map), TreeMap (java.util.TreeMap), Cell (org.apache.hadoop.hbase.Cell), ScannerContext (org.apache.hadoop.hbase.regionserver.ScannerContext)
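The time-range handling in checkCoveringPermission deserves a standalone illustration: a Get's time range has an exclusive upper bound, so the code adds 1 to the latest relevant timestamp, falling back to the current server time when no usable timestamp exists. A small self-contained sketch of that arithmetic (the constant mirrors HConstants.LATEST_TIMESTAMP rather than importing it):

public final class TimeRangeBound {

    // Mirrors HConstants.LATEST_TIMESTAMP (Long.MAX_VALUE).
    static final long LATEST_TIMESTAMP = Long.MAX_VALUE;

    /** Computes the exclusive upper bound for the covering-permission Get. */
    static long upperBound(long opTs, long latestCellTs, long now) {
        long latestTs = Math.max(opTs, latestCellTs);
        if (latestTs == 0 || latestTs == LATEST_TIMESTAMP) {
            // Neither the op nor its cells carried a concrete TS: bound at server time.
            latestTs = now;
        }
        // +1 because the upper bound is exclusive, yet cells at latestTs must be examined.
        return latestTs + 1;
    }

    public static void main(String[] args) {
        System.out.println(upperBound(0L, 0L, 1000L)); // 1001: falls back to "now"
        System.out.println(upperBound(500L, 700L, 1000L)); // 701: the latest cell TS wins
    }
}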

Aggregations

AccessDeniedException (org.apache.hadoop.hbase.security.AccessDeniedException): 35 usages
User (org.apache.hadoop.hbase.security.User): 20 usages
DoNotRetryIOException (org.apache.hadoop.hbase.DoNotRetryIOException): 13 usages
IOException (java.io.IOException): 12 usages
TableName (org.apache.hadoop.hbase.TableName): 8 usages
RegionCoprocessorEnvironment (org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment): 8 usages
PrivilegedExceptionAction (java.security.PrivilegedExceptionAction): 7 usages
Action (org.apache.hadoop.hbase.security.access.Permission.Action): 7 usages
ArrayList (java.util.ArrayList): 6 usages
ByteString (com.google.protobuf.ByteString): 5 usages
Cell (org.apache.hadoop.hbase.Cell): 5 usages
Path (org.apache.hadoop.fs.Path): 3 usages
RegionActionResult (org.apache.hadoop.hbase.protobuf.generated.ClientProtos.RegionActionResult): 3 usages
VisibilityLabelsResponse (org.apache.hadoop.hbase.protobuf.generated.VisibilityLabelsProtos.VisibilityLabelsResponse): 3 usages
OperationStatus (org.apache.hadoop.hbase.regionserver.OperationStatus): 3 usages
ReplicationEndpoint (org.apache.hadoop.hbase.replication.ReplicationEndpoint): 3 usages
ImmutableSet (com.google.common.collect.ImmutableSet): 2 usages
Set (java.util.Set): 2 usages
TreeMap (java.util.TreeMap): 2 usages
TreeSet (java.util.TreeSet): 2 usages