
Example 61 with TableName

Use of org.apache.hadoop.hbase.TableName in project hbase by apache.

In class AccessController, method getUserPermissions:

@Override
public void getUserPermissions(RpcController controller, AccessControlProtos.GetUserPermissionsRequest request, RpcCallback<AccessControlProtos.GetUserPermissionsResponse> done) {
    AccessControlProtos.GetUserPermissionsResponse response = null;
    try {
        // only allowed to be called on _acl_ region
        if (aclRegion) {
            if (!initialized) {
                throw new CoprocessorException("AccessController not yet initialized");
            }
            User caller = RpcServer.getRequestUser();
            List<UserPermission> perms = null;
            if (request.getType() == AccessControlProtos.Permission.Type.Table) {
                final TableName table = request.hasTableName() ? ProtobufUtil.toTableName(request.getTableName()) : null;
                requirePermission(caller, "userPermissions", table, null, null, Action.ADMIN);
                perms = User.runAsLoginUser(new PrivilegedExceptionAction<List<UserPermission>>() {

                    @Override
                    public List<UserPermission> run() throws Exception {
                        return AccessControlLists.getUserTablePermissions(regionEnv.getConfiguration(), table);
                    }
                });
            } else if (request.getType() == AccessControlProtos.Permission.Type.Namespace) {
                final String namespace = request.getNamespaceName().toStringUtf8();
                requireNamespacePermission(caller, "userPermissions", namespace, Action.ADMIN);
                perms = User.runAsLoginUser(new PrivilegedExceptionAction<List<UserPermission>>() {

                    @Override
                    public List<UserPermission> run() throws Exception {
                        return AccessControlLists.getUserNamespacePermissions(regionEnv.getConfiguration(), namespace);
                    }
                });
            } else {
                requirePermission(caller, "userPermissions", Action.ADMIN);
                perms = User.runAsLoginUser(new PrivilegedExceptionAction<List<UserPermission>>() {

                    @Override
                    public List<UserPermission> run() throws Exception {
                        return AccessControlLists.getUserPermissions(regionEnv.getConfiguration(), null);
                    }
                });
                // Add superusers explicitly; this also helps avoid leaking information
                // about who the superusers are.
                for (String user : Superusers.getSuperUsers()) {
                    perms.add(new UserPermission(user.getBytes(), AccessControlLists.ACL_TABLE_NAME, null, Action.values()));
                }
            }
            response = AccessControlUtil.buildGetUserPermissionsResponse(perms);
        } else {
            throw new CoprocessorException(AccessController.class, "This method " + "can only execute at " + AccessControlLists.ACL_TABLE_NAME + " table.");
        }
    } catch (IOException ioe) {
        // pass exception back up
        CoprocessorRpcUtils.setControllerException(controller, ioe);
    }
    done.run(response);
}
Also used : User(org.apache.hadoop.hbase.security.User) PrivilegedExceptionAction(java.security.PrivilegedExceptionAction) IOException(java.io.IOException) DoNotRetryIOException(org.apache.hadoop.hbase.DoNotRetryIOException) AccessDeniedException(org.apache.hadoop.hbase.security.AccessDeniedException) CoprocessorException(org.apache.hadoop.hbase.coprocessor.CoprocessorException) AccessControlProtos(org.apache.hadoop.hbase.protobuf.generated.AccessControlProtos) TableName(org.apache.hadoop.hbase.TableName) FilterList(org.apache.hadoop.hbase.filter.FilterList) ArrayList(java.util.ArrayList) List(java.util.List)
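A minimal client-side sketch of how this endpoint is typically reached, going through AccessControlClient rather than assembling the protobuf request by hand. The table "default:demo", the user "bob", and the already-open Connection are hypothetical names used only for illustration.

import java.util.List;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.security.access.AccessControlClient;
import org.apache.hadoop.hbase.security.access.Permission;
import org.apache.hadoop.hbase.security.access.UserPermission;

public class ListPermissionsSketch {

    // Grants a table-level permission and lists it back; the listing call ends up
    // in the AccessController.getUserPermissions() endpoint shown above.
    static void listTablePermissions(Connection conn) throws Throwable {
        AccessControlClient.grant(conn, TableName.valueOf("default:demo"), "bob", null, null, Permission.Action.READ);
        List<UserPermission> perms = AccessControlClient.getUserPermissions(conn, "demo");
        for (UserPermission perm : perms) {
            System.out.println(perm);
        }
    }
}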

Example 62 with TableName

Use of org.apache.hadoop.hbase.TableName in project hbase by apache.

In class AccessController, method preIncrementAfterRowLock:

@Override
public Result preIncrementAfterRowLock(final ObserverContext<RegionCoprocessorEnvironment> c, final Increment increment) throws IOException {
    if (increment.getAttribute(CHECK_COVERING_PERM) != null) {
        // Table, CF and qualifier permission checks failed; now give the cell-level
        // permission check a chance
        TableName table = c.getEnvironment().getRegion().getRegionInfo().getTable();
        AuthResult authResult = null;
        User user = getActiveUser(c);
        if (checkCoveringPermission(user, OpType.INCREMENT, c.getEnvironment(), increment.getRow(), increment.getFamilyCellMap(), increment.getTimeRange().getMax(), Action.WRITE)) {
            authResult = AuthResult.allow(OpType.INCREMENT.toString(), "Covering cell set", user, Action.WRITE, table, increment.getFamilyCellMap());
        } else {
            authResult = AuthResult.deny(OpType.INCREMENT.toString(), "Covering cell set", user, Action.WRITE, table, increment.getFamilyCellMap());
        }
        logResult(authResult);
        if (authorizationEnabled && !authResult.isAllowed()) {
            throw new AccessDeniedException("Insufficient permissions " + authResult.toContextString());
        }
    }
    return null;
}
Also used : TableName(org.apache.hadoop.hbase.TableName) AccessDeniedException(org.apache.hadoop.hbase.security.AccessDeniedException) User(org.apache.hadoop.hbase.security.User)
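A hedged sketch of the client-side pattern that exercises this hook: a Put stores a cell with a cell-level ACL, and a later Increment by a user who lacks table- or CF-level WRITE falls back to the covering-cell check above. Table "demo", family "f", and user "bob" are hypothetical names.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Increment;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.security.access.Permission;
import org.apache.hadoop.hbase.util.Bytes;

public class CellAclIncrementSketch {

    static void incrementWithCellAcl(Connection conn) throws Exception {
        try (Table table = conn.getTable(TableName.valueOf("demo"))) {
            // Store a counter cell that grants WRITE to "bob" at the cell level.
            Put put = new Put(Bytes.toBytes("row1"));
            put.addColumn(Bytes.toBytes("f"), Bytes.toBytes("ctr"), Bytes.toBytes(0L));
            put.setACL("bob", new Permission(Permission.Action.WRITE));
            table.put(put);
            // If the caller has no table/CF-level WRITE, this increment is re-checked
            // against the covering cells in preIncrementAfterRowLock.
            Increment inc = new Increment(Bytes.toBytes("row1"));
            inc.addColumn(Bytes.toBytes("f"), Bytes.toBytes("ctr"), 1L);
            table.increment(inc);
        }
    }
}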

Example 63 with TableName

Use of org.apache.hadoop.hbase.TableName in project hbase by apache.

In class AccessController, method preCheckAndPutAfterRowLock:

@Override
public boolean preCheckAndPutAfterRowLock(final ObserverContext<RegionCoprocessorEnvironment> c, final byte[] row, final byte[] family, final byte[] qualifier, final CompareFilter.CompareOp compareOp, final ByteArrayComparable comparator, final Put put, final boolean result) throws IOException {
    if (put.getAttribute(CHECK_COVERING_PERM) != null) {
        // Table, CF and qualifier permission checks failed; now give the cell-level
        // permission check a chance
        TableName table = c.getEnvironment().getRegion().getRegionInfo().getTable();
        Map<byte[], ? extends Collection<byte[]>> families = makeFamilyMap(family, qualifier);
        AuthResult authResult = null;
        User user = getActiveUser(c);
        if (checkCoveringPermission(user, OpType.CHECK_AND_PUT, c.getEnvironment(), row, families, HConstants.LATEST_TIMESTAMP, Action.READ)) {
            authResult = AuthResult.allow(OpType.CHECK_AND_PUT.toString(), "Covering cell set", user, Action.READ, table, families);
        } else {
            authResult = AuthResult.deny(OpType.CHECK_AND_PUT.toString(), "Covering cell set", user, Action.READ, table, families);
        }
        logResult(authResult);
        if (authorizationEnabled && !authResult.isAllowed()) {
            throw new AccessDeniedException("Insufficient permissions " + authResult.toContextString());
        }
    }
    return result;
}
Also used : TableName(org.apache.hadoop.hbase.TableName) AccessDeniedException(org.apache.hadoop.hbase.security.AccessDeniedException) User(org.apache.hadoop.hbase.security.User)
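The matching client call for this hook is checkAndPut; note the covering-cell re-check above demands READ on the compared cells. A minimal sketch with hypothetical table, family, and values, using the HBase 1.x-style Table.checkAndPut:

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class CheckAndPutSketch {

    static boolean updateIfUnchanged(Connection conn) throws Exception {
        try (Table table = conn.getTable(TableName.valueOf("demo"))) {
            Put put = new Put(Bytes.toBytes("row1"));
            put.addColumn(Bytes.toBytes("f"), Bytes.toBytes("q"), Bytes.toBytes("new"));
            // Atomically applies the Put only if f:q still holds "old"; the compare
            // step is what drives the READ requirement in preCheckAndPutAfterRowLock.
            return table.checkAndPut(Bytes.toBytes("row1"), Bytes.toBytes("f"), Bytes.toBytes("q"), Bytes.toBytes("old"), put);
        }
    }
}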

Example 64 with TableName

Use of org.apache.hadoop.hbase.TableName in project hbase by apache.

In class SnapshotReferenceUtil, method verifyStoreFile:

/**
   * Verify the validity of the snapshot store file
   *
   * @param conf The current {@link Configuration} instance.
   * @param fs {@link FileSystem}
   * @param snapshotDir {@link Path} to the Snapshot directory of the snapshot to verify
   * @param snapshot the {@link SnapshotDescription} of the snapshot to verify
   * @param regionInfo {@link HRegionInfo} of the region that contains the store file
   * @param family family that contains the store file
   * @param storeFile the store file to verify
   * @throws CorruptedSnapshotException if the snapshot is corrupted
   * @throws IOException if an error occurred while scanning the directory
   */
private static void verifyStoreFile(final Configuration conf, final FileSystem fs, final Path snapshotDir, final SnapshotDescription snapshot, final HRegionInfo regionInfo, final String family, final SnapshotRegionManifest.StoreFile storeFile) throws IOException {
    TableName table = TableName.valueOf(snapshot.getTable());
    String fileName = storeFile.getName();
    Path refPath = null;
    if (StoreFileInfo.isReference(fileName)) {
        // If it is a reference file, check that the parent file is present in the snapshot
        refPath = new Path(new Path(regionInfo.getEncodedName(), family), fileName);
        refPath = StoreFileInfo.getReferredToFile(refPath);
        String refRegion = refPath.getParent().getParent().getName();
        refPath = HFileLink.createPath(table, refRegion, family, refPath.getName());
        if (!HFileLink.buildFromHFileLinkPattern(conf, refPath).exists(fs)) {
            throw new CorruptedSnapshotException("Missing parent hfile for: " + fileName + " path=" + refPath, ProtobufUtil.createSnapshotDesc(snapshot));
        }
        if (storeFile.hasReference()) {
            // we already have the Reference information embedded here.
            return;
        }
    }
    Path linkPath;
    if (refPath != null && HFileLink.isHFileLink(refPath)) {
        linkPath = new Path(family, refPath.getName());
    } else if (HFileLink.isHFileLink(fileName)) {
        linkPath = new Path(family, fileName);
    } else {
        linkPath = new Path(family, HFileLink.createHFileLinkName(table, regionInfo.getEncodedName(), fileName));
    }
    // check if the linked file exists (in the archive, or in the table dir)
    HFileLink link = null;
    if (MobUtils.isMobRegionInfo(regionInfo)) {
        // for mob region
        link = HFileLink.buildFromHFileLinkPattern(MobUtils.getQualifiedMobRootDir(conf), HFileArchiveUtil.getArchivePath(conf), linkPath);
    } else {
        // not mob region
        link = HFileLink.buildFromHFileLinkPattern(conf, linkPath);
    }
    try {
        FileStatus fstat = link.getFileStatus(fs);
        if (storeFile.hasFileSize() && storeFile.getFileSize() != fstat.getLen()) {
            String msg = "hfile: " + fileName + " size does not match with the expected one. " + " found=" + fstat.getLen() + " expected=" + storeFile.getFileSize();
            LOG.error(msg);
            throw new CorruptedSnapshotException(msg, ProtobufUtil.createSnapshotDesc(snapshot));
        }
    } catch (FileNotFoundException e) {
        String msg = "Can't find hfile: " + fileName + " in the real (" + link.getOriginPath() + ") or archive (" + link.getArchivePath() + ") directory for the primary table.";
        LOG.error(msg);
        throw new CorruptedSnapshotException(msg, ProtobufUtil.createSnapshotDesc(snapshot));
    }
}
Also used : Path(org.apache.hadoop.fs.Path) HFileLink(org.apache.hadoop.hbase.io.HFileLink) TableName(org.apache.hadoop.hbase.TableName) FileStatus(org.apache.hadoop.fs.FileStatus) FileNotFoundException(java.io.FileNotFoundException)
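For reference, a small sketch of the TableName and HFileLink naming that verifyStoreFile relies on when it builds link paths; the region and hfile names below are hypothetical hex strings.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.io.HFileLink;

public class HFileLinkNameSketch {

    public static void main(String[] args) {
        // Namespace-qualified table name, as stored in the snapshot descriptor.
        TableName table = TableName.valueOf("ns1:demo");
        // Encodes table, encoded region name and hfile name into a link file name.
        String linkName = HFileLink.createHFileLinkName(table, "0123456789abcdef0123456789abcdef", "a1b2c3d4e5f60718");
        System.out.println(linkName);
        // A well-formed link name should be recognized as an HFileLink.
        System.out.println(HFileLink.isHFileLink(linkName));
    }
}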

Example 65 with TableName

Use of org.apache.hadoop.hbase.TableName in project hbase by apache.

In class TestBackupLogCleaner, method testBackupLogCleaner:

// Implements all test cases in one test, since the incremental backup
// depends on the preceding full backup
@Test
public void testBackupLogCleaner() throws Exception {
    // #1 - create full backup for all tables
    LOG.info("create full backup image for all tables");
    List<TableName> tableSetFullList = Lists.newArrayList(table1, table2, table3, table4);
    try (BackupSystemTable systemTable = new BackupSystemTable(TEST_UTIL.getConnection())) {
        // Verify that we have no backup sessions yet
        assertFalse(systemTable.hasBackupSessions());
        List<FileStatus> walFiles = getListOfWALFiles(TEST_UTIL.getConfiguration());
        List<String> swalFiles = convert(walFiles);
        BackupLogCleaner cleaner = new BackupLogCleaner();
        cleaner.setConf(TEST_UTIL.getConfiguration());
        cleaner.init(null);
        cleaner.setConf(TEST_UTIL.getConfiguration());
        Iterable<FileStatus> deletable = cleaner.getDeletableFiles(walFiles);
        int size = Iterables.size(deletable);
        // We can delete all files because no backup sessions have been recorded yet
        assertTrue(size == walFiles.size());
        systemTable.addWALFiles(swalFiles, "backup", "root");
        String backupIdFull = fullTableBackup(tableSetFullList);
        assertTrue(checkSucceeded(backupIdFull));
        // Check one more time
        deletable = cleaner.getDeletableFiles(walFiles);
        // We can delete WAL files because they were saved into the backup system table
        size = Iterables.size(deletable);
        assertTrue(size == walFiles.size());
        List<FileStatus> newWalFiles = getListOfWALFiles(TEST_UTIL.getConfiguration());
        LOG.debug("WAL list after full backup");
        convert(newWalFiles);
        // The new list of WAL files is larger than the previous one,
        // because a new WAL per RS has been opened after the full backup
        assertTrue(walFiles.size() < newWalFiles.size());
        Connection conn = ConnectionFactory.createConnection(conf1);
        // #2 - insert some data into the tables
        HTable t1 = (HTable) conn.getTable(table1);
        Put p1;
        for (int i = 0; i < NB_ROWS_IN_BATCH; i++) {
            p1 = new Put(Bytes.toBytes("row-t1" + i));
            p1.addColumn(famName, qualName, Bytes.toBytes("val" + i));
            t1.put(p1);
        }
        t1.close();
        HTable t2 = (HTable) conn.getTable(table2);
        Put p2;
        for (int i = 0; i < 5; i++) {
            p2 = new Put(Bytes.toBytes("row-t2" + i));
            p2.addColumn(famName, qualName, Bytes.toBytes("val" + i));
            t2.put(p2);
        }
        t2.close();
        // #3 - incremental backup for multiple tables
        List<TableName> tableSetIncList = Lists.newArrayList(table1, table2, table3);
        String backupIdIncMultiple = backupTables(BackupType.INCREMENTAL, tableSetIncList, BACKUP_ROOT_DIR);
        assertTrue(checkSucceeded(backupIdIncMultiple));
        deletable = cleaner.getDeletableFiles(newWalFiles);
        assertTrue(Iterables.size(deletable) == newWalFiles.size());
        conn.close();
    }
}
Also used : BackupSystemTable(org.apache.hadoop.hbase.backup.impl.BackupSystemTable) FileStatus(org.apache.hadoop.fs.FileStatus) LocatedFileStatus(org.apache.hadoop.fs.LocatedFileStatus) Connection(org.apache.hadoop.hbase.client.Connection) HTable(org.apache.hadoop.hbase.client.HTable) Put(org.apache.hadoop.hbase.client.Put) TableName(org.apache.hadoop.hbase.TableName) Test(org.junit.Test)

Aggregations

TableName (org.apache.hadoop.hbase.TableName): 1033 usages
Test (org.junit.Test): 695 usages
HTableDescriptor (org.apache.hadoop.hbase.HTableDescriptor): 257 usages
Table (org.apache.hadoop.hbase.client.Table): 228 usages
IOException (java.io.IOException): 225 usages
HRegionInfo (org.apache.hadoop.hbase.HRegionInfo): 215 usages
HColumnDescriptor (org.apache.hadoop.hbase.HColumnDescriptor): 203 usages
Result (org.apache.hadoop.hbase.client.Result): 125 usages
ArrayList (java.util.ArrayList): 120 usages
Put (org.apache.hadoop.hbase.client.Put): 118 usages
Path (org.apache.hadoop.fs.Path): 113 usages
Connection (org.apache.hadoop.hbase.client.Connection): 103 usages
Scan (org.apache.hadoop.hbase.client.Scan): 98 usages
ResultScanner (org.apache.hadoop.hbase.client.ResultScanner): 89 usages
ServerName (org.apache.hadoop.hbase.ServerName): 85 usages
Admin (org.apache.hadoop.hbase.client.Admin): 85 usages
Cell (org.apache.hadoop.hbase.Cell): 77 usages
HashMap (java.util.HashMap): 75 usages
Delete (org.apache.hadoop.hbase.client.Delete): 66 usages
InterruptedIOException (java.io.InterruptedIOException): 63 usages
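A short sketch of the TableName basics that recur across these examples: valueOf with and without a namespace, the string accessors, and the system-table check. All table names below are hypothetical.

import org.apache.hadoop.hbase.TableName;

public class TableNameBasicsSketch {

    public static void main(String[] args) {
        // Qualifier only: the table lands in the "default" namespace.
        TableName plain = TableName.valueOf("demo");
        // Namespace-qualified, either as one string or as two arguments.
        TableName qualified = TableName.valueOf("ns1", "demo");
        System.out.println(plain.getNameAsString());            // demo
        System.out.println(qualified.getNameAsString());        // ns1:demo
        System.out.println(qualified.getNamespaceAsString());   // ns1
        System.out.println(qualified.getQualifierAsString());   // demo
        // hbase:meta lives in the system namespace.
        System.out.println(TableName.META_TABLE_NAME.isSystemTable()); // true
    }
}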