
Example 1 with User

Use of org.apache.hadoop.hbase.security.User in project hbase by apache.

From class HFileReplicator, method copyHFilesToStagingDir:

private Map<String, Path> copyHFilesToStagingDir() throws IOException {
    Map<String, Path> mapOfCopiedHFiles = new HashMap<>();
    Pair<byte[], List<String>> familyHFilePathsPair;
    List<String> hfilePaths;
    byte[] family;
    Path familyStagingDir;
    int familyHFilePathsPairsListSize;
    int totalNoOfHFiles;
    List<Pair<byte[], List<String>>> familyHFilePathsPairsList;
    FileSystem sourceFs = null;
    try {
        Path sourceClusterPath = new Path(sourceBaseNamespaceDirPath);
        /*
         * Path#getFileSystem will by default get the FS from the cache. If both the source and
         * sink clusters use the same FS name service, it would return the peer cluster's FS. To
         * avoid this we explicitly disable loading the FS from the cache, so that a new FS is
         * created with the source cluster configuration.
         */
        String sourceScheme = sourceClusterPath.toUri().getScheme();
        String disableCacheName = String.format("fs.%s.impl.disable.cache", sourceScheme);
        sourceClusterConf.setBoolean(disableCacheName, true);
        sourceFs = sourceClusterPath.getFileSystem(sourceClusterConf);
        User user = userProvider.getCurrent();
        // For each table name in the map
        for (Entry<String, List<Pair<byte[], List<String>>>> tableEntry : bulkLoadHFileMap.entrySet()) {
            String tableName = tableEntry.getKey();
            // Create staging directory for each table
            Path stagingDir = createStagingDir(hbaseStagingDir, user, TableName.valueOf(tableName));
            familyHFilePathsPairsList = tableEntry.getValue();
            familyHFilePathsPairsListSize = familyHFilePathsPairsList.size();
            // For each list of family hfile paths pair in the table
            for (int i = 0; i < familyHFilePathsPairsListSize; i++) {
                familyHFilePathsPair = familyHFilePathsPairsList.get(i);
                family = familyHFilePathsPair.getFirst();
                hfilePaths = familyHFilePathsPair.getSecond();
                familyStagingDir = new Path(stagingDir, Bytes.toString(family));
                totalNoOfHFiles = hfilePaths.size();
                // For each list of hfile paths for the family
                List<Future<Void>> futures = new ArrayList<>();
                Callable<Void> c;
                Future<Void> future;
                int currentCopied = 0;
                // Copy the hfiles in parallel
                while (totalNoOfHFiles > currentCopied + this.copiesPerThread) {
                    c = new Copier(sourceFs, familyStagingDir, hfilePaths.subList(currentCopied, currentCopied + this.copiesPerThread));
                    future = exec.submit(c);
                    futures.add(future);
                    currentCopied += this.copiesPerThread;
                }
                int remaining = totalNoOfHFiles - currentCopied;
                if (remaining > 0) {
                    c = new Copier(sourceFs, familyStagingDir, hfilePaths.subList(currentCopied, currentCopied + remaining));
                    future = exec.submit(c);
                    futures.add(future);
                }
                for (Future<Void> f : futures) {
                    try {
                        f.get();
                    } catch (InterruptedException e) {
                        InterruptedIOException iioe = new InterruptedIOException("Failed to copy HFiles to local file system. This will be retried again " + "by the source cluster.");
                        iioe.initCause(e);
                        throw iioe;
                    } catch (ExecutionException e) {
                        throw new IOException("Failed to copy HFiles to local file system. This will " + "be retried again by the source cluster.", e);
                    }
                }
            }
            // Add the staging directory to this table. Staging directory contains all the hfiles
            // belonging to this table
            mapOfCopiedHFiles.put(tableName, stagingDir);
        }
        return mapOfCopiedHFiles;
    } finally {
        if (sourceFs != null) {
            sourceFs.close();
        }
        if (exec != null) {
            exec.shutdown();
        }
    }
}
Also used : InterruptedIOException(java.io.InterruptedIOException) User(org.apache.hadoop.hbase.security.User) HashMap(java.util.HashMap) ArrayList(java.util.ArrayList) FileSystem(org.apache.hadoop.fs.FileSystem) LinkedList(java.util.LinkedList) List(java.util.List) ExecutionException(java.util.concurrent.ExecutionException) Pair(org.apache.hadoop.hbase.util.Pair) Path(org.apache.hadoop.fs.Path) IOException(java.io.IOException) Future(java.util.concurrent.Future)
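
For context, the User in Example 1 comes from a UserProvider. Below is a minimal, illustrative sketch (not taken from HFileReplicator) of how HBase code typically obtains the current User; UserProvider.instantiate, getCurrent and getShortName are real HBase APIs, while the class name CurrentUserExample is made up.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.security.User;
import org.apache.hadoop.hbase.security.UserProvider;

public class CurrentUserExample {
    public static void main(String[] args) throws Exception {
        // Security behaviour (simple vs. Kerberos) is driven by this configuration.
        Configuration conf = HBaseConfiguration.create();
        // UserProvider picks the User implementation matching the configured
        // authentication method.
        UserProvider provider = UserProvider.instantiate(conf);
        // The user this process runs as; HFileReplicator passes it to
        // createStagingDir so the staging directories belong to that user.
        User user = provider.getCurrent();
        System.out.println("Running as: " + user.getShortName());
    }
}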

Example 2 with User

Use of org.apache.hadoop.hbase.security.User in project hbase by apache.

From class AccessController, method preDelete:

@Override
public void preDelete(final ObserverContext<RegionCoprocessorEnvironment> c, final Delete delete, final WALEdit edit, final Durability durability) throws IOException {
    // An ACL on a delete is useless, so we shouldn't allow it
    if (delete.getAttribute(AccessControlConstants.OP_ATTRIBUTE_ACL) != null) {
        throw new DoNotRetryIOException("ACL on delete has no effect: " + delete.toString());
    }
    // Require WRITE permissions on all cells covered by the delete. Unlike
    // for Puts we need to check all visible prior versions, because a major
    // compaction could remove them. If the user doesn't have permission to
    // overwrite any of the visible versions ('visible' defined as not covered
    // by a tombstone already) then we have to disallow this operation.
    RegionCoprocessorEnvironment env = c.getEnvironment();
    Map<byte[], ? extends Collection<Cell>> families = delete.getFamilyCellMap();
    User user = getActiveUser(c);
    AuthResult authResult = permissionGranted(OpType.DELETE, user, env, families, Action.WRITE);
    logResult(authResult);
    if (!authResult.isAllowed()) {
        if (cellFeaturesEnabled && !compatibleEarlyTermination) {
            delete.setAttribute(CHECK_COVERING_PERM, TRUE);
        } else if (authorizationEnabled) {
            throw new AccessDeniedException("Insufficient permissions " + authResult.toContextString());
        }
    }
}
Also used : RegionCoprocessorEnvironment(org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment) AccessDeniedException(org.apache.hadoop.hbase.security.AccessDeniedException) User(org.apache.hadoop.hbase.security.User) DoNotRetryIOException(org.apache.hadoop.hbase.DoNotRetryIOException) Cell(org.apache.hadoop.hbase.Cell)
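
The check at the top of preDelete exists because cell ACLs only make sense on writes: they are attached to mutations such as Puts and stored with the cells, so an ACL on a Delete has nothing to apply to. Below is a minimal hedged sketch of attaching a cell-level ACL to a Put, which is what populates the OP_ATTRIBUTE_ACL attribute that preDelete rejects; the row, family and user names are made up for illustration.

import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.security.access.Permission;
import org.apache.hadoop.hbase.util.Bytes;

public class CellAclExample {
    public static void main(String[] args) {
        Put put = new Put(Bytes.toBytes("row1"));
        put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("value"));
        // Grant READ on the written cells to user "bob"; internally this sets the
        // OP_ATTRIBUTE_ACL attribute on the mutation.
        put.setACL("bob", new Permission(Permission.Action.READ));
    }
}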

Example 3 with User

Use of org.apache.hadoop.hbase.security.User in project hbase by apache.

From class AccessController, method preCloneSnapshot:

@Override
public void preCloneSnapshot(final ObserverContext<MasterCoprocessorEnvironment> ctx, final SnapshotDescription snapshot, final HTableDescriptor hTableDescriptor) throws IOException {
    User user = getActiveUser(ctx);
    if (SnapshotDescriptionUtils.isSnapshotOwner(snapshot, user) && hTableDescriptor.getNameAsString().equals(snapshot.getTable())) {
        // Snapshot owner is allowed to create a table with the same name as the snapshot he took
        AuthResult result = AuthResult.allow("cloneSnapshot " + snapshot.getName(), "Snapshot owner check allowed", user, null, hTableDescriptor.getTableName(), null);
        logResult(result);
    } else {
        requirePermission(user, "cloneSnapshot " + snapshot.getName(), Action.ADMIN);
    }
}
Also used : User(org.apache.hadoop.hbase.security.User)

Example 4 with User

Use of org.apache.hadoop.hbase.security.User in project hbase by apache.

From class AccessController, method grant:

/* ---- Protobuf AccessControlService implementation ---- */
@Override
public void grant(RpcController controller, AccessControlProtos.GrantRequest request, RpcCallback<AccessControlProtos.GrantResponse> done) {
    final UserPermission perm = AccessControlUtil.toUserPermission(request.getUserPermission());
    AccessControlProtos.GrantResponse response = null;
    try {
        // verify this is only running on the ACL (.acl.) region
        if (aclRegion) {
            if (!initialized) {
                throw new CoprocessorException("AccessController not yet initialized");
            }
            if (LOG.isDebugEnabled()) {
                LOG.debug("Received request to grant access permission " + perm.toString());
            }
            User caller = RpcServer.getRequestUser();
            switch(request.getUserPermission().getPermission().getType()) {
                case Global:
                case Table:
                    requirePermission(caller, "grant", perm.getTableName(), perm.getFamily(), perm.getQualifier(), Action.ADMIN);
                    break;
                case Namespace:
                    requireNamespacePermission(caller, "grant", perm.getNamespace(), Action.ADMIN);
                    break;
            }
            User.runAsLoginUser(new PrivilegedExceptionAction<Void>() {

                @Override
                public Void run() throws Exception {
                    AccessControlLists.addUserPermission(regionEnv.getConfiguration(), perm, regionEnv.getTable(AccessControlLists.ACL_TABLE_NAME), request.getMergeExistingPermissions());
                    return null;
                }
            });
            if (AUDITLOG.isTraceEnabled()) {
                // audit log should store permission changes in addition to auth results
                AUDITLOG.trace("Granted permission " + perm.toString());
            }
        } else {
            throw new CoprocessorException(AccessController.class, "This method " + "can only execute at " + AccessControlLists.ACL_TABLE_NAME + " table.");
        }
        response = AccessControlProtos.GrantResponse.getDefaultInstance();
    } catch (IOException ioe) {
        // pass exception back up
        CoprocessorRpcUtils.setControllerException(controller, ioe);
    }
    done.run(response);
}
Also used : AccessControlProtos(org.apache.hadoop.hbase.protobuf.generated.AccessControlProtos) User(org.apache.hadoop.hbase.security.User) IOException(java.io.IOException) DoNotRetryIOException(org.apache.hadoop.hbase.DoNotRetryIOException) AccessDeniedException(org.apache.hadoop.hbase.security.AccessDeniedException) CoprocessorException(org.apache.hadoop.hbase.coprocessor.CoprocessorException)
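
Example 4 wraps the ACL-table update in User.runAsLoginUser(...), which runs the action as the process login user rather than as the RPC caller. Below is a minimal hedged sketch of the two common ways of running code under a User; the API calls are real HBase/Hadoop methods, but the action bodies and class name are only illustrative.

import java.security.PrivilegedExceptionAction;
import org.apache.hadoop.hbase.security.User;
import org.apache.hadoop.security.UserGroupInformation;

public class RunAsExample {
    public static void main(String[] args) throws Exception {
        // Run an action as the login user of this process, as grant() does above.
        String loginName = User.runAsLoginUser(new PrivilegedExceptionAction<String>() {
            @Override
            public String run() throws Exception {
                return UserGroupInformation.getCurrentUser().getUserName();
            }
        });

        // Run an action in the context of a specific User instance instead.
        User user = User.getCurrent();
        String currentName = user.runAs(new PrivilegedExceptionAction<String>() {
            @Override
            public String run() throws Exception {
                return UserGroupInformation.getCurrentUser().getUserName();
            }
        });

        System.out.println(loginName + " / " + currentName);
    }
}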

Example 5 with User

Use of org.apache.hadoop.hbase.security.User in project hbase by apache.

From class AccessController, method preIncrementColumnValue:

@Override
public long preIncrementColumnValue(final ObserverContext<RegionCoprocessorEnvironment> c, final byte[] row, final byte[] family, final byte[] qualifier, final long amount, final boolean writeToWAL) throws IOException {
    // Require WRITE permission to the table, CF, and the KV to be replaced by the
    // incremented value
    RegionCoprocessorEnvironment env = c.getEnvironment();
    Map<byte[], ? extends Collection<byte[]>> families = makeFamilyMap(family, qualifier);
    User user = getActiveUser(c);
    AuthResult authResult = permissionGranted(OpType.INCREMENT_COLUMN_VALUE, user, env, families, Action.WRITE);
    if (!authResult.isAllowed() && cellFeaturesEnabled && !compatibleEarlyTermination) {
        authResult.setAllowed(checkCoveringPermission(user, OpType.INCREMENT_COLUMN_VALUE, env, row, families, HConstants.LATEST_TIMESTAMP, Action.WRITE));
        authResult.setReason("Covering cell set");
    }
    logResult(authResult);
    if (authorizationEnabled && !authResult.isAllowed()) {
        throw new AccessDeniedException("Insufficient permissions " + authResult.toContextString());
    }
    return -1;
}
Also used : RegionCoprocessorEnvironment(org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment) AccessDeniedException(org.apache.hadoop.hbase.security.AccessDeniedException) User(org.apache.hadoop.hbase.security.User)

Aggregations

User (org.apache.hadoop.hbase.security.User): 80
Test (org.junit.Test): 35
IOException (java.io.IOException): 33
AccessDeniedException (org.apache.hadoop.hbase.security.AccessDeniedException): 23
Connection (org.apache.hadoop.hbase.client.Connection): 22
Configuration (org.apache.hadoop.conf.Configuration): 18
TableName (org.apache.hadoop.hbase.TableName): 18
Table (org.apache.hadoop.hbase.client.Table): 16
DoNotRetryIOException (org.apache.hadoop.hbase.DoNotRetryIOException): 15
Put (org.apache.hadoop.hbase.client.Put): 14
FileSystem (org.apache.hadoop.fs.FileSystem): 10
Get (org.apache.hadoop.hbase.client.Get): 10
ArrayList (java.util.ArrayList): 8
Path (org.apache.hadoop.fs.Path): 8
HBaseConfiguration (org.apache.hadoop.hbase.HBaseConfiguration): 8
RegionCoprocessorEnvironment (org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment): 8
HColumnDescriptor (org.apache.hadoop.hbase.HColumnDescriptor): 7
HTableDescriptor (org.apache.hadoop.hbase.HTableDescriptor): 7
Admin (org.apache.hadoop.hbase.client.Admin): 7
WAL (org.apache.hadoop.hbase.wal.WAL): 7
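
The aggregation list shows Test (org.junit.Test) co-occurring with User 35 times; in tests a User is usually fabricated rather than taken from the environment. Below is a minimal hedged sketch using the real helper User.createUserForTesting; the user name, group and action body are made up for illustration.

import java.security.PrivilegedExceptionAction;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.security.User;

public class TestUserExample {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // Create a throwaway user with an explicit group list; AccessController
        // tests commonly use this to exercise permission checks as different users.
        User testUser = User.createUserForTesting(conf, "alice", new String[] { "testgroup" });
        testUser.runAs(new PrivilegedExceptionAction<Void>() {
            @Override
            public Void run() throws Exception {
                // Operations issued here (e.g. a Get or Put through a Connection)
                // execute as "alice".
                return null;
            }
        });
    }
}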