Search in sources :

Example 1 with FsDelegationToken

Use of org.apache.hadoop.hbase.security.token.FsDelegationToken in the Apache HBase project.

From the class SecureBulkLoadManager, the method secureBulkLoadHFiles:

/**
 * Securely bulk loads the HFiles described by {@code request} into {@code region},
 * running the staging and load steps as the requesting user via {@code ugi.doAs}.
 *
 * <p>Visible flow:
 * <ol>
 *   <li>decode the (family, HFile path) pairs from the protobuf request;</li>
 *   <li>in secure mode, reconstruct the filesystem delegation token the client
 *       shipped in the request, and fail fast if it is missing;</li>
 *   <li>attach an HBase auth token (best effort) and, when the target filesystem
 *       differs from the client's, a target-fs delegation token to the caller's UGI;</li>
 *   <li>unless a coprocessor bypasses the load, create per-family staging
 *       directories and perform the load inside {@code doAs};</li>
 *   <li>always notify coprocessors of the outcome in the {@code finally} block.</li>
 * </ol>
 *
 * @param region  target region for the bulk load
 * @param request protobuf request carrying family/path pairs, the client's fs
 *                token, the staging-directory bulk token and the copy-file flag
 * @return map of column family to the paths that were loaded, or {@code null}
 *         when a coprocessor bypassed the load or the load failed (failures
 *         inside {@code doAs} are logged, not rethrown)
 * @throws IOException if security is enabled but the request carries no user
 *         token ({@link DoNotRetryIOException}), or on other I/O failures
 */
public Map<byte[], List<Path>> secureBulkLoadHFiles(final Region region, final BulkLoadHFileRequest request) throws IOException {
    // Decode the protobuf (family, HFile path) pairs into the form the region API expects.
    final List<Pair<byte[], String>> familyPaths = new ArrayList<>(request.getFamilyPathCount());
    for (ClientProtos.BulkLoadHFileRequest.FamilyPath el : request.getFamilyPathList()) {
        familyPaths.add(new Pair<>(el.getFamily().toByteArray(), el.getPath()));
    }
    // In secure mode, rebuild the delegation token the client obtained for its
    // source filesystem and embedded in the request.
    Token userToken = null;
    if (userProvider.isHadoopSecurityEnabled()) {
        userToken = new Token(request.getFsToken().getIdentifier().toByteArray(), request.getFsToken().getPassword().toByteArray(), new Text(request.getFsToken().getKind()), new Text(request.getFsToken().getService()));
    }
    // Opaque token naming the staging directory used for this load.
    final String bulkToken = request.getBulkToken();
    User user = getActiveUser();
    final UserGroupInformation ugi = user.getUGI();
    if (userProvider.isHadoopSecurityEnabled()) {
        // Best effort: also attach an HBase auth token so work done under doAs
        // can authenticate back to HBase. Failure here is logged, not fatal.
        try {
            Token tok = TokenUtil.obtainToken(conn);
            if (tok != null) {
                boolean b = ugi.addToken(tok);
                LOG.debug("token added " + tok + " for user " + ugi + " return=" + b);
            }
        } catch (IOException ioe) {
            LOG.warn("unable to add token", ioe);
        }
    }
    if (userToken != null) {
        ugi.addToken(userToken);
    } else if (userProvider.isHadoopSecurityEnabled()) {
        // Security is on but the client sent no fs token: refuse rather than
        // proceed without credentials. A null token is only tolerated when
        // security is off (e.g. mini cluster testing).
        throw new DoNotRetryIOException("User token cannot be null");
    }
    // Give coprocessors a chance to veto (bypass) the load entirely.
    boolean bypass = false;
    if (region.getCoprocessorHost() != null) {
        bypass = region.getCoprocessorHost().preBulkLoadHFile(familyPaths);
    }
    boolean loaded = false;
    Map<byte[], List<Path>> map = null;
    try {
        if (!bypass) {
            // Two tokens may be needed: one for the client's source fs
            // ('request user'), another for the target fs (HBase region server principal).
            if (userProvider.isHadoopSecurityEnabled()) {
                FsDelegationToken targetfsDelegationToken = new FsDelegationToken(userProvider, "renewer");
                targetfsDelegationToken.acquireDelegationToken(fs);
                // Only attach the target-fs token when its service differs from
                // the user's own token (i.e. source fs != target fs).
                Token<?> targetFsToken = targetfsDelegationToken.getUserToken();
                if (targetFsToken != null && (userToken == null || !targetFsToken.getService().equals(userToken.getService()))) {
                    ugi.addToken(targetFsToken);
                }
                // NOTE(review): the acquired delegation token is never released in
                // this method — confirm cleanup happens elsewhere or this leaks.
            }
            // Run the staging-directory setup and the actual load as the requester.
            map = ugi.doAs(new PrivilegedAction<Map<byte[], List<Path>>>() {

                @Override
                public Map<byte[], List<Path>> run() {
                    FileSystem fs = null;
                    try {
                        fs = FileSystem.get(conf);
                        // Pre-create one staging subdirectory per family; opened up
                        // with PERM_ALL_ACCESS (presumably 777 so both the client
                        // and the region server principal can move files — confirm
                        // the constant's definition) prior to staging.
                        for (Pair<byte[], String> el : familyPaths) {
                            Path stageFamily = new Path(bulkToken, Bytes.toString(el.getFirst()));
                            if (!fs.exists(stageFamily)) {
                                fs.mkdirs(stageFamily);
                                fs.setPermission(stageFamily, PERM_ALL_ACCESS);
                            }
                        }
                        // Delegate the real work to the region; the listener stages
                        // files under bulkToken as they are adopted.
                        return region.bulkLoadHFiles(familyPaths, true, new SecureBulkLoadListener(fs, bulkToken, conf), request.getCopyFile());
                    } catch (Exception e) {
                        // Failures surface to the caller only as a null result.
                        LOG.error("Failed to complete bulk load", e);
                    }
                    return null;
                }
            });
            if (map != null) {
                loaded = true;
            }
        }
    } finally {
        // Always notify coprocessors of the outcome, even on exception or bypass.
        if (region.getCoprocessorHost() != null) {
            region.getCoprocessorHost().postBulkLoadHFile(familyPaths, map, loaded);
        }
    }
    return map;
}
Also used : Path(org.apache.hadoop.fs.Path) User(org.apache.hadoop.hbase.security.User) BulkLoadHFileRequest(org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.BulkLoadHFileRequest) DoNotRetryIOException(org.apache.hadoop.hbase.DoNotRetryIOException) ArrayList(java.util.ArrayList) Token(org.apache.hadoop.security.token.Token) FsDelegationToken(org.apache.hadoop.hbase.security.token.FsDelegationToken) Text(org.apache.hadoop.io.Text) DoNotRetryIOException(org.apache.hadoop.hbase.DoNotRetryIOException) IOException(java.io.IOException) DoNotRetryIOException(org.apache.hadoop.hbase.DoNotRetryIOException) IOException(java.io.IOException) FsDelegationToken(org.apache.hadoop.hbase.security.token.FsDelegationToken) PrivilegedAction(java.security.PrivilegedAction) FileSystem(org.apache.hadoop.fs.FileSystem) ArrayList(java.util.ArrayList) List(java.util.List) Pair(org.apache.hadoop.hbase.util.Pair) UserGroupInformation(org.apache.hadoop.security.UserGroupInformation)

Example 2 with FsDelegationToken

Use of org.apache.hadoop.hbase.security.token.FsDelegationToken in the Apache HBase project.

From the class LoadIncrementalHFiles, the method initialize:

/**
 * Lazily initializes this tool's configuration-derived state: the user
 * provider, the filesystem delegation-token holder, and the load tuning knobs.
 * Repeated calls are no-ops; only the first invocation does any work.
 *
 * @throws Exception if configuration or user-provider setup fails
 */
private void initialize() throws Exception {
    if (initalized) {
        return;
    }
    // Work on a private copy of the configuration so we never mutate a
    // configuration object owned by someone else.
    setConf(HBaseConfiguration.create(getConf()));
    final Configuration configuration = getConf();
    // Tool invocations should not populate the block cache (see HBASE-10500).
    configuration.setFloat(HConstants.HFILE_BLOCK_CACHE_SIZE_KEY, 0);
    this.userProvider = UserProvider.instantiate(configuration);
    this.fsDelegationToken = new FsDelegationToken(userProvider, "renewer");
    this.assignSeqIds = configuration.getBoolean(ASSIGN_SEQ_IDS, true);
    this.maxFilesPerRegionPerFamily = configuration.getInt(MAX_FILES_PER_REGION_PER_FAMILY, 32);
    this.nrThreads = configuration.getInt("hbase.loadincremental.threads.max", Runtime.getRuntime().availableProcessors());
    this.numRetries = new AtomicInteger(1);
    initalized = true;
}
Also used : Configuration(org.apache.hadoop.conf.Configuration) HBaseConfiguration(org.apache.hadoop.hbase.HBaseConfiguration) FsDelegationToken(org.apache.hadoop.hbase.security.token.FsDelegationToken) AtomicInteger(java.util.concurrent.atomic.AtomicInteger)

Aggregations

FsDelegationToken (org.apache.hadoop.hbase.security.token.FsDelegationToken)2 IOException (java.io.IOException)1 PrivilegedAction (java.security.PrivilegedAction)1 ArrayList (java.util.ArrayList)1 List (java.util.List)1 AtomicInteger (java.util.concurrent.atomic.AtomicInteger)1 Configuration (org.apache.hadoop.conf.Configuration)1 FileSystem (org.apache.hadoop.fs.FileSystem)1 Path (org.apache.hadoop.fs.Path)1 DoNotRetryIOException (org.apache.hadoop.hbase.DoNotRetryIOException)1 HBaseConfiguration (org.apache.hadoop.hbase.HBaseConfiguration)1 User (org.apache.hadoop.hbase.security.User)1 BulkLoadHFileRequest (org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.BulkLoadHFileRequest)1 Pair (org.apache.hadoop.hbase.util.Pair)1 Text (org.apache.hadoop.io.Text)1 UserGroupInformation (org.apache.hadoop.security.UserGroupInformation)1 Token (org.apache.hadoop.security.token.Token)1