Use of org.apache.hadoop.hbase.security.token.FsDelegationToken in project hbase by apache.
The class SecureBulkLoadManager, method secureBulkLoadHFiles.
public Map<byte[], List<Path>> secureBulkLoadHFiles(final Region region,
    final BulkLoadHFileRequest request) throws IOException {
  final List<Pair<byte[], String>> familyPaths = new ArrayList<>(request.getFamilyPathCount());
  for (ClientProtos.BulkLoadHFileRequest.FamilyPath el : request.getFamilyPathList()) {
    familyPaths.add(new Pair<>(el.getFamily().toByteArray(), el.getPath()));
  }
  Token userToken = null;
  if (userProvider.isHadoopSecurityEnabled()) {
    userToken = new Token(request.getFsToken().getIdentifier().toByteArray(),
        request.getFsToken().getPassword().toByteArray(),
        new Text(request.getFsToken().getKind()),
        new Text(request.getFsToken().getService()));
  }
  final String bulkToken = request.getBulkToken();
  User user = getActiveUser();
  final UserGroupInformation ugi = user.getUGI();
  if (userProvider.isHadoopSecurityEnabled()) {
    try {
      Token tok = TokenUtil.obtainToken(conn);
      if (tok != null) {
        boolean b = ugi.addToken(tok);
        LOG.debug("token added " + tok + " for user " + ugi + " return=" + b);
      }
    } catch (IOException ioe) {
      LOG.warn("unable to add token", ioe);
    }
  }
  if (userToken != null) {
    ugi.addToken(userToken);
  } else if (userProvider.isHadoopSecurityEnabled()) {
    // A missing user token is only tolerated in "simple" security mode,
    // for mini cluster testing.
    throw new DoNotRetryIOException("User token cannot be null");
  }
  boolean bypass = false;
  if (region.getCoprocessorHost() != null) {
    bypass = region.getCoprocessorHost().preBulkLoadHFile(familyPaths);
  }
  boolean loaded = false;
  Map<byte[], List<Path>> map = null;
  try {
    if (!bypass) {
      // In secure mode two delegation tokens may be needed: one for the source fs
      // ('request user'), another for the target fs (HBase region server principal).
      if (userProvider.isHadoopSecurityEnabled()) {
        FsDelegationToken targetfsDelegationToken = new FsDelegationToken(userProvider, "renewer");
        targetfsDelegationToken.acquireDelegationToken(fs);
        Token<?> targetFsToken = targetfsDelegationToken.getUserToken();
        if (targetFsToken != null
            && (userToken == null || !targetFsToken.getService().equals(userToken.getService()))) {
          ugi.addToken(targetFsToken);
        }
      }
      map = ugi.doAs(new PrivilegedAction<Map<byte[], List<Path>>>() {
        @Override
        public Map<byte[], List<Path>> run() {
          FileSystem fs = null;
          try {
            fs = FileSystem.get(conf);
            for (Pair<byte[], String> el : familyPaths) {
              Path stageFamily = new Path(bulkToken, Bytes.toString(el.getFirst()));
              if (!fs.exists(stageFamily)) {
                fs.mkdirs(stageFamily);
                fs.setPermission(stageFamily, PERM_ALL_ACCESS);
              }
            }
            // We call bulkLoadHFiles as the requesting user
            // to enable access prior to staging.
            return region.bulkLoadHFiles(familyPaths, true,
                new SecureBulkLoadListener(fs, bulkToken, conf), request.getCopyFile());
          } catch (Exception e) {
            LOG.error("Failed to complete bulk load", e);
          }
          return null;
        }
      });
      if (map != null) {
        loaded = true;
      }
    }
  } finally {
    if (region.getCoprocessorHost() != null) {
      region.getCoprocessorHost().postBulkLoadHFile(familyPaths, map, loaded);
    }
  }
  return map;
}
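The heart of the method above is running the staging-directory work as the request user: the user's filesystem token (and, when its service differs, a delegation token for the region server's own filesystem) is attached to a UserGroupInformation, and the filesystem calls then run inside ugi.doAs. Below is a minimal, self-contained sketch of that pattern; the class name, method name, and staging path are illustrative and not part of the HBase source.

import java.security.PrivilegedAction;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.token.Token;

// Illustrative sketch (not HBase source): attach a filesystem delegation
// token to a UGI, then create a staging directory as that user via doAs.
public class DoAsStagingSketch {

  static boolean stageAsUser(final UserGroupInformation ugi, Token<?> fsToken,
      final Configuration conf, final Path stageDir) {
    if (fsToken != null) {
      // These are the credentials FileSystem.get() authenticates with in run().
      ugi.addToken(fsToken);
    }
    return ugi.doAs(new PrivilegedAction<Boolean>() {
      @Override
      public Boolean run() {
        try {
          FileSystem fs = FileSystem.get(conf);
          if (!fs.exists(stageDir)) {
            fs.mkdirs(stageDir);
          }
          return true;
        } catch (Exception e) {
          // Report failure instead of propagating, mirroring the snippet above.
          return false;
        }
      }
    });
  }
}

Resolving the FileSystem inside run() is the point of the pattern: FileSystem.get() picks up the credentials of the doAs user, not those of the region server process.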
Use of org.apache.hadoop.hbase.security.token.FsDelegationToken in project hbase by apache.
The class LoadIncrementalHFiles, method initialize.
private void initialize() throws Exception {
  if (initalized) {
    return;
  }
  // make a copy, just to be sure we're not overriding someone else's config
  setConf(HBaseConfiguration.create(getConf()));
  Configuration conf = getConf();
  // disable blockcache for tool invocation, see HBASE-10500
  conf.setFloat(HConstants.HFILE_BLOCK_CACHE_SIZE_KEY, 0);
  this.userProvider = UserProvider.instantiate(conf);
  this.fsDelegationToken = new FsDelegationToken(userProvider, "renewer");
  assignSeqIds = conf.getBoolean(ASSIGN_SEQ_IDS, true);
  maxFilesPerRegionPerFamily = conf.getInt(MAX_FILES_PER_REGION_PER_FAMILY, 32);
  nrThreads = conf.getInt("hbase.loadincremental.threads.max",
      Runtime.getRuntime().availableProcessors());
  initalized = true;
  numRetries = new AtomicInteger(1);
}
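initialize() only constructs the FsDelegationToken; the token itself is acquired just before the actual load and released when it finishes. The standalone sketch below condenses that acquire/release lifecycle; the class name, method name, and the elided load step are illustrative, not the tool's API, while the "renewer" string mirrors the snippet above.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.security.UserProvider;
import org.apache.hadoop.hbase.security.token.FsDelegationToken;

// Illustrative sketch (not HBase source): the acquire/release lifecycle of an
// FsDelegationToken around a bulk load of HFiles rooted at hfofDir.
public class TokenLifecycleSketch {

  public static void loadWithToken(Path hfofDir) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    UserProvider userProvider = UserProvider.instantiate(conf);
    FsDelegationToken fsDelegationToken = new FsDelegationToken(userProvider, "renewer");
    FileSystem fs = hfofDir.getFileSystem(conf);
    try {
      // Obtains a delegation token for fs when Hadoop security is enabled;
      // otherwise this is effectively a no-op.
      fsDelegationToken.acquireDelegationToken(fs);
      // ... walk hfofDir, group HFiles by region, and bulk-load them ...
    } finally {
      // Always release the token, whether or not the load succeeded.
      fsDelegationToken.releaseDelegationToken();
    }
  }
}

Because acquireDelegationToken does nothing when Hadoop security is disabled, the same code path works unchanged on insecure clusters.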