Use of org.apache.hadoop.security.UserGroupInformation in project flink by apache.
Class YarnTaskExecutorRunner, method run().
/**
* The instance entry point for the YARN task executor. Obtains user group
* information and calls the main work method {@link #runTaskExecutor(org.apache.flink.configuration.Configuration)} as a
* privileged action.
*
* @param args The command line arguments.
* @return The process exit code.
*/
protected int run(String[] args) {
    try {
        LOG.debug("All environment variables: {}", ENV);
        final String yarnClientUsername = ENV.get(YarnConfigKeys.ENV_HADOOP_USER_NAME);
        final String localDirs = ENV.get(Environment.LOCAL_DIRS.key());
        LOG.info("Current working/local Directory: {}", localDirs);
        final String currDir = ENV.get(Environment.PWD.key());
        LOG.info("Current working Directory: {}", currDir);
        final String remoteKeytabPath = ENV.get(YarnConfigKeys.KEYTAB_PATH);
        LOG.info("TM: remote keytab path obtained {}", remoteKeytabPath);
        final String remoteKeytabPrincipal = ENV.get(YarnConfigKeys.KEYTAB_PRINCIPAL);
        LOG.info("TM: remote keytab principal obtained {}", remoteKeytabPrincipal);
        final Configuration configuration = GlobalConfiguration.loadConfiguration(currDir);
        FileSystem.setDefaultScheme(configuration);
        // configure local directory
        String flinkTempDirs = configuration.getString(ConfigConstants.TASK_MANAGER_TMP_DIR_KEY, null);
        if (flinkTempDirs == null) {
            LOG.info("Setting directories for temporary file " + localDirs);
            configuration.setString(ConfigConstants.TASK_MANAGER_TMP_DIR_KEY, localDirs);
        } else {
            LOG.info("Overriding YARN's temporary file directories with those " + "specified in the Flink config: " + flinkTempDirs);
        }
        // tell akka to die in case of an error
        configuration.setBoolean(ConfigConstants.AKKA_JVM_EXIT_ON_FATAL_ERROR, true);
        String keytabPath = null;
        if (remoteKeytabPath != null) {
            File f = new File(currDir, Utils.KEYTAB_FILE_NAME);
            keytabPath = f.getAbsolutePath();
            LOG.info("keytab path: {}", keytabPath);
        }
        UserGroupInformation currentUser = UserGroupInformation.getCurrentUser();
        LOG.info("YARN daemon is running as: {} Yarn client user obtainer: {}", currentUser.getShortUserName(), yarnClientUsername);
        org.apache.hadoop.conf.Configuration hadoopConfiguration = null;
        // to support the YARN secure integration test scenario
        File krb5Conf = new File(currDir, Utils.KRB5_FILE_NAME);
        if (krb5Conf.exists() && krb5Conf.canRead()) {
            String krb5Path = krb5Conf.getAbsolutePath();
            LOG.info("KRB5 Conf: {}", krb5Path);
            hadoopConfiguration = new org.apache.hadoop.conf.Configuration();
            hadoopConfiguration.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION, "kerberos");
            hadoopConfiguration.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHORIZATION, "true");
        }
        SecurityUtils.SecurityConfiguration sc;
        if (hadoopConfiguration != null) {
            sc = new SecurityUtils.SecurityConfiguration(configuration, hadoopConfiguration);
        } else {
            sc = new SecurityUtils.SecurityConfiguration(configuration);
        }
        if (keytabPath != null && remoteKeytabPrincipal != null) {
            configuration.setString(SecurityOptions.KERBEROS_LOGIN_KEYTAB, keytabPath);
            configuration.setString(SecurityOptions.KERBEROS_LOGIN_PRINCIPAL, remoteKeytabPrincipal);
        }
        SecurityUtils.install(sc);
        return SecurityUtils.getInstalledContext().runSecured(new Callable<Integer>() {

            @Override
            public Integer call() throws Exception {
                return runTaskExecutor(configuration);
            }
        });
    } catch (Throwable t) {
        // make sure that everything whatever ends up in the log
        LOG.error("YARN Application Master initialization failed", t);
        return INIT_ERROR_EXIT_CODE;
    }
}
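The method above only records the localized keytab path and principal in the Flink configuration and defers the actual Kerberos login to SecurityUtils.install. As a rough sketch of what that login amounts to at the UserGroupInformation level (the principal and keytab path below are placeholders, not values taken from the snippet):

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
import org.apache.hadoop.security.UserGroupInformation;

public class KeytabLoginSketch {
    public static void main(String[] args) throws IOException {
        Configuration hadoopConf = new Configuration();
        // Enable Kerberos authentication, mirroring the Hadoop configuration built above.
        hadoopConf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION, "kerberos");
        UserGroupInformation.setConfiguration(hadoopConf);
        // Placeholder principal and keytab; in the snippet above these come from
        // YarnConfigKeys.KEYTAB_PRINCIPAL and the keytab file localized into the container.
        UserGroupInformation.loginUserFromKeytab("flink/host@EXAMPLE.COM", "/path/to/flink.keytab");
        System.out.println("Logged in as: " + UserGroupInformation.getCurrentUser());
    }
}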
Use of org.apache.hadoop.security.UserGroupInformation in project flink by apache.
Class AbstractYarnClusterDescriptor, method deploy().
@Override
public YarnClusterClient deploy() {
    try {
        if (UserGroupInformation.isSecurityEnabled()) {
            // note: UGI::hasKerberosCredentials inaccurately reports false
            // for logins based on a keytab (fixed in Hadoop 2.6.1, see HADOOP-10786),
            // so we check only in ticket cache scenario.
            boolean useTicketCache = flinkConfiguration.getBoolean(SecurityOptions.KERBEROS_LOGIN_USETICKETCACHE);
            UserGroupInformation loginUser = UserGroupInformation.getCurrentUser();
            if (loginUser.getAuthenticationMethod() == UserGroupInformation.AuthenticationMethod.KERBEROS && useTicketCache && !loginUser.hasKerberosCredentials()) {
                LOG.error("Hadoop security with Kerberos is enabled but the login user does not have Kerberos credentials");
                throw new RuntimeException("Hadoop security with Kerberos is enabled but the login user " + "does not have Kerberos credentials");
            }
        }
        return deployInternal();
    } catch (Exception e) {
        throw new RuntimeException("Couldn't deploy Yarn cluster", e);
    }
}
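The same credential guard can be distilled into a standalone pre-flight check. The following is a minimal sketch; the class and method names are illustrative, only the UserGroupInformation calls mirror the check in deploy() above, and unlike deploy() this sketch applies the check regardless of whether the ticket cache is used:

import java.io.IOException;
import org.apache.hadoop.security.UserGroupInformation;

public class KerberosPreflightCheck {
    public static void ensureKerberosCredentials() throws IOException {
        if (!UserGroupInformation.isSecurityEnabled()) {
            return; // nothing to verify on a non-secure cluster
        }
        UserGroupInformation loginUser = UserGroupInformation.getCurrentUser();
        boolean kerberosLogin =
            loginUser.getAuthenticationMethod() == UserGroupInformation.AuthenticationMethod.KERBEROS;
        if (kerberosLogin && !loginUser.hasKerberosCredentials()) {
            // Keytab logins may report false here on Hadoop versions before 2.6.1 (HADOOP-10786).
            throw new IllegalStateException(
                "Kerberos is enabled but the login user has no Kerberos credentials");
        }
    }
}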
Use of org.apache.hadoop.security.UserGroupInformation in project flink by apache.
Class Utils, method setTokensFor().
public static void setTokensFor(ContainerLaunchContext amContainer, List<Path> paths, Configuration conf) throws IOException {
    Credentials credentials = new Credentials();
    // for HDFS
    TokenCache.obtainTokensForNamenodes(credentials, paths.toArray(new Path[0]), conf);
    // for HBase
    obtainTokenForHBase(credentials, conf);
    // for user
    UserGroupInformation currUsr = UserGroupInformation.getCurrentUser();
    Collection<Token<? extends TokenIdentifier>> usrTok = currUsr.getTokens();
    for (Token<? extends TokenIdentifier> token : usrTok) {
        final Text id = new Text(token.getIdentifier());
        LOG.info("Adding user token " + id + " with " + token);
        credentials.addToken(id, token);
    }
    try (DataOutputBuffer dob = new DataOutputBuffer()) {
        credentials.writeTokenStorageToStream(dob);
        if (LOG.isDebugEnabled()) {
            LOG.debug("Wrote tokens. Credentials buffer length: " + dob.getLength());
        }
        ByteBuffer securityTokens = ByteBuffer.wrap(dob.getData(), 0, dob.getLength());
        amContainer.setTokens(securityTokens);
    }
}
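setTokensFor only serializes the collected credentials into the container launch context. On the container side, YARN normally exposes those tokens through the file named by the HADOOP_TOKEN_FILE_LOCATION environment variable, and UserGroupInformation picks them up automatically at login; the sketch below shows how the same deserialization could be done explicitly. The helper class is illustrative only:

import java.io.DataInputStream;
import java.io.File;
import java.io.FileInputStream;
import java.io.IOException;
import org.apache.hadoop.security.Credentials;
import org.apache.hadoop.security.UserGroupInformation;

public class TokenReadbackSketch {
    public static void attachContainerTokens() throws IOException {
        String tokenFile = System.getenv("HADOOP_TOKEN_FILE_LOCATION");
        if (tokenFile == null) {
            return; // not running inside a YARN container
        }
        Credentials credentials = new Credentials();
        try (DataInputStream in = new DataInputStream(new FileInputStream(new File(tokenFile)))) {
            // Counterpart of writeTokenStorageToStream in the snippet above.
            credentials.readTokenStorageStream(in);
        }
        // Make the deserialized tokens visible to the current UGI.
        UserGroupInformation.getCurrentUser().addCredentials(credentials);
    }
}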
Use of org.apache.hadoop.security.UserGroupInformation in project hadoop by apache.
Class FileSystem, method checkAccessPermissions().
/**
* This method provides the default implementation of
* {@link #access(Path, FsAction)}.
*
* @param stat FileStatus to check
* @param mode type of access to check
* @throws AccessControlException if access is denied
* @throws IOException for any error
*/
@InterfaceAudience.Private
static void checkAccessPermissions(FileStatus stat, FsAction mode) throws AccessControlException, IOException {
    FsPermission perm = stat.getPermission();
    UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
    String user = ugi.getShortUserName();
    if (user.equals(stat.getOwner())) {
        if (perm.getUserAction().implies(mode)) {
            return;
        }
    } else if (ugi.getGroups().contains(stat.getGroup())) {
        if (perm.getGroupAction().implies(mode)) {
            return;
        }
    } else {
        if (perm.getOtherAction().implies(mode)) {
            return;
        }
    }
    throw new AccessControlException(String.format("Permission denied: user=%s, path=\"%s\":%s:%s:%s%s", user, stat.getPath(), stat.getOwner(), stat.getGroup(), stat.isDirectory() ? "d" : "-", perm));
}
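checkAccessPermissions is package-private; application code would normally go through the public FileSystem#access API, which this default implementation backs. A minimal usage sketch, with the wrapper class and path handling as illustrative placeholders:

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.security.AccessControlException;

public class AccessCheckSketch {
    public static boolean canRead(Path path, Configuration conf) throws IOException {
        FileSystem fs = FileSystem.get(conf);
        try {
            // Throws AccessControlException when the current UGI user lacks the requested access.
            fs.access(path, FsAction.READ);
            return true;
        } catch (AccessControlException e) {
            return false;
        }
    }
}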
Use of org.apache.hadoop.security.UserGroupInformation in project hadoop by apache.
Class FileContext, method getFileContext().
/**
 * Create a FileContext for the specified default URI using the specified config.
 *
 * @param defaultFsUri the URI of the default file system
 * @param aConf the configuration to use
 * @return new FileContext for the specified URI
 * @throws UnsupportedFileSystemException If the file system for the specified
 * URI is not supported
 * @throws RuntimeException If the file system specified is supported but
 * could not be instantiated, or if login fails.
*/
public static FileContext getFileContext(final URI defaultFsUri, final Configuration aConf) throws UnsupportedFileSystemException {
    UserGroupInformation currentUser = null;
    AbstractFileSystem defaultAfs = null;
    if (defaultFsUri.getScheme() == null) {
        return getFileContext(aConf);
    }
    try {
        currentUser = UserGroupInformation.getCurrentUser();
        defaultAfs = getAbstractFileSystem(currentUser, defaultFsUri, aConf);
    } catch (UnsupportedFileSystemException ex) {
        throw ex;
    } catch (IOException ex) {
        LOG.error(ex);
        throw new RuntimeException(ex);
    }
    return getFileContext(defaultAfs, aConf);
}
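A short usage sketch of this factory method; the HDFS URI is a placeholder, and the current UserGroupInformation user is picked up internally, as shown in the method above:

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileContext;

public class FileContextUsageSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Bind the FileContext to an explicit default file system.
        FileContext fc = FileContext.getFileContext(URI.create("hdfs://namenode:8020"), conf);
        // Simple sanity check against the chosen default file system.
        System.out.println("Working dir: " + fc.getWorkingDirectory());
    }
}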