Usage of org.apache.hadoop.util.Shell.ShellCommandExecutor in the Apache Hadoop project: class ShellBasedUnixGroupsMapping, method resolvePartialGroupNames.
/**
 * Attempt to resolve group names that the primary lookup left incomplete,
 * by fetching the user's numeric group IDs and mapping them back to names.
 *
 * @param userName the user's name
 * @param errMessage error message from the shell command
 * @param groupNames the incomplete list of group names
 * @return a list of resolved group names
 * @throws PartialGroupNameException if the resolution fails or times out
 */
private List<String> resolvePartialGroupNames(String userName, String errMessage, String groupNames) throws PartialGroupNameException {
    // The group-ID fallback below relies on Unix shell tooling; it is not
    // available on Windows.
    if (Shell.WINDOWS) {
        throw new PartialGroupNameException("Does not support partial group" + " name resolution on Windows. " + errMessage);
    }
    // Empty output means the user itself is unknown: nothing to resolve.
    if (groupNames.isEmpty()) {
        throw new PartialGroupNameException("The user name '" + userName + "' is not found. " + errMessage);
    }
    LOG.warn("Some group names for '{}' are not resolvable. {}", userName, errMessage);
    // Fall back to numeric group IDs and map them to names where possible.
    final ShellCommandExecutor idResolver = createGroupIDExecutor(userName);
    try {
        idResolver.execute();
        return parsePartialGroupNames(groupNames, idResolver.getOutput());
    } catch (ExitCodeException ece) {
        // A non-zero exit here is unrecoverable; give up entirely.
        throw new PartialGroupNameException("failed to get group id list for user '" + userName + "'", ece);
    } catch (IOException ioe) {
        String message = "Can't execute the shell command to " + "get the list of group id for user '" + userName + "'";
        if (idResolver.isTimedOut()) {
            message += " because of the command taking longer than " + "the configured timeout: " + timeout + " seconds";
        }
        throw new PartialGroupNameException(message, ioe);
    }
}
Usage of org.apache.hadoop.util.Shell.ShellCommandExecutor in the Apache Hadoop project: class ShellBasedUnixGroupsMapping, method getUnixGroups.
/**
 * Get the current user's group list from Unix by running the command 'groups'.
 * NOTE: for a non-existing user this returns an EMPTY list.
 *
 * @param user get groups for this user
 * @return the groups list that the <code>user</code> belongs to. The primary
 *         group is returned first.
 * @throws IOException if encounter any error when running the command
 */
private List<String> getUnixGroups(String user) throws IOException {
    final ShellCommandExecutor executor = createGroupExecutor(user);
    List<String> groups;
    try {
        executor.execute();
        groups = resolveFullGroupNames(executor.getOutput());
    } catch (ExitCodeException e) {
        // Non-zero exit from the shell: salvage whatever names we can.
        try {
            groups = resolvePartialGroupNames(user, e.getMessage(), executor.getOutput());
        } catch (PartialGroupNameException pge) {
            LOG.warn("unable to return groups for user {}", user, pge);
            return EMPTY_GROUPS;
        }
    } catch (IOException ioe) {
        // Similar to how partial resolution failures are handled above:
        // a timeout is degraded to "no groups"; anything else is the
        // caller's problem.
        if (!executor.isTimedOut()) {
            throw ioe;
        }
        LOG.warn("Unable to return groups for user '{}' as shell group lookup " + "command '{}' ran longer than the configured timeout limit of " + "{} seconds.", user, Joiner.on(' ').join(executor.getExecString()), timeout);
        return EMPTY_GROUPS;
    }
    // 'groups' may list the primary group twice on Unix; drop the first
    // duplicate occurrence. (Guarded by size so get(0) never throws.)
    if (!Shell.WINDOWS && groups.size() > 1) {
        final String primary = groups.get(0);
        for (int i = 1; i < groups.size(); i++) {
            if (primary.equals(groups.get(i))) {
                groups.remove(i);
                break;
            }
        }
    }
    return groups;
}
Usage of org.apache.hadoop.util.Shell.ShellCommandExecutor in the Apache Hadoop project: class SysInfoWindows, method getSystemInfoInfoFromShell.
/**
 * Run 'winutils systeminfo' and return its raw output.
 *
 * @return the command's stdout, or {@code null} if the command failed
 */
String getSystemInfoInfoFromShell() {
    try {
        final String winUtils = Shell.getWinUtilsFile().getCanonicalPath();
        final ShellCommandExecutor shellExecutor = new ShellCommandExecutor(new String[] { winUtils, "systeminfo" });
        shellExecutor.execute();
        return shellExecutor.getOutput();
    } catch (IOException e) {
        // Best-effort: log and signal failure to the caller via null.
        LOG.error(StringUtils.stringifyException(e));
        return null;
    }
}
Usage of org.apache.hadoop.util.Shell.ShellCommandExecutor in the Apache Hadoop project: class FileUtil, method unTarUsingTar.
/**
 * Untar a file by shelling out to the native 'tar' (and, if needed, 'gzip')
 * commands via bash.
 *
 * @param inFile the tar (or gzipped tar) archive to extract
 * @param untarDir the directory to extract into
 * @param gzipped whether the archive is gzip-compressed
 * @throws IOException if the shell pipeline exits with a non-zero code
 */
private static void unTarUsingTar(File inFile, File untarDir, boolean gzipped) throws IOException {
    // StringBuilder: the buffer is method-local, so the synchronized
    // StringBuffer buys nothing here.
    StringBuilder untarCommand = new StringBuilder();
    if (gzipped) {
        // Decompress to stdout and pipe into tar running in a subshell.
        untarCommand.append(" gzip -dc '");
        untarCommand.append(FileUtil.makeShellPath(inFile));
        untarCommand.append("' | (");
    }
    untarCommand.append("cd '");
    untarCommand.append(FileUtil.makeShellPath(untarDir));
    untarCommand.append("' ; ");
    untarCommand.append("tar -xf ");
    if (gzipped) {
        // Read the archive from stdin (fed by the gzip pipe above).
        untarCommand.append(" -)");
    } else {
        // Quote the archive path (matching the gzip branch above) so paths
        // containing spaces or shell metacharacters are handled safely.
        untarCommand.append("'");
        untarCommand.append(FileUtil.makeShellPath(inFile));
        untarCommand.append("'");
    }
    String[] shellCmd = { "bash", "-c", untarCommand.toString() };
    ShellCommandExecutor shexec = new ShellCommandExecutor(shellCmd);
    shexec.execute();
    int exitcode = shexec.getExitCode();
    if (exitcode != 0) {
        throw new IOException("Error untarring file " + inFile + ". Tar process exited with exit code " + exitcode);
    }
}
Usage of org.apache.hadoop.util.Shell.ShellCommandExecutor in the Apache Hadoop project: class TestShellBasedUnixGroupsMapping, method testFiniteGroupResolutionTime.
/**
 * Verifies that shell-based group lookups honour the configured command
 * timeout: with a 1-second timeout a deliberately delayed group command
 * must return no groups and log a timeout message (both directly and via
 * the Groups framework), while the default no-timeout configuration must
 * not log any timeout message.
 */
@Test(timeout = 4000)
public void testFiniteGroupResolutionTime() throws Exception {
Configuration conf = new Configuration();
String userName = "foobarnonexistinguser";
String commandTimeoutMessage = "ran longer than the configured timeout limit";
long testTimeout = 1L;
// Test a 1 second max-runtime timeout
conf.setLong(CommonConfigurationKeys.HADOOP_SECURITY_GROUP_SHELL_COMMAND_TIMEOUT_SECS, testTimeout);
TestDelayedGroupCommand mapping = ReflectionUtils.newInstance(TestDelayedGroupCommand.class, conf);
// Both the group-names and group-ID executors must pick up the timeout.
ShellCommandExecutor executor = mapping.createGroupExecutor(userName);
assertEquals("Expected the group names executor to carry the configured timeout", testTimeout, executor.getTimeoutInterval());
executor = mapping.createGroupIDExecutor(userName);
assertEquals("Expected the group ID executor to carry the configured timeout", testTimeout, executor.getTimeoutInterval());
// The delayed command should time out, yielding an empty group list and
// a timeout message in the captured log output.
assertEquals("Expected no groups to be returned given a shell command timeout", 0, mapping.getGroups(userName).size());
assertTrue("Expected the logs to carry " + "a message about command timeout but was: " + shellMappingLog.getOutput(), shellMappingLog.getOutput().contains(commandTimeoutMessage));
shellMappingLog.clearOutput();
// Test also the parent Groups framework for expected behaviour
conf.setClass(CommonConfigurationKeys.HADOOP_SECURITY_GROUP_MAPPING, TestDelayedGroupCommand.class, GroupMappingServiceProvider.class);
Groups groups = new Groups(conf);
try {
groups.getGroups(userName);
fail("The groups framework call should " + "have failed with a command timeout");
} catch (IOException e) {
// Expected path: the framework surfaces the timeout as an IOException,
// and the log must still mention the timeout.
assertTrue("Expected the logs to carry " + "a message about command timeout but was: " + shellMappingLog.getOutput(), shellMappingLog.getOutput().contains(commandTimeoutMessage));
}
shellMappingLog.clearOutput();
// Test the no-timeout (default) configuration
conf = new Configuration();
long defaultTimeout = CommonConfigurationKeys.HADOOP_SECURITY_GROUP_SHELL_COMMAND_TIMEOUT_SECS_DEFAULT;
mapping = ReflectionUtils.newInstance(TestDelayedGroupCommand.class, conf);
executor = mapping.createGroupExecutor(userName);
assertEquals("Expected the group names executor to carry the default timeout", defaultTimeout, executor.getTimeoutInterval());
executor = mapping.createGroupIDExecutor(userName);
assertEquals("Expected the group ID executor to carry the default timeout", defaultTimeout, executor.getTimeoutInterval());
// With no timeout configured, the lookup completes and no timeout
// message may appear in the logs.
mapping.getGroups(userName);
assertFalse("Didn't expect a timeout of command in execution but logs carry it: " + shellMappingLog.getOutput(), shellMappingLog.getOutput().contains(commandTimeoutMessage));
}
Aggregations