Example usage of org.apache.hadoop.util.Shell.ShellCommandExecutor in the Apache Hadoop project: the getUnixGroups method of the ShellBasedUnixGroupsMapping class.
/**
* Get the current user's group list from Unix by running the command 'groups'
* NOTE. For non-existing user it will return EMPTY list.
*
* @param user get groups for this user
* @return the groups list that the <code>user</code> belongs to. The primary
* group is returned first.
* @throws IOException if encounter any error when running the command
*/
/**
 * Resolve the Unix group membership for the given user by running the
 * shell 'groups' command.
 * NOTE: for a non-existing user this returns an EMPTY list rather than
 * throwing.
 *
 * @param user the user to look up groups for
 * @return the list of groups the <code>user</code> belongs to; the
 * primary group comes first.
 * @throws IOException if the command fails for a reason other than a
 * non-zero exit code or a lookup timeout
 */
private List<String> getUnixGroups(String user) throws IOException {
  ShellCommandExecutor shellExec = createGroupExecutor(user);
  List<String> groupList;
  try {
    shellExec.execute();
    groupList = resolveFullGroupNames(shellExec.getOutput());
  } catch (ExitCodeException e) {
    // A non-zero exit may still have produced usable partial output;
    // try to recover group names from it before giving up.
    try {
      groupList = resolvePartialGroupNames(user, e.getMessage(), shellExec.getOutput());
    } catch (PartialGroupNameException pge) {
      LOG.warn("unable to return groups for user {}", user, pge);
      return EMPTY_GROUPS;
    }
  } catch (IOException ioe) {
    if (!shellExec.isTimedOut()) {
      // Not a timeout of the executor: let the caller handle it.
      throw ioe;
    }
    LOG.warn("Unable to return groups for user '{}' as shell group lookup command '{}' ran longer than the configured timeout limit of {} seconds.", user, Joiner.on(' ').join(shellExec.getExecString()), timeout);
    return EMPTY_GROUPS;
  }
  // Drop the first later occurrence that duplicates the primary group
  // (the primary group stays at index 0).
  if (!Shell.WINDOWS && !groupList.isEmpty()) {
    int dupIdx = groupList.subList(1, groupList.size()).indexOf(groupList.get(0));
    if (dupIdx >= 0) {
      groupList.remove(dupIdx + 1);
    }
  }
  return groupList;
}
Example usage of org.apache.hadoop.util.Shell.ShellCommandExecutor in the Apache Hadoop project: the testFiniteGroupResolutionTime method of the TestShellBasedUnixGroupsMapping class.
/**
 * Verifies that shell-based group resolution respects the configured
 * command timeout: the executors carry the configured value, a timed-out
 * lookup yields an empty group list plus a warning log, and the default
 * (no-timeout) configuration produces no timeout message.
 */
@Test(timeout = 4000)
public void testFiniteGroupResolutionTime() throws Exception {
Configuration conf = new Configuration();
String userName = "foobarnonexistinguser";
// Substring expected in the warning logged when the lookup times out.
String commandTimeoutMessage = "ran longer than the configured timeout limit";
long testTimeout = 1L;
// Test a 1 second max-runtime timeout
conf.setLong(CommonConfigurationKeys.HADOOP_SECURITY_GROUP_SHELL_COMMAND_TIMEOUT_SECS, testTimeout);
TestDelayedGroupCommand mapping = ReflectionUtils.newInstance(TestDelayedGroupCommand.class, conf);
// Both the group-names and group-ID executors must pick up the timeout.
ShellCommandExecutor executor = mapping.createGroupExecutor(userName);
assertEquals("Expected the group names executor to carry the configured timeout", testTimeout, executor.getTimeoutInterval());
executor = mapping.createGroupIDExecutor(userName);
assertEquals("Expected the group ID executor to carry the configured timeout", testTimeout, executor.getTimeoutInterval());
// The delayed command exceeds the 1s timeout, so no groups come back
// and the timeout warning must appear in the captured log output.
assertEquals("Expected no groups to be returned given a shell command timeout", 0, mapping.getGroups(userName).size());
assertTrue("Expected the logs to carry " + "a message about command timeout but was: " + shellMappingLog.getOutput(), shellMappingLog.getOutput().contains(commandTimeoutMessage));
shellMappingLog.clearOutput();
// Test also the parent Groups framework for expected behaviour
conf.setClass(CommonConfigurationKeys.HADOOP_SECURITY_GROUP_MAPPING, TestDelayedGroupCommand.class, GroupMappingServiceProvider.class);
Groups groups = new Groups(conf);
try {
groups.getGroups(userName);
fail("The groups framework call should " + "have failed with a command timeout");
} catch (IOException e) {
// Expected path: the framework surfaces the timeout as an IOException
// and the warning is still logged.
assertTrue("Expected the logs to carry " + "a message about command timeout but was: " + shellMappingLog.getOutput(), shellMappingLog.getOutput().contains(commandTimeoutMessage));
}
shellMappingLog.clearOutput();
// Test the no-timeout (default) configuration
conf = new Configuration();
long defaultTimeout = CommonConfigurationKeys.HADOOP_SECURITY_GROUP_SHELL_COMMAND_TIMEOUT_SECS_DEFAULT;
mapping = ReflectionUtils.newInstance(TestDelayedGroupCommand.class, conf);
executor = mapping.createGroupExecutor(userName);
assertEquals("Expected the group names executor to carry the default timeout", defaultTimeout, executor.getTimeoutInterval());
executor = mapping.createGroupIDExecutor(userName);
assertEquals("Expected the group ID executor to carry the default timeout", defaultTimeout, executor.getTimeoutInterval());
// With no timeout configured, the (slow) lookup must complete without
// logging any timeout message.
mapping.getGroups(userName);
assertFalse("Didn't expect a timeout of command in execution but logs carry it: " + shellMappingLog.getOutput(), shellMappingLog.getOutput().contains(commandTimeoutMessage));
}
Example usage of org.apache.hadoop.util.Shell.ShellCommandExecutor in the Apache Hadoop project: the isAlive method of the ProcessTree class.
/**
* Is the process with PID pid still alive?
* This method assumes that isAlive is called on a pid that was alive not
* too long ago, and hence assumes no chance of pid-wrapping-around.
*
* @param pid pid of the process to check.
* @return true if process is alive.
*/
/**
 * Is the process with PID pid still alive?
 * This method assumes that isAlive is called on a pid that was alive not
 * too long ago, and hence assumes no chance of pid-wrapping-around.
 *
 * @param pid pid of the process to check.
 * @return true if process is alive.
 */
public static boolean isAlive(String pid) {
  ShellCommandExecutor shexec = null;
  try {
    // "kill -0" delivers no signal; it only tests whether the pid can
    // be signaled, which is the conventional liveness probe.
    String[] args = { "kill", "-0", pid };
    shexec = new ShellCommandExecutor(args);
    shexec.execute();
  } catch (ExitCodeException ee) {
    // Non-zero exit from "kill -0": the process is not alive (or not
    // signalable by this user) — report dead.
    return false;
  } catch (IOException ioe) {
    // Pass the exception as the throwable argument so the stack trace is
    // logged, instead of concatenating it into the message string.
    LOG.warn("Error executing shell command " + shexec, ioe);
    return false;
  }
  // execute() completed without throwing; a zero exit code means alive.
  return shexec.getExitCode() == 0;
}
Example usage of org.apache.hadoop.util.Shell.ShellCommandExecutor in the Apache Hadoop project: the sendSignal method of the ProcessTree class.
/**
* Send a specified signal to the specified pid
*
* @param pid the pid of the process [group] to signal.
* @param signalNum the signal to send.
* @param signalName the human-readable description of the signal
* (for logging).
*/
/**
 * Send a specified signal to the specified pid
 *
 * @param pid the pid of the process [group] to signal.
 * @param signalNum the signal to send.
 * @param signalName the human-readable description of the signal
 * (for logging).
 */
private static void sendSignal(String pid, int signalNum, String signalName) {
  ShellCommandExecutor shexec = null;
  try {
    // A pid starting with "-" makes kill target the whole process group.
    String[] args = { "kill", "-" + signalNum, pid };
    shexec = new ShellCommandExecutor(args);
    shexec.execute();
  } catch (IOException ioe) {
    // Log the exception as a throwable so the stack trace is preserved,
    // rather than concatenating it into the message.
    LOG.warn("Error executing shell command", ioe);
  } finally {
    // Guard against shexec still being null (e.g. if construction threw),
    // which would otherwise raise an NPE here and mask the real failure.
    if (shexec != null) {
      if (pid.startsWith("-")) {
        LOG.info("Sending signal to all members of process group " + pid + ": " + signalName + ". Exit code " + shexec.getExitCode());
      } else {
        LOG.info("Signaling process " + pid + " with " + signalName + ". Exit code " + shexec.getExitCode());
      }
    }
  }
}
Example usage of org.apache.hadoop.util.Shell.ShellCommandExecutor in the Apache HBase project: the init method of the HealthChecker class.
/**
* Initialize.
*
* @param configuration
*/
/**
 * Initialize this checker with the health-check script location and the
 * executor timeout, and build the shell executor that will run it.
 *
 * @param location path of the health check script
 * @param timeout timeout passed to the ShellCommandExecutor
 */
public void init(String location, long timeout) {
  this.healthCheckScript = location;
  this.scriptTimeout = timeout;
  // The command line consists solely of the script path.
  String[] command = { healthCheckScript };
  this.shexec = new ShellCommandExecutor(command, null, null, scriptTimeout);
  LOG.info("HealthChecker initialized with script at " + this.healthCheckScript + ", timeout=" + timeout);
}
Aggregations