Use of org.apache.hadoop.util.Shell.ShellCommandExecutor in project hadoop by apache.
The class TestProcfsBasedProcessTree, method sendSignal.
private static void sendSignal(String pid, int signal) throws IOException {
  ShellCommandExecutor shexec = null;
  // Build the kill argv directly; "--" keeps the pid from being parsed as an option.
  String[] arg = { "kill", "-" + signal, "--", pid };
  shexec = new ShellCommandExecutor(arg);
  shexec.execute();
}
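Below is a minimal, self-contained sketch of the same kill pattern, assuming a Unix-like host with a kill binary; the pid value and signal number are placeholders and are not part of the original test.

import java.io.IOException;
import org.apache.hadoop.util.Shell.ExitCodeException;
import org.apache.hadoop.util.Shell.ShellCommandExecutor;

public class SendSignalExample {
  public static void main(String[] args) throws IOException {
    String pid = args.length > 0 ? args[0] : "12345"; // placeholder pid
    int signal = 0; // signal 0 only probes whether the process exists
    ShellCommandExecutor shexec =
        new ShellCommandExecutor(new String[] { "kill", "-" + signal, "--", pid });
    try {
      shexec.execute();
      System.out.println("process " + pid + " exists");
    } catch (ExitCodeException e) {
      // kill exits non-zero when no such process exists
      System.out.println("no such process: " + pid);
    }
  }
}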
Use of org.apache.hadoop.util.Shell.ShellCommandExecutor in project hadoop by apache.
The class PrivilegedOperationExecutor, method executePrivilegedOperation.
/**
 * Executes a privileged operation. It is up to the callers to ensure that
 * each privileged operation's parameters are constructed correctly. The
 * parameters are passed verbatim to the container-executor binary.
 *
 * @param prefixCommands in some cases (e.g., setting priorities using nice),
 *          prefix commands are necessary
 * @param operation the type and arguments for the operation to be executed
 * @param workingDir (optional) working directory for execution
 * @param env (optional) environment for the command; the specified vars are included
 * @param grabOutput return (possibly large) shell command output
 * @param inheritParentEnv inherit the env vars from the parent process
 * @return stdout contents from the shell executor - useful for some privileged
 *         operations - e.g. --tc_read
 * @throws org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.privileged.PrivilegedOperationException
 */
public String executePrivilegedOperation(List<String> prefixCommands,
    PrivilegedOperation operation, File workingDir,
    Map<String, String> env, boolean grabOutput, boolean inheritParentEnv)
    throws PrivilegedOperationException {
  String[] fullCommandArray =
      getPrivilegedOperationExecutionCommand(prefixCommands, operation);
  ShellCommandExecutor exec = new ShellCommandExecutor(fullCommandArray,
      workingDir, env, 0L, inheritParentEnv);
  try {
    exec.execute();
    if (LOG.isDebugEnabled()) {
      LOG.debug("command array:");
      LOG.debug(Arrays.toString(fullCommandArray));
      LOG.debug("Privileged Execution Operation Output:");
      LOG.debug(exec.getOutput());
    }
  } catch (ExitCodeException e) {
    if (operation.isFailureLoggingEnabled()) {
      StringBuilder logBuilder =
          new StringBuilder("Shell execution returned exit code: ")
              .append(exec.getExitCode())
              .append(". Privileged Execution Operation Stderr: ")
              .append(System.lineSeparator())
              .append(e.getMessage())
              .append(System.lineSeparator())
              .append("Stdout: " + exec.getOutput())
              .append(System.lineSeparator());
      logBuilder.append("Full command array for failed execution: ")
          .append(System.lineSeparator());
      logBuilder.append(Arrays.toString(fullCommandArray));
      LOG.warn(logBuilder.toString());
    }
    // stderr from the shell executor ends up in the exception 'message',
    // so extract it and pass it along as the error output
    throw new PrivilegedOperationException(e, e.getExitCode(),
        exec.getOutput(), e.getMessage());
  } catch (IOException e) {
    LOG.warn("IOException executing command: ", e);
    throw new PrivilegedOperationException(e);
  }
  if (grabOutput) {
    return exec.getOutput();
  }
  return null;
}
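As a rough illustration of the five-argument ShellCommandExecutor constructor used above (command array, working directory, environment, timeout, inheritParentEnv), here is a hedged, standalone sketch; the command, directory, and environment below are placeholders and nothing in it comes from the YARN code itself. A timeout of 0L means no time limit is applied.

import java.io.File;
import java.io.IOException;
import java.util.Collections;
import java.util.Map;
import org.apache.hadoop.util.Shell.ExitCodeException;
import org.apache.hadoop.util.Shell.ShellCommandExecutor;

public class GrabOutputExample {
  public static void main(String[] args) {
    String[] command = { "ls", "-l" };            // placeholder command
    File workingDir = new File("/tmp");           // placeholder working dir
    Map<String, String> env = Collections.emptyMap();
    // Same constructor shape as above: 0L = no timeout, true = inherit parent env.
    ShellCommandExecutor exec =
        new ShellCommandExecutor(command, workingDir, env, 0L, true);
    try {
      exec.execute();
      System.out.println(exec.getOutput());       // stdout, as with grabOutput
    } catch (ExitCodeException e) {
      System.err.println("exit code " + exec.getExitCode() + ": " + e.getMessage());
    } catch (IOException e) {
      e.printStackTrace();
    }
  }
}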
Use of org.apache.hadoop.util.Shell.ShellCommandExecutor in project hadoop by apache.
The class TestPipelinesFailover, method testPipelineRecoveryStress.
/**
 * Stress test for pipeline/lease recovery. Starts a number of
 * threads, each of which creates a file and has another client
 * break the lease. While these threads run, failover proceeds
 * back and forth between two namenodes.
 */
@Test(timeout = STRESS_RUNTIME * 3)
public void testPipelineRecoveryStress() throws Exception {
  // The following section of code is to help debug HDFS-6694 about
  // this test that fails from time to time due to "too many open files".
  //
  LOG.info("HDFS-6694 Debug Data BEGIN");
  String[][] scmds = new String[][] {
      { "/bin/sh", "-c", "ulimit -a" },
      { "hostname" },
      { "ifconfig", "-a" } };
  for (String[] scmd : scmds) {
    String scmd_str = StringUtils.join(" ", scmd);
    try {
      ShellCommandExecutor sce = new ShellCommandExecutor(scmd);
      sce.execute();
      LOG.info("'" + scmd_str + "' output:\n" + sce.getOutput());
    } catch (IOException e) {
      LOG.warn("Error when running '" + scmd_str + "'", e);
    }
  }
  LOG.info("HDFS-6694 Debug Data END");
  HAStressTestHarness harness = new HAStressTestHarness();
  // Disable permissions so that another user can recover the lease.
  harness.conf.setBoolean(DFSConfigKeys.DFS_PERMISSIONS_ENABLED_KEY, false);
  // This test triggers rapid NN failovers. The client retry policy uses an
  // exponential backoff. This can quickly lead to long sleep times and even
  // timeout the whole test. Cap the sleep time at 1s to prevent this.
  harness.conf.setInt(HdfsClientConfigKeys.Failover.SLEEPTIME_MAX_KEY, 1000);
  final MiniDFSCluster cluster = harness.startCluster();
  try {
    cluster.waitActive();
    cluster.transitionToActive(0);
    FileSystem fs = harness.getFailoverFs();
    DistributedFileSystem fsAsOtherUser =
        createFsAsOtherUser(cluster, harness.conf);
    TestContext testers = new TestContext();
    for (int i = 0; i < STRESS_NUM_THREADS; i++) {
      Path p = new Path("/test-" + i);
      testers.addThread(new PipelineTestThread(testers, fs, fsAsOtherUser, p));
    }
    // Start a separate thread which will make sure that replication
    // happens quickly by triggering deletion reports and replication
    // work calculation frequently.
    harness.addReplicationTriggerThread(500);
    harness.addFailoverThread(5000);
    harness.startThreads();
    testers.startThreads();
    testers.waitFor(STRESS_RUNTIME);
    testers.stop();
    harness.stopThreads();
  } finally {
    System.err.println("===========================\n\n\n\n");
    harness.shutdown();
  }
}
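One detail worth noting from the HDFS-6694 debug section above: ShellCommandExecutor does not invoke a shell itself, so shell builtins such as ulimit have to be wrapped in "/bin/sh -c", while plain binaries like hostname can be exec'd directly. A small standalone sketch of that pattern follows; the command list is illustrative only and not taken from the original test.

import java.io.IOException;
import org.apache.hadoop.util.Shell.ShellCommandExecutor;

public class DiagnosticsExample {
  public static void main(String[] args) {
    // Shell builtins need an explicit shell; plain binaries run as a bare argv.
    String[][] commands = {
        { "/bin/sh", "-c", "ulimit -a" },
        { "hostname" }
    };
    for (String[] command : commands) {
      try {
        ShellCommandExecutor sce = new ShellCommandExecutor(command);
        sce.execute();
        System.out.println(String.join(" ", command) + ":\n" + sce.getOutput());
      } catch (IOException e) {
        System.err.println("Error when running '" + String.join(" ", command) + "'");
        e.printStackTrace();
      }
    }
  }
}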
Use of org.apache.hadoop.util.Shell.ShellCommandExecutor in project hadoop by apache.
The class SysInfoLinux, method getConf.
private static long getConf(String attr) {
  if (Shell.LINUX) {
    try {
      ShellCommandExecutor shellExecutorClk = new ShellCommandExecutor(
          new String[] { "getconf", attr });
      shellExecutorClk.execute();
      return Long.parseLong(shellExecutorClk.getOutput().replace("\n", ""));
    } catch (IOException | NumberFormatException e) {
      return -1;
    }
  }
  return -1;
}
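A minimal standalone equivalent of this helper is sketched below; it mirrors the same behavior (return -1 on non-Linux hosts or on any failure). The attribute names in main are standard getconf variables chosen for illustration, not values taken from the class itself.

import java.io.IOException;
import org.apache.hadoop.util.Shell;
import org.apache.hadoop.util.Shell.ShellCommandExecutor;

public class GetconfExample {
  // Mirrors getConf above: run "getconf <attr>" and parse the single-line output.
  static long getconf(String attr) {
    if (!Shell.LINUX) {
      return -1;
    }
    try {
      ShellCommandExecutor exec =
          new ShellCommandExecutor(new String[] { "getconf", attr });
      exec.execute();
      // getconf prints the value followed by a newline; strip it before parsing.
      return Long.parseLong(exec.getOutput().trim());
    } catch (IOException | NumberFormatException e) {
      return -1;
    }
  }

  public static void main(String[] args) {
    System.out.println("CLK_TCK  = " + getconf("CLK_TCK"));
    System.out.println("PAGESIZE = " + getconf("PAGESIZE"));
  }
}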
Use of org.apache.hadoop.util.Shell.ShellCommandExecutor in project hadoop by apache.
The class FileUtil, method chmod.
/**
 * Change the permissions on a file / directory, recursively, if
 * needed.
 * @param filename name of the file whose permissions are to change
 * @param perm permission string
 * @param recursive true, if permissions should be changed recursively
 * @return the exit code from the command.
 * @throws IOException
 */
public static int chmod(String filename, String perm, boolean recursive)
    throws IOException {
  String[] cmd = Shell.getSetPermissionCommand(perm, recursive);
  String[] args = new String[cmd.length + 1];
  System.arraycopy(cmd, 0, args, 0, cmd.length);
  args[cmd.length] = new File(filename).getPath();
  ShellCommandExecutor shExec = new ShellCommandExecutor(args);
  try {
    shExec.execute();
  } catch (IOException e) {
    if (LOG.isDebugEnabled()) {
      LOG.debug("Error while changing permission : " + filename
          + " Exception: " + StringUtils.stringifyException(e));
    }
  }
  return shExec.getExitCode();
}
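A brief usage sketch: FileUtil.chmod swallows the execution error and returns the exit code rather than throwing on a failed chmod, so callers check the return value. The path and mode below are placeholders.

import java.io.IOException;
import org.apache.hadoop.fs.FileUtil;

public class ChmodExample {
  public static void main(String[] args) throws IOException {
    // Placeholder path and mode; 0 means the underlying chmod succeeded.
    int exitCode = FileUtil.chmod("/tmp/example-dir", "700", true);
    if (exitCode != 0) {
      System.err.println("chmod failed with exit code " + exitCode);
    }
  }
}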