Use of org.apache.hadoop.hive.ql.exec.TaskRunner in project hive by apache.
The class DriverContext, method pollFinished.
/**
 * Polls running tasks to see if a task has ended.
 *
 * @return the TaskRunner of a task that has completed or failed, or null once shutdown is requested
 */
public synchronized TaskRunner pollFinished() throws InterruptedException {
  while (!shutdown) {
    // Scan the running list for any task that is no longer running.
    Iterator<TaskRunner> it = running.iterator();
    while (it.hasNext()) {
      TaskRunner runner = it.next();
      if (runner != null && !runner.isRunning()) {
        it.remove();
        return runner;
      }
    }
    // Nothing has finished yet: release the monitor and re-poll after SLEEP_TIME.
    wait(SLEEP_TIME);
  }
  return null;
}
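A caller drains this method in a loop: a non-null return is a task that has ended, while a null return signals that shutdown() was invoked. A minimal consumer sketch, in which driverCx and handleFinishedTask are illustrative names rather than Hive API:

TaskRunner finished;
// pollFinished() blocks in SLEEP_TIME slices until a task ends or shutdown() is called.
while ((finished = driverCx.pollFinished()) != null) {
  // This TaskRunner has completed or failed; inspect it and react (illustrative helper).
  handleFinishedTask(finished);
}
// A null return means shutdown() was requested while we were waiting.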
Use of org.apache.hadoop.hive.ql.exec.TaskRunner in project hive by apache.
The class DriverContext, method shutdown.
/**
 * Cleans up remaining tasks in case of failure.
 */
public synchronized void shutdown() {
  LOG.debug("Shutting down query " + ctx.getCmd());
  shutdown = true;
  for (TaskRunner runner : running) {
    if (runner.isRunning()) {
      Task<?> task = runner.getTask();
      LOG.warn("Shutting down task : " + task);
      try {
        task.shutdown();
      } catch (Exception e) {
        console.printError("Exception on shutting down task " + task.getId() + ": " + e);
      }
      // Interrupt the runner thread so a blocked task exits promptly.
      Thread thread = runner.getRunner();
      if (thread != null) {
        thread.interrupt();
      }
    }
  }
  running.clear();
}
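Note that shutdown() never calls notify(): the timed wait(SLEEP_TIME) in pollFinished guarantees the poller re-checks the shutdown flag within one interval anyway. A self-contained sketch of the same poll-and-shutdown pattern, with illustrative names (PollingMonitor, isFinished); it adds an optional notifyAll() to wake the poller immediately:

import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;
import java.util.function.Predicate;

// Standalone illustration of DriverContext's poll/shutdown pattern.
class PollingMonitor<T> {
  private static final long SLEEP_TIME = 2000; // polling interval in milliseconds
  private final List<T> running = new ArrayList<>();
  private boolean shutdown = false;

  public synchronized void add(T item) {
    running.add(item);
  }

  public synchronized T pollFinished(Predicate<T> isFinished) throws InterruptedException {
    while (!shutdown) {
      Iterator<T> it = running.iterator();
      while (it.hasNext()) {
        T item = it.next();
        if (isFinished.test(item)) {
          it.remove();
          return item;
        }
      }
      // Timed wait: the shutdown flag is re-checked even if no one calls notify().
      wait(SLEEP_TIME);
    }
    return null;
  }

  public synchronized void shutdown() {
    shutdown = true;
    notifyAll(); // optional: wakes the poller now instead of within SLEEP_TIME
    running.clear();
  }
}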
Use of org.apache.hadoop.hive.ql.exec.TaskRunner in project ambrose by twitter.
The class AmbroseHiveFailHook, method run.
@Override
public void run(HookContext hookContext) throws Exception {
  HiveConf conf = hookContext.getConf();
  Properties allConfProps = conf.getAllProperties();
  String queryId = AmbroseHiveUtil.getHiveQueryId(conf);
  EmbeddedAmbroseHiveProgressReporter reporter = getEmbeddedProgressReporter();
  List<TaskRunner> completeTaskList = hookContext.getCompleteTaskList();
  Field _taskResultField = accessTaskResultField();
  for (TaskRunner taskRunner : completeTaskList) {
    TaskResult taskResult = (TaskResult) _taskResultField.get(taskRunner);
    // Report only tasks that have stopped running with a non-zero exit value.
    if (!taskResult.isRunning() && taskResult.getExitVal() != 0) {
      Task<? extends Serializable> task = taskRunner.getTask();
      String nodeId = AmbroseHiveUtil.getNodeIdFromNodeName(conf, task.getId());
      DAGNode<Job> dagNode = reporter.getDAGNodeFromNodeId(nodeId);
      HiveJob job = (HiveJob) dagNode.getJob();
      job.setConfiguration(allConfProps);
      MapReduceJobState mrJobState = getJobState(job);
      mrJobState.setSuccessful(false);
      reporter.addJob((Job) job);
      reporter.pushEvent(queryId, new Event.JobFailedEvent(dagNode));
    }
  }
  reporter.restoreEventStack();
  // Keep the embedded REST server alive briefly so clients can fetch the final state.
  String sleepTime = System.getProperty(POST_SCRIPT_SLEEP_SECS_PARAM, "10");
  try {
    int sleepTimeSeconds = Integer.parseInt(sleepTime);
    LOG.info("Script failed but sleeping for " + sleepTimeSeconds + " seconds to keep the HiveStats REST server running. Hit ctrl-c to exit.");
    Thread.sleep(sleepTimeSeconds * 1000L);
    reporter.stopServer();
  } catch (NumberFormatException e) {
    LOG.warn(POST_SCRIPT_SLEEP_SECS_PARAM + " param is not a valid number, not sleeping: " + sleepTime);
  } catch (InterruptedException e) {
    LOG.warn("Sleep interrupted", e);
  }
}
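The helper accessTaskResultField() is not shown in this snippet; it reads TaskRunner's non-public TaskResult field via reflection. A plausible sketch, assuming the field is named "result" (the name is an assumption about TaskRunner's internals and may vary across Hive versions):

import java.lang.reflect.Field;
import org.apache.hadoop.hive.ql.exec.TaskRunner;

// Sketch only: "result" is an assumed field name, not confirmed by the snippet above.
private static Field accessTaskResultField() throws NoSuchFieldException {
  Field field = TaskRunner.class.getDeclaredField("result");
  field.setAccessible(true); // the field is not public, so bypass access checks
  return field;
}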