Use of org.apache.hadoop.hive.ql.exec.TaskRunner in the Apache Hive project: class TaskQueue, method pollFinished.
/**
 * Blocks until one of the running tasks finishes, or until shutdown is requested.
 *
 * Scans the set of running tasks; the first task found to be no longer running is
 * removed from the set and returned. If none has finished yet, waits briefly
 * (releasing the monitor) and re-checks.
 *
 * @return the TaskRunner of a completed/failed task, or null if shut down first
 */
public synchronized TaskRunner pollFinished() throws InterruptedException {
  while (!shutdown) {
    for (Iterator<TaskRunner> candidates = running.iterator(); candidates.hasNext(); ) {
      TaskRunner candidate = candidates.next();
      if (candidate == null || candidate.isRunning()) {
        continue;
      }
      // Found a finished task: take it out of the running set and hand it back.
      candidates.remove();
      return candidate;
    }
    // Nothing has finished yet; sleep until notified or the poll interval elapses.
    wait(SLEEP_TIME);
  }
  return null;
}
Use of org.apache.hadoop.hive.ql.exec.TaskRunner in the Apache Hive project: class MapJoinCounterHook, method run.
/**
 * Post-execution hook that counts how the completed tasks of a query were
 * executed with respect to join conversion: plain common joins, hinted or
 * auto-converted map joins (and their local variants), and backup common joins.
 * Does nothing unless join conversion (HIVECONVERTJOIN) is enabled.
 *
 * @param hookContext context of the completed query, supplying the conf and
 *                    the list of finished task runners
 */
public void run(HookContext hookContext) {
  HiveConf conf = hookContext.getConf();
  boolean enableConvert = HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVECONVERTJOIN);
  if (!enableConvert) {
    // Join conversion is off, so there is nothing meaningful to count.
    return;
  }
  int commonJoin = 0;
  int hintedMapJoin = 0;
  int convertedMapJoin = 0;
  int hintedMapJoinLocal = 0;
  int convertedMapJoinLocal = 0;
  int backupCommonJoin = 0;
  // Tally each completed task by the join-conversion tag the planner assigned to it.
  List<TaskRunner> list = hookContext.getCompleteTaskList();
  for (TaskRunner tskRunner : list) {
    Task<?> tsk = tskRunner.getTask();
    int tag = tsk.getTaskTag();
    switch(tag) {
      case Task.COMMON_JOIN:
        commonJoin++;
        break;
      case Task.HINTED_MAPJOIN:
        hintedMapJoin++;
        break;
      case Task.HINTED_MAPJOIN_LOCAL:
        hintedMapJoinLocal++;
        break;
      case Task.CONVERTED_MAPJOIN:
        convertedMapJoin++;
        break;
      case Task.CONVERTED_MAPJOIN_LOCAL:
        convertedMapJoinLocal++;
        break;
      case Task.BACKUP_COMMON_JOIN:
        backupCommonJoin++;
        break;
    }
  }
  LogHelper console = SessionState.getConsole();
  // NOTE(review): printError (not printInfo) appears intentional here so the
  // summary reaches the console stream unconditionally — confirm before changing.
  console.printError("[MapJoinCounter PostHook] COMMON_JOIN: " + commonJoin + " HINTED_MAPJOIN: " + hintedMapJoin + " HINTED_MAPJOIN_LOCAL: " + hintedMapJoinLocal + " CONVERTED_MAPJOIN: " + convertedMapJoin + " CONVERTED_MAPJOIN_LOCAL: " + convertedMapJoinLocal + " BACKUP_COMMON_JOIN: " + backupCommonJoin);
}
Use of org.apache.hadoop.hive.ql.exec.TaskRunner in the Twitter Ambrose project: class AmbroseHiveFailHook, method run.
/**
 * Failure hook: pushes a JobFailedEvent to the Ambrose progress reporter for
 * every completed task that exited with a non-zero status, then keeps the
 * embedded HiveStats REST server alive for a configurable number of seconds
 * (system property POST_SCRIPT_SLEEP_SECS_PARAM, default 10) so the UI can
 * still be inspected after the script has failed.
 *
 * @param hookContext context of the failed query, supplying the conf and the
 *                    list of completed task runners
 * @throws Exception on reflective access failure or reporter errors
 */
@Override
public void run(HookContext hookContext) throws Exception {
  HiveConf conf = hookContext.getConf();
  Properties allConfProps = conf.getAllProperties();
  String queryId = AmbroseHiveUtil.getHiveQueryId(conf);
  EmbeddedAmbroseHiveProgressReporter reporter = getEmbeddedProgressReporter();
  List<TaskRunner> completeTaskList = hookContext.getCompleteTaskList();
  // TaskRunner does not expose its TaskResult publicly, so it is read reflectively.
  Field _taskResultField = accessTaskResultField();
  for (TaskRunner taskRunner : completeTaskList) {
    TaskResult taskResult = (TaskResult) _taskResultField.get(taskRunner);
    // Report only tasks that have finished AND failed (non-zero exit value).
    if (!taskResult.isRunning() && taskResult.getExitVal() != 0) {
      Task<? extends Serializable> task = taskRunner.getTask();
      String nodeId = AmbroseHiveUtil.getNodeIdFromNodeName(conf, task.getId());
      DAGNode<Job> dagNode = reporter.getDAGNodeFromNodeId(nodeId);
      HiveJob job = (HiveJob) dagNode.getJob();
      job.setConfiguration(allConfProps);
      MapReduceJobState mrJobState = getJobState(job);
      mrJobState.setSuccessful(false);
      reporter.addJob((Job) job);
      reporter.pushEvent(queryId, new Event.JobFailedEvent(dagNode));
    }
  }
  reporter.restoreEventStack();
  String sleepTime = System.getProperty(POST_SCRIPT_SLEEP_SECS_PARAM, "10");
  try {
    int sleepTimeSeconds = Integer.parseInt(sleepTime);
    LOG.info("Script failed but sleeping for " + sleepTimeSeconds + " seconds to keep the HiveStats REST server running. Hit ctrl-c to exit.");
    Thread.sleep(sleepTimeSeconds * 1000L);
    reporter.stopServer();
  } catch (NumberFormatException e) {
    LOG.warn(POST_SCRIPT_SLEEP_SECS_PARAM + " param is not a valid number, not sleeping: " + sleepTime);
  } catch (InterruptedException e) {
    // Restore the interrupt status so callers up the stack can observe it.
    Thread.currentThread().interrupt();
    LOG.warn("Sleep interrupted", e);
  }
}
Aggregations