Use of com.twitter.ambrose.model.hadoop.MapReduceJobState in project ambrose by twitter.
Class AmbroseHiveStatPublisher, method updateJobState:
private boolean updateJobState() throws IOException {
  if (jobProgress == null) {
    jobProgress = new MapReduceJobState(
        rj, jobClient.getMapTaskReports(jobId), jobClient.getReduceTaskReports(jobId));
    return true;
  }
  boolean complete = rj.isComplete();
  boolean successful = rj.isSuccessful();
  float mapProgress = rj.mapProgress();
  float reduceProgress = rj.reduceProgress();
  boolean update = !(jobProgress.isComplete() == complete
      && jobProgress.isSuccessful() == successful
      && AmbroseHiveUtil.isEqual(jobProgress.getMapProgress(), mapProgress)
      && AmbroseHiveUtil.isEqual(jobProgress.getReduceProgress(), reduceProgress));
  // do progress report only if necessary
  if (update) {
    jobProgress = new MapReduceJobState(
        rj, jobClient.getMapTaskReports(jobId), jobClient.getReduceTaskReports(jobId));
    jobProgress.setJobLastUpdateTime(System.currentTimeMillis());
  }
  return update;
}
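The dirty check above relies on AmbroseHiveUtil.isEqual to compare map and reduce progress floats. A minimal sketch of an epsilon-based comparison in that spirit follows; the class name and tolerance are illustrative assumptions, not the actual Ambrose helper.

// Hypothetical epsilon-based comparison, similar in spirit to AmbroseHiveUtil.isEqual.
// The EPSILON value is an assumption; the real Ambrose helper may differ.
public final class ProgressCompare {
  private static final float EPSILON = 0.0001F;

  private ProgressCompare() {
  }

  public static boolean isEqual(float a, float b) {
    // Treat progress values within EPSILON of each other as unchanged,
    // so tiny floating-point jitter does not trigger a progress push.
    return Math.abs(a - b) < EPSILON;
  }
}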
Use of com.twitter.ambrose.model.hadoop.MapReduceJobState in project ambrose by twitter.
Class AmbroseHiveFailHook, method run:
@Override
public void run(HookContext hookContext) throws Exception {
  HiveConf conf = hookContext.getConf();
  Properties allConfProps = conf.getAllProperties();
  String queryId = AmbroseHiveUtil.getHiveQueryId(conf);
  EmbeddedAmbroseHiveProgressReporter reporter = getEmbeddedProgressReporter();
  List<TaskRunner> completeTaskList = hookContext.getCompleteTaskList();
  Field _taskResultField = accessTaskResultField();
  for (TaskRunner taskRunner : completeTaskList) {
    TaskResult taskResult = (TaskResult) _taskResultField.get(taskRunner);
    // get non-running, failed jobs
    if (!taskResult.isRunning() && taskResult.getExitVal() != 0) {
      Task<? extends Serializable> task = taskRunner.getTask();
      String nodeId = AmbroseHiveUtil.getNodeIdFromNodeName(conf, task.getId());
      DAGNode<Job> dagNode = reporter.getDAGNodeFromNodeId(nodeId);
      HiveJob job = (HiveJob) dagNode.getJob();
      job.setConfiguration(allConfProps);
      MapReduceJobState mrJobState = getJobState(job);
      mrJobState.setSuccessful(false);
      reporter.addJob((Job) job);
      reporter.pushEvent(queryId, new Event.JobFailedEvent(dagNode));
    }
  }
  reporter.restoreEventStack();
  String sleepTime = System.getProperty(POST_SCRIPT_SLEEP_SECS_PARAM, "10");
  try {
    int sleepTimeSeconds = Integer.parseInt(sleepTime);
    LOG.info("Script failed but sleeping for " + sleepTimeSeconds
        + " seconds to keep the HiveStats REST server running. Hit ctrl-c to exit.");
    Thread.sleep(sleepTimeSeconds * 1000L);
    reporter.stopServer();
  } catch (NumberFormatException e) {
    LOG.warn(POST_SCRIPT_SLEEP_SECS_PARAM + " param is not a valid number, not sleeping: " + sleepTime);
  } catch (InterruptedException e) {
    LOG.warn("Sleep interrupted", e);
  }
}
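For context, Hive invokes failure hooks listed under the standard hive.exec.failure.hooks property. A minimal wiring sketch follows; the fully qualified hook class name is assumed from the project layout and not confirmed by this snippet.

import org.apache.hadoop.hive.conf.HiveConf;

public class AmbroseFailHookWiring {
  public static HiveConf configure() {
    HiveConf conf = new HiveConf();
    // Hive calls run(HookContext) on each listed class when a query fails.
    // The hook class name below is an assumption.
    conf.set("hive.exec.failure.hooks", "com.twitter.ambrose.hive.AmbroseHiveFailHook");
    return conf;
  }
}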
Use of com.twitter.ambrose.model.hadoop.MapReduceJobState in project ambrose by twitter.
Class AmbroseHiveFailHook, method getJobState:
private MapReduceJobState getJobState(HiveJob job) {
  MapReduceJobState jobState = job.getMapReduceJobState();
  if (jobState != null) {
    return jobState;
  }
  // if job fails immediately after its submission
  jobState = new MapReduceJobState();
  jobState.setJobId(job.getId());
  return jobState;
}
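As a usage note, the placeholder state returned for a job that never got submitted can be filled in by hand using only the constructor and setters seen in these snippets. A short sketch, assuming nothing beyond those members; the job id value is a placeholder for illustration.

import com.twitter.ambrose.model.hadoop.MapReduceJobState;

public class FailedStateSketch {
  public static MapReduceJobState minimalFailedState(String jobId) {
    // Mirror the pattern above: a bare state carrying only the job id,
    // then flag it unsuccessful and stamp the update time.
    MapReduceJobState state = new MapReduceJobState();
    state.setJobId(jobId);
    state.setSuccessful(false);
    state.setJobLastUpdateTime(System.currentTimeMillis());
    return state;
  }
}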