Use of org.apache.hadoop.hive.ql.stats.ClientStatsPublisher in project hive by apache.
The class HadoopJobExecHelper, method progress. This method polls a running MapReduce job until it completes, logs map and reduce progress, and on each polling iteration passes the extracted counter values to any registered ClientStatsPublisher instances.
private MapRedStats progress(ExecDriverTaskHandle th) throws IOException, LockException {
JobClient jc = th.getJobClient();
RunningJob rj = th.getRunningJob();
SimpleDateFormat dateFormat = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss,SSS");
//DecimalFormat longFormatter = new DecimalFormat("###,###");
long reportTime = System.currentTimeMillis();
long maxReportInterval = HiveConf.getTimeVar(job, HiveConf.ConfVars.HIVE_LOG_INCREMENTAL_PLAN_PROGRESS_INTERVAL, TimeUnit.MILLISECONDS);
boolean fatal = false;
StringBuilder errMsg = new StringBuilder();
long pullInterval = HiveConf.getLongVar(job, HiveConf.ConfVars.HIVECOUNTERSPULLINTERVAL);
boolean initializing = true;
boolean initOutputPrinted = false;
long cpuMsec = -1;
int numMap = -1;
int numReduce = -1;
List<ClientStatsPublisher> clientStatPublishers = getClientStatPublishers();
final boolean localMode = ShimLoader.getHadoopShims().isLocalMode(job);
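// Poll the running job until it completes, logging progress and publishing counters on each iteration.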
while (!rj.isComplete()) {
if (th.getContext() != null) {
th.getContext().checkHeartbeaterLockException();
}
try {
Thread.sleep(pullInterval);
} catch (InterruptedException e) {
// ignore the interruption and continue polling
}
if (initializing && rj.getJobState() == JobStatus.PREP) {
// No reason to poll until the job is initialized
continue;
} else {
// By now the job is initialized so no reason to do
// rj.getJobState() again and we do not want to do an extra RPC call
initializing = false;
}
if (!localMode) {
if (!initOutputPrinted) {
SessionState ss = SessionState.get();
String logMapper;
String logReducer;
TaskReport[] mappers = jc.getMapTaskReports(rj.getID());
if (mappers == null) {
logMapper = "no information for number of mappers; ";
} else {
numMap = mappers.length;
if (ss != null) {
ss.getHiveHistory().setTaskProperty(queryId, getId(), Keys.TASK_NUM_MAPPERS, Integer.toString(numMap));
}
logMapper = "number of mappers: " + numMap + "; ";
}
TaskReport[] reducers = jc.getReduceTaskReports(rj.getID());
if (reducers == null) {
logReducer = "no information for number of reducers. ";
} else {
numReduce = reducers.length;
if (ss != null) {
ss.getHiveHistory().setTaskProperty(queryId, getId(), Keys.TASK_NUM_REDUCERS, Integer.toString(numReduce));
}
logReducer = "number of reducers: " + numReduce;
}
console.printInfo("Hadoop job information for " + getId() + ": " + logMapper + logReducer);
initOutputPrinted = true;
}
RunningJob newRj = jc.getJob(rj.getID());
if (newRj == null) {
// The job's status could not be retrieved, so raise a meaningful exception
throw new IOException("Could not find status of job:" + rj.getID());
} else {
th.setRunningJob(newRj);
rj = newRj;
}
}
// If fatal errors happen we should kill the job immediately rather than let it retry several times, which would eventually lead to failure.
if (fatal) {
// wait until rj.isComplete
continue;
}
Counters ctrs = th.getCounters();
if (fatal = checkFatalErrors(ctrs, errMsg)) {
console.printError("[Fatal Error] " + errMsg.toString() + ". Killing the job.");
rj.killJob();
continue;
}
errMsg.setLength(0);
updateCounters(ctrs, rj);
// Prepare data for Client Stat Publishers (if any present) and execute them
if (clientStatPublishers.size() > 0 && ctrs != null) {
Map<String, Double> extractedCounters = extractAllCounterValues(ctrs);
for (ClientStatsPublisher clientStatPublisher : clientStatPublishers) {
try {
clientStatPublisher.run(extractedCounters, rj.getID().toString());
} catch (RuntimeException runtimeException) {
LOG.error("Exception " + runtimeException.getClass().getCanonicalName() + " thrown when running clientStatsPublishers. The stack trace is: ", runtimeException);
}
}
}
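// Only print a progress update if progress has changed or the maximum report interval has elapsed.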
if (mapProgress == lastMapProgress && reduceProgress == lastReduceProgress && System.currentTimeMillis() < reportTime + maxReportInterval) {
continue;
}
StringBuilder report = new StringBuilder();
report.append(dateFormat.format(Calendar.getInstance().getTime()));
report.append(' ').append(getId());
report.append(" map = ").append(mapProgress).append("%, ");
report.append(" reduce = ").append(reduceProgress).append('%');
// Find out the cumulative CPU msecs; if the counter is unavailable, skip printing it out.
if (ctrs != null) {
Counter counterCpuMsec = ctrs.findCounter("org.apache.hadoop.mapred.Task$Counter", "CPU_MILLISECONDS");
if (counterCpuMsec != null) {
long newCpuMSec = counterCpuMsec.getValue();
if (newCpuMSec > 0) {
cpuMsec = newCpuMSec;
report.append(", Cumulative CPU ").append((cpuMsec / 1000D)).append(" sec");
}
}
}
// write out serialized plan with counters to log file
// LOG.info(queryPlan);
String output = report.toString();
SessionState ss = SessionState.get();
if (ss != null) {
ss.getHiveHistory().setTaskCounters(queryId, getId(), ctrs);
ss.getHiveHistory().setTaskProperty(queryId, getId(), Keys.TASK_HADOOP_PROGRESS, output);
if (ss.getConf().getBoolVar(HiveConf.ConfVars.HIVE_LOG_INCREMENTAL_PLAN_PROGRESS)) {
ss.getHiveHistory().progressTask(queryId, this.task);
this.callBackObj.logPlanProgress(ss);
}
}
console.printInfo(output);
task.setStatusMessage(output);
reportTime = System.currentTimeMillis();
}
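// The job has finished (successfully or not); read the final counters to capture the total cumulative CPU time.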
Counters ctrs = th.getCounters();
if (ctrs != null) {
Counter counterCpuMsec = ctrs.findCounter("org.apache.hadoop.mapred.Task$Counter", "CPU_MILLISECONDS");
if (counterCpuMsec != null) {
long newCpuMSec = counterCpuMsec.getValue();
if (newCpuMSec > cpuMsec) {
cpuMsec = newCpuMSec;
}
}
}
if (cpuMsec > 0) {
String status = "MapReduce Total cumulative CPU time: " + Utilities.formatMsecToStr(cpuMsec);
console.printInfo(status);
task.setStatusMessage(status);
}
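// Decide overall success: a fatal counter error means failure; otherwise defer to the job's own status.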
boolean success;
if (fatal) {
success = false;
} else {
// the last check before the job is completed
if (checkFatalErrors(ctrs, errMsg)) {
console.printError("[Fatal Error] " + errMsg.toString());
success = false;
} else {
SessionState ss = SessionState.get();
if (ss != null) {
ss.getHiveHistory().setTaskCounters(queryId, getId(), ctrs);
}
success = rj.isSuccessful();
}
}
MapRedStats mapRedStats = new MapRedStats(numMap, numReduce, cpuMsec, success, rj.getID().toString());
mapRedStats.setCounters(ctrs);
// update based on the final value of the counters
updateCounters(ctrs, rj);
SessionState ss = SessionState.get();
if (ss != null) {
this.callBackObj.logPlanProgress(ss);
}
// LOG.info(queryPlan);
return mapRedStats;
}
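The publishers in clientStatPublishers are looked up by getClientStatPublishers(), typically from a comma-separated list of implementation class names in the Hive configuration (the hive.client.stats.publishers property in recent versions). A minimal sketch of a custom publisher follows; the class name LoggingClientStatsPublisher and its package are illustrative assumptions, and the only thing taken from the snippet above is the run(Map<String, Double>, String) call made in the publishing loop.

// Illustrative only: a ClientStatsPublisher that logs each counter value it receives.
// The class and package names are hypothetical; the run(...) signature matches the
// call site in progress() above.
package org.example.hive.stats;

import java.util.Map;

import org.apache.hadoop.hive.ql.stats.ClientStatsPublisher;

public class LoggingClientStatsPublisher implements ClientStatsPublisher {

  @Override
  public void run(Map<String, Double> counterValues, String jobId) {
    // Invoked once per polling interval with all counters extracted from the running job.
    for (Map.Entry<String, Double> counter : counterValues.entrySet()) {
      System.out.println(jobId + " " + counter.getKey() + " = " + counter.getValue());
    }
  }
}

Because progress() wraps each publisher invocation in a try/catch for RuntimeException, a publisher that throws only produces an error log entry and does not abort the query.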