use of org.apache.hadoop.mapred.Counters.Counter in project hive by apache.
the class MapRedStats method toString.
@Override
public String toString() {
  StringBuilder sb = new StringBuilder();
  if (numMap > 0) {
    sb.append("Map: " + numMap + " ");
  }
  if (numReduce > 0) {
    sb.append("Reduce: " + numReduce + " ");
  }
  if (cpuMSec > 0) {
    sb.append(" Cumulative CPU: " + (cpuMSec / 1000D) + " sec ");
  }
  if (counters != null) {
    Counter hdfsReadCntr = counters.findCounter("FileSystemCounters", "HDFS_BYTES_READ");
    long hdfsRead;
    if (hdfsReadCntr != null && (hdfsRead = hdfsReadCntr.getValue()) >= 0) {
      sb.append(" HDFS Read: " + hdfsRead);
    }
    Counter hdfsWrittenCntr = counters.findCounter("FileSystemCounters", "HDFS_BYTES_WRITTEN");
    long hdfsWritten;
    if (hdfsWrittenCntr != null && (hdfsWritten = hdfsWrittenCntr.getValue()) >= 0) {
      sb.append(" HDFS Write: " + hdfsWritten);
    }
  }
  sb.append(" " + (success ? "SUCCESS" : "FAIL"));
  return sb.toString();
}
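For reference, here is a minimal standalone sketch of the findCounter lookup that toString() relies on. The CounterLookupDemo class and the 1024-byte value are invented for illustration; only the "FileSystemCounters" group and "HDFS_BYTES_READ" counter name come from the snippet above, and on a real job the framework fills them in.

import org.apache.hadoop.mapred.Counters;
import org.apache.hadoop.mapred.Counters.Counter;

public class CounterLookupDemo {
  public static void main(String[] args) {
    Counters counters = new Counters();
    // Hypothetical value; in MapRedStats the framework populates this counter.
    counters.incrCounter("FileSystemCounters", "HDFS_BYTES_READ", 1024L);
    Counter hdfsReadCntr = counters.findCounter("FileSystemCounters", "HDFS_BYTES_READ");
    if (hdfsReadCntr != null && hdfsReadCntr.getValue() >= 0) {
      System.out.println("HDFS Read: " + hdfsReadCntr.getValue());
    }
  }
}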
use of org.apache.hadoop.mapred.Counters.Counter in project voldemort by voldemort.
the class AbstractHadoopJob method run.
public void run(JobConf conf) throws Exception {
  _runningJob = new JobClient(conf).submitJob(conf);
  info("See " + _runningJob.getTrackingURL() + " for details.");
  _runningJob.waitForCompletion();
  if (!_runningJob.isSuccessful()) {
    throw new Exception("Hadoop job:" + getId() + " failed!");
  }
  // Dump all counters.
  Counters counters = _runningJob.getCounters();
  for (String groupName : counters.getGroupNames()) {
    Counters.Group group = counters.getGroup(groupName);
    info("Group: " + group.getDisplayName());
    for (Counter counter : group) {
      info(counter.getDisplayName() + ":\t" + counter.getValue());
    }
  }
}
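run() above dumps every counter group of the completed job. The same group-and-counter traversal can be exercised against an in-memory Counters object, which makes for a runnable sketch that needs no cluster; the CounterDumpDemo class, group name, and counter values below are hypothetical.

import org.apache.hadoop.mapred.Counters;
import org.apache.hadoop.mapred.Counters.Counter;
import org.apache.hadoop.mapred.Counters.Group;

public class CounterDumpDemo {
  public static void main(String[] args) {
    Counters counters = new Counters();
    // Made-up counters standing in for what a finished job would report.
    counters.incrCounter("my-group", "records-seen", 42L);
    counters.incrCounter("my-group", "records-dropped", 3L);
    for (String groupName : counters.getGroupNames()) {
      Group group = counters.getGroup(groupName);
      System.out.println("Group: " + group.getDisplayName());
      for (Counter counter : group) {
        System.out.println(counter.getDisplayName() + ":\t" + counter.getValue());
      }
    }
  }
}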
use of org.apache.hadoop.mapred.Counters.Counter in project hive by apache.
the class HadoopJobExecHelper method progress.
private MapRedStats progress(ExecDriverTaskHandle th) throws IOException, LockException {
  JobClient jc = th.getJobClient();
  RunningJob rj = th.getRunningJob();
  SimpleDateFormat dateFormat = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss,SSS");
  // DecimalFormat longFormatter = new DecimalFormat("###,###");
  long reportTime = System.currentTimeMillis();
  long maxReportInterval = HiveConf.getTimeVar(job, HiveConf.ConfVars.HIVE_LOG_INCREMENTAL_PLAN_PROGRESS_INTERVAL, TimeUnit.MILLISECONDS);
  boolean fatal = false;
  StringBuilder errMsg = new StringBuilder();
  long pullInterval = HiveConf.getLongVar(job, HiveConf.ConfVars.HIVECOUNTERSPULLINTERVAL);
  boolean initializing = true;
  boolean initOutputPrinted = false;
  long cpuMsec = -1;
  int numMap = -1;
  int numReduce = -1;
  List<ClientStatsPublisher> clientStatPublishers = getClientStatPublishers();
  final boolean localMode = ShimLoader.getHadoopShims().isLocalMode(job);
  while (!rj.isComplete()) {
    if (th.getContext() != null) {
      th.getContext().checkHeartbeaterLockException();
    }
    try {
      Thread.sleep(pullInterval);
    } catch (InterruptedException e) {
      // Ignore the interruption and poll again.
    }
    if (initializing && rj.getJobState() == JobStatus.PREP) {
      // No reason to poll until the job is initialized.
      continue;
    } else {
      // By now the job is initialized, so there is no reason to call
      // rj.getJobState() again; we do not want to make an extra RPC call.
      initializing = false;
    }
    if (!localMode) {
      if (!initOutputPrinted) {
        SessionState ss = SessionState.get();
        String logMapper;
        String logReducer;
        TaskReport[] mappers = jc.getMapTaskReports(rj.getID());
        if (mappers == null) {
          logMapper = "no information for number of mappers; ";
        } else {
          numMap = mappers.length;
          if (ss != null) {
            ss.getHiveHistory().setTaskProperty(queryId, getId(), Keys.TASK_NUM_MAPPERS, Integer.toString(numMap));
          }
          logMapper = "number of mappers: " + numMap + "; ";
        }
        TaskReport[] reducers = jc.getReduceTaskReports(rj.getID());
        if (reducers == null) {
          logReducer = "no information for number of reducers. ";
        } else {
          numReduce = reducers.length;
          if (ss != null) {
            ss.getHiveHistory().setTaskProperty(queryId, getId(), Keys.TASK_NUM_REDUCERS, Integer.toString(numReduce));
          }
          logReducer = "number of reducers: " + numReduce;
        }
        console.printInfo("Hadoop job information for " + getId() + ": " + logMapper + logReducer);
        initOutputPrinted = true;
      }
      RunningJob newRj = jc.getJob(rj.getID());
      if (newRj == null) {
        // Hadoop may have purged the job's status from memory and be unable
        // to look it up; treat that as a failure and raise a meaningful exception.
        throw new IOException("Could not find status of job:" + rj.getID());
      } else {
        th.setRunningJob(newRj);
        rj = newRj;
      }
    }
    // If a fatal error has been seen, kill the job immediately rather than
    // let it retry several times, which would eventually lead to failure anyway.
    if (fatal) {
      // Wait until rj.isComplete.
      continue;
    }
    Counters ctrs = th.getCounters();
    if (fatal = checkFatalErrors(ctrs, errMsg)) {
      console.printError("[Fatal Error] " + errMsg.toString() + ". Killing the job.");
      rj.killJob();
      continue;
    }
    errMsg.setLength(0);
    updateCounters(ctrs, rj);
    // Prepare data for the client stats publishers (if any are present) and execute them.
    if (clientStatPublishers.size() > 0 && ctrs != null) {
      Map<String, Double> extractedCounters = extractAllCounterValues(ctrs);
      for (ClientStatsPublisher clientStatPublisher : clientStatPublishers) {
        try {
          clientStatPublisher.run(extractedCounters, rj.getID().toString());
        } catch (RuntimeException runtimeException) {
          LOG.error("Exception " + runtimeException.getClass().getCanonicalName() + " thrown when running clientStatsPublishers. The stack trace is: ", runtimeException);
        }
      }
    }
    if (mapProgress == lastMapProgress && reduceProgress == lastReduceProgress && System.currentTimeMillis() < reportTime + maxReportInterval) {
      continue;
    }
    StringBuilder report = new StringBuilder();
    report.append(dateFormat.format(Calendar.getInstance().getTime()));
    report.append(' ').append(getId());
    report.append(" map = ").append(mapProgress).append("%, ");
    report.append(" reduce = ").append(reduceProgress).append('%');
    // Find out the CPU msecs; if the number cannot be determined, just skip printing it out.
    if (ctrs != null) {
      Counter counterCpuMsec = ctrs.findCounter("org.apache.hadoop.mapred.Task$Counter", "CPU_MILLISECONDS");
      if (counterCpuMsec != null) {
        long newCpuMSec = counterCpuMsec.getValue();
        if (newCpuMSec > 0) {
          cpuMsec = newCpuMSec;
          report.append(", Cumulative CPU ").append((cpuMsec / 1000D)).append(" sec");
        }
      }
    }
    // Write out the serialized plan with counters to the log file.
    // LOG.info(queryPlan);
    String output = report.toString();
    SessionState ss = SessionState.get();
    if (ss != null) {
      ss.getHiveHistory().setTaskCounters(queryId, getId(), ctrs);
      ss.getHiveHistory().setTaskProperty(queryId, getId(), Keys.TASK_HADOOP_PROGRESS, output);
      if (ss.getConf().getBoolVar(HiveConf.ConfVars.HIVE_LOG_INCREMENTAL_PLAN_PROGRESS)) {
        ss.getHiveHistory().progressTask(queryId, this.task);
        this.callBackObj.logPlanProgress(ss);
      }
    }
    console.printInfo(output);
    task.setStatusMessage(output);
    reportTime = System.currentTimeMillis();
  }
  Counters ctrs = th.getCounters();
  if (ctrs != null) {
    Counter counterCpuMsec = ctrs.findCounter("org.apache.hadoop.mapred.Task$Counter", "CPU_MILLISECONDS");
    if (counterCpuMsec != null) {
      long newCpuMSec = counterCpuMsec.getValue();
      if (newCpuMSec > cpuMsec) {
        cpuMsec = newCpuMSec;
      }
    }
  }
  if (cpuMsec > 0) {
    String status = "MapReduce Total cumulative CPU time: " + Utilities.formatMsecToStr(cpuMsec);
    console.printInfo(status);
    task.setStatusMessage(status);
  }
  boolean success;
  if (fatal) {
    success = false;
  } else {
    // The last check before the job is completed.
    if (checkFatalErrors(ctrs, errMsg)) {
      console.printError("[Fatal Error] " + errMsg.toString());
      success = false;
    } else {
      SessionState ss = SessionState.get();
      if (ss != null) {
        ss.getHiveHistory().setTaskCounters(queryId, getId(), ctrs);
      }
      success = rj.isSuccessful();
    }
  }
  MapRedStats mapRedStats = new MapRedStats(numMap, numReduce, cpuMsec, success, rj.getID().toString());
  mapRedStats.setCounters(ctrs);
  // Update based on the final value of the counters.
  updateCounters(ctrs, rj);
  SessionState ss = SessionState.get();
  if (ss != null) {
    this.callBackObj.logPlanProgress(ss);
  }
  // LOG.info(queryPlan);
  return mapRedStats;
}
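progress() resolves task CPU time through the counter group "org.apache.hadoop.mapred.Task$Counter", the fully qualified name of the framework's internal task-counter enum. Below is a standalone sketch of that lookup; the CpuCounterDemo class and the 2500 ms figure are invented, and on a real job the framework accumulates the value.

import org.apache.hadoop.mapred.Counters;
import org.apache.hadoop.mapred.Counters.Counter;

public class CpuCounterDemo {
  public static void main(String[] args) {
    Counters ctrs = new Counters();
    // Invented value; a real job's tasks accumulate CPU_MILLISECONDS themselves.
    ctrs.incrCounter("org.apache.hadoop.mapred.Task$Counter", "CPU_MILLISECONDS", 2500L);
    Counter counterCpuMsec = ctrs.findCounter("org.apache.hadoop.mapred.Task$Counter", "CPU_MILLISECONDS");
    if (counterCpuMsec != null && counterCpuMsec.getValue() > 0) {
      System.out.println("Cumulative CPU " + (counterCpuMsec.getValue() / 1000D) + " sec");
    }
  }
}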
use of org.apache.hadoop.mapred.Counters.Counter in project hive by apache.
the class HiveHistoryImpl method setTaskCounters.
@Override
public void setTaskCounters(String queryId, String taskId, Counters ctrs) {
  String id = queryId + ":" + taskId;
  QueryInfo ji = queryInfoMap.get(queryId);
  StringBuilder sb1 = new StringBuilder("");
  TaskInfo ti = taskInfoMap.get(id);
  if ((ti == null) || (ctrs == null)) {
    return;
  }
  StringBuilder sb = new StringBuilder("");
  try {
    boolean first = true;
    for (Group group : ctrs) {
      for (Counter counter : group) {
        if (first) {
          first = false;
        } else {
          sb.append(',');
        }
        sb.append(group.getDisplayName());
        sb.append('.');
        sb.append(counter.getDisplayName());
        sb.append(':');
        sb.append(counter.getCounter());
        String tab = getRowCountTableName(counter.getDisplayName());
        if (tab != null) {
          if (sb1.length() > 0) {
            sb1.append(",");
          }
          sb1.append(tab);
          sb1.append('~');
          sb1.append(counter.getCounter());
          ji.rowCountMap.put(tab, counter.getCounter());
        }
      }
    }
  } catch (Exception e) {
    LOG.warn(org.apache.hadoop.util.StringUtils.stringifyException(e));
  }
  if (sb1.length() > 0) {
    taskInfoMap.get(id).hm.put(Keys.ROWS_INSERTED.name(), sb1.toString());
    queryInfoMap.get(queryId).hm.put(Keys.ROWS_INSERTED.name(), sb1.toString());
  }
  if (sb.length() > 0) {
    taskInfoMap.get(id).hm.put(Keys.TASK_COUNTERS.name(), sb.toString());
  }
}
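setTaskCounters() flattens all counters into a comma-separated list of group.counter:value entries. Here is a minimal runnable sketch of that serialization format; the CounterSerializeDemo class and sample counters are hypothetical, and getValue() stands in for the deprecated getCounter() used above.

import org.apache.hadoop.mapred.Counters;
import org.apache.hadoop.mapred.Counters.Counter;
import org.apache.hadoop.mapred.Counters.Group;

public class CounterSerializeDemo {
  public static void main(String[] args) {
    Counters ctrs = new Counters();
    ctrs.incrCounter("g1", "c1", 5L);
    ctrs.incrCounter("g1", "c2", 7L);
    StringBuilder sb = new StringBuilder();
    boolean first = true;
    // Counters is Iterable over its groups, so the nested loop mirrors
    // the traversal in setTaskCounters().
    for (Group group : ctrs) {
      for (Counter counter : group) {
        if (first) {
          first = false;
        } else {
          sb.append(',');
        }
        sb.append(group.getDisplayName()).append('.')
          .append(counter.getDisplayName()).append(':')
          .append(counter.getValue());
      }
    }
    System.out.println(sb); // e.g. g1.c1:5,g1.c2:7
  }
}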
use of org.apache.hadoop.mapred.Counters.Counter in project hadoop by apache.
the class GroupFactoryForTest method testGroupIteratorConcurrency.
@SuppressWarnings("deprecation")
@Test
public void testGroupIteratorConcurrency() {
  Counters counters = new Counters();
  counters.incrCounter("group1", "counter1", 1);
  Group group = counters.getGroup("group1");
  Iterator<Counter> iterator = group.iterator();
  // Adding a new counter to the group after the iterator has been obtained
  // must not invalidate it: next() should succeed rather than throw a
  // ConcurrentModificationException.
  counters.incrCounter("group1", "counter2", 1);
  iterator.next();
}