Use of org.apache.hadoop.hive.ql.exec.FileSinkOperator in project hive by apache.
Example: the getOperatorCounters method of the SparkTask class.
/**
 * Collects, keyed by counter group, the names of all Hive counters that the
 * operators in this SparkWork may update at runtime.
 */
private Map<String, List<String>> getOperatorCounters() {
  String groupName = HiveConf.getVar(conf, HiveConf.ConfVars.HIVECOUNTERGROUP);
  Map<String, List<String>> counters = new HashMap<String, List<String>>();
  List<String> hiveCounters = new LinkedList<String>();
  counters.put(groupName, hiveCounters);
  hiveCounters.add(Operator.HIVECOUNTERCREATEDFILES);
  // Spark transformation and Hive operators in SparkWork.
  for (MapOperator.Counter counter : MapOperator.Counter.values()) {
    hiveCounters.add(counter.toString());
  }
  // Walk every BaseWork in the SparkWork DAG and collect the counter names
  // published by the operator types that report counters.
  SparkWork sparkWork = this.getWork();
  for (BaseWork work : sparkWork.getAllWork()) {
    for (Operator<? extends OperatorDesc> operator : work.getAllOperators()) {
      if (operator instanceof FileSinkOperator) {
        // FileSinkOperator qualifies its counter names per operator instance.
        for (FileSinkOperator.Counter counter : FileSinkOperator.Counter.values()) {
          hiveCounters.add(((FileSinkOperator) operator).getCounterName(counter));
        }
      } else if (operator instanceof ReduceSinkOperator) {
        // ReduceSinkOperator derives its counter names from the configuration.
        for (ReduceSinkOperator.Counter counter : ReduceSinkOperator.Counter.values()) {
          hiveCounters.add(((ReduceSinkOperator) operator).getCounterName(counter, conf));
        }
      } else if (operator instanceof ScriptOperator) {
        for (ScriptOperator.Counter counter : ScriptOperator.Counter.values()) {
          hiveCounters.add(counter.toString());
        }
      } else if (operator instanceof JoinOperator) {
        // Skew-key counters for join operators.
        for (JoinOperator.SkewkeyTableCounter counter : JoinOperator.SkewkeyTableCounter.values()) {
          hiveCounters.add(counter.toString());
        }
      }
    }
  }
  return counters;
}
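
For context, a minimal sketch of how the returned group-to-counter-names map might be consumed. The CounterRegistry interface, its createCounter method, and registerOperatorCounters are hypothetical stand-ins for whatever registration mechanism the Spark job submission path actually uses; they are assumptions for illustration, not confirmed Hive API.

import java.util.HashMap;
import java.util.List;
import java.util.Map;

public class CounterRegistrationSketch {

  // Hypothetical registry; stands in for the real counter mechanism.
  interface CounterRegistry {
    void createCounter(String groupName, String counterName);
  }

  // Register every collected counter name under its group.
  static void registerOperatorCounters(Map<String, List<String>> counters,
                                       CounterRegistry registry) {
    for (Map.Entry<String, List<String>> entry : counters.entrySet()) {
      for (String counterName : entry.getValue()) {
        registry.createCounter(entry.getKey(), counterName);
      }
    }
  }

  public static void main(String[] args) {
    // Stand-in for the map returned by getOperatorCounters(); the names
    // below are illustrative, not the exact strings Hive produces.
    Map<String, List<String>> counters = new HashMap<>();
    counters.put("HIVE", List.of("CREATED_FILES", "RECORDS_IN", "RECORDS_OUT"));
    registerOperatorCounters(counters, (g, n) -> System.out.println(g + "::" + n));
  }
}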