Use of org.apache.hadoop.hive.ql.exec.mr.ExecMapper.ReportStats in project hive by apache.
Class ExecReducer, method close:
@Override
public void close() {
  // No row was processed
  if (oc == null && isTraceEnabled) {
    LOG.trace("Close called without any rows processed");
  }
  try {
    if (groupKey != null) {
      // If an operator wants to do some work at the end of a group
      if (isTraceEnabled) {
        LOG.trace("End Group");
      }
      reducer.endGroup();
    }
    reducer.close(abort);
    ReportStats rps = new ReportStats(rp, jc);
    reducer.preorderMap(rps);
  } catch (Exception e) {
    if (!abort) {
      // signal new failure to map-reduce
      LOG.error("Hit error while closing operators - failing tree");
      throw new RuntimeException("Hive Runtime Error while closing operators: " + e.getMessage(), e);
    }
  } finally {
    MapredContext.close();
    Utilities.clearWorkMap(jc);
  }
}
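Every handler on this page finishes by constructing a ReportStats and handing it to preorderMap. The sketch below shows one way such a visitor can publish per-operator statistics as Hadoop counters. It is a minimal illustration inferred from the constructor arguments seen above (a Reporter and a job Configuration), not a verbatim copy of Hive's implementation; in particular, the HIVECOUNTERGROUP lookup and the shape of getStats() are assumptions.

// Minimal sketch, not Hive's exact source: a visitor that turns each
// operator's statistics into Hadoop counters through the Reporter.
public static class ReportStats implements Operator.OperatorFunc {
  private final Reporter rp;
  private final String groupName; // counter group name (assumed to come from HiveConf)

  public ReportStats(Reporter rp, Configuration conf) {
    this.rp = rp;
    this.groupName = HiveConf.getVar(conf, HiveConf.ConfVars.HIVECOUNTERGROUP);
  }

  @Override
  public void func(Operator op) {
    // Assumption: each operator exposes its counters as a name -> value map.
    Map<String, Long> opStats = op.getStats();
    for (Map.Entry<String, Long> e : opStats.entrySet()) {
      if (rp != null) {
        rp.incrCounter(groupName, e.getKey(), e.getValue());
      }
    }
  }
}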
Use of org.apache.hadoop.hive.ql.exec.mr.ExecMapper.ReportStats in project hive by apache.
Class ReduceRecordProcessor, method close:
@Override
void close() {
  if (cache != null && cacheKeys != null) {
    for (String key : cacheKeys) {
      cache.release(key);
    }
  }
  if (dynamicValueCache != null && dynamicValueCacheKeys != null) {
    for (String k : dynamicValueCacheKeys) {
      dynamicValueCache.release(k);
    }
  }
  try {
    if (isAborted()) {
      for (ReduceRecordSource rs : sources) {
        if (!rs.close()) {
          // Preserving the old logic. Hmm...
          setAborted(false);
          break;
        }
      }
    }
    boolean abort = isAborted();
    reducer.close(abort);
    if (mergeWorkList != null) {
      for (BaseWork redWork : mergeWorkList) {
        ((ReduceWork) redWork).getReducer().close(abort);
      }
    }
    // Need to close the dummyOps as well. The operator pipeline
    // is not considered "closed/done" unless all operators are
    // done. For broadcast joins that includes the dummy parents.
    List<HashTableDummyOperator> dummyOps = reduceWork.getDummyOps();
    if (dummyOps != null) {
      for (Operator<?> dummyOp : dummyOps) {
        dummyOp.close(abort);
      }
    }
    ReportStats rps = new ReportStats(reporter, jconf);
    reducer.preorderMap(rps);
  } catch (Exception e) {
    if (!isAborted()) {
      // signal new failure to map-reduce
      l4j.error("Hit error while closing operators - failing tree");
      throw new RuntimeException("Hive Runtime Error while closing operators: " + e.getMessage(), e);
    }
  } finally {
    Utilities.clearWorkMap(jconf);
    MapredContext.close();
  }
}
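The preorderMap(rps) call at the end of each try block applies the visitor to the root operator first and then recurses into its children, so every operator in the tree gets to report its stats. A minimal sketch of such a pre-order traversal, assuming a childOperators list on Operator (a simplification of Hive's actual class):

// Sketch of a pre-order operator-tree walk, assuming a childOperators field.
public void preorderMap(OperatorFunc opFunc) {
  opFunc.func(this); // visit this operator before its children
  if (childOperators != null) {
    for (Operator<? extends OperatorDesc> child : childOperators) {
      child.preorderMap(opFunc); // then recurse depth-first
    }
  }
}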
Use of org.apache.hadoop.hive.ql.exec.mr.ExecMapper.ReportStats in project hive by apache.
Class SparkReduceRecordHandler, method close:
@Override
public void close() {
  // No row was processed
  if (oc == null) {
    LOG.trace("Close called without any rows processed");
  }
  try {
    if (groupKey != null) {
      // If an operator wants to do some work at the end of a group
      LOG.trace("End Group");
      reducer.endGroup();
    }
    if (isLogInfoEnabled) {
      logCloseInfo();
    }
    reducer.close(abort);
    if (localWork != null) {
      for (Operator<? extends OperatorDesc> dummyOp : localWork.getDummyParentOp()) {
        dummyOp.close(abort);
      }
    }
    ReportStats rps = new ReportStats(rp, jc);
    reducer.preorderMap(rps);
  } catch (Exception e) {
    if (!abort) {
      // signal new failure to map-reduce
      LOG.error("Hit error while closing operators - failing tree");
      throw new RuntimeException("Hive Runtime Error while closing operators: " + e.getMessage(), e);
    }
  } finally {
    MapredContext.close();
    Utilities.clearWorkMap(jc);
  }
}
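The groupKey != null guard in the reduce-side handlers exists because endGroup() must only be called for a group that was actually opened. An illustrative (not Hive-source) reduce loop showing why the last group is still open when close() runs:

// Illustrative only: reduce input arrives sorted by key, so a key change
// closes the previous group; the final group has no "next key" and is
// therefore closed later, via the groupKey != null branch in close().
void processKey(Object key, Object value) throws HiveException {
  if (groupKey == null) {
    groupKey = key;            // very first row: open the first group
    reducer.startGroup();
  } else if (!key.equals(groupKey)) {
    reducer.endGroup();        // key changed: finish the previous group
    groupKey = key;
    reducer.startGroup();
  }
  reducer.process(value, 0);   // tag 0: single-input reducer assumed
}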
Use of org.apache.hadoop.hive.ql.exec.mr.ExecMapper.ReportStats in project hive by apache.
Class MapRecordProcessor, method close:
@Override
void close() {
  // check if there are IOExceptions
  if (!isAborted()) {
    setAborted(execContext.getIoCxt().getIOExceptions());
  }
  if (cache != null && cacheKeys != null) {
    for (String k : cacheKeys) {
      cache.release(k);
    }
  }
  if (dynamicValueCache != null && dynamicValueCacheKeys != null) {
    for (String k : dynamicValueCacheKeys) {
      dynamicValueCache.release(k);
    }
  }
  // detecting failed executions by exceptions thrown by the operator tree
  try {
    if (mapOp == null || mapWork == null) {
      return;
    }
    boolean abort = isAborted();
    mapOp.close(abort);
    if (!mergeMapOpList.isEmpty()) {
      for (AbstractMapOperator mergeMapOp : mergeMapOpList) {
        mergeMapOp.close(abort);
      }
    }
    // Need to close the dummyOps as well. The operator pipeline
    // is not considered "closed/done" unless all operators are
    // done. For broadcast joins that includes the dummy parents.
    List<HashTableDummyOperator> dummyOps = mapWork.getDummyOps();
    if (dummyOps != null) {
      for (Operator<? extends OperatorDesc> dummyOp : dummyOps) {
        dummyOp.close(abort);
      }
    }
    ReportStats rps = new ReportStats(reporter, jconf);
    mapOp.preorderMap(rps);
    return;
  } catch (Exception e) {
    if (!isAborted()) {
      // signal new failure to map-reduce
      l4j.error("Hit error while closing operators - failing tree");
      throw new RuntimeException("Hive Runtime Error while closing operators", e);
    }
  } finally {
    Utilities.clearWorkMap(jconf);
    MapredContext.close();
  }
}
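All five close() methods share the same error contract: an exception thrown while closing is rethrown only if the task had not already aborted, so a secondary close-time failure cannot mask the original error, while the cleanup in finally always runs. Reduced to its skeleton (illustrative; operatorTree and abort are stand-ins for the fields above):

try {
  operatorTree.close(abort);   // may throw if any operator fails to flush
} catch (Exception e) {
  if (!abort) {
    // First failure seen here: surface it so the framework fails the task.
    throw new RuntimeException("Hive Runtime Error while closing operators: "
        + e.getMessage(), e);
  }
  // Already aborting: swallow the secondary exception so it cannot
  // hide the error that triggered the abort.
} finally {
  MapredContext.close();       // per-thread context cleanup always runs
  Utilities.clearWorkMap(jconf);
}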
Use of org.apache.hadoop.hive.ql.exec.mr.ExecMapper.ReportStats in project hive by apache.
Class SparkMapRecordHandler, method close:
@Override
public void close() {
  // No row was processed
  if (oc == null) {
    LOG.trace("Close called. no row processed by map.");
  }
  // check if there are IOExceptions
  if (!abort) {
    abort = execContext.getIoCxt().getIOExceptions();
  }
  // ideally hadoop should let us know whether map execution failed or not
  try {
    mo.close(abort);
    // close the local work
    if (localWork != null) {
      List<Operator<? extends OperatorDesc>> dummyOps = localWork.getDummyParentOp();
      for (Operator<? extends OperatorDesc> dummyOp : dummyOps) {
        dummyOp.close(abort);
      }
    }
    if (isLogInfoEnabled) {
      logCloseInfo();
    }
    ReportStats rps = new ReportStats(rp, jc);
    mo.preorderMap(rps);
    return;
  } catch (Exception e) {
    if (!abort) {
      // signal new failure to map-reduce
      String msg = "Hit error while closing operators - failing tree: " + e;
      LOG.error(msg, e);
      throw new IllegalStateException(msg, e);
    }
  } finally {
    MapredContext.close();
    Utilities.clearWorkMap(jc);
  }
}
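Note that SparkMapRecordHandler is the one handler above that throws IllegalStateException rather than RuntimeException, and it logs the exception itself before rethrowing. Since IllegalStateException is a subclass of RuntimeException, callers that catch RuntimeException still see both failure modes the same way.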