Example 1 with ReportStats

use of org.apache.hadoop.hive.ql.exec.mr.ExecMapper.ReportStats in project hive by apache.

the class ExecReducer, method close():

@Override
public void close() {
    // No row was processed
    if (oc == null && isTraceEnabled) {
        LOG.trace("Close called without any rows processed");
    }
    try {
        if (groupKey != null) {
            // If an operator wants to do some work at the end of a group
            if (isTraceEnabled) {
                LOG.trace("End Group");
            }
            reducer.endGroup();
        }
        reducer.close(abort);
        ReportStats rps = new ReportStats(rp, jc);
        reducer.preorderMap(rps);
    } catch (Exception e) {
        if (!abort) {
            // signal new failure to map-reduce
            LOG.error("Hit error while closing operators - failing tree");
            throw new RuntimeException("Hive Runtime Error while closing operators: " + e.getMessage(), e);
        }
    } finally {
        MapredContext.close();
        Utilities.clearWorkMap(jc);
    }
}
Also used: ReportStats (org.apache.hadoop.hive.ql.exec.mr.ExecMapper.ReportStats), IOException (java.io.IOException), SerDeException (org.apache.hadoop.hive.serde2.SerDeException), HiveException (org.apache.hadoop.hive.ql.metadata.HiveException)
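
All five examples follow the same pattern: once the operator tree has been closed, a ReportStats instance is handed to preorderMap, which walks the tree in preorder and applies the function to every operator, so that per-operator statistics can be published as counters (hence the Reporter and JobConf constructor arguments). A minimal, self-contained sketch of that visitor pattern follows; the Op, OperatorFunc, and StatsCollector names are hypothetical stand-ins, not Hive's actual classes.

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

// Hypothetical stand-in for Operator.OperatorFunc.
interface OperatorFunc {
    void func(Op op);
}

// Hypothetical operator-tree node.
class Op {
    final String name;
    final long rowsOut;
    final List<Op> children = new ArrayList<>();

    Op(String name, long rowsOut) {
        this.name = name;
        this.rowsOut = rowsOut;
    }

    // Apply f to this operator first, then to every descendant (preorder).
    void preorderMap(OperatorFunc f) {
        f.func(this);
        for (Op child : children) {
            child.preorderMap(f);
        }
    }
}

// Plays the role of ReportStats: folds each operator's stats into counters.
class StatsCollector implements OperatorFunc {
    final Map<String, Long> counters = new HashMap<>();

    @Override
    public void func(Op op) {
        counters.merge("RECORDS_OUT_" + op.name, op.rowsOut, Long::sum);
    }
}

Here root.preorderMap(new StatsCollector()) plays the role of reducer.preorderMap(rps) above; Hive's real ReportStats reports through a Hadoop Reporter rather than a local map.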

Example 2 with ReportStats

use of org.apache.hadoop.hive.ql.exec.mr.ExecMapper.ReportStats in project hive by apache.

the class ReduceRecordProcessor, method close():

@Override
void close() {
    if (cache != null && cacheKeys != null) {
        for (String key : cacheKeys) {
            cache.release(key);
        }
    }
    if (dynamicValueCache != null && dynamicValueCacheKeys != null) {
        for (String k : dynamicValueCacheKeys) {
            dynamicValueCache.release(k);
        }
    }
    try {
        if (isAborted()) {
            for (ReduceRecordSource rs : sources) {
                if (!rs.close()) {
                    // Preserving the old logic. Hmm...
                    setAborted(false);
                    break;
                }
            }
        }
        boolean abort = isAborted();
        reducer.close(abort);
        if (mergeWorkList != null) {
            for (BaseWork redWork : mergeWorkList) {
                ((ReduceWork) redWork).getReducer().close(abort);
            }
        }
        // Need to close the dummyOps as well. The operator pipeline
        // is not considered "closed/done" unless all operators are
        // done. For broadcast joins that includes the dummy parents.
        List<HashTableDummyOperator> dummyOps = reduceWork.getDummyOps();
        if (dummyOps != null) {
            for (Operator<?> dummyOp : dummyOps) {
                dummyOp.close(abort);
            }
        }
        ReportStats rps = new ReportStats(reporter, jconf);
        reducer.preorderMap(rps);
    } catch (Exception e) {
        if (!isAborted()) {
            // signal new failure to map-reduce
            l4j.error("Hit error while closing operators - failing tree");
            throw new RuntimeException("Hive Runtime Error while closing operators: " + e.getMessage(), e);
        }
    } finally {
        Utilities.clearWorkMap(jconf);
        MapredContext.close();
    }
}
Also used: ReportStats (org.apache.hadoop.hive.ql.exec.mr.ExecMapper.ReportStats), HashTableDummyOperator (org.apache.hadoop.hive.ql.exec.HashTableDummyOperator), BaseWork (org.apache.hadoop.hive.ql.plan.BaseWork)
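
Compared with the MapReduce version, the Tez processor has several roots to shut down with the same abort flag: the main reducer, the reducer of each merged ReduceWork, and the HashTableDummyOperator parents used by broadcast joins. The pipeline only counts as closed once every one of them has been closed. A hedged sketch of that fan-out, where AbortableCloseable is a hypothetical stand-in for Hive's Operator.close(boolean) contract:

import java.util.List;

// Hypothetical stand-in for the Operator.close(boolean) contract.
interface AbortableCloseable {
    void close(boolean abort) throws Exception;
}

final class PipelineCloser {
    // Close every root of the pipeline with the same abort flag, mirroring
    // the reducer, merge-work reducers, and dummy operators above. A failure
    // escapes to the caller, which rethrows only if the task was not
    // already aborted.
    static void closeAll(List<? extends AbortableCloseable> roots, boolean abort) throws Exception {
        for (AbortableCloseable root : roots) {
            root.close(abort);
        }
    }
}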

Example 3 with ReportStats

use of org.apache.hadoop.hive.ql.exec.mr.ExecMapper.ReportStats in project hive by apache.

the class SparkReduceRecordHandler, method close():

@Override
public void close() {
    // No row was processed
    if (oc == null) {
        LOG.trace("Close called without any rows processed");
    }
    try {
        if (groupKey != null) {
            // If an operator wants to do some work at the end of a group
            LOG.trace("End Group");
            reducer.endGroup();
        }
        if (isLogInfoEnabled) {
            logCloseInfo();
        }
        reducer.close(abort);
        if (localWork != null) {
            for (Operator<? extends OperatorDesc> dummyOp : localWork.getDummyParentOp()) {
                dummyOp.close(abort);
            }
        }
        ReportStats rps = new ReportStats(rp, jc);
        reducer.preorderMap(rps);
    } catch (Exception e) {
        if (!abort) {
            // signal new failure to map-reduce
            LOG.error("Hit error while closing operators - failing tree");
            throw new RuntimeException("Hive Runtime Error while closing operators: " + e.getMessage(), e);
        }
    } finally {
        MapredContext.close();
        Utilities.clearWorkMap(jc);
    }
}
Also used: ReportStats (org.apache.hadoop.hive.ql.exec.mr.ExecMapper.ReportStats), IOException (java.io.IOException), SerDeException (org.apache.hadoop.hive.serde2.SerDeException), HiveException (org.apache.hadoop.hive.ql.metadata.HiveException)
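
The groupKey != null guard matters because reducers see rows in key groups: operators may defer work (for example, flushing an aggregation) to the end of a group, so a group that is still open when close() runs must be ended first. A simplified sketch of that lifecycle; GroupTracker and its methods are hypothetical, not Hive's API:

// A group opens when a new key arrives and must be ended exactly once:
// either when the key changes, or from close() if a group is still open.
class GroupTracker {
    private Object groupKey;

    void onRow(Object key) {
        if (!key.equals(groupKey)) {
            if (groupKey != null) {
                endGroup(); // the previous key's group is complete
            }
            groupKey = key;
            startGroup();
        }
        // ... process the row within the current group ...
    }

    void close() {
        if (groupKey != null) {
            endGroup(); // same guard as in the example above
            groupKey = null;
        }
    }

    private void startGroup() { /* start-of-group operator work */ }

    private void endGroup() { /* end-of-group operator work, e.g. flushing aggregates */ }
}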

Example 4 with ReportStats

use of org.apache.hadoop.hive.ql.exec.mr.ExecMapper.ReportStats in project hive by apache.

the class MapRecordProcessor, method close():

@Override
void close() {
    // check if there are IOExceptions
    if (!isAborted()) {
        setAborted(execContext.getIoCxt().getIOExceptions());
    }
    if (cache != null && cacheKeys != null) {
        for (String k : cacheKeys) {
            cache.release(k);
        }
    }
    if (dynamicValueCache != null && dynamicValueCacheKeys != null) {
        for (String k : dynamicValueCacheKeys) {
            dynamicValueCache.release(k);
        }
    }
    // detecting failed executions by exceptions thrown by the operator tree
    try {
        if (mapOp == null || mapWork == null) {
            return;
        }
        boolean abort = isAborted();
        mapOp.close(abort);
        if (!mergeMapOpList.isEmpty()) {
            for (AbstractMapOperator mergeMapOp : mergeMapOpList) {
                mergeMapOp.close(abort);
            }
        }
        // Need to close the dummyOps as well. The operator pipeline
        // is not considered "closed/done" unless all operators are
        // done. For broadcast joins that includes the dummy parents.
        List<HashTableDummyOperator> dummyOps = mapWork.getDummyOps();
        if (dummyOps != null) {
            for (Operator<? extends OperatorDesc> dummyOp : dummyOps) {
                dummyOp.close(abort);
            }
        }
        ReportStats rps = new ReportStats(reporter, jconf);
        mapOp.preorderMap(rps);
        return;
    } catch (Exception e) {
        if (!isAborted()) {
            // signal new failure to map-reduce
            l4j.error("Hit error while closing operators - failing tree");
            throw new RuntimeException("Hive Runtime Error while closing operators", e);
        }
    } finally {
        Utilities.clearWorkMap(jconf);
        MapredContext.close();
    }
}
Also used: AbstractMapOperator (org.apache.hadoop.hive.ql.exec.AbstractMapOperator), ReportStats (org.apache.hadoop.hive.ql.exec.mr.ExecMapper.ReportStats), HashTableDummyOperator (org.apache.hadoop.hive.ql.exec.HashTableDummyOperator), IOException (java.io.IOException)
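
The release loops at the top of close() undo the retrievals done during initialization: the processor remembers every cache key it registered and hands each one back, and both guards tolerate null because close() can also run after a failed or partial initialization. A minimal sketch of the idiom; SimpleObjectCache is hypothetical, while Hive's real org.apache.hadoop.hive.ql.exec.ObjectCache is an interface with engine-specific implementations:

import java.util.List;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

// Hypothetical cache with the same retrieve/release surface as the snippet.
class SimpleObjectCache {
    private final Map<String, Object> entries = new ConcurrentHashMap<>();

    Object retrieve(String key) {
        return entries.get(key);
    }

    void release(String key) {
        entries.remove(key);
    }
}

final class CacheCleanup {
    // Null-tolerant release of every registered key, exactly like the guards
    // in close() above: either side may be null if initialization failed
    // before any keys were registered.
    static void releaseAll(SimpleObjectCache cache, List<String> keys) {
        if (cache != null && keys != null) {
            for (String key : keys) {
                cache.release(key);
            }
        }
    }
}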

Example 5 with ReportStats

use of org.apache.hadoop.hive.ql.exec.mr.ExecMapper.ReportStats in project hive by apache.

the class SparkMapRecordHandler, method close():

@Override
public void close() {
    // No row was processed
    if (oc == null) {
        LOG.trace("Close called. no row processed by map.");
    }
    // check if there are IOExceptions
    if (!abort) {
        abort = execContext.getIoCxt().getIOExceptions();
    }
    // ideally hadoop should let us know whether map execution failed or not
    try {
        mo.close(abort);
        // also close the dummy parent operators of the local work
        if (localWork != null) {
            List<Operator<? extends OperatorDesc>> dummyOps = localWork.getDummyParentOp();
            for (Operator<? extends OperatorDesc> dummyOp : dummyOps) {
                dummyOp.close(abort);
            }
        }
        if (isLogInfoEnabled) {
            logCloseInfo();
        }
        ReportStats rps = new ReportStats(rp, jc);
        mo.preorderMap(rps);
        return;
    } catch (Exception e) {
        if (!abort) {
            // signal new failure to map-reduce
            String msg = "Hit error while closing operators - failing tree: " + e;
            LOG.error(msg, e);
            throw new IllegalStateException(msg, e);
        }
    } finally {
        MapredContext.close();
        Utilities.clearWorkMap(jc);
    }
}
Also used: Operator (org.apache.hadoop.hive.ql.exec.Operator), MapOperator (org.apache.hadoop.hive.ql.exec.MapOperator), AbstractMapOperator (org.apache.hadoop.hive.ql.exec.AbstractMapOperator), VectorMapOperator (org.apache.hadoop.hive.ql.exec.vector.VectorMapOperator), ReportStats (org.apache.hadoop.hive.ql.exec.mr.ExecMapper.ReportStats), OperatorDesc (org.apache.hadoop.hive.ql.plan.OperatorDesc), IOException (java.io.IOException)
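
One detail specific to the map-side handlers: a record reader cannot always propagate a read failure through the framework, so IO errors are latched on a shared IO context, and close() promotes that latch into the abort flag before the operators are closed. A sketch of the latch; this IoContext is a simplified, hypothetical version of what execContext.getIoCxt() returns:

// Simplified, hypothetical version of the IO context consulted above.
class IoContext {
    private volatile boolean ioExceptions;

    // Set by the record reader when a read fails and the exception cannot be
    // propagated through the framework.
    void setIOExceptions(boolean value) {
        ioExceptions = value;
    }

    boolean getIOExceptions() {
        return ioExceptions;
    }
}

class RecordHandlerSkeleton {
    private boolean abort;
    private final IoContext ioCxt = new IoContext();

    void close() {
        // Promote a latched read error into the abort flag before closing
        // operators, mirroring the check at the top of the example above.
        if (!abort) {
            abort = ioCxt.getIOExceptions();
        }
        // ... close the operator tree with the final abort value ...
    }
}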

Aggregations

ReportStats (org.apache.hadoop.hive.ql.exec.mr.ExecMapper.ReportStats): 5 uses
IOException (java.io.IOException): 4 uses
AbstractMapOperator (org.apache.hadoop.hive.ql.exec.AbstractMapOperator): 2 uses
HashTableDummyOperator (org.apache.hadoop.hive.ql.exec.HashTableDummyOperator): 2 uses
HiveException (org.apache.hadoop.hive.ql.metadata.HiveException): 2 uses
SerDeException (org.apache.hadoop.hive.serde2.SerDeException): 2 uses
MapOperator (org.apache.hadoop.hive.ql.exec.MapOperator): 1 use
Operator (org.apache.hadoop.hive.ql.exec.Operator): 1 use
VectorMapOperator (org.apache.hadoop.hive.ql.exec.vector.VectorMapOperator): 1 use
BaseWork (org.apache.hadoop.hive.ql.plan.BaseWork): 1 use
OperatorDesc (org.apache.hadoop.hive.ql.plan.OperatorDesc): 1 use