Search in sources :

Example 1 with HiveContextAwareRecordReader

Example use of org.apache.hadoop.hive.ql.io.HiveContextAwareRecordReader in the Apache Hive project.

The method getRecordReader from the class FetchOperator.

/**
 * Returns a record reader for the next input split, advancing to the next
 * batch of splits (and re-initializing the current SerDe / converter state)
 * when the current split iterator is exhausted.
 *
 * @return a reader positioned at the next split, or {@code null} when
 *         {@code getNextSplits()} reports no more splits
 * @throws Exception propagated from split fetching, deserializer creation,
 *         or reader construction
 */
private RecordReader<WritableComparable, Writable> getRecordReader() throws Exception {
    if (!iterSplits.hasNext()) {
        // Current batch of splits is used up; fetch the next batch.
        List<FetchInputFormatSplit> splits = getNextSplits();
        if (splits == null) {
            // No further input: signal end-of-fetch to the caller.
            return null;
        }
        if (!isPartitioned || convertedOI == null) {
            // Non-partitioned table, or no OI conversion required: read
            // directly with the table-level SerDe and skip conversion.
            currSerDe = tableSerDe;
            // NOTE(review): field name is capitalized like a type; declared
            // outside this view, so it cannot be renamed here.
            ObjectConverter = null;
        } else {
            // Partitioned with a converted OI: use the partition's own
            // deserializer only when its schema actually differs from the
            // table's, and build a converter to the common (converted) OI.
            currSerDe = needConversion(currDesc) ? currDesc.getDeserializer(job) : tableSerDe;
            ObjectInspector inputOI = currSerDe.getObjectInspector();
            ObjectConverter = ObjectInspectorConverters.getConverter(inputOI, convertedOI);
        }
        if (isPartitioned) {
            // row[1] carries the partition-key values for the current
            // partition descriptor (row[0] presumably holds the data row —
            // set elsewhere; confirm against the enclosing class).
            row[1] = createPartValue(currDesc, partKeyOI);
        }
        iterSplits = splits.iterator();
        if (LOG.isDebugEnabled()) {
            LOG.debug("Creating fetchTask with deserializer typeinfo: " + currSerDe.getObjectInspector().getTypeName());
            LOG.debug("deserializer properties:\ntable properties: " + currDesc.getTableDesc().getProperties() + "\npartition properties: " + currDesc.getProperties());
        }
    }
    final FetchInputFormatSplit target = iterSplits.next();
    final RecordReader<WritableComparable, Writable> reader = target.getRecordReader(job);
    if (hasVC || work.getSplitSample() != null) {
        // Virtual columns or split sampling require a context-aware wrapper
        // so IOContext (block offsets etc.) is tracked per record.
        currRecReader = new HiveRecordReader<WritableComparable, Writable>(reader, job) {

            @Override
            public boolean doNext(WritableComparable key, Writable value) throws IOException {
                // each split by table sampling, stop fetching any more (early exit)
                // A positive shrinkedLength marks a split truncated by table
                // sampling; once the reader's current block starts past that
                // length, stop returning records from this split.
                if (target.shrinkedLength > 0 && context.getIoCxt().getCurrentBlockStart() > target.shrinkedLength) {
                    return false;
                }
                return super.doNext(key, value);
            }
        };
        // Wire up the IOContext the doNext override above reads from.
        ((HiveContextAwareRecordReader) currRecReader).initIOContext(target, job, target.inputFormat.getClass(), reader);
    } else {
        // No VCs and no sampling: hand back the raw reader unchanged.
        currRecReader = reader;
    }
    key = currRecReader.createKey();
    value = currRecReader.createValue();
    // Fresh split: reset header/footer skip counters for the next read loop.
    headerCount = footerCount = 0;
    return currRecReader;
}
Also used : ObjectInspector(org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector) StructObjectInspector(org.apache.hadoop.hive.serde2.objectinspector.StructObjectInspector) HiveContextAwareRecordReader(org.apache.hadoop.hive.ql.io.HiveContextAwareRecordReader) WritableComparable(org.apache.hadoop.io.WritableComparable) Writable(org.apache.hadoop.io.Writable) IOException(java.io.IOException)

Aggregations

IOException (java.io.IOException)1 HiveContextAwareRecordReader (org.apache.hadoop.hive.ql.io.HiveContextAwareRecordReader)1 ObjectInspector (org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector)1 StructObjectInspector (org.apache.hadoop.hive.serde2.objectinspector.StructObjectInspector)1 Writable (org.apache.hadoop.io.Writable)1 WritableComparable (org.apache.hadoop.io.WritableComparable)1