Use of org.apache.hadoop.hive.ql.io.HiveContextAwareRecordReader in project hive by apache: class FetchOperator, method getRecordReader().
private RecordReader<WritableComparable, Writable> getRecordReader() throws Exception {
  if (!iterSplits.hasNext()) {
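    // the current batch of splits is exhausted; fetch the next batch (null means no more input)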
    List<FetchInputFormatSplit> splits = getNextSplits();
    if (splits == null) {
      return null;
    }
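    // choose the SerDe for this batch; a converter is only needed when the
    // partition's object inspector differs from the converted table one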
    if (!isPartitioned || convertedOI == null) {
      currSerDe = tableSerDe;
      ObjectConverter = null;
    } else {
      currSerDe = needConversion(currDesc) ? currDesc.getDeserializer(job) : tableSerDe;
      ObjectInspector inputOI = currSerDe.getObjectInspector();
      ObjectConverter = ObjectInspectorConverters.getConverter(inputOI, convertedOI);
    }
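    // materialize the partition key values for the rows of this partition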
    if (isPartitioned) {
      row[1] = createPartValue(currDesc, partKeyOI);
    }
    iterSplits = splits.iterator();
    if (LOG.isDebugEnabled()) {
      LOG.debug("Creating fetchTask with deserializer typeinfo: "
          + currSerDe.getObjectInspector().getTypeName());
      LOG.debug("deserializer properties:\ntable properties: "
          + currDesc.getTableDesc().getProperties()
          + "\npartition properties: " + currDesc.getProperties());
    }
  }
  final FetchInputFormatSplit target = iterSplits.next();
  final RecordReader<WritableComparable, Writable> reader = target.getRecordReader(job);
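  // virtual columns and split sampling rely on IOContext bookkeeping, so in
  // that case the plain reader is wrapped in a context-aware HiveRecordReader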
  if (hasVC || work.getSplitSample() != null) {
    currRecReader = new HiveRecordReader<WritableComparable, Writable>(reader, job) {
      @Override
      public boolean doNext(WritableComparable key, Writable value) throws IOException {
        // if the current position is past the shrinkedLength calculated for
        // each split by table sampling, stop fetching any more (early exit)
        if (target.shrinkedLength > 0
            && context.getIoCxt().getCurrentBlockStart() > target.shrinkedLength) {
          return false;
        }
        return super.doNext(key, value);
      }
    };
    ((HiveContextAwareRecordReader) currRecReader).initIOContext(target, job, target.inputFormat.getClass(), reader);
  } else {
    currRecReader = reader;
  }
  key = currRecReader.createKey();
  value = currRecReader.createValue();
  headerCount = footerCount = 0;
  return currRecReader;
}
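For context, here is a minimal sketch of how a caller might drain the reader returned above. The ReaderDrain class, its drain method, and the processRow consumer are hypothetical illustrations, not part of FetchOperator; only the RecordReader calls (createKey, createValue, next, close) are the real Hadoop API.

import java.io.IOException;
import java.util.function.Consumer;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.io.WritableComparable;
import org.apache.hadoop.mapred.RecordReader;

// Hypothetical helper: reads every record from one reader, handing each raw
// value to a consumer (FetchOperator itself deserializes values with currSerDe).
public final class ReaderDrain {
  static void drain(RecordReader<WritableComparable, Writable> reader,
                    Consumer<Writable> processRow) throws IOException {
    WritableComparable key = reader.createKey();
    Writable value = reader.createValue();
    try {
      // next() reuses the key/value instances and returns false at end of split
      while (reader.next(key, value)) {
        processRow.accept(value);
      }
    } finally {
      reader.close();
    }
  }
}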