Use of org.apache.hadoop.hive.llap.io.encoded.OrcEncodedDataReader in project hive by apache.
Class OrcColumnVectorProducer, method createReadPipeline (variant taking explicit column IDs and a reader schema):
@Override
public ReadPipeline createReadPipeline(Consumer<ColumnVectorBatch> consumer,
    FileSplit split, List<Integer> columnIds, SearchArgument sarg,
    String[] columnNames, QueryFragmentCounters counters,
    TypeDescription readerSchema, InputFormat<?, ?> unused0,
    Deserializer unused1, Reporter reporter, JobConf job,
    Map<Path, PartitionDesc> unused2) throws IOException {
  cacheMetrics.incrCacheReadRequests();
  OrcEncodedDataConsumer edc = new OrcEncodedDataConsumer(
      consumer, columnIds.size(), _skipCorrupt, counters, ioMetrics);
  // Note: we use global conf here and ignore JobConf.
  OrcEncodedDataReader reader = new OrcEncodedDataReader(lowLevelCache,
      bufferManager, metadataCache, conf, split, columnIds, sarg,
      columnNames, edc, counters, readerSchema);
  // The reader is passed twice: once as the read task to run, once as the
  // feedback target that receives consumed buffers back for cleanup.
  edc.init(reader, reader);
  return edc;
}
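
What createReadPipeline returns is the consumer itself, wired back-to-back with the reader: the reader (a Callable) fetches encoded ORC stream data and pushes it to the consumer, while the same reader object also acts as the consumer's feedback channel for returning consumed buffers, which is why it is passed to edc.init twice. Below is a minimal, self-contained sketch of that wiring pattern; all names here (Feedback, Consumer, EncodedReader, BatchConsumer, PipelineSketch) are hypothetical simplifications, not Hive's actual API:

import java.util.concurrent.Callable;

// Hypothetical stand-in for the feedback channel (buffers returned upstream).
interface Feedback<T> {
    void returnData(T data);
}

// Hypothetical stand-in for the downstream consumer.
interface Consumer<T> {
    void consumeData(T data) throws InterruptedException;
    void setDone();
}

// Plays the role of OrcEncodedDataReader: produces data AND takes feedback.
class EncodedReader implements Callable<Void>, Feedback<String> {
    private final Consumer<String> downstream;
    EncodedReader(Consumer<String> downstream) { this.downstream = downstream; }

    @Override public Void call() throws Exception {
        for (int stripe = 0; stripe < 3; ++stripe) {
            downstream.consumeData("encoded-stripe-" + stripe);
        }
        downstream.setDone();
        return null;
    }

    @Override public void returnData(String data) {
        // In LLAP this would release cache buffers; here we just log.
        System.out.println("buffer returned: " + data);
    }
}

// Plays the role of OrcEncodedDataConsumer: decodes and forwards batches.
class BatchConsumer implements Consumer<String> {
    private Feedback<String> upstream;
    void init(Feedback<String> upstream) { this.upstream = upstream; }

    @Override public void consumeData(String data) {
        System.out.println("decoded: " + data);
        upstream.returnData(data); // hand the consumed buffers back
    }
    @Override public void setDone() { System.out.println("done"); }
}

public class PipelineSketch {
    public static void main(String[] args) throws Exception {
        BatchConsumer edc = new BatchConsumer();
        EncodedReader reader = new EncodedReader(edc);
        edc.init(reader);  // the same object is producer and feedback target
        reader.call();     // LLAP would submit this to an executor instead
    }
}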
Use of org.apache.hadoop.hive.llap.io.encoded.OrcEncodedDataReader in project hive by apache.
Class OrcColumnVectorProducer, method createReadPipeline (variant taking an Includes object and a SchemaEvolutionFactory):
@Override
public ReadPipeline createReadPipeline(Consumer<ColumnVectorBatch> consumer,
    FileSplit split, Includes includes, SearchArgument sarg,
    QueryFragmentCounters counters, SchemaEvolutionFactory sef,
    InputFormat<?, ?> unused0, Deserializer unused1, Reporter reporter,
    JobConf job, Map<Path, PartitionDesc> parts) throws IOException {
  cacheMetrics.incrCacheReadRequests();
  OrcEncodedDataConsumer edc = new OrcEncodedDataConsumer(
      consumer, includes, counters, ioMetrics);
  OrcEncodedDataReader reader = new OrcEncodedDataReader(lowLevelCache,
      bufferManager, metadataCache, conf, job, split, includes, sarg, edc,
      counters, sef, tracePool, parts, pathCache);
  // As above, the reader is both producer and feedback target; here it also
  // supplies an I/O trace object to the consumer.
  edc.init(reader, reader, reader.getTrace());
  return edc;
}
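
This variant replaces the raw column-ID list and reader schema with an Includes object and a SchemaEvolutionFactory, passes the JobConf through to the reader, and hands the consumer an I/O trace via reader.getTrace(). On the calling side, the returned ReadPipeline is typically driven asynchronously by submitting its read work to an executor; below is a hedged, self-contained sketch of that submission pattern, where ReadPipelineLike and start are hypothetical stand-ins rather than Hive's actual interfaces:

import java.util.concurrent.Callable;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;

public class SubmitSketch {
    // Hypothetical simplification: the only assumption made is that the
    // pipeline exposes its read work as a Callable.
    interface ReadPipelineLike {
        Callable<Void> getReadCallable();
    }

    static Future<Void> start(ReadPipelineLike pipeline, ExecutorService executor) {
        // Submit the read work asynchronously; the caller then polls the
        // consumer side for decoded ColumnVectorBatch objects.
        return executor.submit(pipeline.getReadCallable());
    }

    public static void main(String[] args) throws Exception {
        ExecutorService executor = Executors.newSingleThreadExecutor();
        ReadPipelineLike pipeline = () -> () -> { // trivial no-op pipeline
            System.out.println("reading and decoding encoded ORC data...");
            return null;
        };
        start(pipeline, executor).get(); // wait for the read to finish
        executor.shutdown();
    }
}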