Use of org.apache.carbondata.core.scan.result.iterator.ChunkRowIterator in project carbondata by apache.
The class CarbonHiveRecordReader, method initialize.
private void initialize(InputSplit inputSplit, Configuration conf) throws IOException {
  // The input split can contain a single HDFS block or multiple blocks, so first get all
  // the blocks and then set them in the query model.
  List<CarbonHiveInputSplit> splitList;
  if (inputSplit instanceof CarbonHiveInputSplit) {
    splitList = new ArrayList<>(1);
    splitList.add((CarbonHiveInputSplit) inputSplit);
  } else {
    throw new RuntimeException("unsupported input split type: " + inputSplit);
  }
  List<TableBlockInfo> tableBlockInfoList = CarbonHiveInputSplit.createBlocks(splitList);
  queryModel.setTableBlockInfos(tableBlockInfoList);
  readSupport.initialize(queryModel.getProjectionColumns(), queryModel.getTable());
  try {
    // Execute the query and wrap the batched results in a row-wise iterator.
    carbonIterator = new ChunkRowIterator(queryExecutor.execute(queryModel));
  } catch (QueryExecutionException e) {
    throw new IOException(e.getMessage(), e.getCause());
  }
  final TypeInfo rowTypeInfo;
  final List<String> columnNames;
  List<TypeInfo> columnTypes;
  // Get the column names and sort order from the Hive configuration.
  final String colIds = conf.get("hive.io.file.readcolumn.ids");
  final String columnTypeProperty = conf.get(serdeConstants.LIST_COLUMN_TYPES);
  if (columnTypeProperty.length() == 0) {
    columnTypes = new ArrayList<TypeInfo>();
  } else {
    columnTypes = TypeInfoUtils.getTypeInfosFromTypeString(columnTypeProperty);
  }
  if (valueObj == null) {
    valueObj = new ArrayWritable(Writable.class, new Writable[columnTypes.size()]);
  }
  // Parse the comma-separated list of projected column ids into an int array.
  if (!colIds.equals("")) {
    String[] arraySelectedColId = colIds.split(",");
    columnIds = new int[arraySelectedColId.length];
    for (int j = 0; j < arraySelectedColId.length; j++) {
      columnIds[j] = Integer.parseInt(arraySelectedColId[j]);
    }
  }
}
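The parsing of hive.io.file.readcolumn.ids above can be exercised in isolation. Below is a minimal, self-contained sketch, assuming the property holds a comma-separated list of integer column ids such as "0,2,5"; the class and method names are hypothetical, not part of carbondata.

import java.util.Arrays;

// Hypothetical stand-alone version of the column-id parsing step from
// CarbonHiveRecordReader.initialize. Only the property format is taken
// from the snippet above; the names here are illustrative.
public class ColumnIdParsingSketch {
  static int[] parseColumnIds(String colIds) {
    if (colIds == null || colIds.isEmpty()) {
      return new int[0]; // no projection columns requested
    }
    String[] parts = colIds.split(",");
    int[] ids = new int[parts.length];
    for (int j = 0; j < parts.length; j++) {
      ids[j] = Integer.parseInt(parts[j]); // malformed ids throw NumberFormatException,
                                           // as in the original method
    }
    return ids;
  }

  public static void main(String[] args) {
    // Simulates conf.get("hive.io.file.readcolumn.ids") returning "0,2,5".
    System.out.println(Arrays.toString(parseColumnIds("0,2,5"))); // prints [0, 2, 5]
  }
}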
Use of org.apache.carbondata.core.scan.result.iterator.ChunkRowIterator in project carbondata by apache.
The class CarbonRecordReader, method initialize.
@Override
public void initialize(InputSplit inputSplit, TaskAttemptContext context)
    throws IOException, InterruptedException {
  // The input split can contain a single HDFS block or multiple blocks, so first get all
  // the blocks and then set them in the query model.
  List<CarbonInputSplit> splitList;
  if (inputSplit instanceof CarbonInputSplit) {
    splitList = new ArrayList<>(1);
    splitList.add((CarbonInputSplit) inputSplit);
  } else if (inputSplit instanceof CarbonMultiBlockSplit) {
    // Contains multiple blocks; this is an optimization for concurrent queries.
    CarbonMultiBlockSplit multiBlockSplit = (CarbonMultiBlockSplit) inputSplit;
    splitList = multiBlockSplit.getAllSplits();
  } else {
    throw new RuntimeException("unsupported input split type: " + inputSplit);
  }
  List<TableBlockInfo> tableBlockInfoList = CarbonInputSplit.createBlocks(splitList);
  queryModel.setTableBlockInfos(tableBlockInfoList);
  readSupport.initialize(queryModel.getProjectionColumns(), queryModel.getTable());
  try {
    // Execute the query and wrap the batched results in a row-wise iterator.
    carbonIterator = new ChunkRowIterator(queryExecutor.execute(queryModel));
  } catch (QueryExecutionException e) {
    throw new InterruptedException(e.getMessage());
  }
}
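In both readers, ChunkRowIterator adapts the batched output of queryExecutor.execute(queryModel) into a row-at-a-time iterator that the record reader can drain. Below is a minimal, self-contained sketch of that flattening pattern, assuming batches are modeled as List<Object[]>; the class and field names here are hypothetical stand-ins, not carbondata's actual iterator types.

import java.util.Collections;
import java.util.Iterator;
import java.util.List;
import java.util.NoSuchElementException;

// Hypothetical illustration of the batch-to-row flattening that
// ChunkRowIterator performs: the executor yields batches of rows, and
// this iterator exposes them one Object[] row at a time.
public class RowFlatteningIterator implements Iterator<Object[]> {
  private final Iterator<List<Object[]>> batches; // stand-in for the executor's batch iterator
  private Iterator<Object[]> current = Collections.emptyIterator();

  public RowFlatteningIterator(Iterator<List<Object[]>> batches) {
    this.batches = batches;
  }

  @Override
  public boolean hasNext() {
    // Advance lazily to the next non-empty batch, if any.
    while (!current.hasNext() && batches.hasNext()) {
      current = batches.next().iterator();
    }
    return current.hasNext();
  }

  @Override
  public Object[] next() {
    if (!hasNext()) {
      throw new NoSuchElementException();
    }
    return current.next();
  }
}

Advancing inside hasNext() keeps at most one batch materialized at a time, which is the point of iterating chunk by chunk rather than collecting the full result set.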