Example usage of org.apache.hadoop.hive.ql.exec.vector.MapColumnVector in the Apache Flink project:
the getResults method of the class OrcBulkRowDataWriterTest.
/**
 * Reads every row of the given ORC file and converts each one into a {@link RowData}.
 *
 * <p>Each produced row has four fields, taken column-by-column from the vectorized
 * batch: field 0 a string, field 1 an int, field 2 a list, field 3 a map
 * (matching the four column vectors cast below).
 *
 * @param reader the ORC reader to consume; assumes its schema has exactly these four
 *     columns in this order — TODO confirm against the writer side of the test
 * @return all rows contained in the file, in read order
 * @throws IOException if reading the ORC file fails
 */
private static List<RowData> getResults(Reader reader) throws IOException {
    List<RowData> results = new ArrayList<>();
    // try-with-resources closes the RecordReader exactly once, after ALL batches are
    // consumed. The previous version called close() inside the while loop, so the loop
    // condition invoked nextBatch() on an already-closed reader from the second
    // iteration on, and never closed the reader at all for an empty file.
    try (RecordReader recordReader = reader.rows()) {
        VectorizedRowBatch batch = reader.getSchema().createRowBatch();
        while (recordReader.nextBatch(batch)) {
            BytesColumnVector stringVector = (BytesColumnVector) batch.cols[0];
            LongColumnVector intVector = (LongColumnVector) batch.cols[1];
            ListColumnVector listVector = (ListColumnVector) batch.cols[2];
            MapColumnVector mapVector = (MapColumnVector) batch.cols[3];
            // batch.size is the number of valid rows in this batch (may be < capacity).
            for (int r = 0; r < batch.size; r++) {
                GenericRowData readRowData = new GenericRowData(4);
                readRowData.setField(0, readStringData(stringVector, r));
                readRowData.setField(1, readInt(intVector, r));
                readRowData.setField(2, readList(listVector, r));
                readRowData.setField(3, readMap(mapVector, r));
                results.add(readRowData);
            }
        }
    }
    return results;
}
Aggregations