Usage of org.apache.flink.table.data.columnar.vector.writable.WritableColumnVector in the Apache Flink project.
From the class ParquetColumnarRowSplitReader, method nextBatch.
/**
 * Advances the reader to the next batch of rows.
 *
 * <p>The writable vectors and the batch row count are cleared up front so that
 * no stale data from a previous batch is ever visible, even when this call
 * reports end-of-input.
 *
 * @return {@code true} if a fresh batch of rows was loaded, {@code false} when
 *     every row of the split has already been returned
 * @throws IOException if loading the next row group fails
 */
private boolean nextBatch() throws IOException {
    // Wipe out whatever the previous batch left behind.
    for (int i = 0; i < writableVectors.length; i++) {
        writableVectors[i].reset();
    }
    columnarBatch.setNumRows(0);

    if (rowsReturned >= totalRowCount) {
        // All rows of this split have been handed out already.
        return false;
    }
    if (rowsReturned == totalCountLoadedSoFar) {
        // The current row group is exhausted; pull in the next one.
        readNextRowGroup();
    }

    // Read at most one batch, and never past the rows loaded so far.
    int rowsToRead = (int) Math.min(batchSize, totalCountLoadedSoFar - rowsReturned);
    for (int i = 0; i < columnReaders.length; i++) {
        // noinspection unchecked
        columnReaders[i].readToVector(rowsToRead, writableVectors[i]);
    }

    rowsReturned += rowsToRead;
    columnarBatch.setNumRows(rowsToRead);
    rowsInBatch = rowsToRead;
    return true;
}
Usage of org.apache.flink.table.data.columnar.vector.writable.WritableColumnVector in the Apache Flink project.
From the class ParquetVectorizedInputFormat, method createReaderBatch.
/**
 * Builds a pooled reader batch for the given split: allocates the writable
 * column vectors for the requested schema, wraps their readable views into a
 * {@link VectorizedColumnBatch}, and delegates to the batch-constructing
 * overload together with the pool recycler.
 */
private ParquetReaderBatch<T> createReaderBatch(
        SplitT split,
        MessageType requestedSchema,
        Pool.Recycler<ParquetReaderBatch<T>> recycler) {
    final WritableColumnVector[] vectors = createWritableVectors(requestedSchema);
    final VectorizedColumnBatch batch =
            batchFactory.create(split, createReadableVectors(vectors));
    return createReaderBatch(vectors, batch, recycler);
}
Aggregations