Use of org.apache.drill.exec.vector.accessor.ScalarWriter in project drill by apache.
The class HDF5BatchReader, method writeIntListColumn.
/**
 * Helper function to write a 1D int list
 *
 * @param rowWriter the row to which the data will be written
 * @param name the name of the list column
 * @param list the list of data
 */
private void writeIntListColumn(TupleWriter rowWriter, String name, int[] list) {
  int index = rowWriter.tupleSchema().index(name);
  if (index == -1) {
    // The column does not exist yet, so add it as a REPEATED INT column
    ColumnMetadata colSchema = MetadataUtils.newScalar(name, TypeProtos.MinorType.INT, TypeProtos.DataMode.REPEATED);
    index = rowWriter.addColumn(colSchema);
  }
  ScalarWriter arrayWriter = rowWriter.column(index).array().scalar();
  // Cap the number of elements written during a preview (metadata) query
  int maxElements = Math.min(list.length, PREVIEW_ROW_LIMIT);
  for (int i = 0; i < maxElements; i++) {
    arrayWriter.setInt(list[i]);
  }
}
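Helpers like this are invoked once per row from the reader's batch loop. A minimal sketch of such a call site, assuming hypothetical moreRows() and nextIntArray() helpers; RowSetLoader extends TupleWriter, so it can be passed straight through:

// Hypothetical call site; moreRows() and nextIntArray() are illustrative
// stand-ins, not part of the Drill API or of HDF5BatchReader.
while (!rowWriter.isFull() && moreRows()) {
  rowWriter.start();                                         // begin a new row
  writeIntListColumn(rowWriter, "int_data", nextIntArray()); // fill the repeated column
  rowWriter.save();                                          // commit the row
}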
Use of org.apache.drill.exec.vector.accessor.ScalarWriter in project drill by apache.
The class HDF5BatchReader, method writeByteColumn.
/**
 * Helper function to write a 1D byte column
 *
 * @param rowWriter The row to which the data will be written
 * @param name The column name
 * @param value The value to be written
 */
private void writeByteColumn(TupleWriter rowWriter, String name, byte value) {
  ScalarWriter colWriter = getColWriter(rowWriter, name, MinorType.TINYINT);
  // ScalarWriter has no byte-specific setter; the byte is widened and written as an int
  colWriter.setInt(value);
}
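getColWriter itself is not shown in this listing. Judging from the pattern the other snippets follow (look up the column, add it if missing, return its scalar writer), a plausible sketch is:

// Sketch of getColWriter, reconstructed from the pattern above; the actual
// Drill source may differ in its details.
private ScalarWriter getColWriter(TupleWriter rowWriter, String name, TypeProtos.MinorType type) {
  int index = rowWriter.tupleSchema().index(name);
  if (index == -1) {
    ColumnMetadata colSchema = MetadataUtils.newScalar(name, type, TypeProtos.DataMode.OPTIONAL);
    index = rowWriter.addColumn(colSchema);
  }
  return rowWriter.scalar(index);
}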
Use of org.apache.drill.exec.vector.accessor.ScalarWriter in project drill by apache.
The class HDF5BatchReader, method writeFloat8ListColumn.
private void writeFloat8ListColumn(TupleWriter rowWriter, String name, double[] list) {
  int index = rowWriter.tupleSchema().index(name);
  if (index == -1) {
    ColumnMetadata colSchema = MetadataUtils.newScalar(name, TypeProtos.MinorType.FLOAT8, TypeProtos.DataMode.REPEATED);
    index = rowWriter.addColumn(colSchema);
  }
  ScalarWriter arrayWriter = rowWriter.column(index).array().scalar();
  int maxElements = Math.min(list.length, PREVIEW_ROW_LIMIT);
  for (int i = 0; i < maxElements; i++) {
    arrayWriter.setDouble(list[i]);
  }
}
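PREVIEW_ROW_LIMIT (and PREVIEW_COL_LIMIT below) are not defined in this listing; they cap how much of a dataset is materialized during a preview/metadata query. A sketch of how such constants might be declared, with illustrative values rather than Drill's actual ones:

// Illustrative values only; the real constants in HDF5BatchReader may differ.
private static final int PREVIEW_ROW_LIMIT = 20;
private static final int PREVIEW_COL_LIMIT = 100;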
Use of org.apache.drill.exec.vector.accessor.ScalarWriter in project drill by apache.
The class HDF5BatchReader, method doubleMatrixHelper.
private void doubleMatrixHelper(double[][] colData, int cols, int rows, RowSetLoader rowWriter) {
  // This is the case where a dataset is projected in a metadata query. The result should be a list of lists.
  TupleMetadata nestedSchema = new SchemaBuilder()
      .addRepeatedList(DOUBLE_COLUMN_NAME)
      .addArray(TypeProtos.MinorType.FLOAT8)
      .resumeSchema()
      .buildSchema();
  int index = rowWriter.tupleSchema().index(DOUBLE_COLUMN_NAME);
  if (index == -1) {
    index = rowWriter.addColumn(nestedSchema.column(DOUBLE_COLUMN_NAME));
  }
  // The outer array
  ArrayWriter listWriter = rowWriter.column(index).array();
  // The inner array
  ArrayWriter innerWriter = listWriter.array();
  // The doubles within the inner array
  ScalarWriter floatWriter = innerWriter.scalar();
  int maxElements = Math.min(colData.length, PREVIEW_ROW_LIMIT);
  int maxCols = Math.min(colData[0].length, PREVIEW_COL_LIMIT);
  for (int i = 0; i < maxElements; i++) {
    for (int k = 0; k < maxCols; k++) {
      floatWriter.setDouble(colData[i][k]);
    }
    // Close the current inner array; the next iteration starts a new one
    listWriter.save();
  }
}
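Each listWriter.save() closes one inner array, so a 2x2 input yields a single repeated-list cell shaped like [[1.0, 2.0], [3.0, 4.0]]. A hypothetical driver, assuming the caller wraps the helper in the usual start()/save() row bracketing:

// Hypothetical driver; illustrates the writer nesting only.
double[][] matrix = { {1.0, 2.0}, {3.0, 4.0} };
rowWriter.start();                            // begin the row
doubleMatrixHelper(matrix, 2, 2, rowWriter);  // writes [[1.0, 2.0], [3.0, 4.0]]
rowWriter.save();                             // commit the row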
Use of org.apache.drill.exec.vector.accessor.ScalarWriter in project drill by apache.
The class ShpBatchReader, method writeStringColumn.
private void writeStringColumn(TupleWriter rowWriter, String name, String value) {
  int index = rowWriter.tupleSchema().index(name);
  if (index == -1) {
    ColumnMetadata colSchema = MetadataUtils.newScalar(name, TypeProtos.MinorType.VARCHAR, TypeProtos.DataMode.OPTIONAL);
    index = rowWriter.addColumn(colSchema);
  }
  ScalarWriter colWriter = rowWriter.scalar(index);
  colWriter.setString(value);
}
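Since the column is OPTIONAL, a missing attribute can simply be skipped; an unwritten nullable cell is left null for that row. A null-safe variant (a sketch, with the hypothetical name writeStringColumnSafe):

// Sketch of a null-safe variant; writeStringColumnSafe is a hypothetical name.
private void writeStringColumnSafe(TupleWriter rowWriter, String name, String value) {
  if (value == null) {
    // Skipping the write leaves the OPTIONAL cell null for this row
    return;
  }
  int index = rowWriter.tupleSchema().index(name);
  if (index == -1) {
    ColumnMetadata colSchema = MetadataUtils.newScalar(name, TypeProtos.MinorType.VARCHAR, TypeProtos.DataMode.OPTIONAL);
    index = rowWriter.addColumn(colSchema);
  }
  rowWriter.scalar(index).setString(value);
}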