Use of org.apache.drill.exec.store.hdf5.writers.HDF5IntDataWriter in the Apache Drill project.
The buildSchemaFor2DimensionalDataset method of the HDF5BatchReader class.
/**
 * Constructs the Drill schema for a dataset having two or more dimensions.
 * HDF5 restricts multidimensional (>1D) data to INT, LONG, DOUBLE and FLOAT,
 * so this method covers fewer types than its 1D counterpart. The schema is
 * built by appending one DataWriter per column to the dataWriters list.
 *
 * @param dataset
 *          The dataset from which Drill derives the schema
 */
private void buildSchemaFor2DimensionalDataset(Dataset dataset) {
  MinorType dataType = HDF5Utils.getDataType(dataset.getDataType());
  // Bail out when the HDF5 type has no Drill mapping (null / unknown type).
  if (dataType == null) {
    logger.warn("Couldn't add {}", dataset.getJavaType().getName());
    return;
  }
  long columnCount = dimensions[1];
  for (int col = 0; col < columnCount; col++) {
    String fieldName;
    switch (dataType) {
      case INT:
        fieldName = INT_COLUMN_PREFIX + col;
        dataWriters.add(new HDF5IntDataWriter(hdfFile, writerSpec, readerConfig.defaultPath, fieldName, col));
        break;
      case BIGINT:
        fieldName = LONG_COLUMN_PREFIX + col;
        dataWriters.add(new HDF5LongDataWriter(hdfFile, writerSpec, readerConfig.defaultPath, fieldName, col));
        break;
      case FLOAT8:
        fieldName = DOUBLE_COLUMN_PREFIX + col;
        dataWriters.add(new HDF5DoubleDataWriter(hdfFile, writerSpec, readerConfig.defaultPath, fieldName, col));
        break;
      case FLOAT4:
        fieldName = FLOAT_COLUMN_PREFIX + col;
        dataWriters.add(new HDF5FloatDataWriter(hdfFile, writerSpec, readerConfig.defaultPath, fieldName, col));
        break;
      default:
        // NOTE(review): presumably unreachable for valid multidimensional HDF5
        // data, since the supported types are all handled above — confirm.
        throw new UnsupportedOperationException(dataType.name());
    }
  }
}
Aggregations