Use of org.apache.drill.common.types.TypeProtos.MajorType in project drill by apache.
From the class BatchSchema, method equals().
@Override
public boolean equals(Object obj) {
  if (this == obj) {
    return true;
  }
  if (obj == null) {
    return false;
  }
  if (getClass() != obj.getClass()) {
    return false;
  }
  BatchSchema other = (BatchSchema) obj;
  if (fields == null) {
    // Both field lists are null: skip the per-column type comparison below
    // (the original code fell through to fields.size() here and would NPE)
    // and go straight to the selection-vector check.
    if (other.fields != null) {
      return false;
    }
  } else {
    if (!fields.equals(other.fields)) {
      return false;
    }
    // fields.equals() guarantees equal sizes; the MajorTypes are still
    // compared explicitly via majorTypeEqual.
    for (int i = 0; i < fields.size(); i++) {
      MajorType t1 = fields.get(i).getType();
      MajorType t2 = other.fields.get(i).getType();
      if (t1 == null) {
        if (t2 != null) {
          return false;
        }
      } else {
        if (!majorTypeEqual(t1, t2)) {
          return false;
        }
      }
    }
  }
  if (selectionVectorMode != other.selectionVectorMode) {
    return false;
  }
  return true;
}
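The helper majorTypeEqual is referenced above but not shown. As a hypothetical sketch only, a comparison of this kind might check the minor type, data mode, and the precision/scale fields that the protobuf-generated MajorType carries; the actual Drill implementation may compare different or additional attributes.

// Hypothetical sketch of a majorTypeEqual helper; not the actual Drill code.
private static boolean majorTypeEqual(MajorType t1, MajorType t2) {
  return t1.getMinorType() == t2.getMinorType()
      && t1.getMode() == t2.getMode()
      && t1.getPrecision() == t2.getPrecision()
      && t1.getScale() == t2.getScale();
}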
Use of org.apache.drill.common.types.TypeProtos.MajorType in project drill by apache.
From the class MapUtility, method writeToMapFromReader().
/*
 * Function to read a value from the field reader, detect the type, construct the appropriate value holder
 * and use the value holder to write to the Map.
 */
// TODO: This should be templatized and generated using FreeMarker.
public static void writeToMapFromReader(FieldReader fieldReader, BaseWriter.MapWriter mapWriter) {
  try {
    MajorType valueMajorType = fieldReader.getType();
    MinorType valueMinorType = valueMajorType.getMinorType();
    boolean repeated = valueMajorType.getMode() == TypeProtos.DataMode.REPEATED;
    switch (valueMinorType) {
      case TINYINT:
        if (repeated) {
          fieldReader.copyAsValue(mapWriter.list(MappifyUtility.fieldValue).tinyInt());
        } else {
          fieldReader.copyAsValue(mapWriter.tinyInt(MappifyUtility.fieldValue));
        }
        break;
      case SMALLINT:
        if (repeated) {
          fieldReader.copyAsValue(mapWriter.list(MappifyUtility.fieldValue).smallInt());
        } else {
          fieldReader.copyAsValue(mapWriter.smallInt(MappifyUtility.fieldValue));
        }
        break;
      case BIGINT:
        if (repeated) {
          fieldReader.copyAsValue(mapWriter.list(MappifyUtility.fieldValue).bigInt());
        } else {
          fieldReader.copyAsValue(mapWriter.bigInt(MappifyUtility.fieldValue));
        }
        break;
      case INT:
        if (repeated) {
          fieldReader.copyAsValue(mapWriter.list(MappifyUtility.fieldValue).integer());
        } else {
          fieldReader.copyAsValue(mapWriter.integer(MappifyUtility.fieldValue));
        }
        break;
      case UINT1:
        if (repeated) {
          fieldReader.copyAsValue(mapWriter.list(MappifyUtility.fieldValue).uInt1());
        } else {
          fieldReader.copyAsValue(mapWriter.uInt1(MappifyUtility.fieldValue));
        }
        break;
      case UINT2:
        if (repeated) {
          fieldReader.copyAsValue(mapWriter.list(MappifyUtility.fieldValue).uInt2());
        } else {
          fieldReader.copyAsValue(mapWriter.uInt2(MappifyUtility.fieldValue));
        }
        break;
      case UINT4:
        if (repeated) {
          fieldReader.copyAsValue(mapWriter.list(MappifyUtility.fieldValue).uInt4());
        } else {
          fieldReader.copyAsValue(mapWriter.uInt4(MappifyUtility.fieldValue));
        }
        break;
      case UINT8:
        if (repeated) {
          fieldReader.copyAsValue(mapWriter.list(MappifyUtility.fieldValue).uInt8());
        } else {
          fieldReader.copyAsValue(mapWriter.uInt8(MappifyUtility.fieldValue));
        }
        break;
      case DECIMAL9:
        if (repeated) {
          fieldReader.copyAsValue(mapWriter.list(MappifyUtility.fieldValue).decimal9());
        } else {
          fieldReader.copyAsValue(mapWriter.decimal9(MappifyUtility.fieldValue));
        }
        break;
      case DECIMAL18:
        if (repeated) {
          fieldReader.copyAsValue(mapWriter.list(MappifyUtility.fieldValue).decimal18());
        } else {
          fieldReader.copyAsValue(mapWriter.decimal18(MappifyUtility.fieldValue));
        }
        break;
      case DECIMAL28SPARSE:
        if (repeated) {
          fieldReader.copyAsValue(mapWriter.list(MappifyUtility.fieldValue).decimal28Sparse());
        } else {
          fieldReader.copyAsValue(mapWriter.decimal28Sparse(MappifyUtility.fieldValue));
        }
        break;
      case DECIMAL38SPARSE:
        if (repeated) {
          fieldReader.copyAsValue(mapWriter.list(MappifyUtility.fieldValue).decimal38Sparse());
        } else {
          fieldReader.copyAsValue(mapWriter.decimal38Sparse(MappifyUtility.fieldValue));
        }
        break;
      case DATE:
        if (repeated) {
          fieldReader.copyAsValue(mapWriter.list(MappifyUtility.fieldValue).date());
        } else {
          fieldReader.copyAsValue(mapWriter.date(MappifyUtility.fieldValue));
        }
        break;
      case TIME:
        if (repeated) {
          fieldReader.copyAsValue(mapWriter.list(MappifyUtility.fieldValue).time());
        } else {
          fieldReader.copyAsValue(mapWriter.time(MappifyUtility.fieldValue));
        }
        break;
      case TIMESTAMP:
        if (repeated) {
          fieldReader.copyAsValue(mapWriter.list(MappifyUtility.fieldValue).timeStamp());
        } else {
          fieldReader.copyAsValue(mapWriter.timeStamp(MappifyUtility.fieldValue));
        }
        break;
      case INTERVAL:
        if (repeated) {
          fieldReader.copyAsValue(mapWriter.list(MappifyUtility.fieldValue).interval());
        } else {
          fieldReader.copyAsValue(mapWriter.interval(MappifyUtility.fieldValue));
        }
        break;
      case INTERVALDAY:
        if (repeated) {
          fieldReader.copyAsValue(mapWriter.list(MappifyUtility.fieldValue).intervalDay());
        } else {
          fieldReader.copyAsValue(mapWriter.intervalDay(MappifyUtility.fieldValue));
        }
        break;
      case INTERVALYEAR:
        if (repeated) {
          fieldReader.copyAsValue(mapWriter.list(MappifyUtility.fieldValue).intervalYear());
        } else {
          fieldReader.copyAsValue(mapWriter.intervalYear(MappifyUtility.fieldValue));
        }
        break;
      case FLOAT4:
        if (repeated) {
          fieldReader.copyAsValue(mapWriter.list(MappifyUtility.fieldValue).float4());
        } else {
          fieldReader.copyAsValue(mapWriter.float4(MappifyUtility.fieldValue));
        }
        break;
      case FLOAT8:
        if (repeated) {
          fieldReader.copyAsValue(mapWriter.list(MappifyUtility.fieldValue).float8());
        } else {
          fieldReader.copyAsValue(mapWriter.float8(MappifyUtility.fieldValue));
        }
        break;
      case BIT:
        if (repeated) {
          fieldReader.copyAsValue(mapWriter.list(MappifyUtility.fieldValue).bit());
        } else {
          fieldReader.copyAsValue(mapWriter.bit(MappifyUtility.fieldValue));
        }
        break;
      case VARCHAR:
        if (repeated) {
          fieldReader.copyAsValue(mapWriter.list(MappifyUtility.fieldValue).varChar());
        } else {
          fieldReader.copyAsValue(mapWriter.varChar(MappifyUtility.fieldValue));
        }
        break;
      case VARBINARY:
        if (repeated) {
          fieldReader.copyAsValue(mapWriter.list(MappifyUtility.fieldValue).varBinary());
        } else {
          fieldReader.copyAsValue(mapWriter.varBinary(MappifyUtility.fieldValue));
        }
        break;
      case MAP:
        if (repeated) {
          fieldReader.copyAsValue(mapWriter.list(MappifyUtility.fieldValue).map());
        } else {
          fieldReader.copyAsValue(mapWriter.map(MappifyUtility.fieldValue));
        }
        break;
      case LIST:
        fieldReader.copyAsValue(mapWriter.list(MappifyUtility.fieldValue).list());
        break;
      default:
        throw new DrillRuntimeException(String.format("kvgen does not support input of type: %s", valueMinorType));
    }
  } catch (ClassCastException e) {
    final MaterializedField field = fieldReader.getField();
    throw new DrillRuntimeException(String.format(TYPE_MISMATCH_ERROR, field.getPath(), field.getType()));
  }
}
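Every branch of the switch above keys off the same two MajorType accessors: getMinorType() selects the writer method and getMode() decides between the list and scalar variants. A minimal standalone sketch of that dispatch follows; TypeProtos.MajorType is a protobuf-generated class, so it exposes a builder, and the type here is constructed by hand purely for illustration (in the method above it comes from fieldReader.getType()).

import org.apache.drill.common.types.TypeProtos.DataMode;
import org.apache.drill.common.types.TypeProtos.MajorType;
import org.apache.drill.common.types.TypeProtos.MinorType;

public class MajorTypeDispatchSketch {
  public static void main(String[] args) {
    // Hand-built REPEATED VARCHAR type for illustration.
    MajorType type = MajorType.newBuilder()
        .setMinorType(MinorType.VARCHAR)
        .setMode(DataMode.REPEATED)
        .build();
    // The two accessors that drive the switch: minor type picks the writer,
    // mode picks mapWriter.list(...).varChar() vs. mapWriter.varChar(...).
    System.out.println(type.getMinorType());                 // VARCHAR
    System.out.println(type.getMode() == DataMode.REPEATED); // true
  }
}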
Use of org.apache.drill.common.types.TypeProtos.MajorType in project drill by apache.
From the class PreparedStatementProvider, method serializeColumn().
/**
 * Serialize the given {@link SerializedField} into a {@link ResultColumnMetadata}.
 * @param field the serialized column description
 * @return the column metadata for the prepared statement response
 */
private static ResultColumnMetadata serializeColumn(SerializedField field) {
  final ResultColumnMetadata.Builder builder = ResultColumnMetadata.newBuilder();
  final MajorType majorType = field.getMajorType();
  final MinorType minorType = majorType.getMinorType();
  /**
   * Defaults to "DRILL" as Drill has only one catalog.
   */
  builder.setCatalogName(InfoSchemaConstants.IS_CATALOG_NAME);
  /**
   * Designated column's schema name. Empty string if not applicable. The initial implementation defaults to an
   * empty string, as we use LIMIT 0 queries to get the schema and the schema info is lost. If we derive the schema
   * from the plan, we may get the right value.
   */
  builder.setSchemaName("");
  /**
   * Designated column's table name. Not set if not applicable. The initial implementation defaults to an empty
   * string, as we use LIMIT 0 queries to get the schema and the table info is lost. If we derive the table from
   * the plan, we may get the right value.
   */
  builder.setTableName("");
  builder.setColumnName(field.getNamePart().getName());
  /**
   * Column label name for display or print purposes.
   * E.g. a column named "empName" might be labeled as "Employee Name".
   * The initial implementation defaults to the same value as the column name.
   */
  builder.setLabel(field.getNamePart().getName());
  /**
   * Data type in string format. Value is the SQL standard type.
   */
  builder.setDataType(Types.getSqlTypeName(majorType));
  builder.setIsNullable(majorType.getMode() == DataMode.OPTIONAL);
  /**
   * For numeric data, this is the maximum precision.
   * For character data, this is the length in characters.
   * For datetime data types, this is the length in characters of the String representation
   * (assuming the maximum allowed precision of the fractional seconds component).
   * For binary data, this is the length in bytes.
   * For all other types, 0 is returned where the column size is not applicable.
   */
  builder.setPrecision(Types.getPrecision(field.getMajorType()));
  /**
   * Column's number of digits to the right of the decimal point. 0 is returned for types where the scale is not
   * applicable.
   */
  builder.setScale(Types.getScale(majorType));
  /**
   * Indicates whether values in the designated column are signed numbers.
   */
  builder.setSigned(Types.isNumericType(majorType));
  /**
   * Maximum number of characters required to display data from the column.
   */
  builder.setDisplaySize(Types.getJdbcDisplaySize(majorType));
  /**
   * Whether the column is an aliased column. The initial implementation defaults to true, as we derive the schema
   * from a LIMIT 0 query and not from the plan.
   */
  builder.setIsAliased(true);
  builder.setSearchability(ColumnSearchability.ALL);
  builder.setUpdatability(ColumnUpdatability.READ_ONLY);
  builder.setAutoIncrement(false);
  builder.setCaseSensitivity(false);
  builder.setSortable(Types.isSortable(minorType));
  /**
   * The fully-qualified name of the Java class whose instances are manufactured when ResultSet.getObject is called
   * to retrieve a value from the column. Applicable only to JDBC clients.
   */
  builder.setClassName(DRILL_TYPE_TO_JDBC_CLASSNAME.get(minorType));
  builder.setIsCurrency(false);
  return builder.build();
}
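The precision, scale, and nullability reported in the metadata all flow directly from the MajorType. A small sketch, using a hand-built nullable DECIMAL18 type and the same Types helpers the method above calls (the exact values these helpers return for other types may vary by Drill version):

import org.apache.drill.common.types.TypeProtos.DataMode;
import org.apache.drill.common.types.TypeProtos.MajorType;
import org.apache.drill.common.types.TypeProtos.MinorType;
import org.apache.drill.common.types.Types;

public class ColumnMetadataSketch {
  public static void main(String[] args) {
    // Hand-built DECIMAL18(18, 2); serializeColumn receives this via field.getMajorType().
    MajorType decimalType = MajorType.newBuilder()
        .setMinorType(MinorType.DECIMAL18)
        .setMode(DataMode.OPTIONAL)
        .setPrecision(18)
        .setScale(2)
        .build();
    System.out.println(Types.getSqlTypeName(decimalType));           // SQL standard type name
    System.out.println(Types.getPrecision(decimalType));             // 18
    System.out.println(Types.getScale(decimalType));                 // 2
    System.out.println(decimalType.getMode() == DataMode.OPTIONAL);  // true -> nullable
  }
}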
Use of org.apache.drill.common.types.TypeProtos.MajorType in project drill by apache.
From the class KuduRecordReader, method initCols().
private void initCols(Schema schema) throws SchemaChangeException {
  ImmutableList.Builder<ProjectedColumnInfo> pciBuilder = ImmutableList.builder();
  for (int i = 0; i < schema.getColumnCount(); i++) {
    ColumnSchema col = schema.getColumnByIndex(i);
    final String name = col.getName();
    final Type kuduType = col.getType();
    MinorType minorType = TYPES.get(kuduType);
    if (minorType == null) {
      logger.warn("Ignoring column that is unsupported.",
          UserException.unsupportedError()
              .message("A column you queried has a data type that is not currently supported by the Kudu storage plugin. "
                  + "The column's name was %s and its Kudu data type was %s. ", name, kuduType.toString())
              .addContext("column Name", name)
              .addContext("plugin", "kudu")
              .build(logger));
      continue;
    }
    MajorType majorType;
    if (col.isNullable()) {
      majorType = Types.optional(minorType);
    } else {
      majorType = Types.required(minorType);
    }
    MaterializedField field = MaterializedField.create(name, majorType);
    final Class<? extends ValueVector> clazz =
        (Class<? extends ValueVector>) TypeHelper.getValueVectorClass(minorType, majorType.getMode());
    ValueVector vector = output.addField(field, clazz);
    vector.allocateNew();
    ProjectedColumnInfo pci = new ProjectedColumnInfo();
    pci.vv = vector;
    pci.kuduColumn = col;
    pci.index = i;
    pciBuilder.add(pci);
  }
  projectedCols = pciBuilder.build();
}
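The Kudu column's nullable flag maps directly to the Drill DataMode of the resulting MajorType. A minimal sketch of that mapping, using the same Types factory methods as the reader above:

import org.apache.drill.common.types.TypeProtos.MajorType;
import org.apache.drill.common.types.TypeProtos.MinorType;
import org.apache.drill.common.types.Types;

public class NullabilitySketch {
  public static void main(String[] args) {
    MajorType nullable = Types.optional(MinorType.INT);  // for col.isNullable() == true
    MajorType required = Types.required(MinorType.INT);  // for col.isNullable() == false
    System.out.println(nullable.getMode()); // OPTIONAL
    System.out.println(required.getMode()); // REQUIRED
  }
}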
Use of org.apache.drill.common.types.TypeProtos.MajorType in project drill by apache.
From the class JdbcRecordReader, method setup().
@Override
public void setup(OperatorContext operatorContext, OutputMutator output) throws ExecutionSetupException {
  try {
    this.operatorContext = operatorContext;
    connection = source.getConnection();
    statement = connection.createStatement();
    resultSet = statement.executeQuery(sql);
    final ResultSetMetaData meta = resultSet.getMetaData();
    final int columns = meta.getColumnCount();
    ImmutableList.Builder<ValueVector> vectorBuilder = ImmutableList.builder();
    ImmutableList.Builder<Copier<?>> copierBuilder = ImmutableList.builder();
    for (int i = 1; i <= columns; i++) {
      final String name = meta.getColumnLabel(i);
      final int jdbcType = meta.getColumnType(i);
      final int width = meta.getPrecision(i);
      final int scale = meta.getScale(i);
      MinorType minorType = JDBC_TYPE_MAPPINGS.get(jdbcType);
      if (minorType == null) {
        logger.warn("Ignoring column that is unsupported.",
            UserException.unsupportedError()
                .message("A column you queried has a data type that is not currently supported by the JDBC storage plugin. "
                    + "The column's name was %s and its JDBC data type was %s. ", name, nameFromType(jdbcType))
                .addContext("sql", sql)
                .addContext("column Name", name)
                .addContext("plugin", storagePluginName)
                .build(logger));
        continue;
      }
      final MajorType type = Types.optional(minorType);
      final MaterializedField field = MaterializedField.create(name, type);
      final Class<? extends ValueVector> clazz =
          (Class<? extends ValueVector>) TypeHelper.getValueVectorClass(minorType, type.getMode());
      ValueVector vector = output.addField(field, clazz);
      vectorBuilder.add(vector);
      copierBuilder.add(getCopier(jdbcType, i, resultSet, vector));
    }
    vectors = vectorBuilder.build();
    copiers = copierBuilder.build();
  } catch (SQLException | SchemaChangeException e) {
    throw UserException.dataReadError(e)
        .message("The JDBC storage plugin failed while trying to set up the SQL query. ")
        .addContext("sql", sql)
        .addContext("plugin", storagePluginName)
        .build(logger);
  }
}
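JDBC_TYPE_MAPPINGS is referenced above but not shown; it translates java.sql.Types codes into Drill MinorTypes, and every mapped column is then wrapped as an OPTIONAL MajorType since JDBC columns are treated as nullable. A plausible sketch of such a table with a few illustrative entries follows; the actual mapping in Drill's JdbcRecordReader is more complete and may differ.

import java.sql.Types;
import org.apache.drill.common.types.TypeProtos.MinorType;
import com.google.common.collect.ImmutableMap;

class JdbcTypeMappingSketch {
  // Hypothetical subset; the real JDBC_TYPE_MAPPINGS covers many more java.sql.Types codes.
  static final ImmutableMap<Integer, MinorType> JDBC_TYPE_MAPPINGS =
      ImmutableMap.<Integer, MinorType>builder()
          .put(Types.INTEGER, MinorType.INT)
          .put(Types.BIGINT, MinorType.BIGINT)
          .put(Types.DOUBLE, MinorType.FLOAT8)
          .put(Types.VARCHAR, MinorType.VARCHAR)
          .put(Types.TIMESTAMP, MinorType.TIMESTAMP)
          .build();
}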