Usage of org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils.getTypeInfoFromTypeString in the Apache Hive project.
From the class VectorizedBatchUtil, method convertToStandardStructObjectInspector.
/**
 * Builds a {@link StandardStructObjectInspector} mirroring the given struct inspector,
 * with each field's inspector replaced by the standard writable inspector for its type.
 *
 * @param structObjectInspector the source struct inspector whose fields are copied
 * @return a standard struct inspector with the same field names and writable field inspectors
 * @throws HiveException declared for API compatibility with callers
 */
public static StandardStructObjectInspector convertToStandardStructObjectInspector(StructObjectInspector structObjectInspector) throws HiveException {
List<? extends StructField> fields = structObjectInspector.getAllStructFieldRefs();
// Program to the List interface and presize both lists to the known field count.
List<ObjectInspector> oids = new ArrayList<ObjectInspector>(fields.size());
List<String> columnNames = new ArrayList<String>(fields.size());
for (StructField field : fields) {
// Round-trip through the type name to obtain a TypeInfo, then map it to the
// standard writable inspector for that type.
TypeInfo typeInfo = TypeInfoUtils.getTypeInfoFromTypeString(field.getFieldObjectInspector().getTypeName());
ObjectInspector standardWritableObjectInspector = TypeInfoUtils.getStandardWritableObjectInspectorFromTypeInfo(typeInfo);
oids.add(standardWritableObjectInspector);
columnNames.add(field.getFieldName());
}
return ObjectInspectorFactory.getStandardStructObjectInspector(columnNames, oids);
}
Usage of org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils.getTypeInfoFromTypeString in the Apache Hive project.
From the class VectorizedBatchUtil, method createColumnVector.
/**
 * Creates a {@link ColumnVector} suitable for holding values of the given Hive type name.
 *
 * @param typeName a Hive type string (case-insensitive), e.g. "int", "varchar(10)",
 *                 or the undecorated scratch names "char"/"varchar"
 * @return a new column vector of the appropriate concrete type
 */
public static ColumnVector createColumnVector(String typeName) {
typeName = typeName.toLowerCase();
// Undecorated CHAR and VARCHAR (no length parameter) are used as scratch column
// type names and cannot be parsed by getTypeInfoFromTypeString, so handle them here.
if (typeName.equals("char") || typeName.equals("varchar")) {
return new BytesColumnVector(VectorizedRowBatch.DEFAULT_SIZE);
}
// getTypeInfoFromTypeString already returns TypeInfo; the previous cast was redundant.
TypeInfo typeInfo = TypeInfoUtils.getTypeInfoFromTypeString(typeName);
return createColumnVector(typeInfo);
}
Usage of org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils.getTypeInfoFromTypeString in the Druid project (druid-io).
From the class OrcHadoopInputRowParser, method initialize.
/**
 * Lazily derives the type string from the parse spec if absent, validates that it
 * describes a struct, initializes the serde with the corresponding table properties,
 * and caches the resulting struct object inspector in {@code oip}.
 *
 * @throws IllegalArgumentException if {@code typeString} does not parse to a struct type
 * @throws IllegalStateException if the serde cannot supply an object inspector
 */
private void initialize() {
if (typeString == null) {
typeString = typeStringFromParseSpec(parseSpec);
}
TypeInfo typeInfo = TypeInfoUtils.getTypeInfoFromTypeString(typeString);
// Use Guava's template overload so the message is only formatted on failure.
Preconditions.checkArgument(typeInfo instanceof StructTypeInfo, "typeString should be struct type but not [%s]", typeString);
Properties table = getTablePropertiesFromStructTypeInfo((StructTypeInfo) typeInfo);
serde.initialize(new Configuration(), table);
try {
oip = (StructObjectInspector) serde.getObjectInspector();
} catch (SerDeException e) {
// Previously this swallowed the exception with printStackTrace(), leaving oip null
// and deferring the failure to a confusing NPE downstream; fail fast with the cause.
throw new IllegalStateException("Failed to get ObjectInspector for typeString [" + typeString + "]", e);
}
}
Usage of org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils.getTypeInfoFromTypeString in the Apache Hive project.
From the class TypeInfoUtils, method typeInfosFromStructObjectInspector.
/**
 * Extracts the {@link TypeInfo} of every field of the given struct inspector,
 * in field order.
 *
 * @param structObjectInspector the struct inspector to read field types from
 * @return a list with one TypeInfo per struct field, in declaration order
 */
public static ArrayList<TypeInfo> typeInfosFromStructObjectInspector(StructObjectInspector structObjectInspector) {
List<? extends StructField> structFields = structObjectInspector.getAllStructFieldRefs();
ArrayList<TypeInfo> result = new ArrayList<TypeInfo>(structFields.size());
for (StructField structField : structFields) {
// Each field's TypeInfo is recovered by parsing its inspector's type name.
String fieldTypeName = structField.getFieldObjectInspector().getTypeName();
result.add(TypeInfoUtils.getTypeInfoFromTypeString(fieldTypeName));
}
return result;
}
Usage of org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils.getTypeInfoFromTypeString in the Apache Drill project.
From the class HivePartitionDescriptor, method getVectorType.
/**
 * Resolves the Drill vector type for a Hive partition column by looking up the
 * column's Hive type in the table's partition-name-to-type map and translating it
 * to a Drill minor type. The result is always nullable (OPTIONAL mode).
 *
 * @param column the partition column to resolve
 * @param plannerSettings planner options consulted during type translation
 * @return the OPTIONAL-mode major type for the partition column
 */
@Override
public TypeProtos.MajorType getVectorType(SchemaPath column, PlannerSettings plannerSettings) {
final HiveScan hiveScan = (HiveScan) scanRel.getGroupScan();
final String partitionName = column.getAsNamePart().getName();
final String hiveType = hiveScan.hiveReadEntry.table.getPartitionNameTypeMap().get(partitionName);
// NOTE(review): assumes partition column types are always primitive in Hive — the
// cast below would fail otherwise; confirm against the table metadata contract.
final PrimitiveTypeInfo typeInfo = (PrimitiveTypeInfo) TypeInfoUtils.getTypeInfoFromTypeString(hiveType);
final TypeProtos.MinorType minorType = HiveUtilities.getMinorTypeFromHivePrimitiveTypeInfo(typeInfo, plannerSettings.getOptions());
return TypeProtos.MajorType.newBuilder()
.setMinorType(minorType)
.setMode(TypeProtos.DataMode.OPTIONAL)
.build();
}
Aggregations