Use of org.apache.hadoop.hive.serde2.typeinfo.StructTypeInfo in project hive by apache.
The class VectorRandomRowSource, method getObjectInspector.
private ObjectInspector getObjectInspector(TypeInfo typeInfo) {
  final ObjectInspector objectInspector;
  switch (typeInfo.getCategory()) {
    case PRIMITIVE: {
      final PrimitiveTypeInfo primitiveType = (PrimitiveTypeInfo) typeInfo;
      objectInspector = PrimitiveObjectInspectorFactory.getPrimitiveWritableObjectInspector(primitiveType);
      break;
    }
    case MAP: {
      final MapTypeInfo mapType = (MapTypeInfo) typeInfo;
      final MapObjectInspector mapInspector = ObjectInspectorFactory.getStandardMapObjectInspector(
          getObjectInspector(mapType.getMapKeyTypeInfo()), getObjectInspector(mapType.getMapValueTypeInfo()));
      objectInspector = mapInspector;
      break;
    }
    case LIST: {
      final ListTypeInfo listType = (ListTypeInfo) typeInfo;
      final ListObjectInspector listInspector = ObjectInspectorFactory.getStandardListObjectInspector(
          getObjectInspector(listType.getListElementTypeInfo()));
      objectInspector = listInspector;
      break;
    }
    case STRUCT: {
      final StructTypeInfo structType = (StructTypeInfo) typeInfo;
      final List<TypeInfo> fieldTypes = structType.getAllStructFieldTypeInfos();
      final List<ObjectInspector> fieldInspectors = new ArrayList<ObjectInspector>();
      for (TypeInfo fieldType : fieldTypes) {
        fieldInspectors.add(getObjectInspector(fieldType));
      }
      final StructObjectInspector structInspector = ObjectInspectorFactory.getStandardStructObjectInspector(
          structType.getAllStructFieldNames(), fieldInspectors);
      objectInspector = structInspector;
      break;
    }
    case UNION: {
      final UnionTypeInfo unionType = (UnionTypeInfo) typeInfo;
      final List<TypeInfo> fieldTypes = unionType.getAllUnionObjectTypeInfos();
      final List<ObjectInspector> fieldInspectors = new ArrayList<ObjectInspector>();
      for (TypeInfo fieldType : fieldTypes) {
        fieldInspectors.add(getObjectInspector(fieldType));
      }
      final UnionObjectInspector unionInspector = ObjectInspectorFactory.getStandardUnionObjectInspector(fieldInspectors);
      objectInspector = unionInspector;
      break;
    }
    default:
      throw new RuntimeException("Unexpected category " + typeInfo.getCategory());
  }
  Preconditions.checkState(objectInspector != null);
  return objectInspector;
}
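The method maps every TypeInfo category onto the matching standard writable ObjectInspector, recursing into the element, field, and alternative types of the complex categories. A minimal standalone sketch of the same recursion, assuming only Hive's serde2 classes on the classpath (the class name, type string, and the trimmed set of handled categories are illustrative):

import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorFactory;
import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorFactory;
import org.apache.hadoop.hive.serde2.typeinfo.ListTypeInfo;
import org.apache.hadoop.hive.serde2.typeinfo.PrimitiveTypeInfo;
import org.apache.hadoop.hive.serde2.typeinfo.StructTypeInfo;
import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo;
import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils;

public class OIDemo {
  public static void main(String[] args) {
    // Parse a nested Hive type string into a TypeInfo tree.
    StructTypeInfo rowType = (StructTypeInfo)
        TypeInfoUtils.getTypeInfoFromTypeString("struct<id:int,tags:array<string>>");
    // Prints the round-tripped type name: struct<id:int,tags:array<string>>
    System.out.println(toInspector(rowType).getTypeName());
  }

  // Same recursion as getObjectInspector above, trimmed to three categories.
  static ObjectInspector toInspector(TypeInfo t) {
    switch (t.getCategory()) {
      case PRIMITIVE:
        return PrimitiveObjectInspectorFactory.getPrimitiveWritableObjectInspector((PrimitiveTypeInfo) t);
      case LIST:
        return ObjectInspectorFactory.getStandardListObjectInspector(
            toInspector(((ListTypeInfo) t).getListElementTypeInfo()));
      case STRUCT: {
        StructTypeInfo s = (StructTypeInfo) t;
        List<ObjectInspector> fields = new ArrayList<ObjectInspector>();
        for (TypeInfo ft : s.getAllStructFieldTypeInfos()) {
          fields.add(toInspector(ft));
        }
        return ObjectInspectorFactory.getStandardStructObjectInspector(s.getAllStructFieldNames(), fields);
      }
      default:
        throw new IllegalArgumentException("Unhandled category " + t.getCategory());
    }
  }
}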
Use of org.apache.hadoop.hive.serde2.typeinfo.StructTypeInfo in project carbondata by apache.
The class CarbonHiveSerDe, method initialize.
@Override
public void initialize(@Nullable Configuration configuration, Properties tbl) throws SerDeException {
  final TypeInfo rowTypeInfo;
  final List<String> columnNames;
  final List<TypeInfo> columnTypes;
  // Get column names and sort order
  assert configuration != null;
  final String columnNameProperty = tbl.getProperty(serdeConstants.LIST_COLUMNS);
  final String columnTypeProperty = tbl.getProperty(serdeConstants.LIST_COLUMN_TYPES);
  if (columnNameProperty.length() == 0) {
    columnNames = new ArrayList<String>();
  } else {
    columnNames = Arrays.asList(columnNameProperty.split(","));
  }
  if (columnTypeProperty.length() == 0) {
    columnTypes = new ArrayList<TypeInfo>();
  } else {
    columnTypes = TypeInfoUtils.getTypeInfosFromTypeString(columnTypeProperty);
  }
  // Create row related objects
  rowTypeInfo = TypeInfoFactory.getStructTypeInfo(columnNames, columnTypes);
  this.objInspector = new CarbonObjectInspector((StructTypeInfo) rowTypeInfo);
  // Stats part
  serializedSize = 0;
  deserializedSize = 0;
  status = LAST_OPERATION.UNKNOWN;
}
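The SerDe rebuilds its row type from two table properties: LIST_COLUMNS carries the comma-separated column names and LIST_COLUMN_TYPES the corresponding types. A minimal sketch of that step in isolation (the property values and class name are illustrative):

import java.util.Arrays;
import java.util.List;
import java.util.Properties;
import org.apache.hadoop.hive.serde.serdeConstants;
import org.apache.hadoop.hive.serde2.typeinfo.StructTypeInfo;
import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo;
import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory;
import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils;

public class RowTypeDemo {
  public static void main(String[] args) {
    Properties tbl = new Properties();
    // The same properties the metastore passes into SerDe.initialize().
    tbl.setProperty(serdeConstants.LIST_COLUMNS, "id,name");
    tbl.setProperty(serdeConstants.LIST_COLUMN_TYPES, "int:string");
    List<String> names = Arrays.asList(tbl.getProperty(serdeConstants.LIST_COLUMNS).split(","));
    List<TypeInfo> types =
        TypeInfoUtils.getTypeInfosFromTypeString(tbl.getProperty(serdeConstants.LIST_COLUMN_TYPES));
    StructTypeInfo rowType = (StructTypeInfo) TypeInfoFactory.getStructTypeInfo(names, types);
    System.out.println(rowType.getTypeName()); // struct<id:int,name:string>
  }
}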
Use of org.apache.hadoop.hive.serde2.typeinfo.StructTypeInfo in project incubator-gobblin by apache.
The class HiveAvroORCQueryGenerator, method escapeHiveType.
/**
 * Escape the field names of a nested Hive type.
 * @param type Primitive or nested Hive type string.
 * @return Hive type string with all nested field names escaped in backquotes.
 */
public static String escapeHiveType(String type) {
  TypeInfo typeInfo = TypeInfoUtils.getTypeInfoFromTypeString(type);
  if (ObjectInspector.Category.PRIMITIVE.equals(typeInfo.getCategory())) {
    // Primitive
    return type;
  } else if (ObjectInspector.Category.LIST.equals(typeInfo.getCategory())) {
    // List
    ListTypeInfo listTypeInfo = (ListTypeInfo) typeInfo;
    return org.apache.hadoop.hive.serde.serdeConstants.LIST_TYPE_NAME + "<"
        + escapeHiveType(listTypeInfo.getListElementTypeInfo().getTypeName()) + ">";
  } else if (ObjectInspector.Category.MAP.equals(typeInfo.getCategory())) {
    // Map
    MapTypeInfo mapTypeInfo = (MapTypeInfo) typeInfo;
    return org.apache.hadoop.hive.serde.serdeConstants.MAP_TYPE_NAME + "<"
        + escapeHiveType(mapTypeInfo.getMapKeyTypeInfo().getTypeName()) + ","
        + escapeHiveType(mapTypeInfo.getMapValueTypeInfo().getTypeName()) + ">";
  } else if (ObjectInspector.Category.STRUCT.equals(typeInfo.getCategory())) {
    // Struct
    StructTypeInfo structTypeInfo = (StructTypeInfo) typeInfo;
    List<String> allStructFieldNames = structTypeInfo.getAllStructFieldNames();
    List<TypeInfo> allStructFieldTypeInfos = structTypeInfo.getAllStructFieldTypeInfos();
    StringBuilder sb = new StringBuilder();
    sb.append(serdeConstants.STRUCT_TYPE_NAME + "<");
    for (int i = 0; i < allStructFieldNames.size(); i++) {
      if (i > 0) {
        sb.append(",");
      }
      sb.append("`");
      sb.append(allStructFieldNames.get(i));
      sb.append("`");
      sb.append(":");
      sb.append(escapeHiveType(allStructFieldTypeInfos.get(i).getTypeName()));
    }
    sb.append(">");
    return sb.toString();
  } else if (ObjectInspector.Category.UNION.equals(typeInfo.getCategory())) {
    // Union
    UnionTypeInfo unionTypeInfo = (UnionTypeInfo) typeInfo;
    List<TypeInfo> allUnionObjectTypeInfos = unionTypeInfo.getAllUnionObjectTypeInfos();
    StringBuilder sb = new StringBuilder();
    sb.append(serdeConstants.UNION_TYPE_NAME + "<");
    for (int i = 0; i < allUnionObjectTypeInfos.size(); i++) {
      if (i > 0) {
        sb.append(",");
      }
      sb.append(escapeHiveType(allUnionObjectTypeInfos.get(i).getTypeName()));
    }
    sb.append(">");
    return sb.toString();
  } else {
    throw new RuntimeException("Unknown type encountered: " + type);
  }
}
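A quick illustration of the escaping behavior, assuming the Gobblin class is on the classpath (the type string is made up; only struct field names gain backquotes, while primitive type names pass through unchanged):

public class EscapeDemo {
  public static void main(String[] args) {
    // Import of HiveAvroORCQueryGenerator omitted; its package depends on the Gobblin version.
    String escaped = HiveAvroORCQueryGenerator.escapeHiveType("struct<count:int,tags:array<string>>");
    System.out.println(escaped); // struct<`count`:int,`tags`:array<string>>
  }
}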
Use of org.apache.hadoop.hive.serde2.typeinfo.StructTypeInfo in project drill by axbaretto.
The class HiveRecordReader, method init.
private void init() throws ExecutionSetupException {
  final JobConf job = new JobConf(hiveConf);
  // Get the configured default value
  defaultPartitionValue = hiveConf.get(ConfVars.DEFAULTPARTITIONNAME.varname);
  Properties tableProperties;
  try {
    tableProperties = MetaStoreUtils.getTableMetadata(table);
    final Properties partitionProperties = (partition == null) ? tableProperties :
        HiveUtilities.getPartitionMetadata(partition, table);
    HiveUtilities.addConfToJob(job, partitionProperties);
    final SerDe tableSerDe = createSerDe(job, table.getSd().getSerdeInfo().getSerializationLib(), tableProperties);
    final StructObjectInspector tableOI = getStructOI(tableSerDe);
    if (partition != null) {
      partitionSerDe = createSerDe(job, partition.getSd().getSerdeInfo().getSerializationLib(), partitionProperties);
      partitionOI = getStructOI(partitionSerDe);
      finalOI = (StructObjectInspector) ObjectInspectorConverters.getConvertedOI(partitionOI, tableOI);
      partTblObjectInspectorConverter = ObjectInspectorConverters.getConverter(partitionOI, finalOI);
      job.setInputFormat(HiveUtilities.getInputFormatClass(job, partition.getSd(), table));
    } else {
      // For non-partitioned tables there is no need to create a converter, as no schema changes are expected.
      partitionSerDe = tableSerDe;
      partitionOI = tableOI;
      partTblObjectInspectorConverter = null;
      finalOI = tableOI;
      job.setInputFormat(HiveUtilities.getInputFormatClass(job, table.getSd(), table));
    }
    // Get the list of partition column names
    final List<String> partitionNames = Lists.newArrayList();
    for (FieldSchema field : table.getPartitionKeys()) {
      partitionNames.add(field.getName());
    }
    // We should always get the column names from the ObjectInspector. For some tables (e.g. Avro) the
    // metastore may not contain the schema; instead it is derived from other sources such as table
    // properties or an external file. The SerDe object knows how to get the schema with all the config
    // and table properties passed in initialization. The ObjectInspector created from the SerDe object
    // has the schema.
    final StructTypeInfo sTypeInfo = (StructTypeInfo) TypeInfoUtils.getTypeInfoFromObjectInspector(finalOI);
    final List<String> tableColumnNames = sTypeInfo.getAllStructFieldNames();
    // Select the list of columns for projection pushdown into the Hive SerDe readers.
    final List<Integer> columnIds = Lists.newArrayList();
    if (isStarQuery()) {
      selectedColumnNames = tableColumnNames;
      for (int i = 0; i < selectedColumnNames.size(); i++) {
        columnIds.add(i);
      }
      selectedPartitionNames = partitionNames;
    } else {
      selectedColumnNames = Lists.newArrayList();
      for (SchemaPath field : getColumns()) {
        String columnName = field.getRootSegment().getPath();
        if (partitionNames.contains(columnName)) {
          selectedPartitionNames.add(columnName);
        } else {
          columnIds.add(tableColumnNames.indexOf(columnName));
          selectedColumnNames.add(columnName);
        }
      }
    }
    ColumnProjectionUtils.appendReadColumns(job, columnIds, selectedColumnNames);
    for (String columnName : selectedColumnNames) {
      ObjectInspector fieldOI = finalOI.getStructFieldRef(columnName).getFieldObjectInspector();
      TypeInfo typeInfo = TypeInfoUtils.getTypeInfoFromTypeString(fieldOI.getTypeName());
      selectedColumnObjInspectors.add(fieldOI);
      selectedColumnTypes.add(typeInfo);
      selectedColumnFieldConverters.add(HiveFieldConverter.create(typeInfo, fragmentContext));
    }
    for (int i = 0; i < table.getPartitionKeys().size(); i++) {
      FieldSchema field = table.getPartitionKeys().get(i);
      if (selectedPartitionNames.contains(field.getName())) {
        TypeInfo pType = TypeInfoUtils.getTypeInfoFromTypeString(field.getType());
        selectedPartitionTypes.add(pType);
        if (partition != null) {
          selectedPartitionValues.add(HiveUtilities.convertPartitionType(pType, partition.getValues().get(i), defaultPartitionValue));
        }
      }
    }
  } catch (Exception e) {
    throw new ExecutionSetupException("Failure while initializing HiveRecordReader: " + e.getMessage(), e);
  }
  if (!empty) {
    try {
      reader = (org.apache.hadoop.mapred.RecordReader<Object, Object>) job.getInputFormat()
          .getRecordReader(inputSplit, job, Reporter.NULL);
    } catch (Exception e) {
      throw new ExecutionSetupException("Failed to get o.a.hadoop.mapred.RecordReader from Hive InputFormat", e);
    }
    key = reader.createKey();
    skipRecordsInspector = new SkipRecordsInspector(tableProperties, reader);
  }
}
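Note how the reader derives the schema by converting finalOI back into a StructTypeInfo rather than trusting the metastore. A condensed sketch of that round trip, with a hand-built standard ObjectInspector standing in for the one a SerDe would produce (names and class name are illustrative):

import java.util.Arrays;
import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorFactory;
import org.apache.hadoop.hive.serde2.objectinspector.StructObjectInspector;
import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorFactory;
import org.apache.hadoop.hive.serde2.typeinfo.StructTypeInfo;
import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils;

public class SchemaFromOIDemo {
  public static void main(String[] args) {
    // Stand-in for the finalOI a SerDe would produce.
    StructObjectInspector oi = ObjectInspectorFactory.getStandardStructObjectInspector(
        Arrays.asList("id", "name"),
        Arrays.<ObjectInspector>asList(
            PrimitiveObjectInspectorFactory.javaIntObjectInspector,
            PrimitiveObjectInspectorFactory.javaStringObjectInspector));
    // Recover the schema exactly as the reader does.
    StructTypeInfo sTypeInfo = (StructTypeInfo) TypeInfoUtils.getTypeInfoFromObjectInspector(oi);
    System.out.println(sTypeInfo.getAllStructFieldNames()); // [id, name]
  }
}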
Use of org.apache.hadoop.hive.serde2.typeinfo.StructTypeInfo in project drill by axbaretto.
The class HiveAbstractReader, method init.
private void init() throws ExecutionSetupException {
  final JobConf job = new JobConf(hiveConf);
  // Get the configured default value
  defaultPartitionValue = hiveConf.get(ConfVars.DEFAULTPARTITIONNAME.varname);
  Properties tableProperties;
  try {
    tableProperties = HiveUtilities.getTableMetadata(table);
    final Properties partitionProperties = (partition == null) ? tableProperties :
        HiveUtilities.getPartitionMetadata(partition, table);
    HiveUtilities.addConfToJob(job, partitionProperties);
    final SerDe tableSerDe = createSerDe(job, table.getSd().getSerdeInfo().getSerializationLib(), tableProperties);
    final StructObjectInspector tableOI = getStructOI(tableSerDe);
    if (partition != null) {
      partitionSerDe = createSerDe(job, partition.getSd().getSerdeInfo().getSerializationLib(), partitionProperties);
      partitionOI = getStructOI(partitionSerDe);
      finalOI = (StructObjectInspector) ObjectInspectorConverters.getConvertedOI(partitionOI, tableOI);
      partTblObjectInspectorConverter = ObjectInspectorConverters.getConverter(partitionOI, finalOI);
      job.setInputFormat(HiveUtilities.getInputFormatClass(job, partition.getSd(), table));
    } else {
      // For non-partitioned tables there is no need to create a converter, as no schema changes are expected.
      partitionSerDe = tableSerDe;
      partitionOI = tableOI;
      partTblObjectInspectorConverter = null;
      finalOI = tableOI;
      job.setInputFormat(HiveUtilities.getInputFormatClass(job, table.getSd(), table));
    }
    if (logger.isTraceEnabled()) {
      for (StructField field : finalOI.getAllStructFieldRefs()) {
        logger.trace("field in finalOI: {}", field.getClass().getName());
      }
      logger.trace("partitionSerDe class is {}", partitionSerDe.getClass().getName());
    }
    // Get the list of partition column names
    final List<String> partitionNames = Lists.newArrayList();
    for (FieldSchema field : table.getPartitionKeys()) {
      partitionNames.add(field.getName());
    }
    // We should always get the column names from the ObjectInspector. For some tables (e.g. Avro) the
    // metastore may not contain the schema; instead it is derived from other sources such as table
    // properties or an external file. The SerDe object knows how to get the schema with all the config
    // and table properties passed in initialization. The ObjectInspector created from the SerDe object
    // has the schema.
    final StructTypeInfo sTypeInfo = (StructTypeInfo) TypeInfoUtils.getTypeInfoFromObjectInspector(finalOI);
    final List<String> tableColumnNames = sTypeInfo.getAllStructFieldNames();
    // Select the list of columns for projection pushdown into the Hive SerDe readers.
    final List<Integer> columnIds = Lists.newArrayList();
    if (isStarQuery()) {
      selectedColumnNames = tableColumnNames;
      for (int i = 0; i < selectedColumnNames.size(); i++) {
        columnIds.add(i);
      }
      selectedPartitionNames = partitionNames;
    } else {
      selectedColumnNames = Lists.newArrayList();
      for (SchemaPath field : getColumns()) {
        String columnName = field.getRootSegment().getPath();
        if (partitionNames.contains(columnName)) {
          selectedPartitionNames.add(columnName);
        } else {
          columnIds.add(tableColumnNames.indexOf(columnName));
          selectedColumnNames.add(columnName);
        }
      }
    }
    ColumnProjectionUtils.appendReadColumns(job, columnIds, selectedColumnNames);
    for (String columnName : selectedColumnNames) {
      StructField fieldRef = finalOI.getStructFieldRef(columnName);
      selectedStructFieldRefs.add(fieldRef);
      ObjectInspector fieldOI = fieldRef.getFieldObjectInspector();
      TypeInfo typeInfo = TypeInfoUtils.getTypeInfoFromTypeString(fieldOI.getTypeName());
      selectedColumnObjInspectors.add(fieldOI);
      selectedColumnTypes.add(typeInfo);
      selectedColumnFieldConverters.add(HiveFieldConverter.create(typeInfo, fragmentContext));
    }
    for (int i = 0; i < selectedColumnNames.size(); ++i) {
      logger.trace("inspector:typeName={}, className={}, TypeInfo: {}, converter:{}",
          selectedColumnObjInspectors.get(i).getTypeName(),
          selectedColumnObjInspectors.get(i).getClass().getName(),
          selectedColumnTypes.get(i).toString(),
          selectedColumnFieldConverters.get(i).getClass().getName());
    }
    for (int i = 0; i < table.getPartitionKeys().size(); i++) {
      FieldSchema field = table.getPartitionKeys().get(i);
      if (selectedPartitionNames.contains(field.getName())) {
        TypeInfo pType = TypeInfoUtils.getTypeInfoFromTypeString(field.getType());
        selectedPartitionTypes.add(pType);
        if (partition != null) {
          selectedPartitionValues.add(HiveUtilities.convertPartitionType(pType, partition.getValues().get(i), defaultPartitionValue));
        }
      }
    }
  } catch (Exception e) {
    throw new ExecutionSetupException("Failure while initializing Hive Reader " + this.getClass().getName(), e);
  }
  if (!empty) {
    try {
      reader = (org.apache.hadoop.mapred.RecordReader<Object, Object>) job.getInputFormat()
          .getRecordReader(inputSplit, job, Reporter.NULL);
      logger.trace("hive reader created: {} for inputSplit {}", reader.getClass().getName(), inputSplit.toString());
    } catch (Exception e) {
      throw new ExecutionSetupException("Failed to get o.a.hadoop.mapred.RecordReader from Hive InputFormat", e);
    }
    internalInit(tableProperties, reader);
  }
}
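Both Drill readers rely on ColumnProjectionUtils.appendReadColumns to push the selected column ids down to the underlying Hive input format. A minimal sketch of what that call records in the JobConf (column names, ids, and the class name are illustrative):

import java.util.Arrays;
import org.apache.hadoop.hive.serde2.ColumnProjectionUtils;
import org.apache.hadoop.mapred.JobConf;

public class ProjectionDemo {
  public static void main(String[] args) {
    JobConf job = new JobConf();
    // Ask the underlying reader (e.g. ORC) to materialize only columns 0 and 2.
    ColumnProjectionUtils.appendReadColumns(job, Arrays.asList(0, 2), Arrays.asList("id", "city"));
    System.out.println(job.get(ColumnProjectionUtils.READ_COLUMN_IDS_CONF_STR));   // 0,2
    System.out.println(job.get(ColumnProjectionUtils.READ_COLUMN_NAMES_CONF_STR)); // id,city
  }
}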