use of org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector.Category.PRIMITIVE in project hive by apache.
the class LazyHBaseCellMap method getMapValueElement.
/**
* Get the value in the map for the given key.
*
* @param key
* @return
*/
@Override
public Object getMapValueElement(Object key) {
  if (!getParsed()) {
    parse();
  }
  for (Map.Entry<Object, Object> entry : cachedMap.entrySet()) {
    LazyPrimitive<?, ?> lazyKeyI = (LazyPrimitive<?, ?>) entry.getKey();
    // getWritableObject() will convert LazyPrimitive to actual primitive
    // writable objects.
    Object keyI = lazyKeyI.getWritableObject();
    if (keyI == null) {
      continue;
    }
    if (keyI.equals(key)) {
      // Got a match, return the value.
      Object _value = entry.getValue();
      // If the value is a LazyObject, unwrap it to the underlying object;
      // else return it as it is.
      if (_value instanceof LazyObject) {
        LazyObject<?> v = (LazyObject<?>) entry.getValue();
        return v == null ? v : v.getObject();
      } else {
        return _value;
      }
    }
  }
  return null;
}
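A subtle point in the loop above is that the caller's key is compared against the writable object produced by the lazy key, so the lookup key must itself be passed in writable form. The following self-contained sketch (the class name KeyEqualityDemo is made up for illustration; it uses only hadoop-common's Text) shows why a plain String would never match:

import org.apache.hadoop.io.Text;

public class KeyEqualityDemo {
  public static void main(String[] args) {
    // What lazyKeyI.getWritableObject() yields for a string-typed key.
    Object writableKey = new Text("cq1");
    System.out.println(writableKey.equals(new Text("cq1"))); // true: Text equals Text
    System.out.println(writableKey.equals("cq1"));           // false: Text never equals String
  }
}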
use of org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector.Category.PRIMITIVE in project hive by apache.
the class TestLazyHBaseObject method testLazyHBaseRow3.
/**
* Test the LazyHBaseRow class with a one-to-one/onto mapping between Hive columns and
* HBase column family/column qualifier pairs. The column types are primitive and fields
* are stored in binary format in HBase.
* @throws SerDeException
*/
@Test
public void testLazyHBaseRow3() throws SerDeException {
  List<TypeInfo> fieldTypeInfos = TypeInfoUtils.getTypeInfosFromTypeString(
      "string,int,tinyint,smallint,bigint,float,double,string,boolean");
  List<String> fieldNames = Arrays.asList(new String[] {
      "key", "c_int", "c_byte", "c_short", "c_long", "c_float", "c_double", "c_string", "c_bool" });
  Text nullSequence = new Text("\\N");
  String hbaseColumnsMapping = ":key#str,cf-int:cq-int#bin,cf-byte:cq-byte#bin,"
      + "cf-short:cq-short#bin,cf-long:cq-long#bin,cf-float:cq-float#bin,cf-double:cq-double#bin,"
      + "cf-string:cq-string#str,cf-bool:cq-bool#bin";
  ColumnMappings columnMappings = null;
  try {
    columnMappings = HBaseSerDe.parseColumnsMapping(hbaseColumnsMapping);
  } catch (SerDeException e) {
    fail(e.toString());
  }
  ColumnMapping[] columnsMapping = columnMappings.getColumnsMapping();
  for (int i = 0; i < columnsMapping.length; i++) {
    ColumnMapping colMap = columnsMapping[i];
    if (i == 0 || i == 7) {
      colMap.binaryStorage.add(false);
    } else {
      colMap.binaryStorage.add(true);
    }
  }
  ObjectInspector oi = LazyFactory.createLazyStructInspector(fieldNames, fieldTypeInfos,
      new byte[] { ' ', ':', '=' }, nullSequence, false, false, (byte) 0);
  LazyHBaseRow o = new LazyHBaseRow((LazySimpleStructObjectInspector) oi, columnMappings);
  byte[] rowKey = "row-key".getBytes();
  List<Cell> kvs = new ArrayList<Cell>();
  byte[] value;
  for (int i = 1; i < columnsMapping.length; i++) {
    switch (i) {
    case 1:
      value = Bytes.toBytes(1);
      break;
    case 2:
      value = new byte[] { (byte) 1 };
      break;
    case 3:
      value = Bytes.toBytes((short) 1);
      break;
    case 4:
      value = Bytes.toBytes((long) 1);
      break;
    case 5:
      value = Bytes.toBytes(1.0F);
      break;
    case 6:
      value = Bytes.toBytes(1.0);
      break;
    case 7:
      value = "Hadoop, Hive, with HBase storage handler.".getBytes();
      break;
    case 8:
      value = Bytes.toBytes(true);
      break;
    default:
      throw new RuntimeException("Not expected: " + i);
    }
    ColumnMapping colMap = columnsMapping[i];
    kvs.add(new KeyValue(rowKey, colMap.familyNameBytes, colMap.qualifierNameBytes, value));
  }
  Collections.sort(kvs, KeyValue.COMPARATOR);
  Result result = Result.create(kvs);
  o.init(result);
  List<? extends StructField> fieldRefs = ((StructObjectInspector) oi).getAllStructFieldRefs();
  for (int i = 0; i < fieldRefs.size(); i++) {
    Object fieldData = ((StructObjectInspector) oi).getStructFieldData(o, fieldRefs.get(i));
    assert (fieldData != null);
    assert (fieldData instanceof LazyPrimitive<?, ?>);
    Writable writable = ((LazyPrimitive<?, ?>) fieldData).getWritableObject();
    switch (i) {
    case 0:
      Text text = new Text("row-key");
      assertEquals(text, writable);
      break;
    case 1:
      IntWritable iw = new IntWritable(1);
      assertEquals(iw, writable);
      break;
    case 2:
      ByteWritable bw = new ByteWritable((byte) 1);
      assertEquals(bw, writable);
      break;
    case 3:
      ShortWritable sw = new ShortWritable((short) 1);
      assertEquals(sw, writable);
      break;
    case 4:
      LongWritable lw = new LongWritable(1);
      assertEquals(lw, writable);
      break;
    case 5:
      FloatWritable fw = new FloatWritable(1.0F);
      assertEquals(fw, writable);
      break;
    case 6:
      DoubleWritable dw = new DoubleWritable(1.0);
      assertEquals(dw, writable);
      break;
    case 7:
      Text t = new Text("Hadoop, Hive, with HBase storage handler.");
      assertEquals(t, writable);
      break;
    case 8:
      BooleanWritable boolWritable = new BooleanWritable(true);
      assertEquals(boolWritable, writable);
      break;
    default:
      fail("Error: Unanticipated value in deserializing fields for HBaseSerDe.");
      break;
    }
  }
}
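As a side note on the #bin qualifiers exercised by this test, the binaryStorage flags simply mean the cell values are the fixed-width big-endian encodings produced by HBase's Bytes utility. A small stand-alone sketch (the class name BinaryEncodingDemo is illustrative only):

import org.apache.hadoop.hbase.util.Bytes;

public class BinaryEncodingDemo {
  public static void main(String[] args) {
    System.out.println(Bytes.toBytes(1).length);          // 4 bytes for an int
    System.out.println(Bytes.toBytes((short) 1).length);  // 2 bytes for a short
    System.out.println(Bytes.toBytes(1L).length);         // 8 bytes for a long
    System.out.println(Bytes.toBytes(true).length);       // 1 byte for a boolean
  }
}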
use of org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector.Category.PRIMITIVE in project hive by apache.
the class LlapRowRecordReader method convertValue.
static Object convertValue(Object val, ObjectInspector oi) {
  if (val == null) {
    return null;
  }
  Object convertedVal = null;
  ObjectInspector.Category oiCategory = oi.getCategory();
  switch (oiCategory) {
  case PRIMITIVE:
    convertedVal = convertPrimitive(val, (PrimitiveObjectInspector) oi);
    break;
  case LIST:
    ListObjectInspector loi = (ListObjectInspector) oi;
    int listSize = loi.getListLength(val);
    // Per ListObjectInspector.getListLength(), a length of -1 means a null list.
    if (listSize < 0) {
      return null;
    }
    List<Object> convertedList = new ArrayList<Object>(listSize);
    ObjectInspector listElementOI = loi.getListElementObjectInspector();
    for (int idx = 0; idx < listSize; ++idx) {
      convertedList.add(convertValue(loi.getListElement(val, idx), listElementOI));
    }
    convertedVal = convertedList;
    break;
  case MAP:
    MapObjectInspector moi = (MapObjectInspector) oi;
    int mapSize = moi.getMapSize(val);
    // Per MapObjectInspector.getMapSize(), a size of -1 means a null map.
    if (mapSize < 0) {
      return null;
    }
    Map<Object, Object> convertedMap = new LinkedHashMap<Object, Object>(mapSize);
    ObjectInspector mapKeyOI = moi.getMapKeyObjectInspector();
    ObjectInspector mapValOI = moi.getMapValueObjectInspector();
    Map<?, ?> mapCol = moi.getMap(val);
    for (Object mapKey : mapCol.keySet()) {
      Object convertedMapKey = convertValue(mapKey, mapKeyOI);
      Object convertedMapVal = convertValue(mapCol.get(mapKey), mapValOI);
      convertedMap.put(convertedMapKey, convertedMapVal);
    }
    convertedVal = convertedMap;
    break;
  case STRUCT:
    StructObjectInspector soi = (StructObjectInspector) oi;
    List<Object> convertedRow = new ArrayList<Object>();
    for (StructField structField : soi.getAllStructFieldRefs()) {
      Object convertedFieldValue =
          convertValue(soi.getStructFieldData(val, structField), structField.getFieldObjectInspector());
      convertedRow.add(convertedFieldValue);
    }
    convertedVal = convertedRow;
    break;
  default:
    throw new IllegalArgumentException("Cannot convert type " + oiCategory);
  }
  return convertedVal;
}
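The LIST and MAP branches rely on the object-inspector convention that a negative size signals a null container. A minimal, self-contained sketch of that convention using the standard (non-LLAP) inspectors from hive-serde; the class name ListLengthDemo is made up for illustration:

import java.util.Arrays;
import java.util.List;
import org.apache.hadoop.hive.serde2.objectinspector.ListObjectInspector;
import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorFactory;
import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorFactory;

public class ListLengthDemo {
  public static void main(String[] args) {
    ListObjectInspector loi = ObjectInspectorFactory.getStandardListObjectInspector(
        PrimitiveObjectInspectorFactory.javaIntObjectInspector);
    List<Integer> data = Arrays.asList(1, 2, 3);
    System.out.println(loi.getListLength(data)); // 3
    System.out.println(loi.getListLength(null)); // -1, i.e. a null list
  }
}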
use of org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector.Category.PRIMITIVE in project hive by apache.
the class DDLPlanUtils method formatType.
/**
* Struct field names are identifiers, so they need to be wrapped in backticks (``).
*/
private String formatType(TypeInfo typeInfo) throws HiveException {
  switch (typeInfo.getCategory()) {
  case PRIMITIVE:
    return typeInfo.getTypeName();
  case STRUCT:
    StringBuilder structFormattedType = new StringBuilder();
    StructTypeInfo structTypeInfo = (StructTypeInfo) typeInfo;
    for (int i = 0; i < structTypeInfo.getAllStructFieldNames().size(); i++) {
      if (structFormattedType.length() != 0) {
        structFormattedType.append(", ");
      }
      String structElementName = structTypeInfo.getAllStructFieldNames().get(i);
      String structElementType = formatType(structTypeInfo.getAllStructFieldTypeInfos().get(i));
      structFormattedType.append("`" + structElementName + "`:" + structElementType);
    }
    return "struct<" + structFormattedType.toString() + ">";
  case LIST:
    ListTypeInfo listTypeInfo = (ListTypeInfo) typeInfo;
    String elementType = formatType(listTypeInfo.getListElementTypeInfo());
    return "array<" + elementType + ">";
  case MAP:
    MapTypeInfo mapTypeInfo = (MapTypeInfo) typeInfo;
    String keyTypeInfo = mapTypeInfo.getMapKeyTypeInfo().getTypeName();
    String valueTypeInfo = formatType(mapTypeInfo.getMapValueTypeInfo());
    return "map<" + keyTypeInfo + "," + valueTypeInfo + ">";
  case UNION:
    StringBuilder unionFormattedType = new StringBuilder();
    UnionTypeInfo unionTypeInfo = (UnionTypeInfo) typeInfo;
    for (TypeInfo unionElementTypeInfo : unionTypeInfo.getAllUnionObjectTypeInfos()) {
      if (unionFormattedType.length() != 0) {
        unionFormattedType.append(", ");
      }
      String unionElementType = formatType(unionElementTypeInfo);
      unionFormattedType.append(unionElementType);
    }
    return "uniontype<" + unionFormattedType.toString() + ">";
  default:
    throw new RuntimeException("Unknown type: " + typeInfo.getCategory());
  }
}
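To make the backtick-quoting concrete: for a nested struct this method would render something like struct<`id`:int,`tags`:array<string>>. Since formatType is private to DDLPlanUtils, the runnable sketch below only shows the kind of TypeInfo it receives, built with the real TypeInfoUtils API; the class name FormatTypeDemo is illustrative:

import org.apache.hadoop.hive.serde2.typeinfo.StructTypeInfo;
import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils;

public class FormatTypeDemo {
  public static void main(String[] args) {
    StructTypeInfo t = (StructTypeInfo)
        TypeInfoUtils.getTypeInfoFromTypeString("struct<id:int,tags:array<string>>");
    System.out.println(t.getAllStructFieldNames());     // [id, tags]
    System.out.println(t.getAllStructFieldTypeInfos()); // [int, array<string>]
  }
}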
use of org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector.Category.PRIMITIVE in project hive by apache.
the class HiveIcebergStorageHandler method collectColumnAndReplaceDummyValues.
/**
* Recursively replaces the ExprNodeDynamicListDesc nodes by a dummy ExprNodeConstantDesc so we can test if we can
* convert the predicate to an Iceberg predicate when pruning the partitions later. Also collects the column names
* in the filter.
* <p>
* Note that the input node may be modified in place; clone it beforehand if that is not acceptable.
* @param node The node we are traversing
* @param foundColumn The column we have already found, if any
* @return the name of the single column referenced in the filter, or null if none was found
*/
private String collectColumnAndReplaceDummyValues(ExprNodeDesc node, String foundColumn) {
  String column = foundColumn;
  List<ExprNodeDesc> children = node.getChildren();
  if (children != null && !children.isEmpty()) {
    ListIterator<ExprNodeDesc> iterator = children.listIterator();
    while (iterator.hasNext()) {
      ExprNodeDesc child = iterator.next();
      if (child instanceof ExprNodeDynamicListDesc) {
        Object dummy;
        switch (((PrimitiveTypeInfo) child.getTypeInfo()).getPrimitiveCategory()) {
        case INT:
        case SHORT:
          dummy = 1;
          break;
        case LONG:
          dummy = 1L;
          break;
        case TIMESTAMP:
        case TIMESTAMPLOCALTZ:
          dummy = new Timestamp();
          break;
        case CHAR:
        case VARCHAR:
        case STRING:
          dummy = "1";
          break;
        case DOUBLE:
        case FLOAT:
        case DECIMAL:
          dummy = 1.1;
          break;
        case DATE:
          dummy = new Date();
          break;
        case BOOLEAN:
          dummy = true;
          break;
        default:
          throw new UnsupportedOperationException(
              "Not supported primitive type in partition pruning: " + child.getTypeInfo());
        }
        iterator.set(new ExprNodeConstantDesc(child.getTypeInfo(), dummy));
      } else {
        String newColumn;
        if (child instanceof ExprNodeColumnDesc) {
          newColumn = ((ExprNodeColumnDesc) child).getColumn();
        } else {
          newColumn = collectColumnAndReplaceDummyValues(child, column);
        }
        if (column != null && newColumn != null && !newColumn.equals(column)) {
          throw new UnsupportedOperationException("Partition pruning does not support filtering for more columns");
        }
        if (column == null) {
          column = newColumn;
        }
      }
    }
  }
  return column;
}