Use of org.apache.hadoop.hive.serde2.objectinspector.MapObjectInspector in project presto by prestodb.
From class TestHiveFileFormats, method hasType.
public static boolean hasType(ObjectInspector objectInspector, PrimitiveCategory... types) {
    if (objectInspector instanceof PrimitiveObjectInspector) {
        PrimitiveObjectInspector primitiveInspector = (PrimitiveObjectInspector) objectInspector;
        PrimitiveCategory primitiveCategory = primitiveInspector.getPrimitiveCategory();
        for (PrimitiveCategory type : types) {
            if (primitiveCategory == type) {
                return true;
            }
        }
        return false;
    }
    if (objectInspector instanceof ListObjectInspector) {
        ListObjectInspector listInspector = (ListObjectInspector) objectInspector;
        return hasType(listInspector.getListElementObjectInspector(), types);
    }
    if (objectInspector instanceof MapObjectInspector) {
        MapObjectInspector mapInspector = (MapObjectInspector) objectInspector;
        return hasType(mapInspector.getMapKeyObjectInspector(), types) || hasType(mapInspector.getMapValueObjectInspector(), types);
    }
    if (objectInspector instanceof StructObjectInspector) {
        for (StructField field : ((StructObjectInspector) objectInspector).getAllStructFieldRefs()) {
            if (hasType(field.getFieldObjectInspector(), types)) {
                return true;
            }
        }
        return false;
    }
    throw new IllegalArgumentException("Unknown object inspector type " + objectInspector);
}
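hasType walks the inspector tree recursively: primitives are matched against the requested categories directly, lists recurse into the element inspector, maps into both the key and value inspectors, and structs into every field. A minimal sketch of exercising it, assuming the sketch lives alongside TestHiveFileFormats so the method is accessible; the nested inspector is illustrative and built with Hive's standard factories:

import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorFactory;
import org.apache.hadoop.hive.serde2.objectinspector.PrimitiveObjectInspector.PrimitiveCategory;
import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorFactory;

public class HasTypeSketch {
    public static void main(String[] args) {
        // Inspector for map<string, array<timestamp>>, built from Hive's standard factories.
        ObjectInspector inspector = ObjectInspectorFactory.getStandardMapObjectInspector(
                PrimitiveObjectInspectorFactory.javaStringObjectInspector,
                ObjectInspectorFactory.getStandardListObjectInspector(
                        PrimitiveObjectInspectorFactory.javaTimestampObjectInspector));

        // true: TIMESTAMP is reached by recursing through the map value into the list element.
        System.out.println(TestHiveFileFormats.hasType(inspector, PrimitiveCategory.TIMESTAMP));
        // false: no DECIMAL appears anywhere in the nested inspectors.
        System.out.println(TestHiveFileFormats.hasType(inspector, PrimitiveCategory.DECIMAL));
    }
}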
Use of org.apache.hadoop.hive.serde2.objectinspector.MapObjectInspector in project presto by prestodb.
From class BlockInputDecoders, method createForMap.
private static BlockInputDecoder createForMap(MapObjectInspector inspector, MapType mapType) {
    Type keyType = mapType.getKeyType();
    Type valueType = mapType.getValueType();
    ObjectInspector keyInspector = inspector.getMapKeyObjectInspector();
    ObjectInspector valueInspector = inspector.getMapValueObjectInspector();
    BlockInputDecoder keyDecoder = createBlockInputDecoder(keyInspector, keyType);
    BlockInputDecoder valueDecoder = createBlockInputDecoder(valueInspector, valueType);
    return (b, i) -> {
        if (b.isNull(i)) {
            return null;
        }
        SingleMapBlock single = (SingleMapBlock) b.getBlock(i);
        int positions = single.getPositionCount();
        HashMap<Object, Object> result = new HashMap<>();
        // SingleMapBlock interleaves entries as key, value, key, value, ..., so step by two positions.
        for (int j = 0; j < positions; j += 2) {
            Object key = keyDecoder.decode(single, j);
            Object value = valueDecoder.decode(single, j + 1);
            // Hive maps cannot contain null keys, so entries whose key decodes to null are dropped.
            if (key != null) {
                result.put(key, value);
            }
        }
        return result;
    };
}
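The lambda relies on SingleMapBlock's interleaved layout: position 0 holds the first key, position 1 its value, and so on, which is why the loop advances by two. BlockInputDecoder itself is not part of the snippet; the sketch below shows the shape of functional interface the lambda would have to satisfy, inferred from the call sites. The interface name, the package of Block, and the signature are assumptions rather than the actual Presto declaration:

import com.facebook.presto.common.block.Block;

// Hypothetical shape of the decoder interface implemented by the lambda above.
public interface BlockInputDecoder {
    // Decode the object at the given position of a block, returning null for SQL NULLs.
    Object decode(Block block, int position);
}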
Use of org.apache.hadoop.hive.serde2.objectinspector.MapObjectInspector in project presto by prestodb.
From class SerDeUtils, method serializeMap.
private static Block serializeMap(Type type, BlockBuilder builder, Object object, MapObjectInspector inspector, boolean filterNullMapKeys) {
    Map<?, ?> map = inspector.getMap(object);
    if (map == null) {
        requireNonNull(builder, "parent builder is null").appendNull();
        return null;
    }
    List<Type> typeParameters = type.getTypeParameters();
    checkArgument(typeParameters.size() == 2, "map must have exactly 2 type parameters");
    Type keyType = typeParameters.get(0);
    Type valueType = typeParameters.get(1);
    ObjectInspector keyInspector = inspector.getMapKeyObjectInspector();
    ObjectInspector valueInspector = inspector.getMapValueObjectInspector();
    BlockBuilder currentBuilder;
    boolean builderSynthesized = false;
    if (builder == null) {
        builderSynthesized = true;
        builder = type.createBlockBuilder(null, 1);
    }
    currentBuilder = builder.beginBlockEntry();
    for (Map.Entry<?, ?> entry : map.entrySet()) {
        // Hive skips map entries with null keys
        if (!filterNullMapKeys || entry.getKey() != null) {
            serializeObject(keyType, currentBuilder, entry.getKey(), keyInspector);
            serializeObject(valueType, currentBuilder, entry.getValue(), valueInspector);
        }
    }
    builder.closeEntry();
    if (builderSynthesized) {
        return (Block) type.getObject(builder, 0);
    } else {
        return null;
    }
}
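serializeMap first materializes the Hive map through the inspector and only then writes alternating key/value entries into the Presto block builder, skipping null-keyed entries when filterNullMapKeys is set. A minimal sketch of the inspector side of that contract, using Hive's standard factories; the concrete key and value types are illustrative:

import java.util.HashMap;
import java.util.Map;
import org.apache.hadoop.hive.serde2.objectinspector.MapObjectInspector;
import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorFactory;
import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorFactory;

public class MapInspectorSketch {
    public static void main(String[] args) {
        // Inspector for map<string, bigint> over plain Java objects.
        MapObjectInspector inspector = ObjectInspectorFactory.getStandardMapObjectInspector(
                PrimitiveObjectInspectorFactory.javaStringObjectInspector,
                PrimitiveObjectInspectorFactory.javaLongObjectInspector);

        Map<String, Long> row = new HashMap<>();
        row.put("a", 1L);
        row.put(null, 2L); // the entry a filterNullMapKeys pass would drop

        // getMap is the first call serializeMap makes; a null result appends NULL to the parent builder instead.
        Map<?, ?> data = inspector.getMap(row);
        System.out.println(data.size()); // prints 2; serializeMap itself would emit only the non-null-key entry
    }
}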
Use of org.apache.hadoop.hive.serde2.objectinspector.MapObjectInspector in project hive by apache.
From class AccumuloRowSerializer, method writeWithLevel.
/**
 * Recursively serialize an Object using its {@link ObjectInspector}, respecting the
 * separators defined by the {@link LazySerDeParameters}.
 *
 * @param oi ObjectInspector for the current object
 * @param value The current object
 * @param output A buffer output is written to
 * @param mapping The mapping for this Hive column
 * @param level The current level/offset for the SerDe separator
 * @throws IOException
 */
protected void writeWithLevel(ObjectInspector oi, Object value, ByteStream.Output output, ColumnMapping mapping, int level) throws IOException {
  switch (oi.getCategory()) {
    case PRIMITIVE:
      if (mapping.getEncoding() == ColumnEncoding.BINARY) {
        this.writeBinary(output, value, (PrimitiveObjectInspector) oi);
      } else {
        this.writeString(output, value, (PrimitiveObjectInspector) oi);
      }
      return;
    case LIST:
      char separator = (char) serDeParams.getSeparators()[level];
      ListObjectInspector loi = (ListObjectInspector) oi;
      List<?> list = loi.getList(value);
      ObjectInspector eoi = loi.getListElementObjectInspector();
      if (list == null) {
        log.debug("No objects found when serializing list");
        return;
      } else {
        for (int i = 0; i < list.size(); i++) {
          if (i > 0) {
            output.write(separator);
          }
          writeWithLevel(eoi, list.get(i), output, mapping, level + 1);
        }
      }
      return;
    case MAP:
      char sep = (char) serDeParams.getSeparators()[level];
      char keyValueSeparator = (char) serDeParams.getSeparators()[level + 1];
      MapObjectInspector moi = (MapObjectInspector) oi;
      ObjectInspector koi = moi.getMapKeyObjectInspector();
      ObjectInspector voi = moi.getMapValueObjectInspector();
      Map<?, ?> map = moi.getMap(value);
      if (map == null) {
        log.debug("No object found when serializing map");
        return;
      } else {
        boolean first = true;
        for (Map.Entry<?, ?> entry : map.entrySet()) {
          if (first) {
            first = false;
          } else {
            output.write(sep);
          }
          writeWithLevel(koi, entry.getKey(), output, mapping, level + 2);
          output.write(keyValueSeparator);
          writeWithLevel(voi, entry.getValue(), output, mapping, level + 2);
        }
      }
      return;
    case STRUCT:
      sep = (char) serDeParams.getSeparators()[level];
      StructObjectInspector soi = (StructObjectInspector) oi;
      List<? extends StructField> fields = soi.getAllStructFieldRefs();
      list = soi.getStructFieldsDataAsList(value);
      if (list == null) {
        log.debug("No object found when serializing struct");
        return;
      } else {
        for (int i = 0; i < list.size(); i++) {
          if (i > 0) {
            output.write(sep);
          }
          writeWithLevel(fields.get(i).getFieldObjectInspector(), list.get(i), output, mapping, level + 1);
        }
      }
      return;
    default:
      throw new RuntimeException("Unknown category type: " + oi.getCategory());
  }
}
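The serializer keeps nested collections unambiguous by drawing separators from LazySerDeParameters by depth: a map at a given level joins its entries with separators[level], joins each key and value with separators[level + 1], and recurses two levels deeper for the entry contents. The standalone sketch below mirrors only the MAP branch for a flat map of strings, using Hive's conventional default separators \001, \002, \003; it is an illustration of the separator scheme, not the Accumulo serializer itself:

import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.util.LinkedHashMap;
import java.util.Map;

public class SeparatorSketch {
    // Mirrors the MAP branch above for a flat map of strings:
    // entries joined by separators[level], key and value joined by separators[level + 1].
    static byte[] writeMap(Map<String, String> map, byte[] separators, int level) throws IOException {
        ByteArrayOutputStream output = new ByteArrayOutputStream();
        boolean first = true;
        for (Map.Entry<String, String> entry : map.entrySet()) {
            if (first) {
                first = false;
            } else {
                output.write(separators[level]);
            }
            output.write(entry.getKey().getBytes(StandardCharsets.UTF_8));
            output.write(separators[level + 1]);
            output.write(entry.getValue().getBytes(StandardCharsets.UTF_8));
        }
        return output.toByteArray();
    }

    public static void main(String[] args) throws IOException {
        Map<String, String> map = new LinkedHashMap<>();
        map.put("k1", "v1");
        map.put("k2", "v2");
        byte[] separators = {1, 2, 3}; // \001, \002, \003, as in Hive's defaults
        byte[] serialized = writeMap(map, separators, 1);
        // serialized is k1\003v1\002k2\003v2; print it with visible stand-ins for the control characters
        System.out.println(new String(serialized, StandardCharsets.UTF_8).replace('\002', '|').replace('\003', '='));
    }
}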
Use of org.apache.hadoop.hive.serde2.objectinspector.MapObjectInspector in project hive by apache.
From class VectorExtractRow, method extractRowColumn.
public Object extractRowColumn(ColumnVector colVector, TypeInfo typeInfo, ObjectInspector objectInspector, int batchIndex) {
  if (colVector == null) {
    // may ask for them..
    return null;
  }
  final int adjustedIndex = (colVector.isRepeating ? 0 : batchIndex);
  if (!colVector.noNulls && colVector.isNull[adjustedIndex]) {
    return null;
  }
  final Category category = typeInfo.getCategory();
  switch (category) {
    case PRIMITIVE:
      {
        final PrimitiveTypeInfo primitiveTypeInfo = (PrimitiveTypeInfo) typeInfo;
        final PrimitiveCategory primitiveCategory = primitiveTypeInfo.getPrimitiveCategory();
        final Writable primitiveWritable = VectorizedBatchUtil.getPrimitiveWritable(primitiveCategory);
        switch (primitiveCategory) {
          case VOID:
            return null;
          case BOOLEAN:
            ((BooleanWritable) primitiveWritable).set(((LongColumnVector) colVector).vector[adjustedIndex] == 0 ? false : true);
            return primitiveWritable;
          case BYTE:
            ((ByteWritable) primitiveWritable).set((byte) ((LongColumnVector) colVector).vector[adjustedIndex]);
            return primitiveWritable;
          case SHORT:
            ((ShortWritable) primitiveWritable).set((short) ((LongColumnVector) colVector).vector[adjustedIndex]);
            return primitiveWritable;
          case INT:
            ((IntWritable) primitiveWritable).set((int) ((LongColumnVector) colVector).vector[adjustedIndex]);
            return primitiveWritable;
          case LONG:
            ((LongWritable) primitiveWritable).set(((LongColumnVector) colVector).vector[adjustedIndex]);
            return primitiveWritable;
          case TIMESTAMP:
            // From java.sql.Timestamp used by vectorization to serializable org.apache.hadoop.hive.common.type.Timestamp
            java.sql.Timestamp ts = ((TimestampColumnVector) colVector).asScratchTimestamp(adjustedIndex);
            Timestamp serializableTS = Timestamp.ofEpochMilli(ts.getTime(), ts.getNanos());
            ((TimestampWritableV2) primitiveWritable).set(serializableTS);
            return primitiveWritable;
          case DATE:
            ((DateWritableV2) primitiveWritable).set((int) ((LongColumnVector) colVector).vector[adjustedIndex]);
            return primitiveWritable;
          case FLOAT:
            ((FloatWritable) primitiveWritable).set((float) ((DoubleColumnVector) colVector).vector[adjustedIndex]);
            return primitiveWritable;
          case DOUBLE:
            ((DoubleWritable) primitiveWritable).set(((DoubleColumnVector) colVector).vector[adjustedIndex]);
            return primitiveWritable;
          case BINARY:
            {
              final BytesColumnVector bytesColVector = ((BytesColumnVector) colVector);
              final byte[] bytes = bytesColVector.vector[adjustedIndex];
              final int start = bytesColVector.start[adjustedIndex];
              final int length = bytesColVector.length[adjustedIndex];
              BytesWritable bytesWritable = (BytesWritable) primitiveWritable;
              if (bytes == null || length == 0) {
                if (length > 0) {
                  nullBytesReadError(primitiveCategory, batchIndex);
                }
                bytesWritable.set(EMPTY_BYTES, 0, 0);
              } else {
                bytesWritable.set(bytes, start, length);
              }
              return primitiveWritable;
            }
          case STRING:
            {
              final BytesColumnVector bytesColVector = ((BytesColumnVector) colVector);
              final byte[] bytes = bytesColVector.vector[adjustedIndex];
              final int start = bytesColVector.start[adjustedIndex];
              final int length = bytesColVector.length[adjustedIndex];
              if (bytes == null || length == 0) {
                if (length > 0) {
                  nullBytesReadError(primitiveCategory, batchIndex);
                }
                ((Text) primitiveWritable).set(EMPTY_BYTES, 0, 0);
              } else {
                // Use org.apache.hadoop.io.Text as our helper to go from byte[] to String.
                ((Text) primitiveWritable).set(bytes, start, length);
              }
              return primitiveWritable;
            }
          case VARCHAR:
            {
              final BytesColumnVector bytesColVector = ((BytesColumnVector) colVector);
              final byte[] bytes = bytesColVector.vector[adjustedIndex];
              final int start = bytesColVector.start[adjustedIndex];
              final int length = bytesColVector.length[adjustedIndex];
              final HiveVarcharWritable hiveVarcharWritable = (HiveVarcharWritable) primitiveWritable;
              if (bytes == null || length == 0) {
                if (length > 0) {
                  nullBytesReadError(primitiveCategory, batchIndex);
                }
                hiveVarcharWritable.set(EMPTY_STRING, -1);
              } else {
                final int adjustedLength = StringExpr.truncate(bytes, start, length, ((VarcharTypeInfo) primitiveTypeInfo).getLength());
                if (adjustedLength == 0) {
                  hiveVarcharWritable.set(EMPTY_STRING, -1);
                } else {
                  hiveVarcharWritable.set(new String(bytes, start, adjustedLength, Charsets.UTF_8), -1);
                }
              }
              return primitiveWritable;
            }
          case CHAR:
            {
              final BytesColumnVector bytesColVector = ((BytesColumnVector) colVector);
              final byte[] bytes = bytesColVector.vector[adjustedIndex];
              final int start = bytesColVector.start[adjustedIndex];
              final int length = bytesColVector.length[adjustedIndex];
              final HiveCharWritable hiveCharWritable = (HiveCharWritable) primitiveWritable;
              final int maxLength = ((CharTypeInfo) primitiveTypeInfo).getLength();
              if (bytes == null || length == 0) {
                if (length > 0) {
                  nullBytesReadError(primitiveCategory, batchIndex);
                }
                hiveCharWritable.set(EMPTY_STRING, maxLength);
              } else {
                final int adjustedLength = StringExpr.rightTrimAndTruncate(bytes, start, length, ((CharTypeInfo) primitiveTypeInfo).getLength());
                if (adjustedLength == 0) {
                  hiveCharWritable.set(EMPTY_STRING, maxLength);
                } else {
                  hiveCharWritable.set(new String(bytes, start, adjustedLength, Charsets.UTF_8), maxLength);
                }
              }
              return primitiveWritable;
            }
          case DECIMAL:
            if (colVector instanceof Decimal64ColumnVector) {
              // A Decimal64ColumnVector stores scaled longs; rebuild the decimal from the long value and the column's scale.
              Decimal64ColumnVector dec64ColVector = (Decimal64ColumnVector) colVector;
              ((HiveDecimalWritable) primitiveWritable).deserialize64(dec64ColVector.vector[adjustedIndex], dec64ColVector.scale);
            } else {
              // The HiveDecimalWritable set method will quickly copy the deserialized decimal writable fields.
              ((HiveDecimalWritable) primitiveWritable).set(((DecimalColumnVector) colVector).vector[adjustedIndex]);
            }
            return primitiveWritable;
          case INTERVAL_YEAR_MONTH:
            ((HiveIntervalYearMonthWritable) primitiveWritable).set((int) ((LongColumnVector) colVector).vector[adjustedIndex]);
            return primitiveWritable;
          case INTERVAL_DAY_TIME:
            ((HiveIntervalDayTimeWritable) primitiveWritable).set(((IntervalDayTimeColumnVector) colVector).asScratchIntervalDayTime(adjustedIndex));
            return primitiveWritable;
          default:
            throw new RuntimeException("Primitive category " + primitiveCategory.name() + " not supported");
        }
      }
    case LIST:
      {
        final ListColumnVector listColumnVector = (ListColumnVector) colVector;
        final ListTypeInfo listTypeInfo = (ListTypeInfo) typeInfo;
        final ListObjectInspector listObjectInspector = (ListObjectInspector) objectInspector;
        final int offset = (int) listColumnVector.offsets[adjustedIndex];
        final int size = (int) listColumnVector.lengths[adjustedIndex];
        final List list = new ArrayList();
        for (int i = 0; i < size; i++) {
          list.add(extractRowColumn(listColumnVector.child, listTypeInfo.getListElementTypeInfo(), listObjectInspector.getListElementObjectInspector(), offset + i));
        }
        return list;
      }
    case MAP:
      {
        final MapColumnVector mapColumnVector = (MapColumnVector) colVector;
        final MapTypeInfo mapTypeInfo = (MapTypeInfo) typeInfo;
        final MapObjectInspector mapObjectInspector = (MapObjectInspector) objectInspector;
        final int offset = (int) mapColumnVector.offsets[adjustedIndex];
        final int size = (int) mapColumnVector.lengths[adjustedIndex];
        final Map<Object, Object> map = new LinkedHashMap<Object, Object>();
        for (int i = 0; i < size; i++) {
          final Object key = extractRowColumn(mapColumnVector.keys, mapTypeInfo.getMapKeyTypeInfo(), mapObjectInspector.getMapKeyObjectInspector(), offset + i);
          final Object value = extractRowColumn(mapColumnVector.values, mapTypeInfo.getMapValueTypeInfo(), mapObjectInspector.getMapValueObjectInspector(), offset + i);
          map.put(key, value);
        }
        return map;
      }
    case STRUCT:
      {
        final StructColumnVector structColumnVector = (StructColumnVector) colVector;
        final StructTypeInfo structTypeInfo = (StructTypeInfo) typeInfo;
        final StandardStructObjectInspector structInspector = (StandardStructObjectInspector) objectInspector;
        final List<TypeInfo> fieldTypeInfos = structTypeInfo.getAllStructFieldTypeInfos();
        final int size = fieldTypeInfos.size();
        final List<? extends StructField> structFields = structInspector.getAllStructFieldRefs();
        final Object struct = structInspector.create();
        for (int i = 0; i < size; i++) {
          final StructField structField = structFields.get(i);
          final TypeInfo fieldTypeInfo = fieldTypeInfos.get(i);
          final Object value = extractRowColumn(structColumnVector.fields[i], fieldTypeInfo, structField.getFieldObjectInspector(), adjustedIndex);
          structInspector.setStructFieldData(struct, structField, value);
        }
        return struct;
      }
    case UNION:
      {
        final UnionTypeInfo unionTypeInfo = (UnionTypeInfo) typeInfo;
        final List<TypeInfo> objectTypeInfos = unionTypeInfo.getAllUnionObjectTypeInfos();
        final UnionObjectInspector unionInspector = (UnionObjectInspector) objectInspector;
        final List<ObjectInspector> unionInspectors = unionInspector.getObjectInspectors();
        final UnionColumnVector unionColumnVector = (UnionColumnVector) colVector;
        final byte tag = (byte) unionColumnVector.tags[adjustedIndex];
        final Object object = extractRowColumn(unionColumnVector.fields[tag], objectTypeInfos.get(tag), unionInspectors.get(tag), adjustedIndex);
        final StandardUnion standardUnion = new StandardUnion();
        standardUnion.setTag(tag);
        standardUnion.setObject(object);
        return standardUnion;
      }
    default:
      throw new RuntimeException("Category " + category.name() + " not supported");
  }
}
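extractRowColumn normalizes two batch-level concerns before dispatching on the type category: a repeating vector is always read at index 0, and a set null flag short-circuits to null. Below is a minimal sketch of driving the method for a repeating int column; it assumes VectorExtractRow can be instantiated and this method called directly without further initialization, which may not hold across Hive versions:

import org.apache.hadoop.hive.ql.exec.vector.LongColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.VectorExtractRow;
import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorFactory;
import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory;

public class ExtractRowColumnSketch {
    public static void main(String[] args) {
        LongColumnVector col = new LongColumnVector(4);
        col.vector[0] = 42;
        col.isRepeating = true; // every row of the batch shares the value at index 0

        // With isRepeating set, batchIndex 3 is still read from index 0, yielding an IntWritable of 42.
        Object value = new VectorExtractRow().extractRowColumn(
                col,
                TypeInfoFactory.intTypeInfo,
                PrimitiveObjectInspectorFactory.writableIntObjectInspector,
                3);
        System.out.println(value); // 42
    }
}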