Use of org.apache.hadoop.hive.serde2.typeinfo.MapTypeInfo in project hive by apache.
The class AvroSerializer, method serializeMap.
private Object serializeMap(MapTypeInfo typeInfo, MapObjectInspector fieldOI, Object structFieldData, Schema schema) throws AvroSerdeException {
  // Avro only allows maps with string keys
  if (!mapHasStringKey(fieldOI.getMapKeyObjectInspector())) {
    throw new AvroSerdeException("Avro only supports maps with keys as Strings. Current Map is: " + typeInfo.toString());
  }
  ObjectInspector mapKeyObjectInspector = fieldOI.getMapKeyObjectInspector();
  ObjectInspector mapValueObjectInspector = fieldOI.getMapValueObjectInspector();
  TypeInfo mapKeyTypeInfo = typeInfo.getMapKeyTypeInfo();
  TypeInfo mapValueTypeInfo = typeInfo.getMapValueTypeInfo();
  Map<?, ?> map = fieldOI.getMap(structFieldData);
  Schema valueType = schema.getValueType();
  Map<Object, Object> serialized = new HashMap<Object, Object>(fieldOI.getMapSize(structFieldData));
  for (Map.Entry<?, ?> entry : map.entrySet()) {
    serialized.put(serialize(mapKeyTypeInfo, mapKeyObjectInspector, entry.getKey(), STRING_SCHEMA),
        serialize(mapValueTypeInfo, mapValueObjectInspector, entry.getValue(), valueType));
  }
  return serialized;
}
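For context, here is a minimal standalone sketch of the same check-then-convert pattern using plain Java collections. The AvroMapConverter class name and the pass-through value handling are illustrative only, not part of Hive; the real method recursively serializes both keys and values against their Avro schemas.

import java.util.HashMap;
import java.util.Map;

public class AvroMapConverter {
  // Mirrors the guard in serializeMap: Avro map keys must be strings.
  public static Map<String, Object> toAvroMap(Map<?, ?> hiveMap) {
    Map<String, Object> result = new HashMap<>(hiveMap.size());
    for (Map.Entry<?, ?> entry : hiveMap.entrySet()) {
      if (!(entry.getKey() instanceof CharSequence)) {
        throw new IllegalArgumentException(
            "Avro only supports maps with String keys, got: " + entry.getKey().getClass());
      }
      // The real code serializes the value against the Avro value schema;
      // here it is passed through unchanged for illustration.
      result.put(entry.getKey().toString(), entry.getValue());
    }
    return result;
  }

  public static void main(String[] args) {
    Map<String, Integer> in = new HashMap<>();
    in.put("a", 1);
    System.out.println(toAvroMap(in)); // prints {a=1}
  }
}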
Use of org.apache.hadoop.hive.serde2.typeinfo.MapTypeInfo in project hive by apache.
The class AvroDeserializer, method deserializeMap.
private Object deserializeMap(Object datum, Schema fileSchema, Schema mapSchema, MapTypeInfo columnType) throws AvroSerdeException {
  // Avro only allows maps with Strings for keys, so we only have to worry
  // about deserializing the values.
  Map<String, Object> map = new HashMap<String, Object>();
  Map<CharSequence, Object> mapDatum = (Map) datum;
  Schema valueSchema = mapSchema.getValueType();
  TypeInfo valueTypeInfo = columnType.getMapValueTypeInfo();
  for (CharSequence key : mapDatum.keySet()) {
    Object value = mapDatum.get(key);
    map.put(key.toString(), worker(value, fileSchema == null ? null : fileSchema.getValueType(), valueSchema, valueTypeInfo));
  }
  return map;
}
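The CharSequence key type is deliberate: Avro's generic readers typically return map keys as org.apache.avro.util.Utf8 rather than java.lang.String, which is why each key goes through toString(). A hedged sketch of just that key normalization, with the recursive value conversion (worker) stubbed out as a pass-through; the MapKeyNormalizer class is illustrative, not part of Hive:

import java.util.HashMap;
import java.util.Map;

public class MapKeyNormalizer {
  // Keys arrive as CharSequence (e.g. Avro's Utf8); Hive needs String keys.
  public static Map<String, Object> normalizeKeys(Map<? extends CharSequence, ?> avroMap) {
    Map<String, Object> result = new HashMap<>(avroMap.size());
    for (Map.Entry<? extends CharSequence, ?> e : avroMap.entrySet()) {
      // The real code converts the value via worker(...); pass it through here.
      result.put(e.getKey().toString(), e.getValue());
    }
    return result;
  }

  public static void main(String[] args) {
    Map<StringBuilder, Object> in = new HashMap<>();
    in.put(new StringBuilder("k"), 42); // StringBuilder stands in for Utf8
    System.out.println(normalizeKeys(in)); // prints {k=42}
  }
}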
Use of org.apache.hadoop.hive.serde2.typeinfo.MapTypeInfo in project hive by apache.
The class LazyFactory, method createLazyObjectInspector.
/**
 * Create a hierarchical ObjectInspector for a LazyObject with the given typeInfo.
 *
 * @param typeInfo The type information for the LazyObject
 * @param separatorIndex The current separator level. List (array), struct, and union use one
 *          level of separator; map uses two levels: the first to delimit entries, the second
 *          to delimit keys from values.
 * @param lazyParams Parameters for lazy types, including the separator table
 * @param option the {@link ObjectInspectorOptions}
 * @return The ObjectInspector
 * @throws SerDeException
 */
public static ObjectInspector createLazyObjectInspector(TypeInfo typeInfo, int separatorIndex, LazyObjectInspectorParameters lazyParams, ObjectInspectorOptions option) throws SerDeException {
  ObjectInspector.Category c = typeInfo.getCategory();
  switch (c) {
  case PRIMITIVE:
    return LazyPrimitiveObjectInspectorFactory.getLazyObjectInspector((PrimitiveTypeInfo) typeInfo, lazyParams);
  case MAP:
    return LazyObjectInspectorFactory.getLazySimpleMapObjectInspector(
        createLazyObjectInspector(((MapTypeInfo) typeInfo).getMapKeyTypeInfo(), separatorIndex + 2, lazyParams, option),
        createLazyObjectInspector(((MapTypeInfo) typeInfo).getMapValueTypeInfo(), separatorIndex + 2, lazyParams, option),
        LazyUtils.getSeparator(lazyParams.getSeparators(), separatorIndex),
        LazyUtils.getSeparator(lazyParams.getSeparators(), separatorIndex + 1),
        lazyParams);
  case LIST:
    return LazyObjectInspectorFactory.getLazySimpleListObjectInspector(
        createLazyObjectInspector(((ListTypeInfo) typeInfo).getListElementTypeInfo(), separatorIndex + 1, lazyParams, option),
        LazyUtils.getSeparator(lazyParams.getSeparators(), separatorIndex),
        lazyParams);
  case STRUCT:
    StructTypeInfo structTypeInfo = (StructTypeInfo) typeInfo;
    List<String> fieldNames = structTypeInfo.getAllStructFieldNames();
    List<TypeInfo> fieldTypeInfos = structTypeInfo.getAllStructFieldTypeInfos();
    List<ObjectInspector> fieldObjectInspectors = new ArrayList<ObjectInspector>(fieldTypeInfos.size());
    for (int i = 0; i < fieldTypeInfos.size(); i++) {
      fieldObjectInspectors.add(createLazyObjectInspector(fieldTypeInfos.get(i), separatorIndex + 1, lazyParams, option));
    }
    return LazyObjectInspectorFactory.getLazySimpleStructObjectInspector(fieldNames, fieldObjectInspectors, null,
        LazyUtils.getSeparator(lazyParams.getSeparators(), separatorIndex), lazyParams, option);
  case UNION:
    UnionTypeInfo unionTypeInfo = (UnionTypeInfo) typeInfo;
    List<ObjectInspector> lazyOIs = new ArrayList<ObjectInspector>();
    for (TypeInfo uti : unionTypeInfo.getAllUnionObjectTypeInfos()) {
      lazyOIs.add(createLazyObjectInspector(uti, separatorIndex + 1, lazyParams, option));
    }
    return LazyObjectInspectorFactory.getLazyUnionObjectInspector(lazyOIs,
        LazyUtils.getSeparator(lazyParams.getSeparators(), separatorIndex), lazyParams);
  }
  throw new RuntimeException("Hive LazySerDe Internal error.");
}
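To make the separator bookkeeping above concrete: a map at separator level i delimits entries with separators[i] and keys from values with separators[i + 1], and its nested key/value types start at level i + 2. A small self-contained illustration (a hypothetical SeparatorLevels class with an invented separator table, not Hive API):

public class SeparatorLevels {
  public static void main(String[] args) {
    char[] separators = { ',', ':', ';' }; // illustrative separator table
    int i = 0;                             // a top-level map
    System.out.println("entry delimiter:     " + separators[i]);
    System.out.println("key/value delimiter: " + separators[i + 1]);
    System.out.println("nested types start at level " + (i + 2));
    // With this table, the text "k1:v1,k2:v2" parses as {k1=v1, k2=v2}.
  }
}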
Use of org.apache.hadoop.hive.serde2.typeinfo.MapTypeInfo in project hive by apache.
The class BinarySortableSerDe, method deserialize.
static Object deserialize(InputByteBuffer buffer, TypeInfo type, boolean invert, byte nullMarker, byte notNullMarker, Object reuse) throws IOException {
  // Is this field a null?
  byte isNull = buffer.read(invert);
  if (isNull == nullMarker) {
    return null;
  }
  assert (isNull == notNullMarker);
  switch (type.getCategory()) {
  case PRIMITIVE: {
    PrimitiveTypeInfo ptype = (PrimitiveTypeInfo) type;
    switch (ptype.getPrimitiveCategory()) {
    case VOID: {
      return null;
    }
    case BOOLEAN: {
      BooleanWritable r = reuse == null ? new BooleanWritable() : (BooleanWritable) reuse;
      byte b = buffer.read(invert);
      assert (b == 1 || b == 2);
      r.set(b == 2);
      return r;
    }
    case BYTE: {
      ByteWritable r = reuse == null ? new ByteWritable() : (ByteWritable) reuse;
      r.set((byte) (buffer.read(invert) ^ 0x80));
      return r;
    }
    case SHORT: {
      ShortWritable r = reuse == null ? new ShortWritable() : (ShortWritable) reuse;
      int v = buffer.read(invert) ^ 0x80;
      v = (v << 8) + (buffer.read(invert) & 0xff);
      r.set((short) v);
      return r;
    }
    case INT: {
      IntWritable r = reuse == null ? new IntWritable() : (IntWritable) reuse;
      r.set(deserializeInt(buffer, invert));
      return r;
    }
    case LONG: {
      LongWritable r = reuse == null ? new LongWritable() : (LongWritable) reuse;
      r.set(deserializeLong(buffer, invert));
      return r;
    }
    case FLOAT: {
      FloatWritable r = reuse == null ? new FloatWritable() : (FloatWritable) reuse;
      int v = 0;
      for (int i = 0; i < 4; i++) {
        v = (v << 8) + (buffer.read(invert) & 0xff);
      }
      if ((v & (1 << 31)) == 0) {
        // negative number, flip all bits
        v = ~v;
      } else {
        // positive number, flip the first bit
        v = v ^ (1 << 31);
      }
      r.set(Float.intBitsToFloat(v));
      return r;
    }
    case DOUBLE: {
      DoubleWritable r = reuse == null ? new DoubleWritable() : (DoubleWritable) reuse;
      long v = 0;
      for (int i = 0; i < 8; i++) {
        v = (v << 8) + (buffer.read(invert) & 0xff);
      }
      if ((v & (1L << 63)) == 0) {
        // negative number, flip all bits
        v = ~v;
      } else {
        // positive number, flip the first bit
        v = v ^ (1L << 63);
      }
      r.set(Double.longBitsToDouble(v));
      return r;
    }
    case STRING: {
      Text r = reuse == null ? new Text() : (Text) reuse;
      return deserializeText(buffer, invert, r);
    }
    case CHAR: {
      HiveCharWritable r = reuse == null ? new HiveCharWritable() : (HiveCharWritable) reuse;
      // Use the internal Text member to read the value.
      deserializeText(buffer, invert, r.getTextValue());
      r.enforceMaxLength(getCharacterMaxLength(type));
      return r;
    }
    case VARCHAR: {
      HiveVarcharWritable r = reuse == null ? new HiveVarcharWritable() : (HiveVarcharWritable) reuse;
      // Use HiveVarchar's internal Text member to read the value.
      deserializeText(buffer, invert, r.getTextValue());
      // If we cached helper data for deserialization we could avoid having
      // to call getVarcharMaxLength() on every deserialize call.
      r.enforceMaxLength(getCharacterMaxLength(type));
      return r;
    }
    case BINARY: {
      BytesWritable bw = new BytesWritable();
      // Get the actual length first.
      int start = buffer.tell();
      int length = 0;
      do {
        byte b = buffer.read(invert);
        if (b == 0) {
          // end of string
          break;
        }
        if (b == 1) {
          // this is an escape char, so skip the actual char that follows
          buffer.read(invert);
        }
        length++;
      } while (true);
      if (length == buffer.tell() - start) {
        // No escaping happened, so we are already done.
        bw.set(buffer.getData(), start, length);
      } else {
        // Escaping happened, so we need to copy byte by byte.
        // 1. Set the length first.
        bw.set(buffer.getData(), start, length);
        // 2. Reset the pointer.
        buffer.seek(start);
        // 3. Copy the data.
        byte[] rdata = bw.getBytes();
        for (int i = 0; i < length; i++) {
          byte b = buffer.read(invert);
          if (b == 1) {
            // This is an escape char, so read the actual char.
            // The serialization format escapes \0 to \1 and \1 to \2,
            // to make sure the string is null-terminated.
            b = (byte) (buffer.read(invert) - 1);
          }
          rdata[i] = b;
        }
        // 4. Read the null terminator.
        byte b = buffer.read(invert);
        assert (b == 0);
      }
      return bw;
    }
    case DATE: {
      DateWritable d = reuse == null ? new DateWritable() : (DateWritable) reuse;
      d.set(deserializeInt(buffer, invert));
      return d;
    }
    case TIMESTAMP: {
      TimestampWritable t = reuse == null ? new TimestampWritable() : (TimestampWritable) reuse;
      byte[] bytes = new byte[TimestampWritable.BINARY_SORTABLE_LENGTH];
      for (int i = 0; i < bytes.length; i++) {
        bytes[i] = buffer.read(invert);
      }
      t.setBinarySortable(bytes, 0);
      return t;
    }
    case INTERVAL_YEAR_MONTH: {
      HiveIntervalYearMonthWritable i = reuse == null ? new HiveIntervalYearMonthWritable() : (HiveIntervalYearMonthWritable) reuse;
      i.set(deserializeInt(buffer, invert));
      return i;
    }
    case INTERVAL_DAY_TIME: {
      HiveIntervalDayTimeWritable i = reuse == null ? new HiveIntervalDayTimeWritable() : (HiveIntervalDayTimeWritable) reuse;
      long totalSecs = deserializeLong(buffer, invert);
      int nanos = deserializeInt(buffer, invert);
      i.set(totalSecs, nanos);
      return i;
    }
    case DECIMAL: {
      // See the serialization of decimal (below) for an explanation.
      HiveDecimalWritable bdw = reuse == null ? new HiveDecimalWritable() : (HiveDecimalWritable) reuse;
      int b = buffer.read(invert) - 1;
      assert (b == 1 || b == -1 || b == 0);
      boolean positive = b != -1;
      int factor = buffer.read(invert) ^ 0x80;
      for (int i = 0; i < 3; i++) {
        factor = (factor << 8) + (buffer.read(invert) & 0xff);
      }
      if (!positive) {
        factor = -factor;
      }
      int start = buffer.tell();
      int length = 0;
      do {
        b = buffer.read(positive ? invert : !invert);
        assert (b != 1);
        if (b == 0) {
          // end of digits
          break;
        }
        length++;
      } while (true);
      final byte[] decimalBuffer = new byte[length];
      buffer.seek(start);
      for (int i = 0; i < length; ++i) {
        decimalBuffer[i] = buffer.read(positive ? invert : !invert);
      }
      // Read the null byte again.
      buffer.read(positive ? invert : !invert);
      String digits = new String(decimalBuffer, 0, length, decimalCharSet);
      BigInteger bi = new BigInteger(digits);
      HiveDecimal bd = HiveDecimal.create(bi).scaleByPowerOfTen(factor - length);
      if (!positive) {
        bd = bd.negate();
      }
      bdw.set(bd);
      return bdw;
    }
    default: {
      throw new RuntimeException("Unrecognized type: " + ptype.getPrimitiveCategory());
    }
    }
  }
  case LIST: {
    ListTypeInfo ltype = (ListTypeInfo) type;
    TypeInfo etype = ltype.getListElementTypeInfo();
    // Create the list if needed.
    ArrayList<Object> r = reuse == null ? new ArrayList<Object>() : (ArrayList<Object>) reuse;
    // Read the list.
    int size = 0;
    while (true) {
      int more = buffer.read(invert);
      if (more == 0) {
        // \0 terminates the list
        break;
      }
      // \1 precedes each element
      assert (more == 1);
      if (size == r.size()) {
        r.add(null);
      }
      r.set(size, deserialize(buffer, etype, invert, nullMarker, notNullMarker, r.get(size)));
      size++;
    }
    // Remove additional elements if the list is reused.
    while (r.size() > size) {
      r.remove(r.size() - 1);
    }
    return r;
  }
  case MAP: {
    MapTypeInfo mtype = (MapTypeInfo) type;
    TypeInfo ktype = mtype.getMapKeyTypeInfo();
    TypeInfo vtype = mtype.getMapValueTypeInfo();
    // Create the map if needed.
    Map<Object, Object> r;
    if (reuse == null) {
      r = new HashMap<Object, Object>();
    } else {
      r = (HashMap<Object, Object>) reuse;
      r.clear();
    }
    while (true) {
      int more = buffer.read(invert);
      if (more == 0) {
        // \0 terminates the map
        break;
      }
      // \1 precedes each key-value pair
      assert (more == 1);
      Object k = deserialize(buffer, ktype, invert, nullMarker, notNullMarker, null);
      Object v = deserialize(buffer, vtype, invert, nullMarker, notNullMarker, null);
      r.put(k, v);
    }
    return r;
  }
  case STRUCT: {
    StructTypeInfo stype = (StructTypeInfo) type;
    List<TypeInfo> fieldTypes = stype.getAllStructFieldTypeInfos();
    int size = fieldTypes.size();
    // Create the struct if needed.
    ArrayList<Object> r = reuse == null ? new ArrayList<Object>(size) : (ArrayList<Object>) reuse;
    assert (r.size() <= size);
    // Pad the struct to its full size.
    while (r.size() < size) {
      r.add(null);
    }
    // Read the fields one by one.
    for (int eid = 0; eid < size; eid++) {
      r.set(eid, deserialize(buffer, fieldTypes.get(eid), invert, nullMarker, notNullMarker, r.get(eid)));
    }
    return r;
  }
  case UNION: {
    UnionTypeInfo utype = (UnionTypeInfo) type;
    StandardUnion r = reuse == null ? new StandardUnion() : (StandardUnion) reuse;
    // Read the tag.
    byte tag = buffer.read(invert);
    r.setTag(tag);
    r.setObject(deserialize(buffer, utype.getAllUnionObjectTypeInfos().get(tag), invert, nullMarker, notNullMarker, null));
    return r;
  }
  default: {
    throw new RuntimeException("Unrecognized type: " + type.getCategory());
  }
  }
}
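All of the primitive branches above undo an order-preserving byte encoding. For example, ints are written big-endian with the sign bit flipped, so comparing the raw bytes as unsigned values matches signed numeric order; the SHORT and INT cases reverse this with the ^ 0x80 on the first byte. A self-contained round-trip sketch of that encoding (plain arrays, not Hive's InputByteBuffer; the BinarySortableInt class is illustrative only):

public class BinarySortableInt {
  // Encode big-endian with the sign bit flipped so that unsigned
  // lexicographic byte order matches signed numeric order.
  static byte[] encode(int v) {
    int flipped = v ^ (1 << 31);
    return new byte[] { (byte) (flipped >>> 24), (byte) (flipped >>> 16),
                        (byte) (flipped >>> 8), (byte) flipped };
  }

  // Mirrors the deserializer: flip the sign bit of the first byte back,
  // then accumulate the remaining bytes.
  static int decode(byte[] b) {
    int v = (b[0] & 0xff) ^ 0x80;
    for (int i = 1; i < 4; i++) {
      v = (v << 8) + (b[i] & 0xff);
    }
    return v;
  }

  public static void main(String[] args) {
    for (int x : new int[] { -2, -1, 0, 1, 2 }) {
      System.out.println(x + " -> " + java.util.Arrays.toString(encode(x))
          + " -> " + decode(encode(x)));
    }
    // The encoded bytes for -2 < -1 < 0 < 1 < 2 also sort in that order
    // when compared as unsigned byte strings.
  }
}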
Use of org.apache.hadoop.hive.serde2.typeinfo.MapTypeInfo in project hive by apache.
The class BatchToRowReader, method nextMap.
private HashMap<Object, Object> nextMap(ColumnVector vector, int row, MapTypeInfo schema, Object previous) {
  if (vector.isRepeating) {
    row = 0;
  }
  if (vector.noNulls || !vector.isNull[row]) {
    MapColumnVector map = (MapColumnVector) vector;
    int length = (int) map.lengths[row];
    int offset = (int) map.offsets[row];
    TypeInfo keyType = schema.getMapKeyTypeInfo();
    TypeInfo valueType = schema.getMapValueTypeInfo();
    HashMap<Object, Object> result;
    if (previous == null || previous.getClass() != HashMap.class) {
      result = new HashMap<Object, Object>(length);
    } else {
      result = (HashMap<Object, Object>) previous;
      // I couldn't think of a good way to reuse the keys and value objects
      // without even more allocations, so take the easy and safe approach.
      result.clear();
    }
    for (int e = 0; e < length; ++e) {
      result.put(nextValue(map.keys, e + offset, keyType, null),
          nextValue(map.values, e + offset, valueType, null));
    }
    return result;
  } else {
    return null;
  }
}
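nextMap reads one row's entries out of a flattened representation: all maps in a batch share a single keys vector and a single values vector, and offsets/lengths select each row's slice. A minimal sketch of that layout with plain arrays (a hypothetical FlattenedMapBatch class, not Hive's ColumnVector API):

import java.util.HashMap;
import java.util.Map;

public class FlattenedMapBatch {
  // All rows' entries are concatenated; offsets[r]/lengths[r] select row r's slice.
  String[] keys = { "a", "b", "c", "d", "e" };
  int[] values = { 1, 2, 3, 4, 5 };
  long[] offsets = { 0, 2 }; // row 0 starts at index 0, row 1 at index 2
  long[] lengths = { 2, 3 }; // row 0 has 2 entries, row 1 has 3

  Map<String, Integer> mapForRow(int row) {
    int offset = (int) offsets[row];
    int length = (int) lengths[row];
    Map<String, Integer> result = new HashMap<>(length);
    for (int e = 0; e < length; e++) {
      result.put(keys[offset + e], values[offset + e]);
    }
    return result;
  }

  public static void main(String[] args) {
    FlattenedMapBatch batch = new FlattenedMapBatch();
    System.out.println(batch.mapForRow(0)); // prints {a=1, b=2}
    System.out.println(batch.mapForRow(1)); // prints {c=3, d=4, e=5}
  }
}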