Use of org.apache.hadoop.hive.serde2.typeinfo.StructTypeInfo in project hive by apache.
The class BinarySortableSerDe, method deserialize:
static Object deserialize(InputByteBuffer buffer, TypeInfo type, boolean invert,
    byte nullMarker, byte notNullMarker, Object reuse) throws IOException {
  // Is this field null?
  byte isNull = buffer.read(invert);
  if (isNull == nullMarker) {
    return null;
  }
  assert (isNull == notNullMarker);
  switch (type.getCategory()) {
  case PRIMITIVE: {
    PrimitiveTypeInfo ptype = (PrimitiveTypeInfo) type;
    switch (ptype.getPrimitiveCategory()) {
    case VOID: {
      return null;
    }
    case BOOLEAN: {
      BooleanWritable r = reuse == null ? new BooleanWritable() : (BooleanWritable) reuse;
      // false is serialized as 1, true as 2.
      byte b = buffer.read(invert);
      assert (b == 1 || b == 2);
      r.set(b == 2);
      return r;
    }
    case BYTE: {
      ByteWritable r = reuse == null ? new ByteWritable() : (ByteWritable) reuse;
      r.set((byte) (buffer.read(invert) ^ 0x80));
      return r;
    }
    case SHORT: {
      ShortWritable r = reuse == null ? new ShortWritable() : (ShortWritable) reuse;
      int v = buffer.read(invert) ^ 0x80;
      v = (v << 8) + (buffer.read(invert) & 0xff);
      r.set((short) v);
      return r;
    }
    case INT: {
      IntWritable r = reuse == null ? new IntWritable() : (IntWritable) reuse;
      r.set(deserializeInt(buffer, invert));
      return r;
    }
    case LONG: {
      LongWritable r = reuse == null ? new LongWritable() : (LongWritable) reuse;
      r.set(deserializeLong(buffer, invert));
      return r;
    }
    case FLOAT: {
      FloatWritable r = reuse == null ? new FloatWritable() : (FloatWritable) reuse;
      int v = 0;
      for (int i = 0; i < 4; i++) {
        v = (v << 8) + (buffer.read(invert) & 0xff);
      }
      if ((v & (1 << 31)) == 0) {
        // Negative number: all bits were flipped during serialization; flip them back.
        v = ~v;
      } else {
        // Positive number: only the sign bit was flipped; flip it back.
        v = v ^ (1 << 31);
      }
      r.set(Float.intBitsToFloat(v));
      return r;
    }
    case DOUBLE: {
      DoubleWritable r = reuse == null ? new DoubleWritable() : (DoubleWritable) reuse;
      long v = 0;
      for (int i = 0; i < 8; i++) {
        v = (v << 8) + (buffer.read(invert) & 0xff);
      }
      if ((v & (1L << 63)) == 0) {
        // Negative number: all bits were flipped during serialization; flip them back.
        v = ~v;
      } else {
        // Positive number: only the sign bit was flipped; flip it back.
        v = v ^ (1L << 63);
      }
      r.set(Double.longBitsToDouble(v));
      return r;
    }
    case STRING: {
      Text r = reuse == null ? new Text() : (Text) reuse;
      return deserializeText(buffer, invert, r);
    }
    case CHAR: {
      HiveCharWritable r = reuse == null ? new HiveCharWritable() : (HiveCharWritable) reuse;
      // Use HiveChar's internal Text member to read the value.
      deserializeText(buffer, invert, r.getTextValue());
      r.enforceMaxLength(getCharacterMaxLength(type));
      return r;
    }
    case VARCHAR: {
      HiveVarcharWritable r = reuse == null ? new HiveVarcharWritable() : (HiveVarcharWritable) reuse;
      // Use HiveVarchar's internal Text member to read the value.
      deserializeText(buffer, invert, r.getTextValue());
      // If we cached helper data for deserialization we could avoid having
      // to call getCharacterMaxLength() on every deserialize call.
      r.enforceMaxLength(getCharacterMaxLength(type));
      return r;
    }
    case BINARY: {
      BytesWritable bw = new BytesWritable();
      // Get the actual length first.
      int start = buffer.tell();
      int length = 0;
      do {
        byte b = buffer.read(invert);
        if (b == 0) {
          // end of string
          break;
        }
        if (b == 1) {
          // This byte is an escape byte; skip the escaped byte that follows.
          buffer.read(invert);
        }
        length++;
      } while (true);
      if (length == buffer.tell() - start) {
        // No escaping happened, so we are already done.
        bw.set(buffer.getData(), start, length);
      } else {
        // Escaping happened; we need to copy byte-by-byte.
        // 1. Set the length first.
        bw.set(buffer.getData(), start, length);
        // 2. Reset the pointer.
        buffer.seek(start);
        // 3. Copy the data.
        byte[] rdata = bw.getBytes();
        for (int i = 0; i < length; i++) {
          byte b = buffer.read(invert);
          if (b == 1) {
            // This byte is an escape byte; read the actual byte.
            // The serialization format escapes \0 as \1\1 and \1 as \1\2,
            // to make sure the string is null-terminated.
            b = (byte) (buffer.read(invert) - 1);
          }
          rdata[i] = b;
        }
        // 4. Read the null terminator.
        byte b = buffer.read(invert);
        assert (b == 0);
      }
      return bw;
    }
    case DATE: {
      DateWritableV2 d = reuse == null ? new DateWritableV2() : (DateWritableV2) reuse;
      d.set(deserializeInt(buffer, invert));
      return d;
    }
    case TIMESTAMP: {
      TimestampWritableV2 t = reuse == null ? new TimestampWritableV2() : (TimestampWritableV2) reuse;
      byte[] bytes = new byte[TimestampWritableV2.BINARY_SORTABLE_LENGTH];
      for (int i = 0; i < bytes.length; i++) {
        bytes[i] = buffer.read(invert);
      }
      t.setBinarySortable(bytes, 0);
      return t;
    }
    case TIMESTAMPLOCALTZ: {
      TimestampLocalTZWritable tstz = reuse == null ? new TimestampLocalTZWritable() : (TimestampLocalTZWritable) reuse;
      byte[] data = new byte[TimestampLocalTZWritable.BINARY_SORTABLE_LENGTH];
      for (int i = 0; i < data.length; i++) {
        data[i] = buffer.read(invert);
      }
      // Across the MR process boundary the time zone is normalized and stored
      // in the type; it is not carried in the data for each row.
      tstz.fromBinarySortable(data, 0, ((TimestampLocalTZTypeInfo) type).timeZone());
      return tstz;
    }
    case INTERVAL_YEAR_MONTH: {
      HiveIntervalYearMonthWritable i = reuse == null ? new HiveIntervalYearMonthWritable() : (HiveIntervalYearMonthWritable) reuse;
      i.set(deserializeInt(buffer, invert));
      return i;
    }
    case INTERVAL_DAY_TIME: {
      HiveIntervalDayTimeWritable i = reuse == null ? new HiveIntervalDayTimeWritable() : (HiveIntervalDayTimeWritable) reuse;
      long totalSecs = deserializeLong(buffer, invert);
      int nanos = deserializeInt(buffer, invert);
      i.set(totalSecs, nanos);
      return i;
    }
    case DECIMAL: {
      // See the serialization of decimal for an explanation.
      HiveDecimalWritable bdw = reuse == null ? new HiveDecimalWritable() : (HiveDecimalWritable) reuse;
      // Sign byte: serialized as 0 (negative), 1 (zero), or 2 (positive).
      int b = buffer.read(invert) - 1;
      assert (b == 1 || b == -1 || b == 0);
      boolean positive = b != -1;
      // Four-byte factor (decimal exponent), serialized with its sign bit flipped.
      int factor = buffer.read(invert) ^ 0x80;
      for (int i = 0; i < 3; i++) {
        factor = (factor << 8) + (buffer.read(invert) & 0xff);
      }
      if (!positive) {
        factor = -factor;
      }
      // Scan once to find the length of the null-terminated digit run.
      int start = buffer.tell();
      int length = 0;
      do {
        b = buffer.read(positive ? invert : !invert);
        assert (b != 1);
        if (b == 0) {
          // end of digits
          break;
        }
        length++;
      } while (true);
      final byte[] decimalBuffer = new byte[length];
      buffer.seek(start);
      for (int i = 0; i < length; ++i) {
        decimalBuffer[i] = buffer.read(positive ? invert : !invert);
      }
      // Read the null terminator again.
      buffer.read(positive ? invert : !invert);
      String digits = new String(decimalBuffer, 0, length, decimalCharSet);
      BigInteger bi = new BigInteger(digits);
      HiveDecimal bd = HiveDecimal.create(bi).scaleByPowerOfTen(factor - length);
      if (!positive) {
        bd = bd.negate();
      }
      bdw.set(bd);
      return bdw;
    }
    default: {
      throw new RuntimeException("Unrecognized type: " + ptype.getPrimitiveCategory());
    }
    }
  }
  case LIST: {
    ListTypeInfo ltype = (ListTypeInfo) type;
    TypeInfo etype = ltype.getListElementTypeInfo();
    // Create the list if needed.
    ArrayList<Object> r = reuse == null ? new ArrayList<Object>() : (ArrayList<Object>) reuse;
    // Read the list.
    int size = 0;
    while (true) {
      int more = buffer.read(invert);
      if (more == 0) {
        // \0 terminates the list.
        break;
      }
      // Each element is preceded by \1.
      assert (more == 1);
      if (size == r.size()) {
        r.add(null);
      }
      r.set(size, deserialize(buffer, etype, invert, nullMarker, notNullMarker, r.get(size)));
      size++;
    }
    // Remove additional elements if the list is reused.
    while (r.size() > size) {
      r.remove(r.size() - 1);
    }
    return r;
  }
  case MAP: {
    MapTypeInfo mtype = (MapTypeInfo) type;
    TypeInfo ktype = mtype.getMapKeyTypeInfo();
    TypeInfo vtype = mtype.getMapValueTypeInfo();
    // Create the map if needed.
    Map<Object, Object> r;
    if (reuse == null || reuse.getClass() != LinkedHashMap.class) {
      r = new LinkedHashMap<Object, Object>();
    } else {
      r = (Map<Object, Object>) reuse;
      r.clear();
    }
    while (true) {
      int more = buffer.read(invert);
      if (more == 0) {
        // \0 terminates the map.
        break;
      }
      // Each entry is preceded by \1, followed by the key and then the value.
      assert (more == 1);
      Object k = deserialize(buffer, ktype, invert, nullMarker, notNullMarker, null);
      Object v = deserialize(buffer, vtype, invert, nullMarker, notNullMarker, null);
      r.put(k, v);
    }
    return r;
  }
  case STRUCT: {
    StructTypeInfo stype = (StructTypeInfo) type;
    List<TypeInfo> fieldTypes = stype.getAllStructFieldTypeInfos();
    int size = fieldTypes.size();
    // Create the struct if needed.
    ArrayList<Object> r = reuse == null ? new ArrayList<Object>(size) : (ArrayList<Object>) reuse;
    assert (r.size() <= size);
    // Grow the struct to its full size.
    while (r.size() < size) {
      r.add(null);
    }
    // Read the fields one by one.
    for (int eid = 0; eid < size; eid++) {
      r.set(eid, deserialize(buffer, fieldTypes.get(eid), invert, nullMarker, notNullMarker, r.get(eid)));
    }
    return r;
  }
  case UNION: {
    UnionTypeInfo utype = (UnionTypeInfo) type;
    StandardUnion r = reuse == null ? new StandardUnion() : (StandardUnion) reuse;
    // Read the tag, then deserialize the branch it selects.
    byte tag = buffer.read(invert);
    r.setTag(tag);
    r.setObject(deserialize(buffer, utype.getAllUnionObjectTypeInfos().get(tag), invert,
        nullMarker, notNullMarker, null));
    return r;
  }
  default: {
    throw new RuntimeException("Unrecognized type: " + type.getCategory());
  }
  }
}
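For orientation, here is a minimal sketch of how this deserializer is reached through the public SerDe API. The two-column schema is hypothetical, and the exact initialize signature varies across Hive versions (newer versions take a third Properties argument for partition properties):

import java.util.Properties;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hive.serde.serdeConstants;
import org.apache.hadoop.hive.serde2.binarysortable.BinarySortableSerDe;

public class BinarySortableExample {
  public static void main(String[] args) throws Exception {
    Properties tbl = new Properties();
    // Standard SerDe table properties ("columns" / "columns.types").
    tbl.setProperty(serdeConstants.LIST_COLUMNS, "id,name");
    tbl.setProperty(serdeConstants.LIST_COLUMN_TYPES, "int,string");
    BinarySortableSerDe serde = new BinarySortableSerDe();
    serde.initialize(new Configuration(), tbl);
    // serde.deserialize(writable) builds a StructTypeInfo from the properties
    // above and recurses into the static deserialize(...) shown here, hitting
    // the STRUCT case first and then one PRIMITIVE case per column.
  }
}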
Use of org.apache.hadoop.hive.serde2.typeinfo.StructTypeInfo in project hive by apache.
The class AvroObjectInspectorGenerator, method createObjectInspectorWorker:
private ObjectInspector createObjectInspectorWorker(TypeInfo ti) throws SerDeException {
  // Nullable unions are resolved to their underlying type at deserialization
  // and the object inspector will never see the actual union.
  if (!supportedCategories(ti)) {
    throw new AvroSerdeException("Don't yet support this type: " + ti);
  }
  ObjectInspector result;
  switch (ti.getCategory()) {
  case PRIMITIVE:
    PrimitiveTypeInfo pti = (PrimitiveTypeInfo) ti;
    result = PrimitiveObjectInspectorFactory.getPrimitiveJavaObjectInspector(pti);
    break;
  case STRUCT:
    StructTypeInfo sti = (StructTypeInfo) ti;
    ArrayList<ObjectInspector> ois =
        new ArrayList<ObjectInspector>(sti.getAllStructFieldTypeInfos().size());
    for (TypeInfo typeInfo : sti.getAllStructFieldTypeInfos()) {
      ois.add(createObjectInspectorWorker(typeInfo));
    }
    result = ObjectInspectorFactory.getStandardStructObjectInspector(sti.getAllStructFieldNames(), ois);
    break;
  case MAP:
    // Avro map keys are always strings, so the key inspector is hardcoded.
    MapTypeInfo mti = (MapTypeInfo) ti;
    result = ObjectInspectorFactory.getStandardMapObjectInspector(
        PrimitiveObjectInspectorFactory.getPrimitiveJavaObjectInspector(
            PrimitiveObjectInspector.PrimitiveCategory.STRING),
        createObjectInspectorWorker(mti.getMapValueTypeInfo()));
    break;
  case LIST:
    ListTypeInfo ati = (ListTypeInfo) ti;
    result = ObjectInspectorFactory.getStandardListObjectInspector(
        createObjectInspectorWorker(ati.getListElementTypeInfo()));
    break;
  case UNION:
    UnionTypeInfo uti = (UnionTypeInfo) ti;
    List<TypeInfo> allUnionObjectTypeInfos = uti.getAllUnionObjectTypeInfos();
    List<ObjectInspector> unionObjectInspectors =
        new ArrayList<ObjectInspector>(allUnionObjectTypeInfos.size());
    for (TypeInfo typeInfo : allUnionObjectTypeInfos) {
      unionObjectInspectors.add(createObjectInspectorWorker(typeInfo));
    }
    result = ObjectInspectorFactory.getStandardUnionObjectInspector(unionObjectInspectors);
    break;
  default:
    throw new AvroSerdeException("No Hive categories matched: " + ti);
  }
  return result;
}
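As a usage sketch (the record schema here is hypothetical), the generator is typically driven from an Avro schema; it converts the schema to Hive TypeInfos and then calls createObjectInspectorWorker for each column:

import org.apache.avro.Schema;
import org.apache.hadoop.hive.serde2.avro.AvroObjectInspectorGenerator;
import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;

public class AvroOiExample {
  public static void main(String[] args) throws Exception {
    // Hypothetical record schema with an int column and a string column.
    Schema schema = new Schema.Parser().parse(
        "{\"type\":\"record\",\"name\":\"r\",\"fields\":["
            + "{\"name\":\"id\",\"type\":\"int\"},"
            + "{\"name\":\"name\",\"type\":\"string\"}]}");
    AvroObjectInspectorGenerator gen = new AvroObjectInspectorGenerator(schema);
    ObjectInspector oi = gen.getObjectInspector();
    System.out.println(gen.getColumnNames() + " -> " + oi.getTypeName());
  }
}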
Use of org.apache.hadoop.hive.serde2.typeinfo.StructTypeInfo in project hive by apache.
The class AvroSerializer, method serializeStruct:
private Object serializeStruct(StructTypeInfo typeInfo, StructObjectInspector ssoi, Object o,
    Schema schema) throws AvroSerdeException {
  int size = schema.getFields().size();
  List<? extends StructField> allStructFieldRefs = ssoi.getAllStructFieldRefs();
  List<Object> structFieldsDataAsList = ssoi.getStructFieldsDataAsList(o);
  GenericData.Record record = new GenericData.Record(schema);
  List<TypeInfo> allStructFieldTypeInfos = typeInfo.getAllStructFieldTypeInfos();
  for (int i = 0; i < size; i++) {
    // Fields are matched by position: the i-th Avro field is serialized from
    // the i-th struct field.
    Field field = schema.getFields().get(i);
    TypeInfo colTypeInfo = allStructFieldTypeInfos.get(i);
    StructField structFieldRef = allStructFieldRefs.get(i);
    Object structFieldData = structFieldsDataAsList.get(i);
    ObjectInspector fieldOI = structFieldRef.getFieldObjectInspector();
    Object val = serialize(colTypeInfo, fieldOI, structFieldData, field.schema());
    record.put(field.name(), val);
  }
  return record;
}
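Because the pairing above is purely positional, the Avro record's field order must line up with the Hive struct's field order. A small sketch of a matching type/schema pair (names are hypothetical):

import org.apache.avro.Schema;
import org.apache.hadoop.hive.serde2.typeinfo.StructTypeInfo;
import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils;

public class StructSchemaPair {
  public static void main(String[] args) {
    StructTypeInfo t = (StructTypeInfo) TypeInfoUtils.getTypeInfoFromTypeString(
        "struct<id:int,name:string>");
    Schema s = new Schema.Parser().parse(
        "{\"type\":\"record\",\"name\":\"r\",\"fields\":["
            + "{\"name\":\"id\",\"type\":\"int\"},"
            + "{\"name\":\"name\",\"type\":\"string\"}]}");
    // serializeStruct pairs t.getAllStructFieldTypeInfos().get(i) with
    // s.getFields().get(i); a mismatch in order or arity breaks serialization.
    System.out.println(t.getAllStructFieldNames() + " / " + s.getFields());
  }
}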
Use of org.apache.hadoop.hive.serde2.typeinfo.StructTypeInfo in project hive by apache.
The class TypeInfoToSchema, method createAvroRecord:
private Schema createAvroRecord(TypeInfo typeInfo) {
  List<Schema.Field> childFields = new ArrayList<Schema.Field>();
  final List<String> allStructFieldNames = ((StructTypeInfo) typeInfo).getAllStructFieldNames();
  final List<TypeInfo> allStructFieldTypeInfos = ((StructTypeInfo) typeInfo).getAllStructFieldTypeInfos();
  if (allStructFieldNames.size() != allStructFieldTypeInfos.size()) {
    throw new IllegalArgumentException("Failed to generate avro schema from hive schema. "
        + "name and column type differs. names = " + allStructFieldNames
        + ", types = " + allStructFieldTypeInfos);
  }
  for (int i = 0; i < allStructFieldNames.size(); ++i) {
    final TypeInfo childTypeInfo = allStructFieldTypeInfos.get(i);
    final Schema.Field grandChildSchemaField =
        createAvroField(allStructFieldNames.get(i), childTypeInfo, childTypeInfo.toString());
    final List<Schema.Field> grandChildFields = getFields(grandChildSchemaField);
    childFields.addAll(grandChildFields);
  }
  Schema recordSchema = Schema.createRecord("record_" + recordCounter, typeInfo.toString(), null, false);
  ++recordCounter;
  recordSchema.setFields(childFields);
  return recordSchema;
}
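createAvroRecord is private and reached through TypeInfoToSchema's public conversion entry point (whose exact signature varies by Hive version), so this sketch only shows the struct input it walks; the type string is hypothetical:

import org.apache.hadoop.hive.serde2.typeinfo.StructTypeInfo;
import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils;

public class RecordInputExample {
  public static void main(String[] args) {
    // Each struct level becomes one Avro record named record_0, record_1, ...
    // via the recordCounter in createAvroRecord.
    StructTypeInfo t = (StructTypeInfo) TypeInfoUtils.getTypeInfoFromTypeString(
        "struct<id:int,address:struct<city:string,zip:int>>");
    System.out.println(t.getAllStructFieldNames());     // [id, address]
    System.out.println(t.getAllStructFieldTypeInfos()); // [int, struct<city:string,zip:int>]
  }
}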
Use of org.apache.hadoop.hive.serde2.typeinfo.StructTypeInfo in project hive by apache.
The class VectorSerializeRow, method createField:
private Field createField(TypeInfo typeInfo) {
  final Field field = new Field();
  final Category category = typeInfo.getCategory();
  field.category = category;
  field.typeInfo = typeInfo;
  if (category == Category.PRIMITIVE) {
    field.isPrimitive = true;
    field.primitiveCategory = ((PrimitiveTypeInfo) typeInfo).getPrimitiveCategory();
    switch (field.primitiveCategory) {
    case BOOLEAN:
      field.writer = new VectorSerializeBooleanWriter();
      break;
    case BYTE:
      field.writer = new VectorSerializeByteWriter();
      break;
    case SHORT:
      field.writer = new VectorSerializeShortWriter();
      break;
    case INT:
      field.writer = new VectorSerializeIntWriter();
      break;
    case LONG:
      field.writer = new VectorSerializeLongWriter();
      break;
    case DATE:
      field.writer = new VectorSerializeDateWriter();
      break;
    case TIMESTAMP:
      field.writer = new VectorSerializeTimestampWriter();
      break;
    case FLOAT:
      field.writer = new VectorSerializeFloatWriter();
      break;
    case DOUBLE:
      field.writer = new VectorSerializeDoubleWriter();
      break;
    case STRING:
    case CHAR:
    case VARCHAR:
      field.writer = new VectorSerializeStringWriter();
      break;
    case BINARY:
      field.writer = new VectorSerializeBinaryWriter();
      break;
    case DECIMAL:
      field.writer = new VectorSerializeDecimalWriter();
      break;
    case INTERVAL_YEAR_MONTH:
      field.writer = new VectorSerializeHiveIntervalYearMonthWriter();
      break;
    case INTERVAL_DAY_TIME:
      field.writer = new VectorSerializeHiveIntervalDayTimeWriter();
      break;
    default:
      throw new RuntimeException("Unexpected primitive category " + field.primitiveCategory);
    }
  } else {
    field.isPrimitive = false;
    field.objectInspector = TypeInfoUtils.getStandardJavaObjectInspectorFromTypeInfo(typeInfo);
    switch (category) {
    case LIST:
      field.children = new Field[1];
      field.children[0] = createField(((ListTypeInfo) typeInfo).getListElementTypeInfo());
      field.writer = new VectorSerializeListWriter();
      break;
    case MAP:
      field.children = new Field[2];
      field.children[0] = createField(((MapTypeInfo) typeInfo).getMapKeyTypeInfo());
      field.children[1] = createField(((MapTypeInfo) typeInfo).getMapValueTypeInfo());
      field.writer = new VectorSerializeMapWriter();
      break;
    case STRUCT:
      StructTypeInfo structTypeInfo = (StructTypeInfo) typeInfo;
      List<TypeInfo> fieldTypeInfos = structTypeInfo.getAllStructFieldTypeInfos();
      field.children = createFields(fieldTypeInfos.toArray(new TypeInfo[fieldTypeInfos.size()]));
      field.writer = new VectorSerializeStructWriter();
      break;
    case UNION:
      UnionTypeInfo unionTypeInfo = (UnionTypeInfo) typeInfo;
      List<TypeInfo> objectTypeInfos = unionTypeInfo.getAllUnionObjectTypeInfos();
      field.children = createFields(objectTypeInfos.toArray(new TypeInfo[objectTypeInfos.size()]));
      field.writer = new VectorSerializeUnionWriter();
      break;
    default:
      throw new RuntimeException();
    }
    field.count = field.children.length;
  }
  return field;
}
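To illustrate the recursion, a hedged sketch of the Field tree createField would build for a hypothetical nested type (the writer names are the ones used in the switch above):

import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo;
import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils;

public class FieldTreeExample {
  public static void main(String[] args) {
    // createField on this type yields a STRUCT Field with a
    // VectorSerializeStructWriter and two children:
    //   children[0]: INT primitive with VectorSerializeIntWriter
    //   children[1]: MAP with VectorSerializeMapWriter and children
    //                [STRING -> VectorSerializeStringWriter,
    //                 DOUBLE -> VectorSerializeDoubleWriter]
    TypeInfo t = TypeInfoUtils.getTypeInfoFromTypeString(
        "struct<a:int,m:map<string,double>>");
    System.out.println(t);
  }
}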