Use of org.apache.hadoop.hive.llap.TypeDesc in project hive by apache.
The class GenericUDTFGetSplits, method convertTypeString.
private TypeDesc convertTypeString(String typeString) throws HiveException {
  TypeDesc typeDesc;
  TypeInfo typeInfo = TypeInfoUtils.getTypeInfoFromTypeString(typeString);
  Preconditions.checkState(typeInfo.getCategory() == ObjectInspector.Category.PRIMITIVE,
      "Unsupported non-primitive type " + typeString);
  switch (((PrimitiveTypeInfo) typeInfo).getPrimitiveCategory()) {
    case BOOLEAN:
      typeDesc = new TypeDesc(TypeDesc.Type.BOOLEAN);
      break;
    case BYTE:
      typeDesc = new TypeDesc(TypeDesc.Type.TINYINT);
      break;
    case SHORT:
      typeDesc = new TypeDesc(TypeDesc.Type.SMALLINT);
      break;
    case INT:
      typeDesc = new TypeDesc(TypeDesc.Type.INT);
      break;
    case LONG:
      typeDesc = new TypeDesc(TypeDesc.Type.BIGINT);
      break;
    case FLOAT:
      typeDesc = new TypeDesc(TypeDesc.Type.FLOAT);
      break;
    case DOUBLE:
      typeDesc = new TypeDesc(TypeDesc.Type.DOUBLE);
      break;
    case STRING:
      typeDesc = new TypeDesc(TypeDesc.Type.STRING);
      break;
    case CHAR:
      CharTypeInfo charTypeInfo = (CharTypeInfo) typeInfo;
      typeDesc = new TypeDesc(TypeDesc.Type.CHAR, charTypeInfo.getLength());
      break;
    case VARCHAR:
      VarcharTypeInfo varcharTypeInfo = (VarcharTypeInfo) typeInfo;
      typeDesc = new TypeDesc(TypeDesc.Type.VARCHAR, varcharTypeInfo.getLength());
      break;
    case DATE:
      typeDesc = new TypeDesc(TypeDesc.Type.DATE);
      break;
    case TIMESTAMP:
      typeDesc = new TypeDesc(TypeDesc.Type.TIMESTAMP);
      break;
    case BINARY:
      typeDesc = new TypeDesc(TypeDesc.Type.BINARY);
      break;
    case DECIMAL:
      DecimalTypeInfo decimalTypeInfo = (DecimalTypeInfo) typeInfo;
      typeDesc = new TypeDesc(TypeDesc.Type.DECIMAL, decimalTypeInfo.getPrecision(), decimalTypeInfo.getScale());
      break;
    default:
      throw new HiveException("Unsupported type " + typeString);
  }
  return typeDesc;
}
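The heavy lifting is done by TypeInfoUtils, which parses the Hive type string (including char/varchar length and decimal precision/scale) into a TypeInfo that the switch above maps onto the LLAP TypeDesc types. A minimal standalone sketch of that parsing step, using the same serde2 type APIs that appear in the method; the class name and example type strings are illustrative assumptions, not taken from the Hive sources:

import org.apache.hadoop.hive.serde2.typeinfo.DecimalTypeInfo;
import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo;
import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils;
import org.apache.hadoop.hive.serde2.typeinfo.VarcharTypeInfo;

public class TypeStringParsingSketch {
  public static void main(String[] args) {
    // A parameterized varchar type string carries its length on the parsed TypeInfo.
    TypeInfo varcharInfo = TypeInfoUtils.getTypeInfoFromTypeString("varchar(20)");
    System.out.println(((VarcharTypeInfo) varcharInfo).getLength()); // prints 20
    // A decimal type string carries precision and scale on the parsed TypeInfo.
    DecimalTypeInfo decimalInfo = (DecimalTypeInfo) TypeInfoUtils.getTypeInfoFromTypeString("decimal(10,2)");
    System.out.println(decimalInfo.getPrecision() + "," + decimalInfo.getScale()); // prints 10,2
  }
}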
Use of org.apache.hadoop.hive.llap.TypeDesc in project hive by apache.
The class GenericUDTFGetSplits, method convertSchema.
private Schema convertSchema(Object obj) throws HiveException {
  org.apache.hadoop.hive.metastore.api.Schema schema = (org.apache.hadoop.hive.metastore.api.Schema) obj;
  List<FieldDesc> colDescs = new ArrayList<FieldDesc>();
  for (FieldSchema fs : schema.getFieldSchemas()) {
    String colName = fs.getName();
    String typeString = fs.getType();
    TypeDesc typeDesc = convertTypeString(typeString);
    colDescs.add(new FieldDesc(colName, typeDesc));
  }
  return new Schema(colDescs);
}
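The input here is the Thrift-generated org.apache.hadoop.hive.metastore.api.Schema, whose FieldSchema list (column name plus type string) drives the per-column conversion above. A minimal sketch of constructing such an input; the class name, column names, and column types are illustrative assumptions, not taken from the Hive sources:

import java.util.Arrays;

import org.apache.hadoop.hive.metastore.api.FieldSchema;
import org.apache.hadoop.hive.metastore.api.Schema;

public class MetastoreSchemaSketch {
  public static void main(String[] args) {
    // Thrift-generated bean: set the field list via its setter; the third
    // FieldSchema argument is the column comment.
    Schema metastoreSchema = new Schema();
    metastoreSchema.setFieldSchemas(Arrays.asList(
        new FieldSchema("id", "int", null),
        new FieldSchema("name", "varchar(20)", null)));
    // Passed to convertSchema, this would yield an LLAP Schema whose FieldDesc
    // entries carry TypeDesc.Type.INT and TypeDesc.Type.VARCHAR with length 20.
  }
}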
Use of org.apache.hadoop.hive.llap.TypeDesc in project hive by apache.
The class TestLlapInputSplit, method testWritable.
@Test
public void testWritable() throws Exception {
  int splitNum = 88;
  byte[] planBytes = "0123456789987654321".getBytes();
  byte[] fragmentBytes = "abcdefghijklmnopqrstuvwxyz".getBytes();
  SplitLocationInfo[] locations = { new SplitLocationInfo("location1", false), new SplitLocationInfo("location2", false) };
  ArrayList<FieldDesc> colDescs = new ArrayList<FieldDesc>();
  colDescs.add(new FieldDesc("col1", new TypeDesc(TypeDesc.Type.STRING)));
  colDescs.add(new FieldDesc("col2", new TypeDesc(TypeDesc.Type.INT)));
  Schema schema = new Schema(colDescs);
  byte[] tokenBytes = new byte[] { 1 };
  LlapInputSplit split1 = new LlapInputSplit(splitNum, planBytes, fragmentBytes, null, locations, schema, "hive", tokenBytes);
  ByteArrayOutputStream byteOutStream = new ByteArrayOutputStream();
  DataOutputStream dataOut = new DataOutputStream(byteOutStream);
  split1.write(dataOut);
  ByteArrayInputStream byteInStream = new ByteArrayInputStream(byteOutStream.toByteArray());
  DataInputStream dataIn = new DataInputStream(byteInStream);
  LlapInputSplit split2 = new LlapInputSplit();
  split2.readFields(dataIn);
  // Did we read all the data?
  assertEquals(0, byteInStream.available());
  checkLlapSplits(split1, split2);
}
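The test follows the standard Hadoop Writable round trip: serialize split1 with write, deserialize the bytes into a fresh split2 with readFields, confirm the stream is fully consumed, then compare the two splits with checkLlapSplits. A generic sketch of that round-trip pattern; the helper class and method names are illustrative assumptions, not part of the Hive tests:

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;

import org.apache.hadoop.io.Writable;

public class WritableRoundTrip {
  // Serialize a Writable and read it back into a caller-supplied empty instance.
  static <T extends Writable> T roundTrip(T original, T empty) throws IOException {
    ByteArrayOutputStream bytesOut = new ByteArrayOutputStream();
    original.write(new DataOutputStream(bytesOut));
    empty.readFields(new DataInputStream(new ByteArrayInputStream(bytesOut.toByteArray())));
    return empty;
  }
}

In the test above, roundTrip(split1, new LlapInputSplit()) would produce the split2 that checkLlapSplits then compares against split1.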