Use of org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorFactory.getPrimitiveWritableObjectInspector in project hive by apache.
The class TypedBytesRecordReader, method initialize.
public void initialize(InputStream in, Configuration conf, Properties tbl) throws IOException {
  din = new DataInputStream(in);
  tbIn = new TypedBytesWritableInput(din);
  tbOut = new TypedBytesWritableOutput(barrStr);
  String columnTypeProperty = tbl.getProperty(serdeConstants.LIST_COLUMN_TYPES);
  columnTypes = Arrays.asList(columnTypeProperty.split(","));
  for (String columnType : columnTypes) {
    PrimitiveTypeInfo dstTypeInfo = TypeInfoFactory.getPrimitiveTypeInfo(columnType);
    dstOIns.add(PrimitiveObjectInspectorFactory.getPrimitiveWritableObjectInspector(dstTypeInfo));
  }
}
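For reference, a minimal standalone sketch of the same pattern: resolving a comma-separated column type property into per-column writable object inspectors. This is not from the Hive code base; the class name ColumnTypeOIDemo and the example type string "int,string,double" are illustrative.
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorFactory;
import org.apache.hadoop.hive.serde2.typeinfo.PrimitiveTypeInfo;
import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory;

public class ColumnTypeOIDemo {
  public static void main(String[] args) {
    // Illustrative stand-in for the LIST_COLUMN_TYPES table property.
    String columnTypeProperty = "int,string,double";
    List<String> columnTypes = Arrays.asList(columnTypeProperty.split(","));
    List<ObjectInspector> dstOIns = new ArrayList<ObjectInspector>();
    for (String columnType : columnTypes) {
      // Resolve the type name to its PrimitiveTypeInfo and obtain the
      // matching writable object inspector from the factory.
      PrimitiveTypeInfo typeInfo = TypeInfoFactory.getPrimitiveTypeInfo(columnType);
      ObjectInspector oi = PrimitiveObjectInspectorFactory.getPrimitiveWritableObjectInspector(typeInfo);
      dstOIns.add(oi);
      System.out.println(columnType + " -> " + oi.getTypeName());
    }
  }
}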
Use of org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorFactory.getPrimitiveWritableObjectInspector in project hive by apache.
The class GenericUDFAdd10, method initialize.
@Override
public ObjectInspector initialize(ObjectInspector[] arguments) throws UDFArgumentException {
  if (arguments.length != 1) {
    throw new UDFArgumentLengthException("ADD10() requires 1 argument, got " + arguments.length);
  }
  if (arguments[0].getCategory() != Category.PRIMITIVE) {
    throw new UDFArgumentException("ADD10 only takes primitive types, got " + arguments[0].getTypeName());
  }
  argumentOI = (PrimitiveObjectInspector) arguments[0];
  inputType = argumentOI.getPrimitiveCategory();
  ObjectInspector outputOI = null;
  switch (inputType) {
    case SHORT:
    case BYTE:
    case INT:
      inputConverter = ObjectInspectorConverters.getConverter(arguments[0], PrimitiveObjectInspectorFactory.writableIntObjectInspector);
      outputOI = PrimitiveObjectInspectorFactory.writableIntObjectInspector;
      break;
    case LONG:
      inputConverter = ObjectInspectorConverters.getConverter(arguments[0], PrimitiveObjectInspectorFactory.writableLongObjectInspector);
      outputOI = PrimitiveObjectInspectorFactory.writableLongObjectInspector;
      break;
    case FLOAT:
    case STRING:
    case DOUBLE:
      inputConverter = ObjectInspectorConverters.getConverter(arguments[0], PrimitiveObjectInspectorFactory.writableDoubleObjectInspector);
      outputOI = PrimitiveObjectInspectorFactory.writableDoubleObjectInspector;
      break;
    case DECIMAL:
      outputOI = PrimitiveObjectInspectorFactory.getPrimitiveWritableObjectInspector(((PrimitiveObjectInspector) arguments[0]).getTypeInfo());
      inputConverter = ObjectInspectorConverters.getConverter(arguments[0], outputOI);
      break;
    default:
      throw new UDFArgumentException("ADD10 only takes SHORT/BYTE/INT/LONG/DOUBLE/FLOAT/STRING/DECIMAL types, got " + inputType);
  }
  return outputOI;
}
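For context, a hedged sketch of how the converter and output inspector prepared in initialize() above are typically consumed in a matching evaluate(). This is not the actual GenericUDFAdd10 implementation: it assumes the same inputType and inputConverter fields, IntWritable and LongWritable from org.apache.hadoop.io, DoubleWritable from org.apache.hadoop.hive.serde2.io, and it omits the DECIMAL branch for brevity.
@Override
public Object evaluate(DeferredObject[] arguments) throws HiveException {
  Object value = arguments[0].get();
  if (value == null) {
    return null;
  }
  switch (inputType) {
    case SHORT:
    case BYTE:
    case INT: {
      // The converter yields an IntWritable matching writableIntObjectInspector.
      IntWritable iw = (IntWritable) inputConverter.convert(value);
      return new IntWritable(iw.get() + 10);
    }
    case LONG: {
      LongWritable lw = (LongWritable) inputConverter.convert(value);
      return new LongWritable(lw.get() + 10L);
    }
    case FLOAT:
    case STRING:
    case DOUBLE: {
      DoubleWritable dw = (DoubleWritable) inputConverter.convert(value);
      return new DoubleWritable(dw.get() + 10.0);
    }
    default:
      // DECIMAL handling omitted in this sketch.
      throw new HiveException("Unexpected input type " + inputType);
  }
}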
Use of org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorFactory.getPrimitiveWritableObjectInspector in project hive by apache.
The class DynamicPartitionPruner, method prunePartitionSingleSource.
@VisibleForTesting
protected void prunePartitionSingleSource(String source, SourceInfo si) throws HiveException {
  if (si.skipPruning.get()) {
    // in this case we've determined that there's too much data
    // to prune dynamically.
    LOG.info("Skip pruning on " + source + ", column " + si.columnName);
    return;
  }
  Set<Object> values = si.values;
  String columnName = si.columnName;
  if (LOG.isDebugEnabled()) {
    StringBuilder sb = new StringBuilder("Pruning ");
    sb.append(columnName);
    sb.append(" with ");
    for (Object value : values) {
      sb.append(value == null ? null : value.toString());
      sb.append(", ");
    }
    LOG.debug(sb.toString());
  }
  ObjectInspector oi = PrimitiveObjectInspectorFactory.getPrimitiveWritableObjectInspector(TypeInfoFactory.getPrimitiveTypeInfo(si.columnType));
  Converter converter = ObjectInspectorConverters.getConverter(PrimitiveObjectInspectorFactory.javaStringObjectInspector, oi);
  StructObjectInspector soi = ObjectInspectorFactory.getStandardStructObjectInspector(Collections.singletonList(columnName), Collections.singletonList(oi));
  @SuppressWarnings("rawtypes")
  ExprNodeEvaluator eval = ExprNodeEvaluatorFactory.get(si.partKey);
  eval.initialize(soi);
  applyFilterToPartitions(converter, eval, columnName, values);
}
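A minimal standalone sketch of the conversion step used above, assuming a partition column of type "int" and the illustrative value "42" (class name PartitionValueConversionDemo is hypothetical): partition values arrive as Java strings and are converted to the writable representation of the column's declared type before the evaluator compares them.
import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorConverters;
import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorConverters.Converter;
import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorFactory;
import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory;
import org.apache.hadoop.io.IntWritable;

public class PartitionValueConversionDemo {
  public static void main(String[] args) {
    // Writable object inspector for the partition column's declared type.
    ObjectInspector oi = PrimitiveObjectInspectorFactory
        .getPrimitiveWritableObjectInspector(TypeInfoFactory.getPrimitiveTypeInfo("int"));
    // Converter from plain Java strings to that writable representation.
    Converter converter = ObjectInspectorConverters.getConverter(
        PrimitiveObjectInspectorFactory.javaStringObjectInspector, oi);
    IntWritable converted = (IntWritable) converter.convert("42");
    System.out.println(converted.get()); // prints 42
  }
}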
Use of org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorFactory.getPrimitiveWritableObjectInspector in project hive by apache.
The class TestGenericUDFFloor, method testChar.
@Test
public void testChar() throws HiveException {
  GenericUDFFloor udf = new GenericUDFFloor();
  HiveChar vc = new HiveChar("32300.004747", 12);
  HiveCharWritable input = new HiveCharWritable(vc);
  CharTypeInfo inputTypeInfo = TypeInfoFactory.getCharTypeInfo(12);
  ObjectInspector[] inputOIs = { PrimitiveObjectInspectorFactory.getPrimitiveWritableObjectInspector(inputTypeInfo) };
  DeferredObject[] args = { new DeferredJavaObject(input) };
  PrimitiveObjectInspector oi = (PrimitiveObjectInspector) udf.initialize(inputOIs);
  Assert.assertEquals(TypeInfoFactory.longTypeInfo, oi.getTypeInfo());
  LongWritable res = (LongWritable) udf.evaluate(args);
  Assert.assertEquals(32300L, res.get());
}
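A hedged, self-contained variant of the same invocation pattern outside JUnit (class name FloorUdfDemo and the double input value are illustrative, not from the Hive tests): initialize() is called with the input inspectors, then evaluate() with DeferredJavaObject-wrapped arguments.
import org.apache.hadoop.hive.ql.metadata.HiveException;
import org.apache.hadoop.hive.ql.udf.generic.GenericUDF.DeferredJavaObject;
import org.apache.hadoop.hive.ql.udf.generic.GenericUDF.DeferredObject;
import org.apache.hadoop.hive.ql.udf.generic.GenericUDFFloor;
import org.apache.hadoop.hive.serde2.io.DoubleWritable;
import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorFactory;
import org.apache.hadoop.io.LongWritable;

public class FloorUdfDemo {
  public static void main(String[] args) throws HiveException {
    GenericUDFFloor udf = new GenericUDFFloor();
    // Declare the single argument as a writable double.
    ObjectInspector[] inputOIs = { PrimitiveObjectInspectorFactory.writableDoubleObjectInspector };
    udf.initialize(inputOIs);
    // Wrap the concrete value the same way the test does.
    DeferredObject[] udfArgs = { new DeferredJavaObject(new DoubleWritable(32300.004747)) };
    LongWritable res = (LongWritable) udf.evaluate(udfArgs);
    System.out.println(res.get()); // prints 32300
  }
}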
Use of org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorFactory.getPrimitiveWritableObjectInspector in project hive by apache.
The class TestGenericUDFOPNegative, method testVarchar.
@Test
public void testVarchar() throws HiveException {
  GenericUDFOPNegative udf = new GenericUDFOPNegative();
  HiveVarchar vc = new HiveVarchar("32300.004747", 12);
  HiveVarcharWritable input = new HiveVarcharWritable(vc);
  VarcharTypeInfo inputTypeInfo = TypeInfoFactory.getVarcharTypeInfo(12);
  ObjectInspector[] inputOIs = { PrimitiveObjectInspectorFactory.getPrimitiveWritableObjectInspector(inputTypeInfo) };
  DeferredObject[] args = { new DeferredJavaObject(input) };
  PrimitiveObjectInspector oi = (PrimitiveObjectInspector) udf.initialize(inputOIs);
  Assert.assertEquals(TypeInfoFactory.doubleTypeInfo, oi.getTypeInfo());
  DoubleWritable res = (DoubleWritable) udf.evaluate(args);
  Assert.assertEquals(new Double(-32300.004747), new Double(res.get()));
}
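Both tests obtain the inspector via getPrimitiveWritableObjectInspector(typeInfo) rather than a static field because char and varchar are parameterized types whose length is carried in the TypeInfo. A small illustrative sketch of that point (class name QualifiedTypeOIDemo is hypothetical):
import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorFactory;
import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory;

public class QualifiedTypeOIDemo {
  public static void main(String[] args) {
    // The length parameter is part of the type, so the inspector is looked up
    // from the concrete TypeInfo instead of a shared static instance.
    ObjectInspector charOI = PrimitiveObjectInspectorFactory
        .getPrimitiveWritableObjectInspector(TypeInfoFactory.getCharTypeInfo(12));
    ObjectInspector varcharOI = PrimitiveObjectInspectorFactory
        .getPrimitiveWritableObjectInspector(TypeInfoFactory.getVarcharTypeInfo(12));
    System.out.println(charOI.getTypeName());    // char(12)
    System.out.println(varcharOI.getTypeName()); // varchar(12)
  }
}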