Use of org.apache.hadoop.io.IntWritable in project hive by apache.
In class GenericUDFTrunc, method evaluateNumber:
private Object evaluateNumber(DeferredObject[] arguments) throws HiveException, UDFArgumentTypeException {
  if (arguments[0] == null) {
    return null;
  }
  Object input = arguments[0].get();
  if (input == null) {
    return null;
  }
  if (arguments.length == 2 && arguments[1] != null && arguments[1].get() != null && !inputSacleConst) {
    Object scaleObj = null;
    switch (inputScaleOI.getPrimitiveCategory()) {
    case BYTE:
      scaleObj = byteConverter.convert(arguments[1].get());
      scale = ((ByteWritable) scaleObj).get();
      break;
    case SHORT:
      scaleObj = shortConverter.convert(arguments[1].get());
      scale = ((ShortWritable) scaleObj).get();
      break;
    case INT:
      scaleObj = intConverter.convert(arguments[1].get());
      scale = ((IntWritable) scaleObj).get();
      break;
    case LONG:
      scaleObj = longConverter.convert(arguments[1].get());
      long l = ((LongWritable) scaleObj).get();
      if (l < Integer.MIN_VALUE || l > Integer.MAX_VALUE) {
        throw new UDFArgumentException(getFuncName().toUpperCase() + " scale argument out of allowed range");
      }
      scale = (int) l;
      break;
    default:
      break;
    }
  }
  switch (inputType1) {
  case VOID:
    return null;
  case DECIMAL:
    HiveDecimalWritable decimalWritable = (HiveDecimalWritable) inputOI.getPrimitiveWritableObject(input);
    HiveDecimal dec = trunc(decimalWritable.getHiveDecimal(), scale);
    if (dec == null) {
      return null;
    }
    return new HiveDecimalWritable(dec);
  case BYTE:
    ByteWritable byteWritable = (ByteWritable) inputOI.getPrimitiveWritableObject(input);
    if (scale >= 0) {
      return byteWritable;
    } else {
      return new ByteWritable((byte) trunc(byteWritable.get(), scale));
    }
  case SHORT:
    ShortWritable shortWritable = (ShortWritable) inputOI.getPrimitiveWritableObject(input);
    if (scale >= 0) {
      return shortWritable;
    } else {
      return new ShortWritable((short) trunc(shortWritable.get(), scale));
    }
  case INT:
    IntWritable intWritable = (IntWritable) inputOI.getPrimitiveWritableObject(input);
    if (scale >= 0) {
      return intWritable;
    } else {
      return new IntWritable((int) trunc(intWritable.get(), scale));
    }
  case LONG:
    LongWritable longWritable = (LongWritable) inputOI.getPrimitiveWritableObject(input);
    if (scale >= 0) {
      return longWritable;
    } else {
      return new LongWritable(trunc(longWritable.get(), scale));
    }
  case FLOAT:
    float f = ((FloatWritable) inputOI.getPrimitiveWritableObject(input)).get();
    return new FloatWritable((float) trunc(f, scale));
  case DOUBLE:
    return trunc(((DoubleWritable) inputOI.getPrimitiveWritableObject(input)), scale);
  default:
    throw new UDFArgumentTypeException(0, "Only numeric or string group data types are allowed for TRUNC function. Got " + inputType1.name());
  }
}
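The overloaded trunc(...) helpers called above are not reproduced on this page. As a rough sketch of the arithmetic they are expected to perform for numeric inputs (an assumption based on SQL TRUNC semantics, not the Hive implementation), truncation toward zero at a given scale can be expressed with java.math.BigDecimal:

import java.math.BigDecimal;
import java.math.RoundingMode;

public class TruncSketch {
  // Assumed semantics: drop digits below 10^(-scale), rounding toward zero.
  static long truncLong(long value, int scale) {
    return BigDecimal.valueOf(value).setScale(scale, RoundingMode.DOWN).longValue();
  }

  static double truncDouble(double value, int scale) {
    return BigDecimal.valueOf(value).setScale(scale, RoundingMode.DOWN).doubleValue();
  }

  public static void main(String[] args) {
    System.out.println(truncLong(789L, -1));  // 780
    System.out.println(truncDouble(5.37, 1)); // 5.3
  }
}

This also illustrates why the integral cases above return the input unchanged when scale >= 0: truncating an integer at a non-negative scale cannot remove any digits.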
Use of org.apache.hadoop.io.IntWritable in project hive by apache.
In class GenericUDFElt, method evaluate:
@Override
public Object evaluate(DeferredObject[] arguments) throws HiveException {
  IntWritable intWritable = (IntWritable) converters[0].convert(arguments[0].get());
  if (intWritable == null) {
    return null;
  }
  int index = intWritable.get();
  if (index <= 0 || index >= arguments.length) {
    return null;
  }
  return converters[index].convert(arguments[index].get());
}
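In SQL terms, ELT(n, v1, v2, ...) returns the n-th value and NULL when the index is out of range. A minimal plain-Java analogue of the bounds handling above (a sketch, not the Hive converter machinery) could look like this:

public class EltSketch {
  // Mirrors the index handling in GenericUDFElt.evaluate:
  // args[0] is the 1-based index, args[1..] are the candidate values.
  static Object elt(Object... args) {
    if (args.length == 0 || args[0] == null) {
      return null;
    }
    int index = ((Number) args[0]).intValue();
    // The index must point at one of the value arguments.
    if (index <= 0 || index >= args.length) {
      return null;
    }
    return args[index];
  }

  public static void main(String[] args) {
    System.out.println(elt(2, "hive", "hadoop")); // hadoop
    System.out.println(elt(5, "hive", "hadoop")); // null
  }
}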
Use of org.apache.hadoop.io.IntWritable in project hive by apache.
In class TestVectorDateExpressions, method compareToUDFMonthDate:
private void compareToUDFMonthDate(long t, int y) {
  UDFMonth udf = new UDFMonth();
  TimestampWritable tsw = toTimestampWritable(t);
  IntWritable res = udf.evaluate(tsw);
  Assert.assertEquals(res.get(), y);
}
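The helper asserts that UDFMonth extracts the expected month y from the timestamp t. As a rough cross-check of what that expected value would be for an epoch-millisecond input (plain JDK code, not the Hive API; the UTC zone is an assumption, since UDFMonth's time-zone handling may differ), one could compute:

import java.time.Instant;
import java.time.ZoneOffset;

public class MonthSketch {
  // Computes the 1-based month of an epoch-millisecond timestamp in UTC.
  static int expectedMonth(long epochMillis) {
    return Instant.ofEpochMilli(epochMillis).atZone(ZoneOffset.UTC).getMonthValue();
  }

  public static void main(String[] args) {
    System.out.println(expectedMonth(0L)); // 1 (January 1970, UTC)
  }
}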
Use of org.apache.hadoop.io.IntWritable in project hive by apache.
In class TestRCFile, method testSimpleReadAndWrite:
@Test
public void testSimpleReadAndWrite() throws IOException, SerDeException {
  cleanup();
  byte[][] record_1 = { "123".getBytes("UTF-8"), "456".getBytes("UTF-8"), "789".getBytes("UTF-8"),
      "1000".getBytes("UTF-8"), "5.3".getBytes("UTF-8"), "hive and hadoop".getBytes("UTF-8"),
      new byte[0], "NULL".getBytes("UTF-8") };
  byte[][] record_2 = { "100".getBytes("UTF-8"), "200".getBytes("UTF-8"), "123".getBytes("UTF-8"),
      "1000".getBytes("UTF-8"), "5.3".getBytes("UTF-8"), "hive and hadoop".getBytes("UTF-8"),
      new byte[0], "NULL".getBytes("UTF-8") };
  RCFileOutputFormat.setColumnNumber(conf, expectedFieldsData.length);
  RCFile.Writer writer = new RCFile.Writer(fs, conf, file, null,
      RCFile.createMetadata(new Text("apple"), new Text("block"), new Text("cat"), new Text("dog")),
      new DefaultCodec());
  BytesRefArrayWritable bytes = new BytesRefArrayWritable(record_1.length);
  for (int i = 0; i < record_1.length; i++) {
    BytesRefWritable cu = new BytesRefWritable(record_1[i], 0, record_1[i].length);
    bytes.set(i, cu);
  }
  writer.append(bytes);
  bytes.clear();
  for (int i = 0; i < record_2.length; i++) {
    BytesRefWritable cu = new BytesRefWritable(record_2[i], 0, record_2[i].length);
    bytes.set(i, cu);
  }
  writer.append(bytes);
  writer.close();
  Object[] expectedRecord_1 = { new ByteWritable((byte) 123), new ShortWritable((short) 456),
      new IntWritable(789), new LongWritable(1000), new DoubleWritable(5.3),
      new Text("hive and hadoop"), null, null };
  Object[] expectedRecord_2 = { new ByteWritable((byte) 100), new ShortWritable((short) 200),
      new IntWritable(123), new LongWritable(1000), new DoubleWritable(5.3),
      new Text("hive and hadoop"), null, null };
  RCFile.Reader reader = new RCFile.Reader(fs, file, conf);
  assertEquals(new Text("block"), reader.getMetadata().get(new Text("apple")));
  assertEquals(new Text("block"), reader.getMetadataValueOf(new Text("apple")));
  assertEquals(new Text("dog"), reader.getMetadataValueOf(new Text("cat")));
  LongWritable rowID = new LongWritable();
  for (int i = 0; i < 2; i++) {
    reader.next(rowID);
    BytesRefArrayWritable cols = new BytesRefArrayWritable();
    reader.getCurrentRow(cols);
    cols.resetValid(8);
    Object row = serDe.deserialize(cols);
    StructObjectInspector oi = (StructObjectInspector) serDe.getObjectInspector();
    List<? extends StructField> fieldRefs = oi.getAllStructFieldRefs();
    assertEquals("Field size should be 8", 8, fieldRefs.size());
    for (int j = 0; j < fieldRefs.size(); j++) {
      Object fieldData = oi.getStructFieldData(row, fieldRefs.get(j));
      Object standardWritableData = ObjectInspectorUtils.copyToStandardObject(fieldData,
          fieldRefs.get(j).getFieldObjectInspector(), ObjectInspectorCopyOption.WRITABLE);
      if (i == 0) {
        assertEquals("Field " + i, standardWritableData, expectedRecord_1[j]);
      } else {
        assertEquals("Field " + i, standardWritableData, expectedRecord_2[j]);
      }
    }
  }
  reader.close();
}
Use of org.apache.hadoop.io.IntWritable in project hive by apache.
In class TestAbstractParquetMapInspector, method testHashMap:
@Test
public void testHashMap() {
  final Map<Writable, Writable> map = new HashMap<Writable, Writable>();
  map.put(new IntWritable(0), new IntWritable(1));
  map.put(new IntWritable(2), new IntWritable(3));
  map.put(new IntWritable(4), new IntWritable(5));
  map.put(new IntWritable(6), new IntWritable(7));
  assertEquals("Wrong size", 4, inspector.getMapSize(map));
  assertEquals("Wrong result of inspection", map, inspector.getMap(map));
}
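For comparison, a similar check can be written against Hive's standard map object inspector rather than the Parquet-specific inspector under test. The following is a sketch that assumes only the ObjectInspectorFactory and PrimitiveObjectInspectorFactory entry points:

import java.util.HashMap;
import java.util.Map;

import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorFactory;
import org.apache.hadoop.hive.serde2.objectinspector.StandardMapObjectInspector;
import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorFactory;
import org.apache.hadoop.io.IntWritable;

public class MapInspectorSketch {
  public static void main(String[] args) {
    // Standard inspector over Map<IntWritable, IntWritable>.
    StandardMapObjectInspector oi = ObjectInspectorFactory.getStandardMapObjectInspector(
        PrimitiveObjectInspectorFactory.writableIntObjectInspector,
        PrimitiveObjectInspectorFactory.writableIntObjectInspector);

    Map<IntWritable, IntWritable> map = new HashMap<IntWritable, IntWritable>();
    map.put(new IntWritable(0), new IntWritable(1));
    map.put(new IntWritable(2), new IntWritable(3));

    System.out.println(oi.getMapSize(map));                              // 2
    System.out.println(oi.getMapValueElement(map, new IntWritable(2)));  // 3
  }
}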