use of org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch in project hive by apache.
the class TestVectorMathFunctions method getVectorizedRowBatchDoubleInDoubleOut.
public static VectorizedRowBatch getVectorizedRowBatchDoubleInDoubleOut() {
  VectorizedRowBatch batch = new VectorizedRowBatch(2);
  DoubleColumnVector inV = new DoubleColumnVector();
  DoubleColumnVector outV = new DoubleColumnVector();
  inV.vector[0] = -1.5d;
  inV.vector[1] = -0.5d;
  inV.vector[2] = -0.1d;
  inV.vector[3] = 0d;
  inV.vector[4] = 0.5d;
  inV.vector[5] = 0.7d;
  inV.vector[6] = 1.5d;
  inV.vector[7] = 1.2345678d;
  batch.cols[0] = inV;
  batch.cols[1] = outV;
  batch.size = 8;
  return batch;
}
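Tests that need to exercise the null path typically start from the same builder and mark a few input slots as null. The lines below are a minimal sketch, not part of TestVectorMathFunctions; they only touch the public noNulls/isNull fields of ColumnVector, and the chosen row index is illustrative.
  // Sketch: derive a null-bearing batch from the builder above (assumed caller code).
  VectorizedRowBatch batch = getVectorizedRowBatchDoubleInDoubleOut();
  DoubleColumnVector in = (DoubleColumnVector) batch.cols[0];
  in.noNulls = false;   // the column may now contain nulls
  in.isNull[3] = true;  // mark row 3 (the 0d entry) as SQL NULL
  // a vectorized expression evaluated over this batch is expected to propagate
  // the null into the output column's isNull array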
use of org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch in project hive by apache.
the class TestVectorMathFunctions method getVectorizedRowBatchTimestampInStringOut.
public static VectorizedRowBatch getVectorizedRowBatchTimestampInStringOut(long[] epochSecondValues, int[] nanoValues) {
  Random r = new Random(345);
  VectorizedRowBatch batch = new VectorizedRowBatch(2);
  batch.size = epochSecondValues.length;
  TimestampColumnVector inV = new TimestampColumnVector(batch.size);
  BytesColumnVector outV = new BytesColumnVector(batch.size);
  for (int i = 0; i < batch.size; i++) {
    // record each random timestamp in the caller's arrays so the test can
    // check the expression output against the generated values
    Timestamp randTimestamp = RandomTypeUtil.getRandTimestamp(r);
    epochSecondValues[i] = randTimestamp.toEpochSecond();
    nanoValues[i] = randTimestamp.getNanos();
    inV.set(i, randTimestamp.toSqlTimestamp());
  }
  batch.cols[0] = inV;
  batch.cols[1] = outV;
  return batch;
}
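A minimal sketch (assumed caller code, not from the Hive test) of how the out-parameter arrays line up with the populated TimestampColumnVector: time[] holds epoch milliseconds and nanos[] the sub-second part, so the seconds check below uses floor division.
  long[] epochSeconds = new long[16];
  int[] nanos = new int[16];
  VectorizedRowBatch b = getVectorizedRowBatchTimestampInStringOut(epochSeconds, nanos);
  TimestampColumnVector tsCol = (TimestampColumnVector) b.cols[0];
  for (int i = 0; i < b.size; i++) {
    // epoch seconds are the floor of the stored milliseconds
    Assert.assertEquals(epochSeconds[i], Math.floorDiv(tsCol.time[i], 1000L));
    Assert.assertEquals(nanos[i], tsCol.nanos[i]);
  }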
use of org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch in project hive by apache.
the class TestVectorMathFunctions method getVectorizedRowBatchStringInTimestampOutFormatted.
public static VectorizedRowBatch getVectorizedRowBatchStringInTimestampOutFormatted() {
  VectorizedRowBatch batch = new VectorizedRowBatch(2);
  BytesColumnVector inV = new BytesColumnVector();
  inV.initBuffer();
  inV.setVal(0, StandardCharsets.UTF_8.encode("2019-12-31 00:00:00.999999999").array());
  inV.setVal(1, StandardCharsets.UTF_8.encode("1776-07-04 17:07:06.177617761").array());
  inV.setVal(2, StandardCharsets.UTF_8.encode("2012-02-29 23:59:59.999999999").array());
  inV.setVal(3, StandardCharsets.UTF_8.encode("1580-08-08 00:00:00.0").array());
  inV.setVal(4, StandardCharsets.UTF_8.encode("0005-01-01 00:00:00.0").array());
  inV.setVal(5, StandardCharsets.UTF_8.encode("9999-12-31 23:59:59.999999999").array());
  batch.cols[0] = inV;
  batch.size = 6;
  return batch;
}
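Note that this builder only wires the input column; cols[1] is left for the caller to attach. The lines below are a wiring sketch only: CastStringToTimestamp and its (inputColumn, outputColumn) constructor are assumptions about Hive's vectorized cast expression, and the "Formatted" test presumably pairs the batch with a format-aware expression that is not shown on this page.
  VectorizedRowBatch b = getVectorizedRowBatchStringInTimestampOutFormatted();
  TimestampColumnVector out = new TimestampColumnVector(b.size);
  b.cols[1] = out;                                          // attach the output scratch column
  VectorExpression cast = new CastStringToTimestamp(0, 1);  // assumed expression and constructor
  cast.evaluate(b);
  // out.time / out.nanos should now hold the parsed instants; rows the expression
  // cannot parse are expected to be flagged through out.isNull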
use of org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch in project hive by apache.
the class TestVectorMathFunctions method testVectorPower.
@Test
public void testVectorPower() throws HiveException {
  VectorizedRowBatch b = getVectorizedRowBatchDoubleInDoubleOut();
  DoubleColumnVector resultV = (DoubleColumnVector) b.cols[1];
  b.cols[0].noNulls = true;
  VectorExpression expr = new FuncPowerDoubleToDouble(0, 2.0, 1);
  expr.evaluate(b);
  // row 4 of the input batch holds 0.5, so the squared result should be 0.25
  Assert.assertTrue(equalsWithinTolerance(0.5d * 0.5d, resultV.vector[4]));
}
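The single-row assertion above generalizes to the whole batch. A minimal self-contained sketch follows; the 1e-9 tolerance is an assumption, whereas the Hive test uses its own equalsWithinTolerance helper.
  VectorizedRowBatch b2 = getVectorizedRowBatchDoubleInDoubleOut();
  DoubleColumnVector in2 = (DoubleColumnVector) b2.cols[0];
  DoubleColumnVector out2 = (DoubleColumnVector) b2.cols[1];
  b2.cols[0].noNulls = true;
  new FuncPowerDoubleToDouble(0, 2.0, 1).evaluate(b2);
  for (int i = 0; i < b2.size; i++) {
    // every populated row of column 1 should hold column 0 squared
    Assert.assertEquals(Math.pow(in2.vector[i], 2.0), out2.vector[i], 1e-9);
  }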
use of org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch in project hive by apache.
the class TestVectorMathFunctions method testVectorLog10.
@Test
public void testVectorLog10() throws HiveException {
  // test double->double version
  VectorizedRowBatch b = getVectorizedRowBatchDoubleInDoubleOut();
  DoubleColumnVector resultV = (DoubleColumnVector) b.cols[1];
  b.cols[0].noNulls = true;
  VectorExpression expr = new FuncLog10DoubleToDouble(0, 1);
  expr.evaluate(b);
  Assert.assertTrue(equalsWithinTolerance(Math.log(0.5d) / Math.log(10), resultV.vector[4]));

  // test long->double version
  b = getVectorizedRowBatchLongInDoubleOut();
  resultV = (DoubleColumnVector) b.cols[1];
  b.cols[0].noNulls = true;
  expr = new FuncLog10LongToDouble(0, 1);
  expr.evaluate(b);
  Assert.assertEquals(Math.log(1) / Math.log(10), resultV.vector[3], Double.MIN_VALUE);
}
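getVectorizedRowBatchLongInDoubleOut is referenced above but not shown on this page. A plausible shape, modeled on getVectorizedRowBatchDoubleInDoubleOut, is sketched below; the exact input values are assumptions, except that index 3 must hold 1 for the Math.log(1) / Math.log(10) assertion to pass.
  // Assumed sketch of the builder referenced by testVectorLog10.
  public static VectorizedRowBatch getVectorizedRowBatchLongInDoubleOut() {
    VectorizedRowBatch batch = new VectorizedRowBatch(2);
    LongColumnVector inV = new LongColumnVector();
    DoubleColumnVector outV = new DoubleColumnVector();
    inV.vector[0] = -2;
    inV.vector[1] = -1;
    inV.vector[2] = 0;
    inV.vector[3] = 1;   // index 3 holds 1, matching the log10 assertion above
    inV.vector[4] = 2;
    batch.cols[0] = inV;
    batch.cols[1] = outV;
    batch.size = 5;
    return batch;
  }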