Use of org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch in project hive by apache.
From the class TestVectorScalarColArithmetic, method testLongScalarDivide.
@Test
public void testLongScalarDivide() {
  VectorizedRowBatch batch = TestVectorArithmeticExpressions.getVectorizedRowBatch2LongInDoubleOut();
  LongColDivideLongScalar expr = new LongColDivideLongScalar(0, 100, 2);
  batch.cols[0].isNull[0] = true;
  batch.cols[0].noNulls = false;
  DoubleColumnVector out = (DoubleColumnVector) batch.cols[2];
  // set now so we can verify it changed
  out.noNulls = true;
  out.isRepeating = true;
  expr.evaluate(batch);
  // verify NULL output in entry 0 is correct
  assertFalse(out.noNulls);
  assertTrue(out.isNull[0]);
  assertTrue(Double.isNaN(out.vector[0]));
  // check entries beyond the first one
  for (int i = 1; i != batch.size; i++) {
    assertTrue(equalsWithinTolerance((i * 37) / 100d, out.vector[i]));
  }
  assertFalse(out.isRepeating);
}
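The loop above compares doubles through an equalsWithinTolerance helper that is not part of this excerpt. A minimal sketch, assuming a small absolute tolerance is what the test intends:

// Hypothetical helper (not shown in this excerpt): treat two doubles as equal
// when they differ by less than a small absolute tolerance.
private boolean equalsWithinTolerance(double a, double b) {
  return Math.abs(a - b) < 0.000001d;
}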
Use of org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch in project hive by apache.
From the class TestVectorScalarColArithmetic, method getVectorizedRowBatchSingleLongVector.
private VectorizedRowBatch getVectorizedRowBatchSingleLongVector(int size) {
  VectorizedRowBatch batch = new VectorizedRowBatch(2, size);
  LongColumnVector lcv = new LongColumnVector(size);
  for (int i = 0; i < size; i++) {
    lcv.vector[i] = i * 37;
  }
  batch.cols[0] = lcv;
  batch.cols[1] = new LongColumnVector(size);
  batch.size = size;
  return batch;
}
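As a hypothetical usage sketch (not part of the original test class), a batch built by this helper could be fed to a generated column-scalar expression such as LongColAddLongScalar, writing the result into the spare column 1:

// Hypothetical usage: add the scalar 100 to column 0, output into column 1.
VectorizedRowBatch batch = getVectorizedRowBatchSingleLongVector(VectorizedRowBatch.DEFAULT_SIZE);
LongColAddLongScalar add = new LongColAddLongScalar(0, 100, 1);
add.evaluate(batch);
// Entry i of column 1 should now hold i * 37 + 100.
assertEquals(137L, ((LongColumnVector) batch.cols[1]).vector[1]);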
Use of org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch in project hive by apache.
From the class TestVectorMathFunctions, method getVectorizedRowBatchTimestampInLongOut.
public static VectorizedRowBatch getVectorizedRowBatchTimestampInLongOut(long[] longValues) {
  Random r = new Random(345);
  VectorizedRowBatch batch = new VectorizedRowBatch(2);
  TimestampColumnVector inV = new TimestampColumnVector(longValues.length);
  LongColumnVector outV = new LongColumnVector(longValues.length);
  for (int i = 0; i < longValues.length; i++) {
    Timestamp randTimestamp = RandomTypeUtil.getRandTimestamp(r);
    longValues[i] = TimestampWritable.getLong(randTimestamp);
    // store the timestamp at index i so it lines up with the recorded long value
    inV.set(i, randTimestamp);
  }
  batch.cols[0] = inV;
  batch.cols[1] = outV;
  batch.size = longValues.length;
  return batch;
}
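A hypothetical caller-side check (not in the Hive source) can confirm that the recorded longValues line up with what was stored in the timestamp column, assuming TimestampColumnVector.asScratchTimestamp is used to read entries back:

// Hypothetical verification sketch for the helper above.
long[] values = new long[VectorizedRowBatch.DEFAULT_SIZE];
VectorizedRowBatch batch = getVectorizedRowBatchTimestampInLongOut(values);
TimestampColumnVector in = (TimestampColumnVector) batch.cols[0];
for (int i = 0; i < batch.size; i++) {
  Assert.assertEquals(values[i], TimestampWritable.getLong(in.asScratchTimestamp(i)));
}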
Use of org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch in project hive by apache.
From the class TestVectorMathFunctions, method testVectorSqrt.
@Test
public void testVectorSqrt() {
  VectorizedRowBatch b = getVectorizedRowBatchDoubleInDoubleOut();
  DoubleColumnVector resultV = (DoubleColumnVector) b.cols[1];
  b.cols[0].noNulls = true;
  VectorExpression expr = new FuncSqrtDoubleToDouble(0, 1);
  expr.evaluate(b);
  Assert.assertEquals(Math.sqrt(0.5d), resultV.vector[4], Double.MIN_VALUE);
}
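The helper getVectorizedRowBatchDoubleInDoubleOut is not reproduced in this excerpt. A minimal sketch that is consistent with the assertions in these tests (entry 4 of the input column holds 0.5d; the other values are arbitrary):

// Hypothetical sketch of the missing helper: column 0 is the double input,
// column 1 is scratch space for the double output.
public static VectorizedRowBatch getVectorizedRowBatchDoubleInDoubleOut() {
  VectorizedRowBatch batch = new VectorizedRowBatch(2);
  DoubleColumnVector inV = new DoubleColumnVector();
  DoubleColumnVector outV = new DoubleColumnVector();
  inV.vector[0] = -1.5d;
  inV.vector[1] = -0.5d;
  inV.vector[2] = -0.1d;
  inV.vector[3] = 0d;
  inV.vector[4] = 0.5d;
  batch.cols[0] = inV;
  batch.cols[1] = outV;
  batch.size = 5;
  return batch;
}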
Use of org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch in project hive by apache.
From the class TestVectorMathFunctions, method testVectorLog10.
@Test
public void testVectorLog10() {
  // test double->double version
  VectorizedRowBatch b = getVectorizedRowBatchDoubleInDoubleOut();
  DoubleColumnVector resultV = (DoubleColumnVector) b.cols[1];
  b.cols[0].noNulls = true;
  VectorExpression expr = new FuncLog10DoubleToDouble(0, 1);
  expr.evaluate(b);
  Assert.assertTrue(equalsWithinTolerance(Math.log(0.5d) / Math.log(10), resultV.vector[4]));
  // test long->double version
  b = getVectorizedRowBatchLongInDoubleOut();
  resultV = (DoubleColumnVector) b.cols[1];
  b.cols[0].noNulls = true;
  expr = new FuncLog10LongToDouble(0, 1);
  expr.evaluate(b);
  Assert.assertEquals(Math.log(1) / Math.log(10), resultV.vector[3], Double.MIN_VALUE);
}
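Similarly, getVectorizedRowBatchLongInDoubleOut is not shown here. A minimal sketch consistent with the long->double assertion above (entry 3 of the input column holds 1, so its log10 is 0):

// Hypothetical sketch of the missing helper: column 0 is the long input,
// column 1 is scratch space for the double output.
public static VectorizedRowBatch getVectorizedRowBatchLongInDoubleOut() {
  VectorizedRowBatch batch = new VectorizedRowBatch(2);
  LongColumnVector inV = new LongColumnVector();
  DoubleColumnVector outV = new DoubleColumnVector();
  inV.vector[0] = -2;
  inV.vector[1] = -1;
  inV.vector[2] = 0;
  inV.vector[3] = 1;
  inV.vector[4] = 2;
  batch.cols[0] = inV;
  batch.cols[1] = outV;
  batch.size = 5;
  return batch;
}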