
Example 6 with DoubleColumnVector

Use of org.apache.hadoop.hive.ql.exec.vector.DoubleColumnVector in project hive by apache.

From the class TestVectorExpressionWriters, the method testSetterDouble:

private void testSetterDouble(TypeInfo type) throws HiveException {
    DoubleColumnVector dcv = VectorizedRowGroupGenUtil.generateDoubleColumnVector(true, false, this.vectorSize, new Random(10));
    dcv.isNull[2] = true;
    Object[] values = new Object[this.vectorSize];
    VectorExpressionWriter vew = getWriter(type);
    for (int i = 0; i < vectorSize; i++) {
        // setValue() should be able to handle null input
        values[i] = null;
        values[i] = vew.setValue(values[i], dcv, i);
        if (values[i] != null) {
            Writable expected = getWritableValue(type, dcv.vector[i]);
            Assert.assertEquals(expected, values[i]);
        } else {
            Assert.assertTrue(dcv.isNull[i]);
        }
    }
}
Also used : DoubleColumnVector(org.apache.hadoop.hive.ql.exec.vector.DoubleColumnVector) Random(java.util.Random) ByteWritable(org.apache.hadoop.hive.serde2.io.ByteWritable) Writable(org.apache.hadoop.io.Writable) LongWritable(org.apache.hadoop.io.LongWritable) BytesWritable(org.apache.hadoop.io.BytesWritable) TimestampWritable(org.apache.hadoop.hive.serde2.io.TimestampWritable) DoubleWritable(org.apache.hadoop.hive.serde2.io.DoubleWritable) ShortWritable(org.apache.hadoop.hive.serde2.io.ShortWritable) IntWritable(org.apache.hadoop.io.IntWritable) HiveVarcharWritable(org.apache.hadoop.hive.serde2.io.HiveVarcharWritable) BooleanWritable(org.apache.hadoop.io.BooleanWritable) HiveDecimalWritable(org.apache.hadoop.hive.serde2.io.HiveDecimalWritable) FloatWritable(org.apache.hadoop.io.FloatWritable)
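
For context, the null handling that testSetterDouble exercises follows the general ColumnVector contract: isNull[i] is only meaningful when noNulls is false, and the value slot of a null entry is undefined. A minimal, self-contained sketch of that contract (not part of the Hive test suite; the class name is made up for illustration):

import org.apache.hadoop.hive.ql.exec.vector.DoubleColumnVector;

public class NullConventionSketch {
    public static void main(String[] args) {
        DoubleColumnVector dcv = new DoubleColumnVector(4);
        dcv.vector[0] = 1.5;
        dcv.vector[1] = 2.5;
        dcv.vector[3] = 3.5;
        // entry 2 is null; its value slot may hold anything
        dcv.isNull[2] = true;
        // signal readers that isNull[] must be consulted
        dcv.noNulls = false;
        for (int i = 0; i < 4; i++) {
            boolean nullEntry = !dcv.noNulls && dcv.isNull[i];
            System.out.println(i + " -> " + (nullEntry ? "NULL" : dcv.vector[i]));
        }
    }
}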

Example 7 with DoubleColumnVector

Use of org.apache.hadoop.hive.ql.exec.vector.DoubleColumnVector in project hive by apache.

From the class TestVectorMathFunctions, the method testVectorRound:

@Test
public void testVectorRound() {
    VectorizedRowBatch b = getVectorizedRowBatchDoubleInDoubleOut();
    // round the values in column 0 and write the results into column 1
    VectorExpression expr = new FuncRoundDoubleToDouble(0, 1);
    DoubleColumnVector resultV = (DoubleColumnVector) b.cols[1];
    b.cols[0].noNulls = true;
    expr.evaluate(b);
    Assert.assertEquals(-2d, resultV.vector[0]);
    Assert.assertEquals(-1d, resultV.vector[1]);
    Assert.assertEquals(0d, resultV.vector[2]);
    Assert.assertEquals(0d, resultV.vector[3]);
    Assert.assertEquals(1d, resultV.vector[4]);
    Assert.assertEquals(1d, resultV.vector[5]);
    Assert.assertEquals(2d, resultV.vector[6]);
    // spot check null propagation
    b.cols[0].noNulls = false;
    b.cols[0].isNull[3] = true;
    resultV.noNulls = true;
    expr.evaluate(b);
    Assert.assertEquals(true, resultV.isNull[3]);
    Assert.assertEquals(false, resultV.noNulls);
    // check isRepeating propagation
    b.cols[0].isRepeating = true;
    resultV.isRepeating = false;
    expr.evaluate(b);
    Assert.assertEquals(-2d, resultV.vector[0]);
    Assert.assertEquals(true, resultV.isRepeating);
    resultV.isRepeating = false;
    b.cols[0].noNulls = true;
    expr.evaluate(b);
    Assert.assertEquals(-2d, resultV.vector[0]);
    Assert.assertEquals(true, resultV.isRepeating);
}
Also used : VectorizedRowBatch(org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch) DoubleColumnVector(org.apache.hadoop.hive.ql.exec.vector.DoubleColumnVector) FuncRoundDoubleToDouble(org.apache.hadoop.hive.ql.exec.vector.expressions.gen.FuncRoundDoubleToDouble) Test(org.junit.Test)
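
The helper getVectorizedRowBatchDoubleInDoubleOut() used by this and the following examples is not shown here. A rough sketch of the kind of batch it returns, with a double input column at index 0 and a double output column at index 1 (the helper name comes from the tests above, but this body and its placeholder input are assumptions):

import org.apache.hadoop.hive.ql.exec.vector.DoubleColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch;

static VectorizedRowBatch makeDoubleInDoubleOutBatch(double[] input) {
    // two columns: cols[0] holds the input, cols[1] receives the result
    VectorizedRowBatch b = new VectorizedRowBatch(2, input.length);
    DoubleColumnVector inV = new DoubleColumnVector(input.length);
    DoubleColumnVector outV = new DoubleColumnVector(input.length);
    for (int i = 0; i < input.length; i++) {
        inV.vector[i] = input[i];
    }
    // every input entry is valid and values differ row to row
    inV.noNulls = true;
    inV.isRepeating = false;
    b.cols[0] = inV;
    b.cols[1] = outV;
    b.size = input.length;
    return b;
}

With a batch shaped like this, expr.evaluate(b) writes its results into b.cols[1], which is exactly what testVectorRound, testVectorExp, and testVectorTan assert against.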

Example 8 with DoubleColumnVector

Use of org.apache.hadoop.hive.ql.exec.vector.DoubleColumnVector in project hive by apache.

From the class TestVectorMathFunctions, the method testVectorExp:

@Test
public void testVectorExp() {
    VectorizedRowBatch b = getVectorizedRowBatchDoubleInDoubleOut();
    DoubleColumnVector resultV = (DoubleColumnVector) b.cols[1];
    b.cols[0].noNulls = true;
    VectorExpression expr = new FuncExpDoubleToDouble(0, 1);
    expr.evaluate(b);
    Assert.assertEquals(Math.exp(0.5d), resultV.vector[4]);
}
Also used : FuncExpDoubleToDouble(org.apache.hadoop.hive.ql.exec.vector.expressions.gen.FuncExpDoubleToDouble) VectorizedRowBatch(org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch) DoubleColumnVector(org.apache.hadoop.hive.ql.exec.vector.DoubleColumnVector) Test(org.junit.Test)

Example 9 with DoubleColumnVector

Use of org.apache.hadoop.hive.ql.exec.vector.DoubleColumnVector in project hive by apache.

From the class TestVectorMathFunctions, the method testVectorTan:

@Test
public void testVectorTan() {
    VectorizedRowBatch b = getVectorizedRowBatchDoubleInDoubleOut();
    DoubleColumnVector resultV = (DoubleColumnVector) b.cols[1];
    b.cols[0].noNulls = true;
    VectorExpression expr = new FuncTanDoubleToDouble(0, 1);
    expr.evaluate(b);
    Assert.assertEquals(Math.tan(0.5d), resultV.vector[4]);
}
Also used : VectorizedRowBatch(org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch) FuncTanDoubleToDouble(org.apache.hadoop.hive.ql.exec.vector.expressions.gen.FuncTanDoubleToDouble) DoubleColumnVector(org.apache.hadoop.hive.ql.exec.vector.DoubleColumnVector) Test(org.junit.Test)

Example 10 with DoubleColumnVector

Use of org.apache.hadoop.hive.ql.exec.vector.DoubleColumnVector in project hive by apache.

From the class TestVectorScalarColArithmetic, the method testScalarLongDivide:

@Test
public void testScalarLongDivide() {
    VectorizedRowBatch batch = TestVectorArithmeticExpressions.getVectorizedRowBatch2LongInDoubleOut();
    // divide the scalar 100 by the values in column 0 and write the quotient into column 2
    LongScalarDivideLongColumn expr = new LongScalarDivideLongColumn(100, 0, 2);
    batch.cols[0].isNull[1] = true;
    batch.cols[0].noNulls = false;
    DoubleColumnVector out = (DoubleColumnVector) batch.cols[2];
    // set now so we can verify it changed
    out.noNulls = true;
    out.isRepeating = true;
    expr.evaluate(batch);
    // verify zero-divide result for position 0
    assertTrue(out.isNull[0]);
    assertTrue(Double.isNaN(out.vector[0]));
    // verify NULL output in entry 1 is correct
    assertTrue(out.isNull[1]);
    assertTrue(Double.isNaN(out.vector[1]));
    // check entries beyond 2nd one
    for (int i = 2; i != batch.size; i++) {
        assertTrue(equalsWithinTolerance(100d / (i * 37), out.vector[i]));
    }
    assertFalse(out.noNulls);
    assertFalse(out.isRepeating);
}
Also used : VectorizedRowBatch(org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch) TestVectorizedRowBatch(org.apache.hadoop.hive.ql.exec.vector.TestVectorizedRowBatch) DoubleColumnVector(org.apache.hadoop.hive.ql.exec.vector.DoubleColumnVector) Test(org.junit.Test)
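
The equalsWithinTolerance helper referenced above belongs to the test class and is not reproduced here. A plausible sketch of such a comparison; the epsilon value is an assumption, not taken from the Hive source:

// hypothetical tolerance-based double comparison; the epsilon is assumed
private static boolean equalsWithinTolerance(double expected, double actual) {
    final double EPSILON = 1e-6;
    return Math.abs(expected - actual) < EPSILON;
}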

Aggregations

DoubleColumnVector (org.apache.hadoop.hive.ql.exec.vector.DoubleColumnVector): 101 usages
VectorizedRowBatch (org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch): 58 usages
Test (org.junit.Test): 37 usages
LongColumnVector (org.apache.hadoop.hive.ql.exec.vector.LongColumnVector): 31 usages
BytesColumnVector (org.apache.hadoop.hive.ql.exec.vector.BytesColumnVector): 17 usages
DecimalColumnVector (org.apache.hadoop.hive.ql.exec.vector.DecimalColumnVector): 16 usages
TimestampColumnVector (org.apache.hadoop.hive.ql.exec.vector.TimestampColumnVector): 11 usages
ColumnVector (org.apache.hadoop.hive.ql.exec.vector.ColumnVector): 9 usages
VectorizedParquetRecordReader (org.apache.hadoop.hive.ql.io.parquet.vector.VectorizedParquetRecordReader): 9 usages
Configuration (org.apache.hadoop.conf.Configuration): 6 usages
Random (java.util.Random): 5 usages
DoubleWritable (org.apache.hadoop.hive.serde2.io.DoubleWritable): 5 usages
TestVectorizedRowBatch (org.apache.hadoop.hive.ql.exec.vector.TestVectorizedRowBatch): 4 usages
HiveDecimalWritable (org.apache.hadoop.hive.serde2.io.HiveDecimalWritable): 4 usages
Timestamp (java.sql.Timestamp): 3 usages
StructColumnVector (org.apache.hadoop.hive.ql.exec.vector.StructColumnVector): 3 usages
IntervalDayTimeColumnVector (org.apache.hadoop.hive.ql.exec.vector.IntervalDayTimeColumnVector): 2 usages
ListColumnVector (org.apache.hadoop.hive.ql.exec.vector.ListColumnVector): 2 usages
HiveException (org.apache.hadoop.hive.ql.metadata.HiveException): 2 usages
Output (org.apache.hadoop.hive.serde2.ByteStream.Output): 2 usages