
Example 71 with DoubleColumnVector

use of org.apache.hadoop.hive.ql.exec.vector.DoubleColumnVector in project hive by apache.

the class TestVectorMathFunctions method testVectorATan.

@Test
public void testVectorATan() {
    VectorizedRowBatch b = getVectorizedRowBatchDoubleInDoubleOut();
    DoubleColumnVector resultV = (DoubleColumnVector) b.cols[1];
    b.cols[0].noNulls = true;
    VectorExpression expr = new FuncATanDoubleToDouble(0, 1);
    expr.evaluate(b);
    // Column 0 holds 0.5d at index 4; pass an explicit delta, since JUnit 4's
    // assertEquals(double, double) overload without a delta is deprecated and fails.
    Assert.assertEquals(Math.atan(0.5d), resultV.vector[4], Double.MIN_VALUE);
}
Also used : VectorizedRowBatch(org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch) DoubleColumnVector(org.apache.hadoop.hive.ql.exec.vector.DoubleColumnVector) FuncATanDoubleToDouble(org.apache.hadoop.hive.ql.exec.vector.expressions.gen.FuncATanDoubleToDouble) Test(org.junit.Test)
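
The helper getVectorizedRowBatchDoubleInDoubleOut is not shown on this page. A minimal sketch of what it could look like, assuming only what the assertion above requires: a two-column batch of DoubleColumnVectors whose input column holds 0.5d at index 4. All other values here are illustrative, not taken from the Hive source.

public static VectorizedRowBatch getVectorizedRowBatchDoubleInDoubleOut() {
    VectorizedRowBatch batch = new VectorizedRowBatch(2);
    DoubleColumnVector inV = new DoubleColumnVector();
    // Illustrative inputs; the test above only depends on index 4 being 0.5d.
    double[] inputs = { -1.5d, -0.5d, -0.1d, 0d, 0.5d, 0.7d, 1.5d };
    for (int i = 0; i < inputs.length; i++) {
        inV.vector[i] = inputs[i];
    }
    batch.cols[0] = inV;
    batch.cols[1] = new DoubleColumnVector();
    batch.size = inputs.length;
    return batch;
}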

Example 72 with DoubleColumnVector

use of org.apache.hadoop.hive.ql.exec.vector.DoubleColumnVector in project hive by apache.

the class TestVectorArithmeticExpressions method getVectorizedRowBatch2LongInDoubleOut.

public static VectorizedRowBatch getVectorizedRowBatch2LongInDoubleOut() {
    VectorizedRowBatch batch = new VectorizedRowBatch(3);
    // Two long input columns with identical contents (0, 37, 74, ...).
    LongColumnVector lcv = new LongColumnVector();
    for (int i = 0; i < VectorizedRowBatch.DEFAULT_SIZE; i++) {
        lcv.vector[i] = i * 37;
    }
    batch.cols[0] = lcv;
    LongColumnVector lcv2 = new LongColumnVector();
    for (int i = 0; i < VectorizedRowBatch.DEFAULT_SIZE; i++) {
        lcv2.vector[i] = i * 37;
    }
    batch.cols[1] = lcv2;
    // Double output column for the expression under test.
    batch.cols[2] = new DoubleColumnVector();
    batch.size = VectorizedRowBatch.DEFAULT_SIZE;
    return batch;
}
Also used : VectorizedRowBatch(org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch) TestVectorizedRowBatch(org.apache.hadoop.hive.ql.exec.vector.TestVectorizedRowBatch) DoubleColumnVector(org.apache.hadoop.hive.ql.exec.vector.DoubleColumnVector) LongColumnVector(org.apache.hadoop.hive.ql.exec.vector.LongColumnVector)
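
A hedged usage sketch for the batch above: org.apache.hadoop.hive.ql.exec.vector.expressions.LongColDivideLongColumn is one of the hand-written long-in/double-out expressions, so it exercises exactly this column layout (divide cols[0] by cols[1] into cols[2]). Row 0 divides 0 by 0; depending on the Hive version the vectorized divide either marks that row null or yields NaN, so the check below only looks at row 1.

VectorizedRowBatch batch = getVectorizedRowBatch2LongInDoubleOut();
VectorExpression expr = new LongColDivideLongColumn(0, 1, 2);
expr.evaluate(batch);
DoubleColumnVector out = (DoubleColumnVector) batch.cols[2];
// Every non-zero row divides i*37 by i*37, so the quotient is 1.0.
Assert.assertEquals(1.0d, out.vector[1], 0);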

Example 73 with DoubleColumnVector

use of org.apache.hadoop.hive.ql.exec.vector.DoubleColumnVector in project hive by apache.

the class TestVectorExpressionWriters method testWriterDouble.

private void testWriterDouble(TypeInfo type) throws HiveException {
    // Random double column with nulls enabled; the fixed seed keeps the test deterministic.
    DoubleColumnVector dcv = VectorizedRowGroupGenUtil.generateDoubleColumnVector(
        true, false, this.vectorSize, new Random(10));
    // Explicitly null out row 2 so the null path below is always exercised.
    dcv.isNull[2] = true;
    VectorExpressionWriter vew = getWriter(type);
    for (int i = 0; i < vectorSize; i++) {
        Writable w = (Writable) vew.writeValue(dcv, i);
        if (w != null) {
            // Non-null rows must produce the Writable matching the declared type.
            Writable expected = getWritableValue(type, dcv.vector[i]);
            Assert.assertEquals(expected, w);
        } else {
            // A null Writable is only acceptable for rows flagged as null.
            Assert.assertTrue(dcv.isNull[i]);
        }
    }
}
Also used : DoubleColumnVector(org.apache.hadoop.hive.ql.exec.vector.DoubleColumnVector) Random(java.util.Random) ByteWritable(org.apache.hadoop.hive.serde2.io.ByteWritable) Writable(org.apache.hadoop.io.Writable) LongWritable(org.apache.hadoop.io.LongWritable) BytesWritable(org.apache.hadoop.io.BytesWritable) TimestampWritable(org.apache.hadoop.hive.serde2.io.TimestampWritable) DoubleWritable(org.apache.hadoop.hive.serde2.io.DoubleWritable) ShortWritable(org.apache.hadoop.hive.serde2.io.ShortWritable) IntWritable(org.apache.hadoop.io.IntWritable) HiveVarcharWritable(org.apache.hadoop.hive.serde2.io.HiveVarcharWritable) BooleanWritable(org.apache.hadoop.io.BooleanWritable) HiveDecimalWritable(org.apache.hadoop.hive.serde2.io.HiveDecimalWritable) FloatWritable(org.apache.hadoop.io.FloatWritable)
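
The getWritableValue helper is also defined elsewhere in the test class. A hypothetical sketch of its double branch, assuming the standard TypeInfoFactory singletons; the point is that the expected Writable tracks the declared Hive type, while the vector itself always stores plain doubles.

private Writable getWritableValue(TypeInfo type, double value) {
    // Hypothetical: map the declared type to the matching Writable wrapper.
    if (type.equals(TypeInfoFactory.floatTypeInfo)) {
        return new FloatWritable((float) value);
    } else if (type.equals(TypeInfoFactory.doubleTypeInfo)) {
        return new DoubleWritable(value);
    }
    return null;
}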

Example 74 with DoubleColumnVector

use of org.apache.hadoop.hive.ql.exec.vector.DoubleColumnVector in project hive by apache.

the class TestVectorConditionalExpressions method testDoubleScalarScalarIfExpr.

@Test
public void testDoubleScalarScalarIfExpr() {
    VectorizedRowBatch batch = getBatch1Long3DoubleVectors();
    // IF(cols[0] != 0, 100.0, 200.0), with the result written into cols[3].
    VectorExpression expr = new IfExprDoubleScalarDoubleScalar(0, 100.0d, 200.0d, 3);
    DoubleColumnVector r = (DoubleColumnVector) batch.cols[3];
    expr.evaluate(batch);
    // Rows 0-1 carry a false condition, rows 2-3 a true one.
    assertEquals(200d, r.vector[0], 0);
    assertEquals(200d, r.vector[1], 0);
    assertEquals(100d, r.vector[2], 0);
    assertEquals(100d, r.vector[3], 0);
}
Also used : VectorizedRowBatch(org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch) IfExprDoubleScalarDoubleScalar(org.apache.hadoop.hive.ql.exec.vector.expressions.gen.IfExprDoubleScalarDoubleScalar) DoubleColumnVector(org.apache.hadoop.hive.ql.exec.vector.DoubleColumnVector) Test(org.junit.Test)
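
getBatch1Long3DoubleVectors is defined elsewhere in the test class. A hypothetical sketch of the batch the assertions imply: one long column acting as the IF condition (0 is false, non-zero is true, so rows 0-1 take the 200.0 branch and rows 2-3 the 100.0 branch) plus three double columns, the last of which receives the result.

public static VectorizedRowBatch getBatch1Long3DoubleVectors() {
    VectorizedRowBatch batch = new VectorizedRowBatch(4);
    LongColumnVector cond = new LongColumnVector();
    // Rows 0-1 false, rows 2-3 true, matching the asserts above.
    cond.vector[0] = 0;
    cond.vector[1] = 0;
    cond.vector[2] = 1;
    cond.vector[3] = 1;
    batch.cols[0] = cond;
    batch.cols[1] = new DoubleColumnVector();
    batch.cols[2] = new DoubleColumnVector();
    batch.cols[3] = new DoubleColumnVector();
    batch.size = 4;
    return batch;
}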

Example 75 with DoubleColumnVector

use of org.apache.hadoop.hive.ql.exec.vector.DoubleColumnVector in project druid by druid-io.

the class DruidOrcInputFormatTest method makeOrcFile.

private File makeOrcFile() throws IOException {
    final File dir = temporaryFolder.newFolder();
    final File testOrc = new File(dir, "test.orc");
    TypeDescription schema = TypeDescription.createStruct()
        .addField("timestamp", TypeDescription.createString())
        .addField("col1", TypeDescription.createString())
        .addField("col2", TypeDescription.createList(TypeDescription.createString()))
        .addField("val1", TypeDescription.createFloat());
    Configuration conf = new Configuration();
    Writer writer = OrcFile.createWriter(new Path(testOrc.getPath()),
        OrcFile.writerOptions(conf)
            .setSchema(schema)
            .stripeSize(100000)
            .bufferSize(10000)
            .compress(CompressionKind.ZLIB)
            .version(OrcFile.Version.CURRENT));
    // Write a single row.
    VectorizedRowBatch batch = schema.createRowBatch();
    batch.size = 1;
    ((BytesColumnVector) batch.cols[0]).setRef(0, timestamp.getBytes(), 0, timestamp.length());
    ((BytesColumnVector) batch.cols[1]).setRef(0, col1.getBytes(), 0, col1.length());
    // A list column stores its elements in one shared child vector; offsets
    // and lengths describe each row's slice of that child.
    ListColumnVector listColumnVector = (ListColumnVector) batch.cols[2];
    listColumnVector.childCount = col2.length;
    listColumnVector.offsets[0] = 0;
    listColumnVector.lengths[0] = col2.length;
    for (int idx = 0; idx < col2.length; idx++) {
        ((BytesColumnVector) listColumnVector.child).setRef(idx, col2[idx].getBytes(), 0, col2[idx].length());
    }
    // ORC FLOAT columns are backed by DoubleColumnVector in memory.
    ((DoubleColumnVector) batch.cols[3]).vector[0] = val1;
    writer.addRowBatch(batch);
    writer.close();
    return testOrc;
}
Also used : Path(org.apache.hadoop.fs.Path) VectorizedRowBatch(org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch) DoubleColumnVector(org.apache.hadoop.hive.ql.exec.vector.DoubleColumnVector) Configuration(org.apache.hadoop.conf.Configuration) ListColumnVector(org.apache.hadoop.hive.ql.exec.vector.ListColumnVector) BytesColumnVector(org.apache.hadoop.hive.ql.exec.vector.BytesColumnVector) TypeDescription(org.apache.orc.TypeDescription) OrcFile(org.apache.orc.OrcFile) File(java.io.File) Writer(org.apache.orc.Writer)
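
A hedged read-back sketch for the file created above (not part of the original test), using org.apache.orc.Reader and org.apache.orc.RecordReader. It shows that an ORC FLOAT column comes back through the same DoubleColumnVector on the read path, with the float widened to double in memory.

Reader reader = OrcFile.createReader(new Path(testOrc.getPath()),
    OrcFile.readerOptions(conf));
RecordReader rows = reader.rows();
VectorizedRowBatch readBatch = reader.getSchema().createRowBatch();
while (rows.nextBatch(readBatch)) {
    // cols[3] is the FLOAT field "val1", surfaced as a DoubleColumnVector.
    DoubleColumnVector vals = (DoubleColumnVector) readBatch.cols[3];
    System.out.println(vals.vector[0]);
}
rows.close();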

Aggregations

DoubleColumnVector (org.apache.hadoop.hive.ql.exec.vector.DoubleColumnVector): 104
VectorizedRowBatch (org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch): 59
Test (org.junit.Test): 37
LongColumnVector (org.apache.hadoop.hive.ql.exec.vector.LongColumnVector): 33
BytesColumnVector (org.apache.hadoop.hive.ql.exec.vector.BytesColumnVector): 18
DecimalColumnVector (org.apache.hadoop.hive.ql.exec.vector.DecimalColumnVector): 17
TimestampColumnVector (org.apache.hadoop.hive.ql.exec.vector.TimestampColumnVector): 13
ColumnVector (org.apache.hadoop.hive.ql.exec.vector.ColumnVector): 10
VectorizedParquetRecordReader (org.apache.hadoop.hive.ql.io.parquet.vector.VectorizedParquetRecordReader): 9
Configuration (org.apache.hadoop.conf.Configuration): 7
Random (java.util.Random): 5
DoubleWritable (org.apache.hadoop.hive.serde2.io.DoubleWritable): 5
Timestamp (java.sql.Timestamp): 4
TestVectorizedRowBatch (org.apache.hadoop.hive.ql.exec.vector.TestVectorizedRowBatch): 4
HiveDecimalWritable (org.apache.hadoop.hive.serde2.io.HiveDecimalWritable): 4
StructColumnVector (org.apache.hadoop.hive.ql.exec.vector.StructColumnVector): 3
HiveDecimal (org.apache.hadoop.hive.common.type.HiveDecimal): 2
IntervalDayTimeColumnVector (org.apache.hadoop.hive.ql.exec.vector.IntervalDayTimeColumnVector): 2
ListColumnVector (org.apache.hadoop.hive.ql.exec.vector.ListColumnVector): 2
HiveException (org.apache.hadoop.hive.ql.metadata.HiveException): 2