Use of org.apache.hadoop.hive.ql.exec.vector.DoubleColumnVector in project hive by apache.
In the class TestVectorMathFunctions, method testVectorATan:
@Test
public void testVectorATan() {
  VectorizedRowBatch b = getVectorizedRowBatchDoubleInDoubleOut();
  DoubleColumnVector resultV = (DoubleColumnVector) b.cols[1];
  b.cols[0].noNulls = true;
  VectorExpression expr = new FuncATanDoubleToDouble(0, 1);
  expr.evaluate(b);
  Assert.assertEquals(Math.atan(0.5d), resultV.vector[4], Double.MIN_VALUE);
}
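The helper getVectorizedRowBatchDoubleInDoubleOut is not reproduced in this listing. A minimal sketch of what it could look like, assuming a two-column batch whose input column holds 0.5d at index 4 so that the assertion above is meaningful (the values and batch size are illustrative, not the original Hive source):

public static VectorizedRowBatch getVectorizedRowBatchDoubleInDoubleOut() {
  // Column 0 is the double input, column 1 receives the double result.
  VectorizedRowBatch batch = new VectorizedRowBatch(2);
  DoubleColumnVector inV = new DoubleColumnVector();
  DoubleColumnVector outV = new DoubleColumnVector();
  // Illustrative values; index 4 is 0.5d to match Math.atan(0.5d) in the test.
  inV.vector[0] = -2d;
  inV.vector[1] = -1d;
  inV.vector[2] = 0d;
  inV.vector[3] = 1d;
  inV.vector[4] = 0.5d;
  batch.cols[0] = inV;
  batch.cols[1] = outV;
  batch.size = 5;
  return batch;
}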
Use of org.apache.hadoop.hive.ql.exec.vector.DoubleColumnVector in project hive by apache.
In the class TestVectorArithmeticExpressions, method getVectorizedRowBatch2LongInDoubleOut:
public static VectorizedRowBatch getVectorizedRowBatch2LongInDoubleOut() {
  VectorizedRowBatch batch = new VectorizedRowBatch(3);
  LongColumnVector lcv, lcv2;
  lcv = new LongColumnVector();
  for (int i = 0; i < VectorizedRowBatch.DEFAULT_SIZE; i++) {
    lcv.vector[i] = i * 37;
  }
  batch.cols[0] = lcv;
  lcv2 = new LongColumnVector();
  batch.cols[1] = lcv2;
  for (int i = 0; i < VectorizedRowBatch.DEFAULT_SIZE; i++) {
    lcv2.vector[i] = i * 37;
  }
  batch.cols[2] = new DoubleColumnVector();
  batch.size = VectorizedRowBatch.DEFAULT_SIZE;
  return batch;
}
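A batch built this way is typically handed to a vector expression that reads the two long columns and writes the double column. As a hedged usage sketch (the choice of LongColDivideLongColumn and its constructor argument order are assumptions based on Hive's vectorized expression classes):

// Sketch: evaluate a two-long-in, double-out expression over the batch above.
VectorizedRowBatch batch = getVectorizedRowBatch2LongInDoubleOut();
// Divide column 0 by column 1, writing the double result into column 2.
VectorExpression expr = new LongColDivideLongColumn(0, 1, 2);
expr.evaluate(batch);
DoubleColumnVector out = (DoubleColumnVector) batch.cols[2];
// For every row i > 0 the two inputs are equal (i * 37), so out.vector[i] is 1.0.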
Use of org.apache.hadoop.hive.ql.exec.vector.DoubleColumnVector in project hive by apache.
In the class TestVectorExpressionWriters, method testWriterDouble:
private void testWriterDouble(TypeInfo type) throws HiveException {
  DoubleColumnVector dcv = VectorizedRowGroupGenUtil.generateDoubleColumnVector(true, false, this.vectorSize, new Random(10));
  dcv.isNull[2] = true;
  VectorExpressionWriter vew = getWriter(type);
  for (int i = 0; i < vectorSize; i++) {
    Writable w = (Writable) vew.writeValue(dcv, i);
    if (w != null) {
      Writable expected = getWritableValue(type, dcv.vector[i]);
      Assert.assertEquals(expected, w);
    } else {
      Assert.assertTrue(dcv.isNull[i]);
    }
  }
}
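The getWritableValue helper is not shown above. A hypothetical sketch of how it might map a primitive double to the expected Writable for the type under test (an assumption for illustration, not the original test code):

// Hypothetical helper: pick the Writable that corresponds to the primitive type being tested.
private Writable getWritableValue(TypeInfo type, double value) {
  if (type.equals(TypeInfoFactory.floatTypeInfo)) {
    return new FloatWritable((float) value);
  } else if (type.equals(TypeInfoFactory.doubleTypeInfo)) {
    return new DoubleWritable(value);
  }
  return null;
}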
Use of org.apache.hadoop.hive.ql.exec.vector.DoubleColumnVector in project hive by apache.
In the class TestVectorConditionalExpressions, method testDoubleScalarScalarIfExpr:
@Test
public void testDoubleScalarScalarIfExpr() {
  VectorizedRowBatch batch = getBatch1Long3DoubleVectors();
  VectorExpression expr = new IfExprDoubleScalarDoubleScalar(0, 100.0d, 200.0d, 3);
  DoubleColumnVector r = (DoubleColumnVector) batch.cols[3];
  expr.evaluate(batch);
  assertEquals(true, 200d == r.vector[0]);
  assertEquals(true, 200d == r.vector[1]);
  assertEquals(true, 100d == r.vector[2]);
  assertEquals(true, 100d == r.vector[3]);
}
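The helper getBatch1Long3DoubleVectors is not included in this listing. A hypothetical sketch consistent with the assertions above, assuming column 0 is the long condition column (0 = false, non-zero = true) and columns 1 through 3 are double columns:

public static VectorizedRowBatch getBatch1Long3DoubleVectors() {
  VectorizedRowBatch batch = new VectorizedRowBatch(4);
  LongColumnVector cond = new LongColumnVector();
  // Rows 0 and 1 take the "else" branch (200.0), rows 2 and 3 the "then" branch (100.0).
  cond.vector[0] = 0;
  cond.vector[1] = 0;
  cond.vector[2] = 1;
  cond.vector[3] = 1;
  batch.cols[0] = cond;
  batch.cols[1] = new DoubleColumnVector();
  batch.cols[2] = new DoubleColumnVector();
  batch.cols[3] = new DoubleColumnVector();
  batch.size = 4;
  return batch;
}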
Use of org.apache.hadoop.hive.ql.exec.vector.DoubleColumnVector in project druid by druid-io.
In the class DruidOrcInputFormatTest, method makeOrcFile:
private File makeOrcFile() throws IOException {
  final File dir = temporaryFolder.newFolder();
  final File testOrc = new File(dir, "test.orc");
  TypeDescription schema = TypeDescription.createStruct()
      .addField("timestamp", TypeDescription.createString())
      .addField("col1", TypeDescription.createString())
      .addField("col2", TypeDescription.createList(TypeDescription.createString()))
      .addField("val1", TypeDescription.createFloat());
  Configuration conf = new Configuration();
  Writer writer = OrcFile.createWriter(
      new Path(testOrc.getPath()),
      OrcFile.writerOptions(conf)
          .setSchema(schema)
          .stripeSize(100000)
          .bufferSize(10000)
          .compress(CompressionKind.ZLIB)
          .version(OrcFile.Version.CURRENT));
  VectorizedRowBatch batch = schema.createRowBatch();
  batch.size = 1;
  ((BytesColumnVector) batch.cols[0]).setRef(0, timestamp.getBytes(), 0, timestamp.length());
  ((BytesColumnVector) batch.cols[1]).setRef(0, col1.getBytes(), 0, col1.length());
  ListColumnVector listColumnVector = (ListColumnVector) batch.cols[2];
  listColumnVector.childCount = col2.length;
  listColumnVector.lengths[0] = 3;
  for (int idx = 0; idx < col2.length; idx++) {
    ((BytesColumnVector) listColumnVector.child).setRef(idx, col2[idx].getBytes(), 0, col2[idx].length());
  }
  ((DoubleColumnVector) batch.cols[3]).vector[0] = val1;
  writer.addRowBatch(batch);
  writer.close();
  return testOrc;
}
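To confirm the file round-trips, the written batch can be read back with the ORC reader API. A short usage sketch (not part of the original Druid test):

Reader reader = OrcFile.createReader(new Path(testOrc.getPath()), OrcFile.readerOptions(conf));
RecordReader rows = reader.rows();
VectorizedRowBatch readBatch = reader.getSchema().createRowBatch();
while (rows.nextBatch(readBatch)) {
  // Column 3 ("val1") is a float column, which ORC backs with a DoubleColumnVector.
  double roundTripped = ((DoubleColumnVector) readBatch.cols[3]).vector[0];
}
rows.close();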