
Example 46 with VectorizedRowBatch

use of org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch in project hive by apache.

the class TestVectorBetweenIn method doVectorBetweenInTest.

private boolean doVectorBetweenInTest(TypeInfo typeInfo, BetweenInVariation betweenInVariation,
        List<Object> compareList, List<String> columns, String[] columnNames, TypeInfo[] typeInfos,
        DataTypePhysicalVariation[] dataTypePhysicalVariations, List<ExprNodeDesc> children,
        GenericUDF udf, ExprNodeGenericFuncDesc exprDesc, BetweenInTestMode betweenInTestMode,
        VectorRandomBatchSource batchSource, ObjectInspector objectInspector,
        TypeInfo outputTypeInfo, Object[] resultObjects) throws Exception {
    HiveConf hiveConf = new HiveConf();
    if (betweenInTestMode == BetweenInTestMode.ADAPTOR) {
        hiveConf.setBoolVar(HiveConf.ConfVars.HIVE_TEST_VECTOR_ADAPTOR_OVERRIDE, true);
    }
    final boolean isFilter = betweenInVariation.isFilter;
    VectorizationContext vectorizationContext = new VectorizationContext("name", columns,
        Arrays.asList(typeInfos), Arrays.asList(dataTypePhysicalVariations), hiveConf);
    VectorExpression vectorExpression = vectorizationContext.getVectorExpression(exprDesc,
        isFilter ? VectorExpressionDescriptor.Mode.FILTER : VectorExpressionDescriptor.Mode.PROJECTION);
    vectorExpression.transientInit(hiveConf);
    if (betweenInTestMode == BetweenInTestMode.VECTOR_EXPRESSION) {
        String vecExprString = vectorExpression.toString();
        if (vectorExpression instanceof VectorUDFAdaptor) {
            System.out.println("*NO NATIVE VECTOR EXPRESSION* typeInfo " + typeInfo.toString() + " betweenInTestMode " + betweenInTestMode + " betweenInVariation " + betweenInVariation + " vectorExpression " + vecExprString);
        } else if (dataTypePhysicalVariations[0] == DataTypePhysicalVariation.DECIMAL_64) {
            final String nameToCheck = vectorExpression.getClass().getSimpleName();
            if (!nameToCheck.contains("Decimal64")) {
                System.out.println("*EXPECTED DECIMAL_64 VECTOR EXPRESSION* typeInfo " + typeInfo.toString() + " betweenInTestMode " + betweenInTestMode + " betweenInVariation " + betweenInVariation + " vectorExpression " + vecExprString);
            }
        }
    }
    // System.out.println("*VECTOR EXPRESSION* " + vectorExpression.getClass().getSimpleName());
    /*
    System.out.println(
        "*DEBUG* typeInfo " + typeInfo.toString() +
        " betweenInTestMode " + betweenInTestMode +
        " betweenInVariation " + betweenInVariation +
        " vectorExpression " + vectorExpression.toString());
    */
    VectorRandomRowSource rowSource = batchSource.getRowSource();
    VectorizedRowBatchCtx batchContext = new VectorizedRowBatchCtx(
        columnNames,
        rowSource.typeInfos(),
        rowSource.dataTypePhysicalVariations(),
        /* dataColumnNums */ null,
        /* partitionColumnCount */ 0,
        /* virtualColumnCount */ 0,
        /* neededVirtualColumns */ null,
        vectorizationContext.getScratchColumnTypeNames(),
        vectorizationContext.getScratchDataTypePhysicalVariations());
    VectorizedRowBatch batch = batchContext.createVectorizedRowBatch();
    VectorExtractRow resultVectorExtractRow = null;
    Object[] scratchRow = null;
    if (!isFilter) {
        resultVectorExtractRow = new VectorExtractRow();
        final int outputColumnNum = vectorExpression.getOutputColumnNum();
        resultVectorExtractRow.init(new TypeInfo[] { outputTypeInfo }, new int[] { outputColumnNum });
        scratchRow = new Object[1];
    }
    boolean copySelectedInUse = false;
    int[] copySelected = new int[VectorizedRowBatch.DEFAULT_SIZE];
    batchSource.resetBatchIteration();
    int rowIndex = 0;
    while (true) {
        if (!batchSource.fillNextBatch(batch)) {
            break;
        }
        final int originalBatchSize = batch.size;
        if (isFilter) {
            copySelectedInUse = batch.selectedInUse;
            if (batch.selectedInUse) {
                System.arraycopy(batch.selected, 0, copySelected, 0, originalBatchSize);
            }
        }
        // In filter mode, the batch size can be made smaller.
        vectorExpression.evaluate(batch);
        if (!isFilter) {
            extractResultObjects(batch, rowIndex, resultVectorExtractRow, scratchRow,
                objectInspector, resultObjects);
        } else {
            final int currentBatchSize = batch.size;
            if (copySelectedInUse && batch.selectedInUse) {
                int selectIndex = 0;
                for (int i = 0; i < originalBatchSize; i++) {
                    final int originalBatchIndex = copySelected[i];
                    final boolean booleanResult;
                    if (selectIndex < currentBatchSize && batch.selected[selectIndex] == originalBatchIndex) {
                        booleanResult = true;
                        selectIndex++;
                    } else {
                        booleanResult = false;
                    }
                    resultObjects[rowIndex + i] = new BooleanWritable(booleanResult);
                }
            } else if (batch.selectedInUse) {
                int selectIndex = 0;
                for (int i = 0; i < originalBatchSize; i++) {
                    final boolean booleanResult;
                    if (selectIndex < currentBatchSize && batch.selected[selectIndex] == i) {
                        booleanResult = true;
                        selectIndex++;
                    } else {
                        booleanResult = false;
                    }
                    resultObjects[rowIndex + i] = new BooleanWritable(booleanResult);
                }
            } else if (currentBatchSize == 0) {
                // Whole batch got zapped.
                for (int i = 0; i < originalBatchSize; i++) {
                    resultObjects[rowIndex + i] = new BooleanWritable(false);
                }
            } else {
                // Every row kept.
                for (int i = 0; i < originalBatchSize; i++) {
                    resultObjects[rowIndex + i] = new BooleanWritable(true);
                }
            }
        }
        rowIndex += originalBatchSize;
    }
    return true;
}
Also used : VectorizationContext(org.apache.hadoop.hive.ql.exec.vector.VectorizationContext) VectorUDFAdaptor(org.apache.hadoop.hive.ql.exec.vector.udf.VectorUDFAdaptor) VectorExtractRow(org.apache.hadoop.hive.ql.exec.vector.VectorExtractRow) VectorizedRowBatchCtx(org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatchCtx) VectorizedRowBatch(org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch) BooleanWritable(org.apache.hadoop.io.BooleanWritable) HiveConf(org.apache.hadoop.hive.conf.HiveConf) VectorExpression(org.apache.hadoop.hive.ql.exec.vector.expressions.VectorExpression) VectorRandomRowSource(org.apache.hadoop.hive.ql.exec.vector.VectorRandomRowSource)
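Below is a minimal, self-contained sketch of the filter contract the filter branch above decodes: a filter-mode expression compacts the batch in place, leaving the surviving row indices in batch.selected. The class name FilterBatchSketch and the literal values are illustrative, not taken from the test; it assumes hive-exec on the classpath.

import org.apache.hadoop.hive.ql.exec.vector.LongColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch;
import org.apache.hadoop.hive.ql.exec.vector.expressions.gen.FilterLongColGreaterLongScalar;

public class FilterBatchSketch {
    public static void main(String[] args) throws Exception {
        VectorizedRowBatch batch = new VectorizedRowBatch(1, 4);
        LongColumnVector col = new LongColumnVector(4);
        col.vector[0] = 5;
        col.vector[1] = -1;
        col.vector[2] = 7;
        col.vector[3] = 0;
        batch.cols[0] = col;
        batch.size = 4;
        // Keep rows where column 0 > 0. Filter expressions rewrite the batch
        // in place: survivors are listed in batch.selected, selectedInUse
        // flips to true, and batch.size shrinks to the survivor count.
        new FilterLongColGreaterLongScalar(0, 0L).evaluate(batch);
        // Prints size=2 selectedInUse=true, then the kept rows 0 and 2.
        System.out.println("size=" + batch.size + " selectedInUse=" + batch.selectedInUse);
        for (int i = 0; i < batch.size; i++) {
            System.out.println("kept original row " + batch.selected[i]);
        }
    }
}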

Example 47 with VectorizedRowBatch

use of org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch in project hive by apache.

the class TestMurmurHashExpression method testMurmurHashIntColIntCol.

@Test
public void testMurmurHashIntColIntCol() throws HiveException {
    LongColumnVector cvInt1 = (LongColumnVector) ColumnVectorGenUtil.generateColumnVector(TypeInfoFactory.getPrimitiveTypeInfo("int"), false, false, SIZE, rand);
    LongColumnVector cvInt2 = (LongColumnVector) ColumnVectorGenUtil.generateColumnVector(TypeInfoFactory.getPrimitiveTypeInfo("int"), false, false, SIZE, rand);
    VectorizedRowBatch vrb = new VectorizedRowBatch(3, SIZE);
    vrb.cols[0] = cvInt1;
    vrb.cols[1] = cvInt2;
    vrb.cols[2] = new LongColumnVector(SIZE);
    new MurmurHashIntColIntCol(0, 1, 2).evaluate(vrb);
    // the inputs were non-repeating, so the output must not repeat
    Assert.assertFalse(vrb.cols[2].isRepeating);
    for (int i = 0; i < SIZE; i++) {
        Assert.assertEquals(
            ObjectInspectorUtils.getBucketHashCode(
                new Object[] { new LongWritable(cvInt1.vector[i]), new LongWritable(cvInt2.vector[i]) },
                new ObjectInspector[] { PrimitiveObjectInspectorFactory.writableLongObjectInspector,
                    PrimitiveObjectInspectorFactory.writableLongObjectInspector }),
            ((LongColumnVector) vrb.cols[2]).vector[i]);
    }
}
Also used : VectorizedRowBatch(org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch) ObjectInspector(org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector) LongWritable(org.apache.hadoop.io.LongWritable) LongColumnVector(org.apache.hadoop.hive.ql.exec.vector.LongColumnVector) Test(org.junit.Test)
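The assertion above compares each vectorized hash against the row-mode reference, ObjectInspectorUtils.getBucketHashCode. A minimal sketch of that reference call for one hand-picked (int, int) row; the class name and literal values are illustrative:

import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorUtils;
import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorFactory;
import org.apache.hadoop.io.LongWritable;

public class BucketHashSketch {
    public static void main(String[] args) {
        // Row-mode reference value for the row (42, 7); the vectorized
        // MurmurHashIntColIntCol output for that row must equal this.
        int hash = ObjectInspectorUtils.getBucketHashCode(
            new Object[] { new LongWritable(42), new LongWritable(7) },
            new ObjectInspector[] {
                PrimitiveObjectInspectorFactory.writableLongObjectInspector,
                PrimitiveObjectInspectorFactory.writableLongObjectInspector });
        System.out.println("bucket hash = " + hash);
    }
}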

Example 48 with VectorizedRowBatch

use of org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch in project hive by apache.

the class TestMurmurHashExpression method testMurmurHashRepeating.

@Test
public void testMurmurHashRepeating() throws HiveException {
    BytesColumnVector cvString1 = (BytesColumnVector) ColumnVectorGenUtil.generateColumnVector(TypeInfoFactory.getPrimitiveTypeInfo("string"), false, true, SIZE, rand);
    BytesColumnVector cvString2 = (BytesColumnVector) ColumnVectorGenUtil.generateColumnVector(TypeInfoFactory.getPrimitiveTypeInfo("string"), false, true, SIZE, rand);
    VectorizedRowBatch vrb = new VectorizedRowBatch(3, SIZE);
    vrb.cols[0] = cvString1;
    vrb.cols[1] = cvString2;
    vrb.cols[2] = new LongColumnVector(SIZE);
    new MurmurHashStringColStringCol(0, 1, 2).evaluate(vrb);
    // both inputs were repeating, so the output repeats too
    Assert.assertTrue(vrb.cols[2].isRepeating);
    Text t1 = new Text();
    t1.set(cvString1.vector[0], cvString1.start[0], cvString1.length[0]);
    Text t2 = new Text();
    t2.set(cvString2.vector[0], cvString2.start[0], cvString2.length[0]);
    // the output's first element is the hash of the first input elements
    Assert.assertEquals(
        ObjectInspectorUtils.getBucketHashCode(new Object[] { t1, t2 },
            new ObjectInspector[] { PrimitiveObjectInspectorFactory.writableStringObjectInspector,
                PrimitiveObjectInspectorFactory.writableStringObjectInspector }),
        ((LongColumnVector) vrb.cols[2]).vector[0]);
}
Also used : VectorizedRowBatch(org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch) ObjectInspector(org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector) BytesColumnVector(org.apache.hadoop.hive.ql.exec.vector.BytesColumnVector) Text(org.apache.hadoop.io.Text) LongColumnVector(org.apache.hadoop.hive.ql.exec.vector.LongColumnVector) Test(org.junit.Test)
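Both inputs repeat, so the expression hashes only element 0 and marks the output repeating. A minimal hand-built sketch of the isRepeating contract that makes this legal (class name and literal are illustrative):

import java.nio.charset.StandardCharsets;

import org.apache.hadoop.hive.ql.exec.vector.BytesColumnVector;

public class RepeatingVectorSketch {
    public static void main(String[] args) {
        BytesColumnVector cv = new BytesColumnVector(1024);
        byte[] bytes = "same-value".getBytes(StandardCharsets.UTF_8);
        cv.setRef(0, bytes, 0, bytes.length); // only slot 0 is populated
        cv.isRepeating = true;                // declares: every row equals row 0
        // Readers must redirect any row index to 0 while isRepeating is set.
        int row = 37;
        int effective = cv.isRepeating ? 0 : row;
        System.out.println(new String(cv.vector[effective], cv.start[effective],
            cv.length[effective], StandardCharsets.UTF_8)); // prints: same-value
    }
}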

Example 49 with VectorizedRowBatch

use of org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch in project hive by apache.

the class TestMurmurHashExpression method testMurmurHashRepeatingBothNulls.

@Test
public void testMurmurHashRepeatingBothNulls() throws HiveException {
    BytesColumnVector cvString1 = (BytesColumnVector) ColumnVectorGenUtil.generateColumnVector(TypeInfoFactory.getPrimitiveTypeInfo("string"), false, true, SIZE, rand);
    BytesColumnVector cvString2 = (BytesColumnVector) ColumnVectorGenUtil.generateColumnVector(TypeInfoFactory.getPrimitiveTypeInfo("string"), false, true, SIZE, rand);
    cvString1.isNull[0] = true;
    cvString2.isNull[0] = true;
    VectorizedRowBatch vrb = new VectorizedRowBatch(3, SIZE);
    vrb.cols[0] = cvString1;
    vrb.cols[1] = cvString2;
    vrb.cols[2] = new LongColumnVector(SIZE);
    // fake output value to test short-circuiting
    ((LongColumnVector) vrb.cols[2]).vector[1] = 1234;
    new MurmurHashStringColStringCol(0, 1, 2).evaluate(vrb);
    // both inputs were repeating, so the output repeats too
    Assert.assertTrue(vrb.cols[2].isRepeating);
    // the output's first element is 0, the hash of two null elements
    Assert.assertEquals(0, ((LongColumnVector) vrb.cols[2]).vector[0]);
    // with isRepeating set, the vectorized logic must not touch any element other than the 0th
    Assert.assertEquals(1234, ((LongColumnVector) vrb.cols[2]).vector[1]);
}
Also used : VectorizedRowBatch(org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch) BytesColumnVector(org.apache.hadoop.hive.ql.exec.vector.BytesColumnVector) LongColumnVector(org.apache.hadoop.hive.ql.exec.vector.LongColumnVector) Test(org.junit.Test)
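The test flags the repeated element null through isNull[0]. As a reminder of the broader null contract (my reading of ColumnVector, not code from the test): readers consult isNull only when noNulls is false, and the value slot of a null row carries no meaning. A minimal sketch:

import org.apache.hadoop.hive.ql.exec.vector.LongColumnVector;

public class NullFlagSketch {
    public static void main(String[] args) {
        LongColumnVector cv = new LongColumnVector(1024);
        cv.vector[0] = 99;    // the value slot is undefined for a null row
        cv.isNull[0] = true;  // mark row 0 as null...
        cv.noNulls = false;   // ...and tell readers to consult isNull at all
        boolean rowIsNull = !cv.noNulls && cv.isNull[0];
        System.out.println("row 0 null? " + rowIsNull); // prints: row 0 null? true
    }
}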

Example 50 with VectorizedRowBatch

use of org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch in project hive by apache.

the class TestMurmurHashExpression method testMurmurHashStringColIntCol.

@Test
public void testMurmurHashStringColIntCol() throws HiveException {
    BytesColumnVector cvString = (BytesColumnVector) ColumnVectorGenUtil.generateColumnVector(TypeInfoFactory.getPrimitiveTypeInfo("string"), false, false, SIZE, rand);
    LongColumnVector cvInt = (LongColumnVector) ColumnVectorGenUtil.generateColumnVector(TypeInfoFactory.getPrimitiveTypeInfo("int"), false, false, SIZE, rand);
    VectorizedRowBatch vrb = new VectorizedRowBatch(3, SIZE);
    vrb.cols[0] = cvString;
    vrb.cols[1] = cvInt;
    vrb.cols[2] = new LongColumnVector(SIZE);
    new MurmurHashStringColIntCol(0, 1, 2).evaluate(vrb);
    for (int i = 0; i < SIZE; i++) {
        Text t = new Text();
        t.set(cvString.vector[i], cvString.start[i], cvString.length[i]);
        Assert.assertEquals(
            ObjectInspectorUtils.getBucketHashCode(new Object[] { t, new LongWritable(cvInt.vector[i]) },
                new ObjectInspector[] { PrimitiveObjectInspectorFactory.writableStringObjectInspector,
                    PrimitiveObjectInspectorFactory.writableLongObjectInspector }),
            ((LongColumnVector) vrb.cols[2]).vector[i]);
    }
}
Also used : VectorizedRowBatch(org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch) ObjectInspector(org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector) BytesColumnVector(org.apache.hadoop.hive.ql.exec.vector.BytesColumnVector) Text(org.apache.hadoop.io.Text) LongWritable(org.apache.hadoop.io.LongWritable) LongColumnVector(org.apache.hadoop.hive.ql.exec.vector.LongColumnVector) Test(org.junit.Test)
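The loop above rebuilds each string cell with Text.set over the (vector[i], start[i], length[i]) triple. A minimal sketch of that cell layout, hand-populated instead of generated by ColumnVectorGenUtil (class name and literal are illustrative):

import java.nio.charset.StandardCharsets;

import org.apache.hadoop.hive.ql.exec.vector.BytesColumnVector;
import org.apache.hadoop.io.Text;

public class BytesCellSketch {
    public static void main(String[] args) {
        BytesColumnVector cv = new BytesColumnVector(1024);
        byte[] row0 = "hello".getBytes(StandardCharsets.UTF_8);
        cv.setRef(0, row0, 0, row0.length); // store a reference, no copy
        // Each cell is the (vector[i], start[i], length[i]) slice, exactly
        // what the test loads into a Text before hashing.
        Text t = new Text();
        t.set(cv.vector[0], cv.start[0], cv.length[0]);
        System.out.println(t); // prints: hello
    }
}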

Aggregations

VectorizedRowBatch (org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch): 401 usages
Test (org.junit.Test): 214 usages
LongColumnVector (org.apache.hadoop.hive.ql.exec.vector.LongColumnVector): 157 usages
BytesColumnVector (org.apache.hadoop.hive.ql.exec.vector.BytesColumnVector): 98 usages
TestVectorizedRowBatch (org.apache.hadoop.hive.ql.exec.vector.TestVectorizedRowBatch): 83 usages
DoubleColumnVector (org.apache.hadoop.hive.ql.exec.vector.DoubleColumnVector): 64 usages
DecimalColumnVector (org.apache.hadoop.hive.ql.exec.vector.DecimalColumnVector): 40 usages
TimestampColumnVector (org.apache.hadoop.hive.ql.exec.vector.TimestampColumnVector): 32 usages
VectorExpression (org.apache.hadoop.hive.ql.exec.vector.expressions.VectorExpression): 30 usages
HiveException (org.apache.hadoop.hive.ql.metadata.HiveException): 28 usages
VectorizedParquetRecordReader (org.apache.hadoop.hive.ql.io.parquet.vector.VectorizedParquetRecordReader): 26 usages
Configuration (org.apache.hadoop.conf.Configuration): 23 usages
IOException (java.io.IOException): 20 usages
HiveConf (org.apache.hadoop.hive.conf.HiveConf): 20 usages
VectorExtractRow (org.apache.hadoop.hive.ql.exec.vector.VectorExtractRow): 19 usages
HiveDecimal (org.apache.hadoop.hive.common.type.HiveDecimal): 18 usages
VectorizationContext (org.apache.hadoop.hive.ql.exec.vector.VectorizationContext): 18 usages
Timestamp (java.sql.Timestamp): 17 usages
VectorUDFAdaptor (org.apache.hadoop.hive.ql.exec.vector.udf.VectorUDFAdaptor): 16 usages
VectorizedRowBatchCtx (org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatchCtx): 15 usages