Example 6 with VectorizedRowBatchCtx

Use of org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatchCtx in project hive by apache.

The class TestVectorBetweenIn, method doVectorBetweenInTest:

private boolean doVectorBetweenInTest(TypeInfo typeInfo, BetweenInVariation betweenInVariation, List<Object> compareList, List<String> columns, String[] columnNames, TypeInfo[] typeInfos, DataTypePhysicalVariation[] dataTypePhysicalVariations, List<ExprNodeDesc> children, GenericUDF udf, ExprNodeGenericFuncDesc exprDesc, BetweenInTestMode betweenInTestMode, VectorRandomBatchSource batchSource, ObjectInspector objectInspector, TypeInfo outputTypeInfo, Object[] resultObjects) throws Exception {
    HiveConf hiveConf = new HiveConf();
    if (betweenInTestMode == BetweenInTestMode.ADAPTOR) {
        hiveConf.setBoolVar(HiveConf.ConfVars.HIVE_TEST_VECTOR_ADAPTOR_OVERRIDE, true);
    }
    final boolean isFilter = betweenInVariation.isFilter;
    VectorizationContext vectorizationContext = new VectorizationContext("name", columns, Arrays.asList(typeInfos), Arrays.asList(dataTypePhysicalVariations), hiveConf);
    VectorExpression vectorExpression = vectorizationContext.getVectorExpression(exprDesc, (isFilter ? VectorExpressionDescriptor.Mode.FILTER : VectorExpressionDescriptor.Mode.PROJECTION));
    vectorExpression.transientInit(hiveConf);
    if (betweenInTestMode == BetweenInTestMode.VECTOR_EXPRESSION) {
        String vecExprString = vectorExpression.toString();
        if (vectorExpression instanceof VectorUDFAdaptor) {
            System.out.println("*NO NATIVE VECTOR EXPRESSION* typeInfo " + typeInfo.toString() + " betweenInTestMode " + betweenInTestMode + " betweenInVariation " + betweenInVariation + " vectorExpression " + vecExprString);
        } else if (dataTypePhysicalVariations[0] == DataTypePhysicalVariation.DECIMAL_64) {
            final String nameToCheck = vectorExpression.getClass().getSimpleName();
            if (!nameToCheck.contains("Decimal64")) {
                System.out.println("*EXPECTED DECIMAL_64 VECTOR EXPRESSION* typeInfo " + typeInfo.toString() + " betweenInTestMode " + betweenInTestMode + " betweenInVariation " + betweenInVariation + " vectorExpression " + vecExprString);
            }
        }
    }
    // System.out.println("*VECTOR EXPRESSION* " + vectorExpression.getClass().getSimpleName());
    /*
    System.out.println(
        "*DEBUG* typeInfo " + typeInfo.toString() +
        " betweenInTestMode " + betweenInTestMode +
        " betweenInVariation " + betweenInVariation +
        " vectorExpression " + vectorExpression.toString());
    */
    VectorRandomRowSource rowSource = batchSource.getRowSource();
    VectorizedRowBatchCtx batchContext = new VectorizedRowBatchCtx(
        columnNames,
        rowSource.typeInfos(),
        rowSource.dataTypePhysicalVariations(),
        /* dataColumnNums */ null,
        /* partitionColumnCount */ 0,
        /* virtualColumnCount */ 0,
        /* neededVirtualColumns */ null,
        vectorizationContext.getScratchColumnTypeNames(),
        vectorizationContext.getScratchDataTypePhysicalVariations());
    VectorizedRowBatch batch = batchContext.createVectorizedRowBatch();
    VectorExtractRow resultVectorExtractRow = null;
    Object[] scratchRow = null;
    if (!isFilter) {
        resultVectorExtractRow = new VectorExtractRow();
        final int outputColumnNum = vectorExpression.getOutputColumnNum();
        resultVectorExtractRow.init(new TypeInfo[] { outputTypeInfo }, new int[] { outputColumnNum });
        scratchRow = new Object[1];
    }
    boolean copySelectedInUse = false;
    int[] copySelected = new int[VectorizedRowBatch.DEFAULT_SIZE];
    batchSource.resetBatchIteration();
    int rowIndex = 0;
    while (true) {
        if (!batchSource.fillNextBatch(batch)) {
            break;
        }
        final int originalBatchSize = batch.size;
        if (isFilter) {
            copySelectedInUse = batch.selectedInUse;
            if (batch.selectedInUse) {
                System.arraycopy(batch.selected, 0, copySelected, 0, originalBatchSize);
            }
        }
        // In filter mode, the batch size can be made smaller.
        vectorExpression.evaluate(batch);
        if (!isFilter) {
            extractResultObjects(batch, rowIndex, resultVectorExtractRow, scratchRow, objectInspector, resultObjects);
        } else {
            final int currentBatchSize = batch.size;
            if (copySelectedInUse && batch.selectedInUse) {
                int selectIndex = 0;
                for (int i = 0; i < originalBatchSize; i++) {
                    final int originalBatchIndex = copySelected[i];
                    final boolean booleanResult;
                    if (selectIndex < currentBatchSize && batch.selected[selectIndex] == originalBatchIndex) {
                        booleanResult = true;
                        selectIndex++;
                    } else {
                        booleanResult = false;
                    }
                    resultObjects[rowIndex + i] = new BooleanWritable(booleanResult);
                }
            } else if (batch.selectedInUse) {
                int selectIndex = 0;
                for (int i = 0; i < originalBatchSize; i++) {
                    final boolean booleanResult;
                    if (selectIndex < currentBatchSize && batch.selected[selectIndex] == i) {
                        booleanResult = true;
                        selectIndex++;
                    } else {
                        booleanResult = false;
                    }
                    resultObjects[rowIndex + i] = new BooleanWritable(booleanResult);
                }
            } else if (currentBatchSize == 0) {
                // Whole batch got zapped.
                for (int i = 0; i < originalBatchSize; i++) {
                    resultObjects[rowIndex + i] = new BooleanWritable(false);
                }
            } else {
                // Every row kept.
                for (int i = 0; i < originalBatchSize; i++) {
                    resultObjects[rowIndex + i] = new BooleanWritable(true);
                }
            }
        }
        rowIndex += originalBatchSize;
    }
    return true;
}
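
The filter branch above reconciles the batch's pre- and post-evaluation selected arrays to turn a compacted selection back into per-row booleans. Below is a minimal, self-contained sketch of that reconciliation; the method and parameter names are illustrative, and both arrays are assumed sorted ascending, as VectorizedRowBatch maintains them.

static boolean[] reconcileFilterResults(int[] originalSelected, int originalSize,
                                        int[] filteredSelected, int filteredSize) {
    // One boolean per originally-selected row: true if the row survived the filter.
    boolean[] kept = new boolean[originalSize];
    int selectIndex = 0;
    for (int i = 0; i < originalSize; i++) {
        if (selectIndex < filteredSize
                && filteredSelected[selectIndex] == originalSelected[i]) {
            kept[i] = true;   // row is still present in the shrunken selected array
            selectIndex++;
        }
        // else: the filter dropped this row, so kept[i] stays false
    }
    return kept;
}

A single forward pass suffices because a filter can only remove entries from the selected array, never reorder them.
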
Also used: VectorizationContext (org.apache.hadoop.hive.ql.exec.vector.VectorizationContext), VectorUDFAdaptor (org.apache.hadoop.hive.ql.exec.vector.udf.VectorUDFAdaptor), VectorExtractRow (org.apache.hadoop.hive.ql.exec.vector.VectorExtractRow), VectorizedRowBatchCtx (org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatchCtx), VectorizedRowBatch (org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch), BooleanWritable (org.apache.hadoop.io.BooleanWritable), HiveConf (org.apache.hadoop.hive.conf.HiveConf), VectorExpression (org.apache.hadoop.hive.ql.exec.vector.expressions.VectorExpression), VectorRandomRowSource (org.apache.hadoop.hive.ql.exec.vector.VectorRandomRowSource).

Example 7 with VectorizedRowBatchCtx

Use of org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatchCtx in project hive by apache.

The class TestVectorArithmetic, method doVectorArithmeticTest:

private void doVectorArithmeticTest(TypeInfo typeInfo1, TypeInfo typeInfo2, List<String> columns, String[] columnNames, TypeInfo[] typeInfos, DataTypePhysicalVariation[] dataTypePhysicalVariations, List<ExprNodeDesc> children, ExprNodeGenericFuncDesc exprDesc, Arithmetic arithmetic, ArithmeticTestMode arithmeticTestMode, ColumnScalarMode columnScalarMode, VectorRandomBatchSource batchSource, ObjectInspector objectInspector, TypeInfo outputTypeInfo, Object[] resultObjects) throws Exception {
    HiveConf hiveConf = new HiveConf();
    if (arithmeticTestMode == ArithmeticTestMode.ADAPTOR) {
        hiveConf.setBoolVar(HiveConf.ConfVars.HIVE_TEST_VECTOR_ADAPTOR_OVERRIDE, true);
        // Don't use DECIMAL_64 with the VectorUDFAdaptor.
        dataTypePhysicalVariations = null;
    }
    VectorizationContext vectorizationContext = new VectorizationContext("name", columns, Arrays.asList(typeInfos), dataTypePhysicalVariations == null ? null : Arrays.asList(dataTypePhysicalVariations), hiveConf);
    VectorExpression vectorExpression = vectorizationContext.getVectorExpression(exprDesc);
    vectorExpression.transientInit(hiveConf);
    if (arithmeticTestMode == ArithmeticTestMode.VECTOR_EXPRESSION && vectorExpression instanceof VectorUDFAdaptor) {
        System.out.println("*NO NATIVE VECTOR EXPRESSION* typeInfo1 " + typeInfo1.toString() + " typeInfo2 " + typeInfo2.toString() + " arithmeticTestMode " + arithmeticTestMode + " columnScalarMode " + columnScalarMode + " vectorExpression " + vectorExpression.toString());
    }
    String[] outputScratchTypeNames = vectorizationContext.getScratchColumnTypeNames();
    DataTypePhysicalVariation[] outputDataTypePhysicalVariations = vectorizationContext.getScratchDataTypePhysicalVariations();
    VectorizedRowBatchCtx batchContext = new VectorizedRowBatchCtx(
        columnNames,
        typeInfos,
        dataTypePhysicalVariations,
        /* dataColumnNums */ null,
        /* partitionColumnCount */ 0,
        /* virtualColumnCount */ 0,
        /* neededVirtualColumns */ null,
        outputScratchTypeNames,
        outputDataTypePhysicalVariations);
    VectorizedRowBatch batch = batchContext.createVectorizedRowBatch();
    VectorExtractRow resultVectorExtractRow = new VectorExtractRow();
    resultVectorExtractRow.init(new TypeInfo[] { outputTypeInfo }, new int[] { vectorExpression.getOutputColumnNum() });
    Object[] scratchRow = new Object[1];
    // System.out.println("*VECTOR EXPRESSION* " + vectorExpression.getClass().getSimpleName());
    /*
    System.out.println(
        "*DEBUG* typeInfo1 " + typeInfo1.toString() +
        " typeInfo2 " + typeInfo2.toString() +
        " arithmeticTestMode " + arithmeticTestMode +
        " columnScalarMode " + columnScalarMode +
        " vectorExpression " + vectorExpression.toString());
    */
    batchSource.resetBatchIteration();
    int rowIndex = 0;
    while (true) {
        if (!batchSource.fillNextBatch(batch)) {
            break;
        }
        vectorExpression.evaluate(batch);
        extractResultObjects(batch, rowIndex, resultVectorExtractRow, scratchRow, objectInspector, resultObjects);
        rowIndex += batch.size;
    }
}
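
Both tests above construct their VectorizedRowBatchCtx the same way, so the call shape is worth isolating. Here is a minimal sketch of a two-column context with no partition, virtual, or scratch columns, assuming the Hive ql and serde2 modules are on the classpath; the class name, column choices, and empty scratch arrays are illustrative assumptions, not taken from the tests.

import org.apache.hadoop.hive.common.type.DataTypePhysicalVariation;
import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch;
import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatchCtx;
import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo;
import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory;

public class BatchCtxSketch {
    public static void main(String[] args) throws Exception {
        String[] columnNames = { "a", "b" };
        TypeInfo[] typeInfos = { TypeInfoFactory.longTypeInfo, TypeInfoFactory.doubleTypeInfo };
        DataTypePhysicalVariation[] variations =
            { DataTypePhysicalVariation.NONE, DataTypePhysicalVariation.NONE };
        VectorizedRowBatchCtx batchContext = new VectorizedRowBatchCtx(
            columnNames, typeInfos, variations,
            /* dataColumnNums */ null,
            /* partitionColumnCount */ 0,
            /* virtualColumnCount */ 0,
            /* neededVirtualColumns */ null,
            /* scratchColumnTypeNames */ new String[0],
            /* scratchDataTypePhysicalVariations */ new DataTypePhysicalVariation[0]);
        // The context stamps out batches sized VectorizedRowBatch.DEFAULT_SIZE (1024 rows).
        VectorizedRowBatch batch = batchContext.createVectorizedRowBatch();
        System.out.println("columns: " + batch.numCols);
    }
}
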
Also used: VectorizationContext (org.apache.hadoop.hive.ql.exec.vector.VectorizationContext), VectorUDFAdaptor (org.apache.hadoop.hive.ql.exec.vector.udf.VectorUDFAdaptor), VectorExtractRow (org.apache.hadoop.hive.ql.exec.vector.VectorExtractRow), VectorizedRowBatchCtx (org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatchCtx), VectorizedRowBatch (org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch), DataTypePhysicalVariation (org.apache.hadoop.hive.common.type.DataTypePhysicalVariation), HiveConf (org.apache.hadoop.hive.conf.HiveConf), VectorExpression (org.apache.hadoop.hive.ql.exec.vector.expressions.VectorExpression).

Example 8 with VectorizedRowBatchCtx

Use of org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatchCtx in project hive by apache.

The class TestVectorDateAddSub, method doDateAddSubTestsWithDiffColumnScalar:

private void doDateAddSubTestsWithDiffColumnScalar(Random random, String dateTimeStringTypeName, String integerTypeName, ColumnScalarMode columnScalarMode, boolean isAdd) throws Exception {
    TypeInfo dateTimeStringTypeInfo = TypeInfoUtils.getTypeInfoFromTypeString(dateTimeStringTypeName);
    PrimitiveCategory dateTimeStringPrimitiveCategory = ((PrimitiveTypeInfo) dateTimeStringTypeInfo).getPrimitiveCategory();
    boolean isStringFamily = (dateTimeStringPrimitiveCategory == PrimitiveCategory.STRING || dateTimeStringPrimitiveCategory == PrimitiveCategory.CHAR || dateTimeStringPrimitiveCategory == PrimitiveCategory.VARCHAR);
    TypeInfo integerTypeInfo = TypeInfoUtils.getTypeInfoFromTypeString(integerTypeName);
    PrimitiveCategory integerPrimitiveCategory = ((PrimitiveTypeInfo) integerTypeInfo).getPrimitiveCategory();
    List<GenerationSpec> generationSpecList = new ArrayList<GenerationSpec>();
    List<DataTypePhysicalVariation> explicitDataTypePhysicalVariationList = new ArrayList<DataTypePhysicalVariation>();
    List<String> columns = new ArrayList<String>();
    int columnNum = 1;
    ExprNodeDesc col1Expr;
    if (columnScalarMode == ColumnScalarMode.COLUMN_COLUMN || columnScalarMode == ColumnScalarMode.COLUMN_SCALAR) {
        if (!isStringFamily) {
            generationSpecList.add(GenerationSpec.createSameType(dateTimeStringTypeInfo));
        } else {
            generationSpecList.add(GenerationSpec.createStringFamilyOtherTypeValue(dateTimeStringTypeInfo, TypeInfoFactory.dateTypeInfo));
        }
        explicitDataTypePhysicalVariationList.add(DataTypePhysicalVariation.NONE);
        String columnName = "col" + (columnNum++);
        col1Expr = new ExprNodeColumnDesc(dateTimeStringTypeInfo, columnName, "table", false);
        columns.add(columnName);
    } else {
        Object scalar1Object;
        if (!isStringFamily) {
            scalar1Object = VectorRandomRowSource.randomPrimitiveObject(random, (PrimitiveTypeInfo) dateTimeStringTypeInfo);
        } else {
            scalar1Object = VectorRandomRowSource.randomStringFamilyOtherTypeValue(random, dateTimeStringTypeInfo, TypeInfoFactory.dateTypeInfo, false);
        }
        col1Expr = new ExprNodeConstantDesc(dateTimeStringTypeInfo, scalar1Object);
    }
    ExprNodeDesc col2Expr;
    if (columnScalarMode == ColumnScalarMode.COLUMN_COLUMN || columnScalarMode == ColumnScalarMode.SCALAR_COLUMN) {
        generationSpecList.add(GenerationSpec.createSameType(integerTypeInfo));
        explicitDataTypePhysicalVariationList.add(DataTypePhysicalVariation.NONE);
        String columnName = "col" + (columnNum++);
        col2Expr = new ExprNodeColumnDesc(integerTypeInfo, columnName, "table", false);
        columns.add(columnName);
    } else {
        // smallerRange limits the random scalar so the date arithmetic stays in a usable range.
        Object scalar2Object = smallerRange(random, integerPrimitiveCategory, /* wantWritable */ false);
        col2Expr = new ExprNodeConstantDesc(integerTypeInfo, scalar2Object);
    }
    List<ExprNodeDesc> children = new ArrayList<ExprNodeDesc>();
    children.add(col1Expr);
    children.add(col2Expr);
    // ----------------------------------------------------------------------------------------------
    String[] columnNames = columns.toArray(new String[0]);
    VectorRandomRowSource rowSource = new VectorRandomRowSource();
    rowSource.initGenerationSpecSchema(
        random, generationSpecList,
        /* maxComplexDepth */ 0,
        /* allowNull */ true,
        /* isUnicodeOk */ true,
        explicitDataTypePhysicalVariationList);
    Object[][] randomRows = rowSource.randomRows(100000);
    if (columnScalarMode == ColumnScalarMode.COLUMN_COLUMN || columnScalarMode == ColumnScalarMode.SCALAR_COLUMN) {
        // Fixup numbers to limit the range to 0 ... N-1.
        for (int i = 0; i < randomRows.length; i++) {
            Object[] row = randomRows[i];
            if (row[columnNum - 2] != null) {
                row[columnNum - 2] = smallerRange(random, integerPrimitiveCategory, /* wantWritable */ true);
            }
        }
    }
    VectorRandomBatchSource batchSource = VectorRandomBatchSource.createInterestingBatches(random, rowSource, randomRows, null);
    String[] outputScratchTypeNames = new String[] { "date" };
    VectorizedRowBatchCtx batchContext = new VectorizedRowBatchCtx(
        columnNames,
        rowSource.typeInfos(),
        rowSource.dataTypePhysicalVariations(),
        /* dataColumnNums */ null,
        /* partitionColumnCount */ 0,
        /* virtualColumnCount */ 0,
        /* neededVirtualColumns */ null,
        outputScratchTypeNames,
        /* scratchDataTypePhysicalVariations */ null);
    final int rowCount = randomRows.length;
    Object[][] resultObjectsArray = new Object[DateAddSubTestMode.count][];
    for (int i = 0; i < DateAddSubTestMode.count; i++) {
        Object[] resultObjects = new Object[rowCount];
        resultObjectsArray[i] = resultObjects;
        GenericUDF udf = (isAdd ? new GenericUDFDateAdd() : new GenericUDFDateSub());
        ExprNodeGenericFuncDesc exprDesc = new ExprNodeGenericFuncDesc(TypeInfoFactory.dateTypeInfo, udf, children);
        DateAddSubTestMode dateAddSubTestMode = DateAddSubTestMode.values()[i];
        switch(dateAddSubTestMode) {
            case ROW_MODE:
                doRowDateAddSubTest(dateTimeStringTypeInfo, integerTypeInfo, columns, children, isAdd, exprDesc, randomRows, columnScalarMode, rowSource.rowStructObjectInspector(), resultObjects);
                break;
            case ADAPTOR:
            case VECTOR_EXPRESSION:
                doVectorDateAddSubTest(dateTimeStringTypeInfo, integerTypeInfo, columns, rowSource.typeInfos(), children, isAdd, exprDesc, dateAddSubTestMode, columnScalarMode, batchSource, batchContext, resultObjects);
                break;
            default:
                throw new RuntimeException("Unexpected date add/sub test mode " + dateAddSubTestMode);
        }
    }
    for (int i = 0; i < rowCount; i++) {
        // Row-mode is the expected value.
        Object expectedResult = resultObjectsArray[0][i];
        for (int v = 1; v < DateAddSubTestMode.count; v++) {
            Object vectorResult = resultObjectsArray[v][i];
            if (expectedResult == null || vectorResult == null) {
                if (expectedResult != null || vectorResult != null) {
                    Assert.fail("Row " + i + " " + DateAddSubTestMode.values()[v] + " isAdd " + isAdd + " " + columnScalarMode + " result is NULL " + (vectorResult == null) + " does not match row-mode expected result is NULL " + (expectedResult == null) + " row values " + Arrays.toString(randomRows[i]));
                }
            } else {
                if (!expectedResult.equals(vectorResult)) {
                    Assert.fail("Row " + i + " " + DateAddSubTestMode.values()[v] + " isAdd " + isAdd + " " + columnScalarMode + " result " + vectorResult.toString() + " (" + vectorResult.getClass().getSimpleName() + ")" + " does not match row-mode expected result " + expectedResult.toString() + " (" + expectedResult.getClass().getSimpleName() + ")" + " row values " + Arrays.toString(randomRows[i]));
                }
            }
        }
    }
}
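
The verification loop treats row-mode output as the reference and requires null-aware equality against every vectorized mode. The same pattern, distilled into a self-contained helper (the name and message format are illustrative, not from the test):

static void assertSameResults(Object[] expected, Object[] actual, String label) {
    for (int i = 0; i < expected.length; i++) {
        Object e = expected[i];
        Object a = actual[i];
        // Null must match null; otherwise fall back to equals().
        boolean matches = (e == null) ? (a == null) : e.equals(a);
        if (!matches) {
            throw new AssertionError(
                label + ": row " + i + " expected " + e + " but got " + a);
        }
    }
}

java.util.Objects.equals(e, a) would collapse the null and non-null cases into one call.
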
Also used: ArrayList (java.util.ArrayList), GenericUDFDateAdd (org.apache.hadoop.hive.ql.udf.generic.GenericUDFDateAdd), PrimitiveTypeInfo (org.apache.hadoop.hive.serde2.typeinfo.PrimitiveTypeInfo), DataTypePhysicalVariation (org.apache.hadoop.hive.common.type.DataTypePhysicalVariation), ExprNodeColumnDesc (org.apache.hadoop.hive.ql.plan.ExprNodeColumnDesc), ExprNodeDesc (org.apache.hadoop.hive.ql.plan.ExprNodeDesc), PrimitiveCategory (org.apache.hadoop.hive.serde2.objectinspector.PrimitiveObjectInspector.PrimitiveCategory), ExprNodeConstantDesc (org.apache.hadoop.hive.ql.plan.ExprNodeConstantDesc), VectorRandomBatchSource (org.apache.hadoop.hive.ql.exec.vector.VectorRandomBatchSource), GenericUDFDateSub (org.apache.hadoop.hive.ql.udf.generic.GenericUDFDateSub), ExprNodeGenericFuncDesc (org.apache.hadoop.hive.ql.plan.ExprNodeGenericFuncDesc), DecimalTypeInfo (org.apache.hadoop.hive.serde2.typeinfo.DecimalTypeInfo), TypeInfo (org.apache.hadoop.hive.serde2.typeinfo.TypeInfo), GenerationSpec (org.apache.hadoop.hive.ql.exec.vector.VectorRandomRowSource.GenerationSpec), VectorizedRowBatchCtx (org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatchCtx), GenericUDF (org.apache.hadoop.hive.ql.udf.generic.GenericUDF), VectorRandomRowSource (org.apache.hadoop.hive.ql.exec.vector.VectorRandomRowSource).

Example 9 with VectorizedRowBatchCtx

Use of org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatchCtx in project hive by apache.

The class TestInputOutputFormat, method testFSCallsVectorizedOrcAcidRowBatchReader:

@Test
public void testFSCallsVectorizedOrcAcidRowBatchReader() throws IOException {
    try {
        MockFileSystem fs = new MockFileSystem(conf);
        MockFileSystem.addGlobalFile(new MockFile("mock:/a/base_0000001/bucket_00000", 1000, new byte[1], new MockBlock("host1")));
        MockFileSystem.addGlobalFile(new MockFile("mock:/a/base_0000001/bucket_00001", 1000, new byte[1], new MockBlock("host1")));
        MockFileSystem.addGlobalFile(new MockFile("mock:/a/base_0000001/bucket_00002", 1000, new byte[1], new MockBlock("host1")));
        MockFileSystem.addGlobalFile(new MockFile("mock:/a/base_0000001/bucket_00003", 1000, new byte[1], new MockBlock("host1")));
        MockFileSystem.addGlobalFile(new MockFile("mock:/a/delta_0000002_0000002_0000/bucket_00000", 1000, new byte[1], new MockBlock("host1")));
        MockFileSystem.addGlobalFile(new MockFile("mock:/a/delta_0000002_0000002_0000/bucket_00001", 1000, new byte[1], new MockBlock("host1")));
        MockFileSystem.addGlobalFile(new MockFile("mock:/a/delta_0000002_0000002_0000/bucket_00002", 1000, new byte[1], new MockBlock("host1")));
        MockFileSystem.addGlobalFile(new MockFile("mock:/a/delta_0000002_0000002_0000/bucket_00003", 1000, new byte[1], new MockBlock("host1")));
        MockFileSystem.addGlobalFile(new MockFile("mock:/a/delta_0000003_0000003_0000/bucket_00000", 1000, new byte[1], new MockBlock("host1")));
        MockFileSystem.addGlobalFile(new MockFile("mock:/a/delta_0000003_0000003_0000/bucket_00001", 1000, new byte[1], new MockBlock("host1")));
        MockFileSystem.addGlobalFile(new MockFile("mock:/a/delta_0000003_0000003_0000/bucket_00002", 1000, new byte[1], new MockBlock("host1")));
        MockFileSystem.addGlobalFile(new MockFile("mock:/a/delta_0000003_0000003_0000/bucket_00003", 1000, new byte[1], new MockBlock("host1")));
        conf.set("bucket_count", "4");
        // set up props for read
        setupAcidProperties(conf, RowType.DUMMYROW);
        conf.setBoolean(HiveConf.ConfVars.HIVE_VECTORIZATION_ENABLED.varname, true);
        MockPath mockPath = new MockPath(fs, "mock:/a");
        conf.set("mapred.input.dir", mockPath.toString());
        conf.set("fs.defaultFS", "mock:///");
        conf.set("fs.mock.impl", MockFileSystem.class.getName());
        OrcInputFormat.Context context = new OrcInputFormat.Context(conf);
        OrcInputFormat.FileGenerator gen = new OrcInputFormat.FileGenerator(context, () -> fs, new MockPath(fs, "mock:/a"), false, null);
        List<OrcInputFormat.SplitStrategy<?>> splitStrategies = createSplitStrategies(context, gen);
        assertEquals(1, splitStrategies.size());
        assertTrue(splitStrategies.get(0) instanceof OrcInputFormat.ACIDSplitStrategy);
        List<OrcSplit> splits = ((OrcInputFormat.ACIDSplitStrategy) splitStrategies.get(0)).getSplits();
        // Marker comment: look for the stats read-op trace in target/surefire-reports/*-output.txt.
        System.out.println("STATS TRACE START - " + testCaseName.getMethodName());
        // Creating the readers below used to cost 2 read ops per bucket file (listStatus and open).
        // HIVE-19588 removes listStatus from the code path, so there should be only one read op (open) per file.
        int readsBefore = fs.statistics.getReadOps();
        for (OrcSplit split : splits) {
            try {
                new VectorizedOrcAcidRowBatchReader(split, conf, Reporter.NULL, new VectorizedRowBatchCtx());
            } catch (FileFormatException e) {
                // Expected: these mock files are not valid ORC files.
            }
        }
        int readsAfter = fs.statistics.getReadOps();
        System.out.println("STATS TRACE END - " + testCaseName.getMethodName());
        int delta = readsAfter - readsBefore;
        // HIVE-16812 adds 1 read of the footer of each file (only if delete delta exists)
        assertEquals(12, delta);
    } finally {
        MockFileSystem.clearGlobalFiles();
    }
}
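
The test's core measurement is a before/after delta on the filesystem's read-op counter: with 12 mock bucket files (4 base, 8 delta) and no delete deltas, the asserted delta of 12 corresponds to one read op per file. The pattern in isolation, as a sketch: assertReadOpsDelta is a hypothetical helper, while FileSystem.Statistics and getReadOps() are real Hadoop APIs (MockFileSystem in this test exposes its Statistics as a field).

import org.apache.hadoop.fs.FileSystem;

public class ReadOpsDeltaSketch {
    static void assertReadOpsDelta(FileSystem.Statistics stats, Runnable work, int expected) {
        int before = stats.getReadOps();   // snapshot before the code under test
        work.run();                        // e.g. construct one reader per split
        int delta = stats.getReadOps() - before;
        if (delta != expected) {
            throw new AssertionError("expected " + expected + " read ops, saw " + delta);
        }
    }
}
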
Also used: Context (org.apache.hadoop.hive.ql.io.orc.OrcInputFormat.Context), FileFormatException (org.apache.orc.FileFormatException), VectorizedRowBatchCtx (org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatchCtx), SplitStrategy (org.apache.hadoop.hive.ql.io.orc.OrcInputFormat.SplitStrategy), Test (org.junit.Test).

Example 10 with VectorizedRowBatchCtx

Use of org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatchCtx in project hive by apache.

The class VectorizedOrcAcidRowBatchReader, method computeOffsetAndBucket:

/**
 * See {@link #next(NullWritable, VectorizedRowBatch)} first and
 * {@link OrcRawRecordMerger.OriginalReaderPair}.
 * Used when reading a split of an "original" file whose data must be decorated with ROW__ID.
 * This requires treating multiple files that are part of the same bucket (tranche for unbucketed
 * tables) as a single logical file so that rowids are numbered consistently.
 */
static OrcSplit.OffsetAndBucketProperty computeOffsetAndBucket(FileStatus file, Path rootDir, boolean isOriginal, boolean hasDeletes, Configuration conf) throws IOException {
    VectorizedRowBatchCtx vrbCtx = Utilities.getVectorizedRowBatchCtx(conf);
    if (!needSyntheticRowIds(isOriginal, hasDeletes, areRowIdsProjected(vrbCtx))) {
        if (isOriginal) {
            /**
             * Even if we don't need to project ROW_IDs, we still need to check the write ID that
             * created the file to see if it's committed.  See more in
             * {@link #next(NullWritable, VectorizedRowBatch)}.  (In practice getAcidState() should
             * filter out base/delta files but this makes fewer dependencies)
             */
            OrcRawRecordMerger.TransactionMetaData syntheticTxnInfo = OrcRawRecordMerger.TransactionMetaData.findWriteIDForSynthetcRowIDs(file.getPath(), rootDir, conf);
            return new OrcSplit.OffsetAndBucketProperty(-1, -1, syntheticTxnInfo.syntheticWriteId);
        }
        return null;
    }
    String txnString = conf.get(ValidWriteIdList.VALID_WRITEIDS_KEY);
    ValidWriteIdList validWriteIdList = (txnString == null) ? new ValidReaderWriteIdList() : new ValidReaderWriteIdList(txnString);
    long rowIdOffset = 0;
    OrcRawRecordMerger.TransactionMetaData syntheticTxnInfo = OrcRawRecordMerger.TransactionMetaData.findWriteIDForSynthetcRowIDs(file.getPath(), rootDir, conf);
    int bucketId = AcidUtils.parseBucketId(file.getPath());
    int bucketProperty = BucketCodec.V1.encode(new AcidOutputFormat.Options(conf).statementId(syntheticTxnInfo.statementId).bucket(bucketId));
    AcidDirectory directoryState = AcidUtils.getAcidState(null, syntheticTxnInfo.folder, conf, validWriteIdList, Ref.from(false), true);
    for (HadoopShims.HdfsFileStatusWithId f : directoryState.getOriginalFiles()) {
        int bucketIdFromPath = AcidUtils.parseBucketId(f.getFileStatus().getPath());
        if (bucketIdFromPath != bucketId) {
            // HIVE-16952
            continue;
        }
        if (f.getFileStatus().getPath().equals(file.getPath())) {
            // 'f' is the file this split was created from
            break;
        }
        Reader reader = OrcFile.createReader(f.getFileStatus().getPath(), OrcFile.readerOptions(conf));
        rowIdOffset += reader.getNumberOfRows();
    }
    return new OrcSplit.OffsetAndBucketProperty(rowIdOffset, bucketProperty, syntheticTxnInfo.syntheticWriteId);
}
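
The loop's only arithmetic is an accumulation: every original file in the same bucket that sorts before the split's file contributes its row count to the offset, so synthetic ROW__IDs stay consecutive across the whole tranche. A simplified, self-contained sketch of that accumulation follows; FileInfo is a hypothetical stand-in for the FileStatus/Reader pair, and it requires Java 16+ for the record syntax.

import java.util.List;

public class RowIdOffsetSketch {
    record FileInfo(int bucketId, String path, long numRows) {}

    static long rowIdOffsetFor(List<FileInfo> originalFiles, int bucketId, String targetPath) {
        long offset = 0;
        for (FileInfo f : originalFiles) {
            if (f.bucketId() != bucketId) {
                continue;          // files in other buckets never shift our row ids (HIVE-16952)
            }
            if (f.path().equals(targetPath)) {
                break;             // reached the file this split reads; stop accumulating
            }
            offset += f.numRows(); // earlier files in the bucket shift our synthetic ROW__IDs
        }
        return offset;
    }
}
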
Also used: HadoopShims (org.apache.hadoop.hive.shims.HadoopShims), AcidOutputFormat (org.apache.hadoop.hive.ql.io.AcidOutputFormat), VectorizedRowBatchCtx (org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatchCtx), ValidWriteIdList (org.apache.hadoop.hive.common.ValidWriteIdList), AcidDirectory (org.apache.hadoop.hive.ql.io.AcidDirectory), ValidReaderWriteIdList (org.apache.hadoop.hive.common.ValidReaderWriteIdList).

Aggregations

VectorizedRowBatchCtx (org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatchCtx): 34 usages
VectorizedRowBatch (org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch): 14 usages
HiveConf (org.apache.hadoop.hive.conf.HiveConf): 12 usages
VectorExtractRow (org.apache.hadoop.hive.ql.exec.vector.VectorExtractRow): 12 usages
VectorRandomRowSource (org.apache.hadoop.hive.ql.exec.vector.VectorRandomRowSource): 12 usages
VectorizationContext (org.apache.hadoop.hive.ql.exec.vector.VectorizationContext): 12 usages
VectorUDFAdaptor (org.apache.hadoop.hive.ql.exec.vector.udf.VectorUDFAdaptor): 11 usages
DataTypePhysicalVariation (org.apache.hadoop.hive.common.type.DataTypePhysicalVariation): 10 usages
TypeInfo (org.apache.hadoop.hive.serde2.typeinfo.TypeInfo): 10 usages
ArrayList (java.util.ArrayList): 9 usages
VectorExpression (org.apache.hadoop.hive.ql.exec.vector.expressions.VectorExpression): 8 usages
DecimalTypeInfo (org.apache.hadoop.hive.serde2.typeinfo.DecimalTypeInfo): 8 usages
PrimitiveTypeInfo (org.apache.hadoop.hive.serde2.typeinfo.PrimitiveTypeInfo): 8 usages
GenericUDF (org.apache.hadoop.hive.ql.udf.generic.GenericUDF): 7 usages
MapWork (org.apache.hadoop.hive.ql.plan.MapWork): 6 usages
VectorRandomBatchSource (org.apache.hadoop.hive.ql.exec.vector.VectorRandomBatchSource): 5 usages
GenerationSpec (org.apache.hadoop.hive.ql.exec.vector.VectorRandomRowSource.GenerationSpec): 5 usages
AcidOutputFormat (org.apache.hadoop.hive.ql.io.AcidOutputFormat): 5 usages
ExprNodeColumnDesc (org.apache.hadoop.hive.ql.plan.ExprNodeColumnDesc): 5 usages
ExprNodeDesc (org.apache.hadoop.hive.ql.plan.ExprNodeDesc): 5 usages