
Example 16 with VectorizedRowBatchCtx

Use of org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatchCtx in project hive by apache, from the class TestVectorizedOrcAcidRowBatchReader, method testVectorizedOrcAcidRowBatchReader.

private void testVectorizedOrcAcidRowBatchReader(String deleteEventRegistry) throws Exception {
    List<OrcInputFormat.SplitStrategy<?>> splitStrategies = getSplitStrategies();
    assertEquals(1, splitStrategies.size());
    List<OrcSplit> splits = ((OrcInputFormat.ACIDSplitStrategy) splitStrategies.get(0)).getSplits();
    assertEquals(1, splits.size());
    assertEquals(root.toUri().toString() + File.separator + "delta_0000001_0000010_0000/bucket_00000", splits.get(0).getPath().toUri().toString());
    assertFalse(splits.get(0).isOriginal());
    // Mark one of the write ids as invalid to test that invalid write ids
    // are handled properly: exclude write id 5.
    conf.set(ValidWriteIdList.VALID_WRITEIDS_KEY, "tbl:14:1:1:5");
    VectorizedOrcAcidRowBatchReader vectorizedReader = new VectorizedOrcAcidRowBatchReader(splits.get(0), conf, Reporter.NULL, new VectorizedRowBatchCtx());
    if (deleteEventRegistry.equals(ColumnizedDeleteEventRegistry.class.getName())) {
        assertTrue(vectorizedReader.getDeleteEventRegistry() instanceof ColumnizedDeleteEventRegistry);
    }
    if (deleteEventRegistry.equals(SortMergedDeleteEventRegistry.class.getName())) {
        assertTrue(vectorizedReader.getDeleteEventRegistry() instanceof SortMergedDeleteEventRegistry);
    }
    TypeDescription schema = OrcInputFormat.getDesiredRowTypeDescr(conf, true, Integer.MAX_VALUE);
    VectorizedRowBatch vectorizedRowBatch = schema.createRowBatchV2();
    // Set the data column count to 1.
    vectorizedRowBatch.setPartitionInfo(1, 0);
    long previousPayload = Long.MIN_VALUE;
    while (vectorizedReader.next(null, vectorizedRowBatch)) {
        assertTrue(vectorizedRowBatch.selectedInUse);
        LongColumnVector col = (LongColumnVector) vectorizedRowBatch.cols[0];
        for (int i = 0; i < vectorizedRowBatch.size; ++i) {
            int idx = vectorizedRowBatch.selected[i];
            long payload = col.vector[idx];
            long owid = (payload / NUM_ROWID_PER_OWID) + 1;
            long rowId = payload % NUM_ROWID_PER_OWID;
            // Rows whose rowId is divisible by 2 or 3 are expected to have been deleted.
            assertFalse(rowId % 2 == 0 || rowId % 3 == 0);
            // Check that writeid#5 has been excluded.
            assertTrue(owid != 5);
            // Check that the data is in sorted order.
            assertTrue(payload > previousPayload);
            previousPayload = payload;
        }
    }
}
Also used : VectorizedRowBatchCtx(org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatchCtx) VectorizedRowBatch(org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch) TypeDescription(org.apache.orc.TypeDescription) ColumnizedDeleteEventRegistry(org.apache.hadoop.hive.ql.io.orc.VectorizedOrcAcidRowBatchReader.ColumnizedDeleteEventRegistry) LongColumnVector(org.apache.hadoop.hive.ql.exec.vector.LongColumnVector) SortMergedDeleteEventRegistry(org.apache.hadoop.hive.ql.io.orc.VectorizedOrcAcidRowBatchReader.SortMergedDeleteEventRegistry)
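
The assertions above decode each long payload back into a write id (owid) and a row id. A minimal sketch of that arithmetic, assuming NUM_ROWID_PER_OWID is the test's rows-per-write-id constant (the constant's value and the helper names below are illustrative):

// Illustrative helpers mirroring the decode logic in the assertions above.
// The value 60 is hypothetical; the test defines its own NUM_ROWID_PER_OWID.
static final long NUM_ROWID_PER_OWID = 60;

static long encodePayload(long owid, long rowId) {
    // Write id N owns the payload range [(N - 1) * NUM_ROWID_PER_OWID, N * NUM_ROWID_PER_OWID).
    return (owid - 1) * NUM_ROWID_PER_OWID + rowId;
}

static long owidOf(long payload) {
    return (payload / NUM_ROWID_PER_OWID) + 1;
}

static long rowIdOf(long payload) {
    return payload % NUM_ROWID_PER_OWID;
}

Because the payload grows monotonically with (owid, rowId), asserting payload > previousPayload is enough to verify that rows arrive in sorted order.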

Example 17 with VectorizedRowBatchCtx

Use of org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatchCtx in project hive by apache, from the class TestVectorizedOrcAcidRowBatchReader, method testDeleteEventOriginalFiltering.

public void testDeleteEventOriginalFiltering() throws Exception {
    boolean filterOn = HiveConf.getBoolVar(conf, HiveConf.ConfVars.FILTER_DELETE_EVENTS);
    conf.setBoolean(hive_metastoreConstants.TABLE_IS_TRANSACTIONAL, false);
    // Create 3 original files with 3 rows each
    Properties properties = new Properties();
    properties.setProperty("columns", DummyOriginalRow.getColumnNamesProperty());
    properties.setProperty("columns.types", DummyOriginalRow.getColumnTypesProperty());
    OrcFile.WriterOptions writerOptions = OrcFile.writerOptions(properties, conf);
    writerOptions.inspector(originalInspector);
    for (String fileName : new String[] { "000000_0", "000000_0_copy_1", "000000_0_copy_2" }) {
        Path testFilePath = new Path(root, fileName);
        Writer writer = OrcFile.createWriter(testFilePath, writerOptions);
        writer.addRow(new DummyOriginalRow(0));
        writer.addRow(new DummyOriginalRow(0));
        writer.addRow(new DummyOriginalRow(0));
        writer.close();
    }
    conf.setBoolean(hive_metastoreConstants.TABLE_IS_TRANSACTIONAL, true);
    conf.set(ValidTxnList.VALID_TXNS_KEY, new ValidReadTxnList(new long[0], new BitSet(), 1000, Long.MAX_VALUE).writeToString());
    int bucket = 0;
    AcidOutputFormat.Options options = new AcidOutputFormat.Options(conf)
        .filesystem(fs).bucket(bucket).writingBase(false)
        .minimumWriteId(1).maximumWriteId(1)
        .inspector(inspector).reporter(Reporter.NULL)
        .recordIdColumn(1).finalDestination(root);
    int bucketProperty = BucketCodec.V1.encode(options);
    RecordUpdater updater = new OrcRecordUpdater(root, options);
    // Delete one row from each of the original files.
    // Delete the last record in this split to test boundary conditions. It should not be present in the delete event
    // registry for the next split
    updater.delete(options.getMinimumWriteId(), new DummyRow(-1, 2, 0, bucket));
    // Delete the first record in this split to test boundary conditions. It should not be present in the delete event
    // registry for the previous split
    updater.delete(options.getMinimumWriteId(), new DummyRow(-1, 3, 0, bucket));
    updater.delete(options.getMinimumWriteId(), new DummyRow(-1, 7, 0, bucket));
    updater.close(false);
    // The high watermark (HWM) is not important here; just make sure the deltas created above are read as if committed.
    conf.set(ValidWriteIdList.VALID_WRITEIDS_KEY, "tbl:2:" + Long.MAX_VALUE + "::");
    // Set vector mode to true in the MapWork so that split generation recognizes this as a vectorized execution.
    // Without this, the offsets for the synthetic row ids are not computed.
    MapWork mapWork = new MapWork();
    mapWork.setVectorMode(true);
    VectorizedRowBatchCtx vrbContext = new VectorizedRowBatchCtx();
    mapWork.setVectorizedRowBatchCtx(vrbContext);
    HiveConf.setVar(conf, HiveConf.ConfVars.PLAN, "//tmp");
    Utilities.setMapWork(conf, mapWork);
    // now we have 3 delete events total, but for each split we should only
    // load 1 into DeleteRegistry (if filtering is on)
    List<OrcInputFormat.SplitStrategy<?>> splitStrategies = getSplitStrategies();
    assertEquals(1, splitStrategies.size());
    List<OrcSplit> splits = ((OrcInputFormat.ACIDSplitStrategy) splitStrategies.get(0)).getSplits();
    assertEquals(3, splits.size());
    assertEquals(root.toUri().toString() + File.separator + "000000_0", splits.get(0).getPath().toUri().toString());
    assertTrue(splits.get(0).isOriginal());
    assertEquals(root.toUri().toString() + File.separator + "000000_0_copy_1", splits.get(1).getPath().toUri().toString());
    assertTrue(splits.get(1).isOriginal());
    assertEquals(root.toUri().toString() + File.separator + "000000_0_copy_2", splits.get(2).getPath().toUri().toString());
    assertTrue(splits.get(2).isOriginal());
    VectorizedOrcAcidRowBatchReader vectorizedReader = new VectorizedOrcAcidRowBatchReader(splits.get(0), conf, Reporter.NULL, vrbContext);
    ColumnizedDeleteEventRegistry deleteEventRegistry = (ColumnizedDeleteEventRegistry) vectorizedReader.getDeleteEventRegistry();
    assertEquals("number of delete events for stripe 1", filterOn ? 1 : 3, deleteEventRegistry.size());
    OrcRawRecordMerger.KeyInterval keyInterval = vectorizedReader.getKeyInterval();
    if (filterOn) {
        assertEquals(new OrcRawRecordMerger.KeyInterval(new RecordIdentifier(0, bucketProperty, 0), new RecordIdentifier(0, bucketProperty, 2)), keyInterval);
    } else {
        assertEquals(new OrcRawRecordMerger.KeyInterval(null, null), keyInterval);
    }
    vectorizedReader = new VectorizedOrcAcidRowBatchReader(splits.get(1), conf, Reporter.NULL, vrbContext);
    deleteEventRegistry = (ColumnizedDeleteEventRegistry) vectorizedReader.getDeleteEventRegistry();
    assertEquals("number of delete events for stripe 2", filterOn ? 1 : 3, deleteEventRegistry.size());
    keyInterval = vectorizedReader.getKeyInterval();
    if (filterOn) {
        assertEquals(new OrcRawRecordMerger.KeyInterval(new RecordIdentifier(0, bucketProperty, 3), new RecordIdentifier(0, bucketProperty, 5)), keyInterval);
    } else {
        assertEquals(new OrcRawRecordMerger.KeyInterval(null, null), keyInterval);
    }
    vectorizedReader = new VectorizedOrcAcidRowBatchReader(splits.get(2), conf, Reporter.NULL, vrbContext);
    deleteEventRegistry = (ColumnizedDeleteEventRegistry) vectorizedReader.getDeleteEventRegistry();
    assertEquals("number of delete events for stripe 3", filterOn ? 1 : 3, deleteEventRegistry.size());
    keyInterval = vectorizedReader.getKeyInterval();
    if (filterOn) {
        assertEquals(new OrcRawRecordMerger.KeyInterval(new RecordIdentifier(0, bucketProperty, 6), new RecordIdentifier(0, bucketProperty, 8)), keyInterval);
    } else {
        assertEquals(new OrcRawRecordMerger.KeyInterval(null, null), keyInterval);
    }
}
Also used : Properties(java.util.Properties) ValidReadTxnList(org.apache.hadoop.hive.common.ValidReadTxnList) AcidOutputFormat(org.apache.hadoop.hive.ql.io.AcidOutputFormat) RecordIdentifier(org.apache.hadoop.hive.ql.io.RecordIdentifier) RecordUpdater(org.apache.hadoop.hive.ql.io.RecordUpdater) Path(org.apache.hadoop.fs.Path) BitSet(java.util.BitSet) VectorizedRowBatchCtx(org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatchCtx) MapWork(org.apache.hadoop.hive.ql.plan.MapWork) ColumnizedDeleteEventRegistry(org.apache.hadoop.hive.ql.io.orc.VectorizedOrcAcidRowBatchReader.ColumnizedDeleteEventRegistry)
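
The key intervals asserted above follow from how synthetic row ids are assigned to original files: rows are numbered consecutively across a bucket's files in order, so the three 3-row files cover row ids 0..2, 3..5, and 6..8. A minimal sketch of that offset computation (illustrative only, not the actual Hive implementation):

// Illustrative: first synthetic row id of each original file, assuming the
// files are visited in order and rowsPerFile holds their row counts.
static long[] syntheticRowIdOffsets(long[] rowsPerFile) {
    long[] offsets = new long[rowsPerFile.length];
    long next = 0;
    for (int i = 0; i < rowsPerFile.length; i++) {
        offsets[i] = next;
        next += rowsPerFile[i];
    }
    // Three 3-row files yield {0, 3, 6}, matching the key intervals asserted above.
    return offsets;
}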

Example 18 with VectorizedRowBatchCtx

Use of org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatchCtx in project hive by apache, from the class VectorizedColumnReaderTestBase, method initialVectorizedRowBatchCtx.

protected static void initialVectorizedRowBatchCtx(Configuration conf) throws HiveException {
    MapWork mapWork = new MapWork();
    VectorizedRowBatchCtx rbCtx = new VectorizedRowBatchCtx();
    rbCtx.init(createStructObjectInspector(conf), new String[0]);
    mapWork.setVectorMode(true);
    mapWork.setVectorizedRowBatchCtx(rbCtx);
    Utilities.setMapWork(conf, mapWork);
}
Also used : VectorizedRowBatchCtx(org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatchCtx) MapWork(org.apache.hadoop.hive.ql.plan.MapWork)
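
A hypothetical caller would run this setup once before exercising a vectorized read path, so that anything consulting the plan through Utilities.getMapWork(conf) sees vector mode switched on:

// Hypothetical usage sketch: prepare the configuration before building a vectorized reader.
Configuration conf = new Configuration();
initialVectorizedRowBatchCtx(conf);
// conf now carries a MapWork with vector mode enabled and an initialized
// VectorizedRowBatchCtx, retrievable later via Utilities.getMapWork(conf).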

Example 19 with VectorizedRowBatchCtx

Use of org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatchCtx in project hive by apache, from the class Vectorizer, method debugDisplayVertexInfo.

public void debugDisplayVertexInfo(BaseWork work) {
    VectorizedRowBatchCtx vectorizedRowBatchCtx = work.getVectorizedRowBatchCtx();
    String[] allColumnNames = vectorizedRowBatchCtx.getRowColumnNames();
    TypeInfo[] columnTypeInfos = vectorizedRowBatchCtx.getRowColumnTypeInfos();
    DataTypePhysicalVariation[] dataTypePhysicalVariations = vectorizedRowBatchCtx.getRowdataTypePhysicalVariations();
    int partitionColumnCount = vectorizedRowBatchCtx.getPartitionColumnCount();
    int virtualColumnCount = vectorizedRowBatchCtx.getVirtualColumnCount();
    String[] scratchColumnTypeNames = vectorizedRowBatchCtx.getScratchColumnTypeNames();
    DataTypePhysicalVariation[] scratchdataTypePhysicalVariations = vectorizedRowBatchCtx.getScratchDataTypePhysicalVariations();
    LOG.debug("debugDisplayVertexInfo rowColumnNames " + Arrays.toString(allColumnNames));
    LOG.debug("debugDisplayVertexInfo rowColumnTypeInfos " + Arrays.toString(columnTypeInfos));
    LOG.debug("debugDisplayVertexInfo rowDataTypePhysicalVariations " + (dataTypePhysicalVariations == null ? "NULL" : Arrays.toString(dataTypePhysicalVariations)));
    LOG.debug("debugDisplayVertexInfo partitionColumnCount " + partitionColumnCount);
    LOG.debug("debugDisplayVertexInfo virtualColumnCount " + virtualColumnCount);
    LOG.debug("debugDisplayVertexInfo scratchColumnTypeNames " + Arrays.toString(scratchColumnTypeNames));
    LOG.debug("debugDisplayVertexInfo scratchdataTypePhysicalVariations " + (scratchdataTypePhysicalVariations == null ? "NULL" : Arrays.toString(scratchdataTypePhysicalVariations)));
}
Also used : VectorizedRowBatchCtx(org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatchCtx) DataTypePhysicalVariation(org.apache.hadoop.hive.common.type.DataTypePhysicalVariation) UDFToString(org.apache.hadoop.hive.ql.udf.UDFToString) TypeInfo(org.apache.hadoop.hive.serde2.typeinfo.TypeInfo) StructTypeInfo(org.apache.hadoop.hive.serde2.typeinfo.StructTypeInfo) DecimalTypeInfo(org.apache.hadoop.hive.serde2.typeinfo.DecimalTypeInfo) PrimitiveTypeInfo(org.apache.hadoop.hive.serde2.typeinfo.PrimitiveTypeInfo)
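
Every call above builds its log strings eagerly, so a caller-side guard avoids the concatenation cost when debug logging is disabled. A small sketch of that pattern (not the method's actual call site):

// Sketch: skip the string building entirely unless debug logging is enabled.
if (LOG.isDebugEnabled()) {
    debugDisplayVertexInfo(work);
}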

Example 20 with VectorizedRowBatchCtx

Use of org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatchCtx in project hive by apache, from the class TestVectorFilterCompare, method doVectorFilterCompareTest.

private void doVectorFilterCompareTest(TypeInfo typeInfo1, TypeInfo typeInfo2, List<String> columns, String[] columnNames, TypeInfo[] typeInfos, DataTypePhysicalVariation[] dataTypePhysicalVariations, List<ExprNodeDesc> children, ExprNodeGenericFuncDesc exprDesc, Comparison comparison, FilterCompareTestMode filterCompareTestMode, ColumnScalarMode columnScalarMode, VectorRandomBatchSource batchSource, ObjectInspector objectInspector, TypeInfo outputTypeInfo, Object[] resultObjects) throws Exception {
    HiveConf hiveConf = new HiveConf();
    if (filterCompareTestMode == FilterCompareTestMode.ADAPTOR) {
        hiveConf.setBoolVar(HiveConf.ConfVars.HIVE_TEST_VECTOR_ADAPTOR_OVERRIDE, true);
        // Don't use DECIMAL_64 with the VectorUDFAdaptor.
        dataTypePhysicalVariations = null;
    }
    VectorizationContext vectorizationContext = new VectorizationContext("name", columns, Arrays.asList(typeInfos), dataTypePhysicalVariations == null ? null : Arrays.asList(dataTypePhysicalVariations), hiveConf);
    final VectorExpressionDescriptor.Mode mode;
    switch(filterCompareTestMode) {
        case ADAPTOR:
        case COMPARE_VECTOR_EXPRESSION:
            mode = VectorExpressionDescriptor.Mode.PROJECTION;
            break;
        case FILTER_VECTOR_EXPRESSION:
            mode = VectorExpressionDescriptor.Mode.FILTER;
            break;
        default:
            throw new RuntimeException("Unexpected filter compare mode " + filterCompareTestMode);
    }
    VectorExpression vectorExpression = vectorizationContext.getVectorExpression(exprDesc, mode);
    vectorExpression.transientInit(hiveConf);
    if (filterCompareTestMode == FilterCompareTestMode.COMPARE_VECTOR_EXPRESSION && vectorExpression instanceof VectorUDFAdaptor) {
        System.out.println("*NO NATIVE VECTOR EXPRESSION* typeInfo1 " + typeInfo1.toString() + " typeInfo2 " + typeInfo2.toString() + " " + comparison + " " + " filterCompareTestMode " + filterCompareTestMode + " columnScalarMode " + columnScalarMode + " vectorExpression " + vectorExpression.toString());
    }
    String[] outputScratchTypeNames = vectorizationContext.getScratchColumnTypeNames();
    DataTypePhysicalVariation[] outputDataTypePhysicalVariations = vectorizationContext.getScratchDataTypePhysicalVariations();
    VectorizedRowBatchCtx batchContext = new VectorizedRowBatchCtx(
        columnNames, typeInfos, dataTypePhysicalVariations,
        /* dataColumnNums */ null,
        /* partitionColumnCount */ 0,
        /* virtualColumnCount */ 0,
        /* neededVirtualColumns */ null,
        outputScratchTypeNames, outputDataTypePhysicalVariations);
    VectorizedRowBatch batch = batchContext.createVectorizedRowBatch();
    VectorExtractRow resultVectorExtractRow = new VectorExtractRow();
    final int outputColumnNum = vectorExpression.getOutputColumnNum();
    resultVectorExtractRow.init(new TypeInfo[] { outputTypeInfo }, new int[] { outputColumnNum });
    Object[] scratchRow = new Object[1];
    // System.out.println("*VECTOR EXPRESSION* " + vectorExpression.getClass().getSimpleName());
    /*
    System.out.println(
        "*DEBUG* typeInfo1 " + typeInfo1.toString() +
        " typeInfo2 " + typeInfo2.toString() +
        " " + comparison + " " +
        " filterCompareTestMode " + filterCompareTestMode +
        " columnScalarMode " + columnScalarMode +
        " vectorExpression " + vectorExpression.toString());
    */
    final boolean isFilter = (mode == VectorExpressionDescriptor.Mode.FILTER);
    boolean copySelectedInUse = false;
    int[] copySelected = new int[VectorizedRowBatch.DEFAULT_SIZE];
    batchSource.resetBatchIteration();
    int rowIndex = 0;
    while (true) {
        if (!batchSource.fillNextBatch(batch)) {
            break;
        }
        final int originalBatchSize = batch.size;
        if (isFilter) {
            copySelectedInUse = batch.selectedInUse;
            if (batch.selectedInUse) {
                System.arraycopy(batch.selected, 0, copySelected, 0, originalBatchSize);
            }
        }
        // In filter mode, the batch size can be made smaller.
        vectorExpression.evaluate(batch);
        if (!isFilter) {
            extractResultObjects(batch, rowIndex, resultVectorExtractRow, scratchRow, objectInspector, resultObjects);
        } else {
            final int currentBatchSize = batch.size;
            if (copySelectedInUse && batch.selectedInUse) {
                int selectIndex = 0;
                for (int i = 0; i < originalBatchSize; i++) {
                    final int originalBatchIndex = copySelected[i];
                    final boolean booleanResult;
                    if (selectIndex < currentBatchSize && batch.selected[selectIndex] == originalBatchIndex) {
                        booleanResult = true;
                        selectIndex++;
                    } else {
                        booleanResult = false;
                    }
                    resultObjects[rowIndex + i] = new BooleanWritable(booleanResult);
                }
            } else if (batch.selectedInUse) {
                int selectIndex = 0;
                for (int i = 0; i < originalBatchSize; i++) {
                    final boolean booleanResult;
                    if (selectIndex < currentBatchSize && batch.selected[selectIndex] == i) {
                        booleanResult = true;
                        selectIndex++;
                    } else {
                        booleanResult = false;
                    }
                    resultObjects[rowIndex + i] = new BooleanWritable(booleanResult);
                }
            } else if (currentBatchSize == 0) {
                // Whole batch got zapped.
                for (int i = 0; i < originalBatchSize; i++) {
                    resultObjects[rowIndex + i] = new BooleanWritable(false);
                }
            } else {
                // Every row kept.
                for (int i = 0; i < originalBatchSize; i++) {
                    resultObjects[rowIndex + i] = new BooleanWritable(true);
                }
            }
        }
        rowIndex += originalBatchSize;
    }
}
Also used : VectorizationContext(org.apache.hadoop.hive.ql.exec.vector.VectorizationContext) VectorUDFAdaptor(org.apache.hadoop.hive.ql.exec.vector.udf.VectorUDFAdaptor) VectorExtractRow(org.apache.hadoop.hive.ql.exec.vector.VectorExtractRow) VectorizedRowBatchCtx(org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatchCtx) VectorizedRowBatch(org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch) DataTypePhysicalVariation(org.apache.hadoop.hive.common.type.DataTypePhysicalVariation) BooleanWritable(org.apache.hadoop.io.BooleanWritable) HiveConf(org.apache.hadoop.hive.conf.HiveConf) VectorExpressionDescriptor(org.apache.hadoop.hive.ql.exec.vector.VectorExpressionDescriptor) VectorExpression(org.apache.hadoop.hive.ql.exec.vector.expressions.VectorExpression)
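
The filter branches above reconstruct a per-row boolean from the batch's selection vector: after a filter expression runs, batch.selected holds the surviving row indices in ascending order and batch.size shrinks to the survivor count. A minimal sketch of that contract (the helper name is illustrative):

// Illustrative: mark which of the original rows survived a filter, given that
// selected[0..selectedSize) lists surviving row indices in ascending order.
static boolean[] survivingRows(int originalSize, int[] selected, int selectedSize) {
    boolean[] kept = new boolean[originalSize]; // defaults to false (filtered out)
    for (int i = 0; i < selectedSize; i++) {
        kept[selected[i]] = true;
    }
    return kept;
}

When the incoming batch already had selectedInUse set, the test instead intersects against the saved copy of the original selection, which is why it snapshots batch.selected before evaluating the expression.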

Aggregations

VectorizedRowBatchCtx (org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatchCtx): 34 usages
VectorizedRowBatch (org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch): 14 usages
HiveConf (org.apache.hadoop.hive.conf.HiveConf): 12 usages
VectorExtractRow (org.apache.hadoop.hive.ql.exec.vector.VectorExtractRow): 12 usages
VectorRandomRowSource (org.apache.hadoop.hive.ql.exec.vector.VectorRandomRowSource): 12 usages
VectorizationContext (org.apache.hadoop.hive.ql.exec.vector.VectorizationContext): 12 usages
VectorUDFAdaptor (org.apache.hadoop.hive.ql.exec.vector.udf.VectorUDFAdaptor): 11 usages
DataTypePhysicalVariation (org.apache.hadoop.hive.common.type.DataTypePhysicalVariation): 10 usages
TypeInfo (org.apache.hadoop.hive.serde2.typeinfo.TypeInfo): 10 usages
ArrayList (java.util.ArrayList): 9 usages
VectorExpression (org.apache.hadoop.hive.ql.exec.vector.expressions.VectorExpression): 8 usages
DecimalTypeInfo (org.apache.hadoop.hive.serde2.typeinfo.DecimalTypeInfo): 8 usages
PrimitiveTypeInfo (org.apache.hadoop.hive.serde2.typeinfo.PrimitiveTypeInfo): 8 usages
GenericUDF (org.apache.hadoop.hive.ql.udf.generic.GenericUDF): 7 usages
MapWork (org.apache.hadoop.hive.ql.plan.MapWork): 6 usages
VectorRandomBatchSource (org.apache.hadoop.hive.ql.exec.vector.VectorRandomBatchSource): 5 usages
GenerationSpec (org.apache.hadoop.hive.ql.exec.vector.VectorRandomRowSource.GenerationSpec): 5 usages
AcidOutputFormat (org.apache.hadoop.hive.ql.io.AcidOutputFormat): 5 usages
ExprNodeColumnDesc (org.apache.hadoop.hive.ql.plan.ExprNodeColumnDesc): 5 usages
ExprNodeDesc (org.apache.hadoop.hive.ql.plan.ExprNodeDesc): 5 usages