Search in sources:

Example 1 with Operation

Use of org.apache.hadoop.hive.ql.io.AcidUtils.Operation in project hive by apache.

From the class SemanticAnalyzer, method genFileSinkPlan:

@SuppressWarnings("nls")
protected Operator genFileSinkPlan(String dest, QB qb, Operator input) throws SemanticException {
    RowResolver inputRR = opParseCtx.get(input).getRowResolver();
    QBMetaData qbm = qb.getMetaData();
    Integer destType = qbm.getDestTypeForAlias(dest);
    // destination table if any
    Table destinationTable = null;
    // true for full ACID table and MM table
    boolean destTableIsTransactional;
    // should the destination table be written to using ACID
    boolean destTableIsFullAcid;
    // should we add files directly to the final path
    boolean isDirectInsert = false;
    AcidUtils.Operation acidOperation = null;
    boolean destTableIsTemporary = false;
    boolean destTableIsMaterialization = false;
    // destination partition if any
    Partition destinationPartition = null;
    // the intermediate destination directory
    Path queryTmpdir = null;
    String moveTaskId = null;
    // the final destination directory
    Path destinationPath = null;
    TableDesc tableDescriptor = null;
    StructObjectInspector specificRowObjectInspector = null;
    int currentTableId = 0;
    boolean isLocal = false;
    SortBucketRSCtx rsCtx = new SortBucketRSCtx();
    DynamicPartitionCtx dpCtx = null;
    LoadTableDesc ltd = null;
    ListBucketingCtx lbCtx = null;
    Map<String, String> partSpec = null;
    boolean isMmTable = false, isMmCreate = false, isNonNativeTable = false;
    Long writeId = null;
    HiveTxnManager txnMgr = getTxnMgr();
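    // The destination type drives the three branches below:
    //   DEST_TABLE      - INSERT into a (possibly dynamically partitioned) table
    //   DEST_PARTITION  - INSERT into a specific static partition
    //   DEST_LOCAL_FILE / DEST_DFS_FILE - directory targets, including CTAS and CREATE MATERIALIZED VIEW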
    switch(destType.intValue()) {
        case QBMetaData.DEST_TABLE:
            {
                destinationTable = qbm.getDestTableForAlias(dest);
                destTableIsTransactional = AcidUtils.isTransactionalTable(destinationTable);
                destTableIsFullAcid = AcidUtils.isFullAcidTable(destinationTable);
                destTableIsTemporary = destinationTable.isTemporary();
                // Is the user trying to insert into an external table?
                checkExternalTable(destinationTable);
                partSpec = qbm.getPartSpecForAlias(dest);
                destinationPath = destinationTable.getPath();
                checkImmutableTable(qb, destinationTable, destinationPath, false);
                // check for partition
                List<FieldSchema> parts = destinationTable.getPartitionKeys();
                if (parts != null && parts.size() > 0) {
                    // table is partitioned
                    if (partSpec == null || partSpec.size() == 0) {
                        // user did NOT specify partition
                        throw new SemanticException(generateErrorMessage(qb.getParseInfo().getDestForClause(dest), ErrorMsg.NEED_PARTITION_ERROR.getMsg()));
                    }
                    dpCtx = qbm.getDPCtx(dest);
                    if (dpCtx == null) {
                        destinationTable.validatePartColumnNames(partSpec, false);
                        dpCtx = new DynamicPartitionCtx(partSpec, conf.getVar(HiveConf.ConfVars.DEFAULTPARTITIONNAME), conf.getIntVar(HiveConf.ConfVars.DYNAMICPARTITIONMAXPARTSPERNODE));
                        qbm.setDPCtx(dest, dpCtx);
                    }
                }
                // Check for dynamic partitions.
                dpCtx = checkDynPart(qb, qbm, destinationTable, partSpec, dest);
                if (dpCtx != null && dpCtx.getSPPath() != null) {
                    destinationPath = new Path(destinationTable.getPath(), dpCtx.getSPPath());
                }
                isNonNativeTable = destinationTable.isNonNative();
                isMmTable = AcidUtils.isInsertOnlyTable(destinationTable.getParameters());
                AcidUtils.Operation acidOp = AcidUtils.Operation.NOT_ACID;
                // this table_desc does not contain the partitioning columns
                tableDescriptor = Utilities.getTableDesc(destinationTable);
                if (!isNonNativeTable) {
                    if (destTableIsTransactional) {
                        acidOp = getAcidType(tableDescriptor.getOutputFileFormatClass(), dest, isMmTable);
                    }
                }
                isDirectInsert = isDirectInsert(destTableIsFullAcid, acidOp);
                acidOperation = acidOp;
                queryTmpdir = getTmpDir(isNonNativeTable, isMmTable, isDirectInsert, destinationPath);
                moveTaskId = getMoveTaskId();
                if (Utilities.FILE_OP_LOGGER.isTraceEnabled()) {
                    Utilities.FILE_OP_LOGGER.trace("create filesink w/DEST_TABLE specifying " + queryTmpdir + " from " + destinationPath);
                }
                if (dpCtx != null) {
                    // set the root of the temporary path where the dynamic partition columns will be populated
                    dpCtx.setRootPath(queryTmpdir);
                }
                // Add NOT NULL constraint check
                input = genConstraintsPlan(dest, qb, input);
                if (!qb.getIsQuery()) {
                    input = genConversionSelectOperator(dest, qb, input, destinationTable.getDeserializer(), dpCtx, parts);
                }
                if (destinationTable.isMaterializedView() && mvRebuildMode == MaterializationRebuildMode.INSERT_OVERWRITE_REBUILD) {
                    // Data organization (DISTRIBUTED, SORTED, CLUSTERED) for materialized view
                    // TODO: We only do this for a full rebuild
                    String sortColsStr = destinationTable.getProperty(Constants.MATERIALIZED_VIEW_SORT_COLUMNS);
                    String distributeColsStr = destinationTable.getProperty(Constants.MATERIALIZED_VIEW_DISTRIBUTE_COLUMNS);
                    if (sortColsStr != null || distributeColsStr != null) {
                        input = genMaterializedViewDataOrgPlan(destinationTable, sortColsStr, distributeColsStr, inputRR, input);
                    }
                } else {
                    // Add sorting/bucketing if needed
                    input = genBucketingSortingDest(dest, input, qb, tableDescriptor, destinationTable, rsCtx);
                }
                idToTableNameMap.put(String.valueOf(destTableId), destinationTable.getTableName());
                currentTableId = destTableId;
                destTableId++;
                // NOTE: specify Dynamic partitions in dest_tab for WriteEntity
                if (!isNonNativeTable || destinationTable.getStorageHandler().commitInMoveTask()) {
                    if (destTableIsTransactional) {
                        acidOp = getAcidType(tableDescriptor.getOutputFileFormatClass(), dest, isMmTable);
                        checkAcidConstraints();
                    } else {
                        lbCtx = constructListBucketingCtx(destinationTable.getSkewedColNames(), destinationTable.getSkewedColValues(), destinationTable.getSkewedColValueLocationMaps(), destinationTable.isStoredAsSubDirectories());
                    }
                    try {
                        if (ctx.getExplainConfig() != null) {
                            // For an explain plan no txn is opened, so it doesn't make sense to allocate a write id
                            writeId = null;
                        } else {
                            if (isMmTable) {
                                writeId = txnMgr.getTableWriteId(destinationTable.getDbName(), destinationTable.getTableName());
                            } else {
                                writeId = acidOp == Operation.NOT_ACID ? null : txnMgr.getTableWriteId(destinationTable.getDbName(), destinationTable.getTableName());
                            }
                        }
                    } catch (LockException ex) {
                        throw new SemanticException("Failed to allocate write Id", ex);
                    }
                    boolean isReplace = !qb.getParseInfo().isInsertIntoTable(destinationTable.getDbName(), destinationTable.getTableName());
                    ltd = new LoadTableDesc(queryTmpdir, tableDescriptor, dpCtx, acidOp, isReplace, writeId);
                    if (writeId != null) {
                        ltd.setStmtId(txnMgr.getCurrentStmtId());
                    }
                    ltd.setMoveTaskId(moveTaskId);
                    // For Acid table, Insert Overwrite shouldn't replace the table content. We keep the old
                    // deltas and base and leave them up to the cleaner to clean up
                    boolean isInsertInto = qb.getParseInfo().isInsertIntoTable(destinationTable.getDbName(), destinationTable.getTableName());
                    LoadFileType loadType;
                    if (isDirectInsert) {
                        loadType = LoadFileType.IGNORE;
                    } else if (!isInsertInto && !destTableIsTransactional) {
                        loadType = LoadFileType.REPLACE_ALL;
                    } else {
                        loadType = LoadFileType.KEEP_EXISTING;
                    }
                    ltd.setLoadFileType(loadType);
                    ltd.setInsertOverwrite(!isInsertInto);
                    ltd.setIsDirectInsert(isDirectInsert);
                    ltd.setLbCtx(lbCtx);
                    loadTableWork.add(ltd);
                } else {
                    // This is a non-native table.
                    // We need to set stats as inaccurate.
                    setStatsForNonNativeTable(destinationTable.getDbName(), destinationTable.getTableName());
                    // true if it is insert overwrite.
                    boolean overwrite = !qb.getParseInfo().isInsertIntoTable(String.format("%s.%s", destinationTable.getDbName(), destinationTable.getTableName()));
                    createPreInsertDesc(destinationTable, overwrite);
                    ltd = new LoadTableDesc(queryTmpdir, tableDescriptor, partSpec == null ? ImmutableMap.of() : partSpec);
                    ltd.setInsertOverwrite(overwrite);
                    ltd.setLoadFileType(overwrite ? LoadFileType.REPLACE_ALL : LoadFileType.KEEP_EXISTING);
                }
                if (destinationTable.isMaterializedView()) {
                    materializedViewUpdateDesc = new MaterializedViewUpdateDesc(destinationTable.getFullyQualifiedName(), false, false, true);
                }
                WriteEntity output = generateTableWriteEntity(dest, destinationTable, partSpec, ltd, dpCtx);
                ctx.getLoadTableOutputMap().put(ltd, output);
                break;
            }
        case QBMetaData.DEST_PARTITION:
            {
                destinationPartition = qbm.getDestPartitionForAlias(dest);
                destinationTable = destinationPartition.getTable();
                destTableIsTransactional = AcidUtils.isTransactionalTable(destinationTable);
                destTableIsFullAcid = AcidUtils.isFullAcidTable(destinationTable);
                checkExternalTable(destinationTable);
                Path partPath = destinationPartition.getDataLocation();
                checkImmutableTable(qb, destinationTable, partPath, true);
                // Previous behavior (HIVE-1707) used to replace the partition's dfs with the table's dfs.
                // The changes in HIVE-19891 appear to no longer support that behavior.
                destinationPath = partPath;
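                // Inserting into an archived partition is not allowed; report the conflicting archive.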
                if (MetaStoreUtils.isArchived(destinationPartition.getTPartition())) {
                    try {
                        String conflictingArchive = ArchiveUtils.conflictingArchiveNameOrNull(db, destinationTable, destinationPartition.getSpec());
                        String message = String.format("Insert conflict with existing archive: %s", conflictingArchive);
                        throw new SemanticException(message);
                    } catch (SemanticException err) {
                        throw err;
                    } catch (HiveException err) {
                        throw new SemanticException(err);
                    }
                }
                isNonNativeTable = destinationTable.isNonNative();
                isMmTable = AcidUtils.isInsertOnlyTable(destinationTable.getParameters());
                AcidUtils.Operation acidOp = AcidUtils.Operation.NOT_ACID;
                // this table_desc does not contain the partitioning columns
                tableDescriptor = Utilities.getTableDesc(destinationTable);
                if (!isNonNativeTable) {
                    if (destTableIsTransactional) {
                        acidOp = getAcidType(tableDescriptor.getOutputFileFormatClass(), dest, isMmTable);
                    }
                }
                isDirectInsert = isDirectInsert(destTableIsFullAcid, acidOp);
                acidOperation = acidOp;
                queryTmpdir = getTmpDir(isNonNativeTable, isMmTable, isDirectInsert, destinationPath);
                moveTaskId = getMoveTaskId();
                if (Utilities.FILE_OP_LOGGER.isTraceEnabled()) {
                    Utilities.FILE_OP_LOGGER.trace("create filesink w/DEST_PARTITION specifying " + queryTmpdir + " from " + destinationPath);
                }
                // Add NOT NULL constraint check
                input = genConstraintsPlan(dest, qb, input);
                if (!qb.getIsQuery()) {
                    input = genConversionSelectOperator(dest, qb, input, destinationTable.getDeserializer(), dpCtx, null);
                }
                if (destinationTable.isMaterializedView() && mvRebuildMode == MaterializationRebuildMode.INSERT_OVERWRITE_REBUILD) {
                    // Data organization (DISTRIBUTED, SORTED, CLUSTERED) for materialized view
                    // TODO: We only do this for a full rebuild
                    String sortColsStr = destinationTable.getProperty(Constants.MATERIALIZED_VIEW_SORT_COLUMNS);
                    String distributeColsStr = destinationTable.getProperty(Constants.MATERIALIZED_VIEW_DISTRIBUTE_COLUMNS);
                    if (sortColsStr != null || distributeColsStr != null) {
                        input = genMaterializedViewDataOrgPlan(destinationTable, sortColsStr, distributeColsStr, inputRR, input);
                    }
                } else {
                    // Add sorting/bucketing if needed
                    input = genBucketingSortingDest(dest, input, qb, tableDescriptor, destinationTable, rsCtx);
                }
                idToTableNameMap.put(String.valueOf(destTableId), destinationTable.getTableName());
                currentTableId = destTableId;
                destTableId++;
                if (destTableIsTransactional) {
                    acidOp = getAcidType(tableDescriptor.getOutputFileFormatClass(), dest, isMmTable);
                    checkAcidConstraints();
                } else {
                    // Transactional tables can't be list bucketed or have skewed cols
                    lbCtx = constructListBucketingCtx(destinationPartition.getSkewedColNames(), destinationPartition.getSkewedColValues(), destinationPartition.getSkewedColValueLocationMaps(), destinationPartition.isStoredAsSubDirectories());
                }
                try {
                    if (ctx.getExplainConfig() != null) {
                        // For an explain plan no txn is opened, so it doesn't make sense to allocate a write id
                        writeId = null;
                    } else {
                        if (isMmTable) {
                            writeId = txnMgr.getTableWriteId(destinationTable.getDbName(), destinationTable.getTableName());
                        } else {
                            writeId = (acidOp == Operation.NOT_ACID) ? null : txnMgr.getTableWriteId(destinationTable.getDbName(), destinationTable.getTableName());
                        }
                    }
                } catch (LockException ex) {
                    throw new SemanticException("Failed to allocate write Id", ex);
                }
                ltd = new LoadTableDesc(queryTmpdir, tableDescriptor, destinationPartition.getSpec(), acidOp, writeId);
                if (writeId != null) {
                    ltd.setStmtId(txnMgr.getCurrentStmtId());
                }
                // For the current context for generating File Sink Operator, it is either INSERT INTO or INSERT OVERWRITE.
                // So the next line works.
                boolean isInsertInto = !qb.getParseInfo().isDestToOpTypeInsertOverwrite(dest);
                // For Acid table, Insert Overwrite shouldn't replace the table content. We keep the old
                // deltas and base and leave them up to the cleaner to clean up
                LoadFileType loadType;
                if (isDirectInsert) {
                    loadType = LoadFileType.IGNORE;
                } else if (!isInsertInto && !destTableIsTransactional) {
                    loadType = LoadFileType.REPLACE_ALL;
                } else {
                    loadType = LoadFileType.KEEP_EXISTING;
                }
                ltd.setLoadFileType(loadType);
                ltd.setInsertOverwrite(!isInsertInto);
                ltd.setIsDirectInsert(isDirectInsert);
                ltd.setLbCtx(lbCtx);
                ltd.setMoveTaskId(moveTaskId);
                loadTableWork.add(ltd);
                if (!outputs.add(new WriteEntity(destinationPartition, determineWriteType(ltd, dest)))) {
                    throw new SemanticException(ErrorMsg.OUTPUT_SPECIFIED_MULTIPLE_TIMES.getMsg(destinationTable.getTableName() + "@" + destinationPartition.getName()));
                }
                break;
            }
        case QBMetaData.DEST_LOCAL_FILE:
            isLocal = true;
        // fall through
        case QBMetaData.DEST_DFS_FILE:
            {
                destinationPath = getDestinationFilePath(qbm.getDestFileForAlias(dest), isMmTable);
                // CTAS case: the file output format and serde are defined by the create
                // table command rather than taking the default value
                List<FieldSchema> fieldSchemas = null;
                List<FieldSchema> partitionColumns = null;
                List<String> partitionColumnNames = null;
                List<FieldSchema> sortColumns = null;
                List<String> sortColumnNames = null;
                List<FieldSchema> distributeColumns = null;
                List<String> distributeColumnNames = null;
                List<ColumnInfo> fileSinkColInfos = null;
                List<ColumnInfo> sortColInfos = null;
                List<ColumnInfo> distributeColInfos = null;
                TableName tableName = null;
                Map<String, String> tblProps = null;
                CreateTableDesc tblDesc = qb.getTableDesc();
                CreateMaterializedViewDesc viewDesc = qb.getViewDesc();
                if (tblDesc != null) {
                    fieldSchemas = new ArrayList<>();
                    partitionColumns = new ArrayList<>();
                    partitionColumnNames = tblDesc.getPartColNames();
                    fileSinkColInfos = new ArrayList<>();
                    destTableIsTemporary = tblDesc.isTemporary();
                    destTableIsMaterialization = tblDesc.isMaterialization();
                    tableName = TableName.fromString(tblDesc.getDbTableName(), null, tblDesc.getDatabaseName());
                    tblProps = tblDesc.getTblProps();
                } else if (viewDesc != null) {
                    fieldSchemas = new ArrayList<>();
                    partitionColumns = new ArrayList<>();
                    partitionColumnNames = viewDesc.getPartColNames();
                    sortColumns = new ArrayList<>();
                    sortColumnNames = viewDesc.getSortColNames();
                    distributeColumns = new ArrayList<>();
                    distributeColumnNames = viewDesc.getDistributeColNames();
                    fileSinkColInfos = new ArrayList<>();
                    sortColInfos = new ArrayList<>();
                    distributeColInfos = new ArrayList<>();
                    destTableIsTemporary = false;
                    destTableIsMaterialization = false;
                    tableName = HiveTableName.ofNullableWithNoDefault(viewDesc.getViewName());
                    tblProps = viewDesc.getTblProps();
                }
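                // For CTAS / CREATE MATERIALIZED VIEW the destination table does not exist yet,
                // so transactionality is derived from the supplied table properties.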
                destTableIsTransactional = tblProps != null && AcidUtils.isTablePropertyTransactional(tblProps);
                if (destTableIsTransactional) {
                    try {
                        if (ctx.getExplainConfig() != null) {
                            // For an explain plan no txn is opened, so it doesn't make sense to allocate a write id
                            writeId = 0L;
                        } else {
                            writeId = txnMgr.getTableWriteId(tableName.getDb(), tableName.getTable());
                        }
                    } catch (LockException ex) {
                        throw new SemanticException("Failed to allocate write Id", ex);
                    }
                    if (AcidUtils.isInsertOnlyTable(tblProps, true)) {
                        isMmTable = isMmCreate = true;
                        if (tblDesc != null) {
                            tblDesc.setInitialMmWriteId(writeId);
                        } else {
                            viewDesc.setInitialMmWriteId(writeId);
                        }
                    }
                }
                if (isLocal) {
                    assert !isMmTable;
                    // for local directory - we always write to map-red intermediate
                    // store and then copy to local fs
                    queryTmpdir = ctx.getMRTmpPath();
                } else {
                    // no copy is required. we may want to revisit this policy in future
                    try {
                        Path qPath = FileUtils.makeQualified(destinationPath, conf);
                        queryTmpdir = isMmTable ? qPath : ctx.getTempDirForFinalJobPath(qPath);
                        if (Utilities.FILE_OP_LOGGER.isTraceEnabled()) {
                            Utilities.FILE_OP_LOGGER.trace("Setting query directory " + queryTmpdir + " from " + destinationPath + " (" + isMmTable + ")");
                        }
                    } catch (Exception e) {
                        throw new SemanticException("Error creating temporary folder on: " + destinationPath, e);
                    }
                }
                // Check for dynamic partitions.
                final String cols, colTypes;
                final boolean isPartitioned;
                if (dpCtx != null) {
                    throw new SemanticException("Dynamic partition context has already been created, this should not happen");
                }
                if (!CollectionUtils.isEmpty(partitionColumnNames)) {
                    ColsAndTypes ct = deriveFileSinkColTypes(inputRR, partitionColumnNames, sortColumnNames, distributeColumnNames, fieldSchemas, partitionColumns, sortColumns, distributeColumns, fileSinkColInfos, sortColInfos, distributeColInfos);
                    cols = ct.cols;
                    colTypes = ct.colTypes;
                    dpCtx = new DynamicPartitionCtx(partitionColumnNames, conf.getVar(HiveConf.ConfVars.DEFAULTPARTITIONNAME), conf.getIntVar(HiveConf.ConfVars.DYNAMICPARTITIONMAXPARTSPERNODE));
                    qbm.setDPCtx(dest, dpCtx);
                    // set the root of the temporary path where the dynamic partition columns will be populated
                    dpCtx.setRootPath(queryTmpdir);
                    isPartitioned = true;
                } else {
                    ColsAndTypes ct = deriveFileSinkColTypes(inputRR, sortColumnNames, distributeColumnNames, fieldSchemas, sortColumns, distributeColumns, sortColInfos, distributeColInfos);
                    cols = ct.cols;
                    colTypes = ct.colTypes;
                    isPartitioned = false;
                }
                // update the create table descriptor with the resulting schema.
                if (tblDesc != null) {
                    tblDesc.setCols(new ArrayList<>(fieldSchemas));
                    tblDesc.setPartCols(new ArrayList<>(partitionColumns));
                } else if (viewDesc != null) {
                    viewDesc.setSchema(new ArrayList<>(fieldSchemas));
                    viewDesc.setPartCols(new ArrayList<>(partitionColumns));
                    if (viewDesc.isOrganized()) {
                        viewDesc.setSortCols(new ArrayList<>(sortColumns));
                        viewDesc.setDistributeCols(new ArrayList<>(distributeColumns));
                    }
                }
                boolean isDestTempFile = true;
                if (!ctx.isMRTmpFileURI(destinationPath.toUri().toString()) && !ctx.isResultCacheDir(destinationPath)) {
                    // not a temp dir and not a result cache dir
                    idToTableNameMap.put(String.valueOf(destTableId), destinationPath.toUri().toString());
                    currentTableId = destTableId;
                    destTableId++;
                    isDestTempFile = false;
                }
                if (tblDesc == null) {
                    if (viewDesc != null) {
                        tableDescriptor = PlanUtils.getTableDesc(viewDesc, cols, colTypes);
                    } else if (qb.getIsQuery()) {
                        Class<? extends Deserializer> serdeClass = LazySimpleSerDe.class;
                        String fileFormat = conf.getResultFileFormat().toString();
                        if (SessionState.get().getIsUsingThriftJDBCBinarySerDe()) {
                            serdeClass = ThriftJDBCBinarySerDe.class;
                            fileFormat = ResultFileFormat.SEQUENCEFILE.toString();
                            // Set the fetch formatter to be a no-op for the ListSinkOperator, since we'll
                            // write out formatted thrift objects to SequenceFile
                            conf.set(SerDeUtils.LIST_SINK_OUTPUT_FORMATTER, NoOpFetchFormatter.class.getName());
                        } else if (fileFormat.equals(PlanUtils.LLAP_OUTPUT_FORMAT_KEY)) {
                            // If this output format is Llap, check to see if Arrow is requested
                            boolean useArrow = HiveConf.getBoolVar(conf, HiveConf.ConfVars.LLAP_OUTPUT_FORMAT_ARROW);
                            serdeClass = useArrow ? ArrowColumnarBatchSerDe.class : LazyBinarySerDe2.class;
                        }
                        tableDescriptor = PlanUtils.getDefaultQueryOutputTableDesc(cols, colTypes, fileFormat, serdeClass);
                    } else {
                        tableDescriptor = PlanUtils.getDefaultTableDesc(qb.getDirectoryDesc(), cols, colTypes);
                    }
                } else {
                    tableDescriptor = PlanUtils.getTableDesc(tblDesc, cols, colTypes);
                }
                // if available, set location in table desc properties
                if (tblDesc != null && tblDesc.getLocation() != null && tableDescriptor != null && !tableDescriptor.getProperties().containsKey(hive_metastoreConstants.META_TABLE_LOCATION)) {
                    tableDescriptor.getProperties().setProperty(hive_metastoreConstants.META_TABLE_LOCATION, tblDesc.getLocation());
                }
                // We need a specific rowObjectInspector in this case
                try {
                    specificRowObjectInspector = (StructObjectInspector) tableDescriptor.getDeserializer(conf).getObjectInspector();
                } catch (Exception e) {
                    throw new SemanticException(e.getMessage(), e);
                }
                boolean isDfsDir = (destType == QBMetaData.DEST_DFS_FILE);
                try {
                    destinationTable = tblDesc != null ? tblDesc.toTable(conf) : viewDesc != null ? viewDesc.toTable(conf) : null;
                } catch (HiveException e) {
                    throw new SemanticException(e);
                }
                destTableIsFullAcid = AcidUtils.isFullAcidTable(destinationTable);
                // Data organization (DISTRIBUTED, SORTED, CLUSTERED) for materialized view
                if (viewDesc != null && viewDesc.isOrganized()) {
                    input = genMaterializedViewDataOrgPlan(sortColInfos, distributeColInfos, inputRR, input);
                }
                moveTaskId = getMoveTaskId();
                if (isPartitioned) {
                    // Create a SELECT that may reorder the columns if needed
                    RowResolver rowResolver = new RowResolver();
                    List<ExprNodeDesc> columnExprs = new ArrayList<>();
                    List<String> colNames = new ArrayList<>();
                    Map<String, ExprNodeDesc> colExprMap = new HashMap<>();
                    for (int i = 0; i < fileSinkColInfos.size(); i++) {
                        ColumnInfo ci = fileSinkColInfos.get(i);
                        ExprNodeDesc columnExpr = new ExprNodeColumnDesc(ci);
                        String name = getColumnInternalName(i);
                        rowResolver.put("", name, new ColumnInfo(name, columnExpr.getTypeInfo(), "", false));
                        columnExprs.add(columnExpr);
                        colNames.add(name);
                        colExprMap.put(name, columnExpr);
                    }
                    input = putOpInsertMap(OperatorFactory.getAndMakeChild(new SelectDesc(columnExprs, colNames), new RowSchema(rowResolver.getColumnInfos()), input), rowResolver);
                    input.setColumnExprMap(colExprMap);
                    // If this is a partitioned CTAS or MV statement, we are going to create a LoadTableDesc
                    // object. Although the table does not exist in metastore, we will swap the CreateTableTask
                    // and MoveTask resulting from this LoadTable so in this specific case, first we create
                    // the metastore table, then we move and commit the partitions. At least for the time being,
                    // this order needs to be enforced because metastore expects a table to exist before we can
                    // add any partitions to it.
                    isNonNativeTable = tableDescriptor.isNonNative();
                    if (!isNonNativeTable || destinationTable.getStorageHandler().commitInMoveTask()) {
                        AcidUtils.Operation acidOp = AcidUtils.Operation.NOT_ACID;
                        if (destTableIsTransactional) {
                            acidOp = getAcidType(tableDescriptor.getOutputFileFormatClass(), dest, isMmTable);
                            checkAcidConstraints();
                        }
                        // isReplace = false in case a concurrent operation is executed
                        ltd = new LoadTableDesc(queryTmpdir, tableDescriptor, dpCtx, acidOp, false, writeId);
                        if (writeId != null) {
                            ltd.setStmtId(txnMgr.getCurrentStmtId());
                        }
                        ltd.setLoadFileType(LoadFileType.KEEP_EXISTING);
                        ltd.setInsertOverwrite(false);
                        loadTableWork.add(ltd);
                    } else {
                        // This is a non-native table.
                        // We need to set stats as inaccurate.
                        setStatsForNonNativeTable(tableDescriptor.getDbName(), tableDescriptor.getTableName());
                        ltd = new LoadTableDesc(queryTmpdir, tableDescriptor, dpCtx.getPartSpec());
                        ltd.setInsertOverwrite(false);
                        ltd.setLoadFileType(LoadFileType.KEEP_EXISTING);
                    }
                    ltd.setMoveTaskId(moveTaskId);
                    ltd.setMdTable(destinationTable);
                    WriteEntity output = generateTableWriteEntity(dest, destinationTable, dpCtx.getPartSpec(), ltd, dpCtx);
                    ctx.getLoadTableOutputMap().put(ltd, output);
                } else {
                    // Create LFD even for MM CTAS - it's a no-op move, but it still seems to be used for stats.
                    LoadFileDesc loadFileDesc = new LoadFileDesc(tblDesc, viewDesc, queryTmpdir, destinationPath, isDfsDir, cols, colTypes,
                        // there is a change here - prev version had 'transactional', one before 'acid'
                        destTableIsFullAcid ? Operation.INSERT : Operation.NOT_ACID, isMmCreate);
                    loadFileDesc.setMoveTaskId(moveTaskId);
                    loadFileWork.add(loadFileDesc);
                    try {
                        Path qualifiedPath = destinationPath.getFileSystem(conf).makeQualified(destinationPath);
                        if (!outputs.add(new WriteEntity(qualifiedPath, !isDfsDir, isDestTempFile))) {
                            throw new SemanticException(ErrorMsg.OUTPUT_SPECIFIED_MULTIPLE_TIMES.getMsg(destinationPath.toUri().toString()));
                        }
                    } catch (IOException ex) {
                        throw new SemanticException("Error while getting the full qualified path for the given directory: " + ex.getMessage());
                    }
                }
                break;
            }
        default:
            throw new SemanticException("Unknown destination type: " + destType);
    }
    inputRR = opParseCtx.get(input).getRowResolver();
    List<ColumnInfo> vecCol = new ArrayList<ColumnInfo>();
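    // UPDATE/DELETE sinks only need the ROWID virtual column; other sinks take the full
    // destination row schema from the table (or directory/view) deserializer.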
    if (updating(dest) || deleting(dest)) {
        vecCol.add(new ColumnInfo(VirtualColumn.ROWID.getName(), VirtualColumn.ROWID.getTypeInfo(), "", true));
    } else {
        try {
            // If we already have a specific inspector (view or directory as a target) use that
            // Otherwise use the table deserializer to get the inspector
            StructObjectInspector rowObjectInspector = specificRowObjectInspector != null ? specificRowObjectInspector : (StructObjectInspector) destinationTable.getDeserializer().getObjectInspector();
            List<? extends StructField> fields = rowObjectInspector.getAllStructFieldRefs();
            for (StructField field : fields) {
                vecCol.add(new ColumnInfo(field.getFieldName(), TypeInfoUtils.getTypeInfoFromObjectInspector(field.getFieldObjectInspector()), "", false));
            }
        } catch (Exception e) {
            throw new SemanticException(e.getMessage(), e);
        }
    }
    RowSchema fsRS = new RowSchema(vecCol);
    // The output files of a FileSink can be merged if they are either not being written to a table
    // or are being written to a table which is not bucketed
    // and the table is not sorted
    boolean canBeMerged = (destinationTable == null || !((destinationTable.getNumBuckets() > 0) || (destinationTable.getSortCols() != null && destinationTable.getSortCols().size() > 0)));
    // If this table is working with ACID semantics, turn off merging
    canBeMerged &= !destTableIsFullAcid;
    // Generate the partition columns from the parent input
    if (destType == QBMetaData.DEST_TABLE || destType == QBMetaData.DEST_PARTITION) {
        genPartnCols(dest, input, qb, tableDescriptor, destinationTable, rsCtx);
    }
    FileSinkDesc fileSinkDesc = createFileSinkDesc(dest, tableDescriptor, destinationPartition, destinationPath,
        currentTableId, destTableIsFullAcid, // this was 1/4 acid
        destTableIsTemporary, destTableIsMaterialization, queryTmpdir, rsCtx, dpCtx, lbCtx, fsRS, canBeMerged,
        destinationTable, writeId, isMmCreate, destType, qb, isDirectInsert, acidOperation, moveTaskId);
    if (isMmCreate) {
        // Add FSD so that the LoadTask compilation could fix up its path to avoid the move.
        if (tableDesc != null) {
            tableDesc.setWriter(fileSinkDesc);
        } else {
            createVwDesc.setWriter(fileSinkDesc);
        }
    }
    if (fileSinkDesc.getInsertOverwrite()) {
        if (ltd != null) {
            ltd.setInsertOverwrite(true);
        }
    }
    if (null != tableDescriptor && useBatchingSerializer(tableDescriptor.getSerdeClassName())) {
        fileSinkDesc.setIsUsingBatchingSerDe(true);
    } else {
        fileSinkDesc.setIsUsingBatchingSerDe(false);
    }
    Operator output = putOpInsertMap(OperatorFactory.getAndMakeChild(fileSinkDesc, fsRS, input), inputRR);
    // Skip lineage handling for direct inserts unless this is a plain INSERT operation,
    // e.g. in case of a merge statement.
    if (!isDirectInsert || acidOperation == AcidUtils.Operation.INSERT) {
        handleLineage(ltd, output);
    }
    setWriteIdForSurrogateKeys(ltd, input);
    LOG.debug("Created FileSink Plan for clause: {} dest_path: {} row schema: {}", dest, destinationPath, inputRR);
    FileSinkOperator fso = (FileSinkOperator) output;
    fso.getConf().setTable(destinationTable);
    // Auto-gather column statistics when enabled, the destination supports it,
    // and it is an insert overwrite or insert into table
    if (conf.getBoolVar(ConfVars.HIVESTATSAUTOGATHER) && conf.getBoolVar(ConfVars.HIVESTATSCOLAUTOGATHER) && destinationTable != null && (!destinationTable.isNonNative() || destinationTable.getStorageHandler().commitInMoveTask()) && !destTableIsTemporary && !destTableIsMaterialization && ColumnStatsAutoGatherContext.canRunAutogatherStats(fso)) {
        if (destType == QBMetaData.DEST_TABLE) {
            genAutoColumnStatsGatheringPipeline(destinationTable, partSpec, input, qb.getParseInfo().isInsertIntoTable(destinationTable.getDbName(), destinationTable.getTableName()), false);
        } else if (destType == QBMetaData.DEST_PARTITION) {
            genAutoColumnStatsGatheringPipeline(destinationTable, destinationPartition.getSpec(), input, qb.getParseInfo().isInsertIntoTable(destinationTable.getDbName(), destinationTable.getTableName()), false);
        } else if (destType == QBMetaData.DEST_LOCAL_FILE || destType == QBMetaData.DEST_DFS_FILE) {
            // CTAS or CMV statement
            genAutoColumnStatsGatheringPipeline(destinationTable, null, input, false, true);
        }
    }
    return output;
}
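The example above boils down to classifying the destination table with the AcidUtils predicates before choosing an Operation for the sink. A minimal standalone sketch of that classification step, assuming only the AcidUtils and Table APIs already shown above (the INSERT fallback is a simplified stand-in for the analyzer's getAcidType decision, not the real logic):

import org.apache.hadoop.hive.ql.io.AcidUtils;
import org.apache.hadoop.hive.ql.metadata.Table;

final class AcidDestinationSketch {
    // Hedged sketch, not the Hive planner itself: classify a destination table the way
    // genFileSinkPlan does before it picks an AcidUtils.Operation for the file sink.
    static AcidUtils.Operation classify(Table destinationTable) {
        // full ACID or insert-only (MM) table?
        boolean transactional = AcidUtils.isTransactionalTable(destinationTable);
        // insert-only (MM) table?
        boolean insertOnly = AcidUtils.isInsertOnlyTable(destinationTable.getParameters());
        // The real analyzer calls getAcidType(outputFileFormatClass, dest, isMmTable);
        // INSERT is used here only as a simplified stand-in for that decision.
        return transactional && !insertOnly ? AcidUtils.Operation.INSERT : AcidUtils.Operation.NOT_ACID;
    }
}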

Example 2 with Operation

Use of org.apache.hadoop.hive.ql.io.AcidUtils.Operation in project hive by apache.

From the class SemanticAnalyzer, method genBucketingSortingDest:

@SuppressWarnings("nls")
private Operator genBucketingSortingDest(String dest, Operator input, QB qb, TableDesc table_desc, Table dest_tab, SortBucketRSCtx ctx) throws SemanticException {
    // If the table is bucketed, and bucketing is enforced, do the following:
    // If the number of buckets is smaller than the maximum number of reducers,
    // create that many reducers.
    // If not, create a multiFileSink instead of FileSink - the multiFileSink will
    // spray the data into multiple buckets. That way, we can support a very large
    // number of buckets without needing a very large number of reducers.
    boolean enforceBucketing = false;
    List<ExprNodeDesc> partnCols = new ArrayList<>();
    List<ExprNodeDesc> sortCols = new ArrayList<>();
    List<Integer> sortOrders = new ArrayList<>();
    boolean multiFileSpray = false;
    int numFiles = 1;
    int totalFiles = 1;
    boolean isCompaction = false;
    if (dest_tab != null && dest_tab.getParameters() != null) {
        isCompaction = AcidUtils.isCompactionTable(dest_tab.getParameters());
    }
    StringBuilder order = new StringBuilder();
    StringBuilder nullOrder = new StringBuilder();
    if (dest_tab.getNumBuckets() > 0 && !dest_tab.getBucketCols().isEmpty()) {
        enforceBucketing = true;
        if (updating(dest) || deleting(dest)) {
            partnCols = getPartitionColsFromBucketColsForUpdateDelete(input, true);
            sortCols = getPartitionColsFromBucketColsForUpdateDelete(input, false);
            createSortOrderForUpdateDelete(sortCols, order, nullOrder);
        } else {
            partnCols = getPartitionColsFromBucketCols(dest, qb, dest_tab, table_desc, input, false);
        }
    } else {
        if (updating(dest) || deleting(dest)) {
            partnCols = getPartitionColsFromBucketColsForUpdateDelete(input, true);
            enforceBucketing = true;
        }
    }
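    // A sorted destination table must also be bucketed; otherwise, when configured, fall back to
    // sorting by the bucketing columns in ascending, NULLS FIRST order.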
    if ((dest_tab.getSortCols() != null) && (dest_tab.getSortCols().size() > 0)) {
        sortCols = getSortCols(dest, qb, dest_tab, table_desc, input);
        getSortOrders(dest_tab, order, nullOrder);
        if (!enforceBucketing) {
            throw new SemanticException(ErrorMsg.TBL_SORTED_NOT_BUCKETED.getErrorCodedMsg(dest_tab.getCompleteName()));
        }
    } else if (HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVE_SORT_WHEN_BUCKETING) && enforceBucketing && !updating(dest) && !deleting(dest)) {
        sortCols = new ArrayList<>();
        for (ExprNodeDesc expr : partnCols) {
            sortCols.add(expr.clone());
            order.append(DirectionUtils.codeToSign(DirectionUtils.ASCENDING_CODE));
            nullOrder.append(NullOrdering.NULLS_FIRST.getSign());
        }
    }
    if (enforceBucketing) {
        Operation acidOp = AcidUtils.isFullAcidTable(dest_tab) ? getAcidType(table_desc.getOutputFileFormatClass(), dest, AcidUtils.isInsertOnlyTable(dest_tab)) : Operation.NOT_ACID;
        int maxReducers = conf.getIntVar(HiveConf.ConfVars.MAXREDUCERS);
        if (conf.getIntVar(HiveConf.ConfVars.HADOOPNUMREDUCERS) > 0) {
            maxReducers = conf.getIntVar(HiveConf.ConfVars.HADOOPNUMREDUCERS);
        }
        int numBuckets = dest_tab.getNumBuckets();
        if (numBuckets > maxReducers) {
            LOG.debug("numBuckets is {} and maxReducers is {}", numBuckets, maxReducers);
            multiFileSpray = true;
            totalFiles = numBuckets;
            if (totalFiles % maxReducers == 0) {
                numFiles = totalFiles / maxReducers;
            } else {
                // find a number of reducers that evenly divides totalFiles
                maxReducers = getReducersBucketing(totalFiles, maxReducers);
                numFiles = totalFiles / maxReducers;
            }
        } else {
            maxReducers = numBuckets;
        }
        input = genReduceSinkPlan(input, partnCols, sortCols, order.toString(), nullOrder.toString(), maxReducers, acidOp, isCompaction);
        reduceSinkOperatorsAddedByEnforceBucketingSorting.add((ReduceSinkOperator) input.getParentOperators().get(0));
        ctx.setMultiFileSpray(multiFileSpray);
        ctx.setNumFiles(numFiles);
        ctx.setTotalFiles(totalFiles);
    }
    return input;
}
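The multi-file spray arithmetic above picks a reducer count that evenly divides the number of buckets. A small standalone sketch of that computation, with getReducersBucketing re-implemented as a hypothetical largest-divisor helper (an assumption based on the comment in the code, not the actual Hive implementation):

final class BucketSprayMath {
    // Hedged sketch of the reducer/file computation in genBucketingSortingDest: when there are
    // more buckets than reducers, each reducer sprays into totalFiles / maxReducers files,
    // with maxReducers adjusted down so it divides the bucket count evenly.
    // Returns { reducers, filesPerReducer }.
    static int[] filesPerReducer(int numBuckets, int maxReducers) {
        if (numBuckets <= maxReducers) {
            return new int[] { numBuckets, 1 };  // one reducer per bucket, one file each
        }
        int totalFiles = numBuckets;
        if (totalFiles % maxReducers != 0) {
            maxReducers = largestDivisorAtMost(totalFiles, maxReducers);  // stand-in for getReducersBucketing
        }
        return new int[] { maxReducers, totalFiles / maxReducers };
    }

    // Hypothetical helper: the largest divisor of n that does not exceed limit.
    private static int largestDivisorAtMost(int n, int limit) {
        for (int d = Math.min(limit, n); d >= 1; d--) {
            if (n % d == 0) {
                return d;
            }
        }
        return 1;
    }
}

For example, 32 buckets with at most 12 reducers yields 8 reducers writing 4 files each.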

Example 3 with Operation

Use of org.apache.hadoop.hive.ql.io.AcidUtils.Operation in project hive by apache.

From the class SemanticAnalyzer, method analyzeInternal:

@SuppressWarnings("checkstyle:methodlength")
void analyzeInternal(ASTNode ast, Supplier<PlannerContext> pcf) throws SemanticException {
    LOG.info("Starting Semantic Analysis");
    // 1. Generate Resolved Parse tree from syntax tree
    boolean needsTransform = needsTransform();
    // change the location of position alias process here
    processPositionAlias(ast);
    cacheTableHelper.populateCache(ctx.getParsedTables(), conf, getTxnMgr());
    PlannerContext plannerCtx = pcf.get();
    if (!genResolvedParseTree(ast, plannerCtx)) {
        return;
    }
    if (HiveConf.getBoolVar(conf, ConfVars.HIVE_REMOVE_ORDERBY_IN_SUBQUERY)) {
        for (String alias : qb.getSubqAliases()) {
            removeOBInSubQuery(qb.getSubqForAlias(alias));
        }
    }
    final String llapIOETLSkipFormat = HiveConf.getVar(conf, ConfVars.LLAP_IO_ETL_SKIP_FORMAT);
    if (qb.getParseInfo().hasInsertTables() || qb.isCTAS()) {
        if (llapIOETLSkipFormat.equalsIgnoreCase("encode")) {
            conf.setBoolean(ConfVars.LLAP_IO_ENCODE_ENABLED.varname, false);
            LOG.info("Disabling LLAP IO encode as ETL query is detected");
        } else if (llapIOETLSkipFormat.equalsIgnoreCase("all")) {
            conf.setBoolean(ConfVars.LLAP_IO_ENABLED.varname, false);
            LOG.info("Disabling LLAP IO as ETL query is detected");
        }
    }
    // Check query results cache.
    // If no masking/filtering required, then we can check the cache now, before
    // generating the operator tree and going through CBO.
    // Otherwise we have to wait until after the masking/filtering step.
    boolean isCacheEnabled = isResultsCacheEnabled();
    QueryResultsCache.LookupInfo lookupInfo = null;
    if (isCacheEnabled && !needsTransform && queryTypeCanUseCache()) {
        lookupInfo = createLookupInfoForQuery(ast);
        if (checkResultsCache(lookupInfo, false)) {
            return;
        }
    }
    ASTNode astForMasking;
    if (isCBOExecuted() && needsTransform && (qb.isCTAS() || forViewCreation || qb.isMaterializedView() || qb.isMultiDestQuery())) {
        // If we use CBO and we may apply masking/filtering policies, we create a copy of the ast.
        // The reason is that the generation of the operator tree may modify the initial ast,
        // but if we need to parse for a second time, we would like to parse the unmodified ast.
        astForMasking = (ASTNode) ParseDriver.adaptor.dupTree(ast);
    } else {
        astForMasking = ast;
    }
    // 2. Gen OP Tree from resolved Parse Tree
    sinkOp = genOPTree(ast, plannerCtx);
    boolean usesMasking = false;
    if (!forViewCreation && ast.getToken().getType() != HiveParser.TOK_CREATE_MATERIALIZED_VIEW && (tableMask.isEnabled() && analyzeRewrite == null)) {
        // Here we rewrite the * and also the masking table
        ParseResult rewrittenResult = rewriteASTWithMaskAndFilter(tableMask, astForMasking, ctx.getTokenRewriteStream(), ctx, db);
        ASTNode rewrittenAST = rewrittenResult.getTree();
        if (astForMasking != rewrittenAST) {
            usesMasking = true;
            plannerCtx = pcf.get();
            ctx.setSkipTableMasking(true);
            ctx.setTokenRewriteStream(rewrittenResult.getTokenRewriteStream());
            init(true);
            // change the location of position alias process here
            processPositionAlias(rewrittenAST);
            genResolvedParseTree(rewrittenAST, plannerCtx);
            if (this instanceof CalcitePlanner) {
                ((CalcitePlanner) this).resetCalciteConfiguration();
            }
            sinkOp = genOPTree(rewrittenAST, plannerCtx);
        }
    }
    // validate if this sink operation is allowed for non-native tables
    if (sinkOp instanceof FileSinkOperator) {
        FileSinkOperator fileSinkOperator = (FileSinkOperator) sinkOp;
        Optional<HiveStorageHandler> handler = Optional.ofNullable(fileSinkOperator).map(FileSinkOperator::getConf).map(FileSinkDesc::getTable).map(Table::getStorageHandler);
        if (handler.isPresent()) {
            handler.get().validateSinkDesc(fileSinkOperator.getConf());
        }
    }
    // TODO: Enable caching for queries with masking/filtering
    if (isCacheEnabled && needsTransform && !usesMasking && queryTypeCanUseCache()) {
        lookupInfo = createLookupInfoForQuery(ast);
        if (checkResultsCache(lookupInfo, false)) {
            return;
        }
    }
    // 3. Deduce Resultset Schema
    if ((forViewCreation || createVwDesc != null) && !this.ctx.isCboSucceeded()) {
        resultSchema = convertRowSchemaToViewSchema(opParseCtx.get(sinkOp).getRowResolver());
    } else {
        // resultSchema may not have been set yet (e.g. when CBO did not produce it);
        // derive it from the sink operator's row resolver.
        if (resultSchema == null) {
            resultSchema = convertRowSchemaToResultSetSchema(opParseCtx.get(sinkOp).getRowResolver(), HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVE_RESULTSET_USE_UNIQUE_COLUMN_NAMES));
        }
    }
    // 4. Generate Parse Context for Optimizer & Physical compiler
    copyInfoToQueryProperties(queryProperties);
    ParseContext pCtx = new ParseContext(queryState, opToPartPruner, opToPartList, topOps, new HashSet<JoinOperator>(joinContext.keySet()), new HashSet<SMBMapJoinOperator>(smbMapJoinContext.keySet()), loadTableWork, loadFileWork, columnStatsAutoGatherContexts, ctx, idToTableNameMap, destTableId, uCtx, listMapJoinOpsNoReducer, prunedPartitions, tabNameToTabObject, opToSamplePruner, globalLimitCtx, nameToSplitSample, inputs, rootTasks, opToPartToSkewedPruner, viewAliasToInput, reduceSinkOperatorsAddedByEnforceBucketingSorting, analyzeRewrite, tableDesc, createVwDesc, materializedViewUpdateDesc, queryProperties, viewProjectToTableSchema);
    // Set the semijoin hints in parse context
    pCtx.setSemiJoinHints(parseSemiJoinHint(getQB().getParseInfo().getHintList()));
    // Set the mapjoin hint if it needs to be disabled.
    pCtx.setDisableMapJoin(disableMapJoinWithHint(getQB().getParseInfo().getHintList()));
    if (forViewCreation) {
        // Generate lineage info if LineageLogger hook is configured.
        // Add the transformation that computes the lineage information.
        Set<String> postExecHooks = Sets.newHashSet(Splitter.on(",").trimResults().omitEmptyStrings().split(Strings.nullToEmpty(HiveConf.getVar(conf, HiveConf.ConfVars.POSTEXECHOOKS))));
        if (postExecHooks.contains("org.apache.hadoop.hive.ql.hooks.PostExecutePrinter") || postExecHooks.contains("org.apache.hadoop.hive.ql.hooks.LineageLogger") || postExecHooks.contains("org.apache.atlas.hive.hook.HiveHook")) {
            List<Transform> transformations = new ArrayList<Transform>();
            transformations.add(new HiveOpConverterPostProc());
            transformations.add(new Generator(postExecHooks));
            for (Transform t : transformations) {
                pCtx = t.transform(pCtx);
            }
        }
    }
    // 5. Take care of view creation
    if (createVwDesc != null) {
        if (ctx.getExplainAnalyze() == AnalyzeState.RUNNING) {
            return;
        }
        if (!ctx.isCboSucceeded()) {
            saveViewDefinition();
        }
        // validate the create view statement at this point; the createVwDesc gets
        // all the information for the semantic check
        validateCreateView();
        createVwDesc.setTablesUsed(pCtx.getTablesUsed());
    }
    // Skip this block when creating a view and the ColumnAccessInfo was already created in step 2.
    if (!forViewCreation || getColumnAccessInfo() == null) {
        // 6. Generate table access stats if required
        if (HiveConf.getBoolVar(this.conf, HiveConf.ConfVars.HIVE_STATS_COLLECT_TABLEKEYS)) {
            TableAccessAnalyzer tableAccessAnalyzer = new TableAccessAnalyzer(pCtx);
            setTableAccessInfo(tableAccessAnalyzer.analyzeTableAccess());
        }
        AuxOpTreeSignature.linkAuxSignatures(pCtx);
        // 7. Perform Logical optimization
        if (LOG.isDebugEnabled()) {
            LOG.debug("Before logical optimization\n" + Operator.toString(pCtx.getTopOps().values()));
        }
        Optimizer optm = new Optimizer();
        optm.setPctx(pCtx);
        optm.initialize(conf);
        pCtx = optm.optimize();
        if (pCtx.getColumnAccessInfo() != null) {
            // set ColumnAccessInfo for view column authorization
            setColumnAccessInfo(pCtx.getColumnAccessInfo());
        }
        if (LOG.isDebugEnabled()) {
            LOG.debug("After logical optimization\n" + Operator.toString(pCtx.getTopOps().values()));
        }
        // 8. Generate column access stats if required - wait until column pruning
        // takes place during optimization
        boolean isColumnInfoNeedForAuth = SessionState.get().isAuthorizationModeV2()
                && HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVE_AUTHORIZATION_ENABLED);
        if (isColumnInfoNeedForAuth || HiveConf.getBoolVar(this.conf, HiveConf.ConfVars.HIVE_STATS_COLLECT_SCANCOLS)) {
            ColumnAccessAnalyzer columnAccessAnalyzer = new ColumnAccessAnalyzer(pCtx);
            // view column access info is carried by this.getColumnAccessInfo().
            setColumnAccessInfo(columnAccessAnalyzer.analyzeColumnAccess(this.getColumnAccessInfo()));
        }
    }
    if (forViewCreation) {
        return;
    }
    // 9. Optimize Physical op tree & Translate to target execution engine (MR,
    // TEZ..)
    compilePlan(pCtx);
    // find all ACID FileSinkOperators
    new QueryPlanPostProcessor(rootTasks, acidFileSinks, ctx.getExecutionId());
    // 10. Attach CTAS/Insert-Commit-hooks for Storage Handlers
    final Optional<TezTask> optionalTezTask = rootTasks.stream()
            .filter(task -> task instanceof TezTask)
            .map(task -> (TezTask) task)
            .findFirst();
    if (optionalTezTask.isPresent()) {
        final TezTask tezTask = optionalTezTask.get();
        rootTasks.stream()
                .filter(task -> task.getWork() instanceof DDLWork)
                .map(task -> (DDLWork) task.getWork())
                .filter(ddlWork -> ddlWork.getDDLDesc() instanceof PreInsertTableDesc)
                .map(ddlWork -> (PreInsertTableDesc) ddlWork.getDDLDesc())
                .map(desc -> new InsertCommitHookDesc(desc.getTable(), desc.isOverwrite()))
                .forEach(insertCommitHookDesc -> tezTask.addDependentTask(
                        TaskFactory.get(new DDLWork(getInputs(), getOutputs(), insertCommitHookDesc), conf)));
    }
    LOG.info("Completed plan generation");
    // 11. put accessed columns to readEntity
    if (HiveConf.getBoolVar(this.conf, HiveConf.ConfVars.HIVE_STATS_COLLECT_SCANCOLS)) {
        putAccessedColumnsToReadEntity(inputs, columnAccessInfo);
    }
    if (isCacheEnabled && lookupInfo != null) {
        if (queryCanBeCached()) {
            // Check the results cache once more; switching to a cached result at this point
            // would require the SemanticAnalyzer state to be reset, hence the dedicated second lookup.
            if (checkResultsCache(lookupInfo, true)) {
                LOG.info("Cached result found on second lookup");
            } else {
                QueryResultsCache.QueryInfo queryInfo = createCacheQueryInfoForQuery(lookupInfo);
                // Specify that the results of this query can be cached.
                setCacheUsage(new CacheUsage(CacheUsage.CacheStatus.CAN_CACHE_QUERY_RESULTS, queryInfo));
            }
        }
    }
}
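
The lineage Generator transform in step 4 only runs when one of the recognized post-execution hooks is configured. The following standalone sketch reproduces the Guava parsing idiom used above so the gating condition can be tried in isolation; the class name and the hard-coded hook string (used instead of reading HiveConf.ConfVars.POSTEXECHOOKS) are illustrative assumptions, not part of the Hive source.

import com.google.common.base.Splitter;
import com.google.common.base.Strings;
import com.google.common.collect.Sets;

import java.util.Set;

public class PostExecHookParsingSketch {
    public static void main(String[] args) {
        // Split the comma-separated hook list, trimming whitespace, dropping empty
        // entries, and null-guarding the raw value, just as step 4 does.
        String rawHooks = " org.apache.hadoop.hive.ql.hooks.LineageLogger , ,"
                + "org.apache.hadoop.hive.ql.hooks.PostExecutePrinter ";
        Set<String> postExecHooks = Sets.newHashSet(
                Splitter.on(",").trimResults().omitEmptyStrings()
                        .split(Strings.nullToEmpty(rawHooks)));
        // Lineage is only computed when one of these hooks is present.
        boolean lineageNeeded =
                postExecHooks.contains("org.apache.hadoop.hive.ql.hooks.PostExecutePrinter")
                        || postExecHooks.contains("org.apache.hadoop.hive.ql.hooks.LineageLogger")
                        || postExecHooks.contains("org.apache.atlas.hive.hook.HiveHook");
        System.out.println(postExecHooks + " -> lineageNeeded=" + lineageNeeded);
    }
}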
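
Steps 6, 8, and 11 are all gated on flags read via HiveConf.getBoolVar. Below is a minimal, hedged sketch of turning those flags on programmatically for experimentation; the class name is hypothetical, it assumes hive-exec is on the classpath, and in practice these settings normally come from hive-site.xml or SET commands rather than code.

import org.apache.hadoop.hive.conf.HiveConf;

public class AccessStatsConfSketch {
    public static void main(String[] args) {
        HiveConf conf = new HiveConf();
        // Enables the TableAccessAnalyzer pass (step 6 above).
        conf.setBoolVar(HiveConf.ConfVars.HIVE_STATS_COLLECT_TABLEKEYS, true);
        // Enables the ColumnAccessAnalyzer pass (step 8) and the ReadEntity column update (step 11).
        conf.setBoolVar(HiveConf.ConfVars.HIVE_STATS_COLLECT_SCANCOLS, true);
        System.out.println("tablekeys="
                + conf.getBoolVar(HiveConf.ConfVars.HIVE_STATS_COLLECT_TABLEKEYS)
                + ", scancols="
                + conf.getBoolVar(HiveConf.ConfVars.HIVE_STATS_COLLECT_SCANCOLS));
    }
}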