
Example 26 with FileSinkOperator

use of org.apache.hadoop.hive.ql.exec.FileSinkOperator in project hive by apache.

the class SimpleFetchOptimizer method checkTree.

// all we can handle is LimitOperator, FilterOperator, SelectOperator and the final FS
// 
// for non-aggressive mode (minimal)
// 1. sampling is not allowed
// 2. for partitioned table, all filters should be targeted to partition column
// 3. SelectOperator should use only simple cast/column access
private FetchData checkTree(boolean aggressive, ParseContext pctx, String alias, TableScanOperator ts) throws HiveException {
    SplitSample splitSample = pctx.getNameToSplitSample().get(alias);
    if (!aggressive && splitSample != null) {
        return null;
    }
    if (!aggressive && ts.getConf().getTableSample() != null) {
        return null;
    }
    Table table = ts.getConf().getTableMetadata();
    if (table == null) {
        return null;
    }
    ReadEntity parent = PlanUtils.getParentViewInfo(alias, pctx.getViewAliasToInput());
    if (!table.isPartitioned()) {
        FetchData fetch = new FetchData(ts, parent, table, splitSample);
        return checkOperators(fetch, aggressive, false);
    }
    boolean bypassFilter = false;
    if (HiveConf.getBoolVar(pctx.getConf(), HiveConf.ConfVars.HIVEOPTPPD)) {
        ExprNodeDesc pruner = pctx.getOpToPartPruner().get(ts);
        if (PartitionPruner.onlyContainsPartnCols(table, pruner)) {
            bypassFilter = !pctx.getPrunedPartitions(alias, ts).hasUnknownPartitions();
        }
    }
    boolean onlyPruningFilter = bypassFilter;
    Operator<?> op = ts;
    while (onlyPruningFilter) {
        if (op instanceof FileSinkOperator || op.getChildOperators() == null) {
            break;
        } else if (op.getChildOperators().size() != 1) {
            onlyPruningFilter = false;
            break;
        } else {
            op = op.getChildOperators().get(0);
        }
        if (op instanceof FilterOperator) {
            ExprNodeDesc predicate = ((FilterOperator) op).getConf().getPredicate();
            if (predicate instanceof ExprNodeConstantDesc && "boolean".equals(predicate.getTypeInfo().getTypeName())) {
                continue;
            } else if (PartitionPruner.onlyContainsPartnCols(table, predicate)) {
                continue;
            } else {
                onlyPruningFilter = false;
            }
        }
    }
    if (!aggressive && !onlyPruningFilter) {
        return null;
    }
    PrunedPartitionList partitions = pctx.getPrunedPartitions(alias, ts);
    FetchData fetch = new FetchData(ts, parent, table, partitions, splitSample, onlyPruningFilter);
    return checkOperators(fetch, aggressive, bypassFilter);
}
Also used : ReadEntity(org.apache.hadoop.hive.ql.hooks.ReadEntity) FilterOperator(org.apache.hadoop.hive.ql.exec.FilterOperator) ExprNodeConstantDesc(org.apache.hadoop.hive.ql.plan.ExprNodeConstantDesc) Table(org.apache.hadoop.hive.ql.metadata.Table) PrunedPartitionList(org.apache.hadoop.hive.ql.parse.PrunedPartitionList) SplitSample(org.apache.hadoop.hive.ql.parse.SplitSample) FileSinkOperator(org.apache.hadoop.hive.ql.exec.FileSinkOperator) ExprNodeDesc(org.apache.hadoop.hive.ql.plan.ExprNodeDesc)
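
The interesting part of checkTree is the while loop: starting at the TableScanOperator it follows the single-child chain toward the FileSinkOperator and gives up as soon as the plan branches or a filter touches anything other than partition columns. Below is a minimal standalone sketch of that walk; the Op, Filter and FileSink classes are hypothetical stand-ins for Hive's operator hierarchy, and the onlyPartitionColumns flag stands in for the PartitionPruner.onlyContainsPartnCols check.

import java.util.Collections;
import java.util.List;

// Hypothetical stand-in operator classes; Hive's real Operator hierarchy is much richer.
class Op {
    final List<Op> children;
    Op(List<Op> children) { this.children = children; }
}

class Filter extends Op {
    final boolean onlyPartitionColumns; // stand-in for PartitionPruner.onlyContainsPartnCols()
    Filter(boolean onlyPartitionColumns, List<Op> children) {
        super(children);
        this.onlyPartitionColumns = onlyPartitionColumns;
    }
}

class FileSink extends Op {
    FileSink() { super(Collections.emptyList()); }
}

public class PruningFilterWalk {

    // Mirrors the while loop in checkTree: follow the single-child chain from the table
    // scan and give up as soon as the plan branches or a filter needs a non-partition column.
    static boolean onlyPruningFilters(Op tableScan) {
        Op op = tableScan;
        boolean onlyPruning = true;
        while (onlyPruning) {
            if (op instanceof FileSink || op.children == null || op.children.isEmpty()) {
                break; // reached the sink; every filter seen so far was partition-only
            } else if (op.children.size() != 1) {
                onlyPruning = false; // branching plan: not a simple fetch
                break;
            }
            op = op.children.get(0);
            if (op instanceof Filter && !((Filter) op).onlyPartitionColumns) {
                onlyPruning = false; // a residual (non-partition) filter remains
            }
        }
        return onlyPruning;
    }

    public static void main(String[] args) {
        Op sink = new FileSink();
        Op filter = new Filter(true, List.of(sink));
        Op scan = new Op(List.of(filter));
        System.out.println(onlyPruningFilters(scan)); // true: the only filter is partition-only
    }
}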

Example 27 with FileSinkOperator

use of org.apache.hadoop.hive.ql.exec.FileSinkOperator in project hive by apache.

the class GenTezUtils method removeUnionOperators.

// removes any union operator and clones the plan
public static void removeUnionOperators(GenTezProcContext context, BaseWork work, int indexForTezUnion) throws SemanticException {
    List<Operator<?>> roots = new ArrayList<Operator<?>>();
    roots.addAll(work.getAllRootOperators());
    if (work.getDummyOps() != null) {
        roots.addAll(work.getDummyOps());
    }
    roots.addAll(context.eventOperatorSet);
    // need to clone the plan.
    List<Operator<?>> newRoots;
    try (TruncatedOperatorTree truncator = new TruncatedOperatorTree(roots)) {
        newRoots = SerializationUtilities.cloneOperatorTree(roots);
    }
    // we're cloning the operator plan but we're retaining the original work. That means
    // that root operators have to be replaced with the cloned ops. The replacement map
    // tells you what that mapping is.
    BiMap<Operator<?>, Operator<?>> replacementMap = HashBiMap.create();
    // there's some special handling for dummyOps required. Mapjoins won't be properly
    // initialized if their dummy parents aren't initialized. Since we cloned the plan
    // we need to replace the dummy operators in the work with the cloned ones.
    List<HashTableDummyOperator> dummyOps = new LinkedList<HashTableDummyOperator>();
    Iterator<Operator<?>> it = newRoots.iterator();
    for (Operator<?> orig : roots) {
        Set<FileSinkOperator> fsOpSet = OperatorUtils.findOperators(orig, FileSinkOperator.class);
        for (FileSinkOperator fsOp : fsOpSet) {
            context.fileSinkSet.remove(fsOp);
        }
        Operator<?> newRoot = it.next();
        replacementMap.put(orig, newRoot);
        if (newRoot instanceof HashTableDummyOperator) {
            // dummy ops need to be updated to the cloned ones.
            dummyOps.add((HashTableDummyOperator) newRoot);
            it.remove();
        } else if (newRoot instanceof AppMasterEventOperator) {
            // need to restore the original scan.
            if (newRoot.getConf() instanceof DynamicPruningEventDesc) {
                TableScanOperator ts = ((DynamicPruningEventDesc) orig.getConf()).getTableScan();
                if (ts == null) {
                    throw new AssertionError("No table scan associated with dynamic event pruning. " + orig);
                }
                ((DynamicPruningEventDesc) newRoot.getConf()).setTableScan(ts);
            }
            it.remove();
        } else {
            if (newRoot instanceof TableScanOperator) {
                if (context.tsToEventMap.containsKey(orig)) {
                    // we need to update event operators with the cloned table scan
                    for (AppMasterEventOperator event : context.tsToEventMap.get(orig)) {
                        ((DynamicPruningEventDesc) event.getConf()).setTableScan((TableScanOperator) newRoot);
                    }
                }
                // This TableScanOperator could be part of semijoin optimization.
                Map<ReduceSinkOperator, SemiJoinBranchInfo> rsToSemiJoinBranchInfo = context.parseContext.getRsToSemiJoinBranchInfo();
                for (ReduceSinkOperator rs : rsToSemiJoinBranchInfo.keySet()) {
                    SemiJoinBranchInfo sjInfo = rsToSemiJoinBranchInfo.get(rs);
                    if (sjInfo.getTsOp() == orig) {
                        SemiJoinBranchInfo newSJInfo = new SemiJoinBranchInfo((TableScanOperator) newRoot, sjInfo.getIsHint());
                        rsToSemiJoinBranchInfo.put(rs, newSJInfo);
                    }
                }
                // This TableScanOperator could also be part of other events in eventOperatorSet.
                for (AppMasterEventOperator event : context.eventOperatorSet) {
                    if (event.getConf() instanceof DynamicPruningEventDesc) {
                        TableScanOperator ts = ((DynamicPruningEventDesc) event.getConf()).getTableScan();
                        if (ts.equals(orig)) {
                            ((DynamicPruningEventDesc) event.getConf()).setTableScan((TableScanOperator) newRoot);
                        }
                    }
                }
            }
            context.rootToWorkMap.remove(orig);
            context.rootToWorkMap.put(newRoot, work);
        }
    }
    // now we remove all the unions. we throw away any branch that's not reachable from
    // the current set of roots. The reason is that those branches will be handled in
    // different tasks.
    Deque<Operator<?>> operators = new LinkedList<Operator<?>>();
    operators.addAll(newRoots);
    Set<Operator<?>> seen = new HashSet<Operator<?>>();
    Set<FileStatus> fileStatusesToFetch = null;
    if (context.parseContext.getFetchTask() != null) {
        // File sink operator keeps a reference to a list of files. This reference needs to be passed on
        // to other file sink operators which could have been added by removal of Union Operator
        fileStatusesToFetch = context.parseContext.getFetchTask().getWork().getFilesToFetch();
    }
    while (!operators.isEmpty()) {
        Operator<?> current = operators.pop();
        if (seen.add(current) && current instanceof FileSinkOperator) {
            FileSinkOperator fileSink = (FileSinkOperator) current;
            // remember it for additional processing later
            if (context.fileSinkSet.contains(fileSink)) {
                continue;
            } else {
                context.fileSinkSet.add(fileSink);
            }
            FileSinkDesc desc = fileSink.getConf();
            Path path = desc.getDirName();
            List<FileSinkDesc> linked;
            if (!context.linkedFileSinks.containsKey(path)) {
                linked = new ArrayList<FileSinkDesc>();
                context.linkedFileSinks.put(path, linked);
            }
            linked = context.linkedFileSinks.get(path);
            linked.add(desc);
            desc.setDirName(new Path(path, AbstractFileMergeOperator.UNION_SUDBIR_PREFIX + linked.size()));
            Utilities.FILE_OP_LOGGER.debug("removing union - new desc with " + desc.getDirName() + "; parent " + path);
            desc.setLinkedFileSink(true);
            desc.setLinkedFileSinkDesc(linked);
            desc.setFilesToFetch(fileStatusesToFetch);
        }
        if (current instanceof AppMasterEventOperator) {
            // remember for additional processing later
            context.eventOperatorSet.add((AppMasterEventOperator) current);
            // mark the original as abandoned. Don't need it anymore.
            context.abandonedEventOperatorSet.add((AppMasterEventOperator) replacementMap.inverse().get(current));
        }
        if (current instanceof UnionOperator) {
            Operator<?> parent = null;
            int count = 0;
            for (Operator<?> op : current.getParentOperators()) {
                if (seen.contains(op)) {
                    ++count;
                    parent = op;
                }
            }
            // we should have been able to reach the union from only one side.
            assert count <= 1;
            if (parent == null) {
                // root operator is union (can happen in reducers)
                replacementMap.put(current, current.getChildOperators().get(0));
            } else {
                parent.removeChildAndAdoptItsChildren(current);
            }
        }
        if (current instanceof FileSinkOperator || current instanceof ReduceSinkOperator) {
            current.setChildOperators(null);
        } else {
            operators.addAll(current.getChildOperators());
        }
    }
    LOG.debug("Setting dummy ops for work " + work.getName() + ": " + dummyOps);
    work.setDummyOps(dummyOps);
    work.replaceRoots(replacementMap);
}
Also used : ReduceSinkOperator(org.apache.hadoop.hive.ql.exec.ReduceSinkOperator) FilterOperator(org.apache.hadoop.hive.ql.exec.FilterOperator) MapJoinOperator(org.apache.hadoop.hive.ql.exec.MapJoinOperator) GroupByOperator(org.apache.hadoop.hive.ql.exec.GroupByOperator) UnionOperator(org.apache.hadoop.hive.ql.exec.UnionOperator) FileSinkOperator(org.apache.hadoop.hive.ql.exec.FileSinkOperator) AppMasterEventOperator(org.apache.hadoop.hive.ql.exec.AppMasterEventOperator) AbstractFileMergeOperator(org.apache.hadoop.hive.ql.exec.AbstractFileMergeOperator) TableScanOperator(org.apache.hadoop.hive.ql.exec.TableScanOperator) Operator(org.apache.hadoop.hive.ql.exec.Operator) HashTableDummyOperator(org.apache.hadoop.hive.ql.exec.HashTableDummyOperator) TableScanOperator(org.apache.hadoop.hive.ql.exec.TableScanOperator) FileStatus(org.apache.hadoop.fs.FileStatus) UnionOperator(org.apache.hadoop.hive.ql.exec.UnionOperator) Path(org.apache.hadoop.fs.Path) FileSinkOperator(org.apache.hadoop.hive.ql.exec.FileSinkOperator) AppMasterEventOperator(org.apache.hadoop.hive.ql.exec.AppMasterEventOperator) HashTableDummyOperator(org.apache.hadoop.hive.ql.exec.HashTableDummyOperator) ReduceSinkOperator(org.apache.hadoop.hive.ql.exec.ReduceSinkOperator) BiMap(com.google.common.collect.BiMap) HashBiMap(com.google.common.collect.HashBiMap)
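
A pattern worth calling out in removeUnionOperators is the replacement BiMap: each original root operator is mapped to its clone, the forward view rewires the work's roots, and the inverse view is used later (replacementMap.inverse().get(current)) to find the original operator behind a cloned AppMasterEventOperator. Here is a small sketch of that bidirectional bookkeeping, using Guava's HashBiMap as in the snippet and a hypothetical Node class in place of Hive's Operator; deepCopy stands in for SerializationUtilities.cloneOperatorTree.

import com.google.common.collect.BiMap;
import com.google.common.collect.HashBiMap;

import java.util.ArrayList;
import java.util.List;

public class RootReplacementSketch {

    // Hypothetical plan node; Hive's Operator<?> plays this role in the snippet above.
    static class Node {
        final String name;
        final List<Node> children = new ArrayList<>();
        Node(String name) { this.name = name; }

        // Deep copy, standing in for SerializationUtilities.cloneOperatorTree().
        Node deepCopy() {
            Node copy = new Node(name);
            for (Node child : children) {
                copy.children.add(child.deepCopy());
            }
            return copy;
        }
    }

    public static void main(String[] args) {
        Node rootA = new Node("rootA");
        rootA.children.add(new Node("sinkA"));
        Node rootB = new Node("rootB");
        List<Node> roots = List.of(rootA, rootB);

        // Clone every root and remember the original -> clone mapping.
        BiMap<Node, Node> replacementMap = HashBiMap.create();
        for (Node orig : roots) {
            replacementMap.put(orig, orig.deepCopy());
        }

        // Forward direction: swap originals for clones (work.replaceRoots(replacementMap) in the snippet).
        Node newRootA = replacementMap.get(rootA);

        // Inverse direction: given a clone, recover the original
        // (replacementMap.inverse().get(current) in the snippet).
        Node origAgain = replacementMap.inverse().get(newRootA);

        System.out.println(newRootA != rootA);   // true: clones are distinct objects
        System.out.println(origAgain == rootA);  // true: inverse lookup returns the original
    }
}

Keeping both directions in a single BiMap avoids maintaining two separate maps that could drift out of sync.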

Example 28 with FileSinkOperator

use of org.apache.hadoop.hive.ql.exec.FileSinkOperator in project hive by apache.

the class FileSinkProcessor method process.

@Override
public Object process(Node nd, Stack<Node> stack, NodeProcessorCtx procCtx, Object... nodeOutputs) throws SemanticException {
    GenTezProcContext context = (GenTezProcContext) procCtx;
    FileSinkOperator fileSink = (FileSinkOperator) nd;
    // just remember it for later processing
    context.fileSinkSet.add(fileSink);
    return true;
}
Also used : FileSinkOperator(org.apache.hadoop.hive.ql.exec.FileSinkOperator)
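
FileSinkProcessor itself rewrites nothing; it only records each FileSinkOperator it is dispatched on so that later passes (such as removeUnionOperators above) see the complete set of sinks. The following standalone sketch shows the same collect-now, process-later traversal over a hypothetical PlanNode graph rather than Hive's real dispatcher machinery.

import java.util.ArrayDeque;
import java.util.ArrayList;
import java.util.Deque;
import java.util.LinkedHashSet;
import java.util.List;
import java.util.Set;

public class CollectSinksSketch {

    // Hypothetical node; Hive dispatches real Operator instances to NodeProcessors.
    static class PlanNode {
        final boolean isFileSink;
        final List<PlanNode> children = new ArrayList<>();
        PlanNode(boolean isFileSink) { this.isFileSink = isFileSink; }
    }

    // Walk the DAG once and remember every sink; nothing is rewritten here.
    static Set<PlanNode> collectFileSinks(PlanNode root) {
        Set<PlanNode> sinks = new LinkedHashSet<>();
        Set<PlanNode> seen = new LinkedHashSet<>();
        Deque<PlanNode> stack = new ArrayDeque<>();
        stack.push(root);
        while (!stack.isEmpty()) {
            PlanNode current = stack.pop();
            if (!seen.add(current)) {
                continue; // already visited (plans can share subtrees)
            }
            if (current.isFileSink) {
                sinks.add(current); // just remember it for later processing
            }
            current.children.forEach(stack::push);
        }
        return sinks;
    }

    public static void main(String[] args) {
        PlanNode sink = new PlanNode(true);
        PlanNode scan = new PlanNode(false);
        scan.children.add(sink);
        System.out.println(collectFileSinks(scan).size()); // 1
    }
}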

Example 29 with FileSinkOperator

use of org.apache.hadoop.hive.ql.exec.FileSinkOperator in project hive by apache.

the class SparkTask method getOperatorCounters.

private Map<String, List<String>> getOperatorCounters() {
    String groupName = HiveConf.getVar(conf, HiveConf.ConfVars.HIVECOUNTERGROUP);
    Map<String, List<String>> counters = new HashMap<String, List<String>>();
    List<String> hiveCounters = new LinkedList<String>();
    counters.put(groupName, hiveCounters);
    hiveCounters.add(Operator.HIVE_COUNTER_CREATED_FILES);
    hiveCounters.add(FileSinkOperator.TOTAL_TABLE_ROWS_WRITTEN);
    // MapOperator is outside SparkWork; SparkMapRecordHandler uses it to bridge
    // Spark transformations and Hive operators in SparkWork.
    for (MapOperator.Counter counter : MapOperator.Counter.values()) {
        hiveCounters.add(counter.toString());
    }
    SparkWork sparkWork = this.getWork();
    for (BaseWork work : sparkWork.getAllWork()) {
        for (Operator<? extends OperatorDesc> operator : work.getAllOperators()) {
            if (operator instanceof FileSinkOperator) {
                for (FileSinkOperator.Counter counter : FileSinkOperator.Counter.values()) {
                    hiveCounters.add(((FileSinkOperator) operator).getCounterName(counter));
                }
            } else if (operator instanceof ReduceSinkOperator) {
                final String contextName = conf.get(Operator.CONTEXT_NAME_KEY, "");
                for (ReduceSinkOperator.Counter counter : ReduceSinkOperator.Counter.values()) {
                    hiveCounters.add(Utilities.getVertexCounterName(counter.name(), contextName));
                }
            } else if (operator instanceof ScriptOperator) {
                for (ScriptOperator.Counter counter : ScriptOperator.Counter.values()) {
                    hiveCounters.add(counter.toString());
                }
            } else if (operator instanceof JoinOperator) {
                for (JoinOperator.SkewkeyTableCounter counter : JoinOperator.SkewkeyTableCounter.values()) {
                    hiveCounters.add(counter.toString());
                }
            }
        }
    }
    return counters;
}
Also used : JoinOperator(org.apache.hadoop.hive.ql.exec.JoinOperator) FileSinkOperator(org.apache.hadoop.hive.ql.exec.FileSinkOperator) HashMap(java.util.HashMap) ScriptOperator(org.apache.hadoop.hive.ql.exec.ScriptOperator) SparkWork(org.apache.hadoop.hive.ql.plan.SparkWork) LinkedList(java.util.LinkedList) MapOperator(org.apache.hadoop.hive.ql.exec.MapOperator) SparkCounter(org.apache.hive.spark.counter.SparkCounter) ReduceSinkOperator(org.apache.hadoop.hive.ql.exec.ReduceSinkOperator) List(java.util.List) ArrayList(java.util.ArrayList) LinkedList(java.util.LinkedList) BaseWork(org.apache.hadoop.hive.ql.plan.BaseWork)
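
getOperatorCounters builds a single map from the configured counter group name to the list of counter names to fetch, appending names per operator type found in the work. A simplified sketch of that shape follows; the counter enums here are hypothetical stand-ins for the real FileSinkOperator.Counter and ScriptOperator.Counter values, and the group name is just an example string.

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

public class CounterNamesSketch {

    // Hypothetical counter enums; the real code pulls these from FileSinkOperator.Counter,
    // ReduceSinkOperator.Counter, ScriptOperator.Counter, and JoinOperator.SkewkeyTableCounter.
    enum SinkCounter { RECORDS_OUT, TOTAL_TABLE_ROWS_WRITTEN }
    enum ScriptCounter { DESERIALIZE_ERRORS, SERIALIZE_ERRORS }

    // Build the same shape the snippet returns: counter-group name -> list of counter names.
    static Map<String, List<String>> counterNames(String groupName, boolean hasSink, boolean hasScript) {
        Map<String, List<String>> counters = new HashMap<>();
        List<String> names = new ArrayList<>();
        counters.put(groupName, names);
        if (hasSink) {
            for (SinkCounter c : SinkCounter.values()) {
                names.add(c.name());
            }
        }
        if (hasScript) {
            for (ScriptCounter c : ScriptCounter.values()) {
                names.add(c.name());
            }
        }
        return counters;
    }

    public static void main(String[] args) {
        // One group holding all counter names that the task will later fetch from the job.
        System.out.println(counterNames("HIVE", true, false));
        // {HIVE=[RECORDS_OUT, TOTAL_TABLE_ROWS_WRITTEN]}
    }
}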

Example 30 with FileSinkOperator

use of org.apache.hadoop.hive.ql.exec.FileSinkOperator in project hive by apache.

the class SemanticAnalyzer method analyzeInternal.

@SuppressWarnings("checkstyle:methodlength")
void analyzeInternal(ASTNode ast, Supplier<PlannerContext> pcf) throws SemanticException {
    LOG.info("Starting Semantic Analysis");
    // 1. Generate Resolved Parse tree from syntax tree
    boolean needsTransform = needsTransform();
    // change the location of position alias process here
    processPositionAlias(ast);
    cacheTableHelper.populateCache(ctx.getParsedTables(), conf, getTxnMgr());
    PlannerContext plannerCtx = pcf.get();
    if (!genResolvedParseTree(ast, plannerCtx)) {
        return;
    }
    if (HiveConf.getBoolVar(conf, ConfVars.HIVE_REMOVE_ORDERBY_IN_SUBQUERY)) {
        for (String alias : qb.getSubqAliases()) {
            removeOBInSubQuery(qb.getSubqForAlias(alias));
        }
    }
    final String llapIOETLSkipFormat = HiveConf.getVar(conf, ConfVars.LLAP_IO_ETL_SKIP_FORMAT);
    if (qb.getParseInfo().hasInsertTables() || qb.isCTAS()) {
        if (llapIOETLSkipFormat.equalsIgnoreCase("encode")) {
            conf.setBoolean(ConfVars.LLAP_IO_ENCODE_ENABLED.varname, false);
            LOG.info("Disabling LLAP IO encode as ETL query is detected");
        } else if (llapIOETLSkipFormat.equalsIgnoreCase("all")) {
            conf.setBoolean(ConfVars.LLAP_IO_ENABLED.varname, false);
            LOG.info("Disabling LLAP IO as ETL query is detected");
        }
    }
    // Check query results cache.
    // If no masking/filtering required, then we can check the cache now, before
    // generating the operator tree and going through CBO.
    // Otherwise we have to wait until after the masking/filtering step.
    boolean isCacheEnabled = isResultsCacheEnabled();
    QueryResultsCache.LookupInfo lookupInfo = null;
    if (isCacheEnabled && !needsTransform && queryTypeCanUseCache()) {
        lookupInfo = createLookupInfoForQuery(ast);
        if (checkResultsCache(lookupInfo, false)) {
            return;
        }
    }
    ASTNode astForMasking;
    if (isCBOExecuted() && needsTransform && (qb.isCTAS() || forViewCreation || qb.isMaterializedView() || qb.isMultiDestQuery())) {
        // If we use CBO and we may apply masking/filtering policies, we create a copy of the ast.
        // The reason is that the generation of the operator tree may modify the initial ast,
        // but if we need to parse for a second time, we would like to parse the unmodified ast.
        astForMasking = (ASTNode) ParseDriver.adaptor.dupTree(ast);
    } else {
        astForMasking = ast;
    }
    // 2. Gen OP Tree from resolved Parse Tree
    sinkOp = genOPTree(ast, plannerCtx);
    boolean usesMasking = false;
    if (!forViewCreation && ast.getToken().getType() != HiveParser.TOK_CREATE_MATERIALIZED_VIEW && (tableMask.isEnabled() && analyzeRewrite == null)) {
        // Here we rewrite the * and also the masking table
        ParseResult rewrittenResult = rewriteASTWithMaskAndFilter(tableMask, astForMasking, ctx.getTokenRewriteStream(), ctx, db);
        ASTNode rewrittenAST = rewrittenResult.getTree();
        if (astForMasking != rewrittenAST) {
            usesMasking = true;
            plannerCtx = pcf.get();
            ctx.setSkipTableMasking(true);
            ctx.setTokenRewriteStream(rewrittenResult.getTokenRewriteStream());
            init(true);
            // change the location of position alias process here
            processPositionAlias(rewrittenAST);
            genResolvedParseTree(rewrittenAST, plannerCtx);
            if (this instanceof CalcitePlanner) {
                ((CalcitePlanner) this).resetCalciteConfiguration();
            }
            sinkOp = genOPTree(rewrittenAST, plannerCtx);
        }
    }
    // validate if this sink operation is allowed for non-native tables
    if (sinkOp instanceof FileSinkOperator) {
        FileSinkOperator fileSinkOperator = (FileSinkOperator) sinkOp;
        Optional<HiveStorageHandler> handler = Optional.ofNullable(fileSinkOperator).map(FileSinkOperator::getConf).map(FileSinkDesc::getTable).map(Table::getStorageHandler);
        if (handler.isPresent()) {
            handler.get().validateSinkDesc(fileSinkOperator.getConf());
        }
    }
    // TODO: Enable caching for queries with masking/filtering
    if (isCacheEnabled && needsTransform && !usesMasking && queryTypeCanUseCache()) {
        lookupInfo = createLookupInfoForQuery(ast);
        if (checkResultsCache(lookupInfo, false)) {
            return;
        }
    }
    // 3. Deduce Resultset Schema
    if ((forViewCreation || createVwDesc != null) && !this.ctx.isCboSucceeded()) {
        resultSchema = convertRowSchemaToViewSchema(opParseCtx.get(sinkOp).getRowResolver());
    } else {
        // resultSchema is only non-null here if CBO (with the new return path) succeeded;
        // otherwise compute it now.
        if (resultSchema == null) {
            resultSchema = convertRowSchemaToResultSetSchema(opParseCtx.get(sinkOp).getRowResolver(), HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVE_RESULTSET_USE_UNIQUE_COLUMN_NAMES));
        }
    }
    // 4. Generate Parse Context for Optimizer & Physical compiler
    copyInfoToQueryProperties(queryProperties);
    ParseContext pCtx = new ParseContext(queryState, opToPartPruner, opToPartList, topOps, new HashSet<JoinOperator>(joinContext.keySet()), new HashSet<SMBMapJoinOperator>(smbMapJoinContext.keySet()), loadTableWork, loadFileWork, columnStatsAutoGatherContexts, ctx, idToTableNameMap, destTableId, uCtx, listMapJoinOpsNoReducer, prunedPartitions, tabNameToTabObject, opToSamplePruner, globalLimitCtx, nameToSplitSample, inputs, rootTasks, opToPartToSkewedPruner, viewAliasToInput, reduceSinkOperatorsAddedByEnforceBucketingSorting, analyzeRewrite, tableDesc, createVwDesc, materializedViewUpdateDesc, queryProperties, viewProjectToTableSchema);
    // Set the semijoin hints in parse context
    pCtx.setSemiJoinHints(parseSemiJoinHint(getQB().getParseInfo().getHintList()));
    // Set the mapjoin hint if it needs to be disabled.
    pCtx.setDisableMapJoin(disableMapJoinWithHint(getQB().getParseInfo().getHintList()));
    if (forViewCreation) {
        // Generate lineage info if LineageLogger hook is configured.
        // Add the transformation that computes the lineage information.
        Set<String> postExecHooks = Sets.newHashSet(Splitter.on(",").trimResults().omitEmptyStrings().split(Strings.nullToEmpty(HiveConf.getVar(conf, HiveConf.ConfVars.POSTEXECHOOKS))));
        if (postExecHooks.contains("org.apache.hadoop.hive.ql.hooks.PostExecutePrinter") || postExecHooks.contains("org.apache.hadoop.hive.ql.hooks.LineageLogger") || postExecHooks.contains("org.apache.atlas.hive.hook.HiveHook")) {
            List<Transform> transformations = new ArrayList<Transform>();
            transformations.add(new HiveOpConverterPostProc());
            transformations.add(new Generator(postExecHooks));
            for (Transform t : transformations) {
                pCtx = t.transform(pCtx);
            }
        }
    }
    // 5. Take care of view creation
    if (createVwDesc != null) {
        if (ctx.getExplainAnalyze() == AnalyzeState.RUNNING) {
            return;
        }
        if (!ctx.isCboSucceeded()) {
            saveViewDefinition();
        }
        // validate the create view statement at this point, the createVwDesc gets
        // all the information for the semantic check
        validateCreateView();
        createVwDesc.setTablesUsed(pCtx.getTablesUsed());
    }
    // if getColumnAccessInfo() is already set for a view creation, it means that in step 2 the ColumnAccessInfo was already created
    if (!forViewCreation || getColumnAccessInfo() == null) {
        // 6. Generate table access stats if required
        if (HiveConf.getBoolVar(this.conf, HiveConf.ConfVars.HIVE_STATS_COLLECT_TABLEKEYS)) {
            TableAccessAnalyzer tableAccessAnalyzer = new TableAccessAnalyzer(pCtx);
            setTableAccessInfo(tableAccessAnalyzer.analyzeTableAccess());
        }
        AuxOpTreeSignature.linkAuxSignatures(pCtx);
        // 7. Perform Logical optimization
        if (LOG.isDebugEnabled()) {
            LOG.debug("Before logical optimization\n" + Operator.toString(pCtx.getTopOps().values()));
        }
        Optimizer optm = new Optimizer();
        optm.setPctx(pCtx);
        optm.initialize(conf);
        pCtx = optm.optimize();
        if (pCtx.getColumnAccessInfo() != null) {
            // set ColumnAccessInfo for view column authorization
            setColumnAccessInfo(pCtx.getColumnAccessInfo());
        }
        if (LOG.isDebugEnabled()) {
            LOG.debug("After logical optimization\n" + Operator.toString(pCtx.getTopOps().values()));
        }
        // 8. Generate column access stats if required - wait until column pruning
        // takes place during optimization
        boolean isColumnInfoNeedForAuth = SessionState.get().isAuthorizationModeV2() && HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVE_AUTHORIZATION_ENABLED);
        if (isColumnInfoNeedForAuth || HiveConf.getBoolVar(this.conf, HiveConf.ConfVars.HIVE_STATS_COLLECT_SCANCOLS)) {
            ColumnAccessAnalyzer columnAccessAnalyzer = new ColumnAccessAnalyzer(pCtx);
            // view column access info is carried by this.getColumnAccessInfo().
            setColumnAccessInfo(columnAccessAnalyzer.analyzeColumnAccess(this.getColumnAccessInfo()));
        }
    }
    if (forViewCreation) {
        return;
    }
    // 9. Optimize Physical op tree & Translate to target execution engine (MR,
    // TEZ..)
    compilePlan(pCtx);
    // find all ACID FileSinkOperators
    new QueryPlanPostProcessor(rootTasks, acidFileSinks, ctx.getExecutionId());
    // 10. Attach CTAS/Insert-Commit-hooks for Storage Handlers
    final Optional<TezTask> optionalTezTask = rootTasks.stream().filter(task -> task instanceof TezTask).map(task -> (TezTask) task).findFirst();
    if (optionalTezTask.isPresent()) {
        final TezTask tezTask = optionalTezTask.get();
        rootTasks.stream().filter(task -> task.getWork() instanceof DDLWork).map(task -> (DDLWork) task.getWork()).filter(ddlWork -> ddlWork.getDDLDesc() instanceof PreInsertTableDesc).map(ddlWork -> (PreInsertTableDesc) ddlWork.getDDLDesc()).map(desc -> new InsertCommitHookDesc(desc.getTable(), desc.isOverwrite())).forEach(insertCommitHookDesc -> tezTask.addDependentTask(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), insertCommitHookDesc), conf)));
    }
    LOG.info("Completed plan generation");
    // 11. put accessed columns to readEntity
    if (HiveConf.getBoolVar(this.conf, HiveConf.ConfVars.HIVE_STATS_COLLECT_SCANCOLS)) {
        putAccessedColumnsToReadEntity(inputs, columnAccessInfo);
    }
    if (isCacheEnabled && lookupInfo != null) {
        if (queryCanBeCached()) {
            // requires SemanticAnalyzer state to be reset.
            if (checkResultsCache(lookupInfo, true)) {
                LOG.info("Cached result found on second lookup");
            } else {
                QueryResultsCache.QueryInfo queryInfo = createCacheQueryInfoForQuery(lookupInfo);
                // Specify that the results of this query can be cached.
                setCacheUsage(new CacheUsage(CacheUsage.CacheStatus.CAN_CACHE_QUERY_RESULTS, queryInfo));
            }
        }
    }
}
Also used : JoinOperator(org.apache.hadoop.hive.ql.exec.JoinOperator) SMBMapJoinOperator(org.apache.hadoop.hive.ql.exec.SMBMapJoinOperator) FileSinkOperator(org.apache.hadoop.hive.ql.exec.FileSinkOperator) FileSinkDesc(org.apache.hadoop.hive.ql.plan.FileSinkDesc) HiveStorageHandler(org.apache.hadoop.hive.ql.metadata.HiveStorageHandler) Table(org.apache.hadoop.hive.ql.metadata.Table) TezTask(org.apache.hadoop.hive.ql.exec.tez.TezTask) DDLWork(org.apache.hadoop.hive.ql.ddl.DDLWork) PreInsertTableDesc(org.apache.hadoop.hive.ql.ddl.table.misc.preinsert.PreInsertTableDesc) InsertCommitHookDesc(org.apache.hadoop.hive.ql.ddl.misc.hooks.InsertCommitHookDesc) QueryPlanPostProcessor(org.apache.hadoop.hive.ql.optimizer.QueryPlanPostProcessor) Optimizer(org.apache.hadoop.hive.ql.optimizer.Optimizer) Transform(org.apache.hadoop.hive.ql.optimizer.Transform) HiveOpConverterPostProc(org.apache.hadoop.hive.ql.optimizer.calcite.translator.HiveOpConverterPostProc) Generator(org.apache.hadoop.hive.ql.optimizer.lineage.Generator) QueryResultsCache(org.apache.hadoop.hive.ql.cache.results.QueryResultsCache) CacheUsage(org.apache.hadoop.hive.ql.cache.results.CacheUsage) Splitter(com.google.common.base.Splitter) Strings(com.google.common.base.Strings) Sets(com.google.common.collect.Sets) Optional(java.util.Optional) ArrayList(java.util.ArrayList) HashSet(java.util.HashSet)
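
One detail in analyzeInternal worth noting is how the "validate if this sink operation is allowed for non-native tables" step is written: an Optional.ofNullable(...).map(...) chain walks from the FileSinkOperator's descriptor to its table and then to the storage handler, so validateSinkDesc only runs when every link is non-null. Below is a standalone sketch of that null-safe chain; SinkDesc, TableRef and StorageHandler are hypothetical stand-ins, not Hive's classes.

import java.util.Optional;

public class NullSafeValidationSketch {

    // Hypothetical stand-ins for FileSinkDesc, Table and HiveStorageHandler.
    static class StorageHandler {
        void validateSink(SinkDesc desc) {
            System.out.println("validated sink for " + desc.table.name);
        }
    }
    static class TableRef {
        final String name;
        final StorageHandler handler; // may be null for native tables
        TableRef(String name, StorageHandler handler) { this.name = name; this.handler = handler; }
    }
    static class SinkDesc {
        final TableRef table; // may be null (e.g. writing to a directory)
        SinkDesc(TableRef table) { this.table = table; }
    }

    static void validate(SinkDesc desc) {
        // Same shape as the snippet: each map() short-circuits on null,
        // and validation runs only if a storage handler is actually present.
        Optional.ofNullable(desc)
                .map(d -> d.table)
                .map(t -> t.handler)
                .ifPresent(h -> h.validateSink(desc));
    }

    public static void main(String[] args) {
        validate(new SinkDesc(new TableRef("t_handler", new StorageHandler()))); // prints validation message
        validate(new SinkDesc(null));                                            // silently skipped
        validate(new SinkDesc(new TableRef("t_native", null)));                  // silently skipped
    }
}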

Aggregations

FileSinkOperator (org.apache.hadoop.hive.ql.exec.FileSinkOperator): 33
ArrayList (java.util.ArrayList): 14
Operator (org.apache.hadoop.hive.ql.exec.Operator): 13
Path (org.apache.hadoop.fs.Path): 12
TableScanOperator (org.apache.hadoop.hive.ql.exec.TableScanOperator): 12
ReduceSinkOperator (org.apache.hadoop.hive.ql.exec.ReduceSinkOperator): 10
LinkedHashMap (java.util.LinkedHashMap): 8
JoinOperator (org.apache.hadoop.hive.ql.exec.JoinOperator): 8
Task (org.apache.hadoop.hive.ql.exec.Task): 8
UnionOperator (org.apache.hadoop.hive.ql.exec.UnionOperator): 7
FileSinkDesc (org.apache.hadoop.hive.ql.plan.FileSinkDesc): 7
GroupByOperator (org.apache.hadoop.hive.ql.exec.GroupByOperator): 6
MapRedTask (org.apache.hadoop.hive.ql.exec.mr.MapRedTask): 6
BaseWork (org.apache.hadoop.hive.ql.plan.BaseWork): 6
MapWork (org.apache.hadoop.hive.ql.plan.MapWork): 6
ConditionalTask (org.apache.hadoop.hive.ql.exec.ConditionalTask): 5
SMBMapJoinOperator (org.apache.hadoop.hive.ql.exec.SMBMapJoinOperator): 5
Node (org.apache.hadoop.hive.ql.lib.Node): 5
ParseContext (org.apache.hadoop.hive.ql.parse.ParseContext): 5
OperatorDesc (org.apache.hadoop.hive.ql.plan.OperatorDesc): 5