
Example 1 with PreInsertTableDesc

Use of org.apache.hadoop.hive.ql.ddl.table.misc.preinsert.PreInsertTableDesc in project hive by apache.

The class SemanticAnalyzer, method createPreInsertDesc:

private void createPreInsertDesc(Table table, boolean overwrite) {
    // Describe the pending insert so the storage handler can be notified before data lands.
    PreInsertTableDesc preInsertTableDesc = new PreInsertTableDesc(table, overwrite);
    // Wrap the descriptor in DDL work and queue it as a root task of the plan.
    this.rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), preInsertTableDesc)));
}
Also used : DDLWork(org.apache.hadoop.hive.ql.ddl.DDLWork) PreInsertTableDesc(org.apache.hadoop.hive.ql.ddl.table.misc.preinsert.PreInsertTableDesc)
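
When this queued task later executes, the DDL framework hands the descriptor to the table's metadata hook so the storage handler can prepare for the write. The snippet below is an illustrative sketch, not the actual PreInsertTableOperation body; it assumes the handler exposes its hook via getMetaHook() and that the hook is a DefaultHiveMetaHook offering a preInsertTable(table, overwrite) callback:

// Illustrative sketch (not Hive source): fire the storage handler's
// pre-insert callback for the table captured in a PreInsertTableDesc.
void firePreInsertHook(PreInsertTableDesc desc) throws MetaException {
    HiveStorageHandler handler = desc.getTable().getStorageHandler();
    if (handler != null && handler.getMetaHook() instanceof DefaultHiveMetaHook) {
        // Pass the metastore-level table and the overwrite flag to the hook.
        ((DefaultHiveMetaHook) handler.getMetaHook())
                .preInsertTable(desc.getTable().getTTable(), desc.isOverwrite());
    }
}
Also used : HiveStorageHandler(org.apache.hadoop.hive.ql.metadata.HiveStorageHandler) DefaultHiveMetaHook(org.apache.hadoop.hive.metastore.DefaultHiveMetaHook) MetaException(org.apache.hadoop.hive.metastore.api.MetaException)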

Example 2 with PreInsertTableDesc

Use of org.apache.hadoop.hive.ql.ddl.table.misc.preinsert.PreInsertTableDesc in project hive by apache.

The class SemanticAnalyzer, method analyzeInternal:

@SuppressWarnings("checkstyle:methodlength")
void analyzeInternal(ASTNode ast, Supplier<PlannerContext> pcf) throws SemanticException {
    LOG.info("Starting Semantic Analysis");
    // 1. Generate Resolved Parse tree from syntax tree
    boolean needsTransform = needsTransform();
    // change the location of position alias process here
    processPositionAlias(ast);
    cacheTableHelper.populateCache(ctx.getParsedTables(), conf, getTxnMgr());
    PlannerContext plannerCtx = pcf.get();
    if (!genResolvedParseTree(ast, plannerCtx)) {
        return;
    }
    if (HiveConf.getBoolVar(conf, ConfVars.HIVE_REMOVE_ORDERBY_IN_SUBQUERY)) {
        for (String alias : qb.getSubqAliases()) {
            removeOBInSubQuery(qb.getSubqForAlias(alias));
        }
    }
    final String llapIOETLSkipFormat = HiveConf.getVar(conf, ConfVars.LLAP_IO_ETL_SKIP_FORMAT);
    if (qb.getParseInfo().hasInsertTables() || qb.isCTAS()) {
        if (llapIOETLSkipFormat.equalsIgnoreCase("encode")) {
            conf.setBoolean(ConfVars.LLAP_IO_ENCODE_ENABLED.varname, false);
            LOG.info("Disabling LLAP IO encode as ETL query is detected");
        } else if (llapIOETLSkipFormat.equalsIgnoreCase("all")) {
            conf.setBoolean(ConfVars.LLAP_IO_ENABLED.varname, false);
            LOG.info("Disabling LLAP IO as ETL query is detected");
        }
    }
    // Check query results cache.
    // If no masking/filtering required, then we can check the cache now, before
    // generating the operator tree and going through CBO.
    // Otherwise we have to wait until after the masking/filtering step.
    boolean isCacheEnabled = isResultsCacheEnabled();
    QueryResultsCache.LookupInfo lookupInfo = null;
    if (isCacheEnabled && !needsTransform && queryTypeCanUseCache()) {
        lookupInfo = createLookupInfoForQuery(ast);
        if (checkResultsCache(lookupInfo, false)) {
            return;
        }
    }
    ASTNode astForMasking;
    if (isCBOExecuted() && needsTransform
            && (qb.isCTAS() || forViewCreation || qb.isMaterializedView() || qb.isMultiDestQuery())) {
        // If we use CBO and we may apply masking/filtering policies, we create a copy of the ast.
        // The reason is that the generation of the operator tree may modify the initial ast,
        // but if we need to parse for a second time, we would like to parse the unmodified ast.
        astForMasking = (ASTNode) ParseDriver.adaptor.dupTree(ast);
    } else {
        astForMasking = ast;
    }
    // 2. Gen OP Tree from resolved Parse Tree
    sinkOp = genOPTree(ast, plannerCtx);
    boolean usesMasking = false;
    if (!forViewCreation && ast.getToken().getType() != HiveParser.TOK_CREATE_MATERIALIZED_VIEW
            && (tableMask.isEnabled() && analyzeRewrite == null)) {
        // Here we rewrite the * and also the masking table
        ParseResult rewrittenResult = rewriteASTWithMaskAndFilter(tableMask, astForMasking, ctx.getTokenRewriteStream(), ctx, db);
        ASTNode rewrittenAST = rewrittenResult.getTree();
        if (astForMasking != rewrittenAST) {
            usesMasking = true;
            plannerCtx = pcf.get();
            ctx.setSkipTableMasking(true);
            ctx.setTokenRewriteStream(rewrittenResult.getTokenRewriteStream());
            init(true);
            // change the location of position alias process here
            processPositionAlias(rewrittenAST);
            genResolvedParseTree(rewrittenAST, plannerCtx);
            if (this instanceof CalcitePlanner) {
                ((CalcitePlanner) this).resetCalciteConfiguration();
            }
            sinkOp = genOPTree(rewrittenAST, plannerCtx);
        }
    }
    // validate if this sink operation is allowed for non-native tables
    if (sinkOp instanceof FileSinkOperator) {
        FileSinkOperator fileSinkOperator = (FileSinkOperator) sinkOp;
        Optional<HiveStorageHandler> handler = Optional.ofNullable(fileSinkOperator)
                .map(FileSinkOperator::getConf)
                .map(FileSinkDesc::getTable)
                .map(Table::getStorageHandler);
        if (handler.isPresent()) {
            handler.get().validateSinkDesc(fileSinkOperator.getConf());
        }
    }
    // TODO: Enable caching for queries with masking/filtering
    if (isCacheEnabled && needsTransform && !usesMasking && queryTypeCanUseCache()) {
        lookupInfo = createLookupInfoForQuery(ast);
        if (checkResultsCache(lookupInfo, false)) {
            return;
        }
    }
    // 3. Deduce Resultset Schema
    if ((forViewCreation || createVwDesc != null) && !this.ctx.isCboSucceeded()) {
        resultSchema = convertRowSchemaToViewSchema(opParseCtx.get(sinkOp).getRowResolver());
    } else {
        // resultSchema is only non-null here if CBO ran with the new return path
        // and succeeded; otherwise derive it from the sink operator's row resolver.
        if (resultSchema == null) {
            resultSchema = convertRowSchemaToResultSetSchema(opParseCtx.get(sinkOp).getRowResolver(),
                    HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVE_RESULTSET_USE_UNIQUE_COLUMN_NAMES));
        }
    }
    // 4. Generate Parse Context for Optimizer & Physical compiler
    copyInfoToQueryProperties(queryProperties);
    ParseContext pCtx = new ParseContext(queryState, opToPartPruner, opToPartList, topOps,
            new HashSet<JoinOperator>(joinContext.keySet()),
            new HashSet<SMBMapJoinOperator>(smbMapJoinContext.keySet()),
            loadTableWork, loadFileWork, columnStatsAutoGatherContexts, ctx,
            idToTableNameMap, destTableId, uCtx, listMapJoinOpsNoReducer,
            prunedPartitions, tabNameToTabObject, opToSamplePruner, globalLimitCtx,
            nameToSplitSample, inputs, rootTasks, opToPartToSkewedPruner,
            viewAliasToInput, reduceSinkOperatorsAddedByEnforceBucketingSorting,
            analyzeRewrite, tableDesc, createVwDesc, materializedViewUpdateDesc,
            queryProperties, viewProjectToTableSchema);
    // Set the semijoin hints in parse context
    pCtx.setSemiJoinHints(parseSemiJoinHint(getQB().getParseInfo().getHintList()));
    // Set the mapjoin hint if it needs to be disabled.
    pCtx.setDisableMapJoin(disableMapJoinWithHint(getQB().getParseInfo().getHintList()));
    if (forViewCreation) {
        // Generate lineage info if LineageLogger hook is configured.
        // Add the transformation that computes the lineage information.
        Set<String> postExecHooks = Sets.newHashSet(Splitter.on(",").trimResults().omitEmptyStrings()
                .split(Strings.nullToEmpty(HiveConf.getVar(conf, HiveConf.ConfVars.POSTEXECHOOKS))));
        if (postExecHooks.contains("org.apache.hadoop.hive.ql.hooks.PostExecutePrinter")
                || postExecHooks.contains("org.apache.hadoop.hive.ql.hooks.LineageLogger")
                || postExecHooks.contains("org.apache.atlas.hive.hook.HiveHook")) {
            List<Transform> transformations = new ArrayList<Transform>();
            transformations.add(new HiveOpConverterPostProc());
            transformations.add(new Generator(postExecHooks));
            for (Transform t : transformations) {
                pCtx = t.transform(pCtx);
            }
        }
    }
    // 5. Take care of view creation
    if (createVwDesc != null) {
        if (ctx.getExplainAnalyze() == AnalyzeState.RUNNING) {
            return;
        }
        if (!ctx.isCboSucceeded()) {
            saveViewDefinition();
        }
        // Validate the CREATE VIEW statement at this point; createVwDesc now has
        // all the information needed for the semantic check.
        validateCreateView();
        createVwDesc.setTablesUsed(pCtx.getTablesUsed());
    }
    // Skip this block for view creation when a ColumnAccessInfo already exists;
    // that means it was already created back in step 2.
    if (!forViewCreation || getColumnAccessInfo() == null) {
        // 6. Generate table access stats if required
        if (HiveConf.getBoolVar(this.conf, HiveConf.ConfVars.HIVE_STATS_COLLECT_TABLEKEYS)) {
            TableAccessAnalyzer tableAccessAnalyzer = new TableAccessAnalyzer(pCtx);
            setTableAccessInfo(tableAccessAnalyzer.analyzeTableAccess());
        }
        AuxOpTreeSignature.linkAuxSignatures(pCtx);
        // 7. Perform Logical optimization
        if (LOG.isDebugEnabled()) {
            LOG.debug("Before logical optimization\n" + Operator.toString(pCtx.getTopOps().values()));
        }
        Optimizer optm = new Optimizer();
        optm.setPctx(pCtx);
        optm.initialize(conf);
        pCtx = optm.optimize();
        if (pCtx.getColumnAccessInfo() != null) {
            // set ColumnAccessInfo for view column authorization
            setColumnAccessInfo(pCtx.getColumnAccessInfo());
        }
        if (LOG.isDebugEnabled()) {
            LOG.debug("After logical optimization\n" + Operator.toString(pCtx.getTopOps().values()));
        }
        // 8. Generate column access stats if required - wait until column pruning
        // takes place during optimization
        boolean isColumnInfoNeedForAuth = SessionState.get().isAuthorizationModeV2()
                && HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVE_AUTHORIZATION_ENABLED);
        if (isColumnInfoNeedForAuth || HiveConf.getBoolVar(this.conf, HiveConf.ConfVars.HIVE_STATS_COLLECT_SCANCOLS)) {
            ColumnAccessAnalyzer columnAccessAnalyzer = new ColumnAccessAnalyzer(pCtx);
            // view column access info is carried by this.getColumnAccessInfo().
            setColumnAccessInfo(columnAccessAnalyzer.analyzeColumnAccess(this.getColumnAccessInfo()));
        }
    }
    if (forViewCreation) {
        return;
    }
    // 9. Optimize Physical op tree & Translate to target execution engine (MR,
    // TEZ..)
    compilePlan(pCtx);
    // Find all ACID FileSinkOperators.
    new QueryPlanPostProcessor(rootTasks, acidFileSinks, ctx.getExecutionId());
    // 10. Attach CTAS/Insert-Commit-hooks for Storage Handlers
    final Optional<TezTask> optionalTezTask = rootTasks.stream()
            .filter(task -> task instanceof TezTask)
            .map(task -> (TezTask) task)
            .findFirst();
    if (optionalTezTask.isPresent()) {
        final TezTask tezTask = optionalTezTask.get();
        rootTasks.stream()
                .filter(task -> task.getWork() instanceof DDLWork)
                .map(task -> (DDLWork) task.getWork())
                .filter(ddlWork -> ddlWork.getDDLDesc() instanceof PreInsertTableDesc)
                .map(ddlWork -> (PreInsertTableDesc) ddlWork.getDDLDesc())
                .map(desc -> new InsertCommitHookDesc(desc.getTable(), desc.isOverwrite()))
                .forEach(insertCommitHookDesc -> tezTask.addDependentTask(
                        TaskFactory.get(new DDLWork(getInputs(), getOutputs(), insertCommitHookDesc), conf)));
    }
    LOG.info("Completed plan generation");
    // 11. put accessed columns to readEntity
    if (HiveConf.getBoolVar(this.conf, HiveConf.ConfVars.HIVE_STATS_COLLECT_SCANCOLS)) {
        putAccessedColumnsToReadEntity(inputs, columnAccessInfo);
    }
    if (isCacheEnabled && lookupInfo != null) {
        if (queryCanBeCached()) {
            // Last chance to use the results cache now that the plan is complete; a hit
            // here discards the generated plan, which requires SemanticAnalyzer state to be reset.
            if (checkResultsCache(lookupInfo, true)) {
                LOG.info("Cached result found on second lookup");
            } else {
                QueryResultsCache.QueryInfo queryInfo = createCacheQueryInfoForQuery(lookupInfo);
                // Specify that the results of this query can be cached.
                setCacheUsage(new CacheUsage(CacheUsage.CacheStatus.CAN_CACHE_QUERY_RESULTS, queryInfo));
            }
        }
    }
}
Also used : DDLWork(org.apache.hadoop.hive.ql.ddl.DDLWork) PreInsertTableDesc(org.apache.hadoop.hive.ql.ddl.table.misc.preinsert.PreInsertTableDesc) InsertCommitHookDesc(org.apache.hadoop.hive.ql.ddl.misc.hooks.InsertCommitHookDesc) TezTask(org.apache.hadoop.hive.ql.exec.tez.TezTask) TaskFactory(org.apache.hadoop.hive.ql.exec.TaskFactory) FileSinkOperator(org.apache.hadoop.hive.ql.exec.FileSinkOperator) FileSinkDesc(org.apache.hadoop.hive.ql.plan.FileSinkDesc) HiveStorageHandler(org.apache.hadoop.hive.ql.metadata.HiveStorageHandler) Table(org.apache.hadoop.hive.ql.metadata.Table) Optimizer(org.apache.hadoop.hive.ql.optimizer.Optimizer) QueryPlanPostProcessor(org.apache.hadoop.hive.ql.optimizer.QueryPlanPostProcessor) QueryResultsCache(org.apache.hadoop.hive.ql.cache.results.QueryResultsCache) CacheUsage(org.apache.hadoop.hive.ql.cache.results.CacheUsage) Transform(org.apache.hadoop.hive.ql.optimizer.Transform) HiveOpConverterPostProc(org.apache.hadoop.hive.ql.optimizer.calcite.translator.HiveOpConverterPostProc) Generator(org.apache.hadoop.hive.ql.optimizer.lineage.Generator) AuxOpTreeSignature(org.apache.hadoop.hive.ql.plan.mapper.AuxOpTreeSignature)
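
The step-10 stream pipeline above is dense, so here is an equivalent, loop-based sketch (again not the Hive source) of the same wiring, assuming the analyzer's rootTasks, getInputs(), getOutputs(), and conf as in the method:

// Sketch: for every queued PreInsertTableDesc, attach an insert-commit hook
// as a dependent task of the first TezTask, mirroring step 10 above.
TezTask tezTask = null;
for (Task<?> task : rootTasks) {
    if (task instanceof TezTask) {
        tezTask = (TezTask) task;
        break;
    }
}
if (tezTask != null) {
    for (Task<?> task : rootTasks) {
        if (!(task.getWork() instanceof DDLWork)) {
            continue;
        }
        DDLWork ddlWork = (DDLWork) task.getWork();
        if (!(ddlWork.getDDLDesc() instanceof PreInsertTableDesc)) {
            continue;
        }
        PreInsertTableDesc desc = (PreInsertTableDesc) ddlWork.getDDLDesc();
        InsertCommitHookDesc hookDesc = new InsertCommitHookDesc(desc.getTable(), desc.isOverwrite());
        tezTask.addDependentTask(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), hookDesc), conf));
    }
}

Either form attaches one insert-commit hook task per pending pre-insert descriptor, so the storage handler can be notified to commit once the Tez work completes.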
