Use of org.apache.hadoop.hive.ql.exec.FileSinkOperator in project hive by apache.
The class SimpleFetchOptimizer, method checkTree.
// all we can handle is LimitOperator, FilterOperator, SelectOperator and a final FS
//
// for non-aggressive mode (minimal)
// 1. sampling is not allowed
// 2. for a partitioned table, all filters should target partition columns only
// 3. SelectOperator should use only simple cast/column access
private FetchData checkTree(boolean aggressive, ParseContext pctx, String alias, TableScanOperator ts) throws HiveException {
SplitSample splitSample = pctx.getNameToSplitSample().get(alias);
if (!aggressive && splitSample != null) {
return null;
}
if (!aggressive && ts.getConf().getTableSample() != null) {
return null;
}
Table table = ts.getConf().getTableMetadata();
if (table == null) {
return null;
}
ReadEntity parent = PlanUtils.getParentViewInfo(alias, pctx.getViewAliasToInput());
if (!table.isPartitioned()) {
FetchData fetch = new FetchData(ts, parent, table, splitSample);
return checkOperators(fetch, aggressive, false);
}
boolean bypassFilter = false;
if (HiveConf.getBoolVar(pctx.getConf(), HiveConf.ConfVars.HIVEOPTPPD)) {
ExprNodeDesc pruner = pctx.getOpToPartPruner().get(ts);
if (PartitionPruner.onlyContainsPartnCols(table, pruner)) {
bypassFilter = !pctx.getPrunedPartitions(alias, ts).hasUnknownPartitions();
}
}
boolean onlyPruningFilter = bypassFilter;
Operator<?> op = ts;
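// Walk the single-child chain below the TableScan: the plan stays fetch-convertible
// only while every FilterOperator is a constant boolean or touches partition columns exclusively.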
while (onlyPruningFilter) {
if (op instanceof FileSinkOperator || op.getChildOperators() == null) {
break;
} else if (op.getChildOperators().size() != 1) {
onlyPruningFilter = false;
break;
} else {
op = op.getChildOperators().get(0);
}
if (op instanceof FilterOperator) {
ExprNodeDesc predicate = ((FilterOperator) op).getConf().getPredicate();
if (predicate instanceof ExprNodeConstantDesc && "boolean".equals(predicate.getTypeInfo().getTypeName())) {
continue;
} else if (PartitionPruner.onlyContainsPartnCols(table, predicate)) {
continue;
} else {
onlyPruningFilter = false;
}
}
}
if (!aggressive && !onlyPruningFilter) {
return null;
}
PrunedPartitionList partitions = pctx.getPrunedPartitions(alias, ts);
FetchData fetch = new FetchData(ts, parent, table, partitions, splitSample, onlyPruningFilter);
return checkOperators(fetch, aggressive, bypassFilter);
}
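The heart of checkTree is that chain walk: the fetch conversion survives only while every filter below the TableScanOperator can be answered by partition pruning (or is a constant boolean). The following self-contained sketch models the traversal on deliberately simplified types; Op, Filter and FileSink are hypothetical stand-ins rather than Hive classes, and the constant-boolean case is folded into a single flag.

import java.util.List;

// Hypothetical, stripped-down model of the chain walk in checkTree: follow the
// single-child chain and give up as soon as a filter needs more than partition columns.
final class FetchChainCheck {

    // Minimal stand-ins for operator-tree nodes; these are not Hive classes.
    static class Op {
        final List<Op> children;
        Op(List<Op> children) { this.children = children; }
    }

    static final class Filter extends Op {
        final boolean bypassable;   // constant boolean or partition-column-only predicate
        Filter(List<Op> children, boolean bypassable) {
            super(children);
            this.bypassable = bypassable;
        }
    }

    static final class FileSink extends Op {
        FileSink() { super(null); }
    }

    // Mirrors the onlyPruningFilter flag: true if every filter between the scan
    // and the sink can be bypassed by partition pruning.
    static boolean onlyPruningFilters(Op scan) {
        Op op = scan;
        while (true) {
            if (op instanceof FileSink || op.children == null) {
                return true;                        // reached the end of the chain
            }
            if (op.children.size() != 1) {
                return false;                       // branching plans are not handled
            }
            op = op.children.get(0);
            if (op instanceof Filter && !((Filter) op).bypassable) {
                return false;                       // filter needs non-partition data
            }
        }
    }

    public static void main(String[] args) {
        Op plan = new Op(List.of(new Filter(List.of(new FileSink()), true)));
        System.out.println(onlyPruningFilters(plan));   // prints true
    }
}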
Use of org.apache.hadoop.hive.ql.exec.FileSinkOperator in project hive by apache.
The class GenTezUtils, method removeUnionOperators.
// removes any union operator and clones the plan
public static void removeUnionOperators(GenTezProcContext context, BaseWork work, int indexForTezUnion) throws SemanticException {
List<Operator<?>> roots = new ArrayList<Operator<?>>();
roots.addAll(work.getAllRootOperators());
if (work.getDummyOps() != null) {
roots.addAll(work.getDummyOps());
}
roots.addAll(context.eventOperatorSet);
// need to clone the plan.
List<Operator<?>> newRoots;
try (TruncatedOperatorTree truncator = new TruncatedOperatorTree(roots)) {
newRoots = SerializationUtilities.cloneOperatorTree(roots);
}
// we're cloning the operator plan but we're retaining the original work. That means
// that root operators have to be replaced with the cloned ops. The replacement map
// tells you what that mapping is.
BiMap<Operator<?>, Operator<?>> replacementMap = HashBiMap.create();
// there's some special handling for dummyOps required. Mapjoins won't be properly
// initialized if their dummy parents aren't initialized. Since we cloned the plan
// we need to replace the dummy operators in the work with the cloned ones.
List<HashTableDummyOperator> dummyOps = new LinkedList<HashTableDummyOperator>();
Iterator<Operator<?>> it = newRoots.iterator();
for (Operator<?> orig : roots) {
Set<FileSinkOperator> fsOpSet = OperatorUtils.findOperators(orig, FileSinkOperator.class);
for (FileSinkOperator fsOp : fsOpSet) {
context.fileSinkSet.remove(fsOp);
}
Operator<?> newRoot = it.next();
replacementMap.put(orig, newRoot);
if (newRoot instanceof HashTableDummyOperator) {
// dummy ops need to be updated to the cloned ones.
dummyOps.add((HashTableDummyOperator) newRoot);
it.remove();
} else if (newRoot instanceof AppMasterEventOperator) {
// need to restore the original scan.
if (newRoot.getConf() instanceof DynamicPruningEventDesc) {
TableScanOperator ts = ((DynamicPruningEventDesc) orig.getConf()).getTableScan();
if (ts == null) {
throw new AssertionError("No table scan associated with dynamic event pruning. " + orig);
}
((DynamicPruningEventDesc) newRoot.getConf()).setTableScan(ts);
}
it.remove();
} else {
if (newRoot instanceof TableScanOperator) {
if (context.tsToEventMap.containsKey(orig)) {
// we need to update event operators with the cloned table scan
for (AppMasterEventOperator event : context.tsToEventMap.get(orig)) {
((DynamicPruningEventDesc) event.getConf()).setTableScan((TableScanOperator) newRoot);
}
}
// This TableScanOperator could be part of semijoin optimization.
Map<ReduceSinkOperator, SemiJoinBranchInfo> rsToSemiJoinBranchInfo = context.parseContext.getRsToSemiJoinBranchInfo();
for (ReduceSinkOperator rs : rsToSemiJoinBranchInfo.keySet()) {
SemiJoinBranchInfo sjInfo = rsToSemiJoinBranchInfo.get(rs);
if (sjInfo.getTsOp() == orig) {
SemiJoinBranchInfo newSJInfo = new SemiJoinBranchInfo((TableScanOperator) newRoot, sjInfo.getIsHint());
rsToSemiJoinBranchInfo.put(rs, newSJInfo);
}
}
// This TableScanOperator could also be part of other events in eventOperatorSet.
for (AppMasterEventOperator event : context.eventOperatorSet) {
if (event.getConf() instanceof DynamicPruningEventDesc) {
TableScanOperator ts = ((DynamicPruningEventDesc) event.getConf()).getTableScan();
if (ts.equals(orig)) {
((DynamicPruningEventDesc) event.getConf()).setTableScan((TableScanOperator) newRoot);
}
}
}
}
context.rootToWorkMap.remove(orig);
context.rootToWorkMap.put(newRoot, work);
}
}
// now we remove all the unions. we throw away any branch that's not reachable from
// the current set of roots. The reason is that those branches will be handled in
// different tasks.
Deque<Operator<?>> operators = new LinkedList<Operator<?>>();
operators.addAll(newRoots);
Set<Operator<?>> seen = new HashSet<Operator<?>>();
Set<FileStatus> fileStatusesToFetch = null;
if (context.parseContext.getFetchTask() != null) {
// File sink operator keeps a reference to a list of files. This reference needs to be passed on
// to other file sink operators that may have been added when the UnionOperator was removed
fileStatusesToFetch = context.parseContext.getFetchTask().getWork().getFilesToFetch();
}
while (!operators.isEmpty()) {
Operator<?> current = operators.pop();
if (seen.add(current) && current instanceof FileSinkOperator) {
FileSinkOperator fileSink = (FileSinkOperator) current;
// remember it for additional processing later
if (context.fileSinkSet.contains(fileSink)) {
continue;
} else {
context.fileSinkSet.add(fileSink);
}
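// Every file sink that used to feed the removed union keeps the shared parent path but
// writes into its own numbered subdirectory; linkedFileSinks groups the sibling
// descriptors under that path so they can be treated as one logical output later.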
FileSinkDesc desc = fileSink.getConf();
Path path = desc.getDirName();
List<FileSinkDesc> linked;
if (!context.linkedFileSinks.containsKey(path)) {
linked = new ArrayList<FileSinkDesc>();
context.linkedFileSinks.put(path, linked);
}
linked = context.linkedFileSinks.get(path);
linked.add(desc);
desc.setDirName(new Path(path, AbstractFileMergeOperator.UNION_SUDBIR_PREFIX + linked.size()));
Utilities.FILE_OP_LOGGER.debug("removing union - new desc with " + desc.getDirName() + "; parent " + path);
desc.setLinkedFileSink(true);
desc.setLinkedFileSinkDesc(linked);
desc.setFilesToFetch(fileStatusesToFetch);
}
if (current instanceof AppMasterEventOperator) {
// remember for additional processing later
context.eventOperatorSet.add((AppMasterEventOperator) current);
// mark the original as abandoned. Don't need it anymore.
context.abandonedEventOperatorSet.add((AppMasterEventOperator) replacementMap.inverse().get(current));
}
if (current instanceof UnionOperator) {
Operator<?> parent = null;
int count = 0;
for (Operator<?> op : current.getParentOperators()) {
if (seen.contains(op)) {
++count;
parent = op;
}
}
// we should have been able to reach the union from only one side.
assert count <= 1;
if (parent == null) {
// root operator is union (can happen in reducers)
replacementMap.put(current, current.getChildOperators().get(0));
} else {
parent.removeChildAndAdoptItsChildren(current);
}
}
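// FileSink and ReduceSink operators mark the boundary of this work; detach their
// children so the traversal does not wander into operators that belong to other works.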
if (current instanceof FileSinkOperator || current instanceof ReduceSinkOperator) {
current.setChildOperators(null);
} else {
operators.addAll(current.getChildOperators());
}
}
LOG.debug("Setting dummy ops for work " + work.getName() + ": " + dummyOps);
work.setDummyOps(dummyOps);
work.replaceRoots(replacementMap);
}
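One pattern worth isolating is the BiMap bookkeeping: each original root maps to its clone so the work's roots can be swapped forward, and the inverse view recovers the original operator from a clone (as done above for abandonedEventOperatorSet). A minimal sketch of the idea, assuming Guava on the classpath and using plain strings as stand-ins for operators:

import com.google.common.collect.BiMap;
import com.google.common.collect.HashBiMap;
import java.util.List;
import java.util.function.UnaryOperator;

// Clone each root, record original -> clone, then look up in both directions.
final class CloneRemapSketch {
    public static void main(String[] args) {
        List<String> roots = List.of("TS[0]", "DUMMY[1]");
        UnaryOperator<String> cloneOp = op -> op + "'";   // stand-in for cloneOperatorTree

        BiMap<String, String> replacementMap = HashBiMap.create();
        for (String orig : roots) {
            replacementMap.put(orig, cloneOp.apply(orig));
        }

        // Forward direction: rewire the work to the cloned roots.
        System.out.println(replacementMap.get("TS[0]"));                 // TS[0]'
        // Inverse direction: given a clone, recover the original operator.
        System.out.println(replacementMap.inverse().get("DUMMY[1]'"));   // DUMMY[1]
    }
}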
Use of org.apache.hadoop.hive.ql.exec.FileSinkOperator in project hive by apache.
The class FileSinkProcessor, method process.
@Override
public Object process(Node nd, Stack<Node> stack, NodeProcessorCtx procCtx, Object... nodeOutputs) throws SemanticException {
GenTezProcContext context = (GenTezProcContext) procCtx;
FileSinkOperator fileSink = (FileSinkOperator) nd;
// just remember it for later processing
context.fileSinkSet.add(fileSink);
return true;
}
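FileSinkProcessor itself only records the sink; the collected set is consumed later when the Tez work is generated. Below is a rough, hypothetical model of that collect-during-walk pattern in plain Java; the Node and Context types are illustrative stand-ins, not Hive's Node/NodeProcessorCtx machinery.

import java.util.ArrayDeque;
import java.util.Deque;
import java.util.LinkedHashSet;
import java.util.List;
import java.util.Set;

// A tree walk dispatches on the node type; the "FS" case just stashes the node
// in a shared context for later plan generation, in the spirit of the processor above.
final class SinkCollectorSketch {
    record Node(String type, List<Node> children) {}

    static final class Context {
        final Set<Node> fileSinkSet = new LinkedHashSet<>();
    }

    static void walk(Node root, Context ctx) {
        Deque<Node> stack = new ArrayDeque<>();
        stack.push(root);
        while (!stack.isEmpty()) {
            Node n = stack.pop();
            if ("FS".equals(n.type())) {
                ctx.fileSinkSet.add(n);            // just remember it for later processing
            }
            n.children().forEach(stack::push);
        }
    }

    public static void main(String[] args) {
        Node fs = new Node("FS", List.of());
        Node root = new Node("TS", List.of(new Node("SEL", List.of(fs))));
        Context ctx = new Context();
        walk(root, ctx);
        System.out.println(ctx.fileSinkSet.size());   // prints 1
    }
}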
Use of org.apache.hadoop.hive.ql.exec.FileSinkOperator in project hive by apache.
The class SparkTask, method getOperatorCounters.
private Map<String, List<String>> getOperatorCounters() {
String groupName = HiveConf.getVar(conf, HiveConf.ConfVars.HIVECOUNTERGROUP);
Map<String, List<String>> counters = new HashMap<String, List<String>>();
List<String> hiveCounters = new LinkedList<String>();
counters.put(groupName, hiveCounters);
hiveCounters.add(Operator.HIVE_COUNTER_CREATED_FILES);
hiveCounters.add(FileSinkOperator.TOTAL_TABLE_ROWS_WRITTEN);
// MapOperator counters: MapOperator bridges the Spark transformations and the Hive operators inside SparkWork.
for (MapOperator.Counter counter : MapOperator.Counter.values()) {
hiveCounters.add(counter.toString());
}
SparkWork sparkWork = this.getWork();
for (BaseWork work : sparkWork.getAllWork()) {
for (Operator<? extends OperatorDesc> operator : work.getAllOperators()) {
if (operator instanceof FileSinkOperator) {
for (FileSinkOperator.Counter counter : FileSinkOperator.Counter.values()) {
hiveCounters.add(((FileSinkOperator) operator).getCounterName(counter));
}
} else if (operator instanceof ReduceSinkOperator) {
final String contextName = conf.get(Operator.CONTEXT_NAME_KEY, "");
for (ReduceSinkOperator.Counter counter : ReduceSinkOperator.Counter.values()) {
hiveCounters.add(Utilities.getVertexCounterName(counter.name(), contextName));
}
} else if (operator instanceof ScriptOperator) {
for (ScriptOperator.Counter counter : ScriptOperator.Counter.values()) {
hiveCounters.add(counter.toString());
}
} else if (operator instanceof JoinOperator) {
for (JoinOperator.SkewkeyTableCounter counter : JoinOperator.SkewkeyTableCounter.values()) {
hiveCounters.add(counter.toString());
}
}
}
}
return counters;
}
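The method builds a single named counter group and fills it with the string names of each operator type's Counter enum values. Below is a simplified, self-contained sketch of that registration pattern; the enums and the group name are made up for illustration and are not Hive's actual counters.

import java.util.HashMap;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;

// Collect counter names per operator type into one named group.
final class CounterGroupSketch {
    enum FileSinkCounter { RECORDS_OUT, TOTAL_TABLE_ROWS_WRITTEN }
    enum ScriptCounter { DESERIALIZE_ERRORS, SERIALIZE_ERRORS }

    static Map<String, List<String>> collect(String groupName) {
        Map<String, List<String>> counters = new HashMap<>();
        List<String> names = new LinkedList<>();
        counters.put(groupName, names);
        for (FileSinkCounter c : FileSinkCounter.values()) {
            names.add(c.toString());
        }
        for (ScriptCounter c : ScriptCounter.values()) {
            names.add(c.toString());
        }
        return counters;
    }

    public static void main(String[] args) {
        collect("HIVE").forEach((group, names) ->
                System.out.println(group + " -> " + names));
    }
}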
Use of org.apache.hadoop.hive.ql.exec.FileSinkOperator in project hive by apache.
The class SemanticAnalyzer, method analyzeInternal.
@SuppressWarnings("checkstyle:methodlength")
void analyzeInternal(ASTNode ast, Supplier<PlannerContext> pcf) throws SemanticException {
LOG.info("Starting Semantic Analysis");
// 1. Generate Resolved Parse tree from syntax tree
boolean needsTransform = needsTransform();
// change the location of position alias process here
processPositionAlias(ast);
cacheTableHelper.populateCache(ctx.getParsedTables(), conf, getTxnMgr());
PlannerContext plannerCtx = pcf.get();
if (!genResolvedParseTree(ast, plannerCtx)) {
return;
}
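// Optionally drop ORDER BY clauses inside subqueries; without an accompanying LIMIT
// they cannot affect the final result, so removing them saves a pointless sort.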
if (HiveConf.getBoolVar(conf, ConfVars.HIVE_REMOVE_ORDERBY_IN_SUBQUERY)) {
for (String alias : qb.getSubqAliases()) {
removeOBInSubQuery(qb.getSubqForAlias(alias));
}
}
final String llapIOETLSkipFormat = HiveConf.getVar(conf, ConfVars.LLAP_IO_ETL_SKIP_FORMAT);
if (qb.getParseInfo().hasInsertTables() || qb.isCTAS()) {
if (llapIOETLSkipFormat.equalsIgnoreCase("encode")) {
conf.setBoolean(ConfVars.LLAP_IO_ENCODE_ENABLED.varname, false);
LOG.info("Disabling LLAP IO encode as ETL query is detected");
} else if (llapIOETLSkipFormat.equalsIgnoreCase("all")) {
conf.setBoolean(ConfVars.LLAP_IO_ENABLED.varname, false);
LOG.info("Disabling LLAP IO as ETL query is detected");
}
}
// Check query results cache.
// If no masking/filtering required, then we can check the cache now, before
// generating the operator tree and going through CBO.
// Otherwise we have to wait until after the masking/filtering step.
boolean isCacheEnabled = isResultsCacheEnabled();
QueryResultsCache.LookupInfo lookupInfo = null;
if (isCacheEnabled && !needsTransform && queryTypeCanUseCache()) {
lookupInfo = createLookupInfoForQuery(ast);
if (checkResultsCache(lookupInfo, false)) {
return;
}
}
ASTNode astForMasking;
if (isCBOExecuted() && needsTransform && (qb.isCTAS() || forViewCreation || qb.isMaterializedView() || qb.isMultiDestQuery())) {
// If we use CBO and we may apply masking/filtering policies, we create a copy of the ast.
// The reason is that the generation of the operator tree may modify the initial ast,
// but if we need to parse for a second time, we would like to parse the unmodified ast.
astForMasking = (ASTNode) ParseDriver.adaptor.dupTree(ast);
} else {
astForMasking = ast;
}
// 2. Gen OP Tree from resolved Parse Tree
sinkOp = genOPTree(ast, plannerCtx);
boolean usesMasking = false;
if (!forViewCreation && ast.getToken().getType() != HiveParser.TOK_CREATE_MATERIALIZED_VIEW && (tableMask.isEnabled() && analyzeRewrite == null)) {
// Here we rewrite the * and also the masking table
ParseResult rewrittenResult = rewriteASTWithMaskAndFilter(tableMask, astForMasking, ctx.getTokenRewriteStream(), ctx, db);
ASTNode rewrittenAST = rewrittenResult.getTree();
if (astForMasking != rewrittenAST) {
usesMasking = true;
plannerCtx = pcf.get();
ctx.setSkipTableMasking(true);
ctx.setTokenRewriteStream(rewrittenResult.getTokenRewriteStream());
init(true);
// change the location of position alias process here
processPositionAlias(rewrittenAST);
genResolvedParseTree(rewrittenAST, plannerCtx);
if (this instanceof CalcitePlanner) {
((CalcitePlanner) this).resetCalciteConfiguration();
}
sinkOp = genOPTree(rewrittenAST, plannerCtx);
}
}
// validate if this sink operation is allowed for non-native tables
if (sinkOp instanceof FileSinkOperator) {
FileSinkOperator fileSinkOperator = (FileSinkOperator) sinkOp;
Optional<HiveStorageHandler> handler = Optional.ofNullable(fileSinkOperator).map(FileSinkOperator::getConf).map(FileSinkDesc::getTable).map(Table::getStorageHandler);
if (handler.isPresent()) {
handler.get().validateSinkDesc(fileSinkOperator.getConf());
}
}
// TODO: Enable caching for queries with masking/filtering
if (isCacheEnabled && needsTransform && !usesMasking && queryTypeCanUseCache()) {
lookupInfo = createLookupInfoForQuery(ast);
if (checkResultsCache(lookupInfo, false)) {
return;
}
}
// 3. Deduce Resultset Schema
if ((forViewCreation || createVwDesc != null) && !this.ctx.isCboSucceeded()) {
resultSchema = convertRowSchemaToViewSchema(opParseCtx.get(sinkOp).getRowResolver());
} else {
// resultSchema is only non-null at this point if CBO already produced it; otherwise derive it from the sink operator's row resolver.
if (resultSchema == null) {
resultSchema = convertRowSchemaToResultSetSchema(opParseCtx.get(sinkOp).getRowResolver(), HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVE_RESULTSET_USE_UNIQUE_COLUMN_NAMES));
}
}
// 4. Generate Parse Context for Optimizer & Physical compiler
copyInfoToQueryProperties(queryProperties);
ParseContext pCtx = new ParseContext(queryState, opToPartPruner, opToPartList, topOps, new HashSet<JoinOperator>(joinContext.keySet()), new HashSet<SMBMapJoinOperator>(smbMapJoinContext.keySet()), loadTableWork, loadFileWork, columnStatsAutoGatherContexts, ctx, idToTableNameMap, destTableId, uCtx, listMapJoinOpsNoReducer, prunedPartitions, tabNameToTabObject, opToSamplePruner, globalLimitCtx, nameToSplitSample, inputs, rootTasks, opToPartToSkewedPruner, viewAliasToInput, reduceSinkOperatorsAddedByEnforceBucketingSorting, analyzeRewrite, tableDesc, createVwDesc, materializedViewUpdateDesc, queryProperties, viewProjectToTableSchema);
// Set the semijoin hints in parse context
pCtx.setSemiJoinHints(parseSemiJoinHint(getQB().getParseInfo().getHintList()));
// Set the mapjoin hint if it needs to be disabled.
pCtx.setDisableMapJoin(disableMapJoinWithHint(getQB().getParseInfo().getHintList()));
if (forViewCreation) {
// Generate lineage info if LineageLogger hook is configured.
// Add the transformation that computes the lineage information.
Set<String> postExecHooks = Sets.newHashSet(Splitter.on(",").trimResults().omitEmptyStrings().split(Strings.nullToEmpty(HiveConf.getVar(conf, HiveConf.ConfVars.POSTEXECHOOKS))));
if (postExecHooks.contains("org.apache.hadoop.hive.ql.hooks.PostExecutePrinter") || postExecHooks.contains("org.apache.hadoop.hive.ql.hooks.LineageLogger") || postExecHooks.contains("org.apache.atlas.hive.hook.HiveHook")) {
List<Transform> transformations = new ArrayList<Transform>();
transformations.add(new HiveOpConverterPostProc());
transformations.add(new Generator(postExecHooks));
for (Transform t : transformations) {
pCtx = t.transform(pCtx);
}
}
}
// 5. Take care of view creation
if (createVwDesc != null) {
if (ctx.getExplainAnalyze() == AnalyzeState.RUNNING) {
return;
}
if (!ctx.isCboSucceeded()) {
saveViewDefinition();
}
// validate the create view statement at this point, the createVwDesc gets
// all the information for semanticcheck
validateCreateView();
createVwDesc.setTablesUsed(pCtx.getTablesUsed());
}
// it means that in step 2, the ColumnAccessInfo was already created
if (!forViewCreation || getColumnAccessInfo() == null) {
// 6. Generate table access stats if required
if (HiveConf.getBoolVar(this.conf, HiveConf.ConfVars.HIVE_STATS_COLLECT_TABLEKEYS)) {
TableAccessAnalyzer tableAccessAnalyzer = new TableAccessAnalyzer(pCtx);
setTableAccessInfo(tableAccessAnalyzer.analyzeTableAccess());
}
AuxOpTreeSignature.linkAuxSignatures(pCtx);
// 7. Perform Logical optimization
if (LOG.isDebugEnabled()) {
LOG.debug("Before logical optimization\n" + Operator.toString(pCtx.getTopOps().values()));
}
Optimizer optm = new Optimizer();
optm.setPctx(pCtx);
optm.initialize(conf);
pCtx = optm.optimize();
if (pCtx.getColumnAccessInfo() != null) {
// set ColumnAccessInfo for view column authorization
setColumnAccessInfo(pCtx.getColumnAccessInfo());
}
if (LOG.isDebugEnabled()) {
LOG.debug("After logical optimization\n" + Operator.toString(pCtx.getTopOps().values()));
}
// 8. Generate column access stats if required - wait until column pruning
// takes place during optimization
boolean isColumnInfoNeedForAuth = SessionState.get().isAuthorizationModeV2() && HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVE_AUTHORIZATION_ENABLED);
if (isColumnInfoNeedForAuth || HiveConf.getBoolVar(this.conf, HiveConf.ConfVars.HIVE_STATS_COLLECT_SCANCOLS)) {
ColumnAccessAnalyzer columnAccessAnalyzer = new ColumnAccessAnalyzer(pCtx);
// view column access info is carried by this.getColumnAccessInfo().
setColumnAccessInfo(columnAccessAnalyzer.analyzeColumnAccess(this.getColumnAccessInfo()));
}
}
if (forViewCreation) {
return;
}
// 9. Optimize Physical op tree & Translate to target execution engine (MR,
// TEZ..)
compilePlan(pCtx);
// find all Acid FileSinkOperators
new QueryPlanPostProcessor(rootTasks, acidFileSinks, ctx.getExecutionId());
// 10. Attach CTAS/Insert-Commit-hooks for Storage Handlers
final Optional<TezTask> optionalTezTask = rootTasks.stream().filter(task -> task instanceof TezTask).map(task -> (TezTask) task).findFirst();
if (optionalTezTask.isPresent()) {
final TezTask tezTask = optionalTezTask.get();
rootTasks.stream().filter(task -> task.getWork() instanceof DDLWork).map(task -> (DDLWork) task.getWork()).filter(ddlWork -> ddlWork.getDDLDesc() instanceof PreInsertTableDesc).map(ddlWork -> (PreInsertTableDesc) ddlWork.getDDLDesc()).map(desc -> new InsertCommitHookDesc(desc.getTable(), desc.isOverwrite())).forEach(insertCommitHookDesc -> tezTask.addDependentTask(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), insertCommitHookDesc), conf)));
}
LOG.info("Completed plan generation");
// 11. put accessed columns to readEntity
if (HiveConf.getBoolVar(this.conf, HiveConf.ConfVars.HIVE_STATS_COLLECT_SCANCOLS)) {
putAccessedColumnsToReadEntity(inputs, columnAccessInfo);
}
if (isCacheEnabled && lookupInfo != null) {
if (queryCanBeCached()) {
// requires SemanticAnalyzer state to be reset.
if (checkResultsCache(lookupInfo, true)) {
LOG.info("Cached result found on second lookup");
} else {
QueryResultsCache.QueryInfo queryInfo = createCacheQueryInfoForQuery(lookupInfo);
// Specify that the results of this query can be cached.
setCacheUsage(new CacheUsage(CacheUsage.CacheStatus.CAN_CACHE_QUERY_RESULTS, queryInfo));
}
}
}
}
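The non-native-table validation above uses an Optional chain to reach the storage handler without a null check at every hop. The sketch below isolates that idiom with placeholder record types; they stand in for FileSinkDesc, Table and HiveStorageHandler and are not the Hive classes themselves.

import java.util.Optional;

// Null-safe navigation: each map() step short-circuits on null instead of throwing.
final class OptionalChainSketch {
    record Handler(String name) {
        void validate() { System.out.println("validated by " + name); }
    }
    record Table(Handler storageHandler) {}
    record SinkDesc(Table table) {}

    static void validateSink(SinkDesc desc) {
        Optional.ofNullable(desc)
                .map(SinkDesc::table)
                .map(Table::storageHandler)
                .ifPresent(Handler::validate);
    }

    public static void main(String[] args) {
        validateSink(new SinkDesc(new Table(new Handler("demoHandler"))));  // prints
        validateSink(new SinkDesc(new Table(null)));                        // no-op, no NPE
        validateSink(null);                                                 // no-op, no NPE
    }
}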