Use of org.apache.hadoop.hive.ql.exec.TezDummyStoreOperator in project hive by apache.
From class TezCompiler, method removeSemijoinOptimizationFromSMBJoins.
private static void removeSemijoinOptimizationFromSMBJoins(OptimizeTezProcContext procCtx)
    throws SemanticException {
  Map<SemanticRule, SemanticNodeProcessor> opRules =
      new LinkedHashMap<SemanticRule, SemanticNodeProcessor>();
  opRules.put(
      new RuleRegExp("R1", TableScanOperator.getOperatorName() + "%" + ".*"
          + TezDummyStoreOperator.getOperatorName() + "%"
          + CommonMergeJoinOperator.getOperatorName() + "%"),
      new SMBJoinOpProc());
  SMBJoinOpProcContext ctx = new SMBJoinOpProcContext();
  // The dispatcher finds SMB joins and, if there is a semijoin optimization
  // feeding one of them, removes it.
  SemanticDispatcher disp = new DefaultRuleDispatcher(null, opRules, ctx);
  List<Node> topNodes = new ArrayList<Node>();
  topNodes.addAll(procCtx.parseContext.getTopOps().values());
  SemanticGraphWalker ogw = new PreOrderOnceWalker(disp);
  ogw.startWalking(topNodes, null);
  List<TableScanOperator> tsOps = new ArrayList<>();
  // Iterate over the map and remove semijoin optimizations if needed.
  for (CommonMergeJoinOperator joinOp : ctx.JoinOpToTsOpMap.keySet()) {
    // Get one top-level TS op directly from the stack.
    tsOps.add(ctx.JoinOpToTsOpMap.get(joinOp));
    // Get the other one by examining the join op's parents.
    List<Operator<?>> parents = joinOp.getParentOperators();
    for (Operator<?> parent : parents) {
      if (parent instanceof TezDummyStoreOperator) {
        // Already accounted for.
        continue;
      }
      while (parent != null) {
        if (parent instanceof TableScanOperator) {
          tsOps.add((TableScanOperator) parent);
          break;
        }
        parent = parent.getParentOperators().get(0);
      }
    }
  }
  // Now that the relevant TableScanOperators are known, find out whether a
  // semijoin filter exists on any of them; if so, remove it.
  ParseContext pctx = procCtx.parseContext;
  Set<ReduceSinkOperator> rsSet = new HashSet<>(pctx.getRsToSemiJoinBranchInfo().keySet());
  for (TableScanOperator ts : tsOps) {
    for (ReduceSinkOperator rs : rsSet) {
      SemiJoinBranchInfo sjInfo = pctx.getRsToSemiJoinBranchInfo().get(rs);
      if (sjInfo != null && ts == sjInfo.getTsOp()) {
        // Match!
        if (sjInfo.getIsHint()) {
          throw new SemanticException("Removing hinted semijoin as it is with SMB join "
              + rs + " : " + ts);
        }
        if (LOG.isDebugEnabled()) {
          LOG.debug("Semijoin optimization found going to SMB join. Removing semijoin "
              + OperatorUtils.getOpNamePretty(rs) + " - " + OperatorUtils.getOpNamePretty(ts));
        }
        GenTezUtils.removeBranch(rs);
        GenTezUtils.removeSemiJoinOperator(pctx, rs, ts);
      }
    }
  }
}
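
The setup above is Hive's generic rule-dispatch pattern: a RuleRegExp over the concatenated operator names on the current traversal path (a table scan, then a Tez dummy store, then a common merge join) selects matching subpaths of the operator DAG, and the bound node processor fires on each match while a walker drives the traversal. A minimal, self-contained sketch of that dispatch idea, using hypothetical toy types rather than Hive's actual org.apache.hadoop.hive.ql.lib API:

import java.util.ArrayDeque;
import java.util.Deque;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import java.util.regex.Pattern;

// Toy stand-ins for Hive's Node / SemanticNodeProcessor; everything here is
// illustrative, not the real framework classes.
interface ToyNode {
  String getName();
  List<ToyNode> getChildren();
}

interface ToyProcessor {
  void process(Deque<ToyNode> stack, ToyNode current);
}

class ToyRuleDispatcher {
  private final Map<Pattern, ToyProcessor> rules = new LinkedHashMap<>();

  void addRule(String regex, ToyProcessor proc) {
    rules.put(Pattern.compile(regex), proc);
  }

  // Depth-first walk. For each visited node, build the path string of operator
  // names from the root down to the node and fire the first rule whose regex
  // matches, mirroring how a rule dispatcher matches RuleRegExp patterns
  // against the traversal stack.
  void walk(ToyNode root) {
    walk(root, new ArrayDeque<>());
  }

  private void walk(ToyNode node, Deque<ToyNode> stack) {
    stack.addLast(node);
    StringBuilder path = new StringBuilder();
    for (ToyNode n : stack) {
      path.append(n.getName()).append('%');
    }
    for (Map.Entry<Pattern, ToyProcessor> rule : rules.entrySet()) {
      if (rule.getKey().matcher(path).find()) {
        rule.getValue().process(stack, node);
        break;
      }
    }
    for (ToyNode child : node.getChildren()) {
      walk(child, stack);
    }
    stack.removeLast();
  }
}

In the real method, the processor bound to rule R1 (SMBJoinOpProc) records each matched CommonMergeJoinOperator together with the table scan found on the stack into ctx.JoinOpToTsOpMap, which the loop after startWalking then consumes.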
Use of org.apache.hadoop.hive.ql.exec.TezDummyStoreOperator in project hive by apache.
From class ConvertJoinMapJoin, method convertJoinSMBJoin.
// Replaces the join operator with a new CommonMergeJoinOperator and removes
// the parent reduce sinks.
private void convertJoinSMBJoin(JoinOperator joinOp, OptimizeTezProcContext context,
    int mapJoinConversionPos, int numBuckets, boolean adjustParentsChildren)
    throws SemanticException {
  MapJoinDesc mapJoinDesc = null;
  if (adjustParentsChildren) {
    mapJoinDesc = MapJoinProcessor.getMapJoinDesc(context.conf, joinOp,
        joinOp.getConf().isLeftInputJoin(), joinOp.getConf().getBaseSrc(),
        joinOp.getConf().getMapAliases(), mapJoinConversionPos, true);
  } else {
    JoinDesc joinDesc = joinOp.getConf();
    // Retain the original join desc in the map join.
    mapJoinDesc = new MapJoinDesc(
        MapJoinProcessor.getKeys(joinOp.getConf().isLeftInputJoin(),
            joinOp.getConf().getBaseSrc(), joinOp).getRight(),
        null, joinDesc.getExprs(), null, null, joinDesc.getOutputColumnNames(),
        mapJoinConversionPos, joinDesc.getConds(), joinDesc.getFilters(),
        joinDesc.getNoOuterJoin(), null, joinDesc.getMemoryMonitorInfo(),
        joinDesc.getInMemoryDataSize());
    mapJoinDesc.setNullSafes(joinDesc.getNullSafes());
    mapJoinDesc.setFilterMap(joinDesc.getFilterMap());
    mapJoinDesc.setResidualFilterExprs(joinDesc.getResidualFilterExprs());
    // Keep the column expression map; the explain plan uses it for display.
    mapJoinDesc.setColumnExprMap(joinDesc.getColumnExprMap());
    mapJoinDesc.setReversedExprs(joinDesc.getReversedExprs());
    mapJoinDesc.resetOrder();
  }
  CommonMergeJoinOperator mergeJoinOp =
      (CommonMergeJoinOperator) OperatorFactory.get(joinOp.getCompilationOpContext(),
          new CommonMergeJoinDesc(numBuckets, mapJoinConversionPos, mapJoinDesc),
          joinOp.getSchema());
  context.parseContext.getContext().getPlanMapper().link(joinOp, mergeJoinOp);
  int numReduceSinks = joinOp.getOpTraits().getNumReduceSinks();
  OpTraits opTraits = new OpTraits(joinOp.getOpTraits().getBucketColNames(), numBuckets,
      joinOp.getOpTraits().getSortCols(), numReduceSinks);
  mergeJoinOp.setOpTraits(opTraits);
  mergeJoinOp.getConf().setBucketingVersion(joinOp.getConf().getBucketingVersion());
  preserveOperatorInfos(mergeJoinOp, joinOp, context);
  for (Operator<? extends OperatorDesc> parentOp : joinOp.getParentOperators()) {
    int pos = parentOp.getChildOperators().indexOf(joinOp);
    parentOp.getChildOperators().remove(pos);
    parentOp.getChildOperators().add(pos, mergeJoinOp);
  }
  for (Operator<? extends OperatorDesc> childOp : joinOp.getChildOperators()) {
    int pos = childOp.getParentOperators().indexOf(joinOp);
    childOp.getParentOperators().remove(pos);
    childOp.getParentOperators().add(pos, mergeJoinOp);
  }
  List<Operator<? extends OperatorDesc>> childOperators = mergeJoinOp.getChildOperators();
  List<Operator<? extends OperatorDesc>> parentOperators = mergeJoinOp.getParentOperators();
  childOperators.clear();
  parentOperators.clear();
  childOperators.addAll(joinOp.getChildOperators());
  parentOperators.addAll(joinOp.getParentOperators());
  mergeJoinOp.getConf().setGenJoinKeys(false);
  if (adjustParentsChildren) {
    mergeJoinOp.getConf().setGenJoinKeys(true);
    List<Operator<? extends OperatorDesc>> newParentOpList =
        new ArrayList<Operator<? extends OperatorDesc>>();
    for (Operator<? extends OperatorDesc> parentOp : mergeJoinOp.getParentOperators()) {
      for (Operator<? extends OperatorDesc> grandParentOp : parentOp.getParentOperators()) {
        grandParentOp.getChildOperators().remove(parentOp);
        grandParentOp.getChildOperators().add(mergeJoinOp);
        newParentOpList.add(grandParentOp);
      }
    }
    mergeJoinOp.getParentOperators().clear();
    mergeJoinOp.getParentOperators().addAll(newParentOpList);
    List<Operator<? extends OperatorDesc>> parentOps =
        new ArrayList<Operator<? extends OperatorDesc>>(mergeJoinOp.getParentOperators());
    for (Operator<? extends OperatorDesc> parentOp : parentOps) {
      int parentIndex = mergeJoinOp.getParentOperators().indexOf(parentOp);
      if (parentIndex == mapJoinConversionPos) {
        continue;
      }
      // Convert a hash-mode group by on this branch to FINAL mode so its results
      // are produced during join processing, not at the time of close.
      if (parentOp instanceof GroupByOperator) {
        GroupByOperator gpbyOp = (GroupByOperator) parentOp;
        if (gpbyOp.getConf().getMode() == GroupByDesc.Mode.HASH) {
          // No need to change for MERGE_PARTIAL etc.
          gpbyOp.getConf().setMode(GroupByDesc.Mode.FINAL);
        }
      }
      // Insert the dummy store operator here.
      DummyStoreOperator dummyStoreOp =
          new TezDummyStoreOperator(mergeJoinOp.getCompilationOpContext());
      dummyStoreOp.setConf(new DummyStoreDesc());
      dummyStoreOp.setParentOperators(new ArrayList<Operator<? extends OperatorDesc>>());
      dummyStoreOp.setChildOperators(new ArrayList<Operator<? extends OperatorDesc>>());
      dummyStoreOp.getChildOperators().add(mergeJoinOp);
      int index = parentOp.getChildOperators().indexOf(mergeJoinOp);
      parentOp.getChildOperators().remove(index);
      parentOp.getChildOperators().add(index, dummyStoreOp);
      dummyStoreOp.getParentOperators().add(parentOp);
      mergeJoinOp.getParentOperators().remove(parentIndex);
      mergeJoinOp.getParentOperators().add(parentIndex, dummyStoreOp);
    }
  }
  mergeJoinOp.cloneOriginalParentsList(mergeJoinOp.getParentOperators());
}
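
The adjustParentsChildren branch is doubly-linked DAG surgery: on every side except the one at mapJoinConversionPos, a TezDummyStoreOperator is spliced between the parent and the merge join, and the remove(index)/add(index, op) pairs keep both child and parent lists positionally stable, because join inputs are identified by position. A small self-contained sketch of the same splice on a hypothetical ToyOp type (not Hive's Operator class):

import java.util.ArrayList;
import java.util.List;

// Hypothetical minimal operator with doubly-linked parent/child lists.
class ToyOp {
  final String name;
  final List<ToyOp> parents = new ArrayList<>();
  final List<ToyOp> children = new ArrayList<>();

  ToyOp(String name) {
    this.name = name;
  }

  // Splice `middle` into the edge parent -> child while preserving the edge's
  // index in both lists, so position-sensitive consumers (join sides) are
  // unaffected. Equivalent to Hive's remove(index)/add(index, op) pairs.
  static void splice(ToyOp parent, ToyOp child, ToyOp middle) {
    int childIdx = parent.children.indexOf(child);
    parent.children.set(childIdx, middle);
    middle.parents.add(parent);
    middle.children.add(child);
    int parentIdx = child.parents.indexOf(parent);
    child.parents.set(parentIdx, middle);
  }
}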
Use of org.apache.hadoop.hive.ql.exec.TezDummyStoreOperator in project hive by apache.
From class MapRecordProcessor, method init.
@Override
void init(MRTaskReporter mrReporter, Map<String, LogicalInput> inputs,
    Map<String, LogicalOutput> outputs) throws Exception {
  perfLogger.perfLogBegin(CLASS_NAME, PerfLogger.TEZ_INIT_OPERATORS);
  super.init(mrReporter, inputs, outputs);
  checkAbortCondition();
  String key = processorContext.getTaskVertexName() + MAP_PLAN_KEY;
  cacheKeys.add(key);
  // Create map and fetch operators.
  if (!isInCompaction) {
    mapWork = cache.retrieve(key, () -> Utilities.getMapWork(jconf));
  } else {
    // During query-based compaction we don't want to retrieve old MapWork from
    // the cache; we want a new mapper and a new UDF validate_acid_sort_order
    // instance for each bucket, otherwise validate_acid_sort_order will fail.
    mapWork = Utilities.getMapWork(jconf);
  }
  // TODO HIVE-14042. Cleanup may be required if exiting early.
  Utilities.setMapWork(jconf, mapWork);
  for (PartitionDesc part : mapWork.getAliasToPartnInfo().values()) {
    TableDesc tableDesc = part.getTableDesc();
    Utilities.copyJobSecretToTableProperties(tableDesc);
  }
  String prefixes = jconf.get(DagUtils.TEZ_MERGE_WORK_FILE_PREFIXES);
  if (prefixes != null) {
    mergeWorkList = new ArrayList<>();
    for (final String prefix : prefixes.split(",")) {
      if (prefix == null || prefix.isEmpty()) {
        continue;
      }
      key = processorContext.getTaskVertexName() + prefix;
      cacheKeys.add(key);
      checkAbortCondition();
      mergeWorkList.add((MapWork) cache.retrieve(key, () -> Utilities.getMergeWork(jconf, prefix)));
    }
  }
  MapredContext.init(true, new JobConf(jconf));
  ((TezContext) MapredContext.get()).setInputs(inputs);
  ((TezContext) MapredContext.get()).setTezProcessorContext(processorContext);
  // Update the JobConf using MRInput; info like the file name comes in via this.
  checkAbortCondition();
  legacyMRInput = getMRInput(inputs);
  if (legacyMRInput != null) {
    Configuration updatedConf = legacyMRInput.getConfigUpdates();
    if (updatedConf != null) {
      for (Entry<String, String> entry : updatedConf) {
        jconf.set(entry.getKey(), entry.getValue());
      }
    }
  }
  checkAbortCondition();
  createOutputMap();
  // Start all the outputs.
  for (Entry<String, LogicalOutput> outputEntry : outputs.entrySet()) {
    LOG.debug("Starting Output: " + outputEntry.getKey());
    outputEntry.getValue().start();
    ((TezKVOutputCollector) outMap.get(outputEntry.getKey())).initialize();
  }
  checkAbortCondition();
  try {
    CompilationOpContext runtimeCtx = new CompilationOpContext();
    if (mapWork.getVectorMode()) {
      mapOp = new VectorMapOperator(runtimeCtx);
    } else {
      mapOp = new MapOperator(runtimeCtx);
    }
    // Not synchronizing creation of mapOp with an invocation. Check immediately
    // after creation in case abort has been set.
    // Relying on the regular flow to clean up the actual operator, i.e. if an
    // exception is thrown, an attempt will be made to clean up the op.
    // If we are here, exit out via an exception. If we're in the middle of the
    // operator.initialize call further down, we rely upon op.abort().
    checkAbortCondition();
    mapOp.clearConnectedOperators();
    mapOp.setExecContext(execContext);
    boolean fromCache = false;
    if (mergeWorkList != null) {
      AbstractMapOperator mergeMapOp = null;
      for (BaseWork mergeWork : mergeWorkList) {
        // TODO HIVE-14042. What is mergeWork, and why is it not part of the
        // regular operator chain? The mergeMapOp.initialize call further down
        // can block and will not receive information about an abort request.
        MapWork mergeMapWork = (MapWork) mergeWork;
        if (mergeMapWork.getVectorMode()) {
          mergeMapOp = new VectorMapOperator(runtimeCtx);
        } else {
          mergeMapOp = new MapOperator(runtimeCtx);
        }
        mergeMapOpList.add(mergeMapOp);
        // Initialize the merge operators first.
        if (mergeMapOp != null) {
          mergeMapOp.setConf(mergeMapWork);
          LOG.info("Input name is {}", mergeMapWork.getName());
          jconf.set(Utilities.INPUT_NAME, mergeMapWork.getName());
          mergeMapOp.initialize(jconf, null);
          // If there are no files/partitions to read, we need to skip trying to read.
          MultiMRInput multiMRInput = multiMRInputMap.get(mergeMapWork.getName());
          boolean skipRead = false;
          if (multiMRInput == null) {
            LOG.info("Multi MR Input for work {} is null. Skipping read.", mergeMapWork.getName());
            skipRead = true;
          } else {
            Collection<KeyValueReader> keyValueReaders = multiMRInput.getKeyValueReaders();
            if ((keyValueReaders == null) || (keyValueReaders.isEmpty())) {
              LOG.info("Key value readers are null or empty and hence skipping read. "
                  + "KeyValueReaders = {}", keyValueReaders);
              skipRead = true;
            }
          }
          if (skipRead) {
            List<Operator<?>> children = new ArrayList<>();
            children.addAll(mergeMapOp.getConf().getAliasToWork().values());
            // Do the same thing as setChildren when there is nothing to read.
            // The setChildren method initializes the object inspectors needed
            // by the operators based on path and partition information, which
            // we don't have in this case.
            mergeMapOp.initEmptyInputChildren(children, jconf);
          } else {
            // The setChildren method initializes the object inspectors needed
            // by the operators based on path and partition information.
            mergeMapOp.setChildren(jconf);
          }
          Operator<? extends OperatorDesc> finalOp = getFinalOp(mergeMapOp);
          if (finalOp instanceof TezDummyStoreOperator) {
            // Ensure that we don't try to read any data in case of skip read.
            ((TezDummyStoreOperator) finalOp).setFetchDone(skipRead);
            mapOp.setConnectedOperators(mergeMapWork.getTag(), (DummyStoreOperator) finalOp);
          } else {
            // The plan is already connected, which means it was derived from the cache.
            fromCache = true;
          }
          mergeMapOp.passExecContext(new ExecMapperContext(jconf));
          mergeMapOp.initializeLocalWork(jconf);
        }
      }
    }
    if (!fromCache) {
      // If not from the cache, we still need to hook up the plans.
      ((TezContext) (MapredContext.get())).setDummyOpsMap(mapOp.getConnectedOperators());
    }
    // Initialize the map operator.
    mapOp.setConf(mapWork);
    LOG.info("Main input name is " + mapWork.getName());
    jconf.set(Utilities.INPUT_NAME, mapWork.getName());
    mapOp.initialize(jconf, null);
    checkAbortCondition();
    mapOp.setChildren(jconf);
    mapOp.passExecContext(execContext);
    LOG.info(mapOp.dump(0));
    // Set the memory available for operators.
    long memoryAvailableToTask = processorContext.getTotalMemoryAvailableToTask();
    if (mapOp.getConf() != null) {
      mapOp.getConf().setMaxMemoryAvailable(memoryAvailableToTask);
      LOG.info("Memory available for operators set to {}",
          LlapUtil.humanReadableByteCount(memoryAvailableToTask));
    }
    OperatorUtils.setMemoryAvailable(mapOp.getChildOperators(), memoryAvailableToTask);
    mapOp.initializeLocalWork(jconf);
    // Set up the values registry.
    checkAbortCondition();
    String valueRegistryKey = DynamicValue.DYNAMIC_VALUE_REGISTRY_CACHE_KEY;
    // On LLAP the dynamic value registry might already be cached.
    final DynamicValueRegistryTez registryTez =
        dynamicValueCache.retrieve(valueRegistryKey, () -> new DynamicValueRegistryTez());
    dynamicValueCacheKeys.add(valueRegistryKey);
    RegistryConfTez registryConf = new RegistryConfTez(jconf, mapWork, processorContext, inputs);
    registryTez.init(registryConf);
    checkAbortCondition();
    initializeMapRecordSources();
    mapOp.initializeMapOperator(jconf);
    if ((mergeMapOpList != null) && !mergeMapOpList.isEmpty()) {
      for (AbstractMapOperator mergeMapOp : mergeMapOpList) {
        jconf.set(Utilities.INPUT_NAME, mergeMapOp.getConf().getName());
        // TODO HIVE-14042. Abort handling: handling of mergeMapOp.
        mergeMapOp.initializeMapOperator(jconf);
      }
    }
    // Initialization isn't finished until all parents of all operators
    // are initialized. For broadcast joins that means initializing the
    // dummy parent operators as well.
    List<HashTableDummyOperator> dummyOps = mapWork.getDummyOps();
    jconf.set(Utilities.INPUT_NAME, mapWork.getName());
    if (dummyOps != null) {
      for (Operator<? extends OperatorDesc> dummyOp : dummyOps) {
        dummyOp.setExecContext(execContext);
        // TODO HIVE-14042. Handling of dummyOps, and propagating abort information to them.
        dummyOp.initialize(jconf, null);
      }
    }
    OperatorUtils.setChildrenCollector(mapOp.getChildOperators(), outMap);
    mapOp.setReporter(reporter);
    MapredContext.get().setReporter(reporter);
  } catch (Throwable e) {
    setAborted(true);
    if (e instanceof OutOfMemoryError) {
      // Don't create a new object if we are already out of memory.
      throw (OutOfMemoryError) e;
    } else if (e instanceof InterruptedException) {
      LOG.info("Hit an interrupt while initializing MapRecordProcessor. Message={}",
          e.getMessage());
      throw (InterruptedException) e;
    } else {
      throw new RuntimeException("Map operator initialization failed", e);
    }
  }
  perfLogger.perfLogEnd(CLASS_NAME, PerfLogger.TEZ_INIT_OPERATORS);
}
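
A recurring idiom in this method is cache.retrieve(key, loader): per-vertex plan objects (the MapWork, each merge work, the dynamic value registry) are fetched from an object cache and computed only on a miss, except during query-based compaction where a fresh MapWork is forced. A minimal sketch of such a compute-if-absent cache; the PlanCache type below is hypothetical, not Hive's ObjectCache API:

import java.util.concurrent.Callable;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;

class PlanCache {
  private final ConcurrentMap<String, Object> cache = new ConcurrentHashMap<>();

  // Return the cached value for key, computing and caching it on a miss.
  @SuppressWarnings("unchecked")
  <T> T retrieve(String key, Callable<T> loader) throws Exception {
    Object value = cache.get(key);
    if (value == null) {
      value = loader.call();
      Object prior = cache.putIfAbsent(key, value);
      if (prior != null) {
        value = prior;  // another thread won the race; use its value
      }
    }
    return (T) value;
  }
}

With this shape, a call such as cache.retrieve(key, () -> Utilities.getMapWork(jconf)) either reuses the plan cached under the vertex-scoped key or builds it once and caches it for later tasks on the same vertex.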