
Example 1 with EdgeType

use of org.apache.hadoop.hive.ql.plan.TezEdgeProperty.EdgeType in project hive by apache.

the class GenTezWork method process.

@Override
public Object process(Node nd, Stack<Node> stack, NodeProcessorCtx procContext, Object... nodeOutputs) throws SemanticException {
    GenTezProcContext context = (GenTezProcContext) procContext;
    assert context != null && context.currentTask != null && context.currentRootOperator != null;
    // Operator is a file sink or reduce sink. Something that forces
    // a new vertex.
    Operator<?> operator = (Operator<?>) nd;
    // root is the start of the operator pipeline we're currently
    // packing into a vertex, typically a table scan, union or join
    Operator<?> root = context.currentRootOperator;
    LOG.debug("Root operator: " + root);
    LOG.debug("Leaf operator: " + operator);
    if (context.clonedReduceSinks.contains(operator)) {
        // just skip and keep going
        return null;
    }
    TezWork tezWork = context.currentTask.getWork();
    // Right now the work graph is pretty simple. If there is no
    // preceding work we have a root and will generate a map
    // vertex. If there is preceding work we will generate
    // a reduce vertex.
    BaseWork work;
    if (context.rootToWorkMap.containsKey(root)) {
        // seeing this root before means the operator graph branched (e.g. a multi-insert),
        // which will result in a vertex with multiple FS or RS operators.
        if (context.childToWorkMap.containsKey(operator)) {
            // if we've seen both root and child, we can bail.
            // clear out the mapjoin set. we don't need it anymore.
            context.currentMapJoinOperators.clear();
            // clear out the union set. we don't need it anymore.
            context.currentUnionOperators.clear();
            return null;
        } else {
            // At this point we don't have to do anything special. Just
            // run through the regular paces w/o creating a new task.
            work = context.rootToWorkMap.get(root);
        }
    } else {
        // create a new vertex
        if (context.preceedingWork == null) {
            work = utils.createMapWork(context, root, tezWork, null);
        } else {
            work = GenTezUtils.createReduceWork(context, root, tezWork);
        }
        context.rootToWorkMap.put(root, work);
    }
    // this is where we set the sort columns that we will be using for KeyValueInputMerge
    if (operator instanceof DummyStoreOperator) {
        work.addSortCols(root.getOpTraits().getSortCols().get(0));
    }
    if (!context.childToWorkMap.containsKey(operator)) {
        List<BaseWork> workItems = new LinkedList<BaseWork>();
        workItems.add(work);
        context.childToWorkMap.put(operator, workItems);
    } else {
        context.childToWorkMap.get(operator).add(work);
    }
    // this transformation needs to run first because it changes the work item itself,
    // which can affect the working of all downstream transformations.
    if (context.currentMergeJoinOperator != null) {
        // we are currently walking the big table side of the merge join. we need to create or hook up
        // merge join work.
        MergeJoinWork mergeJoinWork = null;
        if (context.opMergeJoinWorkMap.containsKey(context.currentMergeJoinOperator)) {
            // we have found a merge work corresponding to this closing operator. Hook up this work.
            mergeJoinWork = context.opMergeJoinWorkMap.get(context.currentMergeJoinOperator);
        } else {
            // we need to create the merge join work
            mergeJoinWork = new MergeJoinWork();
            mergeJoinWork.setMergeJoinOperator(context.currentMergeJoinOperator);
            tezWork.add(mergeJoinWork);
            context.opMergeJoinWorkMap.put(context.currentMergeJoinOperator, mergeJoinWork);
        }
        // connect the work correctly.
        work.addSortCols(root.getOpTraits().getSortCols().get(0));
        mergeJoinWork.addMergedWork(work, null, context.leafOperatorToFollowingWork);
        Operator<? extends OperatorDesc> parentOp = getParentFromStack(context.currentMergeJoinOperator, stack);
        // Set the big table position. Both the reduce work and merge join operator
        // should be set with the same value.
        //      int pos = context.currentMergeJoinOperator.getTagForOperator(parentOp);
        int pos = context.currentMergeJoinOperator.getConf().getBigTablePosition();
        work.setTag(pos);
        context.currentMergeJoinOperator.getConf().setBigTablePosition(pos);
        tezWork.setVertexType(work, VertexType.MULTI_INPUT_UNINITIALIZED_EDGES);
        for (BaseWork parentWork : tezWork.getParents(work)) {
            TezEdgeProperty edgeProp = tezWork.getEdgeProperty(parentWork, work);
            tezWork.disconnect(parentWork, work);
            tezWork.connect(parentWork, mergeJoinWork, edgeProp);
        }
        for (BaseWork childWork : tezWork.getChildren(work)) {
            TezEdgeProperty edgeProp = tezWork.getEdgeProperty(work, childWork);
            tezWork.disconnect(work, childWork);
            tezWork.connect(mergeJoinWork, childWork, edgeProp);
        }
        tezWork.remove(work);
        context.rootToWorkMap.put(root, mergeJoinWork);
        context.childToWorkMap.get(operator).remove(work);
        context.childToWorkMap.get(operator).add(mergeJoinWork);
        work = mergeJoinWork;
        context.currentMergeJoinOperator = null;
    }
    // remember which mapjoin operator links with which work
    if (!context.currentMapJoinOperators.isEmpty()) {
        for (MapJoinOperator mj : context.currentMapJoinOperators) {
            // for a dynamic partitioned hash join, set things up here
            // so we can later run the same logic that is run in ReduceSinkMapJoinProc.
            if (mj.getConf().isDynamicPartitionHashJoin()) {
                // Since this is a dynamic partitioned hash join, the work for this join should be a ReduceWork
                ReduceWork reduceWork = (ReduceWork) work;
                int bigTablePosition = mj.getConf().getPosBigTable();
                reduceWork.setTag(bigTablePosition);
                // Use context.mapJoinParentMap to get the original RS parents, because
                // the MapJoin's parents may have been replaced by dummy operator.
                List<Operator<?>> mapJoinOriginalParents = context.mapJoinParentMap.get(mj);
                if (mapJoinOriginalParents == null) {
                    throw new SemanticException("Unexpected error - context.mapJoinParentMap did not have an entry for " + mj);
                }
                for (int pos = 0; pos < mapJoinOriginalParents.size(); ++pos) {
                    // This processing only needs to happen for the small tables
                    if (pos == bigTablePosition) {
                        continue;
                    }
                    Operator<?> parentOp = mapJoinOriginalParents.get(pos);
                    context.smallTableParentToMapJoinMap.put(parentOp, mj);
                    ReduceSinkOperator parentRS = (ReduceSinkOperator) parentOp;
                    // TableDesc needed for dynamic partitioned hash join
                    GenMapRedUtils.setKeyAndValueDesc(reduceWork, parentRS);
                    // If this ReduceSink has already been processed, the MapJoin
                    // has its ReduceSink parent removed and the parent work already exists.
                    if (!context.mapJoinToUnprocessedSmallTableReduceSinks.get(mj).contains(parentRS)) {
                        // This reduce sink has been processed already, so the work for the parentRS exists
                        BaseWork parentWork = ReduceSinkMapJoinProc.getMapJoinParentWork(context, parentRS);
                        int tag = parentRS.getConf().getTag();
                        tag = (tag == -1 ? 0 : tag);
                        reduceWork.getTagToInput().put(tag, parentWork.getName());
                    }
                }
            }
            LOG.debug("Processing map join: " + mj);
            // remember which work items belong to this mapjoin so we can hook up the small-table sides of the
            // mapjoin later
            if (!context.mapJoinWorkMap.containsKey(mj)) {
                List<BaseWork> workItems = new LinkedList<BaseWork>();
                workItems.add(work);
                context.mapJoinWorkMap.put(mj, workItems);
            } else {
                context.mapJoinWorkMap.get(mj).add(work);
            }
            /*
         * this happens in case of map join operations.
         * The tree looks like this:
         *
         *        RS <--- we are here perhaps
         *        |
         *     MapJoin
         *     /     \
         *   RS       TS
         *  /
         * TS
         *
         * If we are at the RS pointed to above and have already visited the RS
         * following the TS, then work has already been generated for the TS-RS branch.
         * We need to hook the current work to this generated work.
         */
            if (context.linkOpWithWorkMap.containsKey(mj)) {
                Map<BaseWork, TezEdgeProperty> linkWorkMap = context.linkOpWithWorkMap.get(mj);
                if (linkWorkMap != null) {
                    // Note: it's not quite clear why this is done inside this if. Seems like it should be on the top level.
                    if (context.linkChildOpWithDummyOp.containsKey(mj)) {
                        if (LOG.isDebugEnabled()) {
                            LOG.debug("Adding dummy ops to work: " + work.getName() + ": " + context.linkChildOpWithDummyOp.get(mj));
                        }
                        for (Operator<?> dummy : context.linkChildOpWithDummyOp.get(mj)) {
                            work.addDummyOp((HashTableDummyOperator) dummy);
                        }
                    }
                    for (Entry<BaseWork, TezEdgeProperty> parentWorkMap : linkWorkMap.entrySet()) {
                        BaseWork parentWork = parentWorkMap.getKey();
                        LOG.debug("connecting " + parentWork.getName() + " with " + work.getName());
                        TezEdgeProperty edgeProp = parentWorkMap.getValue();
                        tezWork.connect(parentWork, work, edgeProp);
                        if (edgeProp.getEdgeType() == EdgeType.CUSTOM_EDGE) {
                            tezWork.setVertexType(work, VertexType.INITIALIZED_EDGES);
                        }
                        // need to set up the output name for the reduce sink now that we know the name
                        // of the downstream work
                        for (ReduceSinkOperator r : context.linkWorkWithReduceSinkMap.get(parentWork)) {
                            if (r.getConf().getOutputName() != null) {
                                LOG.debug("Cloning reduce sink for multi-child broadcast edge");
                                // we've already set this one up. Need to clone for the next work.
                                r = (ReduceSinkOperator) OperatorFactory.getAndMakeChild(r.getCompilationOpContext(), (ReduceSinkDesc) r.getConf().clone(), new RowSchema(r.getSchema()), r.getParentOperators());
                                context.clonedReduceSinks.add(r);
                            }
                            r.getConf().setOutputName(work.getName());
                            context.connectedReduceSinks.add(r);
                        }
                    }
                }
            }
        }
        // clear out the set. we don't need it anymore.
        context.currentMapJoinOperators.clear();
    }
    // This is where we cut the operator tree; remember that
    // we might have to connect parent work with this work later.
    for (Operator<?> parent : new ArrayList<Operator<?>>(root.getParentOperators())) {
        if (LOG.isDebugEnabled()) {
            LOG.debug("Removing " + parent + " as parent from " + root);
        }
        context.leafOperatorToFollowingWork.remove(parent);
        context.leafOperatorToFollowingWork.put(parent, work);
        root.removeParent(parent);
    }
    if (!context.currentUnionOperators.isEmpty()) {
        // union all operators were encountered while walking down to this leaf;
        // please see more details of context.currentUnionOperators in GenTezWorkWalker
        UnionWork unionWork;
        if (context.unionWorkMap.containsKey(operator)) {
            // we've seen this terminal before and the union work already exists; there will be
            // no children since we've passed this operator before.
            assert operator.getChildOperators().isEmpty();
            unionWork = (UnionWork) context.unionWorkMap.get(operator);
            // finally connect the union work with work
            connectUnionWorkWithWork(unionWork, work, tezWork, context);
        } else {
            // we've not seen this terminal before. we need to check
            // rootUnionWorkMap, which maps the root operator of a union work
            // to its union work
            unionWork = context.rootUnionWorkMap.get(root);
            if (unionWork == null) {
                // if unionWork is null, it means it is the first time. we need to
                // create a union work object and add this work to it. Subsequent
                // work should reference the union and not the actual work.
                unionWork = GenTezUtils.createUnionWork(context, root, operator, tezWork);
                // finally connect the union work with work
                connectUnionWorkWithWork(unionWork, work, tezWork, context);
            }
        }
        context.currentUnionOperators.clear();
        work = unionWork;
    }
    // Terminology note: roots are data sources, leaves are data sinks. I know. If this leaf already
    // has following work recorded, this is the second pass and we need to connect the two work items.
    if (context.leafOperatorToFollowingWork.containsKey(operator)) {
        BaseWork followingWork = context.leafOperatorToFollowingWork.get(operator);
        long bytesPerReducer = context.conf.getLongVar(HiveConf.ConfVars.BYTESPERREDUCER);
        LOG.debug("Second pass. Leaf operator: " + operator + " has common downstream work:" + followingWork);
        if (operator instanceof DummyStoreOperator) {
            // this is the small table side.
            assert (followingWork instanceof MergeJoinWork);
            MergeJoinWork mergeJoinWork = (MergeJoinWork) followingWork;
            CommonMergeJoinOperator mergeJoinOp = mergeJoinWork.getMergeJoinOperator();
            work.setTag(mergeJoinOp.getTagForOperator(operator));
            mergeJoinWork.addMergedWork(null, work, context.leafOperatorToFollowingWork);
            tezWork.setVertexType(mergeJoinWork, VertexType.MULTI_INPUT_UNINITIALIZED_EDGES);
            for (BaseWork parentWork : tezWork.getParents(work)) {
                TezEdgeProperty edgeProp = tezWork.getEdgeProperty(parentWork, work);
                tezWork.disconnect(parentWork, work);
                tezWork.connect(parentWork, mergeJoinWork, edgeProp);
            }
            work = mergeJoinWork;
        } else {
            // need to add this branch to the key + value info
            assert operator instanceof ReduceSinkOperator && ((followingWork instanceof ReduceWork) || (followingWork instanceof MergeJoinWork) || followingWork instanceof UnionWork);
            ReduceSinkOperator rs = (ReduceSinkOperator) operator;
            ReduceWork rWork = null;
            if (followingWork instanceof MergeJoinWork) {
                MergeJoinWork mergeJoinWork = (MergeJoinWork) followingWork;
                rWork = (ReduceWork) mergeJoinWork.getMainWork();
            } else if (followingWork instanceof UnionWork) {
                // this can only be possible if there is merge work followed by the union
                UnionWork unionWork = (UnionWork) followingWork;
                int index = getFollowingWorkIndex(tezWork, unionWork, rs);
                BaseWork baseWork = tezWork.getChildren(unionWork).get(index);
                if (baseWork instanceof MergeJoinWork) {
                    MergeJoinWork mergeJoinWork = (MergeJoinWork) baseWork;
                    // disconnect the connection to union work and connect to merge work
                    followingWork = mergeJoinWork;
                    rWork = (ReduceWork) mergeJoinWork.getMainWork();
                } else {
                    rWork = (ReduceWork) baseWork;
                }
            } else {
                rWork = (ReduceWork) followingWork;
            }
            GenMapRedUtils.setKeyAndValueDesc(rWork, rs);
            // remember which parent belongs to which tag
            int tag = rs.getConf().getTag();
            rWork.getTagToInput().put(tag == -1 ? 0 : tag, work.getName());
            // remember the output name of the reduce sink
            rs.getConf().setOutputName(rWork.getName());
            // For dynamic partitioned hash join, run the ReduceSinkMapJoinProc logic for any
            // ReduceSink parents that we missed.
            MapJoinOperator mj = context.smallTableParentToMapJoinMap.get(rs);
            if (mj != null) {
                // Only need to run the logic for tables we missed
                if (context.mapJoinToUnprocessedSmallTableReduceSinks.get(mj).contains(rs)) {
                    // ReduceSinkMapJoinProc logic does not work unless the ReduceSink is connected as
                    // a parent of the MapJoin, but at this point we have already removed all of the
                    // parents from the MapJoin.
                    // Try temporarily adding the RS as a parent
                    ArrayList<Operator<?>> tempMJParents = new ArrayList<Operator<?>>();
                    tempMJParents.add(rs);
                    mj.setParentOperators(tempMJParents);
                    // ReduceSink also needs MapJoin as child
                    List<Operator<?>> rsChildren = rs.getChildOperators();
                    rsChildren.add(mj);
                    // Since the MapJoin has had all of its other parents removed at this point,
                    // it would be bad here if processReduceSinkToHashJoin() tries to do anything
                    // with the RS parent based on its position in the list of parents.
                    ReduceSinkMapJoinProc.processReduceSinkToHashJoin(rs, mj, context);
                    // Remove any parents from MapJoin again
                    mj.removeParents();
                // TODO: do we also need to remove the MapJoin from the list of RS's children?
                }
            }
            if (!context.connectedReduceSinks.contains(rs)) {
                // add dependency between the two work items
                TezEdgeProperty edgeProp;
                EdgeType edgeType = GenTezUtils.determineEdgeType(work, followingWork, rs);
                if (rWork.isAutoReduceParallelism()) {
                    edgeProp = new TezEdgeProperty(context.conf, edgeType, true, rWork.getMinReduceTasks(), rWork.getMaxReduceTasks(), bytesPerReducer);
                } else {
                    edgeProp = new TezEdgeProperty(edgeType);
                }
                tezWork.connect(work, followingWork, edgeProp);
                context.connectedReduceSinks.add(rs);
            }
        }
    } else {
        LOG.debug("First pass. Leaf operator: " + operator);
    }
    // No children means we're at the bottom. If there are more operators to scan,
    // the next item will be a new root.
    if (!operator.getChildOperators().isEmpty()) {
        assert operator.getChildOperators().size() == 1;
        context.parentOfRoot = operator;
        context.currentRootOperator = operator.getChildOperators().get(0);
        context.preceedingWork = work;
    }
    return null;
}
Also used : CommonMergeJoinOperator(org.apache.hadoop.hive.ql.exec.CommonMergeJoinOperator) ReduceSinkOperator(org.apache.hadoop.hive.ql.exec.ReduceSinkOperator) MapJoinOperator(org.apache.hadoop.hive.ql.exec.MapJoinOperator) Operator(org.apache.hadoop.hive.ql.exec.Operator) DummyStoreOperator(org.apache.hadoop.hive.ql.exec.DummyStoreOperator) HashTableDummyOperator(org.apache.hadoop.hive.ql.exec.HashTableDummyOperator) TezEdgeProperty(org.apache.hadoop.hive.ql.plan.TezEdgeProperty) ArrayList(java.util.ArrayList) BaseWork(org.apache.hadoop.hive.ql.plan.BaseWork) CommonMergeJoinOperator(org.apache.hadoop.hive.ql.exec.CommonMergeJoinOperator) MapJoinOperator(org.apache.hadoop.hive.ql.exec.MapJoinOperator) MergeJoinWork(org.apache.hadoop.hive.ql.plan.MergeJoinWork) RowSchema(org.apache.hadoop.hive.ql.exec.RowSchema) DummyStoreOperator(org.apache.hadoop.hive.ql.exec.DummyStoreOperator) UnionWork(org.apache.hadoop.hive.ql.plan.UnionWork) ReduceWork(org.apache.hadoop.hive.ql.plan.ReduceWork) EdgeType(org.apache.hadoop.hive.ql.plan.TezEdgeProperty.EdgeType) LinkedList(java.util.LinkedList) ReduceSinkOperator(org.apache.hadoop.hive.ql.exec.ReduceSinkOperator) TezWork(org.apache.hadoop.hive.ql.plan.TezWork)
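
The edge-creation logic near the end of process() is where EdgeType actually comes into play: GenTezUtils.determineEdgeType picks the edge type from the two work items and the ReduceSinkOperator, and the result is wrapped in a TezEdgeProperty before the vertices are connected. Below is a minimal sketch of just that pattern, reusing only calls that appear in the example above; the helper method and its name are illustrative and are not part of GenTezWork.

private void connectViaReduceSink(BaseWork work, BaseWork followingWork, ReduceWork rWork,
        ReduceSinkOperator rs, TezWork tezWork, GenTezProcContext context, long bytesPerReducer) {
    if (context.connectedReduceSinks.contains(rs)) {
        // this reduce sink has already been wired up on an earlier pass
        return;
    }
    // pick SIMPLE/BROADCAST/CUSTOM/... based on the two work items and the reduce sink
    EdgeType edgeType = GenTezUtils.determineEdgeType(work, followingWork, rs);
    TezEdgeProperty edgeProp;
    if (rWork.isAutoReduceParallelism()) {
        // auto reduce parallelism edges carry min/max reducer counts and a bytes-per-reducer hint
        edgeProp = new TezEdgeProperty(context.conf, edgeType, true,
                rWork.getMinReduceTasks(), rWork.getMaxReduceTasks(), bytesPerReducer);
    } else {
        edgeProp = new TezEdgeProperty(edgeType);
    }
    tezWork.connect(work, followingWork, edgeProp);
    context.connectedReduceSinks.add(rs);
}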

Example 2 with EdgeType

use of org.apache.hadoop.hive.ql.plan.TezEdgeProperty.EdgeType in project hive by apache.

the class DagUtils method createEdge.

/**
   * Given a vertex group and a child vertex, createEdge will create a
   * GroupInputEdge between them.
   *
   * @param group The parent VertexGroup
   * @param vConf The job conf of one of the parent (grouped) vertices
   * @param w The child vertex
   * @param edgeProp the edge property of the connection between the two
   * endpoints.
   * @param vertexType the vertex type of the child vertex
   */
@SuppressWarnings("rawtypes")
public GroupInputEdge createEdge(VertexGroup group, JobConf vConf, Vertex w, TezEdgeProperty edgeProp, VertexType vertexType) throws IOException {
    Class mergeInputClass;
    LOG.info("Creating Edge between " + group.getGroupName() + " and " + w.getName());
    EdgeType edgeType = edgeProp.getEdgeType();
    switch(edgeType) {
        case BROADCAST_EDGE:
            mergeInputClass = ConcatenatedMergedKeyValueInput.class;
            break;
        case CUSTOM_EDGE:
            {
                mergeInputClass = ConcatenatedMergedKeyValueInput.class;
                int numBuckets = edgeProp.getNumBuckets();
                CustomVertexConfiguration vertexConf = new CustomVertexConfiguration(numBuckets, vertexType);
                DataOutputBuffer dob = new DataOutputBuffer();
                vertexConf.write(dob);
                VertexManagerPluginDescriptor desc = VertexManagerPluginDescriptor.create(CustomPartitionVertex.class.getName());
                byte[] userPayloadBytes = dob.getData();
                ByteBuffer userPayload = ByteBuffer.wrap(userPayloadBytes);
                desc.setUserPayload(UserPayload.create(userPayload));
                w.setVertexManagerPlugin(desc);
                break;
            }
        case CUSTOM_SIMPLE_EDGE:
            mergeInputClass = ConcatenatedMergedKeyValueInput.class;
            break;
        case SIMPLE_EDGE:
            setupAutoReducerParallelism(edgeProp, w);
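            // note: no break here, so SIMPLE_EDGE falls through and picks up the default TezMergedLogicalInput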
        default:
            mergeInputClass = TezMergedLogicalInput.class;
            break;
    }
    return GroupInputEdge.create(group, w, createEdgeProperty(edgeProp, vConf), InputDescriptor.create(mergeInputClass.getName()));
}
Also used : ConcatenatedMergedKeyValueInput(org.apache.tez.runtime.library.input.ConcatenatedMergedKeyValueInput) DataOutputBuffer(org.apache.hadoop.io.DataOutputBuffer) VertexManagerPluginDescriptor(org.apache.tez.dag.api.VertexManagerPluginDescriptor) EdgeType(org.apache.hadoop.hive.ql.plan.TezEdgeProperty.EdgeType) ByteBuffer(java.nio.ByteBuffer)
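
The switch above is essentially a mapping from EdgeType to the merged-input class used by the vertex group, with CUSTOM_EDGE additionally installing a CustomPartitionVertex manager on the child vertex. A condensed, hedged sketch of only the class selection is shown below; the standalone helper name is assumed for illustration and does not exist in DagUtils.

private static Class<?> mergedInputClassFor(EdgeType edgeType) {
    switch (edgeType) {
        case BROADCAST_EDGE:
        case CUSTOM_EDGE:
        case CUSTOM_SIMPLE_EDGE:
            // unsorted edges concatenate the incoming key/value streams
            return ConcatenatedMergedKeyValueInput.class;
        case SIMPLE_EDGE:
        default:
            // sorted (shuffle) edges use Tez's generic merged logical input
            return TezMergedLogicalInput.class;
    }
}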

Example 3 with EdgeType

use of org.apache.hadoop.hive.ql.plan.TezEdgeProperty.EdgeType in project hive by apache.

the class DagUtils method createEdgeProperty.

/*
   * Helper function to create an edge property from an edge type.
   */
private EdgeProperty createEdgeProperty(TezEdgeProperty edgeProp, Configuration conf) throws IOException {
    MRHelpers.translateMRConfToTez(conf);
    String keyClass = conf.get(TezRuntimeConfiguration.TEZ_RUNTIME_KEY_CLASS);
    String valClass = conf.get(TezRuntimeConfiguration.TEZ_RUNTIME_VALUE_CLASS);
    String partitionerClassName = conf.get("mapred.partitioner.class");
    Map<String, String> partitionerConf;
    EdgeType edgeType = edgeProp.getEdgeType();
    switch(edgeType) {
        case BROADCAST_EDGE:
            UnorderedKVEdgeConfig et1Conf = UnorderedKVEdgeConfig.newBuilder(keyClass, valClass).setFromConfiguration(conf).setKeySerializationClass(TezBytesWritableSerialization.class.getName(), null).setValueSerializationClass(TezBytesWritableSerialization.class.getName(), null).build();
            return et1Conf.createDefaultBroadcastEdgeProperty();
        case CUSTOM_EDGE:
            assert partitionerClassName != null;
            partitionerConf = createPartitionerConf(partitionerClassName, conf);
            UnorderedPartitionedKVEdgeConfig et2Conf = UnorderedPartitionedKVEdgeConfig.newBuilder(keyClass, valClass, MRPartitioner.class.getName(), partitionerConf).setFromConfiguration(conf).setKeySerializationClass(TezBytesWritableSerialization.class.getName(), null).setValueSerializationClass(TezBytesWritableSerialization.class.getName(), null).build();
            EdgeManagerPluginDescriptor edgeDesc = EdgeManagerPluginDescriptor.create(CustomPartitionEdge.class.getName());
            CustomEdgeConfiguration edgeConf = new CustomEdgeConfiguration(edgeProp.getNumBuckets(), null);
            DataOutputBuffer dob = new DataOutputBuffer();
            edgeConf.write(dob);
            byte[] userPayload = dob.getData();
            edgeDesc.setUserPayload(UserPayload.create(ByteBuffer.wrap(userPayload)));
            return et2Conf.createDefaultCustomEdgeProperty(edgeDesc);
        case CUSTOM_SIMPLE_EDGE:
            assert partitionerClassName != null;
            partitionerConf = createPartitionerConf(partitionerClassName, conf);
            UnorderedPartitionedKVEdgeConfig et3Conf = UnorderedPartitionedKVEdgeConfig.newBuilder(keyClass, valClass, MRPartitioner.class.getName(), partitionerConf).setFromConfiguration(conf).setKeySerializationClass(TezBytesWritableSerialization.class.getName(), null).setValueSerializationClass(TezBytesWritableSerialization.class.getName(), null).build();
            return et3Conf.createDefaultEdgeProperty();
        case SIMPLE_EDGE:
        default:
            assert partitionerClassName != null;
            partitionerConf = createPartitionerConf(partitionerClassName, conf);
            OrderedPartitionedKVEdgeConfig et4Conf = OrderedPartitionedKVEdgeConfig.newBuilder(keyClass, valClass, MRPartitioner.class.getName(), partitionerConf).setFromConfiguration(conf).setKeySerializationClass(TezBytesWritableSerialization.class.getName(), TezBytesComparator.class.getName(), null).setValueSerializationClass(TezBytesWritableSerialization.class.getName(), null).build();
            return et4Conf.createDefaultEdgeProperty();
    }
}
Also used : OrderedPartitionedKVEdgeConfig(org.apache.tez.runtime.library.conf.OrderedPartitionedKVEdgeConfig) MRPartitioner(org.apache.tez.mapreduce.partition.MRPartitioner) EdgeType(org.apache.hadoop.hive.ql.plan.TezEdgeProperty.EdgeType) TezBytesComparator(org.apache.tez.runtime.library.common.comparator.TezBytesComparator) UnorderedKVEdgeConfig(org.apache.tez.runtime.library.conf.UnorderedKVEdgeConfig) EdgeManagerPluginDescriptor(org.apache.tez.dag.api.EdgeManagerPluginDescriptor) DataOutputBuffer(org.apache.hadoop.io.DataOutputBuffer) UnorderedPartitionedKVEdgeConfig(org.apache.tez.runtime.library.conf.UnorderedPartitionedKVEdgeConfig) TezBytesWritableSerialization(org.apache.tez.runtime.library.common.serializer.TezBytesWritableSerialization)
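
The EdgeProperty built here is what ultimately gets attached to the DAG. Assuming we are inside DagUtils (createEdgeProperty is private) and that two vertices v1 and v2 already exist, the remaining wiring is just a Tez Edge, which Example 4 below performs via utils.createEdge and dag.addEdge:

// sketch only: v1, v2, edgeProp and vConf are assumed to be in scope
EdgeProperty ep = createEdgeProperty(edgeProp, vConf);
Edge e = Edge.create(v1, v2, ep);   // org.apache.tez.dag.api.Edge
dag.addEdge(e);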

Example 4 with EdgeType

use of org.apache.hadoop.hive.ql.plan.TezEdgeProperty.EdgeType in project hive by apache.

the class TezTask method build.

DAG build(JobConf conf, TezWork work, Path scratchDir, LocalResource appJarLr, List<LocalResource> additionalLr, Context ctx) throws Exception {
    perfLogger.PerfLogBegin(CLASS_NAME, PerfLogger.TEZ_BUILD_DAG);
    // getAllWork returns a topologically sorted list, which we use to make
    // sure that vertices are created before they are used in edges.
    List<BaseWork> ws = work.getAllWork();
    Collections.reverse(ws);
    FileSystem fs = scratchDir.getFileSystem(conf);
    // the name of the dag is what is displayed in the AM/Job UI
    String dagName = utils.createDagName(conf, queryPlan);
    LOG.info("Dag name: " + dagName);
    DAG dag = DAG.create(dagName);
    // set some info for the query
    JSONObject json = new JSONObject(new LinkedHashMap()).put("context", "Hive").put("description", ctx.getCmd());
    String dagInfo = json.toString();
    if (LOG.isDebugEnabled()) {
        LOG.debug("DagInfo: " + dagInfo);
    }
    dag.setDAGInfo(dagInfo);
    dag.setCredentials(conf.getCredentials());
    setAccessControlsForCurrentUser(dag, queryPlan.getQueryId(), conf);
    for (BaseWork w : ws) {
        boolean isFinal = work.getLeaves().contains(w);
        // translate work to vertex
        perfLogger.PerfLogBegin(CLASS_NAME, PerfLogger.TEZ_CREATE_VERTEX + w.getName());
        if (w instanceof UnionWork) {
            // Special case for unions. These items translate to VertexGroups
            List<BaseWork> unionWorkItems = new LinkedList<BaseWork>();
            List<BaseWork> children = new LinkedList<BaseWork>();
            // split the children into vertices that make up the union and vertices that are
            // proper children of the union
            for (BaseWork v : work.getChildren(w)) {
                EdgeType type = work.getEdgeProperty(w, v).getEdgeType();
                if (type == EdgeType.CONTAINS) {
                    unionWorkItems.add(v);
                } else {
                    children.add(v);
                }
            }
            // create VertexGroup
            Vertex[] vertexArray = new Vertex[unionWorkItems.size()];
            int i = 0;
            for (BaseWork v : unionWorkItems) {
                vertexArray[i++] = workToVertex.get(v);
            }
            VertexGroup group = dag.createVertexGroup(w.getName(), vertexArray);
            // For a vertex group, all Outputs use the same Key-class, Val-class and partitioner.
            // Pick any one source vertex to figure out the Edge configuration.
            JobConf parentConf = workToConf.get(unionWorkItems.get(0));
            // now hook up the children
            for (BaseWork v : children) {
                // finally we can create the grouped edge
                GroupInputEdge e = utils.createEdge(group, parentConf, workToVertex.get(v), work.getEdgeProperty(w, v), work.getVertexType(v));
                dag.addEdge(e);
            }
        } else {
            // Regular vertices
            JobConf wxConf = utils.initializeVertexConf(conf, ctx, w);
            Vertex wx = utils.createVertex(wxConf, w, scratchDir, appJarLr, additionalLr, fs, ctx, !isFinal, work, work.getVertexType(w));
            if (w.getReservedMemoryMB() > 0) {
                // If reservedMemoryMB is set, make memory allocation fraction adjustment as needed
                double frac = DagUtils.adjustMemoryReserveFraction(w.getReservedMemoryMB(), super.conf);
                LOG.info("Setting " + TEZ_MEMORY_RESERVE_FRACTION + " to " + frac);
                wx.setConf(TEZ_MEMORY_RESERVE_FRACTION, Double.toString(frac));
            }
            // Otherwise just leave it up to Tez to decide how much memory to allocate
            dag.addVertex(wx);
            utils.addCredentials(w, dag);
            perfLogger.PerfLogEnd(CLASS_NAME, PerfLogger.TEZ_CREATE_VERTEX + w.getName());
            workToVertex.put(w, wx);
            workToConf.put(w, wxConf);
            // add all dependencies (i.e.: edges) to the graph
            for (BaseWork v : work.getChildren(w)) {
                assert workToVertex.containsKey(v);
                Edge e = null;
                TezEdgeProperty edgeProp = work.getEdgeProperty(w, v);
                e = utils.createEdge(wxConf, wx, workToVertex.get(v), edgeProp, work.getVertexType(v));
                dag.addEdge(e);
            }
        }
    }
    // Clear the work map after build. TODO: remove caching instead?
    Utilities.clearWorkMap(conf);
    perfLogger.PerfLogEnd(CLASS_NAME, PerfLogger.TEZ_BUILD_DAG);
    return dag;
}
Also used : Vertex(org.apache.tez.dag.api.Vertex) TezEdgeProperty(org.apache.hadoop.hive.ql.plan.TezEdgeProperty) UnionWork(org.apache.hadoop.hive.ql.plan.UnionWork) DAG(org.apache.tez.dag.api.DAG) EdgeType(org.apache.hadoop.hive.ql.plan.TezEdgeProperty.EdgeType) LinkedList(java.util.LinkedList) LinkedHashMap(java.util.LinkedHashMap) VertexGroup(org.apache.tez.dag.api.VertexGroup) JSONObject(org.json.JSONObject) FileSystem(org.apache.hadoop.fs.FileSystem) GroupInputEdge(org.apache.tez.dag.api.GroupInputEdge) BaseWork(org.apache.hadoop.hive.ql.plan.BaseWork) JobConf(org.apache.hadoop.mapred.JobConf) Edge(org.apache.tez.dag.api.Edge) GroupInputEdge(org.apache.tez.dag.api.GroupInputEdge)
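
In the union branch above, the CONTAINS edge type distinguishes the vertices that form the union (grouped into a VertexGroup) from vertices that are real downstream consumers (later connected with a GroupInputEdge). A small hedged sketch of that partitioning step, using only the TezWork/BaseWork calls from the example; the helper name is illustrative.

private static void splitUnionChildren(TezWork work, BaseWork union,
        List<BaseWork> unionWorkItems, List<BaseWork> children) {
    for (BaseWork v : work.getChildren(union)) {
        EdgeType type = work.getEdgeProperty(union, v).getEdgeType();
        if (type == EdgeType.CONTAINS) {
            // part of the union itself: becomes a member of the VertexGroup
            unionWorkItems.add(v);
        } else {
            // a proper child of the union: later connected via a GroupInputEdge
            children.add(v);
        }
    }
}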

Example 5 with EdgeType

use of org.apache.hadoop.hive.ql.plan.TezEdgeProperty.EdgeType in project hive by apache.

the class ReduceSinkMapJoinProc method processReduceSinkToHashJoin.

public static Object processReduceSinkToHashJoin(ReduceSinkOperator parentRS, MapJoinOperator mapJoinOp, GenTezProcContext context) throws SemanticException {
    // remove the tag for in-memory side of mapjoin
    parentRS.getConf().setSkipTag(true);
    parentRS.setSkipTag(true);
    // Mark this small table as being processed
    if (mapJoinOp.getConf().isDynamicPartitionHashJoin()) {
        context.mapJoinToUnprocessedSmallTableReduceSinks.get(mapJoinOp).remove(parentRS);
    }
    List<BaseWork> mapJoinWork = null;
    /*
     *  if there was a pre-existing work generated for the big-table mapjoin side,
     *  we need to hook the work generated for the RS (associated with the RS-MJ pattern)
     *  with the pre-existing work.
     *
     *  Otherwise, we need to record that the mapjoin op
     *  is to be linked to the RS work (associated with the RS-MJ pattern).
     *
     */
    mapJoinWork = context.mapJoinWorkMap.get(mapJoinOp);
    BaseWork parentWork = getMapJoinParentWork(context, parentRS);
    // set the link between mapjoin and parent vertex
    int pos = context.mapJoinParentMap.get(mapJoinOp).indexOf(parentRS);
    if (pos == -1) {
        throw new SemanticException("Cannot find position of parent in mapjoin");
    }
    MapJoinDesc joinConf = mapJoinOp.getConf();
    long keyCount = Long.MAX_VALUE, rowCount = Long.MAX_VALUE, bucketCount = 1;
    long tableSize = Long.MAX_VALUE;
    Statistics stats = parentRS.getStatistics();
    if (stats != null) {
        keyCount = rowCount = stats.getNumRows();
        if (keyCount <= 0) {
            keyCount = rowCount = Long.MAX_VALUE;
        }
        tableSize = stats.getDataSize();
        ArrayList<String> keyCols = parentRS.getConf().getOutputKeyColumnNames();
        if (keyCols != null && !keyCols.isEmpty()) {
            // See if we can arrive at a smaller number using distinct stats from key columns.
            long maxKeyCount = 1;
            String prefix = Utilities.ReduceField.KEY.toString();
            for (String keyCol : keyCols) {
                ExprNodeDesc realCol = parentRS.getColumnExprMap().get(prefix + "." + keyCol);
                ColStatistics cs = StatsUtils.getColStatisticsFromExpression(context.conf, stats, realCol);
                if (cs == null || cs.getCountDistint() <= 0) {
                    maxKeyCount = Long.MAX_VALUE;
                    break;
                }
                maxKeyCount *= cs.getCountDistint();
                if (maxKeyCount >= keyCount) {
                    break;
                }
            }
            keyCount = Math.min(maxKeyCount, keyCount);
        }
        if (joinConf.isBucketMapJoin()) {
            OpTraits opTraits = mapJoinOp.getOpTraits();
            bucketCount = (opTraits == null) ? -1 : opTraits.getNumBuckets();
            if (bucketCount > 0) {
                // We cannot obtain a better estimate without CustomPartitionVertex providing it
                // to us somehow; in which case using statistics would be completely unnecessary.
                keyCount /= bucketCount;
                tableSize /= bucketCount;
            }
        } else if (joinConf.isDynamicPartitionHashJoin()) {
            // For dynamic partitioned hash join, assuming table is split evenly among the reduce tasks.
            bucketCount = parentRS.getConf().getNumReducers();
            keyCount /= bucketCount;
            tableSize /= bucketCount;
        }
    }
    if (keyCount == 0) {
        keyCount = 1;
    }
    if (tableSize == 0) {
        tableSize = 1;
    }
    LOG.info("Mapjoin " + mapJoinOp + "(bucket map join = )" + joinConf.isBucketMapJoin() + ", pos: " + pos + " --> " + parentWork.getName() + " (" + keyCount + " keys estimated from " + rowCount + " rows, " + bucketCount + " buckets)");
    joinConf.getParentToInput().put(pos, parentWork.getName());
    if (keyCount != Long.MAX_VALUE) {
        joinConf.getParentKeyCounts().put(pos, keyCount);
    }
    joinConf.getParentDataSizes().put(pos, tableSize);
    int numBuckets = -1;
    EdgeType edgeType = EdgeType.BROADCAST_EDGE;
    if (joinConf.isBucketMapJoin()) {
        numBuckets = (Integer) joinConf.getBigTableBucketNumMapping().values().toArray()[0];
        /*
       * Here, we can be in one of 4 states.
       *
       * 1. If map join work is null implies that we have not yet traversed the big table side. We
       * just need to see if we can find a reduce sink operator in the big table side. This would
       * imply a reduce side operation.
       *
       * 2. If we don't find a reducesink in 1 it has to be the case that it is a map side operation.
       *
       * 3. If we have already created a work item for the big table side, we need to see if we can
       * find a table scan operator in the big table side. This would imply a map side operation.
       *
       * 4. If we don't find a table scan operator, it has to be a reduce side operation.
       */
        if (mapJoinWork == null) {
            Operator<?> rootOp = OperatorUtils.findSingleOperatorUpstreamJoinAccounted(mapJoinOp.getParentOperators().get(joinConf.getPosBigTable()), ReduceSinkOperator.class);
            if (rootOp == null) {
                // likely we found a table scan operator
                edgeType = EdgeType.CUSTOM_EDGE;
            } else {
                // we have found a reduce sink
                edgeType = EdgeType.CUSTOM_SIMPLE_EDGE;
            }
        } else {
            Operator<?> rootOp = OperatorUtils.findSingleOperatorUpstreamJoinAccounted(mapJoinOp.getParentOperators().get(joinConf.getPosBigTable()), TableScanOperator.class);
            if (rootOp != null) {
                // likely we found a table scan operator
                edgeType = EdgeType.CUSTOM_EDGE;
            } else {
                // we have found a reduce sink
                edgeType = EdgeType.CUSTOM_SIMPLE_EDGE;
            }
        }
    } else if (mapJoinOp.getConf().isDynamicPartitionHashJoin()) {
        edgeType = EdgeType.CUSTOM_SIMPLE_EDGE;
    }
    if (edgeType == EdgeType.CUSTOM_EDGE) {
        // disable auto parallelism for bucket map joins
        parentRS.getConf().setReducerTraits(EnumSet.of(FIXED));
    }
    TezEdgeProperty edgeProp = new TezEdgeProperty(null, edgeType, numBuckets);
    if (mapJoinWork != null) {
        for (BaseWork myWork : mapJoinWork) {
            // link the work with the work associated with the reduce sink that triggered this rule
            TezWork tezWork = context.currentTask.getWork();
            LOG.debug("connecting " + parentWork.getName() + " with " + myWork.getName());
            tezWork.connect(parentWork, myWork, edgeProp);
            if (edgeType == EdgeType.CUSTOM_EDGE) {
                tezWork.setVertexType(myWork, VertexType.INITIALIZED_EDGES);
            }
            ReduceSinkOperator r = null;
            if (context.connectedReduceSinks.contains(parentRS)) {
                LOG.debug("Cloning reduce sink for multi-child broadcast edge");
                // we've already set this one up. Need to clone for the next work.
                r = (ReduceSinkOperator) OperatorFactory.getAndMakeChild(parentRS.getCompilationOpContext(), (ReduceSinkDesc) parentRS.getConf().clone(), new RowSchema(parentRS.getSchema()), parentRS.getParentOperators());
                context.clonedReduceSinks.add(r);
            } else {
                r = parentRS;
            }
            // remember the output name of the reduce sink
            r.getConf().setOutputName(myWork.getName());
            context.connectedReduceSinks.add(r);
        }
    }
    // remember in case we need to connect additional work later
    Map<BaseWork, TezEdgeProperty> linkWorkMap = null;
    if (context.linkOpWithWorkMap.containsKey(mapJoinOp)) {
        linkWorkMap = context.linkOpWithWorkMap.get(mapJoinOp);
    } else {
        linkWorkMap = new HashMap<BaseWork, TezEdgeProperty>();
    }
    linkWorkMap.put(parentWork, edgeProp);
    context.linkOpWithWorkMap.put(mapJoinOp, linkWorkMap);
    List<ReduceSinkOperator> reduceSinks = context.linkWorkWithReduceSinkMap.get(parentWork);
    if (reduceSinks == null) {
        reduceSinks = new ArrayList<ReduceSinkOperator>();
    }
    reduceSinks.add(parentRS);
    context.linkWorkWithReduceSinkMap.put(parentWork, reduceSinks);
    // create the dummy operators
    List<Operator<?>> dummyOperators = new ArrayList<Operator<?>>();
    // create a new operator: HashTableDummyOperator, which shares the table desc
    HashTableDummyDesc desc = new HashTableDummyDesc();
    @SuppressWarnings("unchecked") HashTableDummyOperator dummyOp = (HashTableDummyOperator) OperatorFactory.get(parentRS.getCompilationOpContext(), desc);
    TableDesc tbl;
    // need to create the correct table descriptor for key/value
    RowSchema rowSchema = parentRS.getParentOperators().get(0).getSchema();
    tbl = PlanUtils.getReduceValueTableDesc(PlanUtils.getFieldSchemasFromRowSchema(rowSchema, ""));
    dummyOp.getConf().setTbl(tbl);
    Map<Byte, List<ExprNodeDesc>> keyExprMap = mapJoinOp.getConf().getKeys();
    List<ExprNodeDesc> keyCols = keyExprMap.get(Byte.valueOf((byte) 0));
    StringBuilder keyOrder = new StringBuilder();
    StringBuilder keyNullOrder = new StringBuilder();
    for (ExprNodeDesc k : keyCols) {
        keyOrder.append("+");
        keyNullOrder.append("a");
    }
    TableDesc keyTableDesc = PlanUtils.getReduceKeyTableDesc(PlanUtils.getFieldSchemasFromColumnList(keyCols, "mapjoinkey"), keyOrder.toString(), keyNullOrder.toString());
    mapJoinOp.getConf().setKeyTableDesc(keyTableDesc);
    // let the dummy op be the parent of mapjoin op
    mapJoinOp.replaceParent(parentRS, dummyOp);
    List<Operator<? extends OperatorDesc>> dummyChildren = new ArrayList<Operator<? extends OperatorDesc>>();
    dummyChildren.add(mapJoinOp);
    dummyOp.setChildOperators(dummyChildren);
    dummyOperators.add(dummyOp);
    // cut the operator tree so as to not retain connections from the parent RS downstream
    List<Operator<? extends OperatorDesc>> childOperators = parentRS.getChildOperators();
    int childIndex = childOperators.indexOf(mapJoinOp);
    childOperators.remove(childIndex);
    // the "work" needs to know about the dummy operators; they have to be separately initialized
    // at task startup
    if (mapJoinWork != null) {
        for (BaseWork myWork : mapJoinWork) {
            LOG.debug("adding dummy op to work " + myWork.getName() + " from MJ work: " + dummyOp);
            myWork.addDummyOp(dummyOp);
        }
    }
    if (context.linkChildOpWithDummyOp.containsKey(mapJoinOp)) {
        for (Operator<?> op : context.linkChildOpWithDummyOp.get(mapJoinOp)) {
            dummyOperators.add(op);
        }
    }
    context.linkChildOpWithDummyOp.put(mapJoinOp, dummyOperators);
    return true;
}
Also used : ReduceSinkOperator(org.apache.hadoop.hive.ql.exec.ReduceSinkOperator) MapJoinOperator(org.apache.hadoop.hive.ql.exec.MapJoinOperator) TableScanOperator(org.apache.hadoop.hive.ql.exec.TableScanOperator) Operator(org.apache.hadoop.hive.ql.exec.Operator) HashTableDummyOperator(org.apache.hadoop.hive.ql.exec.HashTableDummyOperator) OpTraits(org.apache.hadoop.hive.ql.plan.OpTraits) TezEdgeProperty(org.apache.hadoop.hive.ql.plan.TezEdgeProperty) ArrayList(java.util.ArrayList) ColStatistics(org.apache.hadoop.hive.ql.plan.ColStatistics) ArrayList(java.util.ArrayList) List(java.util.List) ExprNodeDesc(org.apache.hadoop.hive.ql.plan.ExprNodeDesc) BaseWork(org.apache.hadoop.hive.ql.plan.BaseWork) SemanticException(org.apache.hadoop.hive.ql.parse.SemanticException) HashTableDummyDesc(org.apache.hadoop.hive.ql.plan.HashTableDummyDesc) RowSchema(org.apache.hadoop.hive.ql.exec.RowSchema) MapJoinDesc(org.apache.hadoop.hive.ql.plan.MapJoinDesc) HashTableDummyOperator(org.apache.hadoop.hive.ql.exec.HashTableDummyOperator) Statistics(org.apache.hadoop.hive.ql.plan.Statistics) ColStatistics(org.apache.hadoop.hive.ql.plan.ColStatistics) EdgeType(org.apache.hadoop.hive.ql.plan.TezEdgeProperty.EdgeType) ReduceSinkOperator(org.apache.hadoop.hive.ql.exec.ReduceSinkOperator) TableDesc(org.apache.hadoop.hive.ql.plan.TableDesc) OperatorDesc(org.apache.hadoop.hive.ql.plan.OperatorDesc) TezWork(org.apache.hadoop.hive.ql.plan.TezWork)
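
Two EdgeType decisions drive this example: bucket map joins use CUSTOM_EDGE when the big-table side is a map-side operation and CUSTOM_SIMPLE_EDGE when it is reduce-side, while dynamic partitioned hash joins always use CUSTOM_SIMPLE_EDGE; everything else stays a BROADCAST_EDGE. The chosen type is then packed into a TezEdgeProperty together with the bucket count. A condensed sketch under those assumptions follows; the helper name and the bigTableIsMapSide flag are illustrative (the example derives the latter by probing the big-table branch with OperatorUtils.findSingleOperatorUpstreamJoinAccounted).

private static TezEdgeProperty smallTableEdge(MapJoinDesc joinConf,
        boolean bigTableIsMapSide, int numBuckets) {
    // plain mapjoin: the small table is simply broadcast
    EdgeType edgeType = EdgeType.BROADCAST_EDGE;
    if (joinConf.isBucketMapJoin()) {
        // bucketed joins need the custom (partitioned) edges
        edgeType = bigTableIsMapSide ? EdgeType.CUSTOM_EDGE : EdgeType.CUSTOM_SIMPLE_EDGE;
    } else if (joinConf.isDynamicPartitionHashJoin()) {
        // dynamic partitioned hash join shuffles the small table to the reducers
        edgeType = EdgeType.CUSTOM_SIMPLE_EDGE;
    }
    // same constructor as the example: (HiveConf) null, edge type, number of buckets
    return new TezEdgeProperty(null, edgeType, numBuckets);
}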

Aggregations

EdgeType (org.apache.hadoop.hive.ql.plan.TezEdgeProperty.EdgeType)6 ReduceSinkOperator (org.apache.hadoop.hive.ql.exec.ReduceSinkOperator)3 BaseWork (org.apache.hadoop.hive.ql.plan.BaseWork)3 TezEdgeProperty (org.apache.hadoop.hive.ql.plan.TezEdgeProperty)3 ArrayList (java.util.ArrayList)2 LinkedList (java.util.LinkedList)2 HashTableDummyOperator (org.apache.hadoop.hive.ql.exec.HashTableDummyOperator)2 MapJoinOperator (org.apache.hadoop.hive.ql.exec.MapJoinOperator)2 Operator (org.apache.hadoop.hive.ql.exec.Operator)2 RowSchema (org.apache.hadoop.hive.ql.exec.RowSchema)2 TezWork (org.apache.hadoop.hive.ql.plan.TezWork)2 UnionWork (org.apache.hadoop.hive.ql.plan.UnionWork)2 DataOutputBuffer (org.apache.hadoop.io.DataOutputBuffer)2 ByteBuffer (java.nio.ByteBuffer)1 LinkedHashMap (java.util.LinkedHashMap)1 List (java.util.List)1 FileSystem (org.apache.hadoop.fs.FileSystem)1 CommonMergeJoinOperator (org.apache.hadoop.hive.ql.exec.CommonMergeJoinOperator)1 DummyStoreOperator (org.apache.hadoop.hive.ql.exec.DummyStoreOperator)1 TableScanOperator (org.apache.hadoop.hive.ql.exec.TableScanOperator)1