
Example 11 with QueryWorkUnit

Use of org.apache.drill.exec.work.QueryWorkUnit in project drill by axbaretto.

The class Foreman, method runPhysicalPlan.

private void runPhysicalPlan(final PhysicalPlan plan) throws ExecutionSetupException {
    validatePlan(plan);
    queryRM.visitAbstractPlan(plan);
    final QueryWorkUnit work = getQueryWorkUnit(plan);
    queryRM.visitPhysicalPlan(work);
    queryRM.setCost(plan.totalCost());
    queryManager.setTotalCost(plan.totalCost());
    work.applyPlan(drillbitContext.getPlanReader());
    logWorkUnit(work);
    fragmentsRunner.setFragmentsInfo(work.getFragments(), work.getRootFragment(), work.getRootOperator());
    startQueryProcessing();
}
Also used: QueryWorkUnit (org.apache.drill.exec.work.QueryWorkUnit)
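
The Foreman method above follows a fixed order: validate the plan, let the resource manager visit the abstract and physical plans, record the cost, call applyPlan to materialize the per-fragment plans, and only then hand fragments to the runner. The sketch below is not Drill code; it uses hypothetical stand-in types to make that ordering constraint explicit.

// Not Drill code: a minimal stand-in sketch of the ordering that runPhysicalPlan relies on.
// QueryWorkUnit's fragments are only usable after applyPlan(...) has materialized the
// per-fragment plans; the hypothetical WorkUnit below makes that ordering explicit.
import java.util.List;

public class WorkUnitLifecycleSketch {

    // Hypothetical stand-in for QueryWorkUnit: holds fragments, requires applyPlan() first.
    static class WorkUnit {
        private final List<String> fragments;
        private boolean applied;

        WorkUnit(List<String> fragments) {
            this.fragments = fragments;
        }

        // Stands in for QueryWorkUnit.applyPlan(planReader).
        void applyPlan() {
            applied = true;
        }

        List<String> getFragments() {
            if (!applied) {
                throw new IllegalStateException("applyPlan must be called before fragments are used");
            }
            return fragments;
        }
    }

    public static void main(String[] args) {
        WorkUnit work = new WorkUnit(List.of("root-fragment", "leaf-fragment-1", "leaf-fragment-2"));
        work.applyPlan();                                   // mirrors work.applyPlan(planReader) above
        work.getFragments().forEach(System.out::println);   // only now hand fragments to the runner
    }
}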

Example 12 with QueryWorkUnit

Use of org.apache.drill.exec.work.QueryWorkUnit in project drill by axbaretto.

The class TestPartitionSender, method testThreadsHelper.

/**
 * Core of the test: parallelizes the plan into a QueryWorkUnit, then builds and exercises a
 * partition sender for every fragment whose plan contains a hash-partition-sender.
 * @param hashToRandomExchange the HashToRandomExchange operator under test
 * @param drillbitContext drillbit context supplying the endpoints and plan reader
 * @param options option list passed to the parallelizer
 * @param incoming incoming record batch fed to the partition sender
 * @param registry function implementation registry for the fragment context
 * @param planReader plan reader used to deserialize the per-fragment plans
 * @param planningSet planning set built for the plan under test
 * @param rootFragment root fragment of the parsed plan
 * @param expectedThreadsCount expected number of partitioner threads
 * @throws Exception if fragment setup or execution fails
 */
private void testThreadsHelper(HashToRandomExchange hashToRandomExchange, DrillbitContext drillbitContext, OptionList options, RecordBatch incoming, FunctionImplementationRegistry registry, PhysicalPlanReader planReader, PlanningSet planningSet, Fragment rootFragment, int expectedThreadsCount) throws Exception {
    final QueryContextInformation queryContextInfo = Utilities.createQueryContextInfo("dummySchemaName", "938ea2d9-7cb9-4baf-9414-a5a0b7777e8e");
    final QueryWorkUnit qwu = PARALLELIZER.getFragments(options, drillbitContext.getEndpoint(), QueryId.getDefaultInstance(), drillbitContext.getBits(), rootFragment, USER_SESSION, queryContextInfo);
    qwu.applyPlan(planReader);
    final List<MinorFragmentEndpoint> mfEndPoints = PhysicalOperatorUtil.getIndexOrderedEndpoints(Lists.newArrayList(drillbitContext.getBits()));
    for (PlanFragment planFragment : qwu.getFragments()) {
        if (!planFragment.getFragmentJson().contains("hash-partition-sender")) {
            continue;
        }
        MockPartitionSenderRootExec partionSenderRootExec = null;
        FragmentContextImpl context = null;
        try {
            context = new FragmentContextImpl(drillbitContext, planFragment, null, registry);
            final int majorFragmentId = planFragment.getHandle().getMajorFragmentId();
            final HashPartitionSender partSender = new HashPartitionSender(majorFragmentId, hashToRandomExchange, hashToRandomExchange.getExpression(), mfEndPoints);
            partionSenderRootExec = new MockPartitionSenderRootExec(context, incoming, partSender);
            assertEquals("Number of threads calculated", expectedThreadsCount, partionSenderRootExec.getNumberPartitions());
            partionSenderRootExec.createPartitioner();
            final PartitionerDecorator partDecor = partionSenderRootExec.getPartitioner();
            assertNotNull(partDecor);
            List<Partitioner> partitioners = partDecor.getPartitioners();
            assertNotNull(partitioners);
            final int actualThreads = DRILLBITS_COUNT > expectedThreadsCount ? expectedThreadsCount : DRILLBITS_COUNT;
            assertEquals("Number of partitioners", actualThreads, partitioners.size());
            for (int i = 0; i < mfEndPoints.size(); i++) {
                assertNotNull("PartitionOutgoingBatch", partDecor.getOutgoingBatches(i));
            }
            // check the distribution of PartitionOutgoingBatch - counts should differ by at most one
            boolean isFirst = true;
            int prevBatchCountSize = 0;
            int batchCountSize = 0;
            for (Partitioner part : partitioners) {
                final List<PartitionOutgoingBatch> outBatch = (List<PartitionOutgoingBatch>) part.getOutgoingBatches();
                batchCountSize = outBatch.size();
                if (!isFirst) {
                    assertTrue(Math.abs(batchCountSize - prevBatchCountSize) <= 1);
                } else {
                    isFirst = false;
                }
                prevBatchCountSize = batchCountSize;
            }
            partionSenderRootExec.getStats().startProcessing();
            try {
                partDecor.partitionBatch(incoming);
            } finally {
                partionSenderRootExec.getStats().stopProcessing();
            }
            if (actualThreads == 1) {
                assertEquals("With single thread parent and child waitNanos should match", partitioners.get(0).getStats().getWaitNanos(), partionSenderRootExec.getStats().getWaitNanos());
            }
            // testing values distribution
            partitioners = partDecor.getPartitioners();
            isFirst = true;
            // since we have a fake Nullvector, the distribution is skewed
            for (Partitioner part : partitioners) {
                final List<PartitionOutgoingBatch> outBatches = (List<PartitionOutgoingBatch>) part.getOutgoingBatches();
                for (PartitionOutgoingBatch partOutBatch : outBatches) {
                    final int recordCount = ((VectorAccessible) partOutBatch).getRecordCount();
                    if (isFirst) {
                        assertEquals("RecordCount", 100, recordCount);
                        isFirst = false;
                    } else {
                        assertEquals("RecordCount", 0, recordCount);
                    }
                }
            }
            // test exceptions within threads
            // test stats merging
            partionSenderRootExec.getStats().startProcessing();
            try {
                partDecor.executeMethodLogic(new InjectExceptionTest());
                fail("Should throw IOException here");
            } catch (IOException ioe) {
                final OperatorProfile.Builder oPBuilder = OperatorProfile.newBuilder();
                partionSenderRootExec.getStats().addAllMetrics(oPBuilder);
                final List<MetricValue> metrics = oPBuilder.getMetricList();
                for (MetricValue metric : metrics) {
                    if (Metric.BYTES_SENT.metricId() == metric.getMetricId()) {
                        assertEquals("Should add metricValue irrespective of exception", 5 * actualThreads, metric.getLongValue());
                    }
                    if (Metric.SENDING_THREADS_COUNT.metricId() == metric.getMetricId()) {
                        assertEquals(actualThreads, metric.getLongValue());
                    }
                }
                assertEquals(actualThreads - 1, ioe.getSuppressed().length);
            } finally {
                partionSenderRootExec.getStats().stopProcessing();
            }
        } finally {
            // cleanup
            partionSenderRootExec.close();
            context.close();
        }
    }
}
Also used: HashPartitionSender (org.apache.drill.exec.physical.config.HashPartitionSender), VectorAccessible (org.apache.drill.exec.record.VectorAccessible), QueryWorkUnit (org.apache.drill.exec.work.QueryWorkUnit), FragmentContextImpl (org.apache.drill.exec.ops.FragmentContextImpl), IOException (java.io.IOException), PlanFragment (org.apache.drill.exec.proto.BitControl.PlanFragment), MinorFragmentEndpoint (org.apache.drill.exec.physical.MinorFragmentEndpoint), MetricValue (org.apache.drill.exec.proto.UserBitShared.MetricValue), List (java.util.List), OptionList (org.apache.drill.exec.server.options.OptionList), QueryContextInformation (org.apache.drill.exec.proto.BitControl.QueryContextInformation)
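
The even-distribution loop in the test compares each partitioner's outgoing-batch count only with its immediate predecessor. As a self-contained illustration (plain Java, no Drill or JUnit dependencies), the sketch below expresses the same intent as a min/max spread check, which is slightly stricter than the pairwise comparison.

// Standalone sketch of the "even distribution" property checked above: across all
// partitioners, the number of outgoing batches may differ by at most one.
import java.util.List;

public class EvenDistributionCheck {

    static boolean isEvenlyDistributed(List<Integer> batchCounts) {
        int min = batchCounts.stream().mapToInt(Integer::intValue).min().orElse(0);
        int max = batchCounts.stream().mapToInt(Integer::intValue).max().orElse(0);
        return max - min <= 1;   // every partitioner gets n or n+1 outgoing batches
    }

    public static void main(String[] args) {
        System.out.println(isEvenlyDistributed(List.of(3, 3, 4)));  // true
        System.out.println(isEvenlyDistributed(List.of(2, 4, 4)));  // false
    }
}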

Example 13 with QueryWorkUnit

Use of org.apache.drill.exec.work.QueryWorkUnit in project drill by axbaretto.

The class SimpleParallelizer, method generateWorkUnit.

protected QueryWorkUnit generateWorkUnit(OptionList options, DrillbitEndpoint foremanNode, QueryId queryId, Fragment rootNode, PlanningSet planningSet, UserSession session, QueryContextInformation queryContextInfo) throws ExecutionSetupException {
    List<MinorFragmentDefn> fragmentDefns = new ArrayList<>();
    MinorFragmentDefn rootFragmentDefn = null;
    FragmentRoot rootOperator = null;
    // All endpoints must be assigned before we can materialize, so we start a new loop here rather than reusing the previous one.
    for (Wrapper wrapper : planningSet) {
        Fragment node = wrapper.getNode();
        final PhysicalOperator physicalOperatorRoot = node.getRoot();
        boolean isRootNode = rootNode == node;
        if (isRootNode && wrapper.getWidth() != 1) {
            throw new ForemanSetupException(String.format("Failure while trying to setup fragment. " + "The root fragment must always have parallelization one. In the current case, the width was set to %d.", wrapper.getWidth()));
        }
        // a fragment is a leaf (self-driven) if it doesn't rely on any other exchanges.
        boolean isLeafFragment = node.getReceivingExchangePairs().size() == 0;
        // Create a minorFragment for each major fragment.
        for (int minorFragmentId = 0; minorFragmentId < wrapper.getWidth(); minorFragmentId++) {
            IndexedFragmentNode iNode = new IndexedFragmentNode(minorFragmentId, wrapper);
            wrapper.resetAllocation();
            PhysicalOperator op = physicalOperatorRoot.accept(Materializer.INSTANCE, iNode);
            Preconditions.checkArgument(op instanceof FragmentRoot);
            FragmentRoot root = (FragmentRoot) op;
            FragmentHandle handle = FragmentHandle.newBuilder()
                    .setMajorFragmentId(wrapper.getMajorFragmentId())
                    .setMinorFragmentId(minorFragmentId)
                    .setQueryId(queryId)
                    .build();
            PlanFragment fragment = PlanFragment.newBuilder()
                    .setForeman(foremanNode)
                    .setHandle(handle)
                    .setAssignment(wrapper.getAssignedEndpoint(minorFragmentId))
                    .setLeafFragment(isLeafFragment)
                    .setContext(queryContextInfo)
                    .setMemInitial(wrapper.getInitialAllocation())
                    .setMemMax(wrapper.getMaxAllocation())
                    .setCredentials(session.getCredentials())
                    .addAllCollector(CountRequiredFragments.getCollectors(root))
                    .build();
            MinorFragmentDefn fragmentDefn = new MinorFragmentDefn(fragment, root, options);
            if (isRootNode) {
                logger.debug("Root fragment:\n {}", DrillStringUtils.unescapeJava(fragment.toString()));
                rootFragmentDefn = fragmentDefn;
                rootOperator = root;
            } else {
                logger.debug("Remote fragment:\n {}", DrillStringUtils.unescapeJava(fragment.toString()));
                fragmentDefns.add(fragmentDefn);
            }
        }
    }
    return new QueryWorkUnit(rootOperator, rootFragmentDefn, fragmentDefns);
}
Also used: MinorFragmentDefn (org.apache.drill.exec.work.QueryWorkUnit.MinorFragmentDefn), QueryWorkUnit (org.apache.drill.exec.work.QueryWorkUnit), ArrayList (java.util.ArrayList), FragmentRoot (org.apache.drill.exec.physical.base.FragmentRoot), FragmentHandle (org.apache.drill.exec.proto.ExecProtos.FragmentHandle), PlanFragment (org.apache.drill.exec.proto.BitControl.PlanFragment), IndexedFragmentNode (org.apache.drill.exec.planner.fragment.Materializer.IndexedFragmentNode), DrillbitEndpoint (org.apache.drill.exec.proto.CoordinationProtos.DrillbitEndpoint), MinorFragmentEndpoint (org.apache.drill.exec.physical.MinorFragmentEndpoint), PhysicalOperator (org.apache.drill.exec.physical.base.PhysicalOperator), ForemanSetupException (org.apache.drill.exec.work.foreman.ForemanSetupException)
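
generateWorkUnit fans each major fragment out into width-many minor fragments, each identified by its (majorFragmentId, minorFragmentId) pair, and insists that the root fragment has width one. The sketch below is not Drill code; it uses hypothetical record types to show just that fan-out and the width-one check.

// Stand-in sketch (hypothetical types, not Drill's Wrapper/FragmentHandle) of the fan-out
// performed by generateWorkUnit: one handle per (majorFragmentId, minorFragmentId) pair.
import java.util.ArrayList;
import java.util.List;

public class FragmentFanOutSketch {

    // Hypothetical placeholders for Wrapper and FragmentHandle.
    record Wrapper(int majorFragmentId, int width, boolean isRoot) {}
    record Handle(int majorFragmentId, int minorFragmentId) {}

    static List<Handle> fanOut(List<Wrapper> planningSet) {
        List<Handle> handles = new ArrayList<>();
        for (Wrapper wrapper : planningSet) {
            if (wrapper.isRoot() && wrapper.width() != 1) {
                throw new IllegalStateException("root fragment must have width 1, got " + wrapper.width());
            }
            for (int minorId = 0; minorId < wrapper.width(); minorId++) {
                handles.add(new Handle(wrapper.majorFragmentId(), minorId));
            }
        }
        return handles;
    }

    public static void main(String[] args) {
        List<Wrapper> planningSet = List.of(new Wrapper(0, 1, true), new Wrapper(1, 3, false));
        // prints handles for (0,0), (1,0), (1,1), (1,2)
        fanOut(planningSet).forEach(System.out::println);
    }
}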

Example 14 with QueryWorkUnit

Use of org.apache.drill.exec.work.QueryWorkUnit in project drill by axbaretto.

The class TestLocalExchange, method testHelperVerifyPartitionSenderParallelization.

// Verify that the number of partition senders in a major fragment is not more than the cluster size and that each endpoint
// in the cluster has at most one fragment from a given major fragment containing a partition sender.
private static void testHelperVerifyPartitionSenderParallelization(String plan, boolean isMuxOn, boolean isDeMuxOn) throws Exception {
    final DrillbitContext drillbitContext = getDrillbitContext();
    final PhysicalPlanReader planReader = drillbitContext.getPlanReader();
    final Fragment rootFragment = PopUnitTestBase.getRootFragmentFromPlanString(planReader, plan);
    final List<Integer> deMuxFragments = Lists.newLinkedList();
    final List<Integer> htrFragments = Lists.newLinkedList();
    final PlanningSet planningSet = new PlanningSet();
    // Create a planningSet to get the assignment of major fragment ids to fragments.
    PARALLELIZER.initFragmentWrappers(rootFragment, planningSet);
    findFragmentsWithPartitionSender(rootFragment, planningSet, deMuxFragments, htrFragments);
    final QueryContextInformation queryContextInfo = Utilities.createQueryContextInfo("dummySchemaName", "938ea2d9-7cb9-4baf-9414-a5a0b7777e8e");
    QueryWorkUnit qwu = PARALLELIZER.getFragments(new OptionList(), drillbitContext.getEndpoint(), QueryId.getDefaultInstance(), drillbitContext.getBits(), rootFragment, USER_SESSION, queryContextInfo);
    qwu.applyPlan(planReader);
    // Make sure the number of minor fragments with a HashPartitioner within a major fragment is not more than the
    // number of Drillbits in the cluster
    ArrayListMultimap<Integer, DrillbitEndpoint> partitionSenderMap = ArrayListMultimap.create();
    for (PlanFragment planFragment : qwu.getFragments()) {
        if (planFragment.getFragmentJson().contains("hash-partition-sender")) {
            int majorFragmentId = planFragment.getHandle().getMajorFragmentId();
            DrillbitEndpoint assignedEndpoint = planFragment.getAssignment();
            partitionSenderMap.get(majorFragmentId).add(assignedEndpoint);
        }
    }
    if (isMuxOn) {
        verifyAssignment(htrFragments, partitionSenderMap);
    }
    if (isDeMuxOn) {
        verifyAssignment(deMuxFragments, partitionSenderMap);
    }
}
Also used: DrillbitContext (org.apache.drill.exec.server.DrillbitContext), PhysicalPlanReader (org.apache.drill.exec.planner.PhysicalPlanReader), QueryWorkUnit (org.apache.drill.exec.work.QueryWorkUnit), PlanFragment (org.apache.drill.exec.proto.BitControl.PlanFragment), Fragment (org.apache.drill.exec.planner.fragment.Fragment), DrillbitEndpoint (org.apache.drill.exec.proto.CoordinationProtos.DrillbitEndpoint), PlanningSet (org.apache.drill.exec.planner.fragment.PlanningSet), QueryContextInformation (org.apache.drill.exec.proto.BitControl.QueryContextInformation), OptionList (org.apache.drill.exec.server.options.OptionList)
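
The test groups the assigned endpoint of every hash-partition-sender fragment by major fragment id and then verifies two properties: no major fragment has more senders than there are Drillbits, and no endpoint carries two senders for the same major fragment. The sketch below illustrates that verification with plain Guava and string endpoints; it is an assumption-laden stand-in, not the project's verifyAssignment helper.

// Standalone illustration of the assignment check, using plain Guava (the test uses Drill's
// bundled copy) and strings in place of DrillbitEndpoint.
import com.google.common.collect.ArrayListMultimap;
import java.util.HashSet;
import java.util.List;
import java.util.Set;

public class PartitionSenderAssignmentCheck {

    static boolean atMostOneSenderPerEndpoint(ArrayListMultimap<Integer, String> sendersByMajorId,
                                              int clusterSize) {
        for (Integer majorId : sendersByMajorId.keySet()) {
            List<String> endpoints = sendersByMajorId.get(majorId);
            if (endpoints.size() > clusterSize) {
                return false;                       // more senders than Drillbits in the cluster
            }
            Set<String> distinct = new HashSet<>(endpoints);
            if (distinct.size() != endpoints.size()) {
                return false;                       // some endpoint got two senders for this major fragment
            }
        }
        return true;
    }

    public static void main(String[] args) {
        ArrayListMultimap<Integer, String> map = ArrayListMultimap.create();
        map.put(1, "drillbit-a");
        map.put(1, "drillbit-b");
        map.put(2, "drillbit-a");
        System.out.println(atMostOneSenderPerEndpoint(map, 3));  // true
    }
}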

Example 15 with QueryWorkUnit

Use of org.apache.drill.exec.work.QueryWorkUnit in project drill by axbaretto.

The class PlanSplitter, method getFragments.

private List<PlanFragment> getFragments(final DrillbitContext dContext, final GetQueryPlanFragments req, final QueryContext queryContext, final QueryId queryId) throws Exception {
    final PhysicalPlan plan;
    final String query = req.getQuery();
    switch(req.getType()) {
        case SQL:
            final Pointer<String> textPlan = new Pointer<>();
            plan = DrillSqlWorker.getPlan(queryContext, query, textPlan);
            break;
        case PHYSICAL:
            plan = dContext.getPlanReader().readPhysicalPlan(query);
            break;
        default:
            throw new IllegalStateException("Planning fragments supports only SQL or PHYSICAL QueryType");
    }
    QueryResourceAllocator planner = dContext.getResourceManager().newResourceAllocator(queryContext);
    planner.visitAbstractPlan(plan);
    final PhysicalOperator rootOperator = plan.getSortedOperators(false).iterator().next();
    final Fragment rootFragment = rootOperator.accept(MakeFragmentsVisitor.INSTANCE, null);
    final SimpleParallelizer parallelizer = new SplittingParallelizer(queryContext);
    List<PlanFragment> fragments = Lists.newArrayList();
    if (req.getSplitPlan()) {
        final List<QueryWorkUnit> queryWorkUnits = parallelizer.getSplitFragments(queryContext.getOptions().getOptionList(), queryContext.getCurrentEndpoint(), queryId, queryContext.getActiveEndpoints(), dContext.getPlanReader(), rootFragment, queryContext.getSession(), queryContext.getQueryContextInfo());
        for (QueryWorkUnit queryWorkUnit : queryWorkUnits) {
            planner.visitPhysicalPlan(queryWorkUnit);
            queryWorkUnit.applyPlan(dContext.getPlanReader());
            fragments.add(queryWorkUnit.getRootFragment());
            List<PlanFragment> childFragments = queryWorkUnit.getFragments();
            if (!childFragments.isEmpty()) {
                throw new IllegalStateException("Split plans can not have more then one fragment");
            }
        }
    } else {
        final QueryWorkUnit queryWorkUnit = parallelizer.getFragments(queryContext.getOptions().getOptionList(), queryContext.getCurrentEndpoint(), queryId, queryContext.getActiveEndpoints(), rootFragment, queryContext.getSession(), queryContext.getQueryContextInfo());
        planner.visitPhysicalPlan(queryWorkUnit);
        queryWorkUnit.applyPlan(dContext.getPlanReader());
        fragments.add(queryWorkUnit.getRootFragment());
        fragments.addAll(queryWorkUnit.getFragments());
    }
    return fragments;
}
Also used: PhysicalPlan (org.apache.drill.exec.physical.PhysicalPlan), QueryWorkUnit (org.apache.drill.exec.work.QueryWorkUnit), SimpleParallelizer (org.apache.drill.exec.planner.fragment.SimpleParallelizer), Pointer (org.apache.drill.exec.util.Pointer), PlanFragment (org.apache.drill.exec.proto.BitControl.PlanFragment), Fragment (org.apache.drill.exec.planner.fragment.Fragment), PhysicalOperator (org.apache.drill.exec.physical.base.PhysicalOperator), SplittingParallelizer (org.apache.drill.exec.planner.fragment.contrib.SplittingParallelizer), QueryResourceAllocator (org.apache.drill.exec.work.foreman.rm.QueryResourceAllocator)
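
PlanSplitter returns a flat list of PlanFragments: in split mode each QueryWorkUnit must contribute only its root fragment, otherwise the single work unit contributes its root plus all children. The sketch below mirrors that branching with hypothetical stand-in types (plain Java, not Drill's classes).

// Stand-in sketch of how getFragments flattens its result; WorkUnit is a hypothetical
// placeholder exposing only the pieces used here.
import java.util.ArrayList;
import java.util.List;

public class FragmentCollectionSketch {

    record WorkUnit(String rootFragment, List<String> childFragments) {}

    static List<String> collect(List<WorkUnit> workUnits, boolean splitPlan) {
        List<String> fragments = new ArrayList<>();
        if (splitPlan) {
            for (WorkUnit unit : workUnits) {
                if (!unit.childFragments().isEmpty()) {
                    throw new IllegalStateException("Split plans cannot have more than one fragment");
                }
                fragments.add(unit.rootFragment());
            }
        } else {
            WorkUnit unit = workUnits.get(0);
            fragments.add(unit.rootFragment());
            fragments.addAll(unit.childFragments());
        }
        return fragments;
    }

    public static void main(String[] args) {
        List<WorkUnit> split = List.of(new WorkUnit("root-0", List.of()), new WorkUnit("root-1", List.of()));
        System.out.println(collect(split, true));   // [root-0, root-1]
        List<WorkUnit> whole = List.of(new WorkUnit("root", List.of("leaf-1", "leaf-2")));
        System.out.println(collect(whole, false));  // [root, leaf-1, leaf-2]
    }
}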

Aggregations

QueryWorkUnit (org.apache.drill.exec.work.QueryWorkUnit): 17 uses
PlanFragment (org.apache.drill.exec.proto.BitControl.PlanFragment): 14 uses
DrillbitEndpoint (org.apache.drill.exec.proto.CoordinationProtos.DrillbitEndpoint): 10 uses
Fragment (org.apache.drill.exec.planner.fragment.Fragment): 9 uses
PhysicalOperator (org.apache.drill.exec.physical.base.PhysicalOperator): 8 uses
QueryContextInformation (org.apache.drill.exec.proto.BitControl.QueryContextInformation): 6 uses
FragmentHandle (org.apache.drill.exec.proto.ExecProtos.FragmentHandle): 6 uses
OptionList (org.apache.drill.exec.server.options.OptionList): 6 uses
MinorFragmentEndpoint (org.apache.drill.exec.physical.MinorFragmentEndpoint): 5 uses
FragmentRoot (org.apache.drill.exec.physical.base.FragmentRoot): 5 uses
IndexedFragmentNode (org.apache.drill.exec.planner.fragment.Materializer.IndexedFragmentNode): 5 uses
SimpleParallelizer (org.apache.drill.exec.planner.fragment.SimpleParallelizer): 5 uses
ForemanSetupException (org.apache.drill.exec.work.foreman.ForemanSetupException): 5 uses
PhysicalPlanReader (org.apache.drill.exec.planner.PhysicalPlanReader): 4 uses
MinorFragmentDefn (org.apache.drill.exec.work.QueryWorkUnit.MinorFragmentDefn): 4 uses
IOException (java.io.IOException): 3 uses
ArrayList (java.util.ArrayList): 2 uses
List (java.util.List): 2 uses
FragmentContextImpl (org.apache.drill.exec.ops.FragmentContextImpl): 2 uses
PhysicalPlan (org.apache.drill.exec.physical.PhysicalPlan): 2 uses