Search in sources :

Example 1 with QueryWorkUnit

Use of org.apache.drill.exec.work.QueryWorkUnit in project drill by apache.

The class Foreman, method getQueryWorkUnit.

private QueryWorkUnit getQueryWorkUnit(final PhysicalPlan plan) throws ExecutionSetupException {
    final PhysicalOperator rootOperator = plan.getSortedOperators(false).iterator().next();
    final Fragment rootFragment = rootOperator.accept(MakeFragmentsVisitor.INSTANCE, null);
    final SimpleParallelizer parallelizer = new SimpleParallelizer(queryContext);
    final QueryWorkUnit queryWorkUnit = parallelizer.getFragments(queryContext.getOptions().getOptionList(), queryContext.getCurrentEndpoint(), queryId, queryContext.getActiveEndpoints(), drillbitContext.getPlanReader(), rootFragment, initiatingClient.getSession(), queryContext.getQueryContextInfo());
    if (logger.isTraceEnabled()) {
        final StringBuilder sb = new StringBuilder();
        sb.append("PlanFragments for query ");
        sb.append(queryId);
        sb.append('\n');
        final List<PlanFragment> planFragments = queryWorkUnit.getFragments();
        final int fragmentCount = planFragments.size();
        int fragmentIndex = 0;
        for (final PlanFragment planFragment : planFragments) {
            final FragmentHandle fragmentHandle = planFragment.getHandle();
            sb.append("PlanFragment(");
            sb.append(++fragmentIndex);
            sb.append('/');
            sb.append(fragmentCount);
            sb.append(") major_fragment_id ");
            sb.append(fragmentHandle.getMajorFragmentId());
            sb.append(" minor_fragment_id ");
            sb.append(fragmentHandle.getMinorFragmentId());
            sb.append('\n');
            final DrillbitEndpoint endpointAssignment = planFragment.getAssignment();
            sb.append("  DrillbitEndpoint address ");
            sb.append(endpointAssignment.getAddress());
            sb.append('\n');
            String jsonString = "<<malformed JSON>>";
            sb.append("  fragment_json: ");
            final ObjectMapper objectMapper = new ObjectMapper();
            try {
                final Object json = objectMapper.readValue(planFragment.getFragmentJson(), Object.class);
                jsonString = objectMapper.defaultPrettyPrintingWriter().writeValueAsString(json);
            } catch (final Exception e) {
            // we've already set jsonString to a fallback value
            }
            sb.append(jsonString);
            logger.trace(sb.toString());
        }
    }
    return queryWorkUnit;
}
Also used : QueryWorkUnit(org.apache.drill.exec.work.QueryWorkUnit) SimpleParallelizer(org.apache.drill.exec.planner.fragment.SimpleParallelizer) FragmentHandle(org.apache.drill.exec.proto.ExecProtos.FragmentHandle) PlanFragment(org.apache.drill.exec.proto.BitControl.PlanFragment) Fragment(org.apache.drill.exec.planner.fragment.Fragment) DrillbitEndpoint(org.apache.drill.exec.proto.CoordinationProtos.DrillbitEndpoint) UserException(org.apache.drill.common.exceptions.UserException) RpcException(org.apache.drill.exec.rpc.RpcException) InvalidProtocolBufferException(com.google.protobuf.InvalidProtocolBufferException) OptimizerException(org.apache.drill.exec.exception.OptimizerException) OutOfMemoryException(org.apache.drill.exec.exception.OutOfMemoryException) ExecutionSetupException(org.apache.drill.common.exceptions.ExecutionSetupException) IOException(java.io.IOException) PhysicalOperator(org.apache.drill.exec.physical.base.PhysicalOperator) ObjectMapper(org.codehaus.jackson.map.ObjectMapper)
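
The trace block above pretty-prints each fragment's JSON with the Jackson 1 ObjectMapper and falls back to a placeholder when the payload is not valid JSON. A minimal sketch of that same pattern, pulled out into a hypothetical helper class (the class and method names are not part of Drill), could look like this:

import org.codehaus.jackson.map.ObjectMapper;

// Hypothetical helper mirroring the trace logging above: best-effort pretty
// printing of a fragment's JSON, with a placeholder when parsing fails.
public final class FragmentJsonPrinter {

    private static final ObjectMapper MAPPER = new ObjectMapper();

    public static String prettyPrint(String fragmentJson) {
        String jsonString = "<<malformed JSON>>";
        try {
            final Object json = MAPPER.readValue(fragmentJson, Object.class);
            jsonString = MAPPER.defaultPrettyPrintingWriter().writeValueAsString(json);
        } catch (final Exception e) {
            // keep the fallback value; callers only need best-effort trace output
        }
        return jsonString;
    }
}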

Example 2 with QueryWorkUnit

Use of org.apache.drill.exec.work.QueryWorkUnit in project drill by axbaretto.

The class SplittingParallelizer, method generateWorkUnits.

/**
 * Splits the plan into multiple plans based on parallelization.
 * Ideally this applies only to plans with two major fragments: Screen and UnionExchange,
 * but there are cases where even multiple exchanges can be removed, as with "order by".
 * The end goal is a single major fragment: a Screen whose chain ends in a single minor fragment
 * from the leaf Exchange. That way each plan can run independently, without any exchange involvement.
 * @param options
 * @param foremanNode - not really applicable
 * @param queryId
 * @param reader
 * @param rootNode
 * @param planningSet
 * @param session
 * @param queryContextInfo
 * @return list of QueryWorkUnits, one per split plan
 * @throws ExecutionSetupException
 */
private List<QueryWorkUnit> generateWorkUnits(OptionList options, DrillbitEndpoint foremanNode, QueryId queryId, PhysicalPlanReader reader, Fragment rootNode, PlanningSet planningSet, UserSession session, QueryContextInformation queryContextInfo) throws ExecutionSetupException {
    // now we generate all the individual plan fragments and associated assignments. Note, we need all endpoints
    // assigned before we can materialize, so we start a new loop here rather than utilizing the previous one.
    List<QueryWorkUnit> workUnits = Lists.newArrayList();
    int plansCount = 0;
    DrillbitEndpoint[] endPoints = null;
    long initialAllocation = 0;
    final Iterator<Wrapper> iter = planningSet.iterator();
    while (iter.hasNext()) {
        Wrapper wrapper = iter.next();
        Fragment node = wrapper.getNode();
        boolean isLeafFragment = node.getReceivingExchangePairs().size() == 0;
        final PhysicalOperator physicalOperatorRoot = node.getRoot();
        // get all the needed info from leaf fragment
        if ((physicalOperatorRoot instanceof Exchange) && isLeafFragment) {
            // need to get info about
            // number of minor fragments
            // assignedEndPoints
            // allocation
            plansCount = wrapper.getWidth();
            initialAllocation = (wrapper.getInitialAllocation() != 0) ? wrapper.getInitialAllocation() / plansCount : 0;
            endPoints = new DrillbitEndpoint[plansCount];
            for (int mfId = 0; mfId < plansCount; mfId++) {
                endPoints[mfId] = wrapper.getAssignedEndpoint(mfId);
            }
        }
    }
    if (plansCount == 0) {
        // no exchange, return list of single QueryWorkUnit
        workUnits.add(generateWorkUnit(options, foremanNode, queryId, rootNode, planningSet, session, queryContextInfo));
        return workUnits;
    }
    for (Wrapper wrapper : planningSet) {
        Fragment node = wrapper.getNode();
        final PhysicalOperator physicalOperatorRoot = node.getRoot();
        if (physicalOperatorRoot instanceof Exchange) {
            // get to 0 MajorFragment
            continue;
        }
        boolean isRootNode = rootNode == node;
        if (isRootNode && wrapper.getWidth() != 1) {
            throw new ForemanSetupException(String.format("Failure while trying to setup fragment. " + "The root fragment must always have parallelization one. In the current case, the width was set to %d.", wrapper.getWidth()));
        }
        // this fragment is always leaf, as we are removing all the exchanges
        boolean isLeafFragment = true;
        FragmentHandle handle = FragmentHandle.newBuilder()
                .setMajorFragmentId(wrapper.getMajorFragmentId())
                // minor fragment ID is going to be always 0, as plan will be split
                .setMinorFragmentId(0)
                .setQueryId(queryId)
                .build();
        // Create a minorFragment for each major fragment.
        for (int minorFragmentId = 0; minorFragmentId < plansCount; minorFragmentId++) {
            // those fragments should be empty
            List<MinorFragmentDefn> fragments = Lists.newArrayList();
            MinorFragmentDefn rootFragment = null;
            FragmentRoot rootOperator = null;
            IndexedFragmentNode iNode = new IndexedFragmentNode(minorFragmentId, wrapper);
            wrapper.resetAllocation();
            // two visitors here
            // 1. To remove exchange
            // 2. To reset operator IDs as exchanges were removed
            PhysicalOperator op = physicalOperatorRoot.accept(ExchangeRemoverMaterializer.INSTANCE, iNode).accept(OperatorIdVisitor.INSTANCE, 0);
            Preconditions.checkArgument(op instanceof FragmentRoot);
            FragmentRoot root = (FragmentRoot) op;
            PlanFragment fragment = PlanFragment.newBuilder()
                    .setForeman(endPoints[minorFragmentId])
                    .setHandle(handle)
                    .setAssignment(endPoints[minorFragmentId])
                    .setLeafFragment(isLeafFragment)
                    .setContext(queryContextInfo)
                    .setMemInitial(initialAllocation)
                    // TODO - for some reason OOM is using leaf fragment max allocation divided by width
                    .setMemMax(wrapper.getMaxAllocation())
                    .setCredentials(session.getCredentials())
                    .addAllCollector(CountRequiredFragments.getCollectors(root))
                    .build();
            MinorFragmentDefn fragmentDefn = new MinorFragmentDefn(fragment, root, options);
            if (isRootNode) {
                if (logger.isDebugEnabled()) {
                    logger.debug("Root fragment:\n {}", DrillStringUtils.unescapeJava(fragment.toString()));
                }
                rootFragment = fragmentDefn;
                rootOperator = root;
            } else {
                if (logger.isDebugEnabled()) {
                    logger.debug("Remote fragment:\n {}", DrillStringUtils.unescapeJava(fragment.toString()));
                }
                throw new ForemanSetupException(String.format("There should not be non-root/remote fragment present in plan split, but there is:", DrillStringUtils.unescapeJava(fragment.toString())));
            }
            // fragments should always be empty here
            workUnits.add(new QueryWorkUnit(rootOperator, rootFragment, fragments));
        }
    }
    return workUnits;
}
Also used : Wrapper(org.apache.drill.exec.planner.fragment.Wrapper) MinorFragmentDefn(org.apache.drill.exec.work.QueryWorkUnit.MinorFragmentDefn) QueryWorkUnit(org.apache.drill.exec.work.QueryWorkUnit) FragmentRoot(org.apache.drill.exec.physical.base.FragmentRoot) FragmentHandle(org.apache.drill.exec.proto.ExecProtos.FragmentHandle) PlanFragment(org.apache.drill.exec.proto.BitControl.PlanFragment) Fragment(org.apache.drill.exec.planner.fragment.Fragment) IndexedFragmentNode(org.apache.drill.exec.planner.fragment.Materializer.IndexedFragmentNode) DrillbitEndpoint(org.apache.drill.exec.proto.CoordinationProtos.DrillbitEndpoint) Exchange(org.apache.drill.exec.physical.base.Exchange) PhysicalOperator(org.apache.drill.exec.physical.base.PhysicalOperator) ForemanSetupException(org.apache.drill.exec.work.foreman.ForemanSetupException)
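
Each work unit produced by the splitter above carries only its root fragment, with an empty child-fragment list, which is what lets every split plan run on its own. The sketch below is a hypothetical consumer (the class and method names are assumptions); the QueryWorkUnit accessors it calls are the same ones used in Examples 3 and 5 on this page:

import java.util.List;

import org.apache.drill.exec.planner.PhysicalPlanReader;
import org.apache.drill.exec.proto.BitControl.PlanFragment;
import org.apache.drill.exec.work.QueryWorkUnit;

// Hypothetical consumer of split work units: collects one PlanFragment per
// work unit, verifying that each unit really is a single-fragment plan.
public final class SplitPlanCollector {

    public static void collectRootFragments(List<QueryWorkUnit> workUnits,
                                            PhysicalPlanReader reader,
                                            List<PlanFragment> out) throws Exception {
        for (QueryWorkUnit unit : workUnits) {
            // materialize the minor fragment definitions into PlanFragments
            unit.applyPlan(reader);
            // a split work unit is expected to carry only its root fragment
            if (!unit.getFragments().isEmpty()) {
                throw new IllegalStateException("Split plans can not have more than one fragment");
            }
            out.add(unit.getRootFragment());
        }
    }
}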

Example 3 with QueryWorkUnit

Use of org.apache.drill.exec.work.QueryWorkUnit in project drill by axbaretto.

The class TestFragmentChecker, method print.

private void print(String fragmentFile, int bitCount, int expectedFragmentCount) throws Exception {
    System.out.println(String.format("=================Building plan fragments for [%s].  Allowing %d total Drillbits.==================", fragmentFile, bitCount));
    PhysicalPlanReader ppr = PhysicalPlanReaderTestFactory.defaultPhysicalPlanReader(CONFIG);
    Fragment fragmentRoot = getRootFragment(ppr, fragmentFile);
    SimpleParallelizer par = new SimpleParallelizer(1000 * 1000, 5, 10, 1.2);
    List<DrillbitEndpoint> endpoints = Lists.newArrayList();
    DrillbitEndpoint localBit = null;
    for (int i = 0; i < bitCount; i++) {
        DrillbitEndpoint b1 = DrillbitEndpoint.newBuilder().setAddress("localhost").setControlPort(1234 + i).build();
        if (i == 0) {
            localBit = b1;
        }
        endpoints.add(b1);
    }
    final QueryContextInformation queryContextInfo = Utilities.createQueryContextInfo("dummySchemaName", "938ea2d9-7cb9-4baf-9414-a5a0b7777e8e");
    QueryWorkUnit qwu = par.getFragments(new OptionList(), localBit, QueryId.getDefaultInstance(), endpoints, fragmentRoot, UserSession.Builder.newBuilder().withCredentials(UserBitShared.UserCredentials.newBuilder().setUserName("foo").build()).build(), queryContextInfo);
    qwu.applyPlan(ppr);
    System.out.println(String.format("=========ROOT FRAGMENT [%d:%d] =========", qwu.getRootFragment().getHandle().getMajorFragmentId(), qwu.getRootFragment().getHandle().getMinorFragmentId()));
    System.out.print(qwu.getRootFragment().getFragmentJson());
    for (PlanFragment f : qwu.getFragments()) {
        System.out.println(String.format("=========Fragment [%d:%d]=====", f.getHandle().getMajorFragmentId(), f.getHandle().getMinorFragmentId()));
        System.out.print(f.getFragmentJson());
    }
    assertEquals(expectedFragmentCount, qwu.getFragments().size() + 1);
}
Also used : DrillbitEndpoint(org.apache.drill.exec.proto.CoordinationProtos.DrillbitEndpoint) PhysicalPlanReader(org.apache.drill.exec.planner.PhysicalPlanReader) QueryWorkUnit(org.apache.drill.exec.work.QueryWorkUnit) SimpleParallelizer(org.apache.drill.exec.planner.fragment.SimpleParallelizer) PlanFragment(org.apache.drill.exec.proto.BitControl.PlanFragment) Fragment(org.apache.drill.exec.planner.fragment.Fragment) QueryContextInformation(org.apache.drill.exec.proto.BitControl.QueryContextInformation) OptionList(org.apache.drill.exec.server.options.OptionList)
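
The final assertion adds 1 to qwu.getFragments().size() because getFragments() appears to return only the non-root fragments, while the root fragment is exposed separately through getRootFragment() (Example 5 relies on the same split). A tiny hypothetical helper makes that convention explicit:

import org.apache.drill.exec.work.QueryWorkUnit;

// Hypothetical helper: the total fragment count is the root fragment plus the
// non-root fragments returned by getFragments().
public final class FragmentCounts {

    public static int totalFragments(QueryWorkUnit qwu) {
        return qwu.getFragments().size() + 1;
    }
}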

Example 4 with QueryWorkUnit

Use of org.apache.drill.exec.work.QueryWorkUnit in project drill by apache.

The class TestPartitionSender, method testThreadsHelper.

/**
 * Core of the partition sender threading test.
 * @param hashToRandomExchange
 * @param drillbitContext
 * @param options
 * @param incoming
 * @param registry
 * @param planReader
 * @param planningSet
 * @param rootFragment
 * @param expectedThreadsCount
 * @throws Exception
 */
private void testThreadsHelper(HashToRandomExchange hashToRandomExchange, DrillbitContext drillbitContext, OptionList options, RecordBatch incoming, FunctionImplementationRegistry registry, PhysicalPlanReader planReader, PlanningSet planningSet, Fragment rootFragment, int expectedThreadsCount) throws Exception {
    final QueryContextInformation queryContextInfo = Utilities.createQueryContextInfo("dummySchemaName", "938ea2d9-7cb9-4baf-9414-a5a0b7777e8e");
    final QueryWorkUnit qwu = PARALLELIZER.generateWorkUnit(options, drillbitContext.getEndpoint(), QueryId.getDefaultInstance(), drillbitContext.getBits(), rootFragment, USER_SESSION, queryContextInfo);
    qwu.applyPlan(planReader);
    final List<MinorFragmentEndpoint> mfEndPoints = PhysicalOperatorUtil.getIndexOrderedEndpoints(Lists.newArrayList(drillbitContext.getBits()));
    for (PlanFragment planFragment : qwu.getFragments()) {
        if (!planFragment.getFragmentJson().contains("hash-partition-sender")) {
            continue;
        }
        MockPartitionSenderRootExec partionSenderRootExec = null;
        FragmentContextImpl context = null;
        try {
            context = new FragmentContextImpl(drillbitContext, planFragment, null, registry);
            context.setExecutorState(new MockExecutorState());
            final int majorFragmentId = planFragment.getHandle().getMajorFragmentId();
            final HashPartitionSender partSender = new HashPartitionSender(majorFragmentId, hashToRandomExchange, hashToRandomExchange.getExpression(), mfEndPoints);
            partionSenderRootExec = new MockPartitionSenderRootExec(context, incoming, partSender);
            assertEquals("Number of threads calculated", expectedThreadsCount, partionSenderRootExec.getNumberPartitions());
            partionSenderRootExec.createPartitioner();
            final PartitionerDecorator partDecor = partionSenderRootExec.getPartitioner();
            assertNotNull(partDecor);
            List<Partitioner> partitioners = partDecor.getPartitioners();
            assertNotNull(partitioners);
            final int actualThreads = DRILLBITS_COUNT > expectedThreadsCount ? expectedThreadsCount : DRILLBITS_COUNT;
            assertEquals("Number of partitioners", actualThreads, partitioners.size());
            for (int i = 0; i < mfEndPoints.size(); i++) {
                assertNotNull("PartitionOutgoingBatch", partDecor.getOutgoingBatches(i));
            }
            // check distribution of PartitionOutgoingBatch - should be even distribution
            boolean isFirst = true;
            int prevBatchCountSize = 0;
            int batchCountSize = 0;
            for (Partitioner part : partitioners) {
                @SuppressWarnings("unchecked") final List<PartitionOutgoingBatch> outBatch = (List<PartitionOutgoingBatch>) part.getOutgoingBatches();
                batchCountSize = outBatch.size();
                if (!isFirst) {
                    assertTrue(Math.abs(batchCountSize - prevBatchCountSize) <= 1);
                } else {
                    isFirst = false;
                }
                prevBatchCountSize = batchCountSize;
            }
            partionSenderRootExec.getStats().startProcessing();
            try {
                partDecor.partitionBatch(incoming);
            } finally {
                partionSenderRootExec.getStats().stopProcessing();
            }
            if (actualThreads == 1) {
                assertEquals("With single thread parent and child waitNanos should match", partitioners.get(0).getStats().getWaitNanos(), partionSenderRootExec.getStats().getWaitNanos());
            }
            // testing values distribution
            partitioners = partDecor.getPartitioners();
            isFirst = true;
            // since we have a fake null vector, the distribution is skewed
            for (Partitioner part : partitioners) {
                @SuppressWarnings("unchecked") final List<PartitionOutgoingBatch> outBatches = (List<PartitionOutgoingBatch>) part.getOutgoingBatches();
                for (PartitionOutgoingBatch partOutBatch : outBatches) {
                    final int recordCount = ((VectorAccessible) partOutBatch).getRecordCount();
                    if (isFirst) {
                        assertEquals("RecordCount", 100, recordCount);
                        isFirst = false;
                    } else {
                        assertEquals("RecordCount", 0, recordCount);
                    }
                }
            }
            // test exceptions within threads
            // test stats merging
            partionSenderRootExec.getStats().startProcessing();
            try {
                partDecor.executeMethodLogic(new InjectExceptionTest());
                fail("executeMethodLogic should throw an exception.");
            } catch (ExecutionException e) {
                final OperatorProfile.Builder oPBuilder = OperatorProfile.newBuilder();
                partionSenderRootExec.getStats().addAllMetrics(oPBuilder);
                final List<MetricValue> metrics = oPBuilder.getMetricList();
                for (MetricValue metric : metrics) {
                    if (Metric.BYTES_SENT.metricId() == metric.getMetricId()) {
                        assertEquals("Should add metricValue irrespective of exception", 5 * actualThreads, metric.getLongValue());
                    }
                    if (Metric.SENDING_THREADS_COUNT.metricId() == metric.getMetricId()) {
                        assertEquals(actualThreads, metric.getLongValue());
                    }
                }
                assertTrue(e.getCause() instanceof IOException);
                assertEquals(actualThreads - 1, e.getCause().getSuppressed().length);
            } finally {
                partionSenderRootExec.getStats().stopProcessing();
            }
        } finally {
            // cleanup
            partionSenderRootExec.close();
            context.close();
        }
    }
}
Also used : HashPartitionSender(org.apache.drill.exec.physical.config.HashPartitionSender) VectorAccessible(org.apache.drill.exec.record.VectorAccessible) QueryWorkUnit(org.apache.drill.exec.work.QueryWorkUnit) FragmentContextImpl(org.apache.drill.exec.ops.FragmentContextImpl) IOException(java.io.IOException) PlanFragment(org.apache.drill.exec.proto.BitControl.PlanFragment) MinorFragmentEndpoint(org.apache.drill.exec.physical.MinorFragmentEndpoint) MetricValue(org.apache.drill.exec.proto.UserBitShared.MetricValue) MockExecutorState(org.apache.drill.test.OperatorFixture.MockExecutorState) List(java.util.List) OptionList(org.apache.drill.exec.server.options.OptionList) ExecutionException(java.util.concurrent.ExecutionException) QueryContextInformation(org.apache.drill.exec.proto.BitControl.QueryContextInformation)

Example 5 with QueryWorkUnit

Use of org.apache.drill.exec.work.QueryWorkUnit in project drill by apache.

The class PlanSplitter, method getFragments.

private List<PlanFragment> getFragments(final DrillbitContext dContext, final GetQueryPlanFragments req, final QueryContext queryContext, final QueryId queryId) throws Exception {
    final PhysicalPlan plan;
    final String query = req.getQuery();
    switch(req.getType()) {
        case SQL:
            final Pointer<String> textPlan = new Pointer<>();
            plan = DrillSqlWorker.getPlan(queryContext, query, textPlan);
            break;
        case PHYSICAL:
            plan = dContext.getPlanReader().readPhysicalPlan(query);
            break;
        default:
            throw new IllegalStateException("Planning fragments supports only SQL or PHYSICAL QueryType");
    }
    QueryResourceAllocator planner = dContext.getResourceManager().newResourceAllocator(queryContext);
    planner.visitAbstractPlan(plan);
    final PhysicalOperator rootOperator = plan.getSortedOperators(false).iterator().next();
    final Fragment rootFragment = rootOperator.accept(MakeFragmentsVisitor.INSTANCE, null);
    final SimpleParallelizer parallelizer = new SplittingParallelizer(plan.getProperties().hasResourcePlan, queryContext);
    List<PlanFragment> fragments = Lists.newArrayList();
    if (req.getSplitPlan()) {
        final List<QueryWorkUnit> queryWorkUnits = parallelizer.getSplitFragments(queryContext.getOptions().getOptionList(), queryContext.getCurrentEndpoint(), queryId, queryContext.getActiveEndpoints(), dContext.getPlanReader(), rootFragment, queryContext.getSession(), queryContext.getQueryContextInfo());
        for (QueryWorkUnit queryWorkUnit : queryWorkUnits) {
            planner.visitPhysicalPlan(queryWorkUnit);
            queryWorkUnit.applyPlan(dContext.getPlanReader());
            fragments.add(queryWorkUnit.getRootFragment());
            List<PlanFragment> childFragments = queryWorkUnit.getFragments();
            if (!childFragments.isEmpty()) {
                throw new IllegalStateException("Split plans can not have more then one fragment");
            }
        }
    } else {
        final QueryWorkUnit queryWorkUnit = parallelizer.generateWorkUnit(queryContext.getOptions().getOptionList(), queryContext.getCurrentEndpoint(), queryId, queryContext.getActiveEndpoints(), rootFragment, queryContext.getSession(), queryContext.getQueryContextInfo());
        planner.visitPhysicalPlan(queryWorkUnit);
        queryWorkUnit.applyPlan(dContext.getPlanReader());
        fragments.add(queryWorkUnit.getRootFragment());
        fragments.addAll(queryWorkUnit.getFragments());
    }
    return fragments;
}
Also used : PhysicalPlan(org.apache.drill.exec.physical.PhysicalPlan) QueryWorkUnit(org.apache.drill.exec.work.QueryWorkUnit) SimpleParallelizer(org.apache.drill.exec.planner.fragment.SimpleParallelizer) Pointer(org.apache.drill.exec.util.Pointer) PlanFragment(org.apache.drill.exec.proto.BitControl.PlanFragment) Fragment(org.apache.drill.exec.planner.fragment.Fragment) PhysicalOperator(org.apache.drill.exec.physical.base.PhysicalOperator) SplittingParallelizer(org.apache.drill.exec.planner.fragment.contrib.SplittingParallelizer) QueryResourceAllocator(org.apache.drill.exec.work.foreman.rm.QueryResourceAllocator)

Aggregations

QueryWorkUnit (org.apache.drill.exec.work.QueryWorkUnit): 17 usages
PlanFragment (org.apache.drill.exec.proto.BitControl.PlanFragment): 14 usages
DrillbitEndpoint (org.apache.drill.exec.proto.CoordinationProtos.DrillbitEndpoint): 10 usages
Fragment (org.apache.drill.exec.planner.fragment.Fragment): 9 usages
PhysicalOperator (org.apache.drill.exec.physical.base.PhysicalOperator): 8 usages
QueryContextInformation (org.apache.drill.exec.proto.BitControl.QueryContextInformation): 6 usages
FragmentHandle (org.apache.drill.exec.proto.ExecProtos.FragmentHandle): 6 usages
OptionList (org.apache.drill.exec.server.options.OptionList): 6 usages
MinorFragmentEndpoint (org.apache.drill.exec.physical.MinorFragmentEndpoint): 5 usages
FragmentRoot (org.apache.drill.exec.physical.base.FragmentRoot): 5 usages
IndexedFragmentNode (org.apache.drill.exec.planner.fragment.Materializer.IndexedFragmentNode): 5 usages
SimpleParallelizer (org.apache.drill.exec.planner.fragment.SimpleParallelizer): 5 usages
ForemanSetupException (org.apache.drill.exec.work.foreman.ForemanSetupException): 5 usages
PhysicalPlanReader (org.apache.drill.exec.planner.PhysicalPlanReader): 4 usages
MinorFragmentDefn (org.apache.drill.exec.work.QueryWorkUnit.MinorFragmentDefn): 4 usages
IOException (java.io.IOException): 3 usages
ArrayList (java.util.ArrayList): 2 usages
List (java.util.List): 2 usages
FragmentContextImpl (org.apache.drill.exec.ops.FragmentContextImpl): 2 usages
PhysicalPlan (org.apache.drill.exec.physical.PhysicalPlan): 2 usages