Use of org.apache.drill.exec.proto.BitControl.PlanFragment in project drill by apache.
The class DrillSeparatePlanningTest, method testMultiMinorFragmentSimpleQuery.
@Test(timeout = 30000)
public void testMultiMinorFragmentSimpleQuery() throws Exception {
  final String query = String.format("SELECT o_orderkey FROM dfs_test.`%s/multilevel/json`", TEST_RES_PATH);
  QueryPlanFragments planFragments = getFragmentsHelper(query);
  assertNotNull(planFragments);
  assertTrue((planFragments.getFragmentsCount() > 1));
  for (PlanFragment planFragment : planFragments.getFragmentsList()) {
    assertTrue(planFragment.getLeafFragment());
  }
  getResultsHelper(planFragments);
}
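As a minimal sketch (not part of the Drill test class), the leaf check in the loop above could be factored into a helper; countLeafFragments is a hypothetical name, and the code relies only on the QueryPlanFragments and PlanFragment accessors already used in the test.

// Hypothetical helper: counts how many planned fragments are leaf fragments.
// Uses only QueryPlanFragments.getFragmentsList() and PlanFragment.getLeafFragment(),
// both of which appear in the test above.
private static int countLeafFragments(QueryPlanFragments planFragments) {
  int leafCount = 0;
  for (PlanFragment fragment : planFragments.getFragmentsList()) {
    if (fragment.getLeafFragment()) {
      leafCount++;
    }
  }
  return leafCount;
}

With such a helper, the per-fragment assertion in the loop collapses to assertEquals(planFragments.getFragmentsCount(), countLeafFragments(planFragments)).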
Use of org.apache.drill.exec.proto.BitControl.PlanFragment in project drill by apache.
The class TestPartitionSender, method testThreadsHelper.
/**
 * Core of the partition-sender test logic.
 * @param hashToRandomExchange the HashToRandomExchange operator under test
 * @param drillbitContext drillbit context used to build fragment contexts
 * @param options option list handed to the parallelizer
 * @param incoming incoming record batch fed to the partition sender
 * @param registry function implementation registry for the fragment context
 * @param planReader physical plan reader used by the parallelizer
 * @param planningSet planning set for the query
 * @param rootFragment root fragment of the physical plan
 * @param expectedThreadsCount expected number of partition-sender threads
 * @throws Exception
 */
private void testThreadsHelper(HashToRandomExchange hashToRandomExchange, DrillbitContext drillbitContext, OptionList options, RecordBatch incoming, FunctionImplementationRegistry registry, PhysicalPlanReader planReader, PlanningSet planningSet, Fragment rootFragment, int expectedThreadsCount) throws Exception {
  final QueryContextInformation queryContextInfo = Utilities.createQueryContextInfo("dummySchemaName", "938ea2d9-7cb9-4baf-9414-a5a0b7777e8e");
  final QueryWorkUnit qwu = PARALLELIZER.getFragments(options, drillbitContext.getEndpoint(), QueryId.getDefaultInstance(), drillbitContext.getBits(), planReader, rootFragment, USER_SESSION, queryContextInfo);
  final List<MinorFragmentEndpoint> mfEndPoints = PhysicalOperatorUtil.getIndexOrderedEndpoints(Lists.newArrayList(drillbitContext.getBits()));
  for (PlanFragment planFragment : qwu.getFragments()) {
    if (!planFragment.getFragmentJson().contains("hash-partition-sender")) {
      continue;
    }
    MockPartitionSenderRootExec partionSenderRootExec = null;
    FragmentContext context = null;
    try {
      context = new FragmentContext(drillbitContext, planFragment, null, registry);
      final int majorFragmentId = planFragment.getHandle().getMajorFragmentId();
      final HashPartitionSender partSender = new HashPartitionSender(majorFragmentId, hashToRandomExchange, hashToRandomExchange.getExpression(), mfEndPoints);
      partionSenderRootExec = new MockPartitionSenderRootExec(context, incoming, partSender);
      assertEquals("Number of threads calculated", expectedThreadsCount, partionSenderRootExec.getNumberPartitions());
      partionSenderRootExec.createPartitioner();
      final PartitionerDecorator partDecor = partionSenderRootExec.getPartitioner();
      assertNotNull(partDecor);
      List<Partitioner> partitioners = partDecor.getPartitioners();
      assertNotNull(partitioners);
      final int actualThreads = DRILLBITS_COUNT > expectedThreadsCount ? expectedThreadsCount : DRILLBITS_COUNT;
      assertEquals("Number of partitioners", actualThreads, partitioners.size());
      for (int i = 0; i < mfEndPoints.size(); i++) {
        assertNotNull("PartitionOutgoingBatch", partDecor.getOutgoingBatches(i));
      }
      // check that PartitionOutgoingBatch instances are evenly distributed across partitioners
      boolean isFirst = true;
      int prevBatchCountSize = 0;
      int batchCountSize = 0;
      for (Partitioner part : partitioners) {
        final List<PartitionOutgoingBatch> outBatch = (List<PartitionOutgoingBatch>) part.getOutgoingBatches();
        batchCountSize = outBatch.size();
        if (!isFirst) {
          assertTrue(Math.abs(batchCountSize - prevBatchCountSize) <= 1);
        } else {
          isFirst = false;
        }
        prevBatchCountSize = batchCountSize;
      }
      partionSenderRootExec.getStats().startProcessing();
      try {
        partDecor.partitionBatch(incoming);
      } finally {
        partionSenderRootExec.getStats().stopProcessing();
      }
      if (actualThreads == 1) {
        assertEquals("With single thread parent and child waitNanos should match", partitioners.get(0).getStats().getWaitNanos(), partionSenderRootExec.getStats().getWaitNanos());
      }
      // testing values distribution
      partitioners = partDecor.getPartitioners();
      isFirst = true;
      // since we have a fake NullVector, the distribution is skewed
      for (Partitioner part : partitioners) {
        final List<PartitionOutgoingBatch> outBatches = (List<PartitionOutgoingBatch>) part.getOutgoingBatches();
        for (PartitionOutgoingBatch partOutBatch : outBatches) {
          final int recordCount = ((VectorAccessible) partOutBatch).getRecordCount();
          if (isFirst) {
            assertEquals("RecordCount", 100, recordCount);
            isFirst = false;
          } else {
            assertEquals("RecordCount", 0, recordCount);
          }
        }
      }
      // test exceptions within threads
      // test stats merging
      partionSenderRootExec.getStats().startProcessing();
      try {
        partDecor.executeMethodLogic(new InjectExceptionTest());
        fail("Should throw IOException here");
      } catch (IOException ioe) {
        final OperatorProfile.Builder oPBuilder = OperatorProfile.newBuilder();
        partionSenderRootExec.getStats().addAllMetrics(oPBuilder);
        final List<MetricValue> metrics = oPBuilder.getMetricList();
        for (MetricValue metric : metrics) {
          if (Metric.BYTES_SENT.metricId() == metric.getMetricId()) {
            assertEquals("Should add metricValue irrespective of exception", 5 * actualThreads, metric.getLongValue());
          }
          if (Metric.SENDING_THREADS_COUNT.metricId() == metric.getMetricId()) {
            assertEquals(actualThreads, metric.getLongValue());
          }
        }
        assertEquals(actualThreads - 1, ioe.getSuppressed().length);
      } finally {
        partionSenderRootExec.getStats().stopProcessing();
      }
    } finally {
      // cleanup
      partionSenderRootExec.close();
      context.close();
    }
  }
}
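The distribution check above only compares each partitioner with the previous one in iteration order. A stricter variant, shown here as a hypothetical helper rather than code from the Drill test, tracks the minimum and maximum outgoing-batch counts so that any two partitioners differ by at most one:

// Hypothetical helper: asserts that outgoing batches are spread evenly across partitioners,
// i.e. the largest and smallest per-partitioner batch counts differ by at most one.
// Mirrors the cast used in the test above when reading the outgoing batches.
private static void assertEvenDistribution(List<Partitioner> partitioners) {
  int min = Integer.MAX_VALUE;
  int max = Integer.MIN_VALUE;
  for (Partitioner part : partitioners) {
    final List<PartitionOutgoingBatch> outgoing = (List<PartitionOutgoingBatch>) part.getOutgoingBatches();
    min = Math.min(min, outgoing.size());
    max = Math.max(max, outgoing.size());
  }
  assertTrue("Outgoing batches are not evenly distributed", max - min <= 1);
}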
Use of org.apache.drill.exec.proto.BitControl.PlanFragment in project drill by apache.
The class PlanSplitter, method getFragments.
private List<PlanFragment> getFragments(final DrillbitContext dContext, final GetQueryPlanFragments req, final QueryContext queryContext, final QueryId queryId) throws Exception {
  final PhysicalPlan plan;
  final String query = req.getQuery();
  switch (req.getType()) {
    case SQL:
      final Pointer<String> textPlan = new Pointer<>();
      plan = DrillSqlWorker.getPlan(queryContext, query, textPlan);
      break;
    case PHYSICAL:
      plan = dContext.getPlanReader().readPhysicalPlan(query);
      break;
    default:
      throw new IllegalStateException("Planning fragments supports only SQL or PHYSICAL QueryType");
  }
  MemoryAllocationUtilities.setupSortMemoryAllocations(plan, queryContext);
  final PhysicalOperator rootOperator = plan.getSortedOperators(false).iterator().next();
  final Fragment rootFragment = rootOperator.accept(MakeFragmentsVisitor.INSTANCE, null);
  final SimpleParallelizer parallelizer = new SplittingParallelizer(queryContext);
  List<PlanFragment> fragments = Lists.newArrayList();
  if (req.getSplitPlan()) {
    final List<QueryWorkUnit> queryWorkUnits = parallelizer.getSplitFragments(queryContext.getOptions().getOptionList(), queryContext.getCurrentEndpoint(), queryId, queryContext.getActiveEndpoints(), dContext.getPlanReader(), rootFragment, queryContext.getSession(), queryContext.getQueryContextInfo());
    for (QueryWorkUnit queryWorkUnit : queryWorkUnits) {
      fragments.add(queryWorkUnit.getRootFragment());
      List<PlanFragment> childFragments = queryWorkUnit.getFragments();
      if (!childFragments.isEmpty()) {
        throw new IllegalStateException("Split plans can not have more than one fragment");
      }
    }
  } else {
    final QueryWorkUnit queryWorkUnit = parallelizer.getFragments(queryContext.getOptions().getOptionList(), queryContext.getCurrentEndpoint(), queryId, queryContext.getActiveEndpoints(), dContext.getPlanReader(), rootFragment, queryContext.getSession(), queryContext.getQueryContextInfo());
    fragments.add(queryWorkUnit.getRootFragment());
    fragments.addAll(queryWorkUnit.getFragments());
  }
  return fragments;
}
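For debugging, the resulting list can be inspected with the PlanFragment handle accessors shown in the TestPartitionSender example; the sketch below is illustrative only, assumes an SLF4J-style logger is in scope, and assumes getMinorFragmentId() exists on the fragment handle alongside getMajorFragmentId().

// Illustrative sketch (not part of PlanSplitter): log each planned fragment's handle and leaf flag.
for (PlanFragment fragment : fragments) {
  logger.debug("fragment {}:{} leaf={}",
      fragment.getHandle().getMajorFragmentId(),
      fragment.getHandle().getMinorFragmentId(),
      fragment.getLeafFragment());
}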
Use of org.apache.drill.exec.proto.BitControl.PlanFragment in project drill by apache.
The class Foreman, method sendRemoteFragments.
/**
* Send all the remote fragments belonging to a single target drillbit in one request.
*
* @param assignment the drillbit assigned to these fragments
* @param fragments the set of fragments
* @param latch the countdown latch used to track the requests to all endpoints
* @param fragmentSubmitFailures the submission failure counter used to track the requests to all endpoints
*/
private void sendRemoteFragments(final DrillbitEndpoint assignment, final Collection<PlanFragment> fragments, final CountDownLatch latch, final FragmentSubmitFailures fragmentSubmitFailures) {
  @SuppressWarnings("resource")
  final Controller controller = drillbitContext.getController();
  final InitializeFragments.Builder fb = InitializeFragments.newBuilder();
  for (final PlanFragment planFragment : fragments) {
    fb.addFragment(planFragment);
  }
  final InitializeFragments initFrags = fb.build();
  logger.debug("Sending remote fragments to \nNode:\n{} \n\nData:\n{}", assignment, initFrags);
  final FragmentSubmitListener listener = new FragmentSubmitListener(assignment, initFrags, latch, fragmentSubmitFailures);
  controller.getTunnel(assignment).sendFragments(listener, initFrags);
}
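Since sendRemoteFragments expects the fragments belonging to a single target drillbit, a caller has to group fragments per endpoint first. The sketch below is a hedged illustration, not Foreman's actual code, and assumes PlanFragment.getAssignment() returns the DrillbitEndpoint the fragment was assigned to.

// Hypothetical grouping sketch: collect each drillbit's fragments, then call
// sendRemoteFragments once per endpoint. Assumes PlanFragment.getAssignment()
// returns the assigned DrillbitEndpoint (java.util.Map/HashMap/ArrayList imports implied).
final Map<DrillbitEndpoint, List<PlanFragment>> fragmentsPerEndpoint = new HashMap<>();
for (final PlanFragment fragment : fragments) {
  fragmentsPerEndpoint
      .computeIfAbsent(fragment.getAssignment(), endpoint -> new ArrayList<>())
      .add(fragment);
}
final CountDownLatch latch = new CountDownLatch(fragmentsPerEndpoint.size());
for (final Map.Entry<DrillbitEndpoint, List<PlanFragment>> entry : fragmentsPerEndpoint.entrySet()) {
  sendRemoteFragments(entry.getKey(), entry.getValue(), latch, fragmentSubmitFailures);
}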
Use of org.apache.drill.exec.proto.BitControl.PlanFragment in project drill by apache.
The class Foreman, method runPhysicalPlan.
private void runPhysicalPlan(final PhysicalPlan plan) throws ExecutionSetupException {
  validatePlan(plan);
  MemoryAllocationUtilities.setupSortMemoryAllocations(plan, queryContext);
  // Marking endTime of Planning
  queryManager.markPlanningEndTime();
  if (queuingEnabled) {
    acquireQuerySemaphore(plan);
    moveToState(QueryState.STARTING, null);
    // Marking endTime of Waiting in Queue
    queryManager.markQueueWaitEndTime();
  }
  final QueryWorkUnit work = getQueryWorkUnit(plan);
  final List<PlanFragment> planFragments = work.getFragments();
  final PlanFragment rootPlanFragment = work.getRootFragment();
  assert queryId == rootPlanFragment.getHandle().getQueryId();
  drillbitContext.getWorkBus().addFragmentStatusListener(queryId, queryManager.getFragmentStatusListener());
  drillbitContext.getClusterCoordinator().addDrillbitStatusListener(queryManager.getDrillbitStatusListener());
  logger.debug("Submitting fragments to run.");
  // set up the root fragment first so we'll have incoming buffers available.
  setupRootFragment(rootPlanFragment, work.getRootOperator());
  setupNonRootFragments(planFragments);
  moveToState(QueryState.RUNNING, null);
  logger.debug("Fragments running.");
}
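setupNonRootFragments itself is not shown here. As a rough, hypothetical illustration of how non-root fragments can be separated before submission (intermediate fragments are typically set up before leaf fragments so that receivers exist before senders start producing data), one can partition on PlanFragment.getLeafFragment():

// Illustrative sketch only, not Foreman's implementation: partition the non-root
// fragments by their leaf flag so intermediate fragments can be set up first.
final List<PlanFragment> leafFragments = new ArrayList<>();
final List<PlanFragment> intermediateFragments = new ArrayList<>();
for (final PlanFragment fragment : planFragments) {
  if (fragment.getLeafFragment()) {
    leafFragments.add(fragment);
  } else {
    intermediateFragments.add(fragment);
  }
}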