Use of org.apache.drill.exec.ops.FragmentContextImpl in project drill by apache.
The class FragmentStatusReporterTest, method setUp.
@Before
public void setUp() throws Exception {
  final FragmentContextImpl context = mock(FragmentContextImpl.class);
  Controller controller = mock(Controller.class);
  // Create the Foreman endpoint and its control tunnel
  DrillbitEndpoint foremanEndpoint = DrillbitEndpoint.newBuilder().setAddress("10.0.0.2").build();
  foremanTunnel = mock(ControlTunnel.class);
  when(context.getController()).thenReturn(controller);
  when(controller.getTunnel(foremanEndpoint)).thenReturn(foremanTunnel);
  when(context.getStats()).thenReturn(mock(FragmentStats.class));
  when(context.getHandle()).thenReturn(FragmentHandle.getDefaultInstance());
  when(context.getAllocator()).thenReturn(mock(BufferAllocator.class));
  when(context.getForemanEndpoint()).thenReturn(foremanEndpoint);
  statusReporter = new FragmentStatusReporter(context);
}
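Given this setup, a natural follow-on test drives the reporter and checks that a status update reaches the Foreman through the mocked tunnel. This is a minimal sketch, assuming the usual static imports from org.mockito.Mockito and JUnit, and assuming the method names stateChanged(FragmentState) on FragmentStatusReporter and sendFragmentStatus(FragmentStatus) on ControlTunnel as they appear in the Drill sources; verify both against the version in use.

@Test
public void testStateChangedNotifiesForeman() throws Exception {
  // Transition the fragment to RUNNING; the reporter should relay the new state.
  statusReporter.stateChanged(FragmentState.RUNNING);
  // The status update must go out through the Foreman's control tunnel
  // (sendFragmentStatus is an assumed method name, per the note above).
  verify(foremanTunnel).sendFragmentStatus(any(FragmentStatus.class));
}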
Use of org.apache.drill.exec.ops.FragmentContextImpl in project drill by axbaretto.
The class FragmentsRunner, method setupRootFragment.
/**
 * Set up the root fragment (which will run locally), and submit it for execution.
 *
 * @param rootFragment root fragment
 * @param rootOperator root operator
 * @throws ExecutionSetupException
 */
private void setupRootFragment(final PlanFragment rootFragment, final FragmentRoot rootOperator) throws ExecutionSetupException {
  QueryManager queryManager = foreman.getQueryManager();
  final FragmentContextImpl rootContext = new FragmentContextImpl(drillbitContext, rootFragment, foreman.getQueryContext(), initiatingClient, drillbitContext.getFunctionImplementationRegistry());
  final FragmentStatusReporter statusReporter = new FragmentStatusReporter(rootContext);
  final FragmentExecutor rootRunner = new FragmentExecutor(rootContext, rootFragment, statusReporter, rootOperator);
  final RootFragmentManager fragmentManager = new RootFragmentManager(rootFragment, rootRunner, statusReporter);
  queryManager.addFragmentStatusTracker(rootFragment, true);
  // The FragmentManager sets up the incoming buffers for the FragmentContext.
  if (rootContext.isBuffersDone()) {
    // If we don't have to wait for any incoming data, start the fragment runner immediately.
    bee.addFragmentRunner(rootRunner);
  } else {
    // Otherwise, register the fragment manager with the WorkEventBus so the
    // fragment can be started once its incoming buffers are filled.
    drillbitContext.getWorkBus().addFragmentManager(fragmentManager);
  }
}
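The else branch parks the RootFragmentManager on the WorkEventBus; when incoming data later arrives for that fragment, the receiving side looks the manager up by handle. A minimal sketch of that lookup, where resolveManager is a hypothetical helper and getFragmentManager(FragmentHandle) is assumed to exist on WorkEventBus as in the Drill sources:

// Hypothetical helper: resolve a previously registered fragment manager by handle.
private FragmentManager resolveManager(final FragmentHandle handle) {
  // getFragmentManager(FragmentHandle) is an assumed WorkEventBus method (see above).
  final FragmentManager manager = drillbitContext.getWorkBus().getFragmentManager(handle);
  if (manager == null) {
    // The fragment was never registered, or it already completed or was cancelled.
    throw new IllegalStateException("No fragment manager registered for " + handle);
  }
  return manager;
}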
Use of org.apache.drill.exec.ops.FragmentContextImpl in project drill by axbaretto.
The class FragmentsRunner, method startLocalFragment.
/**
 * Start the locally assigned leaf or intermediate fragment.
 *
 * @param fragment fragment
 * @throws ExecutionSetupException
 */
private void startLocalFragment(final PlanFragment fragment) throws ExecutionSetupException {
  logger.debug("Received local fragment start instruction: {}", fragment);
  final FragmentContextImpl fragmentContext = new FragmentContextImpl(drillbitContext, fragment, drillbitContext.getFunctionImplementationRegistry());
  final FragmentStatusReporter statusReporter = new FragmentStatusReporter(fragmentContext);
  final FragmentExecutor fragmentExecutor = new FragmentExecutor(fragmentContext, fragment, statusReporter);
  // We either start the fragment right away if it is a leaf fragment, or set up
  // a fragment manager if it is non-leaf.
  if (fragment.getLeafFragment()) {
    bee.addFragmentRunner(fragmentExecutor);
  } else {
    // Intermediate fragment: store it so it can receive incoming data.
    final NonRootFragmentManager manager = new NonRootFragmentManager(fragment, fragmentExecutor, statusReporter);
    drillbitContext.getWorkBus().addFragmentManager(manager);
  }
}
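The dispatch above keys on a single protobuf flag: PlanFragment is a generated message (BitControl.PlanFragment), so leaf-ness travels with the serialized plan. A short illustration of building such a fragment, with the handle values invented for the example:

// The builder calls are generated protobuf code; the concrete IDs are made up.
PlanFragment leaf = PlanFragment.newBuilder()
    .setHandle(FragmentHandle.newBuilder()
        .setMajorFragmentId(1)
        .setMinorFragmentId(0))
    // true: startLocalFragment() hands the executor straight to the WorkerBee;
    // false: a NonRootFragmentManager is registered to wait for incoming data.
    .setLeafFragment(true)
    .build();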
Use of org.apache.drill.exec.ops.FragmentContextImpl in project drill by axbaretto.
The class ControlMessageHandler, method startNewFragment.
/**
 * Start a new fragment on this node. These fragments can be leaf or intermediate
 * fragments which are scheduled by a remote or local Foreman node.
 *
 * @param fragment fragment to start
 * @param drillbitContext drillbit context
 * @throws UserRpcException
 */
private void startNewFragment(final PlanFragment fragment, final DrillbitContext drillbitContext) throws UserRpcException {
  logger.debug("Received remote fragment start instruction: {}", fragment);
  try {
    final FragmentContextImpl fragmentContext = new FragmentContextImpl(drillbitContext, fragment, drillbitContext.getFunctionImplementationRegistry());
    final FragmentStatusReporter statusReporter = new FragmentStatusReporter(fragmentContext);
    final FragmentExecutor fragmentExecutor = new FragmentExecutor(fragmentContext, fragment, statusReporter);
    // We either start the fragment right away if it is a leaf fragment, or set up
    // a fragment manager if it is non-leaf.
    if (fragment.getLeafFragment()) {
      bee.addFragmentRunner(fragmentExecutor);
    } else {
      // Intermediate fragment: store it so it can receive incoming data.
      final NonRootFragmentManager manager = new NonRootFragmentManager(fragment, fragmentExecutor, statusReporter);
      drillbitContext.getWorkBus().addFragmentManager(manager);
    }
  } catch (final ExecutionSetupException ex) {
    throw new UserRpcException(drillbitContext.getEndpoint(), "Failed to create fragment context", ex);
  } catch (final Exception e) {
    throw new UserRpcException(drillbitContext.getEndpoint(), "Failure while trying to start remote fragment", e);
  } catch (final OutOfMemoryError t) {
    if (t.getMessage().startsWith("Direct buffer")) {
      throw new UserRpcException(drillbitContext.getEndpoint(), "Out of direct memory while trying to start remote fragment", t);
    } else {
      throw t;
    }
  }
}
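The startsWith("Direct buffer") check works because the JVM reports direct-memory exhaustion with the distinct message "Direct buffer memory", unlike heap exhaustion ("Java heap space"). A self-contained, deliberately contrived reproduction; run it only in a throwaway JVM with a small -XX:MaxDirectMemorySize:

import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.List;

public class DirectOomDemo {
  public static void main(String[] args) {
    // Hold references so the buffers cannot be garbage collected and freed.
    final List<ByteBuffer> leak = new ArrayList<>();
    try {
      while (true) {
        // Direct allocations bypass the heap and count against MaxDirectMemorySize.
        leak.add(ByteBuffer.allocateDirect(64 * 1024 * 1024));
      }
    } catch (OutOfMemoryError e) {
      // Prints "Direct buffer memory" -- the prefix startNewFragment() keys on.
      System.out.println(e.getMessage());
    }
  }
}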
Use of org.apache.drill.exec.ops.FragmentContextImpl in project drill by axbaretto.
The class TestPartitionSender, method testThreadsHelper.
/**
 * Core of the test logic.
 *
 * @param hashToRandomExchange
 * @param drillbitContext
 * @param options
 * @param incoming
 * @param registry
 * @param planReader
 * @param planningSet
 * @param rootFragment
 * @param expectedThreadsCount
 * @throws Exception
 */
private void testThreadsHelper(HashToRandomExchange hashToRandomExchange, DrillbitContext drillbitContext, OptionList options, RecordBatch incoming, FunctionImplementationRegistry registry, PhysicalPlanReader planReader, PlanningSet planningSet, Fragment rootFragment, int expectedThreadsCount) throws Exception {
  final QueryContextInformation queryContextInfo = Utilities.createQueryContextInfo("dummySchemaName", "938ea2d9-7cb9-4baf-9414-a5a0b7777e8e");
  final QueryWorkUnit qwu = PARALLELIZER.getFragments(options, drillbitContext.getEndpoint(), QueryId.getDefaultInstance(), drillbitContext.getBits(), rootFragment, USER_SESSION, queryContextInfo);
  qwu.applyPlan(planReader);
  final List<MinorFragmentEndpoint> mfEndPoints = PhysicalOperatorUtil.getIndexOrderedEndpoints(Lists.newArrayList(drillbitContext.getBits()));
  for (PlanFragment planFragment : qwu.getFragments()) {
    if (!planFragment.getFragmentJson().contains("hash-partition-sender")) {
      continue;
    }
    MockPartitionSenderRootExec partionSenderRootExec = null;
    FragmentContextImpl context = null;
    try {
      context = new FragmentContextImpl(drillbitContext, planFragment, null, registry);
      final int majorFragmentId = planFragment.getHandle().getMajorFragmentId();
      final HashPartitionSender partSender = new HashPartitionSender(majorFragmentId, hashToRandomExchange, hashToRandomExchange.getExpression(), mfEndPoints);
      partionSenderRootExec = new MockPartitionSenderRootExec(context, incoming, partSender);
      assertEquals("Number of threads calculated", expectedThreadsCount, partionSenderRootExec.getNumberPartitions());
      partionSenderRootExec.createPartitioner();
      final PartitionerDecorator partDecor = partionSenderRootExec.getPartitioner();
      assertNotNull(partDecor);
      List<Partitioner> partitioners = partDecor.getPartitioners();
      assertNotNull(partitioners);
      final int actualThreads = DRILLBITS_COUNT > expectedThreadsCount ? expectedThreadsCount : DRILLBITS_COUNT;
      assertEquals("Number of partitioners", actualThreads, partitioners.size());
      for (int i = 0; i < mfEndPoints.size(); i++) {
        assertNotNull("PartitionOutgoingBatch", partDecor.getOutgoingBatches(i));
      }
      // Check the distribution of PartitionOutgoingBatch: it should be even,
      // i.e. batch counts across partitioners may differ by at most one.
      boolean isFirst = true;
      int prevBatchCountSize = 0;
      int batchCountSize = 0;
      for (Partitioner part : partitioners) {
        final List<PartitionOutgoingBatch> outBatch = (List<PartitionOutgoingBatch>) part.getOutgoingBatches();
        batchCountSize = outBatch.size();
        if (!isFirst) {
          assertTrue(Math.abs(batchCountSize - prevBatchCountSize) <= 1);
        } else {
          isFirst = false;
        }
        prevBatchCountSize = batchCountSize;
      }
      partionSenderRootExec.getStats().startProcessing();
      try {
        partDecor.partitionBatch(incoming);
      } finally {
        partionSenderRootExec.getStats().stopProcessing();
      }
      if (actualThreads == 1) {
        assertEquals("With a single thread, parent and child waitNanos should match", partitioners.get(0).getStats().getWaitNanos(), partionSenderRootExec.getStats().getWaitNanos());
      }
      // Test the distribution of values. Since we use a fake NullVector, the
      // distribution is skewed: all records land in the first outgoing batch.
      partitioners = partDecor.getPartitioners();
      isFirst = true;
      for (Partitioner part : partitioners) {
        final List<PartitionOutgoingBatch> outBatches = (List<PartitionOutgoingBatch>) part.getOutgoingBatches();
        for (PartitionOutgoingBatch partOutBatch : outBatches) {
          final int recordCount = ((VectorAccessible) partOutBatch).getRecordCount();
          if (isFirst) {
            assertEquals("RecordCount", 100, recordCount);
            isFirst = false;
          } else {
            assertEquals("RecordCount", 0, recordCount);
          }
        }
      }
      // Test exceptions within threads and stats merging.
      partionSenderRootExec.getStats().startProcessing();
      try {
        partDecor.executeMethodLogic(new InjectExceptionTest());
        fail("Should throw IOException here");
      } catch (IOException ioe) {
        final OperatorProfile.Builder oPBuilder = OperatorProfile.newBuilder();
        partionSenderRootExec.getStats().addAllMetrics(oPBuilder);
        final List<MetricValue> metrics = oPBuilder.getMetricList();
        for (MetricValue metric : metrics) {
          if (Metric.BYTES_SENT.metricId() == metric.getMetricId()) {
            assertEquals("Should add metricValue irrespective of the exception", 5 * actualThreads, metric.getLongValue());
          }
          if (Metric.SENDING_THREADS_COUNT.metricId() == metric.getMetricId()) {
            assertEquals(actualThreads, metric.getLongValue());
          }
        }
        assertEquals(actualThreads - 1, ioe.getSuppressed().length);
      } finally {
        partionSenderRootExec.getStats().stopProcessing();
      }
    } finally {
      // Cleanup; guard against a failure before either object was constructed.
      if (partionSenderRootExec != null) {
        partionSenderRootExec.close();
      }
      if (context != null) {
        context.close();
      }
    }
  }
}
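The Math.abs(batchCountSize - prevBatchCountSize) <= 1 assertion in the test encodes the round-robin invariant: dealing N outgoing batches across T partitioners leaves sizes that differ by at most one. A standalone illustration of that arithmetic, with the counts invented for the example:

public class EvenSplitDemo {
  public static void main(String[] args) {
    final int batches = 10; // e.g. one outgoing batch per minor fragment endpoint
    final int threads = 3;  // e.g. actualThreads in the test above
    for (int t = 0; t < threads; t++) {
      // Round-robin dealing: the first (batches % threads) partitioners get one extra.
      final int size = batches / threads + (t < batches % threads ? 1 : 0);
      System.out.println("partitioner " + t + " -> " + size + " batches"); // 4, 3, 3
    }
  }
}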