Use of org.apache.drill.exec.ops.FragmentContext in project drill by apache.
The class TestSimpleUnion, method testUnion.
@Test
public void testUnion(@Injectable final DrillbitContext bitContext, @Injectable UserClientConnection connection) throws Throwable {
  mockDrillbitContext(bitContext);
  final PhysicalPlanReader reader = PhysicalPlanReaderTestFactory.defaultPhysicalPlanReader(c);
  final PhysicalPlan plan = reader.readPhysicalPlan(
      Files.toString(FileUtils.getResourceAsFile("/union/test1.json"), Charsets.UTF_8));
  final FunctionImplementationRegistry registry = new FunctionImplementationRegistry(c);
  // Build a FragmentContext for the default (single) plan fragment.
  final FragmentContext context = new FragmentContext(bitContext, PlanFragment.getDefaultInstance(), connection, registry);
  final SimpleRootExec exec = new SimpleRootExec(
      ImplCreator.getExec(context, (FragmentRoot) plan.getSortedOperators(false).iterator().next()));
  // Expected record counts for the batches produced by the union.
  final int[] counts = new int[] { 100, 50 };
  int i = 0;
  while (exec.next()) {
    System.out.println("iteration count:" + exec.getRecordCount());
    assertEquals(counts[i++], exec.getRecordCount());
  }
  if (context.getFailureCause() != null) {
    throw context.getFailureCause();
  }
  assertTrue(!context.isFailed());
}
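The test above never closes the FragmentContext it creates, while RunRootExec further down does call context.close(). Below is a minimal sketch of the same drive loop wrapped with explicit cleanup, under the same test fixture; the try/finally structure is an illustrative assumption, and only calls that appear elsewhere in these snippets are used.

// Illustrative cleanup sketch: close the executor and the context once the batches are drained.
final FragmentContext context = new FragmentContext(bitContext, PlanFragment.getDefaultInstance(), connection, registry);
try {
  final SimpleRootExec exec = new SimpleRootExec(
      ImplCreator.getExec(context, (FragmentRoot) plan.getSortedOperators(false).iterator().next()));
  while (exec.next()) {
    assertEquals(counts[i++], exec.getRecordCount());
  }
  if (context.getFailureCause() != null) {
    throw context.getFailureCause();
  }
  exec.close();
} finally {
  context.close();
}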
Use of org.apache.drill.exec.ops.FragmentContext in project drill by apache.
The class ControlMessageHandler, method startNewRemoteFragment.
private void startNewRemoteFragment(final PlanFragment fragment) throws UserRpcException {
  logger.debug("Received remote fragment start instruction {}", fragment);
  final DrillbitContext drillbitContext = bee.getContext();
  try {
    // We either need to start the fragment if it is a leaf fragment, or set up a fragment manager if it is non-leaf.
    if (fragment.getLeafFragment()) {
      final FragmentContext context = new FragmentContext(drillbitContext, fragment, drillbitContext.getFunctionImplementationRegistry());
      final ControlTunnel tunnel = drillbitContext.getController().getTunnel(fragment.getForeman());
      final FragmentStatusReporter statusReporter = new FragmentStatusReporter(context, tunnel);
      final FragmentExecutor fr = new FragmentExecutor(context, fragment, statusReporter);
      bee.addFragmentRunner(fr);
    } else {
      // Intermediate fragment: store a manager so incoming data can be routed to it.
      final NonRootFragmentManager manager = new NonRootFragmentManager(fragment, drillbitContext);
      drillbitContext.getWorkBus().addFragmentManager(manager);
    }
  } catch (final Exception e) {
    throw new UserRpcException(drillbitContext.getEndpoint(), "Failure while trying to start remote fragment", e);
  } catch (final OutOfMemoryError t) {
    // Only direct-memory exhaustion is reported back over RPC; any other OutOfMemoryError is rethrown.
    if (t.getMessage().startsWith("Direct buffer")) {
      throw new UserRpcException(drillbitContext.getEndpoint(), "Out of direct memory while trying to start remote fragment", t);
    } else {
      throw t;
    }
  }
}
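The OutOfMemoryError branch above reports only direct-memory exhaustion back over RPC and rethrows everything else. A minimal sketch of that check as a standalone predicate follows; the helper name isDirectMemoryError is hypothetical, not part of Drill.

// Hypothetical predicate mirroring the check above: only "Direct buffer" OutOfMemoryErrors are
// treated as direct-memory exhaustion worth wrapping in a UserRpcException.
private static boolean isDirectMemoryError(final OutOfMemoryError t) {
  final String message = t.getMessage();
  return message != null && message.startsWith("Direct buffer");
}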
Use of org.apache.drill.exec.ops.FragmentContext in project drill by apache.
The class Foreman, method setupRootFragment.
/**
 * Set up the root fragment (which will run locally), and submit it for execution.
 *
 * @param rootFragment the plan fragment that will run on this (the Foreman's) Drillbit
 * @param rootOperator the root operator of that fragment's operator tree
 * @throws ExecutionSetupException
 */
private void setupRootFragment(final PlanFragment rootFragment, final FragmentRoot rootOperator) throws ExecutionSetupException {
  @SuppressWarnings("resource")
  final FragmentContext rootContext = new FragmentContext(drillbitContext, rootFragment, queryContext, initiatingClient,
      drillbitContext.getFunctionImplementationRegistry());
  @SuppressWarnings("resource")
  final IncomingBuffers buffers = new IncomingBuffers(rootFragment, rootContext);
  rootContext.setBuffers(buffers);
  queryManager.addFragmentStatusTracker(rootFragment, true);
  final ControlTunnel tunnel = drillbitContext.getController().getTunnel(queryContext.getCurrentEndpoint());
  final FragmentExecutor rootRunner = new FragmentExecutor(rootContext, rootFragment, new FragmentStatusReporter(rootContext, tunnel), rootOperator);
  final RootFragmentManager fragmentManager = new RootFragmentManager(rootFragment.getHandle(), buffers, rootRunner);
  if (buffers.isDone()) {
    // If we don't have to wait for any incoming data, start the fragment runner immediately.
    bee.addFragmentRunner(fragmentManager.getRunnable());
  } else {
    // If we do, register the fragment manager with the work bus so incoming data can be routed to it.
    drillbitContext.getWorkBus().addFragmentManager(fragmentManager);
  }
}
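The closing if/else is the same scheduling decision seen in ControlMessageHandler above: either hand an executor to the worker bee right away or register a fragment manager on the work bus. A minimal sketch of that decision factored into a helper is below; the method name scheduleRootFragment is hypothetical, and only calls already shown in this snippet are used.

// Hypothetical helper capturing the branch above: run the root fragment immediately if all of its
// incoming buffers are already satisfied, otherwise park its manager on the work bus so incoming
// data can be routed to it.
private void scheduleRootFragment(final RootFragmentManager fragmentManager, final IncomingBuffers buffers) {
  if (buffers.isDone()) {
    bee.addFragmentRunner(fragmentManager.getRunnable());
  } else {
    drillbitContext.getWorkBus().addFragmentManager(fragmentManager);
  }
}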
Use of org.apache.drill.exec.ops.FragmentContext in project drill by apache.
The class HiveScanBatchCreator, method getBatch.
@Override
public ScanBatch getBatch(FragmentContext context, HiveSubScan config, List<RecordBatch> children) throws ExecutionSetupException {
  List<RecordReader> readers = Lists.newArrayList();
  HiveTableWithColumnCache table = config.getTable();
  List<InputSplit> splits = config.getInputSplits();
  List<HivePartition> partitions = config.getPartitions();
  boolean hasPartitions = (partitions != null && partitions.size() > 0);
  int i = 0;
  final UserGroupInformation proxyUgi = ImpersonationUtil.createProxyUgi(config.getUserName(), context.getQueryUserName());
  final HiveConf hiveConf = config.getHiveConf();
  final String formatName = table.getSd().getInputFormat();
  // Pick a format-specific reader class if one is registered; otherwise fall back to the generic reader.
  Class<? extends HiveAbstractReader> readerClass = HiveDefaultReader.class;
  if (readerMap.containsKey(formatName)) {
    readerClass = readerMap.get(formatName);
  }
  Constructor<? extends HiveAbstractReader> readerConstructor = null;
  try {
    readerConstructor = readerClass.getConstructor(HiveTableWithColumnCache.class, HivePartition.class, InputSplit.class,
        List.class, FragmentContext.class, HiveConf.class, UserGroupInformation.class);
    for (InputSplit split : splits) {
      readers.add(readerConstructor.newInstance(table, (hasPartitions ? partitions.get(i++) : null), split,
          config.getColumns(), context, hiveConf, proxyUgi));
    }
    // If there were no input splits, still add a single reader (with a null split).
    if (readers.size() == 0) {
      readers.add(readerConstructor.newInstance(table, null, null, config.getColumns(), context, hiveConf, proxyUgi));
    }
  } catch (Exception e) {
    logger.error("No constructor for {}, thrown {}", readerClass.getName(), e);
  }
  return new ScanBatch(config, context, readers.iterator());
}
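getBatch resolves the reader class through readerMap, a lookup from an input format class name to a HiveAbstractReader subclass, with HiveDefaultReader as the fallback. The sketch below shows how such a map could be populated; the field declaration and the TextInputFormat-to-HiveTextReader entry are illustrative assumptions rather than code copied from Drill.

// Hypothetical registration of format-specific readers; getBatch falls back to HiveDefaultReader
// for any input format name that has no entry here.
private static final Map<String, Class<? extends HiveAbstractReader>> readerMap = new HashMap<>();
static {
  // Assumed example entry: route Hive text tables to a specialized text reader.
  readerMap.put("org.apache.hadoop.mapred.TextInputFormat", HiveTextReader.class);
}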
Use of org.apache.drill.exec.ops.FragmentContext in project drill by apache.
The class RunRootExec, method main.
public static void main(String[] args) throws Exception {
  // args[0] is the path to a physical plan JSON file; args[1] is the number of times to execute it.
  String path = args[0];
  int iterations = Integer.parseInt(args[1]);
  Drillbit bit = new Drillbit(c, RemoteServiceSet.getLocalServiceSet(), ClassPathScanner.fromPrescan(c));
  bit.run();
  DrillbitContext bitContext = bit.getContext();
  PhysicalPlanReader reader = bitContext.getPlanReader();
  PhysicalPlan plan = reader.readPhysicalPlan(Files.toString(new File(path), Charsets.UTF_8));
  FunctionImplementationRegistry registry = bitContext.getFunctionImplementationRegistry();
  FragmentContext context = new FragmentContext(bitContext, PlanFragment.getDefaultInstance(), null, registry);
  SimpleRootExec exec;
  for (int i = 0; i < iterations; i++) {
    Stopwatch w = Stopwatch.createStarted();
    System.out.println("STARTITER:" + i);
    exec = new SimpleRootExec(ImplCreator.getExec(context, (FragmentRoot) plan.getSortedOperators(false).iterator().next()));
    while (exec.next()) {
      // Release each batch's value vectors; this benchmark only measures execution time.
      for (ValueVector v : exec) {
        v.clear();
      }
    }
    System.out.println("ENDITER: " + i);
    System.out.println("TIME: " + w.elapsed(TimeUnit.MILLISECONDS) + "ms");
    exec.close();
  }
  context.close();
  bit.close();
}
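A hypothetical invocation of this benchmark: the first argument is the path to a physical plan JSON file and the second is the iteration count; the path below is made up for illustration.

// Run the plan at the given path five times (both values are illustrative).
RunRootExec.main(new String[] { "/tmp/physical_plan.json", "5" });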