Use of org.apache.drill.exec.proto.ExecProtos.FragmentHandle in the Apache Drill project.
In class ParquetFormatPlugin, method getRecordWriter:
public RecordWriter getRecordWriter(FragmentContext context, ParquetWriter writer) throws IOException, OutOfMemoryException {
  Map<String, String> options = Maps.newHashMap();
  options.put("location", writer.getLocation());
  // Use "<majorFragmentId>_<minorFragmentId>" as the file prefix so that
  // parallel writer fragments never produce colliding file names.
  FragmentHandle handle = context.getHandle();
  String fragmentId = String.format("%d_%d", handle.getMajorFragmentId(), handle.getMinorFragmentId());
  options.put("prefix", fragmentId);
  options.put(FileSystem.FS_DEFAULT_NAME_KEY, ((FileSystemConfig) writer.getStorageConfig()).connection);
  // Copy the session/system options that control the Parquet output format.
  options.put(ExecConstants.PARQUET_BLOCK_SIZE, context.getOptions().getOption(ExecConstants.PARQUET_BLOCK_SIZE).num_val.toString());
  options.put(ExecConstants.PARQUET_WRITER_USE_SINGLE_FS_BLOCK, context.getOptions().getOption(ExecConstants.PARQUET_WRITER_USE_SINGLE_FS_BLOCK).bool_val.toString());
  options.put(ExecConstants.PARQUET_PAGE_SIZE, context.getOptions().getOption(ExecConstants.PARQUET_PAGE_SIZE).num_val.toString());
  options.put(ExecConstants.PARQUET_DICT_PAGE_SIZE, context.getOptions().getOption(ExecConstants.PARQUET_DICT_PAGE_SIZE).num_val.toString());
  options.put(ExecConstants.PARQUET_WRITER_COMPRESSION_TYPE, context.getOptions().getOption(ExecConstants.PARQUET_WRITER_COMPRESSION_TYPE).string_val);
  options.put(ExecConstants.PARQUET_WRITER_ENABLE_DICTIONARY_ENCODING, context.getOptions().getOption(ExecConstants.PARQUET_WRITER_ENABLE_DICTIONARY_ENCODING).bool_val.toString());
  RecordWriter recordWriter = new ParquetRecordWriter(context, writer);
  recordWriter.init(options);
  return recordWriter;
}
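For context, a minimal sketch of the prefix convention the writer above relies on, using only the protobuf-generated FragmentHandle builder and getter API visible in these snippets (the ids are illustrative values, not ones taken from a real plan):

  // A minimal sketch, assuming only the generated FragmentHandle API shown above.
  FragmentHandle handle = FragmentHandle.newBuilder()
      .setMajorFragmentId(2)
      .setMinorFragmentId(5)
      .build();
  // Yields "2_5": each minor fragment writes under its own prefix, so parallel
  // Parquet writers never produce colliding file names.
  String prefix = String.format("%d_%d", handle.getMajorFragmentId(), handle.getMinorFragmentId());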
Use of org.apache.drill.exec.proto.ExecProtos.FragmentHandle in the Apache Drill project.
In class SimpleParallelizer, method generateWorkUnit:
protected QueryWorkUnit generateWorkUnit(OptionList options, DrillbitEndpoint foremanNode, QueryId queryId, PhysicalPlanReader reader, Fragment rootNode, PlanningSet planningSet, UserSession session, QueryContextInformation queryContextInfo) throws ExecutionSetupException {
  List<PlanFragment> fragments = Lists.newArrayList();
  PlanFragment rootFragment = null;
  FragmentRoot rootOperator = null;
  // Generate the individual plan fragments and their assignments. All endpoints must be
  // assigned before we can materialize, so we start a new loop here rather than utilizing the previous one.
  for (Wrapper wrapper : planningSet) {
    Fragment node = wrapper.getNode();
    final PhysicalOperator physicalOperatorRoot = node.getRoot();
    boolean isRootNode = rootNode == node;
    if (isRootNode && wrapper.getWidth() != 1) {
      throw new ForemanSetupException(String.format("Failure while trying to setup fragment. The root fragment must always have parallelization one. In the current case, the width was set to %d.", wrapper.getWidth()));
    }
    // A fragment is a leaf if it doesn't rely on any other exchanges.
    boolean isLeafFragment = node.getReceivingExchangePairs().size() == 0;
    // Create a minor fragment for each parallel instance (width) of this major fragment.
    for (int minorFragmentId = 0; minorFragmentId < wrapper.getWidth(); minorFragmentId++) {
      IndexedFragmentNode iNode = new IndexedFragmentNode(minorFragmentId, wrapper);
      wrapper.resetAllocation();
      PhysicalOperator op = physicalOperatorRoot.accept(Materializer.INSTANCE, iNode);
      Preconditions.checkArgument(op instanceof FragmentRoot);
      FragmentRoot root = (FragmentRoot) op;
      // Serialize the fragment plan and the options as JSON.
      String plan;
      String optionsData;
      try {
        plan = reader.writeJson(root);
        optionsData = reader.writeJson(options);
      } catch (JsonProcessingException e) {
        throw new ForemanSetupException("Failure while trying to convert fragment into json.", e);
      }
      FragmentHandle handle = FragmentHandle.newBuilder()
          .setMajorFragmentId(wrapper.getMajorFragmentId())
          .setMinorFragmentId(minorFragmentId)
          .setQueryId(queryId)
          .build();
      PlanFragment fragment = PlanFragment.newBuilder()
          .setForeman(foremanNode)
          .setFragmentJson(plan)
          .setHandle(handle)
          .setAssignment(wrapper.getAssignedEndpoint(minorFragmentId))
          .setLeafFragment(isLeafFragment)
          .setContext(queryContextInfo)
          .setMemInitial(wrapper.getInitialAllocation())
          .setMemMax(wrapper.getMaxAllocation())
          .setOptionsJson(optionsData)
          .setCredentials(session.getCredentials())
          .addAllCollector(CountRequiredFragments.getCollectors(root))
          .build();
      if (isRootNode) {
        if (logger.isDebugEnabled()) {
          logger.debug("Root fragment:\n {}", DrillStringUtils.unescapeJava(fragment.toString()));
        }
        rootFragment = fragment;
        rootOperator = root;
      } else {
        if (logger.isDebugEnabled()) {
          logger.debug("Remote fragment:\n {}", DrillStringUtils.unescapeJava(fragment.toString()));
        }
        fragments.add(fragment);
      }
    }
  }
  return new QueryWorkUnit(rootOperator, rootFragment, fragments);
}
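On the receiving side, the handle travels inside the PlanFragment. A minimal sketch of how a Drillbit could identify an incoming fragment, assuming a PlanFragment like the one built above and any SLF4J logger (getHandle and getQueryId are the standard protobuf getters for the fields set in generateWorkUnit; QueryIdHelper.getQueryId renders the QueryId as a string, as in the DumpCatTest below):

  FragmentHandle handle = fragment.getHandle();
  // Render the globally unique fragment identity: <query-id>:<major>:<minor>.
  String id = String.format("%s:%d:%d",
      QueryIdHelper.getQueryId(handle.getQueryId()),
      handle.getMajorFragmentId(),
      handle.getMinorFragmentId());
  logger.debug("Received fragment {}", id);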
Use of org.apache.drill.exec.proto.ExecProtos.FragmentHandle in the Apache Drill project.
In class DumpCatTest, method testDumpCat:
@Test
public void testDumpCat(@Injectable final DrillbitContext bitContext, @Injectable UserClientConnection connection) throws Throwable {
  mockDrillbitContext(bitContext);
  final PhysicalPlanReader reader = defaultPhysicalPlanReader(c);
  final PhysicalPlan plan = reader.readPhysicalPlan(Files.toString(FileUtils.getResourceAsFile("/trace/simple_trace.json"), Charsets.UTF_8));
  final FunctionImplementationRegistry registry = new FunctionImplementationRegistry(c);
  final FragmentContext context = new FragmentContext(bitContext, PlanFragment.getDefaultInstance(), connection, registry);
  final SimpleRootExec exec = new SimpleRootExec(ImplCreator.getExec(context, (FragmentRoot) plan.getSortedOperators(false).iterator().next()));
  // Drain all record batches; the trace operator dumps them to disk as a side effect.
  while (exec.next()) {
  }
  if (context.getFailureCause() != null) {
    throw context.getFailureCause();
  }
  assertFalse(context.isFailed());
  exec.close();
  FragmentHandle handle = context.getHandle();
  // Form the file name to which the trace output will dump the record batches.
  String qid = QueryIdHelper.getQueryId(handle.getQueryId());
  final int majorFragmentId = handle.getMajorFragmentId();
  final int minorFragmentId = handle.getMinorFragmentId();
  final String logLocation = c.getString(ExecConstants.TRACE_DUMP_DIRECTORY);
  System.out.println("Found log location: " + logLocation);
  final String filename = String.format("%s//%s_%d_%d_mock-scan", logLocation, qid, majorFragmentId, minorFragmentId);
  System.out.println("File Name: " + filename);
  final Configuration conf = new Configuration();
  conf.set(FileSystem.FS_DEFAULT_NAME_KEY, c.getString(ExecConstants.TRACE_DUMP_FILESYSTEM));
  final FileSystem fs = FileSystem.get(conf);
  final Path path = new Path(filename);
  assertTrue("Trace file does not exist", fs.exists(path));
  final DumpCat dumpCat = new DumpCat();
  // Test query mode.
  try (final FileInputStream input = new FileInputStream(filename)) {
    dumpCat.doQuery(input);
  }
  // Test batch mode.
  try (final FileInputStream input = new FileInputStream(filename)) {
    dumpCat.doBatch(input, 0, true);
  }
}
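The trace file name is derived entirely from the FragmentHandle. A condensed sketch of that mapping as a standalone helper (the helper name is hypothetical; the test above additionally uses a double slash between the directory and the file name):

  // Hypothetical helper mirroring the test's naming scheme:
  // <trace-dir>/<query-id>_<major>_<minor>_<operator-tag>
  static String traceFileName(String logLocation, FragmentHandle handle, String operatorTag) {
    return String.format("%s/%s_%d_%d_%s", logLocation,
        QueryIdHelper.getQueryId(handle.getQueryId()),
        handle.getMajorFragmentId(), handle.getMinorFragmentId(), operatorTag);
  }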
Use of org.apache.drill.exec.proto.ExecProtos.FragmentHandle in the Apache Drill project.
In class FragmentContext, method getFragIdString:
public String getFragIdString() {
  final FragmentHandle handle = getHandle();
  final String frag = handle != null ? handle.getMajorFragmentId() + ":" + handle.getMinorFragmentId() : "0:0";
  return frag;
}
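A hypothetical call site, assuming context is a FragmentContext like the one above:

  // A handle with majorFragmentId=3 and minorFragmentId=7 yields "3:7";
  // a context with no handle yet falls back to "0:0". Handy for log messages
  // and thread names.
  logger.info("Fragment {} starting", context.getFragIdString());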
Use of org.apache.drill.exec.proto.ExecProtos.FragmentHandle in the Apache Drill project.
In class ControlMessageHandler, method handle:
@Override
public void handle(ControlConnection connection, int rpcType, ByteBuf pBody, ByteBuf dBody, ResponseSender sender) throws RpcException {
  if (RpcConstants.EXTRA_DEBUGGING) {
    logger.debug("Received bit com message of type {}", rpcType);
  }
  switch (rpcType) {
    case RpcType.REQ_CANCEL_FRAGMENT_VALUE: {
      final FragmentHandle handle = get(pBody, FragmentHandle.PARSER);
      cancelFragment(handle);
      sender.send(ControlRpcConfig.OK);
      break;
    }
    case RpcType.REQ_CUSTOM_VALUE: {
      final CustomMessage customMessage = get(pBody, CustomMessage.PARSER);
      sender.send(handlerRegistry.handle(customMessage, (DrillBuf) dBody));
      break;
    }
    case RpcType.REQ_RECEIVER_FINISHED_VALUE: {
      final FinishedReceiver finishedReceiver = get(pBody, FinishedReceiver.PARSER);
      receivingFragmentFinished(finishedReceiver);
      sender.send(ControlRpcConfig.OK);
      break;
    }
    case RpcType.REQ_FRAGMENT_STATUS_VALUE:
      bee.getContext().getWorkBus().statusUpdate(get(pBody, FragmentStatus.PARSER));
      // TODO: Support a type of message that has no response.
      sender.send(ControlRpcConfig.OK);
      break;
    case RpcType.REQ_QUERY_CANCEL_VALUE: {
      final QueryId queryId = get(pBody, QueryId.PARSER);
      final Foreman foreman = bee.getForemanForQueryId(queryId);
      if (foreman != null) {
        foreman.cancel();
        sender.send(ControlRpcConfig.OK);
      } else {
        sender.send(ControlRpcConfig.FAIL);
      }
      break;
    }
    case RpcType.REQ_INITIALIZE_FRAGMENTS_VALUE: {
      final InitializeFragments fragments = get(pBody, InitializeFragments.PARSER);
      for (int i = 0; i < fragments.getFragmentCount(); i++) {
        startNewRemoteFragment(fragments.getFragment(i));
      }
      sender.send(ControlRpcConfig.OK);
      break;
    }
    case RpcType.REQ_QUERY_STATUS_VALUE: {
      final QueryId queryId = get(pBody, QueryId.PARSER);
      final Foreman foreman = bee.getForemanForQueryId(queryId);
      if (foreman == null) {
        throw new RpcException("Query not running on node.");
      }
      final QueryProfile profile = foreman.getQueryManager().getQueryProfile();
      sender.send(new Response(RpcType.RESP_QUERY_STATUS, profile));
      break;
    }
    case RpcType.REQ_UNPAUSE_FRAGMENT_VALUE: {
      final FragmentHandle handle = get(pBody, FragmentHandle.PARSER);
      resumeFragment(handle);
      sender.send(ControlRpcConfig.OK);
      break;
    }
    default:
      throw new RpcException("Not yet supported.");
  }
}
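The handler decodes each FragmentHandle from the raw RPC body via get(pBody, FragmentHandle.PARSER). A minimal sketch of the equivalent protobuf round trip, using only the standard methods protobuf generates for ExecProtos.FragmentHandle (the ids are illustrative):

  // Serialize a handle to bytes and decode it again; this mirrors what the
  // control RPC layer does with the message body.
  FragmentHandle original = FragmentHandle.newBuilder()
      .setMajorFragmentId(1)
      .setMinorFragmentId(4)
      .build();
  byte[] wire = original.toByteArray();
  try {
    FragmentHandle decoded = FragmentHandle.parseFrom(wire);
    assert decoded.getMajorFragmentId() == 1 && decoded.getMinorFragmentId() == 4;
  } catch (InvalidProtocolBufferException e) {
    // A corrupt or truncated body fails to parse.
  }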