Use of org.apache.drill.exec.ops.AccountingDataTunnel in project drill by axbaretto.
The class PartitionSenderRootExec, method sendEmptyBatch:
private void sendEmptyBatch(boolean isLast) {
  BatchSchema schema = incoming.getSchema();
  if (schema == null) {
    // If the incoming batch has no schema (possible when there are no input records),
    // create an empty schema to avoid NPE.
    schema = BatchSchema.newBuilder().build();
  }
  FragmentHandle handle = context.getHandle();
  for (MinorFragmentEndpoint destination : popConfig.getDestinations()) {
    AccountingDataTunnel tunnel = context.getDataTunnel(destination.getEndpoint());
    FragmentWritableBatch writableBatch = FragmentWritableBatch.getEmptyBatchWithSchema(isLast,
        handle.getQueryId(),
        handle.getMajorFragmentId(),
        handle.getMinorFragmentId(),
        operator.getOppositeMajorFragmentId(),
        destination.getId(),
        schema);
    stats.startWait();
    try {
      tunnel.sendRecordBatch(writableBatch);
    } finally {
      stats.stopWait();
    }
  }
  stats.addLongStat(Metric.BATCHES_SENT, 1);
}
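This sendEmptyBatch example brackets the blocking tunnel send with stats.startWait() and stats.stopWait() in a try/finally block, so the operator's wait metric counts only the time spent inside the tunnel and is recorded even when the send throws. A minimal, self-contained sketch of that accounting pattern follows; the Stats and Tunnel interfaces are stand-ins invented for illustration, not Drill APIs.

// Hypothetical stand-ins for illustration only; these are not Drill classes.
interface Stats {
  void startWait();
  void stopWait();
}

interface Tunnel {
  void send(Object batch) throws Exception;
}

final class TimedSend {
  // Bracket a blocking send with wait accounting, mirroring the
  // startWait()/try/send/finally/stopWait() shape of sendEmptyBatch().
  static void sendWithWaitAccounting(Stats stats, Tunnel tunnel, Object batch) throws Exception {
    stats.startWait();
    try {
      tunnel.send(batch);
    } finally {
      stats.stopWait(); // always recorded, even if the send fails
    }
  }
}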
Use of org.apache.drill.exec.ops.AccountingDataTunnel in project drill by apache.
The class RuntimeFilterReporter, method sendOut:
public void sendOut(List<BloomFilter> bloomFilters, List<String> probeFields,
    RuntimeFilterDef runtimeFilterDef, int hashJoinOpId) {
  boolean sendToForeman = runtimeFilterDef.isSendToForeman();
  long rfIdentifier = runtimeFilterDef.getRuntimeFilterIdentifier();
  ExecProtos.FragmentHandle fragmentHandle = context.getHandle();
  DrillBuf[] data = new DrillBuf[bloomFilters.size()];
  List<Integer> bloomFilterSizeInBytes = new ArrayList<>();
  int i = 0;
  for (BloomFilter bloomFilter : bloomFilters) {
    DrillBuf bfContent = bloomFilter.getContent();
    data[i] = bfContent;
    bloomFilterSizeInBytes.add(bfContent.capacity());
    i++;
  }
  UserBitShared.QueryId queryId = fragmentHandle.getQueryId();
  int majorFragmentId = fragmentHandle.getMajorFragmentId();
  int minorFragmentId = fragmentHandle.getMinorFragmentId();
  BitData.RuntimeFilterBDef.Builder builder = BitData.RuntimeFilterBDef.newBuilder();
  for (String probeField : probeFields) {
    builder.addProbeFields(probeField);
  }
  BitData.RuntimeFilterBDef runtimeFilterB = builder.setQueryId(queryId)
      .setMajorFragmentId(majorFragmentId)
      .setMinorFragmentId(minorFragmentId)
      .setToForeman(sendToForeman)
      .setHjOpId(hashJoinOpId)
      .setRfIdentifier(rfIdentifier)
      .addAllBloomFilterSizeInBytes(bloomFilterSizeInBytes)
      .build();
  RuntimeFilterWritable runtimeFilterWritable = new RuntimeFilterWritable(runtimeFilterB, data);
  if (sendToForeman) {
    CoordinationProtos.DrillbitEndpoint foremanEndpoint = context.getForemanEndpoint();
    AccountingDataTunnel dataTunnel = context.getDataTunnel(foremanEndpoint);
    dataTunnel.sendRuntimeFilter(runtimeFilterWritable);
  } else {
    context.addRuntimeFilter(runtimeFilterWritable);
  }
}
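sendOut packs each BloomFilter's backing DrillBuf into an array and records each buffer's capacity in the parallel bloomFilterSizeInBytes list, so the receiver can split the combined payload back into individual filters; the RuntimeFilterBDef protobuf carries only this metadata while the buffers travel alongside it in the RuntimeFilterWritable. A small sketch of that parallel packing, using java.nio.ByteBuffer as a stand-in for DrillBuf (an assumption made purely for illustration):

import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.List;

final class FilterPacking {
  // Pack each filter's buffer into the output array and record its capacity
  // in a parallel size list, mirroring how sendOut() prepares its payload.
  static List<Integer> packSizes(List<ByteBuffer> filterContents, ByteBuffer[] out) {
    List<Integer> sizes = new ArrayList<>(filterContents.size());
    int i = 0;
    for (ByteBuffer content : filterContents) {
      out[i++] = content;
      sizes.add(content.capacity());
    }
    return sizes;
  }
}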
Use of org.apache.drill.exec.ops.AccountingDataTunnel in project drill by apache.
The class RuntimeFilterSink, method route:
private void route(RuntimeFilterWritable srcRuntimeFilterWritable) {
  BitData.RuntimeFilterBDef runtimeFilterB = srcRuntimeFilterWritable.getRuntimeFilterBDef();
  int joinMajorId = runtimeFilterB.getMajorFragmentId();
  UserBitShared.QueryId queryId = runtimeFilterB.getQueryId();
  List<String> probeFields = runtimeFilterB.getProbeFieldsList();
  List<Integer> sizeInBytes = runtimeFilterB.getBloomFilterSizeInBytesList();
  long rfIdentifier = runtimeFilterB.getRfIdentifier();
  DrillBuf[] data = srcRuntimeFilterWritable.getData();
  List<CoordinationProtos.DrillbitEndpoint> scanNodeEps = joinMjId2probeScanEps.get(joinMajorId);
  int scanNodeSize = scanNodeEps.size();
  srcRuntimeFilterWritable.retainBuffers(scanNodeSize - 1);
  int scanNodeMjId = joinMjId2ScanMjId.get(joinMajorId);
  for (int minorId = 0; minorId < scanNodeEps.size(); minorId++) {
    BitData.RuntimeFilterBDef.Builder builder = BitData.RuntimeFilterBDef.newBuilder();
    for (String probeField : probeFields) {
      builder.addProbeFields(probeField);
    }
    BitData.RuntimeFilterBDef runtimeFilterBDef = builder.setQueryId(queryId)
        .setMajorFragmentId(scanNodeMjId)
        .setMinorFragmentId(minorId)
        .setToForeman(false)
        .setRfIdentifier(rfIdentifier)
        .addAllBloomFilterSizeInBytes(sizeInBytes)
        .build();
    RuntimeFilterWritable runtimeFilterWritable = new RuntimeFilterWritable(runtimeFilterBDef, data);
    CoordinationProtos.DrillbitEndpoint drillbitEndpoint = scanNodeEps.get(minorId);
    DataTunnel dataTunnel = drillbitContext.getDataConnectionsPool().getTunnel(drillbitEndpoint);
    Consumer<RpcException> exceptionConsumer = new Consumer<RpcException>() {
      @Override
      public void accept(final RpcException e) {
        logger.warn("fail to broadcast a runtime filter to the probe side scan node", e);
      }

      @Override
      public void interrupt(final InterruptedException e) {
        logger.warn("fail to broadcast a runtime filter to the probe side scan node", e);
      }
    };
    RpcOutcomeListener<BitData.AckWithCredit> statusHandler =
        new DataTunnelStatusHandler(exceptionConsumer, sendingAccountor);
    AccountingDataTunnel accountingDataTunnel =
        new AccountingDataTunnel(dataTunnel, sendingAccountor, statusHandler);
    accountingDataTunnel.sendRuntimeFilter(runtimeFilterWritable);
  }
}
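route() fans a single runtime filter out to every probe-side scan endpoint, and all destinations share the same underlying DrillBufs. Each destination is expected to release one reference when its send completes, and the writable already holds one reference, so retainBuffers(scanNodeSize - 1) tops the count up to exactly one reference per destination. A small sketch of that counting rule with a hypothetical reference-counted buffer (illustrative only, not Drill's allocator API):

final class RefCountedBuffer {
  private int refCount = 1; // the creator starts with one reference

  void retain(int increment) {
    refCount += increment;
  }

  // Each consumer releases once when it is done;
  // the buffer can be freed when the count reaches zero.
  boolean release() {
    return --refCount == 0;
  }
}

final class FanOutExample {
  static void shareAcross(RefCountedBuffer buf, int destinations) {
    // One reference already exists, so add destinations - 1 more,
    // mirroring retainBuffers(scanNodeSize - 1) in route().
    buf.retain(destinations - 1);
  }
}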