Use of org.apache.drill.exec.exception.FragmentSetupException in project drill by apache.
The class ExchangeRemoverMaterializer, method visitStore:
@Override
public PhysicalOperator visitStore(Store store, IndexedFragmentNode iNode) throws ExecutionSetupException {
  PhysicalOperator child = store.getChild().accept(this, iNode);
  iNode.addAllocation(store);
  try {
    PhysicalOperator o = store.getSpecificStore(child, iNode.getMinorFragmentId());
    return o;
  } catch (PhysicalOperatorSetupException e) {
    throw new FragmentSetupException("Failure while generating a specific Store materialization.", e);
  }
}
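For orientation, a materializer like this is typically driven by handing the visitor to the plan root's accept method, which then recurses through child.accept(this, iNode) as shown above. A hypothetical sketch of the call pattern (rootOperator, materializer, and the no-arg constructor are assumptions for illustration, not Drill's actual wiring):

// Hypothetical sketch: one accept() call at the root rewrites the whole
// operator tree for a specific minor fragment.
ExchangeRemoverMaterializer materializer = new ExchangeRemoverMaterializer(); // assumed constructor
PhysicalOperator materializedRoot = rootOperator.accept(materializer, iNode);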
Use of org.apache.drill.exec.exception.FragmentSetupException in project drill by apache.
The class TestBitRpc, method testConnectionBackpressure:
@Test
public void testConnectionBackpressure(@Injectable WorkerBee bee, @Injectable final WorkEventBus workBus) throws Exception {
  DrillConfig config1 = DrillConfig.create();
  final BootStrapContext c = new BootStrapContext(config1, ClassPathScanner.fromPrescan(config1));
  DrillConfig config2 = DrillConfig.create();
  BootStrapContext c2 = new BootStrapContext(config2, ClassPathScanner.fromPrescan(config2));

  final FragmentContext fcon = new MockUp<FragmentContext>() {
    @Mock
    BufferAllocator getAllocator() {
      return c.getAllocator();
    }
  }.getMockInstance();

  final FragmentManager fman = new MockUp<FragmentManager>() {
    int v = 0;

    @Mock
    boolean handle(IncomingDataBatch batch) throws FragmentSetupException, IOException {
      try {
        v++;
        if (v % 10 == 0) {
          // stall every tenth batch to force backpressure on the sender
          System.out.println("sleeping.");
          Thread.sleep(3000);
        }
      } catch (InterruptedException e) {
        // ignored: an interrupt simply cuts the simulated stall short
      }
      RawFragmentBatch rfb = batch.newRawFragmentBatch(c.getAllocator());
      rfb.sendOk();
      rfb.release();
      return true;
    }

    @Mock
    public FragmentContext getFragmentContext() {
      return fcon;
    }
  }.getMockInstance();

  new NonStrictExpectations() {
    {
      workBus.getFragmentManagerIfExists((FragmentHandle) any);
      result = fman;
      workBus.getFragmentManager((FragmentHandle) any);
      result = fman;
    }
  };

  int port = 1234;
  DataConnectionConfig config = new DataConnectionConfig(c.getAllocator(), c, new DataServerRequestHandler(workBus, bee));
  DataServer server = new DataServer(config);
  port = server.bind(port, true);
  DrillbitEndpoint ep = DrillbitEndpoint.newBuilder().setAddress("localhost").setDataPort(port).build();
  DataConnectionManager manager = new DataConnectionManager(ep, config);
  DataTunnel tunnel = new DataTunnel(manager);
  AtomicLong max = new AtomicLong(0);
  for (int i = 0; i < 40; i++) {
    long t1 = System.currentTimeMillis();
    tunnel.sendRecordBatch(new TimingOutcome(max),
        new FragmentWritableBatch(false, QueryId.getDefaultInstance(), 1, 1, 1, 1, getRandomBatch(c.getAllocator(), 5000)));
    System.out.println(System.currentTimeMillis() - t1);
  }
  System.out.println(String.format("Max time: %d", max.get()));
  // with every tenth batch delayed by 3s, at least one send should have been held up
  assertTrue(max.get() > 2700);
  Thread.sleep(5000);
}
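TimingOutcome is referenced above but not shown in this excerpt. Below is a minimal sketch of what it plausibly looks like, assuming Drill's RpcOutcomeListener interface with failed/success/interrupted callbacks and Guava's Stopwatch; the point is that it records the longest send-to-ack latency so the final assertion can detect that the mocked handler's 3-second stalls actually back-pressured at least one send:

// Sketch (assumed shape): record the maximum observed latency across sends.
private static class TimingOutcome implements RpcOutcomeListener<Ack> {
  private final AtomicLong max;
  private final Stopwatch watch = Stopwatch.createStarted();

  TimingOutcome(AtomicLong max) {
    this.max = max;
  }

  @Override
  public void failed(RpcException ex) {
    ex.printStackTrace();
  }

  @Override
  public void success(Ack value, ByteBuf buffer) {
    // keep the largest elapsed time seen by any send
    max.accumulateAndGet(watch.elapsed(TimeUnit.MILLISECONDS), Math::max);
  }

  @Override
  public void interrupted(InterruptedException e) {
    // no-op in this sketch
  }
}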
Use of org.apache.drill.exec.exception.FragmentSetupException in project drill by apache.
The class IncomingBuffers, method batchArrived:
public boolean batchArrived(final IncomingDataBatch incomingBatch) throws FragmentSetupException, IOException {
  // Only create a local batch reference while holding the lock and while not closed.
  // Otherwise we would leak memory.
  try (AutoCloseableLock lock = sharedIncomingBatchLock.open()) {
    if (closed) {
      return false;
    }
    if (incomingBatch.getHeader().getIsLastBatch()) {
      streamsRemaining.decrementAndGet();
    }
    final int sendMajorFragmentId = incomingBatch.getHeader().getSendingMajorFragmentId();
    DataCollector collector = collectorMap.get(sendMajorFragmentId);
    if (collector == null) {
      throw new FragmentSetupException(String.format(
          "We received a major fragment id that we were not expecting. The id was %d. %s",
          sendMajorFragmentId, Arrays.toString(collectorMap.values().toArray())));
    }
    synchronized (collector) {
      final RawFragmentBatch newRawFragmentBatch = incomingBatch.newRawFragmentBatch(context.getAllocator());
      boolean decrementedToZero = collector.batchArrived(
          incomingBatch.getHeader().getSendingMinorFragmentId(), newRawFragmentBatch);
      newRawFragmentBatch.release();
      // we should only return true if remaining required has been decremented and is currently equal to zero.
      return decrementedToZero;
    }
  }
}
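The sharedIncomingBatchLock.open() idiom relies on a small wrapper that makes a plain java.util.concurrent lock usable with try-with-resources, so the lock is released even when batchArrived throws. A minimal self-contained sketch of that pattern (Drill ships a utility like this; the exact class may differ):

import java.util.concurrent.locks.Lock;

// open() acquires the lock and returns this, so the try-with-resources block
// above releases it automatically on every exit path.
public class AutoCloseableLock implements AutoCloseable {
  private final Lock lock;

  public AutoCloseableLock(Lock lock) {
    this.lock = lock;
  }

  public AutoCloseableLock open() {
    lock.lock();
    return this;
  }

  @Override
  public void close() {
    lock.unlock();
  }
}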
Use of org.apache.drill.exec.exception.FragmentSetupException in project drill by apache.
The class Materializer, method visitStore:
@Override
public PhysicalOperator visitStore(Store store, IndexedFragmentNode iNode) throws ExecutionSetupException {
  PhysicalOperator child = store.getChild().accept(this, iNode);
  iNode.addAllocation(store);
  try {
    PhysicalOperator o = store.getSpecificStore(child, iNode.getMinorFragmentId());
    o.setOperatorId(Short.MAX_VALUE & store.getOperatorId());
    // logger.debug("New materialized store node {} with child {}", o, child);
    return o;
  } catch (PhysicalOperatorSetupException e) {
    // preserve the cause so the original failure is not swallowed
    throw new FragmentSetupException("Failure while generating a specific Store materialization.", e);
  }
}
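The notable difference from the ExchangeRemoverMaterializer version is the id assignment: Short.MAX_VALUE is 0x7FFF, so the AND keeps only the low 15 bits of the operator id, discarding whatever is encoded in the high bits (plausibly a fragment-level prefix). A standalone illustration of the masking:

// Runnable demonstration of the Short.MAX_VALUE mask; the id layout here
// (high bits as a hypothetical prefix) is illustrative only.
public class OperatorIdMaskDemo {
  public static void main(String[] args) {
    int operatorId = (3 << 16) | 42;           // hypothetical id with high bits set
    int masked = Short.MAX_VALUE & operatorId; // 0x7FFF mask keeps the low 15 bits
    System.out.println(masked);                // prints 42
  }
}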
Use of org.apache.drill.exec.exception.FragmentSetupException in project drill by apache.
The class DataServerRequestHandler, method handle:
@Override
public void handle(DataServerConnection connection, int rpcType, ByteBuf pBody, ByteBuf dBody, ResponseSender sender) throws RpcException {
  assert rpcType == BitData.RpcType.REQ_RECORD_BATCH_VALUE;
  final FragmentRecordBatch fragmentBatch = RpcBus.get(pBody, FragmentRecordBatch.PARSER);
  final AckSender ack = new AckSender(sender);
  // increment so we don't get false returns.
  ack.increment();
  try {
    final IncomingDataBatch batch = new IncomingDataBatch(fragmentBatch, (DrillBuf) dBody, ack);
    final int targetCount = fragmentBatch.getReceivingMinorFragmentIdCount();
    // randomize who gets first transfer (and thus ownership) so memory usage is balanced when we're sharing
    // amongst multiple fragments.
    final int firstOwner = ThreadLocalRandom.current().nextInt(targetCount);
    submit(batch, firstOwner, targetCount);
    submit(batch, 0, firstOwner);
  } catch (IOException | FragmentSetupException e) {
    logger.error("Failure while getting fragment manager. {}",
        QueryIdHelper.getQueryIdentifiers(fragmentBatch.getQueryId(),
            fragmentBatch.getReceivingMajorFragmentId(),
            fragmentBatch.getReceivingMinorFragmentIdList()), e);
    ack.clear();
    sender.send(new Response(BitData.RpcType.ACK, Acks.FAIL));
  } finally {
    // decrement the extra reference we grabbed at the top.
    ack.sendOk();
  }
}
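submit() is not shown in this excerpt. Given the two calls above, it plausibly delivers the batch to every receiving minor fragment in the half-open range [start, end); splitting the range at a random firstOwner means the fragment that receives the first transfer, and thus buffer ownership, rotates across requests. A hypothetical sketch (getHandle is an assumed lookup helper):

// Hypothetical sketch of the submit() helper referenced above.
private void submit(IncomingDataBatch batch, int start, int end)
    throws FragmentSetupException, IOException {
  for (int minor = start; minor < end; minor++) {
    // find the manager for the minor-th receiving fragment and hand it the batch
    FragmentManager manager = workBus.getFragmentManager(getHandle(batch, minor)); // assumed helper
    if (manager != null) {
      manager.handle(batch);
    }
  }
}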