Use of org.apache.drill.exec.ops.FragmentContext in project drill by apache.
From the class TestCastFunctions, method testCastVarChar.
@Test
// cast to varchar(length)
public void testCastVarChar(@Injectable final DrillbitContext bitContext, @Injectable UserClientConnection connection) throws Throwable {
  mockDrillbitContext(bitContext);
  final PhysicalPlanReader reader = PhysicalPlanReaderTestFactory.defaultPhysicalPlanReader(CONFIG);
  final PhysicalPlan plan = reader.readPhysicalPlan(Files.toString(FileUtils.getResourceAsFile("/functions/cast/testCastVarChar.json"), Charsets.UTF_8));
  final FunctionImplementationRegistry registry = new FunctionImplementationRegistry(CONFIG);
  final FragmentContext context = new FragmentContext(bitContext, PlanFragment.getDefaultInstance(), connection, registry);
  final SimpleRootExec exec = new SimpleRootExec(ImplCreator.getExec(context, (FragmentRoot) plan.getSortedOperators(false).iterator().next()));
  while (exec.next()) {
    final VarCharVector c0 = exec.getValueVectorById(new SchemaPath("int_lit_cast", ExpressionPosition.UNKNOWN), VarCharVector.class);
    final VarCharVector.Accessor a0 = c0.getAccessor();
    int count = 0;
    for (int i = 0; i < c0.getAccessor().getValueCount(); i++) {
      final VarCharHolder holder0 = new VarCharHolder();
      a0.get(i, holder0);
      assertEquals("123", StringFunctionHelpers.toStringFromUTF8(holder0.start, holder0.end, holder0.buffer));
      ++count;
    }
    assertEquals(5, count);
  }
  exec.close();
  context.close();
  if (context.getFailureCause() != null) {
    throw context.getFailureCause();
  }
  assertTrue(!context.isFailed());
}
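Each example on this page repeats the same FragmentContext lifecycle: build it from a mocked DrillbitContext, a PlanFragment, the client connection, and a FunctionImplementationRegistry, drive it through a SimpleRootExec, then close everything and surface any recorded failure. A minimal sketch of that shared pattern, assembled only from calls shown in these examples (planJson stands in for the plan resource loaded as a String; it is a placeholder, not a Drill identifier):

mockDrillbitContext(bitContext);
final PhysicalPlanReader reader = PhysicalPlanReaderTestFactory.defaultPhysicalPlanReader(CONFIG);
final PhysicalPlan plan = reader.readPhysicalPlan(planJson);
final FunctionImplementationRegistry registry = new FunctionImplementationRegistry(CONFIG);
final FragmentContext context = new FragmentContext(bitContext, PlanFragment.getDefaultInstance(), connection, registry);
final SimpleRootExec exec = new SimpleRootExec(ImplCreator.getExec(context, (FragmentRoot) plan.getSortedOperators(false).iterator().next()));
try {
  while (exec.next()) {
    // inspect value vectors or record counts for the current batch here
  }
} finally {
  exec.close();
  context.close();
}
if (context.getFailureCause() != null) {
  throw context.getFailureCause();
}
assertTrue(!context.isFailed());

The try/finally is a defensive variation; the tests above close the executor and context inline and then check the context for failures.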
Use of org.apache.drill.exec.ops.FragmentContext in project drill by apache.
From the class TestRecordIterator, method testSimpleIterator.
@Test
public void testSimpleIterator(@Injectable final DrillbitContext bitContext, @Injectable UserClientConnection connection) throws Throwable {
  mockDrillbitContext(bitContext);
  final PhysicalPlanReader reader = PhysicalPlanReaderTestFactory.defaultPhysicalPlanReader(c);
  final String planStr = Files.toString(FileUtils.getResourceAsFile("/record/test_recorditerator.json"), Charsets.UTF_8);
  final PhysicalPlan plan = reader.readPhysicalPlan(planStr);
  final FunctionImplementationRegistry registry = new FunctionImplementationRegistry(c);
  final FragmentContext context = new FragmentContext(bitContext, BitControl.PlanFragment.getDefaultInstance(), connection, registry);
  final List<PhysicalOperator> operatorList = plan.getSortedOperators(false);
  SimpleRootExec exec = new SimpleRootExec(ImplCreator.getExec(context, (FragmentRoot) operatorList.iterator().next()));
  RecordBatch singleBatch = exec.getIncoming();
  PhysicalOperator dummyPop = operatorList.iterator().next();
  OpProfileDef def = new OpProfileDef(dummyPop.getOperatorId(), UserBitShared.CoreOperatorType.MOCK_SUB_SCAN_VALUE, OperatorUtilities.getChildCount(dummyPop));
  OperatorStats stats = exec.getContext().getStats().newOperatorStats(def, exec.getContext().getAllocator());
  RecordIterator iter = new RecordIterator(singleBatch, null, exec.getContext().newOperatorContext(dummyPop, stats), 0, false);
  int totalRecords = 0;
  List<ValueVector> vectors = null;
  while (true) {
    iter.next();
    if (iter.finished()) {
      break;
    } else {
      // First time save vectors.
      if (vectors == null) {
        vectors = Lists.newArrayList();
        for (VectorWrapper vw : iter) {
          vectors.add(vw.getValueVector());
        }
      }
      final int position = iter.getCurrentPosition();
      if (position % 2 == 0) {
        assertTrue(checkValues(vectors, position));
      } else {
        assertTrue(checkValues(vectors, position));
      }
      totalRecords++;
    }
    assertEquals(0, iter.cachedBatches().size());
  }
  assertEquals(11112, totalRecords);
  try {
    iter.mark();
    // should not reach here
    assertTrue(false);
  } catch (UnsupportedOperationException e) {
    // expected: mark() is unsupported on this iterator
  }
  try {
    iter.reset();
    // should not reach here
    assertTrue(false);
  } catch (UnsupportedOperationException e) {
    // expected: reset() is unsupported on this iterator
  }
}
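The two try/catch blocks at the end confirm that mark() and reset() are unsupported here, which is consistent with the final boolean constructor argument being false; reading that argument as a mark/reset switch is an assumption in this note, not something the snippet states. For reference, a condensed sketch of the drain loop the test uses, built only from calls that appear in the example:

iter.next();
while (!iter.finished()) {
  final int position = iter.getCurrentPosition();
  // read the previously captured value vectors at 'position' here
  iter.next();
}

Writing the loop this way makes the advance-then-check protocol explicit: next() is always called before finished() is consulted.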
Use of org.apache.drill.exec.ops.FragmentContext in project drill by apache.
From the class TestBitRpc, method testConnectionBackpressure.
@Test
public void testConnectionBackpressure(@Injectable WorkerBee bee, @Injectable final WorkEventBus workBus) throws Exception {
  DrillConfig config1 = DrillConfig.create();
  final BootStrapContext c = new BootStrapContext(config1, ClassPathScanner.fromPrescan(config1));
  DrillConfig config2 = DrillConfig.create();
  BootStrapContext c2 = new BootStrapContext(config2, ClassPathScanner.fromPrescan(config2));
  final FragmentContext fcon = new MockUp<FragmentContext>() {
    BufferAllocator getAllocator() {
      return c.getAllocator();
    }
  }.getMockInstance();
  final FragmentManager fman = new MockUp<FragmentManager>() {
    int v = 0;
    @Mock
    boolean handle(IncomingDataBatch batch) throws FragmentSetupException, IOException {
      try {
        v++;
        // Stall on every 10th batch before acking, so the sender experiences backpressure.
        if (v % 10 == 0) {
          System.out.println("sleeping.");
          Thread.sleep(3000);
        }
      } catch (InterruptedException e) {
        // ignore and continue
      }
      RawFragmentBatch rfb = batch.newRawFragmentBatch(c.getAllocator());
      rfb.sendOk();
      rfb.release();
      return true;
    }
    public FragmentContext getFragmentContext() {
      return fcon;
    }
  }.getMockInstance();
  new NonStrictExpectations() {
    {
      workBus.getFragmentManagerIfExists((FragmentHandle) any);
      result = fman;
      workBus.getFragmentManager((FragmentHandle) any);
      result = fman;
    }
  };
  int port = 1234;
  DataConnectionConfig config = new DataConnectionConfig(c.getAllocator(), c, new DataServerRequestHandler(workBus, bee));
  DataServer server = new DataServer(config);
  port = server.bind(port, true);
  DrillbitEndpoint ep = DrillbitEndpoint.newBuilder().setAddress("localhost").setDataPort(port).build();
  DataConnectionManager manager = new DataConnectionManager(ep, config);
  DataTunnel tunnel = new DataTunnel(manager);
  AtomicLong max = new AtomicLong(0);
  for (int i = 0; i < 40; i++) {
    long t1 = System.currentTimeMillis();
    tunnel.sendRecordBatch(new TimingOutcome(max), new FragmentWritableBatch(false, QueryId.getDefaultInstance(), 1, 1, 1, 1, getRandomBatch(c.getAllocator(), 5000)));
    System.out.println(System.currentTimeMillis() - t1);
    // System.out.println("sent.");
  }
  System.out.println(String.format("Max time: %d", max.get()));
  // With the receiver periodically sleeping for 3s, the slowest send is expected to take well over 2.7s.
  assertTrue(max.get() > 2700);
  Thread.sleep(5000);
}
Use of org.apache.drill.exec.ops.FragmentContext in project drill by apache.
From the class TestPartitionSender, method testThreadsHelper.
/**
 * Core of the testing: parallelizes the plan, creates a hash partition sender for each
 * matching fragment, and verifies the partitioner thread count, batch distribution,
 * stats merging, and exception handling.
 * @param hashToRandomExchange
 * @param drillbitContext
 * @param options
 * @param incoming
 * @param registry
 * @param planReader
 * @param planningSet
 * @param rootFragment
 * @param expectedThreadsCount
 * @throws Exception
 */
private void testThreadsHelper(HashToRandomExchange hashToRandomExchange, DrillbitContext drillbitContext, OptionList options, RecordBatch incoming, FunctionImplementationRegistry registry, PhysicalPlanReader planReader, PlanningSet planningSet, Fragment rootFragment, int expectedThreadsCount) throws Exception {
  final QueryContextInformation queryContextInfo = Utilities.createQueryContextInfo("dummySchemaName", "938ea2d9-7cb9-4baf-9414-a5a0b7777e8e");
  final QueryWorkUnit qwu = PARALLELIZER.getFragments(options, drillbitContext.getEndpoint(), QueryId.getDefaultInstance(), drillbitContext.getBits(), planReader, rootFragment, USER_SESSION, queryContextInfo);
  final List<MinorFragmentEndpoint> mfEndPoints = PhysicalOperatorUtil.getIndexOrderedEndpoints(Lists.newArrayList(drillbitContext.getBits()));
  for (PlanFragment planFragment : qwu.getFragments()) {
    if (!planFragment.getFragmentJson().contains("hash-partition-sender")) {
      continue;
    }
    MockPartitionSenderRootExec partionSenderRootExec = null;
    FragmentContext context = null;
    try {
      context = new FragmentContext(drillbitContext, planFragment, null, registry);
      final int majorFragmentId = planFragment.getHandle().getMajorFragmentId();
      final HashPartitionSender partSender = new HashPartitionSender(majorFragmentId, hashToRandomExchange, hashToRandomExchange.getExpression(), mfEndPoints);
      partionSenderRootExec = new MockPartitionSenderRootExec(context, incoming, partSender);
      assertEquals("Number of threads calculated", expectedThreadsCount, partionSenderRootExec.getNumberPartitions());
      partionSenderRootExec.createPartitioner();
      final PartitionerDecorator partDecor = partionSenderRootExec.getPartitioner();
      assertNotNull(partDecor);
      List<Partitioner> partitioners = partDecor.getPartitioners();
      assertNotNull(partitioners);
      final int actualThreads = DRILLBITS_COUNT > expectedThreadsCount ? expectedThreadsCount : DRILLBITS_COUNT;
      assertEquals("Number of partitioners", actualThreads, partitioners.size());
      for (int i = 0; i < mfEndPoints.size(); i++) {
        assertNotNull("PartitionOutgoingBatch", partDecor.getOutgoingBatches(i));
      }
      // check distribution of PartitionOutgoingBatch - should be even distribution
      boolean isFirst = true;
      int prevBatchCountSize = 0;
      int batchCountSize = 0;
      for (Partitioner part : partitioners) {
        final List<PartitionOutgoingBatch> outBatch = (List<PartitionOutgoingBatch>) part.getOutgoingBatches();
        batchCountSize = outBatch.size();
        if (!isFirst) {
          assertTrue(Math.abs(batchCountSize - prevBatchCountSize) <= 1);
        } else {
          isFirst = false;
        }
        prevBatchCountSize = batchCountSize;
      }
      partionSenderRootExec.getStats().startProcessing();
      try {
        partDecor.partitionBatch(incoming);
      } finally {
        partionSenderRootExec.getStats().stopProcessing();
      }
      if (actualThreads == 1) {
        assertEquals("With single thread parent and child waitNanos should match", partitioners.get(0).getStats().getWaitNanos(), partionSenderRootExec.getStats().getWaitNanos());
      }
      // testing values distribution
      partitioners = partDecor.getPartitioners();
      isFirst = true;
      // since we have a fake NullVector, the distribution is skewed
      for (Partitioner part : partitioners) {
        final List<PartitionOutgoingBatch> outBatches = (List<PartitionOutgoingBatch>) part.getOutgoingBatches();
        for (PartitionOutgoingBatch partOutBatch : outBatches) {
          final int recordCount = ((VectorAccessible) partOutBatch).getRecordCount();
          if (isFirst) {
            assertEquals("RecordCount", 100, recordCount);
            isFirst = false;
          } else {
            assertEquals("RecordCount", 0, recordCount);
          }
        }
      }
      // test exceptions within threads
      // test stats merging
      partionSenderRootExec.getStats().startProcessing();
      try {
        partDecor.executeMethodLogic(new InjectExceptionTest());
        fail("Should throw IOException here");
      } catch (IOException ioe) {
        final OperatorProfile.Builder oPBuilder = OperatorProfile.newBuilder();
        partionSenderRootExec.getStats().addAllMetrics(oPBuilder);
        final List<MetricValue> metrics = oPBuilder.getMetricList();
        for (MetricValue metric : metrics) {
          if (Metric.BYTES_SENT.metricId() == metric.getMetricId()) {
            assertEquals("Should add metricValue irrespective of exception", 5 * actualThreads, metric.getLongValue());
          }
          if (Metric.SENDING_THREADS_COUNT.metricId() == metric.getMetricId()) {
            assertEquals(actualThreads, metric.getLongValue());
          }
        }
        // exceptions from the remaining sender threads are attached as suppressed exceptions
        assertEquals(actualThreads - 1, ioe.getSuppressed().length);
      } finally {
        partionSenderRootExec.getStats().stopProcessing();
      }
    } finally {
      // cleanup
      partionSenderRootExec.close();
      context.close();
    }
  }
}
Use of org.apache.drill.exec.ops.FragmentContext in project drill by apache.
From the class TestHashJoin, method testHJMockScanCommon.
private void testHJMockScanCommon(final DrillbitContext bitContext, UserClientConnection connection, String physicalPlan, int expectedRows) throws Throwable {
  mockDrillbitContext(bitContext);
  final PhysicalPlanReader reader = PhysicalPlanReaderTestFactory.defaultPhysicalPlanReader(c);
  final PhysicalPlan plan = reader.readPhysicalPlan(Files.toString(FileUtils.getResourceAsFile(physicalPlan), Charsets.UTF_8));
  final FunctionImplementationRegistry registry = new FunctionImplementationRegistry(c);
  final FragmentContext context = new FragmentContext(bitContext, PlanFragment.getDefaultInstance(), connection, registry);
  final SimpleRootExec exec = new SimpleRootExec(ImplCreator.getExec(context, (FragmentRoot) plan.getSortedOperators(false).iterator().next()));
  int totalRecordCount = 0;
  while (exec.next()) {
    totalRecordCount += exec.getRecordCount();
  }
  exec.close();
  assertEquals(expectedRows, totalRecordCount);
  System.out.println("Total Record Count: " + totalRecordCount);
  if (context.getFailureCause() != null) {
    throw context.getFailureCause();
  }
  assertTrue(!context.isFailed());
}