Use of org.apache.drill.exec.ops.FragmentContextImpl in project drill by axbaretto.
From the class TestBitRpc, method testConnectionBackpressure:
@Test
public void testConnectionBackpressure() throws Exception {
  final WorkerBee bee = mock(WorkerBee.class);
  final WorkEventBus workBus = mock(WorkEventBus.class);
  final DrillConfig config1 = DrillConfig.create();
  final BootStrapContext c = new BootStrapContext(config1,
      SystemOptionManager.createDefaultOptionDefinitions(),
      ClassPathScanner.fromPrescan(config1));
  final FragmentContextImpl fcon = mock(FragmentContextImpl.class);
  when(fcon.getAllocator()).thenReturn(c.getAllocator());

  final FragmentManager fman = new MockFragmentManager(c);
  when(workBus.getFragmentManager(any(FragmentHandle.class))).thenReturn(fman);

  int port = 1234;
  DataConnectionConfig config = new DataConnectionConfig(c.getAllocator(), c,
      new DataServerRequestHandler(workBus, bee));
  DataServer server = new DataServer(config);
  port = server.bind(port, true);

  DrillbitEndpoint ep = DrillbitEndpoint.newBuilder()
      .setAddress("localhost")
      .setDataPort(port)
      .build();
  DataConnectionManager manager = new DataConnectionManager(ep, config);
  DataTunnel tunnel = new DataTunnel(manager);

  AtomicLong max = new AtomicLong(0);
  for (int i = 0; i < 40; i++) {
    long t1 = System.currentTimeMillis();
    tunnel.sendRecordBatch(new TimingOutcome(max),
        new FragmentWritableBatch(false, QueryId.getDefaultInstance(), 1, 1, 1, 1,
            getRandomBatch(c.getAllocator(), 5000)));
    System.out.println(System.currentTimeMillis() - t1);
  }
  System.out.println(String.format("Max time: %d", max.get()));
  assertTrue(max.get() > 2700);
  Thread.sleep(5000);
}
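TimingOutcome is defined in the test class but not shown above. As a hedged sketch only (the ack message type and the Stopwatch-based timing are assumptions, not the project's confirmed code), a listener along these lines would satisfy the test's use of the AtomicLong: each send gets a fresh TimingOutcome, so time-since-construction approximates the round-trip time, and the largest value seen is kept.

private static class TimingOutcome implements RpcOutcomeListener<BitData.AckWithCredit> {
  private final AtomicLong max;
  private final Stopwatch watch = Stopwatch.createStarted();

  TimingOutcome(AtomicLong max) {
    this.max = max;
  }

  @Override
  public void failed(RpcException ex) {
    ex.printStackTrace();
  }

  @Override
  public void success(BitData.AckWithCredit value, ByteBuf buffer) {
    // keep the largest elapsed time observed across all sends
    max.accumulateAndGet(watch.elapsed(TimeUnit.MILLISECONDS), Math::max);
  }

  @Override
  public void interrupted(InterruptedException e) {
    // no-op in this sketch
  }
}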
Use of org.apache.drill.exec.ops.FragmentContextImpl in project drill by axbaretto.
From the class TestRecordIterator, method testSimpleIterator:
@Test
public void testSimpleIterator() throws Throwable {
  final DrillbitContext bitContext = mockDrillbitContext();
  final UserClientConnection connection = Mockito.mock(UserClientConnection.class);
  final PhysicalPlanReader reader = PhysicalPlanReaderTestFactory.defaultPhysicalPlanReader(c);
  final String planStr = Files.toString(
      DrillFileUtils.getResourceAsFile("/record/test_recorditerator.json"), Charsets.UTF_8);
  final PhysicalPlan plan = reader.readPhysicalPlan(planStr);
  final FunctionImplementationRegistry registry = new FunctionImplementationRegistry(c);
  final FragmentContextImpl context = new FragmentContextImpl(bitContext,
      BitControl.PlanFragment.getDefaultInstance(), connection, registry);
  final List<PhysicalOperator> operatorList = plan.getSortedOperators(false);
  SimpleRootExec exec = new SimpleRootExec(
      ImplCreator.getExec(context, (FragmentRoot) operatorList.iterator().next()));
  RecordBatch singleBatch = exec.getIncoming();
  PhysicalOperator dummyPop = operatorList.iterator().next();
  OpProfileDef def = new OpProfileDef(dummyPop.getOperatorId(),
      UserBitShared.CoreOperatorType.MOCK_SUB_SCAN_VALUE, OperatorUtilities.getChildCount(dummyPop));
  OperatorStats stats = exec.getContext().getStats().newOperatorStats(def, exec.getContext().getAllocator());
  RecordIterator iter = new RecordIterator(singleBatch, null,
      exec.getContext().newOperatorContext(dummyPop, stats), 0, false, null);
  int totalRecords = 0;
  List<ValueVector> vectors = null;
  while (true) {
    iter.next();
    if (iter.finished()) {
      break;
    } else {
      // Save the vectors the first time through.
      if (vectors == null) {
        vectors = Lists.newArrayList();
        for (VectorWrapper vw : iter) {
          vectors.add(vw.getValueVector());
        }
      }
      final int position = iter.getCurrentPosition();
      // The original branched on position parity but ran the same check in
      // both branches, so a single assertion is equivalent.
      assertTrue(checkValues(vectors, position));
      totalRecords++;
    }
    assertEquals(0, iter.cachedBatches().size());
  }
  assertEquals(11112, totalRecords);
  try {
    iter.mark();
    fail("mark() should not be supported");
  } catch (UnsupportedOperationException e) {
    // expected
  }
  try {
    iter.reset();
    fail("reset() should not be supported");
  } catch (UnsupportedOperationException e) {
    // expected
  }
}
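checkValues is another helper of the test class that is not shown here. A minimal hedged sketch of what such a check could look like, assuming it merely verifies that each saved vector is populated at the current row position (the real assertion logic in the test class may compare against expected generated values):

private static boolean checkValues(List<ValueVector> vectors, int position) {
  for (ValueVector vv : vectors) {
    // read the row at the iterator's current position and verify it is populated
    if (vv.getAccessor().getObject(position) == null) {
      return false;
    }
  }
  return true;
}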
Use of org.apache.drill.exec.ops.FragmentContextImpl in project drill by axbaretto.
From the class TestParquetFilterPushDown, method initFSAndCreateFragContext:
@BeforeClass
public static void initFSAndCreateFragContext() throws Exception {
  fs = getLocalFileSystem();
  fragContext = new FragmentContextImpl(bits[0].getContext(),
      BitControl.PlanFragment.getDefaultInstance(), null,
      bits[0].getContext().getFunctionImplementationRegistry());
  dirTestWatcher.copyResourceToRoot(Paths.get("parquetFilterPush"));
  dirTestWatcher.copyResourceToRoot(Paths.get("parquet", "multirowgroup.parquet"));
}
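Because the FragmentContextImpl created above holds allocator resources for the entire suite, a matching teardown is implied though not shown. A hedged sketch of what it might look like (the method name is illustrative, not taken from the project):

@AfterClass
public static void closeFragContext() throws Exception {
  // release the allocator and other resources held by the suite-wide context
  fragContext.close();
}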
Use of org.apache.drill.exec.ops.FragmentContextImpl in project drill by axbaretto.
From the class ParquetRecordReaderTest, method testPerformance:
@Test
@Ignore
public void testPerformance() throws Exception {
  final DrillbitContext bitContext = mock(DrillbitContext.class);
  final UserClientConnection connection = mock(UserClientConnection.class);
  final DrillConfig c = DrillConfig.create();
  final FunctionImplementationRegistry registry = new FunctionImplementationRegistry(c);
  final FragmentContextImpl context = new FragmentContextImpl(bitContext,
      BitControl.PlanFragment.getDefaultInstance(), connection, registry);
  final String fileName = "/tmp/parquet_test_performance.parquet";
  final HashMap<String, FieldInfo> fields = new HashMap<>();
  final ParquetTestProperties props = new ParquetTestProperties(1, 20 * 1000 * 1000, DEFAULT_BYTES_PER_PAGE, fields);
  populateFieldInfoMap(props);

  final Configuration dfsConfig = new Configuration();
  final List<Footer> footers = ParquetFileReader.readFooters(dfsConfig, new Path(fileName));
  final Footer f = footers.iterator().next();

  final List<SchemaPath> columns = Lists.newArrayList();
  columns.add(new SchemaPath("_MAP.integer", ExpressionPosition.UNKNOWN));
  columns.add(new SchemaPath("_MAP.bigInt", ExpressionPosition.UNKNOWN));
  columns.add(new SchemaPath("_MAP.f", ExpressionPosition.UNKNOWN));
  columns.add(new SchemaPath("_MAP.d", ExpressionPosition.UNKNOWN));
  columns.add(new SchemaPath("_MAP.b", ExpressionPosition.UNKNOWN));
  columns.add(new SchemaPath("_MAP.bin", ExpressionPosition.UNKNOWN));
  columns.add(new SchemaPath("_MAP.bin2", ExpressionPosition.UNKNOWN));

  int totalRowCount = 0;
  final FileSystem fs = new CachedSingleFileSystem(fileName);
  final BufferAllocator allocator = RootAllocatorFactory.newRoot(c);
  for (int i = 0; i < 25; i++) {
    final ParquetRecordReader rr = new ParquetRecordReader(context, fileName, 0, fs,
        CodecFactory.createDirectCodecFactory(dfsConfig, new ParquetDirectByteBufferAllocator(allocator), 0),
        f.getParquetMetadata(), columns, ParquetReaderUtility.DateCorruptionStatus.META_SHOWS_CORRUPTION);
    final TestOutputMutator mutator = new TestOutputMutator(allocator);
    rr.setup(null, mutator);
    final Stopwatch watch = Stopwatch.createStarted();
    int rowCount = 0;
    while ((rowCount = rr.next()) > 0) {
      totalRowCount += rowCount;
    }
    System.out.println(String.format("Time completed: %s. ", watch.elapsed(TimeUnit.MILLISECONDS)));
    rr.close();
  }
  allocator.close();
  System.out.println(String.format("Total row count %s", totalRowCount));
}
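Note that the test reads /tmp/parquet_test_performance.parquet without creating it, which is presumably why it is @Ignore'd. A hedged sketch of the generation step that would have to run first, assuming the TestFileGenerator helper used elsewhere in this test package (the exact signature is an assumption):

// must run before readFooters(), or the test fails on a missing file
TestFileGenerator.generateParquetFile(fileName, props);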
Use of org.apache.drill.exec.ops.FragmentContextImpl in project drill by apache.
From the class TestPartitionSender, method testThreadsHelper:
/**
 * Core of the test: parallelizes the plan, builds a HashPartitionSender for each
 * matching fragment, and verifies the partitioner thread count, batch
 * distribution, stats merging, and exception handling.
 * @param hashToRandomExchange exchange under test
 * @param drillbitContext drillbit context
 * @param options session option list
 * @param incoming record batch fed to the partitioner
 * @param registry function implementation registry
 * @param planReader physical plan reader
 * @param planningSet planning set for the plan
 * @param rootFragment root fragment of the plan
 * @param expectedThreadsCount expected number of partitioner threads
 * @throws Exception
 */
private void testThreadsHelper(HashToRandomExchange hashToRandomExchange, DrillbitContext drillbitContext,
    OptionList options, RecordBatch incoming, FunctionImplementationRegistry registry,
    PhysicalPlanReader planReader, PlanningSet planningSet, Fragment rootFragment,
    int expectedThreadsCount) throws Exception {
  final QueryContextInformation queryContextInfo = Utilities.createQueryContextInfo("dummySchemaName", "938ea2d9-7cb9-4baf-9414-a5a0b7777e8e");
  final QueryWorkUnit qwu = PARALLELIZER.generateWorkUnit(options, drillbitContext.getEndpoint(),
      QueryId.getDefaultInstance(), drillbitContext.getBits(), rootFragment, USER_SESSION, queryContextInfo);
  qwu.applyPlan(planReader);
  final List<MinorFragmentEndpoint> mfEndPoints =
      PhysicalOperatorUtil.getIndexOrderedEndpoints(Lists.newArrayList(drillbitContext.getBits()));
  for (PlanFragment planFragment : qwu.getFragments()) {
    if (!planFragment.getFragmentJson().contains("hash-partition-sender")) {
      continue;
    }
    MockPartitionSenderRootExec partionSenderRootExec = null;
    FragmentContextImpl context = null;
    try {
      context = new FragmentContextImpl(drillbitContext, planFragment, null, registry);
      context.setExecutorState(new MockExecutorState());
      final int majorFragmentId = planFragment.getHandle().getMajorFragmentId();
      final HashPartitionSender partSender = new HashPartitionSender(majorFragmentId,
          hashToRandomExchange, hashToRandomExchange.getExpression(), mfEndPoints);
      partionSenderRootExec = new MockPartitionSenderRootExec(context, incoming, partSender);
      assertEquals("Number of threads calculated", expectedThreadsCount, partionSenderRootExec.getNumberPartitions());
      partionSenderRootExec.createPartitioner();
      final PartitionerDecorator partDecor = partionSenderRootExec.getPartitioner();
      assertNotNull(partDecor);
      List<Partitioner> partitioners = partDecor.getPartitioners();
      assertNotNull(partitioners);
      final int actualThreads = Math.min(DRILLBITS_COUNT, expectedThreadsCount);
      assertEquals("Number of partitioners", actualThreads, partitioners.size());
      for (int i = 0; i < mfEndPoints.size(); i++) {
        assertNotNull("PartitionOutgoingBatch", partDecor.getOutgoingBatches(i));
      }
      // Check the distribution of PartitionOutgoingBatch: it should be even
      // (counts per partitioner differ by at most one).
      boolean isFirst = true;
      int prevBatchCountSize = 0;
      int batchCountSize = 0;
      for (Partitioner part : partitioners) {
        @SuppressWarnings("unchecked")
        final List<PartitionOutgoingBatch> outBatch = (List<PartitionOutgoingBatch>) part.getOutgoingBatches();
        batchCountSize = outBatch.size();
        if (!isFirst) {
          assertTrue(Math.abs(batchCountSize - prevBatchCountSize) <= 1);
        } else {
          isFirst = false;
        }
        prevBatchCountSize = batchCountSize;
      }
      partionSenderRootExec.getStats().startProcessing();
      try {
        partDecor.partitionBatch(incoming);
      } finally {
        partionSenderRootExec.getStats().stopProcessing();
      }
      if (actualThreads == 1) {
        assertEquals("With single thread parent and child waitNanos should match",
            partitioners.get(0).getStats().getWaitNanos(), partionSenderRootExec.getStats().getWaitNanos());
      }
      // Test the distribution of values. Since the incoming batch carries a fake
      // NullVector, the distribution is skewed: all rows land in the first batch.
      partitioners = partDecor.getPartitioners();
      isFirst = true;
      for (Partitioner part : partitioners) {
        @SuppressWarnings("unchecked")
        final List<PartitionOutgoingBatch> outBatches = (List<PartitionOutgoingBatch>) part.getOutgoingBatches();
        for (PartitionOutgoingBatch partOutBatch : outBatches) {
          final int recordCount = ((VectorAccessible) partOutBatch).getRecordCount();
          if (isFirst) {
            assertEquals("RecordCount", 100, recordCount);
            isFirst = false;
          } else {
            assertEquals("RecordCount", 0, recordCount);
          }
        }
      }
      // Test exception propagation within threads and stats merging.
      partionSenderRootExec.getStats().startProcessing();
      try {
        partDecor.executeMethodLogic(new InjectExceptionTest());
        fail("executeMethodLogic should throw an exception.");
      } catch (ExecutionException e) {
        final OperatorProfile.Builder oPBuilder = OperatorProfile.newBuilder();
        partionSenderRootExec.getStats().addAllMetrics(oPBuilder);
        final List<MetricValue> metrics = oPBuilder.getMetricList();
        for (MetricValue metric : metrics) {
          if (Metric.BYTES_SENT.metricId() == metric.getMetricId()) {
            assertEquals("Should add metricValue irrespective of exception", 5 * actualThreads, metric.getLongValue());
          }
          if (Metric.SENDING_THREADS_COUNT.metricId() == metric.getMetricId()) {
            assertEquals(actualThreads, metric.getLongValue());
          }
        }
        assertTrue(e.getCause() instanceof IOException);
        assertEquals(actualThreads - 1, e.getCause().getSuppressed().length);
      } finally {
        partionSenderRootExec.getStats().stopProcessing();
      }
    } finally {
      // cleanup
      partionSenderRootExec.close();
      context.close();
    }
  }
}
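InjectExceptionTest is not shown above. To be consistent with the assertions (5 BYTES_SENT recorded per thread, an IOException cause, and one suppressed exception per remaining thread), it plausibly looks like the hedged sketch below; the interface name is an assumption based on PartitionerDecorator.executeMethodLogic, not confirmed from the source.

private static class InjectExceptionTest implements PartitionerDecorator.GeneralExecuteIface {
  @Override
  public void execute(Partitioner partitioner) throws IOException {
    // record a metric before failing so stats merging can be verified
    partitioner.getStats().addLongStat(Metric.BYTES_SENT, 5);
    throw new IOException("Test exception handling");
  }
}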