Example usage of org.apache.drill.exec.planner.fragment.SimpleParallelizer in the Apache Drill project: class TestMemoryCalculator, method TestSingleMajorFragmentWithProjectAndScan.
@Test
public void TestSingleMajorFragmentWithProjectAndScan() throws Exception {
// Parallelize a plain scan + project query over two drillbits and verify
// that the queue-based parallelizer assigns the expected memory to each node.
final List<DrillbitEndpoint> endpoints = getEndpoints(2, new HashSet<>());
final Map<DrillbitEndpoint, NodeResource> nodeResources = endpoints.stream()
    .collect(Collectors.toMap(ep -> ep, ep -> NodeResource.create()));
final SimpleParallelizer queueParallelizer = new QueueQueryParallelizer(false, queryContext);
final String query = "SELECT * from cp.`tpch/nation.parquet`";
final PlanningSet planningSet =
    preparePlanningSet(endpoints, DEFAULT_SLICE_TARGET, nodeResources, query, queueParallelizer);
queueParallelizer.adjustMemory(planningSet, createSet(planningSet.getRootWrapper()), endpoints);
// Every node should have been assigned exactly 30 bytes of memory for this plan.
assertTrue("memory requirement is different",
    Iterables.all(nodeResources.entrySet(), (entry) -> entry.getValue().getMemory() == 30));
}
Example usage of org.apache.drill.exec.planner.fragment.SimpleParallelizer in the Apache Drill project: class TestMemoryCalculator, method TestTwoMajorFragmentWithSortyProjectAndScan.
@Test
public void TestTwoMajorFragmentWithSortyProjectAndScan() throws Exception {
// Parallelize a sort + project + scan query (two major fragments, slice
// target 2) over two drillbits and verify the per-node memory requirement.
final List<DrillbitEndpoint> endpoints = getEndpoints(2, new HashSet<>());
final Map<DrillbitEndpoint, NodeResource> nodeResources = endpoints.stream()
    .collect(Collectors.toMap(ep -> ep, ep -> NodeResource.create()));
final SimpleParallelizer queueParallelizer = new QueueQueryParallelizer(false, queryContext);
final String query = "SELECT * from cp.`tpch/lineitem.parquet` order by dept_id";
final PlanningSet planningSet =
    preparePlanningSet(endpoints, 2, nodeResources, query, queueParallelizer);
queueParallelizer.adjustMemory(planningSet, createSet(planningSet.getRootWrapper()), endpoints);
// Every node should have been assigned exactly 481490 bytes for this plan.
assertTrue("memory requirement is different",
    Iterables.all(nodeResources.entrySet(), (entry) -> entry.getValue().getMemory() == 481490));
}
Example usage of org.apache.drill.exec.planner.fragment.SimpleParallelizer in the Apache Drill project: class TestMemoryCalculator, method TestSingleMajorFragmentWithGroupByProjectAndScan.
@Test
public void TestSingleMajorFragmentWithGroupByProjectAndScan() throws Exception {
// Parallelize a group-by aggregation query over two drillbits and verify
// that the queue-based parallelizer assigns the expected memory to each node.
final List<DrillbitEndpoint> endpoints = getEndpoints(2, new HashSet<>());
final Map<DrillbitEndpoint, NodeResource> nodeResources = endpoints.stream()
    .collect(Collectors.toMap(ep -> ep, ep -> NodeResource.create()));
final SimpleParallelizer queueParallelizer = new QueueQueryParallelizer(false, queryContext);
final String query = "SELECT dept_id, count(*) from cp.`tpch/lineitem.parquet` group by dept_id";
final PlanningSet planningSet =
    preparePlanningSet(endpoints, DEFAULT_SLICE_TARGET, nodeResources, query, queueParallelizer);
queueParallelizer.adjustMemory(planningSet, createSet(planningSet.getRootWrapper()), endpoints);
// Every node should have been assigned exactly 529570 bytes for this plan.
assertTrue("memory requirement is different",
    Iterables.all(nodeResources.entrySet(), (entry) -> entry.getValue().getMemory() == 529570));
}
Example usage of org.apache.drill.exec.planner.fragment.SimpleParallelizer in the Apache Drill project: class TestFragmentChecker, method print.
/**
 * Reads a physical plan from the given test resource, parallelizes it across
 * {@code bitCount} simulated drillbit endpoints, and asserts the resulting
 * fragment count.
 *
 * @param fragmentFile          test resource containing the physical plan
 * @param bitCount              number of drillbit endpoints to simulate
 * @param expectedFragmentCount expected total number of fragments
 */
private void print(String fragmentFile, int bitCount, int expectedFragmentCount) throws Exception {
PhysicalPlanReader planReader = PhysicalPlanReaderTestFactory.defaultPhysicalPlanReader(CONFIG);
Fragment rootFragment = getRootFragment(planReader, fragmentFile);
SimpleParallelizer parallelizer = new DefaultQueryParallelizer(true, 1000 * 1000, 5, 10, 1.2);

// Build the simulated cluster; the first endpoint acts as the local bit.
List<DrillbitEndpoint> endpoints = Lists.newArrayList();
DrillbitEndpoint localEndpoint = null;
for (int port = 0; port < bitCount; port++) {
DrillbitEndpoint endpoint =
    DrillbitEndpoint.newBuilder().setAddress("localhost").setControlPort(1234 + port).build();
if (localEndpoint == null) {
localEndpoint = endpoint;
}
endpoints.add(endpoint);
}

QueryContextInformation contextInfo =
    Utilities.createQueryContextInfo("dummySchemaName", "938ea2d9-7cb9-4baf-9414-a5a0b7777e8e");
UserSession session = UserSession.Builder.newBuilder()
    .withCredentials(UserBitShared.UserCredentials.newBuilder().setUserName("foo").build())
    .build();
QueryWorkUnit workUnit = parallelizer.generateWorkUnit(new OptionList(), localEndpoint,
    QueryId.getDefaultInstance(), endpoints, rootFragment, session, contextInfo);
workUnit.applyPlan(planReader);
// NOTE(review): the +1 presumably accounts for a fragment not included in
// getFragments() (likely the root) — confirm against QueryWorkUnit.
assertEquals(expectedFragmentCount, workUnit.getFragments().size() + 1);
}
Aggregations