Use of io.druid.sql.calcite.planner.PlannerConfig in project druid by druid-io.
In the class DruidAvaticaHandlerTest, method setUp:
@Before
public void setUp() throws Exception
{
  Calcites.setSystemProperties();
  walker = CalciteTests.createMockWalker(temporaryFolder.newFolder());
  final PlannerConfig plannerConfig = new PlannerConfig();
  final SchemaPlus rootSchema = Calcites.createRootSchema(CalciteTests.createMockSchema(walker, plannerConfig));
  final DruidOperatorTable operatorTable = CalciteTests.createOperatorTable();
  final DruidAvaticaHandler handler = new DruidAvaticaHandler(
      new DruidMeta(new PlannerFactory(rootSchema, walker, operatorTable, plannerConfig), AVATICA_CONFIG),
      new DruidNode("dummy", "dummy", 1),
      new AvaticaMonitor()
  );
  // Bind the Avatica servlet to a random high port to avoid collisions between test runs.
  final int port = new Random().nextInt(9999) + 10000;
  server = new Server(new InetSocketAddress("127.0.0.1", port));
  server.setHandler(handler);
  server.start();
  final String url = String.format("jdbc:avatica:remote:url=http://127.0.0.1:%d%s", port, DruidAvaticaHandler.AVATICA_PATH);
  client = DriverManager.getConnection(url);
  // A second connection that renders timestamps in the America/Los_Angeles time zone.
  final Properties propertiesLosAngeles = new Properties();
  propertiesLosAngeles.setProperty("sqlTimeZone", "America/Los_Angeles");
  clientLosAngeles = DriverManager.getConnection(url, propertiesLosAngeles);
}
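Once setUp completes, queries can be issued over the connection with nothing but standard java.sql APIs. A minimal sketch of what a test against this fixture might look like; the query and the druid.foo datasource name are illustrative assumptions about the mock walker's contents (java.sql and JUnit imports assumed):

@Test
public void testSelectCount() throws Exception
{
  // Plain JDBC against the Avatica endpoint started in setUp().
  try (final Statement statement = client.createStatement();
       final ResultSet resultSet = statement.executeQuery("SELECT COUNT(*) AS cnt FROM druid.foo")) {
    while (resultSet.next()) {
      final long cnt = resultSet.getLong("cnt"); // one row holding the total count
    }
  }
}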
Use of io.druid.sql.calcite.planner.PlannerConfig in project druid by druid-io.
In the class DruidSemiJoinRule, method onMatch:
@Override
public void onMatch(RelOptRuleCall call)
{
  final Project project = call.rel(0);
  final Join join = call.rel(1);
  final DruidRel left = call.rel(2);
  final DruidRel right = call.rel(3);
  final ImmutableBitSet bits = RelOptUtil.InputFinder.bits(project.getProjects(), null);
  final ImmutableBitSet rightBits = ImmutableBitSet.range(
      left.getRowType().getFieldCount(),
      join.getRowType().getFieldCount()
  );
  // The project must not reference any columns from the right-hand side.
  if (bits.intersects(rightBits)) {
    return;
  }
  final JoinInfo joinInfo = join.analyzeCondition();
  final List<Integer> rightDimsOut = new ArrayList<>();
  for (DimensionSpec dimensionSpec : right.getQueryBuilder().getGrouping().getDimensions()) {
    rightDimsOut.add(right.getOutputRowSignature().getRowOrder().indexOf(dimensionSpec.getOutputName()));
  }
  // The join must be an equi-join on exactly the right-hand side's grouping
  // dimensions; neither a super-set nor a sub-set would work.
  if (!joinInfo.isEqui() || !joinInfo.rightSet().equals(ImmutableBitSet.of(rightDimsOut))) {
    return;
  }
  final RelBuilder relBuilder = call.builder();
  final PlannerConfig plannerConfig = left.getPlannerContext().getPlannerConfig();
  if (join.getJoinType() == JoinRelType.LEFT) {
    // The join can be eliminated, since the right-hand side cannot have any effect: nothing from it
    // is being selected, and LEFT means a left-hand row is included even when there is no match.
    relBuilder.push(left);
  } else {
    final DruidSemiJoin druidSemiJoin = DruidSemiJoin.from(left, right, joinInfo.leftKeys, joinInfo.rightKeys, plannerConfig);
    if (druidSemiJoin == null) {
      return;
    }
    // Respect the maxQueryCount limit from PlannerConfig (a value of 0 means no limit).
    if (plannerConfig.getMaxQueryCount() > 0 && druidSemiJoin.getQueryCount() > plannerConfig.getMaxQueryCount()) {
      return;
    }
    relBuilder.push(druidSemiJoin);
  }
  call.transformTo(relBuilder.project(project.getProjects(), project.getRowType().getFieldNames()).build());
}
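The first guard is the heart of the rule: it fires only when the Project above the Join reads nothing from the right-hand input. A standalone sketch of that bit-set check using Calcite's ImmutableBitSet; the field counts and indices are made up for illustration:

import org.apache.calcite.util.ImmutableBitSet;

public class ProjectionCheckSketch
{
  public static void main(String[] args)
  {
    // Suppose the join produces five fields: 0-2 from the left input, 3-4 from the right.
    final ImmutableBitSet rightBits = ImmutableBitSet.range(3, 5);
    // A project reading only fields 0 and 2 touches nothing on the right...
    final ImmutableBitSet projectBits = ImmutableBitSet.of(0, 2);
    // ...so intersects() is false and the rule may proceed; a reference to
    // field 3 or 4 would make onMatch bail out early.
    System.out.println("references right side: " + projectBits.intersects(rightBits));
  }
}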
Use of io.druid.sql.calcite.planner.PlannerConfig in project druid by druid-io.
In the class DruidStatementTest, method setUp:
@Before
public void setUp() throws Exception
{
  Calcites.setSystemProperties();
  walker = CalciteTests.createMockWalker(temporaryFolder.newFolder());
  final PlannerConfig plannerConfig = new PlannerConfig();
  final SchemaPlus rootSchema = Calcites.createRootSchema(CalciteTests.createMockSchema(walker, plannerConfig));
  final DruidOperatorTable operatorTable = CalciteTests.createOperatorTable();
  plannerFactory = new PlannerFactory(rootSchema, walker, operatorTable, plannerConfig);
}
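With plannerFactory built, a test can plan and execute SQL through the same createPlanner/plan/run flow seen in CalciteQueryTest below. A minimal sketch; the empty query context and the druid.foo table are assumptions, not part of this test:

try (final DruidPlanner planner = plannerFactory.createPlanner(ImmutableMap.<String, Object>of())) {
  final PlannerResult result = planner.plan("SELECT COUNT(*) FROM druid.foo"); // illustrative query
  // Materialize the lazy result sequence into a list of rows.
  final List<Object[]> rows = Sequences.toList(result.run(), Lists.<Object[]>newArrayList());
}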
Use of io.druid.sql.calcite.planner.PlannerConfig in project druid by druid-io.
In the class CalciteQueryTest, method getResults:
private List<Object[]> getResults(final PlannerContext plannerContext, final String sql) throws Exception
{
  final PlannerConfig plannerConfig = plannerContext.getPlannerConfig();
  final DruidSchema druidSchema = CalciteTests.createMockSchema(walker, plannerConfig);
  final SchemaPlus rootSchema = Calcites.createRootSchema(druidSchema);
  final DruidOperatorTable operatorTable = CalciteTests.createOperatorTable();
  final PlannerFactory plannerFactory = new PlannerFactory(rootSchema, walker, operatorTable, plannerConfig);
  try (DruidPlanner planner = plannerFactory.createPlanner(plannerContext.getQueryContext())) {
    final PlannerResult plan = planner.plan(sql);
    return Sequences.toList(plan.run(), Lists.<Object[]>newArrayList());
  }
}
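Note that planner.plan(sql) does not execute anything by itself: plan.run() hands back a lazy Sequence, and Sequences.toList is what drives it and collects the rows. A tiny illustration of the idiom with a hand-built sequence (Sequences comes from Druid's java-util, Lists and ImmutableList from Guava; the sample rows are made up):

final Sequence<Object[]> sequence = Sequences.simple(
    ImmutableList.of(new Object[]{"a", 1L}, new Object[]{"b", 2L})
);
// Drives the sequence and appends each row to the supplied ArrayList,
// the same shape as the Sequences.toList call in getResults above.
final List<Object[]> rows = Sequences.toList(sequence, Lists.<Object[]>newArrayList());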
Use of io.druid.sql.calcite.planner.PlannerConfig in project druid by druid-io.
In the class SqlBenchmark, method setup:
@Setup(Level.Trial)
public void setup() throws Exception
{
  tmpDir = Files.createTempDir();
  log.info("Starting benchmark setup using tmpDir[%s], rows[%,d].", tmpDir, rowsPerSegment);
  if (ComplexMetrics.getSerdeForType("hyperUnique") == null) {
    ComplexMetrics.registerSerde("hyperUnique", new HyperUniquesSerde(HyperLogLogHash.getDefault()));
  }
  final BenchmarkSchemaInfo schemaInfo = BenchmarkSchemas.SCHEMA_MAP.get("basic");
  final BenchmarkDataGenerator dataGenerator = new BenchmarkDataGenerator(schemaInfo.getColumnSchemas(), RNG_SEED + 1, schemaInfo.getDataInterval(), rowsPerSegment);
  final List<InputRow> rows = Lists.newArrayList();
  for (int i = 0; i < rowsPerSegment; i++) {
    final InputRow row = dataGenerator.nextRow();
    if (i % 20000 == 0) {
      log.info("%,d/%,d rows generated.", i, rowsPerSegment);
    }
    rows.add(row);
  }
  log.info("%,d/%,d rows generated.", rows.size(), rowsPerSegment);
  final PlannerConfig plannerConfig = new PlannerConfig();
  final QueryRunnerFactoryConglomerate conglomerate = CalciteTests.queryRunnerFactoryConglomerate();
  final QueryableIndex index = IndexBuilder.create()
      .tmpDir(new File(tmpDir, "1"))
      .indexMerger(TestHelper.getTestIndexMergerV9())
      .rows(rows)
      .buildMMappedIndex();
  this.walker = new SpecificSegmentsQuerySegmentWalker(conglomerate).add(
      DataSegment.builder().dataSource("foo").interval(index.getDataInterval()).version("1").shardSpec(new LinearShardSpec(0)).build(),
      index
  );
  final Map<String, Table> tableMap = ImmutableMap.<String, Table>of(
      "foo",
      new DruidTable(
          new TableDataSource("foo"),
          RowSignature.builder()
              .add("__time", ValueType.LONG)
              .add("dimSequential", ValueType.STRING)
              .add("dimZipf", ValueType.STRING)
              .add("dimUniform", ValueType.STRING)
              .build()
      )
  );
  final Schema druidSchema = new AbstractSchema()
  {
    @Override
    protected Map<String, Table> getTableMap()
    {
      return tableMap;
    }
  };
  plannerFactory = new PlannerFactory(Calcites.createRootSchema(druidSchema), walker, CalciteTests.createOperatorTable(), plannerConfig);
  groupByQuery = GroupByQuery.builder()
      .setDataSource("foo")
      .setInterval(new Interval(JodaUtils.MIN_INSTANT, JodaUtils.MAX_INSTANT))
      .setDimensions(Arrays.<DimensionSpec>asList(new DefaultDimensionSpec("dimZipf", "d0"), new DefaultDimensionSpec("dimSequential", "d1")))
      .setAggregatorSpecs(Arrays.<AggregatorFactory>asList(new CountAggregatorFactory("c")))
      .setGranularity(Granularities.ALL)
      .build();
  sqlQuery = "SELECT\n"
      + "  dimZipf AS d0,\n"
      + "  dimSequential AS d1,\n"
      + "  COUNT(*) AS c\n"
      + "FROM druid.foo\n"
      + "GROUP BY dimZipf, dimSequential";
}
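This setup leaves plannerFactory and sqlQuery ready for the measured code. A sketch of what the corresponding JMH benchmark method could look like, reusing the plan-and-run idiom from the snippets above; the method name, annotation choices, and empty query context are illustrative assumptions, not the project's exact benchmark body:

@Benchmark
@BenchmarkMode(Mode.AverageTime)
@OutputTimeUnit(TimeUnit.MICROSECONDS)
public void querySql(Blackhole blackhole) throws Exception
{
  try (final DruidPlanner planner = plannerFactory.createPlanner(ImmutableMap.<String, Object>of())) {
    final PlannerResult plannerResult = planner.plan(sqlQuery);
    final List<Object[]> results = Sequences.toList(plannerResult.run(), Lists.<Object[]>newArrayList());
    blackhole.consume(results); // keep the JIT from optimizing the query away
  }
}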