
Example 6 with PlannerConfig

Use of org.apache.druid.sql.calcite.planner.PlannerConfig in project druid by druid-io.

From the class SqlVsNativeBenchmark, method setup.

@Setup(Level.Trial)
public void setup() {
    this.closer = Closer.create();
    final GeneratorSchemaInfo schemaInfo = GeneratorBasicSchemas.SCHEMA_MAP.get("basic");
    final DataSegment dataSegment = DataSegment.builder()
        .dataSource("foo")
        .interval(schemaInfo.getDataInterval())
        .version("1")
        .shardSpec(new LinearShardSpec(0))
        .size(0)
        .build();
    final SegmentGenerator segmentGenerator = closer.register(new SegmentGenerator());
    log.info("Starting benchmark setup using tmpDir[%s], rows[%,d].", segmentGenerator.getCacheDir(), rowsPerSegment);
    final QueryableIndex index = segmentGenerator.generate(dataSegment, schemaInfo, Granularities.NONE, rowsPerSegment);
    final QueryRunnerFactoryConglomerate conglomerate = QueryStackTests.createQueryRunnerFactoryConglomerate(closer);
    final PlannerConfig plannerConfig = new PlannerConfig();
    this.walker = closer.register(new SpecificSegmentsQuerySegmentWalker(conglomerate).add(dataSegment, index));
    final DruidSchemaCatalog rootSchema = CalciteTests.createMockRootSchema(conglomerate, walker, plannerConfig, AuthTestUtils.TEST_AUTHORIZER_MAPPER);
    plannerFactory = new PlannerFactory(
        rootSchema,
        CalciteTests.createMockQueryMakerFactory(walker, conglomerate),
        CalciteTests.createOperatorTable(),
        CalciteTests.createExprMacroTable(),
        plannerConfig,
        AuthTestUtils.TEST_AUTHORIZER_MAPPER,
        CalciteTests.getJsonMapper(),
        CalciteTests.DRUID_SCHEMA_NAME
    );
    groupByQuery = GroupByQuery.builder()
        .setDataSource("foo")
        .setInterval(Intervals.ETERNITY)
        .setDimensions(
            new DefaultDimensionSpec("dimZipf", "d0"),
            new DefaultDimensionSpec("dimSequential", "d1")
        )
        .setAggregatorSpecs(new CountAggregatorFactory("c"))
        .setGranularity(Granularities.ALL)
        .build();
    sqlQuery = "SELECT\n" + "  dimZipf AS d0," + "  dimSequential AS d1,\n" + "  COUNT(*) AS c\n" + "FROM druid.foo\n" + "GROUP BY dimZipf, dimSequential";
}
Also used : SegmentGenerator(org.apache.druid.segment.generator.SegmentGenerator) QueryRunnerFactoryConglomerate(org.apache.druid.query.QueryRunnerFactoryConglomerate) SpecificSegmentsQuerySegmentWalker(org.apache.druid.sql.calcite.util.SpecificSegmentsQuerySegmentWalker) CountAggregatorFactory(org.apache.druid.query.aggregation.CountAggregatorFactory) LinearShardSpec(org.apache.druid.timeline.partition.LinearShardSpec) QueryableIndex(org.apache.druid.segment.QueryableIndex) GeneratorSchemaInfo(org.apache.druid.segment.generator.GeneratorSchemaInfo) PlannerConfig(org.apache.druid.sql.calcite.planner.PlannerConfig) DruidSchemaCatalog(org.apache.druid.sql.calcite.schema.DruidSchemaCatalog) PlannerFactory(org.apache.druid.sql.calcite.planner.PlannerFactory) DataSegment(org.apache.druid.timeline.DataSegment) DefaultDimensionSpec(org.apache.druid.query.dimension.DefaultDimensionSpec) Setup(org.openjdk.jmh.annotations.Setup)
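The Closer created in this setup owns the SegmentGenerator and the walker, so a matching Trial-level tear-down is needed to release them. Below is a minimal, hypothetical JMH skeleton (class and method names are illustrative, not the benchmark's actual ones) showing how state built in @Setup(Level.Trial) is typically consumed and released:

import java.util.concurrent.TimeUnit;
import org.openjdk.jmh.annotations.*;
import org.openjdk.jmh.infra.Blackhole;

@State(Scope.Benchmark)
@BenchmarkMode(Mode.AverageTime)
@OutputTimeUnit(TimeUnit.MILLISECONDS)
public class SqlVsNativeLifecycleSketch {

    @Param({"100000"})
    // JMH injects the parameter before setup() runs; the value here is an assumption
    int rowsPerSegment;

    @Setup(Level.Trial)
    public void setup() {
        // build the segment, walker, plannerFactory, groupByQuery, and sqlQuery as above
    }

    @TearDown(Level.Trial)
    public void tearDown() throws Exception {
        // closer.close(); // releases the SegmentGenerator cache dir and the registered walker
    }

    @Benchmark
    public void runQuery(Blackhole blackhole) {
        // execute sqlQuery (or the native groupByQuery) and sink every result row into
        // the Blackhole so the JIT cannot eliminate the measured work
        blackhole.consume(0); // placeholder
    }
}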

Example 7 with PlannerConfig

Use of org.apache.druid.sql.calcite.planner.PlannerConfig in project druid by druid-io.

From the class SqlExpressionBenchmark, method setup.

@Setup(Level.Trial)
public void setup() {
    final GeneratorSchemaInfo schemaInfo = GeneratorBasicSchemas.SCHEMA_MAP.get("expression-testbench");
    final DataSegment dataSegment = DataSegment.builder()
        .dataSource("foo")
        .interval(schemaInfo.getDataInterval())
        .version("1")
        .shardSpec(new LinearShardSpec(0))
        .size(0)
        .build();
    final PlannerConfig plannerConfig = new PlannerConfig();
    final SegmentGenerator segmentGenerator = closer.register(new SegmentGenerator());
    log.info("Starting benchmark setup using cacheDir[%s], rows[%,d].", segmentGenerator.getCacheDir(), rowsPerSegment);
    final QueryableIndex index = segmentGenerator.generate(dataSegment, schemaInfo, Granularities.NONE, rowsPerSegment);
    final QueryRunnerFactoryConglomerate conglomerate = QueryStackTests.createQueryRunnerFactoryConglomerate(closer, PROCESSING_CONFIG);
    final SpecificSegmentsQuerySegmentWalker walker = new SpecificSegmentsQuerySegmentWalker(conglomerate).add(dataSegment, index);
    closer.register(walker);
    final DruidSchemaCatalog rootSchema = CalciteTests.createMockRootSchema(conglomerate, walker, plannerConfig, AuthTestUtils.TEST_AUTHORIZER_MAPPER);
    plannerFactory = new PlannerFactory(
        rootSchema,
        CalciteTests.createMockQueryMakerFactory(walker, conglomerate),
        CalciteTests.createOperatorTable(),
        CalciteTests.createExprMacroTable(),
        plannerConfig,
        AuthTestUtils.TEST_AUTHORIZER_MAPPER,
        CalciteTests.getJsonMapper(),
        CalciteTests.DRUID_SCHEMA_NAME
    );
    try {
        SqlVectorizedExpressionSanityTest.sanityTestVectorizedSqlQueries(plannerFactory, QUERIES.get(Integer.parseInt(query)));
    } catch (Throwable ignored) {
        // the show must go on: a failed sanity check must not abort benchmark setup
    }
}
Also used : SegmentGenerator(org.apache.druid.segment.generator.SegmentGenerator) QueryRunnerFactoryConglomerate(org.apache.druid.query.QueryRunnerFactoryConglomerate) SpecificSegmentsQuerySegmentWalker(org.apache.druid.sql.calcite.util.SpecificSegmentsQuerySegmentWalker) LinearShardSpec(org.apache.druid.timeline.partition.LinearShardSpec) QueryableIndex(org.apache.druid.segment.QueryableIndex) GeneratorSchemaInfo(org.apache.druid.segment.generator.GeneratorSchemaInfo) PlannerConfig(org.apache.druid.sql.calcite.planner.PlannerConfig) DruidSchemaCatalog(org.apache.druid.sql.calcite.schema.DruidSchemaCatalog) PlannerFactory(org.apache.druid.sql.calcite.planner.PlannerFactory) DataSegment(org.apache.druid.timeline.DataSegment) Setup(org.openjdk.jmh.annotations.Setup)
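Two details here are easy to miss. The expression QUERIES.get(Integer.parseInt(query)) implies that query is a JMH @Param string holding an index into the static QUERIES list; a hedged sketch of that assumed wiring (the annotation values are illustrative):

// Assumed wiring, not shown in the excerpt: each benchmark fork exercises one QUERIES entry.
@Param({"0", "1", "2"}) // illustrative; the real annotation would enumerate every index
private String query;

The catch (Throwable ignored) is also deliberate: if the vectorized sanity check fails for a query, setup still completes and the problem surfaces in the benchmark run itself instead of aborting the whole trial.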

Example 8 with PlannerConfig

Use of org.apache.druid.sql.calcite.planner.PlannerConfig in project druid by druid-io.

From the class CalciteInsertDmlTest, method testExplainInsertFromExternal.

@Test
public void testExplainInsertFromExternal() throws Exception {
    // Skip vectorization since otherwise the "context" will change for each subtest.
    skipVectorize();
    final ScanQuery expectedQuery = newScanQueryBuilder()
        .dataSource(externalDataSource)
        .intervals(querySegmentSpec(Filtration.eternity()))
        .columns("x", "y", "z")
        .context(queryJsonMapper.readValue(
            "{\"defaultTimeout\":300000,\"maxScatterGatherBytes\":9223372036854775807,\"sqlCurrentTimestamp\":\"2000-01-01T00:00:00Z\",\"sqlInsertSegmentGranularity\":\"{\\\"type\\\":\\\"all\\\"}\",\"sqlQueryId\":\"dummy\",\"vectorize\":\"false\",\"vectorizeVirtualColumns\":\"false\"}",
            JacksonUtils.TYPE_REFERENCE_MAP_STRING_OBJECT
        ))
        .build();
    final String expectedExplanation = "DruidQueryRel(query=[" + queryJsonMapper.writeValueAsString(expectedQuery) + "], signature=[{x:STRING, y:STRING, z:LONG}])\n";
    // Use testQuery for EXPLAIN (not testInsertQuery).
    testQuery(
        new PlannerConfig(),
        StringUtils.format(
            "EXPLAIN PLAN FOR INSERT INTO dst SELECT * FROM %s PARTITIONED BY ALL TIME",
            externSql(externalDataSource)
        ),
        CalciteTests.SUPER_USER_AUTH_RESULT,
        ImmutableList.of(),
        ImmutableList.of(
            new Object[] {
                expectedExplanation,
                "[{\"name\":\"EXTERNAL\",\"type\":\"EXTERNAL\"},{\"name\":\"dst\",\"type\":\"DATASOURCE\"}]"
            }
        )
    );
    // Not using testInsertQuery, so must set didTest manually to satisfy the check in tearDown.
    didTest = true;
}
Also used : PlannerConfig(org.apache.druid.sql.calcite.planner.PlannerConfig) ScanQuery(org.apache.druid.query.scan.ScanQuery) Test(org.junit.Test)
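For readability, the escaped context string passed to queryJsonMapper.readValue above decodes to the JSON below; note that sqlInsertSegmentGranularity itself carries a nested, JSON-encoded granularity:

{
  "defaultTimeout": 300000,
  "maxScatterGatherBytes": 9223372036854775807,
  "sqlCurrentTimestamp": "2000-01-01T00:00:00Z",
  "sqlInsertSegmentGranularity": "{\"type\":\"all\"}",
  "sqlQueryId": "dummy",
  "vectorize": "false",
  "vectorizeVirtualColumns": "false"
}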

Example 9 with PlannerConfig

Use of org.apache.druid.sql.calcite.planner.PlannerConfig in project druid by druid-io.

From the class DruidAvaticaHandlerTest, method testMaxRowsPerFrame.

@Test
public void testMaxRowsPerFrame() throws Exception {
    final AvaticaServerConfig smallFrameConfig = new AvaticaServerConfig() {

        @Override
        public int getMaxConnections() {
            return 2;
        }

        @Override
        public int getMaxStatementsPerConnection() {
            return 4;
        }

        @Override
        public int getMaxRowsPerFrame() {
            return 2;
        }
    };
    final PlannerConfig plannerConfig = new PlannerConfig();
    final DruidOperatorTable operatorTable = CalciteTests.createOperatorTable();
    final ExprMacroTable macroTable = CalciteTests.createExprMacroTable();
    final List<Meta.Frame> frames = new ArrayList<>();
    DruidSchemaCatalog rootSchema = CalciteTests.createMockRootSchema(conglomerate, walker, plannerConfig, AuthTestUtils.TEST_AUTHORIZER_MAPPER);
    DruidMeta smallFrameDruidMeta = new DruidMeta(
        CalciteTests.createSqlLifecycleFactory(
            new PlannerFactory(
                rootSchema,
                CalciteTests.createMockQueryMakerFactory(walker, conglomerate),
                operatorTable,
                macroTable,
                plannerConfig,
                AuthTestUtils.TEST_AUTHORIZER_MAPPER,
                CalciteTests.getJsonMapper(),
                CalciteTests.DRUID_SCHEMA_NAME
            )
        ),
        smallFrameConfig,
        new ErrorHandler(new ServerConfig()),
        injector
    ) {

        @Override
        public Frame fetch(final StatementHandle statement, final long offset, final int fetchMaxRowCount) throws NoSuchStatementException, MissingResultsException {
            // overriding fetch allows us to track how many frames are processed after the first frame
            Frame frame = super.fetch(statement, offset, fetchMaxRowCount);
            frames.add(frame);
            return frame;
        }
    };
    final AbstractAvaticaHandler handler = this.getAvaticaHandler(smallFrameDruidMeta);
    final int port = ThreadLocalRandom.current().nextInt(9999) + 20000;
    Server smallFrameServer = new Server(new InetSocketAddress("127.0.0.1", port));
    smallFrameServer.setHandler(handler);
    smallFrameServer.start();
    String smallFrameUrl = this.getJdbcConnectionString(port);
    Connection smallFrameClient = DriverManager.getConnection(smallFrameUrl, "regularUser", "druid");
    final ResultSet resultSet = smallFrameClient.createStatement().executeQuery("SELECT dim1 FROM druid.foo");
    List<Map<String, Object>> rows = getRows(resultSet);
    Assert.assertEquals(2, frames.size());
    Assert.assertEquals(
        ImmutableList.of(
            ImmutableMap.of("dim1", ""),
            ImmutableMap.of("dim1", "10.1"),
            ImmutableMap.of("dim1", "2"),
            ImmutableMap.of("dim1", "1"),
            ImmutableMap.of("dim1", "def"),
            ImmutableMap.of("dim1", "abc")
        ),
        rows
    );
}
Also used : Server(org.eclipse.jetty.server.Server) InetSocketAddress(java.net.InetSocketAddress) ArrayList(java.util.ArrayList) Connection(java.sql.Connection) AbstractAvaticaHandler(org.apache.calcite.avatica.server.AbstractAvaticaHandler) DruidOperatorTable(org.apache.druid.sql.calcite.planner.DruidOperatorTable) ExprMacroTable(org.apache.druid.math.expr.ExprMacroTable) ServerConfig(org.apache.druid.server.initialization.ServerConfig) PlannerConfig(org.apache.druid.sql.calcite.planner.PlannerConfig) DruidSchemaCatalog(org.apache.druid.sql.calcite.schema.DruidSchemaCatalog) ResultSet(java.sql.ResultSet) PlannerFactory(org.apache.druid.sql.calcite.planner.PlannerFactory) Map(java.util.Map) ImmutableMap(com.google.common.collect.ImmutableMap) HashMap(java.util.HashMap) Test(org.junit.Test)
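The assertion frames.size() == 2 pins down the paging arithmetic: with six result rows and getMaxRowsPerFrame() returning 2, the first two-row frame travels back with the execute response, and only the remaining two frames go through the overridden fetch(). A sketch of client-side consumption (a fragment assuming java.sql.* imports; jdbcUrl and process(...) are placeholders):

// 6 rows, maxRowsPerFrame = 2:
//   executeQuery()   -> rows 1-2 arrive with the execute response (no fetch() call)
//   fetch(offset=2)  -> rows 3-4  } recorded by the overridden fetch(),
//   fetch(offset=4)  -> rows 5-6  } hence Assert.assertEquals(2, frames.size())
try (Connection conn = DriverManager.getConnection(jdbcUrl, "regularUser", "druid");
     Statement stmt = conn.createStatement();
     ResultSet rs = stmt.executeQuery("SELECT dim1 FROM druid.foo")) {
    while (rs.next()) {
        // whenever a frame is exhausted, the Avatica driver transparently issues
        // another fetch() against DruidMeta to pull the next one
        process(rs.getString("dim1"));
    }
}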

Example 10 with PlannerConfig

Use of org.apache.druid.sql.calcite.planner.PlannerConfig in project druid by druid-io.

From the class DruidAvaticaHandlerTest, method testMinRowsPerFrame.

@Test
public void testMinRowsPerFrame() throws Exception {
    final int minFetchSize = 1000;
    final AvaticaServerConfig smallFrameConfig = new AvaticaServerConfig() {

        @Override
        public int getMaxConnections() {
            return 2;
        }

        @Override
        public int getMaxStatementsPerConnection() {
            return 4;
        }

        @Override
        public int getMinRowsPerFrame() {
            return minFetchSize;
        }
    };
    final PlannerConfig plannerConfig = new PlannerConfig();
    final DruidOperatorTable operatorTable = CalciteTests.createOperatorTable();
    final ExprMacroTable macroTable = CalciteTests.createExprMacroTable();
    final List<Meta.Frame> frames = new ArrayList<>();
    DruidSchemaCatalog rootSchema = CalciteTests.createMockRootSchema(conglomerate, walker, plannerConfig, AuthTestUtils.TEST_AUTHORIZER_MAPPER);
    DruidMeta smallFrameDruidMeta = new DruidMeta(
        CalciteTests.createSqlLifecycleFactory(
            new PlannerFactory(
                rootSchema,
                CalciteTests.createMockQueryMakerFactory(walker, conglomerate),
                operatorTable,
                macroTable,
                plannerConfig,
                AuthTestUtils.TEST_AUTHORIZER_MAPPER,
                CalciteTests.getJsonMapper(),
                CalciteTests.DRUID_SCHEMA_NAME
            )
        ),
        smallFrameConfig,
        new ErrorHandler(new ServerConfig()),
        injector
    ) {

        @Override
        public Frame fetch(final StatementHandle statement, final long offset, final int fetchMaxRowCount) throws NoSuchStatementException, MissingResultsException {
            // overriding fetch allows us to track how many frames are processed after the first frame, and also fetch size
            Assert.assertEquals(minFetchSize, fetchMaxRowCount);
            Frame frame = super.fetch(statement, offset, fetchMaxRowCount);
            frames.add(frame);
            return frame;
        }
    };
    final AbstractAvaticaHandler handler = this.getAvaticaHandler(smallFrameDruidMeta);
    final int port = ThreadLocalRandom.current().nextInt(9999) + 20000;
    Server smallFrameServer = new Server(new InetSocketAddress("127.0.0.1", port));
    smallFrameServer.setHandler(handler);
    smallFrameServer.start();
    String smallFrameUrl = this.getJdbcConnectionString(port);
    Connection smallFrameClient = DriverManager.getConnection(smallFrameUrl, "regularUser", "druid");
    // use a prepared statement because avatica currently ignores fetchSize on the initial fetch of a Statement
    PreparedStatement statement = smallFrameClient.prepareStatement("SELECT dim1 FROM druid.foo");
    // set a fetch size below the minimum configured threshold
    statement.setFetchSize(2);
    final ResultSet resultSet = statement.executeQuery();
    List<Map<String, Object>> rows = getRows(resultSet);
    // expect minimum threshold to be used, which should be enough to do this all in first fetch
    Assert.assertEquals(0, frames.size());
    Assert.assertEquals(
        ImmutableList.of(
            ImmutableMap.of("dim1", ""),
            ImmutableMap.of("dim1", "10.1"),
            ImmutableMap.of("dim1", "2"),
            ImmutableMap.of("dim1", "1"),
            ImmutableMap.of("dim1", "def"),
            ImmutableMap.of("dim1", "abc")
        ),
        rows
    );
}
Also used : Server(org.eclipse.jetty.server.Server) InetSocketAddress(java.net.InetSocketAddress) ArrayList(java.util.ArrayList) Connection(java.sql.Connection) AbstractAvaticaHandler(org.apache.calcite.avatica.server.AbstractAvaticaHandler) PreparedStatement(java.sql.PreparedStatement) DruidOperatorTable(org.apache.druid.sql.calcite.planner.DruidOperatorTable) ExprMacroTable(org.apache.druid.math.expr.ExprMacroTable) ServerConfig(org.apache.druid.server.initialization.ServerConfig) PlannerConfig(org.apache.druid.sql.calcite.planner.PlannerConfig) DruidSchemaCatalog(org.apache.druid.sql.calcite.schema.DruidSchemaCatalog) ResultSet(java.sql.ResultSet) PlannerFactory(org.apache.druid.sql.calcite.planner.PlannerFactory) Map(java.util.Map) ImmutableMap(com.google.common.collect.ImmutableMap) HashMap(java.util.HashMap) Test(org.junit.Test)
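Here the inverse clamping is asserted: frames.size() stays 0 because the server-side minimum overrides the client's tiny fetch size, so the whole result fits in the initial frame and paging never happens. The arithmetic, spelled out (an illustration of the asserted behavior, not Avatica internals):

// client side:  statement.setFetchSize(2)           (requested frame of 2 rows)
// server side:  getMinRowsPerFrame() returns 1000   (configured floor)
// effective frame = max(2, 1000) = 1000 rows, so all 6 rows fit in the frame
// returned with executeQuery(); fetch() is never invoked for paging, which is why
// frames remains empty. The Assert.assertEquals(minFetchSize, fetchMaxRowCount)
// inside the override is a guard: if a fetch ever did occur, it would have to
// carry the clamped minimum rather than the client's requested 2.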

Aggregations

PlannerConfig (org.apache.druid.sql.calcite.planner.PlannerConfig): 11 uses
DruidSchemaCatalog (org.apache.druid.sql.calcite.schema.DruidSchemaCatalog): 10 uses
PlannerFactory (org.apache.druid.sql.calcite.planner.PlannerFactory): 8 uses
ExprMacroTable (org.apache.druid.math.expr.ExprMacroTable): 5 uses
DruidOperatorTable (org.apache.druid.sql.calcite.planner.DruidOperatorTable): 5 uses
QueryRunnerFactoryConglomerate (org.apache.druid.query.QueryRunnerFactoryConglomerate): 4 uses
GeneratorSchemaInfo (org.apache.druid.segment.generator.GeneratorSchemaInfo): 4 uses
SegmentGenerator (org.apache.druid.segment.generator.SegmentGenerator): 4 uses
SpecificSegmentsQuerySegmentWalker (org.apache.druid.sql.calcite.util.SpecificSegmentsQuerySegmentWalker): 4 uses
DataSegment (org.apache.druid.timeline.DataSegment): 4 uses
LinearShardSpec (org.apache.druid.timeline.partition.LinearShardSpec): 4 uses
InetSocketAddress (java.net.InetSocketAddress): 3 uses
AbstractAvaticaHandler (org.apache.calcite.avatica.server.AbstractAvaticaHandler): 3 uses
QueryableIndex (org.apache.druid.segment.QueryableIndex): 3 uses
ServerConfig (org.apache.druid.server.initialization.ServerConfig): 3 uses
Before (org.junit.Before): 3 uses
Test (org.junit.Test): 3 uses
Setup (org.openjdk.jmh.annotations.Setup): 3 uses
ImmutableMap (com.google.common.collect.ImmutableMap): 2 uses
Connection (java.sql.Connection): 2 uses