Use of org.apache.druid.sql.calcite.schema.DruidSchemaCatalog in project druid by druid-io.
The class SqlVsNativeBenchmark, method setup.
@Setup(Level.Trial)
public void setup()
{
  this.closer = Closer.create();
  final GeneratorSchemaInfo schemaInfo = GeneratorBasicSchemas.SCHEMA_MAP.get("basic");
  final DataSegment dataSegment = DataSegment.builder()
                                             .dataSource("foo")
                                             .interval(schemaInfo.getDataInterval())
                                             .version("1")
                                             .shardSpec(new LinearShardSpec(0))
                                             .size(0)
                                             .build();
  final SegmentGenerator segmentGenerator = closer.register(new SegmentGenerator());
  log.info("Starting benchmark setup using tmpDir[%s], rows[%,d].", segmentGenerator.getCacheDir(), rowsPerSegment);
  final QueryableIndex index = segmentGenerator.generate(dataSegment, schemaInfo, Granularities.NONE, rowsPerSegment);
  final QueryRunnerFactoryConglomerate conglomerate = QueryStackTests.createQueryRunnerFactoryConglomerate(closer);
  final PlannerConfig plannerConfig = new PlannerConfig();
  this.walker = closer.register(new SpecificSegmentsQuerySegmentWalker(conglomerate).add(dataSegment, index));
  final DruidSchemaCatalog rootSchema =
      CalciteTests.createMockRootSchema(conglomerate, walker, plannerConfig, AuthTestUtils.TEST_AUTHORIZER_MAPPER);
  plannerFactory = new PlannerFactory(
      rootSchema,
      CalciteTests.createMockQueryMakerFactory(walker, conglomerate),
      CalciteTests.createOperatorTable(),
      CalciteTests.createExprMacroTable(),
      plannerConfig,
      AuthTestUtils.TEST_AUTHORIZER_MAPPER,
      CalciteTests.getJsonMapper(),
      CalciteTests.DRUID_SCHEMA_NAME
  );
  groupByQuery = GroupByQuery.builder()
                             .setDataSource("foo")
                             .setInterval(Intervals.ETERNITY)
                             .setDimensions(
                                 new DefaultDimensionSpec("dimZipf", "d0"),
                                 new DefaultDimensionSpec("dimSequential", "d1")
                             )
                             .setAggregatorSpecs(new CountAggregatorFactory("c"))
                             .setGranularity(Granularities.ALL)
                             .build();
  sqlQuery = "SELECT\n"
             + " dimZipf AS d0,"
             + " dimSequential AS d1,\n"
             + " COUNT(*) AS c\n"
             + "FROM druid.foo\n"
             + "GROUP BY dimZipf, dimSequential";
}
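For context, a hedged sketch of the benchmark bodies that consume the fields initialized above (plannerFactory, groupByQuery, sqlQuery, walker). The method names, JMH annotations, and the createPlannerForTesting call are assumptions about this Druid version's test-oriented planner API, not verbatim from the source.

@Benchmark
@BenchmarkMode(Mode.AverageTime)
@OutputTimeUnit(TimeUnit.MILLISECONDS)
public void queryNative(Blackhole blackhole)
{
  // Run the native groupBy query directly against the walker built in setup().
  final Sequence<ResultRow> results = QueryPlus.wrap(groupByQuery).run(walker, ResponseContext.createEmpty());
  blackhole.consume(results.toList());
}

@Benchmark
@BenchmarkMode(Mode.AverageTime)
@OutputTimeUnit(TimeUnit.MILLISECONDS)
public void queryPlanner(Blackhole blackhole) throws Exception
{
  // Plan and run the equivalent SQL. createPlannerForTesting and the no-arg
  // plan() are assumptions; planner method signatures vary across Druid versions.
  try (final DruidPlanner planner = plannerFactory.createPlannerForTesting(ImmutableMap.of(), sqlQuery)) {
    final Sequence<Object[]> results = planner.plan().run();
    blackhole.consume(results.toList());
  }
}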
Use of org.apache.druid.sql.calcite.schema.DruidSchemaCatalog in project druid by druid-io.
The class SqlExpressionBenchmark, method setup.
@Setup(Level.Trial)
public void setup()
{
  final GeneratorSchemaInfo schemaInfo = GeneratorBasicSchemas.SCHEMA_MAP.get("expression-testbench");
  final DataSegment dataSegment = DataSegment.builder()
                                             .dataSource("foo")
                                             .interval(schemaInfo.getDataInterval())
                                             .version("1")
                                             .shardSpec(new LinearShardSpec(0))
                                             .size(0)
                                             .build();
  final PlannerConfig plannerConfig = new PlannerConfig();
  final SegmentGenerator segmentGenerator = closer.register(new SegmentGenerator());
  log.info("Starting benchmark setup using cacheDir[%s], rows[%,d].", segmentGenerator.getCacheDir(), rowsPerSegment);
  final QueryableIndex index = segmentGenerator.generate(dataSegment, schemaInfo, Granularities.NONE, rowsPerSegment);
  final QueryRunnerFactoryConglomerate conglomerate = QueryStackTests.createQueryRunnerFactoryConglomerate(closer, PROCESSING_CONFIG);
  final SpecificSegmentsQuerySegmentWalker walker = new SpecificSegmentsQuerySegmentWalker(conglomerate).add(dataSegment, index);
  closer.register(walker);
  final DruidSchemaCatalog rootSchema =
      CalciteTests.createMockRootSchema(conglomerate, walker, plannerConfig, AuthTestUtils.TEST_AUTHORIZER_MAPPER);
  plannerFactory = new PlannerFactory(
      rootSchema,
      CalciteTests.createMockQueryMakerFactory(walker, conglomerate),
      CalciteTests.createOperatorTable(),
      CalciteTests.createExprMacroTable(),
      plannerConfig,
      AuthTestUtils.TEST_AUTHORIZER_MAPPER,
      CalciteTests.getJsonMapper(),
      CalciteTests.DRUID_SCHEMA_NAME
  );
  try {
    SqlVectorizedExpressionSanityTest.sanityTestVectorizedSqlQueries(plannerFactory, QUERIES.get(Integer.parseInt(query)));
  }
  catch (Throwable ignored) {
    // the show must go on
  }
}
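This setup registers the segment generator, walker, and conglomerate with closer but the excerpt omits the matching teardown. A minimal sketch of what it would look like; the tearDown method itself is assumed, only the closer field comes from the excerpt.

@TearDown(Level.Trial)
public void tearDown() throws Exception
{
  // Release everything registered with the Closer during setup().
  closer.close();
}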
Use of org.apache.druid.sql.calcite.schema.DruidSchemaCatalog in project druid by druid-io.
The class DruidAvaticaHandlerTest, method testMaxRowsPerFrame.
@Test
public void testMaxRowsPerFrame() throws Exception
{
  final AvaticaServerConfig smallFrameConfig = new AvaticaServerConfig()
  {
    @Override
    public int getMaxConnections()
    {
      return 2;
    }

    @Override
    public int getMaxStatementsPerConnection()
    {
      return 4;
    }

    @Override
    public int getMaxRowsPerFrame()
    {
      return 2;
    }
  };

  final PlannerConfig plannerConfig = new PlannerConfig();
  final DruidOperatorTable operatorTable = CalciteTests.createOperatorTable();
  final ExprMacroTable macroTable = CalciteTests.createExprMacroTable();
  final List<Meta.Frame> frames = new ArrayList<>();
  DruidSchemaCatalog rootSchema =
      CalciteTests.createMockRootSchema(conglomerate, walker, plannerConfig, AuthTestUtils.TEST_AUTHORIZER_MAPPER);
  DruidMeta smallFrameDruidMeta = new DruidMeta(
      CalciteTests.createSqlLifecycleFactory(
          new PlannerFactory(
              rootSchema,
              CalciteTests.createMockQueryMakerFactory(walker, conglomerate),
              operatorTable,
              macroTable,
              plannerConfig,
              AuthTestUtils.TEST_AUTHORIZER_MAPPER,
              CalciteTests.getJsonMapper(),
              CalciteTests.DRUID_SCHEMA_NAME
          )
      ),
      smallFrameConfig,
      new ErrorHandler(new ServerConfig()),
      injector
  )
  {
    @Override
    public Frame fetch(final StatementHandle statement, final long offset, final int fetchMaxRowCount)
        throws NoSuchStatementException, MissingResultsException
    {
      // overriding fetch allows us to track how many frames are processed after the first frame
      Frame frame = super.fetch(statement, offset, fetchMaxRowCount);
      frames.add(frame);
      return frame;
    }
  };

  final AbstractAvaticaHandler handler = this.getAvaticaHandler(smallFrameDruidMeta);
  final int port = ThreadLocalRandom.current().nextInt(9999) + 20000;
  Server smallFrameServer = new Server(new InetSocketAddress("127.0.0.1", port));
  smallFrameServer.setHandler(handler);
  smallFrameServer.start();
  String smallFrameUrl = this.getJdbcConnectionString(port);
  Connection smallFrameClient = DriverManager.getConnection(smallFrameUrl, "regularUser", "druid");

  final ResultSet resultSet = smallFrameClient.createStatement().executeQuery("SELECT dim1 FROM druid.foo");
  List<Map<String, Object>> rows = getRows(resultSet);
  Assert.assertEquals(2, frames.size());
  Assert.assertEquals(
      ImmutableList.of(
          ImmutableMap.of("dim1", ""),
          ImmutableMap.of("dim1", "10.1"),
          ImmutableMap.of("dim1", "2"),
          ImmutableMap.of("dim1", "1"),
          ImmutableMap.of("dim1", "def"),
          ImmutableMap.of("dim1", "abc")
      ),
      rows
  );
}
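Both Avatica tests rely on a getRows helper that the excerpts do not show. A hedged sketch of what such a helper typically looks like; the real test utility may differ.

private static List<Map<String, Object>> getRows(final ResultSet resultSet) throws SQLException
{
  // Drain the ResultSet into a list of columnName -> value maps so the
  // assertions above can compare against ImmutableMap literals.
  final ResultSetMetaData metaData = resultSet.getMetaData();
  final List<Map<String, Object>> rows = new ArrayList<>();
  while (resultSet.next()) {
    final Map<String, Object> row = new HashMap<>();
    for (int i = 1; i <= metaData.getColumnCount(); i++) {
      row.put(metaData.getColumnLabel(i), resultSet.getObject(i));
    }
    rows.add(row);
  }
  return rows;
}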
Use of org.apache.druid.sql.calcite.schema.DruidSchemaCatalog in project druid by druid-io.
The class DruidAvaticaHandlerTest, method testMinRowsPerFrame.
@Test
public void testMinRowsPerFrame() throws Exception
{
  final int minFetchSize = 1000;
  final AvaticaServerConfig smallFrameConfig = new AvaticaServerConfig()
  {
    @Override
    public int getMaxConnections()
    {
      return 2;
    }

    @Override
    public int getMaxStatementsPerConnection()
    {
      return 4;
    }

    @Override
    public int getMinRowsPerFrame()
    {
      return minFetchSize;
    }
  };

  final PlannerConfig plannerConfig = new PlannerConfig();
  final DruidOperatorTable operatorTable = CalciteTests.createOperatorTable();
  final ExprMacroTable macroTable = CalciteTests.createExprMacroTable();
  final List<Meta.Frame> frames = new ArrayList<>();
  DruidSchemaCatalog rootSchema =
      CalciteTests.createMockRootSchema(conglomerate, walker, plannerConfig, AuthTestUtils.TEST_AUTHORIZER_MAPPER);
  DruidMeta smallFrameDruidMeta = new DruidMeta(
      CalciteTests.createSqlLifecycleFactory(
          new PlannerFactory(
              rootSchema,
              CalciteTests.createMockQueryMakerFactory(walker, conglomerate),
              operatorTable,
              macroTable,
              plannerConfig,
              AuthTestUtils.TEST_AUTHORIZER_MAPPER,
              CalciteTests.getJsonMapper(),
              CalciteTests.DRUID_SCHEMA_NAME
          )
      ),
      smallFrameConfig,
      new ErrorHandler(new ServerConfig()),
      injector
  )
  {
    @Override
    public Frame fetch(final StatementHandle statement, final long offset, final int fetchMaxRowCount)
        throws NoSuchStatementException, MissingResultsException
    {
      // overriding fetch allows us to track how many frames are processed after the first frame, and also fetch size
      Assert.assertEquals(minFetchSize, fetchMaxRowCount);
      Frame frame = super.fetch(statement, offset, fetchMaxRowCount);
      frames.add(frame);
      return frame;
    }
  };

  final AbstractAvaticaHandler handler = this.getAvaticaHandler(smallFrameDruidMeta);
  final int port = ThreadLocalRandom.current().nextInt(9999) + 20000;
  Server smallFrameServer = new Server(new InetSocketAddress("127.0.0.1", port));
  smallFrameServer.setHandler(handler);
  smallFrameServer.start();
  String smallFrameUrl = this.getJdbcConnectionString(port);
  Connection smallFrameClient = DriverManager.getConnection(smallFrameUrl, "regularUser", "druid");

  // use a prepared statement because avatica currently ignores fetchSize on the initial fetch of a Statement
  PreparedStatement statement = smallFrameClient.prepareStatement("SELECT dim1 FROM druid.foo");
  // set a fetch size below the minimum configured threshold
  statement.setFetchSize(2);
  final ResultSet resultSet = statement.executeQuery();
  List<Map<String, Object>> rows = getRows(resultSet);
  // expect the minimum threshold to be used, which should be enough to do this all in the first fetch
  Assert.assertEquals(0, frames.size());
  Assert.assertEquals(
      ImmutableList.of(
          ImmutableMap.of("dim1", ""),
          ImmutableMap.of("dim1", "10.1"),
          ImmutableMap.of("dim1", "2"),
          ImmutableMap.of("dim1", "1"),
          ImmutableMap.of("dim1", "def"),
          ImmutableMap.of("dim1", "abc")
      ),
      rows
  );
}
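Both excerpts stop at the assertions and never release the Jetty server or JDBC connection they open. A closing sequence along these lines (an assumption, not shown in the source) keeps repeated test runs from leaking ports and connections.

// Hedged cleanup sketch: release the client and the server started above.
resultSet.close();
smallFrameClient.close();
smallFrameServer.stop();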
Use of org.apache.druid.sql.calcite.schema.DruidSchemaCatalog in project druid by druid-io.
The class CalciteTests, method createMockRootSchema.
public static DruidSchemaCatalog createMockRootSchema(
    final QueryRunnerFactoryConglomerate conglomerate,
    final SpecificSegmentsQuerySegmentWalker walker,
    final PlannerConfig plannerConfig,
    @Nullable final ViewManager viewManager,
    final DruidSchemaManager druidSchemaManager,
    final AuthorizerMapper authorizerMapper
)
{
  DruidSchema druidSchema = createMockSchema(conglomerate, walker, plannerConfig, druidSchemaManager);
  SystemSchema systemSchema = CalciteTests.createMockSystemSchema(druidSchema, walker, plannerConfig, authorizerMapper);
  LookupSchema lookupSchema = CalciteTests.createMockLookupSchema();
  ViewSchema viewSchema = viewManager != null ? new ViewSchema(viewManager) : null;

  SchemaPlus rootSchema = CalciteSchema.createRootSchema(false, false).plus();
  Set<NamedSchema> namedSchemas = new HashSet<>();
  namedSchemas.add(new NamedDruidSchema(druidSchema, CalciteTests.DRUID_SCHEMA_NAME));
  namedSchemas.add(new NamedSystemSchema(plannerConfig, systemSchema));
  namedSchemas.add(new NamedLookupSchema(lookupSchema));
  if (viewSchema != null) {
    namedSchemas.add(new NamedViewSchema(viewSchema));
  }

  DruidSchemaCatalog catalog = new DruidSchemaCatalog(
      rootSchema,
      namedSchemas.stream().collect(Collectors.toMap(NamedSchema::getSchemaName, x -> x))
  );
  InformationSchema informationSchema = new InformationSchema(catalog, authorizerMapper);
  rootSchema.add(CalciteTests.DRUID_SCHEMA_NAME, druidSchema);
  rootSchema.add(CalciteTests.INFORMATION_SCHEMA_NAME, informationSchema);
  rootSchema.add(NamedSystemSchema.NAME, systemSchema);
  rootSchema.add(NamedLookupSchema.NAME, lookupSchema);
  if (viewSchema != null) {
    rootSchema.add(NamedViewSchema.NAME, viewSchema);
  }
  return catalog;
}
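The benchmarks and Avatica tests above call a four-argument createMockRootSchema. A plausible sketch of that convenience overload, assuming it delegates here with no ViewManager and Druid's NoopDruidSchemaManager; the exact defaults are an assumption, not verbatim from the source.

public static DruidSchemaCatalog createMockRootSchema(
    final QueryRunnerFactoryConglomerate conglomerate,
    final SpecificSegmentsQuerySegmentWalker walker,
    final PlannerConfig plannerConfig,
    final AuthorizerMapper authorizerMapper
)
{
  // Delegate with no view support and a no-op schema manager (assumed defaults).
  return createMockRootSchema(conglomerate, walker, plannerConfig, null, new NoopDruidSchemaManager(), authorizerMapper);
}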