Use of org.apache.druid.sql.calcite.planner.DruidOperatorTable in project druid by druid-io.
From the class DruidAvaticaHandlerTest, the setUp method.
@Before
public void setUp() throws Exception
{
  walker = CalciteTests.createMockWalker(conglomerate, temporaryFolder.newFolder());
  final PlannerConfig plannerConfig = new PlannerConfig();
  final DruidOperatorTable operatorTable = CalciteTests.createOperatorTable();
  final ExprMacroTable macroTable = CalciteTests.createExprMacroTable();
  final DruidSchemaCatalog rootSchema =
      CalciteTests.createMockRootSchema(conglomerate, walker, plannerConfig, CalciteTests.TEST_AUTHORIZER_MAPPER);
  testRequestLogger = new TestRequestLogger();
  injector = Initialization.makeInjectorWithModules(
      GuiceInjectors.makeStartupInjector(),
      ImmutableList.of(new Module()
      {
        @Override
        public void configure(Binder binder)
        {
          binder.bindConstant().annotatedWith(Names.named("serviceName")).to("test");
          binder.bindConstant().annotatedWith(Names.named("servicePort")).to(0);
          binder.bindConstant().annotatedWith(Names.named("tlsServicePort")).to(-1);
          binder.bind(AuthenticatorMapper.class).toInstance(CalciteTests.TEST_AUTHENTICATOR_MAPPER);
          binder.bind(AuthorizerMapper.class).toInstance(CalciteTests.TEST_AUTHORIZER_MAPPER);
          binder.bind(Escalator.class).toInstance(CalciteTests.TEST_AUTHENTICATOR_ESCALATOR);
          binder.bind(RequestLogger.class).toInstance(testRequestLogger);
          binder.bind(DruidSchemaCatalog.class).toInstance(rootSchema);
          for (NamedSchema schema : rootSchema.getNamedSchemas().values()) {
            Multibinder.newSetBinder(binder, NamedSchema.class).addBinding().toInstance(schema);
          }
          binder.bind(QueryLifecycleFactory.class)
                .toInstance(CalciteTests.createMockQueryLifecycleFactory(walker, conglomerate));
          binder.bind(DruidOperatorTable.class).toInstance(operatorTable);
          binder.bind(ExprMacroTable.class).toInstance(macroTable);
          binder.bind(PlannerConfig.class).toInstance(plannerConfig);
          binder.bind(String.class).annotatedWith(DruidSchemaName.class).toInstance(CalciteTests.DRUID_SCHEMA_NAME);
          binder.bind(AvaticaServerConfig.class).toInstance(AVATICA_CONFIG);
          binder.bind(ServiceEmitter.class).to(NoopServiceEmitter.class);
          binder.bind(QuerySchedulerProvider.class).in(LazySingleton.class);
          binder.bind(QueryScheduler.class).toProvider(QuerySchedulerProvider.class).in(LazySingleton.class);
          binder.bind(QueryMakerFactory.class).to(NativeQueryMakerFactory.class);
        }
      })
  );
  druidMeta = injector.getInstance(DruidMeta.class);
  final AbstractAvaticaHandler handler = this.getAvaticaHandler(druidMeta);
  final int port = ThreadLocalRandom.current().nextInt(9999) + 10000;
  server = new Server(new InetSocketAddress("127.0.0.1", port));
  server.setHandler(handler);
  server.start();
  url = this.getJdbcConnectionString(port);
  client = DriverManager.getConnection(url, "regularUser", "druid");
  superuserClient = DriverManager.getConnection(url, CalciteTests.TEST_SUPERUSER_NAME, "druid");
  clientNoTrailingSlash =
      DriverManager.getConnection(StringUtils.maybeRemoveTrailingSlash(url), CalciteTests.TEST_SUPERUSER_NAME, "druid");
  final Properties propertiesLosAngeles = new Properties();
  propertiesLosAngeles.setProperty("sqlTimeZone", "America/Los_Angeles");
  propertiesLosAngeles.setProperty("user", "regularUserLA");
  propertiesLosAngeles.setProperty(BaseQuery.SQL_QUERY_ID, DUMMY_SQL_QUERY_ID);
  clientLosAngeles = DriverManager.getConnection(url, propertiesLosAngeles);
}
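With the server started, the planner stack assembled above, including the DruidOperatorTable bound in the Guice module, can be exercised through plain JDBC. The snippet below is a minimal illustrative sketch, not part of the original setUp; it assumes the client connection opened above and the druid.foo datasource that CalciteTests provides.

// Illustrative sketch only: run a query over the connection opened in setUp.
// Functions in the SQL are resolved through the planner's operator table
// (the bound DruidOperatorTable, which also falls back to Calcite's built-in operators).
try (Statement stmt = client.createStatement();
     ResultSet rs = stmt.executeQuery("SELECT COUNT(*) AS cnt FROM druid.foo")) {
  while (rs.next()) {
    System.out.println(rs.getLong("cnt"));
  }
}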
Use of org.apache.druid.sql.calcite.planner.DruidOperatorTable in project druid by druid-io.
From the class SqlResourceTest, the setUp method.
@Before
public void setUp() throws Exception
{
  final QueryScheduler scheduler = new QueryScheduler(
      5,
      ManualQueryPrioritizationStrategy.INSTANCE,
      new HiLoQueryLaningStrategy(40),
      new ServerConfig()
  )
  {
    @Override
    public <T> Sequence<T> run(Query<?> query, Sequence<T> resultSequence)
    {
      return super.run(query, new LazySequence<T>(() -> {
        if (sleep) {
          try {
            // pretend to be a query that is waiting on results
            Thread.sleep(500);
          }
          catch (InterruptedException ignored) {
          }
        }
        return resultSequence;
      }));
    }
  };
  executorService = MoreExecutors.listeningDecorator(Execs.multiThreaded(8, "test_sql_resource_%s"));
  walker = CalciteTests.createMockWalker(conglomerate, temporaryFolder.newFolder(), scheduler);
  final PlannerConfig plannerConfig = new PlannerConfig()
  {
    @Override
    public boolean shouldSerializeComplexValues()
    {
      return false;
    }
  };
  final DruidSchemaCatalog rootSchema =
      CalciteTests.createMockRootSchema(conglomerate, walker, plannerConfig, CalciteTests.TEST_AUTHORIZER_MAPPER);
  final DruidOperatorTable operatorTable = CalciteTests.createOperatorTable();
  final ExprMacroTable macroTable = CalciteTests.createExprMacroTable();
  req = EasyMock.createStrictMock(HttpServletRequest.class);
  EasyMock.expect(req.getRemoteAddr()).andReturn(null).once();
  EasyMock.expect(req.getAttribute(AuthConfig.DRUID_AUTHENTICATION_RESULT))
          .andReturn(CalciteTests.REGULAR_USER_AUTH_RESULT)
          .anyTimes();
  EasyMock.expect(req.getAttribute(AuthConfig.DRUID_ALLOW_UNSECURED_PATH)).andReturn(null).anyTimes();
  EasyMock.expect(req.getAttribute(AuthConfig.DRUID_AUTHORIZATION_CHECKED)).andReturn(null).anyTimes();
  EasyMock.expect(req.getAttribute(AuthConfig.DRUID_AUTHENTICATION_RESULT))
          .andReturn(CalciteTests.REGULAR_USER_AUTH_RESULT)
          .anyTimes();
  req.setAttribute(AuthConfig.DRUID_AUTHORIZATION_CHECKED, true);
  EasyMock.expectLastCall().anyTimes();
  EasyMock.expect(req.getAttribute(AuthConfig.DRUID_AUTHENTICATION_RESULT))
          .andReturn(CalciteTests.REGULAR_USER_AUTH_RESULT)
          .anyTimes();
  EasyMock.replay(req);
  testRequestLogger = new TestRequestLogger();
  final PlannerFactory plannerFactory = new PlannerFactory(
      rootSchema,
      CalciteTests.createMockQueryMakerFactory(walker, conglomerate),
      operatorTable,
      macroTable,
      plannerConfig,
      CalciteTests.TEST_AUTHORIZER_MAPPER,
      CalciteTests.getJsonMapper(),
      CalciteTests.DRUID_SCHEMA_NAME
  );
  lifecycleManager = new SqlLifecycleManager()
  {
    @Override
    public void add(String sqlQueryId, SqlLifecycle lifecycle)
    {
      super.add(sqlQueryId, lifecycle);
      if (lifecycleAddLatch != null) {
        lifecycleAddLatch.countDown();
      }
    }
  };
  final ServiceEmitter emitter = new NoopServiceEmitter();
  sqlLifecycleFactory = new SqlLifecycleFactory(plannerFactory, emitter, testRequestLogger, scheduler)
  {
    @Override
    public SqlLifecycle factorize()
    {
      return new TestSqlLifecycle(
          plannerFactory,
          emitter,
          testRequestLogger,
          scheduler,
          System.currentTimeMillis(),
          System.nanoTime(),
          validateAndAuthorizeLatchSupplier,
          planLatchSupplier,
          executeLatchSupplier,
          sequenceMapFnSupplier
      );
    }
  };
  resource = new SqlResource(JSON_MAPPER, CalciteTests.TEST_AUTHORIZER_MAPPER, sqlLifecycleFactory, lifecycleManager, new ServerConfig());
}
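CalciteTests.createOperatorTable() stands in for the operator table that Druid would normally assemble via injection. When a test needs extra operators, a table can also be built directly; the sketch below is illustrative rather than part of SqlResourceTest, and assumes DruidOperatorTable's constructor that takes a set of SqlAggregator implementations and a set of SqlOperatorConversion implementations, as used in Druid's extension tests.

// Sketch only: an operator table with no custom operators. Custom SqlAggregator or
// SqlOperatorConversion instances would be added to these sets.
final DruidOperatorTable customOperatorTable = new DruidOperatorTable(
    ImmutableSet.<SqlAggregator>of(),
    ImmutableSet.<SqlOperatorConversion>of()
);

A table built this way can be handed to PlannerFactory in place of the one returned by CalciteTests.createOperatorTable().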
Use of org.apache.druid.sql.calcite.planner.DruidOperatorTable in project druid by druid-io.
From the class DruidAvaticaHandlerTest, the testMaxRowsPerFrame method.
@Test
public void testMaxRowsPerFrame() throws Exception
{
  final AvaticaServerConfig smallFrameConfig = new AvaticaServerConfig()
  {
    @Override
    public int getMaxConnections()
    {
      return 2;
    }

    @Override
    public int getMaxStatementsPerConnection()
    {
      return 4;
    }

    @Override
    public int getMaxRowsPerFrame()
    {
      return 2;
    }
  };
  final PlannerConfig plannerConfig = new PlannerConfig();
  final DruidOperatorTable operatorTable = CalciteTests.createOperatorTable();
  final ExprMacroTable macroTable = CalciteTests.createExprMacroTable();
  final List<Meta.Frame> frames = new ArrayList<>();
  DruidSchemaCatalog rootSchema =
      CalciteTests.createMockRootSchema(conglomerate, walker, plannerConfig, AuthTestUtils.TEST_AUTHORIZER_MAPPER);
  DruidMeta smallFrameDruidMeta = new DruidMeta(
      CalciteTests.createSqlLifecycleFactory(
          new PlannerFactory(
              rootSchema,
              CalciteTests.createMockQueryMakerFactory(walker, conglomerate),
              operatorTable,
              macroTable,
              plannerConfig,
              AuthTestUtils.TEST_AUTHORIZER_MAPPER,
              CalciteTests.getJsonMapper(),
              CalciteTests.DRUID_SCHEMA_NAME
          )
      ),
      smallFrameConfig,
      new ErrorHandler(new ServerConfig()),
      injector
  )
  {
    @Override
    public Frame fetch(final StatementHandle statement, final long offset, final int fetchMaxRowCount)
        throws NoSuchStatementException, MissingResultsException
    {
      // overriding fetch allows us to track how many frames are processed after the first frame
      Frame frame = super.fetch(statement, offset, fetchMaxRowCount);
      frames.add(frame);
      return frame;
    }
  };
  final AbstractAvaticaHandler handler = this.getAvaticaHandler(smallFrameDruidMeta);
  final int port = ThreadLocalRandom.current().nextInt(9999) + 20000;
  Server smallFrameServer = new Server(new InetSocketAddress("127.0.0.1", port));
  smallFrameServer.setHandler(handler);
  smallFrameServer.start();
  String smallFrameUrl = this.getJdbcConnectionString(port);
  Connection smallFrameClient = DriverManager.getConnection(smallFrameUrl, "regularUser", "druid");
  final ResultSet resultSet = smallFrameClient.createStatement().executeQuery("SELECT dim1 FROM druid.foo");
  List<Map<String, Object>> rows = getRows(resultSet);
  Assert.assertEquals(2, frames.size());
  Assert.assertEquals(
      ImmutableList.of(
          ImmutableMap.of("dim1", ""),
          ImmutableMap.of("dim1", "10.1"),
          ImmutableMap.of("dim1", "2"),
          ImmutableMap.of("dim1", "1"),
          ImmutableMap.of("dim1", "def"),
          ImmutableMap.of("dim1", "abc")
      ),
      rows
  );
}
Use of org.apache.druid.sql.calcite.planner.DruidOperatorTable in project druid by druid-io.
From the class DruidAvaticaHandlerTest, the testMinRowsPerFrame method.
@Test
public void testMinRowsPerFrame() throws Exception
{
  final int minFetchSize = 1000;
  final AvaticaServerConfig smallFrameConfig = new AvaticaServerConfig()
  {
    @Override
    public int getMaxConnections()
    {
      return 2;
    }

    @Override
    public int getMaxStatementsPerConnection()
    {
      return 4;
    }

    @Override
    public int getMinRowsPerFrame()
    {
      return minFetchSize;
    }
  };
  final PlannerConfig plannerConfig = new PlannerConfig();
  final DruidOperatorTable operatorTable = CalciteTests.createOperatorTable();
  final ExprMacroTable macroTable = CalciteTests.createExprMacroTable();
  final List<Meta.Frame> frames = new ArrayList<>();
  DruidSchemaCatalog rootSchema =
      CalciteTests.createMockRootSchema(conglomerate, walker, plannerConfig, AuthTestUtils.TEST_AUTHORIZER_MAPPER);
  DruidMeta smallFrameDruidMeta = new DruidMeta(
      CalciteTests.createSqlLifecycleFactory(
          new PlannerFactory(
              rootSchema,
              CalciteTests.createMockQueryMakerFactory(walker, conglomerate),
              operatorTable,
              macroTable,
              plannerConfig,
              AuthTestUtils.TEST_AUTHORIZER_MAPPER,
              CalciteTests.getJsonMapper(),
              CalciteTests.DRUID_SCHEMA_NAME
          )
      ),
      smallFrameConfig,
      new ErrorHandler(new ServerConfig()),
      injector
  )
  {
    @Override
    public Frame fetch(final StatementHandle statement, final long offset, final int fetchMaxRowCount)
        throws NoSuchStatementException, MissingResultsException
    {
      // overriding fetch allows us to track how many frames are processed after the first frame, and also fetch size
      Assert.assertEquals(minFetchSize, fetchMaxRowCount);
      Frame frame = super.fetch(statement, offset, fetchMaxRowCount);
      frames.add(frame);
      return frame;
    }
  };
  final AbstractAvaticaHandler handler = this.getAvaticaHandler(smallFrameDruidMeta);
  final int port = ThreadLocalRandom.current().nextInt(9999) + 20000;
  Server smallFrameServer = new Server(new InetSocketAddress("127.0.0.1", port));
  smallFrameServer.setHandler(handler);
  smallFrameServer.start();
  String smallFrameUrl = this.getJdbcConnectionString(port);
  Connection smallFrameClient = DriverManager.getConnection(smallFrameUrl, "regularUser", "druid");
  // use a prepared statement because avatica currently ignores fetchSize on the initial fetch of a Statement
  PreparedStatement statement = smallFrameClient.prepareStatement("SELECT dim1 FROM druid.foo");
  // set a fetch size below the minimum configured threshold
  statement.setFetchSize(2);
  final ResultSet resultSet = statement.executeQuery();
  List<Map<String, Object>> rows = getRows(resultSet);
  // expect minimum threshold to be used, which should be enough to do this all in first fetch
  Assert.assertEquals(0, frames.size());
  Assert.assertEquals(
      ImmutableList.of(
          ImmutableMap.of("dim1", ""),
          ImmutableMap.of("dim1", "10.1"),
          ImmutableMap.of("dim1", "2"),
          ImmutableMap.of("dim1", "1"),
          ImmutableMap.of("dim1", "def"),
          ImmutableMap.of("dim1", "abc")
      ),
      rows
  );
}
Use of org.apache.druid.sql.calcite.planner.DruidOperatorTable in project druid by druid-io.
From the class DruidStatementTest, the setUp method.
@Before
public void setUp() throws Exception
{
  walker = CalciteTests.createMockWalker(conglomerate, temporaryFolder.newFolder());
  final PlannerConfig plannerConfig = new PlannerConfig();
  final DruidOperatorTable operatorTable = CalciteTests.createOperatorTable();
  final ExprMacroTable macroTable = CalciteTests.createExprMacroTable();
  DruidSchemaCatalog rootSchema =
      CalciteTests.createMockRootSchema(conglomerate, walker, plannerConfig, AuthTestUtils.TEST_AUTHORIZER_MAPPER);
  final PlannerFactory plannerFactory = new PlannerFactory(
      rootSchema,
      CalciteTests.createMockQueryMakerFactory(walker, conglomerate),
      operatorTable,
      macroTable,
      plannerConfig,
      AuthTestUtils.TEST_AUTHORIZER_MAPPER,
      CalciteTests.getJsonMapper(),
      CalciteTests.DRUID_SCHEMA_NAME
  );
  this.sqlLifecycleFactory = CalciteTests.createSqlLifecycleFactory(plannerFactory);
}
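Outside of tests, the DruidOperatorTable obtained from Guice is populated by extension modules rather than constructed or bound by hand. The sketch below shows the usual registration pattern through SqlBindings; MyCustomSqlAggregator is a hypothetical class used only for illustration.

// Sketch of an extension DruidModule's configure method; MyCustomSqlAggregator is hypothetical.
@Override
public void configure(Binder binder)
{
  // Adds the aggregator to the multibinding that feeds the injected DruidOperatorTable.
  SqlBindings.addAggregator(binder, MyCustomSqlAggregator.class);
}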