Use of org.apache.druid.server.initialization.ServerConfig in project druid by druid-io.
The class QueryResourceTest, method testTooManyQueryInLaneImplicitFromDurationThreshold.
@Test(timeout = 10_000L)
public void testTooManyQueryInLaneImplicitFromDurationThreshold() throws InterruptedException
{
  expectPermissiveHappyPathAuth();
  final CountDownLatch waitTwoStarted = new CountDownLatch(2);
  final CountDownLatch waitOneScheduled = new CountDownLatch(1);
  final CountDownLatch waitAllFinished = new CountDownLatch(3);
  final QueryScheduler scheduler = new QueryScheduler(
      40,
      new ThresholdBasedQueryPrioritizationStrategy(null, "P90D", null, null),
      new HiLoQueryLaningStrategy(1),
      new ServerConfig()
  );
  createScheduledQueryResource(scheduler, ImmutableList.of(waitTwoStarted), ImmutableList.of(waitOneScheduled));
  assertResponseAndCountdownOrBlockForever(
      SIMPLE_TIMESERIES_QUERY,
      waitAllFinished,
      response -> Assert.assertEquals(Response.Status.OK.getStatusCode(), response.getStatus())
  );
  waitOneScheduled.await();
  assertResponseAndCountdownOrBlockForever(
      SIMPLE_TIMESERIES_QUERY,
      waitAllFinished,
      response -> {
        Assert.assertEquals(QueryCapacityExceededException.STATUS_CODE, response.getStatus());
        QueryCapacityExceededException ex;
        try {
          ex = jsonMapper.readValue((byte[]) response.getEntity(), QueryCapacityExceededException.class);
        }
        catch (IOException e) {
          throw new RuntimeException(e);
        }
        Assert.assertEquals(QueryCapacityExceededException.makeLaneErrorMessage(HiLoQueryLaningStrategy.LOW, 1), ex.getMessage());
        Assert.assertEquals(QueryCapacityExceededException.ERROR_CODE, ex.getErrorCode());
      }
  );
  waitTwoStarted.await();
  assertResponseAndCountdownOrBlockForever(
      SIMPLE_TIMESERIES_QUERY_SMALLISH_INTERVAL,
      waitAllFinished,
      response -> Assert.assertEquals(Response.Status.OK.getStatusCode(), response.getStatus())
  );
  waitAllFinished.await();
}
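Read together, the constructor arguments explain the expected failure: the scheduler admits 40 concurrent queries overall, the duration-threshold strategy lowers the priority of any query spanning more than "P90D", and HiLoQueryLaningStrategy(1) leaves a single slot for such low-priority queries, so the second wide-interval query is rejected while the SMALLISH_INTERVAL query still runs. Below is a minimal sketch of the same wiring outside the test harness; the package locations named in the comment and the per-argument explanations are interpretations of this test, not documented behavior.

// Assumed imports: org.apache.druid.server.QueryScheduler,
// org.apache.druid.server.scheduling.ThresholdBasedQueryPrioritizationStrategy,
// org.apache.druid.server.scheduling.HiLoQueryLaningStrategy,
// org.apache.druid.server.initialization.ServerConfig
QueryScheduler makeLanedScheduler()
{
  return new QueryScheduler(
      // total concurrency available to the scheduler
      40,
      // no period threshold, a 90-day duration threshold, no segment-count threshold, default adjustment
      new ThresholdBasedQueryPrioritizationStrategy(null, "P90D", null, null),
      // cap the low-priority lane at roughly 1% of total capacity, with at least one slot
      new HiLoQueryLaningStrategy(1),
      // server-wide defaults (timeouts and similar limits) come from ServerConfig
      new ServerConfig()
  );
}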
Use of org.apache.druid.server.initialization.ServerConfig in project druid by druid-io.
The class QuerySchedulerTest, method createInjector.
private Injector createInjector()
{
  Injector injector = GuiceInjectors.makeStartupInjectorWithModules(
      ImmutableList.of(
          binder -> {
            binder.bind(ServerConfig.class).toInstance(new ServerConfig());
            JsonConfigProvider.bind(binder, "druid.query.scheduler", QuerySchedulerProvider.class, Global.class);
          }
      )
  );
  ObjectMapper mapper = injector.getInstance(Key.get(ObjectMapper.class, Json.class));
  mapper.setInjectableValues(
      new InjectableValues.Std().addValue(ServerConfig.class, injector.getInstance(ServerConfig.class))
  );
  return injector;
}
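Binding the config with toInstance(new ServerConfig()) pins every setting to its compiled-in default. As a sketch of the alternative, the injector could let JsonConfigProvider populate ServerConfig from runtime properties instead; druid.server.http is the prefix Druid's HTTP server settings normally use, but treat the exact prefix and property names here as assumptions.

// Assumed imports: com.google.common.collect.ImmutableList, com.google.inject.Injector,
// org.apache.druid.guice.GuiceInjectors, org.apache.druid.guice.JsonConfigProvider,
// org.apache.druid.server.initialization.ServerConfig
private Injector createInjectorFromProperties()
{
  return GuiceInjectors.makeStartupInjectorWithModules(
      ImmutableList.of(
          binder -> {
            // Runtime properties such as druid.server.http.numThreads or druid.server.http.maxIdleTime
            // would then flow into the injected ServerConfig instead of compile-time defaults.
            JsonConfigProvider.bind(binder, "druid.server.http", ServerConfig.class);
          }
      )
  );
}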
Use of org.apache.druid.server.initialization.ServerConfig in project druid by druid-io.
The class SetAndVerifyContextQueryRunnerTest, method testTimeoutZeroIsNotImmediateTimeoutExplicitServersideMax.
@Test
public void testTimeoutZeroIsNotImmediateTimeoutExplicitServersideMax()
{
  Query<ScanResultValue> query = new Druids.ScanQueryBuilder()
      .dataSource("foo")
      .intervals(new MultipleIntervalSegmentSpec(ImmutableList.of(Intervals.ETERNITY)))
      .context(ImmutableMap.of(QueryContexts.TIMEOUT_KEY, 0))
      .build();
  ServerConfig defaultConfig = new ServerConfig()
  {
    @Override
    public long getMaxQueryTimeout()
    {
      return 10000L;
    }
  };
  QueryRunner<ScanResultValue> mockRunner = EasyMock.createMock(QueryRunner.class);
  SetAndVerifyContextQueryRunner<ScanResultValue> queryRunner =
      new SetAndVerifyContextQueryRunner<>(defaultConfig, mockRunner);
  Query<ScanResultValue> transformed = queryRunner.withTimeoutAndMaxScatterGatherBytes(query, defaultConfig);
  // The timeout is set to 0, so withTimeoutAndMaxScatterGatherBytes should set QUERY_FAIL_TIME to the
  // current time plus the max query timeout at the time the method was called. This means the fail time
  // should be greater than the current time when it is checked below.
  Assert.assertTrue(System.currentTimeMillis() < (Long) transformed.getContextValue(DirectDruidClient.QUERY_FAIL_TIME));
}
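The anonymous ServerConfig above overrides only the server-side maximum. A related sketch follows, overriding the default timeout as well; getDefaultQueryTimeout() is the other timeout getter ServerConfig exposes, and the comments describe how this test suggests the two values are used, not verified behavior.

ServerConfig tightTimeouts = new ServerConfig()
{
  @Override
  public long getDefaultQueryTimeout()
  {
    // Assumed to apply when a query context carries no "timeout" entry at all.
    return 5000L;
  }

  @Override
  public long getMaxQueryTimeout()
  {
    // Server-side ceiling; per the test above, a context timeout of 0 is treated as
    // "no explicit limit" and the fail time is computed from this value instead.
    return 10000L;
  }
};
SetAndVerifyContextQueryRunner<ScanResultValue> guarded =
    new SetAndVerifyContextQueryRunner<>(tightTimeouts, EasyMock.createMock(QueryRunner.class));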
Use of org.apache.druid.server.initialization.ServerConfig in project druid by druid-io.
The class DruidAvaticaHandlerTest, method testMaxRowsPerFrame.
@Test
public void testMaxRowsPerFrame() throws Exception
{
  final AvaticaServerConfig smallFrameConfig = new AvaticaServerConfig()
  {
    @Override
    public int getMaxConnections()
    {
      return 2;
    }

    @Override
    public int getMaxStatementsPerConnection()
    {
      return 4;
    }

    @Override
    public int getMaxRowsPerFrame()
    {
      return 2;
    }
  };
  final PlannerConfig plannerConfig = new PlannerConfig();
  final DruidOperatorTable operatorTable = CalciteTests.createOperatorTable();
  final ExprMacroTable macroTable = CalciteTests.createExprMacroTable();
  final List<Meta.Frame> frames = new ArrayList<>();
  DruidSchemaCatalog rootSchema =
      CalciteTests.createMockRootSchema(conglomerate, walker, plannerConfig, AuthTestUtils.TEST_AUTHORIZER_MAPPER);
  DruidMeta smallFrameDruidMeta = new DruidMeta(
      CalciteTests.createSqlLifecycleFactory(
          new PlannerFactory(
              rootSchema,
              CalciteTests.createMockQueryMakerFactory(walker, conglomerate),
              operatorTable,
              macroTable,
              plannerConfig,
              AuthTestUtils.TEST_AUTHORIZER_MAPPER,
              CalciteTests.getJsonMapper(),
              CalciteTests.DRUID_SCHEMA_NAME
          )
      ),
      smallFrameConfig,
      new ErrorHandler(new ServerConfig()),
      injector
  )
  {
    @Override
    public Frame fetch(final StatementHandle statement, final long offset, final int fetchMaxRowCount)
        throws NoSuchStatementException, MissingResultsException
    {
      // overriding fetch allows us to track how many frames are processed after the first frame
      Frame frame = super.fetch(statement, offset, fetchMaxRowCount);
      frames.add(frame);
      return frame;
    }
  };
  final AbstractAvaticaHandler handler = this.getAvaticaHandler(smallFrameDruidMeta);
  final int port = ThreadLocalRandom.current().nextInt(9999) + 20000;
  Server smallFrameServer = new Server(new InetSocketAddress("127.0.0.1", port));
  smallFrameServer.setHandler(handler);
  smallFrameServer.start();
  String smallFrameUrl = this.getJdbcConnectionString(port);
  Connection smallFrameClient = DriverManager.getConnection(smallFrameUrl, "regularUser", "druid");
  final ResultSet resultSet = smallFrameClient.createStatement().executeQuery("SELECT dim1 FROM druid.foo");
  List<Map<String, Object>> rows = getRows(resultSet);
  Assert.assertEquals(2, frames.size());
  Assert.assertEquals(
      ImmutableList.of(
          ImmutableMap.of("dim1", ""), ImmutableMap.of("dim1", "10.1"), ImmutableMap.of("dim1", "2"),
          ImmutableMap.of("dim1", "1"), ImmutableMap.of("dim1", "def"), ImmutableMap.of("dim1", "abc")
      ),
      rows
  );
}
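On the JDBC side nothing special is required to consume the small frames: Avatica's driver keeps requesting additional frames until the result set is exhausted, which is exactly what the overridden fetch above counts. A sketch of the client loop, reusing the connection details built in the test (smallFrameUrl and the credentials come from the snippet above):

// Assumed imports: java.sql.Connection, java.sql.DriverManager, java.sql.ResultSet, java.sql.Statement
try (Connection connection = DriverManager.getConnection(smallFrameUrl, "regularUser", "druid");
     Statement statement = connection.createStatement();
     ResultSet rs = statement.executeQuery("SELECT dim1 FROM druid.foo")) {
  while (rs.next()) {
    // Six rows arrive in total even though getMaxRowsPerFrame() limits each frame to two.
    System.out.println(rs.getString("dim1"));
  }
}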
Use of org.apache.druid.server.initialization.ServerConfig in project druid by druid-io.
The class ErrorHandlerTest, method testErrorHandlerDefaultErrorResponseTransformStrategySanitizesErrorAsExpected.
@Test
public void testErrorHandlerDefaultErrorResponseTransformStrategySanitizesErrorAsExpected()
{
  ServerConfig serverConfig = new ServerConfig();
  ErrorHandler errorHandler = new ErrorHandler(serverConfig);
  QueryInterruptedException input = new QueryInterruptedException("error", "error messagez", "error class", "host");
  RuntimeException output = errorHandler.sanitize(input);
  Assert.assertEquals("error messagez", output.getMessage());
}
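The default ServerConfig leaves error messages untouched, which is why "error messagez" survives sanitization above. For contrast, here is a sketch of a config that hides non-matching messages; the getErrorResponseTransformStrategy() override, the AllowedRegexErrorResponseTransformStrategy class, its org.apache.druid.common.exception package, and its list-of-regexes constructor are recalled from Druid's error-sanitization support and should be treated as assumptions rather than verified API.

// Assumed imports: java.util.Collections, org.apache.druid.common.exception.AllowedRegexErrorResponseTransformStrategy,
// org.apache.druid.common.exception.ErrorResponseTransformStrategy
ServerConfig maskingConfig = new ServerConfig()
{
  @Override
  public ErrorResponseTransformStrategy getErrorResponseTransformStrategy()
  {
    // Only messages matching an allowed regex are preserved; anything else is hidden (assumed behavior).
    return new AllowedRegexErrorResponseTransformStrategy(Collections.singletonList("safe to show.*"));
  }
};
ErrorHandler maskingHandler = new ErrorHandler(maskingConfig);
// With the config above, sanitize() would be expected to blank out "error messagez",
// since it does not match the allowed pattern.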