Use of org.apache.druid.query.DruidProcessingConfig in project druid by druid-io.
From the class GroupByLimitPushDownInsufficientBufferTest, method setupGroupByFactory.
private void setupGroupByFactory() {
  executorService = Execs.multiThreaded(3, "GroupByThreadPool[%d]");
  final CloseableStupidPool<ByteBuffer> bufferPool = new CloseableStupidPool<>(
      "GroupByBenchmark-computeBufferPool",
      new OffheapBufferGenerator("compute", 10_000_000),
      0,
      Integer.MAX_VALUE
  );
  // limit of 2 is required since we simulate both historical merge and broker merge in the same process
  final CloseableDefaultBlockingPool<ByteBuffer> mergePool = new CloseableDefaultBlockingPool<>(
      new OffheapBufferGenerator("merge", 10_000_000),
      2
  );
  // limit of 2 is required since we simulate both historical merge and broker merge in the same process
  final CloseableDefaultBlockingPool<ByteBuffer> tooSmallMergePool = new CloseableDefaultBlockingPool<>(
      new OffheapBufferGenerator("merge", 255),
      2
  );
  resourceCloser.register(bufferPool);
  resourceCloser.register(mergePool);
  resourceCloser.register(tooSmallMergePool);
  final GroupByQueryConfig config = new GroupByQueryConfig() {
    @Override
    public String getDefaultStrategy() {
      return "v2";
    }

    @Override
    public int getBufferGrouperInitialBuckets() {
      return -1;
    }

    @Override
    public long getMaxOnDiskStorage() {
      return 1_000_000_000L;
    }
  };
  config.setSingleThreaded(false);
  config.setMaxIntermediateRows(Integer.MAX_VALUE);
  config.setMaxResults(Integer.MAX_VALUE);
  DruidProcessingConfig druidProcessingConfig = new DruidProcessingConfig() {
    @Override
    public int getNumThreads() {
      // Used by "v2" strategy for concurrencyHint
      return 2;
    }

    @Override
    public String getFormatString() {
      return null;
    }
  };
  DruidProcessingConfig tooSmallDruidProcessingConfig = new DruidProcessingConfig() {
    @Override
    public int intermediateComputeSizeBytes() {
      return 255;
    }

    @Override
    public int getNumThreads() {
      // Used by "v2" strategy for concurrencyHint
      return 2;
    }

    @Override
    public String getFormatString() {
      return null;
    }
  };
  final Supplier<GroupByQueryConfig> configSupplier = Suppliers.ofInstance(config);
  final GroupByStrategySelector strategySelector = new GroupByStrategySelector(
      configSupplier,
      new GroupByStrategyV1(configSupplier, new GroupByQueryEngine(configSupplier, bufferPool), NOOP_QUERYWATCHER),
      new GroupByStrategyV2(
          druidProcessingConfig,
          configSupplier,
          bufferPool,
          mergePool,
          new ObjectMapper(new SmileFactory()),
          NOOP_QUERYWATCHER
      )
  );
  final GroupByStrategySelector tooSmallStrategySelector = new GroupByStrategySelector(
      configSupplier,
      new GroupByStrategyV1(configSupplier, new GroupByQueryEngine(configSupplier, bufferPool), NOOP_QUERYWATCHER),
      new GroupByStrategyV2(
          tooSmallDruidProcessingConfig,
          configSupplier,
          bufferPool,
          tooSmallMergePool,
          new ObjectMapper(new SmileFactory()),
          NOOP_QUERYWATCHER
      )
  );
  groupByFactory = new GroupByQueryRunnerFactory(strategySelector, new GroupByQueryQueryToolChest(strategySelector));
  tooSmallGroupByFactory = new GroupByQueryRunnerFactory(
      tooSmallStrategySelector,
      new GroupByQueryQueryToolChest(tooSmallStrategySelector)
  );
}
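This setup method allocates off-heap buffers and a thread pool, so the test needs a matching teardown. A minimal sketch, assuming resourceCloser is Druid's org.apache.druid.java.util.common.io.Closer and JUnit 4 is in use; the tearDown method name is illustrative, not copied from the test:

  @After
  public void tearDown() throws IOException {
    // Stop the GroupByThreadPool threads created in setupGroupByFactory().
    executorService.shutdownNow();
    // Close every registered pool (bufferPool, mergePool, tooSmallMergePool),
    // releasing their off-heap ByteBuffers.
    resourceCloser.close();
  }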
Use of org.apache.druid.query.DruidProcessingConfig in project druid by druid-io.
From the class GroupByMultiSegmentTest, method setupGroupByFactory.
private void setupGroupByFactory() {
  executorService = Execs.multiThreaded(2, "GroupByThreadPool[%d]");
  final CloseableStupidPool<ByteBuffer> bufferPool = new CloseableStupidPool<>(
      "GroupByBenchmark-computeBufferPool",
      new OffheapBufferGenerator("compute", 10_000_000),
      0,
      Integer.MAX_VALUE
  );
  // limit of 2 is required since we simulate both historical merge and broker merge in the same process
  final CloseableDefaultBlockingPool<ByteBuffer> mergePool = new CloseableDefaultBlockingPool<>(
      new OffheapBufferGenerator("merge", 10_000_000),
      2
  );
  resourceCloser.register(bufferPool);
  resourceCloser.register(mergePool);
  final GroupByQueryConfig config = new GroupByQueryConfig() {
    @Override
    public String getDefaultStrategy() {
      return "v2";
    }

    @Override
    public int getBufferGrouperInitialBuckets() {
      return -1;
    }

    @Override
    public long getMaxOnDiskStorage() {
      return 1_000_000_000L;
    }
  };
  config.setSingleThreaded(false);
  config.setMaxIntermediateRows(Integer.MAX_VALUE);
  config.setMaxResults(Integer.MAX_VALUE);
  DruidProcessingConfig druidProcessingConfig = new DruidProcessingConfig() {
    @Override
    public int getNumThreads() {
      // Used by "v2" strategy for concurrencyHint
      return 2;
    }

    @Override
    public String getFormatString() {
      return null;
    }
  };
  final Supplier<GroupByQueryConfig> configSupplier = Suppliers.ofInstance(config);
  final GroupByStrategySelector strategySelector = new GroupByStrategySelector(
      configSupplier,
      new GroupByStrategyV1(configSupplier, new GroupByQueryEngine(configSupplier, bufferPool), NOOP_QUERYWATCHER),
      new GroupByStrategyV2(
          druidProcessingConfig,
          configSupplier,
          bufferPool,
          mergePool,
          new ObjectMapper(new SmileFactory()),
          NOOP_QUERYWATCHER
      )
  );
  groupByFactory = new GroupByQueryRunnerFactory(strategySelector, new GroupByQueryQueryToolChest(strategySelector));
}
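The pattern in both tests is the same: each anonymous DruidProcessingConfig subclass pins only the knobs the scenario exercises and inherits defaults for everything else. A hedged sketch of the accessors these group-by tests typically override (the values below are illustrative, not taken from either test):

  DruidProcessingConfig processingConfig = new DruidProcessingConfig() {
    @Override
    public int getNumThreads() {
      // Concurrency hint for the "v2" group-by engine.
      return 2;
    }

    @Override
    public int intermediateComputeSizeBytes() {
      // Size of each off-heap processing buffer; tiny values force spill/failure paths.
      return 10_000_000;
    }

    @Override
    public String getFormatString() {
      // Thread-name format for processing threads; null is fine when the pool is unused.
      return "test-processing-%s";
    }
  };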
Use of org.apache.druid.query.DruidProcessingConfig in project druid by druid-io.
From the class NestedQueryPushDownTest, method setupGroupByFactory.
private void setupGroupByFactory() {
  executorService = Execs.multiThreaded(3, "GroupByThreadPool[%d]");
  NonBlockingPool<ByteBuffer> bufferPool = new StupidPool<>(
      "GroupByBenchmark-computeBufferPool",
      new OffheapBufferGenerator("compute", 10_000_000),
      0,
      Integer.MAX_VALUE
  );
  // a limit of at least 3 is required since we simulate a historical running the nested query and a broker doing the final merge
  BlockingPool<ByteBuffer> mergePool = new DefaultBlockingPool<>(new OffheapBufferGenerator("merge", 10_000_000), 10);
  // a limit of at least 3 is required since we simulate a historical running the nested query and a broker doing the final merge
  BlockingPool<ByteBuffer> mergePool2 = new DefaultBlockingPool<>(new OffheapBufferGenerator("merge", 10_000_000), 10);
  final GroupByQueryConfig config = new GroupByQueryConfig() {
    @Override
    public String getDefaultStrategy() {
      return "v2";
    }

    @Override
    public int getBufferGrouperInitialBuckets() {
      return -1;
    }

    @Override
    public long getMaxOnDiskStorage() {
      return 1_000_000_000L;
    }
  };
  config.setSingleThreaded(false);
  config.setMaxIntermediateRows(Integer.MAX_VALUE);
  config.setMaxResults(Integer.MAX_VALUE);
  DruidProcessingConfig druidProcessingConfig = new DruidProcessingConfig() {
    @Override
    public int getNumThreads() {
      // Used by "v2" strategy for concurrencyHint
      return 2;
    }

    @Override
    public String getFormatString() {
      return null;
    }
  };
  final Supplier<GroupByQueryConfig> configSupplier = Suppliers.ofInstance(config);
  final GroupByStrategySelector strategySelector = new GroupByStrategySelector(
      configSupplier,
      new GroupByStrategyV1(configSupplier, new GroupByQueryEngine(configSupplier, bufferPool), NOOP_QUERYWATCHER),
      new GroupByStrategyV2(
          druidProcessingConfig,
          configSupplier,
          bufferPool,
          mergePool,
          new ObjectMapper(new SmileFactory()),
          NOOP_QUERYWATCHER
      )
  );
  final GroupByStrategySelector strategySelector2 = new GroupByStrategySelector(
      configSupplier,
      new GroupByStrategyV1(configSupplier, new GroupByQueryEngine(configSupplier, bufferPool), NOOP_QUERYWATCHER),
      new GroupByStrategyV2(
          druidProcessingConfig,
          configSupplier,
          bufferPool,
          mergePool2,
          new ObjectMapper(new SmileFactory()),
          NOOP_QUERYWATCHER
      )
  );
  groupByFactory = new GroupByQueryRunnerFactory(strategySelector, new GroupByQueryQueryToolChest(strategySelector));
  groupByFactory2 = new GroupByQueryRunnerFactory(strategySelector2, new GroupByQueryQueryToolChest(strategySelector2));
}
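Here the two merge pools stand in for the two tiers of a cluster: one factory plays the historical executing the inner query, the other plays the broker doing the final merge. A hedged sketch of how a group-by merge acquires buffers from such a blocking pool, assuming the takeBatch(int, long) signature found in recent Druid versions (this is not code from the test):

  List<ReferenceCountingResourceHolder<ByteBuffer>> holders = mergePool.takeBatch(2, 1_000L);
  try {
    for (ReferenceCountingResourceHolder<ByteBuffer> holder : holders) {
      ByteBuffer mergeBuffer = holder.get();
      // ... merge partial group-by results using mergeBuffer ...
    }
  } finally {
    // Closing the holders returns the buffers to the blocking pool.
    holders.forEach(ReferenceCountingResourceHolder::close);
  }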
Use of org.apache.druid.query.DruidProcessingConfig in project druid by druid-io.
From the class BrokerProcessingModuleTest, method testMergeProcessingPool.
@Test
public void testMergeProcessingPool() {
  DruidProcessingConfig config = new DruidProcessingConfig() {
    @Override
    public String getFormatString() {
      return "processing-test-%s";
    }
  };
  DruidProcessingModule module = new DruidProcessingModule();
  module.getMergeProcessingPoolProvider(config);
  config.getNumInitalBuffersForIntermediatePool();
}
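The getFormatString() override matters because Druid's executors format each thread's index into that string when naming processing threads. A minimal sketch of the relationship, using the Execs helper already seen above (the wiring is illustrative, not the module's actual code):

  // Threads come out named "processing-test-0", "processing-test-1", ...
  ExecutorService pool = Execs.multiThreaded(config.getNumThreads(), config.getFormatString());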
Use of org.apache.druid.query.DruidProcessingConfig in project druid by druid-io.
From the class MovingAverageQueryTest, method testQuery.
/**
* Validate that the specified query behaves correctly.
*/
@SuppressWarnings({ "unchecked", "rawtypes" })
@Test
public void testQuery() throws IOException {
  Query<?> query = jsonMapper.readValue(getQueryString(), Query.class);
  Assert.assertThat(query, IsInstanceOf.instanceOf(getExpectedQueryType()));
  List<MapBasedRow> expectedResults = jsonMapper.readValue(getExpectedResultString(), getExpectedResultType());
  Assert.assertNotNull(expectedResults);
  Assert.assertThat(expectedResults, IsInstanceOf.instanceOf(List.class));
  CachingClusteredClient baseClient = new CachingClusteredClient(
      warehouse,
      new TimelineServerView() {
        @Override
        public Optional<? extends TimelineLookup<String, ServerSelector>> getTimeline(DataSourceAnalysis analysis) {
          return Optional.empty();
        }

        @Override
        public List<ImmutableDruidServer> getDruidServers() {
          return null;
        }

        @Override
        public <T> QueryRunner<T> getQueryRunner(DruidServer server) {
          return null;
        }

        @Override
        public void registerTimelineCallback(Executor exec, TimelineCallback callback) {
        }

        @Override
        public void registerSegmentCallback(Executor exec, SegmentCallback callback) {
        }

        @Override
        public void registerServerRemovedCallback(Executor exec, ServerRemovedCallback callback) {
        }
      },
      MapCache.create(100000),
      jsonMapper,
      new ForegroundCachePopulator(jsonMapper, new CachePopulatorStats(), -1),
      new CacheConfig(),
      new DruidHttpClientConfig() {
        @Override
        public long getMaxQueuedBytes() {
          return 0L;
        }
      },
      new DruidProcessingConfig() {
        @Override
        public String getFormatString() {
          return null;
        }
      },
      ForkJoinPool.commonPool(),
      QueryStackTests.DEFAULT_NOOP_SCHEDULER,
      new MapJoinableFactory(ImmutableSet.of(), ImmutableMap.of()),
      new NoopServiceEmitter()
  );
  ClientQuerySegmentWalker walker = new ClientQuerySegmentWalker(
      new ServiceEmitter("", "", null) {
        @Override
        public void emit(Event event) {
        }
      },
      baseClient,
      null, /* local client; unused in this test, so pass in null */
      warehouse,
      new MapJoinableFactory(ImmutableSet.of(), ImmutableMap.of()),
      retryConfig,
      jsonMapper,
      serverConfig,
      null,
      new CacheConfig()
  );
  defineMocks();
  QueryPlus queryPlus = QueryPlus.wrap(query);
  final Sequence<?> res = query.getRunner(walker).run(queryPlus);
  List actualResults = new ArrayList();
  actualResults = (List<MapBasedRow>) res.accumulate(actualResults, Accumulators.list());
  expectedResults = consistentTypeCasting(expectedResults);
  actualResults = consistentTypeCasting(actualResults);
  Assert.assertEquals(expectedResults, actualResults);
}
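The query result is a lazily evaluated Sequence, which the test materializes via accumulate(). Where the Accumulators.list() helper is not available, the same materialization can be written against org.apache.druid.java.util.common.guava.Sequence with a plain lambda; a sketch with illustrative names:

  static List<MapBasedRow> materialize(Sequence<MapBasedRow> results) {
    return results.accumulate(
        new ArrayList<>(),
        (list, row) -> {
          list.add(row); // each step receives the running list plus the next row
          return list;
        }
    );
  }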