Use of org.apache.druid.collections.CloseableStupidPool in project druid by druid-io.
The class GroupByLimitPushDownInsufficientBufferTest, method setupGroupByFactory:
private void setupGroupByFactory() {
  executorService = Execs.multiThreaded(3, "GroupByThreadPool[%d]");

  final CloseableStupidPool<ByteBuffer> bufferPool = new CloseableStupidPool<>(
      "GroupByBenchmark-computeBufferPool",
      new OffheapBufferGenerator("compute", 10_000_000),
      0,
      Integer.MAX_VALUE
  );

  // limit of 2 is required since we simulate both historical merge and broker merge in the same process
  final CloseableDefaultBlockingPool<ByteBuffer> mergePool = new CloseableDefaultBlockingPool<>(
      new OffheapBufferGenerator("merge", 10_000_000),
      2
  );
  // limit of 2 is required since we simulate both historical merge and broker merge in the same process
  final CloseableDefaultBlockingPool<ByteBuffer> tooSmallMergePool = new CloseableDefaultBlockingPool<>(
      new OffheapBufferGenerator("merge", 255),
      2
  );

  resourceCloser.register(bufferPool);
  resourceCloser.register(mergePool);
  resourceCloser.register(tooSmallMergePool);

  final GroupByQueryConfig config = new GroupByQueryConfig() {
    @Override
    public String getDefaultStrategy() {
      return "v2";
    }

    @Override
    public int getBufferGrouperInitialBuckets() {
      return -1;
    }

    @Override
    public long getMaxOnDiskStorage() {
      return 1_000_000_000L;
    }
  };
  config.setSingleThreaded(false);
  config.setMaxIntermediateRows(Integer.MAX_VALUE);
  config.setMaxResults(Integer.MAX_VALUE);

  DruidProcessingConfig druidProcessingConfig = new DruidProcessingConfig() {
    @Override
    public int getNumThreads() {
      // Used by "v2" strategy for concurrencyHint
      return 2;
    }

    @Override
    public String getFormatString() {
      return null;
    }
  };

  DruidProcessingConfig tooSmallDruidProcessingConfig = new DruidProcessingConfig() {
    @Override
    public int intermediateComputeSizeBytes() {
      return 255;
    }

    @Override
    public int getNumThreads() {
      // Used by "v2" strategy for concurrencyHint
      return 2;
    }

    @Override
    public String getFormatString() {
      return null;
    }
  };

  final Supplier<GroupByQueryConfig> configSupplier = Suppliers.ofInstance(config);

  final GroupByStrategySelector strategySelector = new GroupByStrategySelector(
      configSupplier,
      new GroupByStrategyV1(
          configSupplier,
          new GroupByQueryEngine(configSupplier, bufferPool),
          NOOP_QUERYWATCHER
      ),
      new GroupByStrategyV2(
          druidProcessingConfig,
          configSupplier,
          bufferPool,
          mergePool,
          new ObjectMapper(new SmileFactory()),
          NOOP_QUERYWATCHER
      )
  );

  final GroupByStrategySelector tooSmallStrategySelector = new GroupByStrategySelector(
      configSupplier,
      new GroupByStrategyV1(
          configSupplier,
          new GroupByQueryEngine(configSupplier, bufferPool),
          NOOP_QUERYWATCHER
      ),
      new GroupByStrategyV2(
          tooSmallDruidProcessingConfig,
          configSupplier,
          bufferPool,
          tooSmallMergePool,
          new ObjectMapper(new SmileFactory()),
          NOOP_QUERYWATCHER
      )
  );

  groupByFactory = new GroupByQueryRunnerFactory(
      strategySelector,
      new GroupByQueryQueryToolChest(strategySelector)
  );
  tooSmallGroupByFactory = new GroupByQueryRunnerFactory(
      tooSmallStrategySelector,
      new GroupByQueryQueryToolChest(tooSmallStrategySelector)
  );
}
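The pools above hand out pooled off-heap buffers: take() lends a buffer wrapped in a ResourceHolder, closing the holder returns the buffer to the pool, and closing the pool itself releases whatever is cached (hence the resourceCloser registration). A minimal sketch of that lifecycle, assuming the same four-argument constructor as above (name, generator, pre-allocation count, cache cap) but with a plain heap-buffer supplier standing in for OffheapBufferGenerator; the class name and 1 KiB size are illustrative:

import java.nio.ByteBuffer;
import org.apache.druid.collections.CloseableStupidPool;
import org.apache.druid.collections.ResourceHolder;

public class PoolLifecycleSketch {
  public static void main(String[] args) {
    try (CloseableStupidPool<ByteBuffer> pool = new CloseableStupidPool<>(
        "sketch-pool",
        () -> ByteBuffer.allocate(1024), // heap buffer stands in for OffheapBufferGenerator
        0,
        Integer.MAX_VALUE
    )) {
      // take() lends out a buffer wrapped in a ResourceHolder.
      try (ResourceHolder<ByteBuffer> holder = pool.take()) {
        ByteBuffer buf = holder.get();
        buf.putLong(42L);
      } // closing the holder returns the buffer to the pool for reuse
    } // closing the pool releases any cached buffers
  }
}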
Use of org.apache.druid.collections.CloseableStupidPool in project druid by druid-io.
The class GroupByMultiSegmentTest, method setupGroupByFactory:
private void setupGroupByFactory() {
  executorService = Execs.multiThreaded(2, "GroupByThreadPool[%d]");

  final CloseableStupidPool<ByteBuffer> bufferPool = new CloseableStupidPool<>(
      "GroupByBenchmark-computeBufferPool",
      new OffheapBufferGenerator("compute", 10_000_000),
      0,
      Integer.MAX_VALUE
  );

  // limit of 2 is required since we simulate both historical merge and broker merge in the same process
  final CloseableDefaultBlockingPool<ByteBuffer> mergePool = new CloseableDefaultBlockingPool<>(
      new OffheapBufferGenerator("merge", 10_000_000),
      2
  );

  resourceCloser.register(bufferPool);
  resourceCloser.register(mergePool);

  final GroupByQueryConfig config = new GroupByQueryConfig() {
    @Override
    public String getDefaultStrategy() {
      return "v2";
    }

    @Override
    public int getBufferGrouperInitialBuckets() {
      return -1;
    }

    @Override
    public long getMaxOnDiskStorage() {
      return 1_000_000_000L;
    }
  };
  config.setSingleThreaded(false);
  config.setMaxIntermediateRows(Integer.MAX_VALUE);
  config.setMaxResults(Integer.MAX_VALUE);

  DruidProcessingConfig druidProcessingConfig = new DruidProcessingConfig() {
    @Override
    public int getNumThreads() {
      // Used by "v2" strategy for concurrencyHint
      return 2;
    }

    @Override
    public String getFormatString() {
      return null;
    }
  };

  final Supplier<GroupByQueryConfig> configSupplier = Suppliers.ofInstance(config);

  final GroupByStrategySelector strategySelector = new GroupByStrategySelector(
      configSupplier,
      new GroupByStrategyV1(
          configSupplier,
          new GroupByQueryEngine(configSupplier, bufferPool),
          NOOP_QUERYWATCHER
      ),
      new GroupByStrategyV2(
          druidProcessingConfig,
          configSupplier,
          bufferPool,
          mergePool,
          new ObjectMapper(new SmileFactory()),
          NOOP_QUERYWATCHER
      )
  );

  groupByFactory = new GroupByQueryRunnerFactory(
      strategySelector,
      new GroupByQueryQueryToolChest(strategySelector)
  );
}
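Unlike the try-with-resources tests below, these setup methods create pools once and register them with a Closer (resourceCloser), so teardown can release everything with a single call. A minimal sketch of that pattern, assuming Druid's org.apache.druid.java.util.common.io.Closer; the class and method names here are illustrative:

import java.io.IOException;
import java.nio.ByteBuffer;
import org.apache.druid.collections.CloseableStupidPool;
import org.apache.druid.java.util.common.io.Closer;

public class CloserSketch {
  // One Closer collects every pooled resource created during setup.
  private final Closer resourceCloser = Closer.create();

  void setup() {
    CloseableStupidPool<ByteBuffer> bufferPool = new CloseableStupidPool<>(
        "sketch-computeBufferPool",
        () -> ByteBuffer.allocate(1024)
    );
    // register() returns the resource, so it can also be chained inline.
    resourceCloser.register(bufferPool);
  }

  void teardown() throws IOException {
    // Closes registered resources, in reverse registration order as with Guava's Closer.
    resourceCloser.close();
  }
}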
Use of org.apache.druid.collections.CloseableStupidPool in project druid by druid-io.
The class IncrementalIndexStorageAdapterTest, method testSingleValueTopN:
@Test
public void testSingleValueTopN() throws IOException {
  IncrementalIndex index = indexCreator.createIndex();
  DateTime t = DateTimes.nowUtc();
  index.add(new MapBasedInputRow(
      t.minus(1).getMillis(),
      Collections.singletonList("sally"),
      ImmutableMap.of("sally", "bo")
  ));

  try (CloseableStupidPool<ByteBuffer> pool = new CloseableStupidPool<>(
      "TopNQueryEngine-bufferPool",
      () -> ByteBuffer.allocate(50000)
  )) {
    TopNQueryEngine engine = new TopNQueryEngine(pool);

    final Iterable<Result<TopNResultValue>> results = engine
        .query(
            new TopNQueryBuilder()
                .dataSource("test")
                .granularity(Granularities.ALL)
                .intervals(Collections.singletonList(new Interval(DateTimes.EPOCH, DateTimes.nowUtc())))
                .dimension("sally")
                .metric("cnt")
                .threshold(10)
                .aggregators(new LongSumAggregatorFactory("cnt", "cnt"))
                .build(),
            new IncrementalIndexStorageAdapter(index),
            null
        )
        .toList();

    Assert.assertEquals(1, Iterables.size(results));
    Assert.assertEquals(1, results.iterator().next().getValue().getValue().size());
  }
}
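Here the pool uses the two-argument constructor (name plus buffer supplier) with try-with-resources scoping instead of a Closer. One property such tests rely on implicitly is buffer reuse: once a holder is closed, the next take() typically hands back the cached buffer rather than invoking the supplier again. A small sketch of that behavior, under the assumption that the pool caches returned objects; names and sizes are illustrative:

import java.nio.ByteBuffer;
import org.apache.druid.collections.CloseableStupidPool;
import org.apache.druid.collections.ResourceHolder;

public class PoolReuseSketch {
  public static void main(String[] args) {
    try (CloseableStupidPool<ByteBuffer> pool = new CloseableStupidPool<>(
        "sketch-bufferPool",
        () -> ByteBuffer.allocate(50_000)
    )) {
      final ByteBuffer first;
      try (ResourceHolder<ByteBuffer> holder = pool.take()) {
        first = holder.get();
      } // buffer goes back into the pool's cache here

      try (ResourceHolder<ByteBuffer> holder = pool.take()) {
        // Expected to print true: the cached buffer is reused instead of
        // a fresh allocation (an assumption about the pool's caching).
        System.out.println(holder.get() == first);
      }
    }
  }
}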
Use of org.apache.druid.collections.CloseableStupidPool in project druid by druid-io.
The class IncrementalIndexStorageAdapterTest, method testFilterByNull:
@Test
public void testFilterByNull() throws Exception {
  IncrementalIndex index = indexCreator.createIndex();
  index.add(new MapBasedInputRow(
      System.currentTimeMillis() - 1,
      Collections.singletonList("billy"),
      ImmutableMap.of("billy", "hi")
  ));
  index.add(new MapBasedInputRow(
      System.currentTimeMillis() - 1,
      Collections.singletonList("sally"),
      ImmutableMap.of("sally", "bo")
  ));

  try (CloseableStupidPool<ByteBuffer> pool = new CloseableStupidPool<>(
      "GroupByQueryEngine-bufferPool",
      () -> ByteBuffer.allocate(50000)
  )) {
    final GroupByQueryEngine engine = new GroupByQueryEngine(
        Suppliers.ofInstance(
            new GroupByQueryConfig() {
              @Override
              public int getMaxIntermediateRows() {
                return 5;
              }
            }
        ),
        pool
    );

    final Sequence<Row> rows = engine.process(
        GroupByQuery.builder()
                    .setDataSource("test")
                    .setGranularity(Granularities.ALL)
                    .setInterval(new Interval(DateTimes.EPOCH, DateTimes.nowUtc()))
                    .addDimension("billy")
                    .addDimension("sally")
                    .addAggregator(new LongSumAggregatorFactory("cnt", "cnt"))
                    .setDimFilter(DimFilters.dimEquals("sally", (String) null))
                    .build(),
        new IncrementalIndexStorageAdapter(index)
    );

    final List<Row> results = rows.toList();
    Assert.assertEquals(1, results.size());

    MapBasedRow row = (MapBasedRow) results.get(0);
    Assert.assertEquals(ImmutableMap.of("billy", "hi", "cnt", 1L), row.getEvent());
  }
}
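Both GroupByQueryEngine tests tune the engine by overriding a single getter on an anonymous GroupByQueryConfig subclass and wrapping it in Guava's Suppliers.ofInstance, rather than constructing a fully populated config. A minimal sketch of that pattern in isolation; the value 5 mirrors the tests above, and the class name is illustrative:

import com.google.common.base.Supplier;
import com.google.common.base.Suppliers;
import org.apache.druid.query.groupby.GroupByQueryConfig;

public class ConfigOverrideSketch {
  public static void main(String[] args) {
    // Anonymous subclass: only the getter under test is overridden,
    // everything else keeps its default.
    final GroupByQueryConfig config = new GroupByQueryConfig() {
      @Override
      public int getMaxIntermediateRows() {
        return 5; // keep the per-pass row limit tiny for the test
      }
    };
    // The engine takes a Supplier, so config resolution stays pluggable.
    Supplier<GroupByQueryConfig> supplier = Suppliers.ofInstance(config);
    System.out.println(supplier.get().getMaxIntermediateRows()); // prints 5
  }
}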
Use of org.apache.druid.collections.CloseableStupidPool in project druid by druid-io.
The class IncrementalIndexStorageAdapterTest, method testObjectColumnSelectorOnVaryingColumnSchema:
@Test
public void testObjectColumnSelectorOnVaryingColumnSchema() throws Exception {
  IncrementalIndex index = indexCreator.createIndex();
  index.add(new MapBasedInputRow(
      DateTimes.of("2014-09-01T00:00:00"),
      Collections.singletonList("billy"),
      ImmutableMap.of("billy", "hi")
  ));
  index.add(new MapBasedInputRow(
      DateTimes.of("2014-09-01T01:00:00"),
      Lists.newArrayList("billy", "sally"),
      ImmutableMap.of("billy", "hip", "sally", "hop")
  ));

  try (CloseableStupidPool<ByteBuffer> pool = new CloseableStupidPool<>(
      "GroupByQueryEngine-bufferPool",
      () -> ByteBuffer.allocate(50000)
  )) {
    final GroupByQueryEngine engine = new GroupByQueryEngine(
        Suppliers.ofInstance(
            new GroupByQueryConfig() {
              @Override
              public int getMaxIntermediateRows() {
                return 5;
              }
            }
        ),
        pool
    );

    final Sequence<Row> rows = engine.process(
        GroupByQuery.builder()
                    .setDataSource("test")
                    .setGranularity(Granularities.ALL)
                    .setInterval(new Interval(DateTimes.EPOCH, DateTimes.nowUtc()))
                    .addDimension("billy")
                    .addDimension("sally")
                    .addAggregator(new LongSumAggregatorFactory("cnt", "cnt"))
                    .addAggregator(
                        new JavaScriptAggregatorFactory(
                            "fieldLength",
                            Arrays.asList("sally", "billy"),
                            "function(current, s, b) { return current + (s == null ? 0 : s.length) + (b == null ? 0 : b.length); }",
                            "function() { return 0; }",
                            "function(a,b) { return a + b; }",
                            JavaScriptConfig.getEnabledInstance()
                        )
                    )
                    .build(),
        new IncrementalIndexStorageAdapter(index)
    );

    final List<Row> results = rows.toList();
    Assert.assertEquals(2, results.size());

    MapBasedRow row = (MapBasedRow) results.get(0);
    Assert.assertEquals(ImmutableMap.of("billy", "hi", "cnt", 1L, "fieldLength", 2.0), row.getEvent());

    row = (MapBasedRow) results.get(1);
    Assert.assertEquals(ImmutableMap.of("billy", "hip", "sally", "hop", "cnt", 1L, "fieldLength", 6.0), row.getEvent());
  }
}
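The JavaScriptAggregatorFactory above folds the string lengths of "sally" and "billy" into a running total, which is why the assertions expect fieldLength of 2.0 for the first row ("hi") and 6.0 for the second ("hip" plus "hop"). A hand translation of the three JS functions into plain Java, for clarity only (not a Druid API):

public class FieldLengthFoldSketch {
  // aggregate: function(current, s, b) { return current + (s == null ? 0 : s.length) + (b == null ? 0 : b.length); }
  static double aggregate(double current, String s, String b) {
    return current + (s == null ? 0 : s.length()) + (b == null ? 0 : b.length());
  }

  // reset: function() { return 0; }
  static double reset() {
    return 0;
  }

  // combine: function(a,b) { return a + b; }
  static double combine(double a, double b) {
    return a + b;
  }

  public static void main(String[] args) {
    // First row: billy="hi", no "sally" value -> 0 + 0 + 2 = 2.0
    System.out.println(aggregate(reset(), null, "hi"));
    // Second row: sally="hop", billy="hip" -> 0 + 3 + 3 = 6.0
    System.out.println(aggregate(reset(), "hop", "hip"));
  }
}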