use of org.apache.druid.query.aggregation.DoubleSumAggregatorFactory in project druid by druid-io.
the class VectorGroupByEngineIteratorTest method testCreateOneGrouperAndCloseItWhenClose.
@Test
public void testCreateOneGrouperAndCloseItWhenClose() throws IOException {
  final Interval interval = TestIndex.DATA_INTERVAL;
  final AggregatorFactory factory = new DoubleSumAggregatorFactory("index", "index");
  final GroupByQuery query = GroupByQuery.builder()
      .setDataSource(QueryRunnerTestHelper.DATA_SOURCE)
      .setGranularity(QueryRunnerTestHelper.DAY_GRAN)
      .setInterval(interval)
      .setDimensions(new DefaultDimensionSpec("market", null, null))
      .setAggregatorSpecs(factory)
      .build();
  final StorageAdapter storageAdapter = new QueryableIndexStorageAdapter(TestIndex.getMMappedTestIndex());
  final ByteBuffer byteBuffer = ByteBuffer.wrap(new byte[4096]);
  final VectorCursor cursor = storageAdapter.makeVectorCursor(
      Filters.toFilter(query.getDimFilter()),
      interval,
      query.getVirtualColumns(),
      false,
      QueryContexts.getVectorSize(query),
      null);
  final List<GroupByVectorColumnSelector> dimensions = query.getDimensions().stream()
      .map(dimensionSpec -> ColumnProcessors.makeVectorProcessor(
          dimensionSpec,
          GroupByVectorColumnProcessorFactory.instance(),
          cursor.getColumnSelectorFactory()))
      .collect(Collectors.toList());
  final MutableObject<VectorGrouper> grouperCaptor = new MutableObject<>();
  final VectorGroupByEngineIterator iterator = new VectorGroupByEngineIterator(
      query, new GroupByQueryConfig(), storageAdapter, cursor, interval, dimensions, byteBuffer, null) {
    @Override
    VectorGrouper makeGrouper() {
      grouperCaptor.setValue(Mockito.spy(super.makeGrouper()));
      return grouperCaptor.getValue();
    }
  };
  iterator.close();
  Mockito.verify(grouperCaptor.getValue()).close();
}
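In the query above, the DoubleSumAggregatorFactory constructor takes the output metric name first and the input column name second; both are "index" here, so the summed value keeps the column's name. A minimal sketch of the same pattern with a distinct, made-up output name, reusing only classes that already appear in this snippet:

// Minimal sketch; "index_sum" is a hypothetical output name, the input column is still "index".
AggregatorFactory sumIndex = new DoubleSumAggregatorFactory("index_sum", "index");
GroupByQuery sketchQuery = GroupByQuery.builder()
    .setDataSource(QueryRunnerTestHelper.DATA_SOURCE)
    .setGranularity(QueryRunnerTestHelper.DAY_GRAN)
    .setInterval(TestIndex.DATA_INTERVAL)
    .setDimensions(new DefaultDimensionSpec("market", null, null))
    .setAggregatorSpecs(sumIndex)
    .build();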
use of org.apache.druid.query.aggregation.DoubleSumAggregatorFactory in project druid by druid-io.
the class SimpleTestIndex method makeRealtimeIndex.
private static IncrementalIndex makeRealtimeIndex() {
  try {
    List<InputRow> inputRows = Lists.newArrayListWithExpectedSize(NUM_ROWS);
    for (int i = 1; i <= NUM_ROWS; i++) {
      double doubleVal = i + 0.7d;
      String stringVal = String.valueOf(doubleVal);
      inputRows.add(new MapBasedInputRow(
          DateTime.now(DateTimeZone.UTC),
          DIMENSIONS,
          ImmutableMap.of(
              DOUBLE_COL, doubleVal,
              SINGLE_VALUE_DOUBLE_AS_STRING_DIM, stringVal,
              MULTI_VALUE_DOUBLE_AS_STRING_DIM, Lists.newArrayList(stringVal, null, stringVal))));
    }
    return AggregationTestHelper.createIncrementalIndex(
        inputRows.iterator(),
        new NoopInputRowParser(null),
        new AggregatorFactory[] { new CountAggregatorFactory("count"), new DoubleSumAggregatorFactory(DOUBLE_COL, DOUBLE_COL) },
        0, Granularities.NONE, false, 100, false);
  } catch (Exception ex) {
    throw new RuntimeException(ex);
  }
}
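The constants referenced above (NUM_ROWS, DIMENSIONS, DOUBLE_COL, and the two *_DIM names) are defined elsewhere in SimpleTestIndex; the declarations below are only a hypothetical stand-in so the method can be read on its own. The DoubleSumAggregatorFactory(DOUBLE_COL, DOUBLE_COL) metric sums the ingested double column into an output column of the same name.

// Hypothetical declarations; the real constants live in SimpleTestIndex.
private static final int NUM_ROWS = 10;
private static final String DOUBLE_COL = "doubleCol";
private static final String SINGLE_VALUE_DOUBLE_AS_STRING_DIM = "singleValueDoubleAsStringDim";
private static final String MULTI_VALUE_DOUBLE_AS_STRING_DIM = "multiValueDoubleAsStringDim";
private static final List<String> DIMENSIONS = ImmutableList.of(
    SINGLE_VALUE_DOUBLE_AS_STRING_DIM,
    MULTI_VALUE_DOUBLE_AS_STRING_DIM);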
use of org.apache.druid.query.aggregation.DoubleSumAggregatorFactory in project druid by druid-io.
the class SystemSchemaTest method setUp.
@Before
public void setUp() throws Exception {
  serverView = EasyMock.createNiceMock(TimelineServerView.class);
  client = EasyMock.createMock(DruidLeaderClient.class);
  coordinatorClient = EasyMock.createMock(DruidLeaderClient.class);
  overlordClient = EasyMock.createMock(DruidLeaderClient.class);
  mapper = TestHelper.makeJsonMapper();
  responseHolder = EasyMock.createMock(StringFullResponseHolder.class);
  responseHandler = EasyMock.createMockBuilder(BytesAccumulatingResponseHandler.class)
      .withConstructor()
      .addMockedMethod("handleResponse", HttpResponse.class, HttpResponseHandler.TrafficCop.class)
      .addMockedMethod("getStatus")
      .createMock();
  request = EasyMock.createMock(Request.class);
  authMapper = createAuthMapper();
  final File tmpDir = temporaryFolder.newFolder();
  final QueryableIndex index1 = IndexBuilder.create()
      .tmpDir(new File(tmpDir, "1"))
      .segmentWriteOutMediumFactory(OffHeapMemorySegmentWriteOutMediumFactory.instance())
      .schema(new IncrementalIndexSchema.Builder()
          .withMetrics(new CountAggregatorFactory("cnt"), new DoubleSumAggregatorFactory("m1", "m1"), new HyperUniquesAggregatorFactory("unique_dim1", "dim1"))
          .withRollup(false)
          .build())
      .rows(ROWS1)
      .buildMMappedIndex();
  final QueryableIndex index2 = IndexBuilder.create()
      .tmpDir(new File(tmpDir, "2"))
      .segmentWriteOutMediumFactory(OffHeapMemorySegmentWriteOutMediumFactory.instance())
      .schema(new IncrementalIndexSchema.Builder().withMetrics(new LongSumAggregatorFactory("m1", "m1")).withRollup(false).build())
      .rows(ROWS2)
      .buildMMappedIndex();
  final QueryableIndex index3 = IndexBuilder.create()
      .tmpDir(new File(tmpDir, "3"))
      .segmentWriteOutMediumFactory(OffHeapMemorySegmentWriteOutMediumFactory.instance())
      .schema(new IncrementalIndexSchema.Builder().withMetrics(new LongSumAggregatorFactory("m1", "m1")).withRollup(false).build())
      .rows(ROWS3)
      .buildMMappedIndex();
  walker = new SpecificSegmentsQuerySegmentWalker(conglomerate).add(segment1, index1).add(segment2, index2).add(segment3, index3);
  druidSchema = new DruidSchema(
      CalciteTests.createMockQueryLifecycleFactory(walker, conglomerate),
      new TestServerInventoryView(walker.getSegments(), realtimeSegments),
      new SegmentManager(EasyMock.createMock(SegmentLoader.class)),
      new MapJoinableFactory(ImmutableSet.of(), ImmutableMap.of()),
      PLANNER_CONFIG_DEFAULT, new NoopEscalator(), new BrokerInternalQueryConfig(), null);
  druidSchema.start();
  druidSchema.awaitInitialization();
  metadataView = EasyMock.createMock(MetadataSegmentView.class);
  druidNodeDiscoveryProvider = EasyMock.createMock(DruidNodeDiscoveryProvider.class);
  serverInventoryView = EasyMock.createMock(FilteredServerInventoryView.class);
  schema = new SystemSchema(
      druidSchema, metadataView, serverView, serverInventoryView,
      EasyMock.createStrictMock(AuthorizerMapper.class), client, client, druidNodeDiscoveryProvider, mapper);
}
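index1 registers three ingestion-time metrics: a row count ("cnt"), a double sum over the "m1" column, and a HyperUnique sketch over "dim1", with rollup disabled so input rows are kept as-is; index2 and index3 instead ingest "m1" as a long sum. Pulled out as a standalone sketch for readability, the index1 schema looks like this (same classes as above, nothing new assumed):

// The index1 schema, extracted for readability.
IncrementalIndexSchema index1Schema = new IncrementalIndexSchema.Builder()
    .withMetrics(
        new CountAggregatorFactory("cnt"),
        new DoubleSumAggregatorFactory("m1", "m1"),               // double sum of the ingested "m1" values
        new HyperUniquesAggregatorFactory("unique_dim1", "dim1")) // approximate distinct count of "dim1"
    .withRollup(false)
    .build();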
use of org.apache.druid.query.aggregation.DoubleSumAggregatorFactory in project druid by druid-io.
the class DruidSchemaTest method setUp.
@Before
public void setUp() throws Exception {
  final File tmpDir = temporaryFolder.newFolder();
  final QueryableIndex index1 = IndexBuilder.create()
      .tmpDir(new File(tmpDir, "1"))
      .segmentWriteOutMediumFactory(OffHeapMemorySegmentWriteOutMediumFactory.instance())
      .schema(new IncrementalIndexSchema.Builder()
          .withMetrics(new CountAggregatorFactory("cnt"), new DoubleSumAggregatorFactory("m1", "m1"), new HyperUniquesAggregatorFactory("unique_dim1", "dim1"))
          .withRollup(false)
          .build())
      .rows(ROWS1)
      .buildMMappedIndex();
  final QueryableIndex index2 = IndexBuilder.create()
      .tmpDir(new File(tmpDir, "2"))
      .segmentWriteOutMediumFactory(OffHeapMemorySegmentWriteOutMediumFactory.instance())
      .schema(new IncrementalIndexSchema.Builder().withMetrics(new LongSumAggregatorFactory("m1", "m1")).withRollup(false).build())
      .rows(ROWS2)
      .buildMMappedIndex();
  walker = new SpecificSegmentsQuerySegmentWalker(conglomerate)
      .add(DataSegment.builder().dataSource(CalciteTests.DATASOURCE1).interval(Intervals.of("2000/P1Y")).version("1").shardSpec(new LinearShardSpec(0)).size(0).build(), index1)
      .add(DataSegment.builder().dataSource(CalciteTests.DATASOURCE1).interval(Intervals.of("2001/P1Y")).version("1").shardSpec(new LinearShardSpec(0)).size(0).build(), index2)
      .add(DataSegment.builder().dataSource(CalciteTests.DATASOURCE2).interval(index2.getDataInterval()).version("1").shardSpec(new LinearShardSpec(0)).size(0).build(), index2);
  final DataSegment segment1 = new DataSegment(
      "foo3", Intervals.of("2012/2013"), "version3", null,
      ImmutableList.of("dim1", "dim2"), ImmutableList.of("met1", "met2"),
      new NumberedShardSpec(2, 3), null, 1, 100L, PruneSpecsHolder.DEFAULT);
  final List<DataSegment> realtimeSegments = ImmutableList.of(segment1);
  serverView = new TestServerInventoryView(walker.getSegments(), realtimeSegments);
  druidServers = serverView.getDruidServers();
  schema = new DruidSchema(
      CalciteTests.createMockQueryLifecycleFactory(walker, conglomerate),
      serverView,
      segmentManager,
      new MapJoinableFactory(ImmutableSet.of(globalTableJoinable), ImmutableMap.of(globalTableJoinable.getClass(), GlobalTableDataSource.class)),
      PLANNER_CONFIG_DEFAULT, new NoopEscalator(), new BrokerInternalQueryConfig(), null) {
    @Override
    protected DruidTable buildDruidTable(String dataSource) {
      DruidTable table = super.buildDruidTable(dataSource);
      buildTableLatch.countDown();
      return table;
    }

    @Override
    void markDataSourceAsNeedRebuild(String datasource) {
      super.markDataSourceAsNeedRebuild(datasource);
      markDataSourceLatch.countDown();
    }
  };
  schema2 = new DruidSchema(
      CalciteTests.createMockQueryLifecycleFactory(walker, conglomerate),
      serverView,
      segmentManager,
      new MapJoinableFactory(ImmutableSet.of(globalTableJoinable), ImmutableMap.of(globalTableJoinable.getClass(), GlobalTableDataSource.class)),
      PLANNER_CONFIG_DEFAULT, new NoopEscalator(), new BrokerInternalQueryConfig(), null) {
    boolean throwException = true;

    @Override
    protected DruidTable buildDruidTable(String dataSource) {
      DruidTable table = super.buildDruidTable(dataSource);
      buildTableLatch.countDown();
      return table;
    }

    @Override
    protected Set<SegmentId> refreshSegments(final Set<SegmentId> segments) throws IOException {
      if (throwException) {
        throwException = false;
        throw new RuntimeException("Query[xxxx] url[http://xxxx:8083/druid/v2/] timed out.");
      } else {
        return super.refreshSegments(segments);
      }
    }

    @Override
    void markDataSourceAsNeedRebuild(String datasource) {
      super.markDataSourceAsNeedRebuild(datasource);
      markDataSourceLatch.countDown();
    }
  };
  schema.start();
  schema.awaitInitialization();
}
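Both anonymous DruidSchema subclasses count down latches (buildTableLatch, markDataSourceLatch) that are fields of the test class, so individual tests can block until a table has been built or a datasource has been marked for rebuild; schema2 additionally fails its first refreshSegments call to simulate a transient broker-side timeout before delegating to the real implementation. A hypothetical use of those latches in a test body, assuming they are java.util.concurrent.CountDownLatch instances:

// Hypothetical test usage; the latch field names are taken from the overrides above.
@Test
public void testTableIsBuiltAfterStartup() throws Exception {
  // setUp() already called schema.start() and schema.awaitInitialization().
  Assert.assertTrue("buildDruidTable was never invoked", buildTableLatch.await(10, TimeUnit.SECONDS));
}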
use of org.apache.druid.query.aggregation.DoubleSumAggregatorFactory in project druid by druid-io.
the class QueriesTest method testVerifyAggregationsMultiLevel.
@Test
public void testVerifyAggregationsMultiLevel() {
  List<AggregatorFactory> aggFactories = Arrays.asList(
      new CountAggregatorFactory("count"),
      new DoubleSumAggregatorFactory("idx", "index"),
      new DoubleSumAggregatorFactory("rev", "revenue"));
  List<PostAggregator> postAggs = Arrays.asList(
      new ArithmeticPostAggregator(
          "divideStuff",
          "/",
          Arrays.asList(
              new ArithmeticPostAggregator("addStuff", "+", Arrays.asList(new FieldAccessPostAggregator("idx", "idx"), new ConstantPostAggregator("const", 1))),
              new ArithmeticPostAggregator("subtractStuff", "-", Arrays.asList(new FieldAccessPostAggregator("rev", "rev"), new ConstantPostAggregator("const", 1))))),
      new ArithmeticPostAggregator("addStuff", "+", Arrays.asList(new FieldAccessPostAggregator("divideStuff", "divideStuff"), new FieldAccessPostAggregator("count", "count"))));
  boolean exceptionOccurred = false;
  try {
    Queries.prepareAggregations(ImmutableList.of(), aggFactories, postAggs);
  } catch (IllegalArgumentException e) {
    exceptionOccurred = true;
  }
  Assert.assertFalse(exceptionOccurred);
}
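prepareAggregations checks that every FieldAccessPostAggregator resolves either to an aggregator output ("count", "idx", "rev") or to a post-aggregator defined earlier in the list; the empty first argument is the set of additional pre-existing output names, here none. The nested arithmetic chain above resolves completely, so no IllegalArgumentException is thrown and the assertion passes. A hedged counter-example of the failure case the try/catch guards against, assuming an unresolved reference is rejected with IllegalArgumentException and reusing aggFactories from the test above:

// Sketch of the failing case: "missing" is neither an aggregator output nor an earlier post-aggregator.
List<PostAggregator> badPostAggs = Arrays.asList(
    new ArithmeticPostAggregator("bad", "+", Arrays.asList(
        new FieldAccessPostAggregator("idx", "idx"),
        new FieldAccessPostAggregator("missing", "missing"))));
try {
  Queries.prepareAggregations(ImmutableList.of(), aggFactories, badPostAggs);
  Assert.fail("expected IllegalArgumentException for the unresolved field reference");
} catch (IllegalArgumentException expected) {
  // expected
}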