Use of org.apache.druid.query.QueryRunnerFactoryConglomerate in project druid by druid-io.
The class QueryStackTests, method createQueryRunnerFactoryConglomerate:
public static QueryRunnerFactoryConglomerate createQueryRunnerFactoryConglomerate(
    final Closer closer,
    final DruidProcessingConfig processingConfig,
    final Supplier<Integer> minTopNThresholdSupplier
)
{
  final CloseableStupidPool<ByteBuffer> stupidPool = new CloseableStupidPool<>(
      "TopNQueryRunnerFactory-bufferPool",
      () -> ByteBuffer.allocate(COMPUTE_BUFFER_SIZE)
  );
  closer.register(stupidPool);

  final Pair<GroupByQueryRunnerFactory, Closer> factoryCloserPair = GroupByQueryRunnerTest.makeQueryRunnerFactory(
      GroupByQueryRunnerTest.DEFAULT_MAPPER,
      new GroupByQueryConfig()
      {
        @Override
        public String getDefaultStrategy()
        {
          return GroupByStrategySelector.STRATEGY_V2;
        }
      },
      processingConfig
  );
  final GroupByQueryRunnerFactory groupByQueryRunnerFactory = factoryCloserPair.lhs;
  closer.register(factoryCloserPair.rhs);

  final QueryRunnerFactoryConglomerate conglomerate = new DefaultQueryRunnerFactoryConglomerate(
      ImmutableMap.<Class<? extends Query>, QueryRunnerFactory>builder()
          .put(
              SegmentMetadataQuery.class,
              new SegmentMetadataQueryRunnerFactory(
                  new SegmentMetadataQueryQueryToolChest(new SegmentMetadataQueryConfig("P1W")),
                  QueryRunnerTestHelper.NOOP_QUERYWATCHER
              )
          )
          .put(
              ScanQuery.class,
              new ScanQueryRunnerFactory(
                  new ScanQueryQueryToolChest(new ScanQueryConfig(), new DefaultGenericQueryMetricsFactory()),
                  new ScanQueryEngine(),
                  new ScanQueryConfig()
              )
          )
          .put(
              TimeseriesQuery.class,
              new TimeseriesQueryRunnerFactory(
                  new TimeseriesQueryQueryToolChest(),
                  new TimeseriesQueryEngine(),
                  QueryRunnerTestHelper.NOOP_QUERYWATCHER
              )
          )
          .put(
              TopNQuery.class,
              new TopNQueryRunnerFactory(
                  stupidPool,
                  new TopNQueryQueryToolChest(
                      new TopNQueryConfig()
                      {
                        @Override
                        public int getMinTopNThreshold()
                        {
                          return minTopNThresholdSupplier.get();
                        }
                      }
                  ),
                  QueryRunnerTestHelper.NOOP_QUERYWATCHER
              )
          )
          .put(GroupByQuery.class, groupByQueryRunnerFactory)
          .build()
  );
  return conglomerate;
}
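For context, a caller typically pairs this helper with a Closer so that the TopN buffer pool and the group-by resources registered above are released when the test ends. A minimal sketch, assuming a fixed minTopNThreshold and a hypothetical makeProcessingConfig() helper standing in for whatever DruidProcessingConfig the test supplies:

final Closer closer = Closer.create();
try {
  final QueryRunnerFactoryConglomerate conglomerate = QueryStackTests.createQueryRunnerFactoryConglomerate(
      closer,
      makeProcessingConfig(), // hypothetical helper; any DruidProcessingConfig works here
      () -> 1000               // assumed minimum TopN threshold for the test
  );
  // ... look up runners via conglomerate.findFactory(query) and run test queries ...
}
finally {
  closer.close(); // releases the buffer pool and group-by resources registered by the helper
}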
Use of org.apache.druid.query.QueryRunnerFactoryConglomerate in project druid by druid-io.
The class ServerManagerTest, method setUp:
@Before
public void setUp()
{
  EmittingLogger.registerEmitter(new NoopServiceEmitter());
  queryWaitLatch = new CountDownLatch(1);
  queryWaitYieldLatch = new CountDownLatch(1);
  queryNotifyLatch = new CountDownLatch(1);
  factory = new MyQueryRunnerFactory(queryWaitLatch, queryWaitYieldLatch, queryNotifyLatch);
  serverManagerExec = Executors.newFixedThreadPool(2);
  segmentManager = new SegmentManager(
      new SegmentLoader()
      {
        @Override
        public ReferenceCountingSegment getSegment(
            final DataSegment segment,
            boolean lazy,
            SegmentLazyLoadFailCallback loadFailCallback
        )
        {
          return ReferenceCountingSegment.wrapSegment(
              new SegmentForTesting(
                  MapUtils.getString(segment.getLoadSpec(), "version"),
                  (Interval) segment.getLoadSpec().get("interval")
              ),
              segment.getShardSpec()
          );
        }

        @Override
        public void cleanup(DataSegment segment)
        {
        }
      }
  );
  serverManager = new ServerManager(
      new QueryRunnerFactoryConglomerate()
      {
        @Override
        public <T, QueryType extends Query<T>> QueryRunnerFactory<T, QueryType> findFactory(QueryType query)
        {
          if (query instanceof SearchQuery) {
            return (QueryRunnerFactory) factory;
          } else {
            return null;
          }
        }
      },
      new NoopServiceEmitter(),
      new ForwardingQueryProcessingPool(serverManagerExec),
      new ForegroundCachePopulator(new DefaultObjectMapper(), new CachePopulatorStats(), -1),
      new DefaultObjectMapper(),
      new LocalCacheProvider().get(),
      new CacheConfig(),
      segmentManager,
      NoopJoinableFactory.INSTANCE,
      new ServerConfig()
  );
  loadQueryable("test", "1", Intervals.of("P1d/2011-04-01"));
  loadQueryable("test", "1", Intervals.of("P1d/2011-04-02"));
  loadQueryable("test", "2", Intervals.of("P1d/2011-04-02"));
  loadQueryable("test", "1", Intervals.of("P1d/2011-04-03"));
  loadQueryable("test", "1", Intervals.of("P1d/2011-04-04"));
  loadQueryable("test", "1", Intervals.of("P1d/2011-04-05"));
  loadQueryable("test", "2", Intervals.of("PT1h/2011-04-04T01"));
  loadQueryable("test", "2", Intervals.of("PT1h/2011-04-04T02"));
  loadQueryable("test", "2", Intervals.of("PT1h/2011-04-04T03"));
  loadQueryable("test", "2", Intervals.of("PT1h/2011-04-04T05"));
  loadQueryable("test", "2", Intervals.of("PT1h/2011-04-04T06"));
  loadQueryable("test2", "1", Intervals.of("P1d/2011-04-01"));
  loadQueryable("test2", "1", Intervals.of("P1d/2011-04-02"));
}
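Because the anonymous conglomerate above dispatches purely on the query's runtime type, only search queries ever reach the stub factory. An illustrative check of that behavior, assuming the anonymous class were held in a local conglomerate variable (the mocked queries are hypothetical, not part of the test):

SearchQuery searchQuery = Mockito.mock(SearchQuery.class);
TimeseriesQuery timeseriesQuery = Mockito.mock(TimeseriesQuery.class);
Assert.assertSame(factory, conglomerate.findFactory(searchQuery)); // stub factory handles search
Assert.assertNull(conglomerate.findFactory(timeseriesQuery));      // every other query type is unhandled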
Use of org.apache.druid.query.QueryRunnerFactoryConglomerate in project druid by druid-io.
The class DumpSegmentTest, method testExecuteQuery:
@Test
public void testExecuteQuery()
{
  Injector injector = Mockito.mock(Injector.class);
  QueryRunnerFactoryConglomerate conglomerate = Mockito.mock(QueryRunnerFactoryConglomerate.class);
  QueryRunnerFactory factory = Mockito.mock(QueryRunnerFactory.class, Mockito.RETURNS_DEEP_STUBS);
  QueryRunner runner = Mockito.mock(QueryRunner.class);
  QueryRunner mergeRunner = Mockito.mock(QueryRunner.class);
  Query query = Mockito.mock(Query.class);
  Sequence expected = Sequences.simple(Collections.singletonList(123));

  Mockito.when(injector.getInstance(QueryRunnerFactoryConglomerate.class)).thenReturn(conglomerate);
  Mockito.when(conglomerate.findFactory(ArgumentMatchers.any())).thenReturn(factory);
  Mockito.when(factory.createRunner(ArgumentMatchers.any())).thenReturn(runner);
  Mockito.when(
      factory.getToolchest().mergeResults(
          factory.mergeRunners(DirectQueryProcessingPool.INSTANCE, ImmutableList.of(runner))
      )
  ).thenReturn(mergeRunner);
  Mockito.when(mergeRunner.run(ArgumentMatchers.any(), ArgumentMatchers.any())).thenReturn(expected);

  Sequence actual = DumpSegment.executeQuery(injector, null, query);
  Assert.assertSame(expected, actual);
}
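The factory mock is created with Mockito.RETURNS_DEEP_STUBS, which is what lets the chained factory.getToolchest().mergeResults(...) expression above be stubbed directly: each intermediate call returns an auto-generated mock instead of null. Without deep stubs, the toolchest would need an explicit mock, roughly like this (a sketch, not the test's actual code):

QueryToolChest toolChest = Mockito.mock(QueryToolChest.class);
Mockito.when(factory.getToolchest()).thenReturn(toolChest); // explicit intermediate stub
Mockito.when(toolChest.mergeResults(ArgumentMatchers.any())).thenReturn(mergeRunner);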
Use of org.apache.druid.query.QueryRunnerFactoryConglomerate in project druid by druid-io.
The class AppenderatorDriverRealtimeIndexTaskTest, method makeToolboxFactory:
private void makeToolboxFactory(final File directory)
{
  taskStorage = new HeapMemoryTaskStorage(new TaskStorageConfig(null));
  publishedSegments = new CopyOnWriteArrayList<>();

  ObjectMapper mapper = new DefaultObjectMapper();
  mapper.registerSubtypes(LinearShardSpec.class);
  mapper.registerSubtypes(NumberedShardSpec.class);

  IndexerSQLMetadataStorageCoordinator mdc = new IndexerSQLMetadataStorageCoordinator(
      mapper,
      derbyConnectorRule.metadataTablesConfigSupplier().get(),
      derbyConnectorRule.getConnector()
  )
  {
    @Override
    public Set<DataSegment> announceHistoricalSegments(Set<DataSegment> segments) throws IOException
    {
      Set<DataSegment> result = super.announceHistoricalSegments(segments);
      Assert.assertFalse(
          "Segment latch not initialized, did you forget to call expectPublishSegments?",
          segmentLatch == null
      );
      publishedSegments.addAll(result);
      segments.forEach(s -> segmentLatch.countDown());
      return result;
    }

    @Override
    public SegmentPublishResult announceHistoricalSegments(
        Set<DataSegment> segments,
        Set<DataSegment> segmentsToDrop,
        DataSourceMetadata startMetadata,
        DataSourceMetadata endMetadata
    ) throws IOException
    {
      SegmentPublishResult result = super.announceHistoricalSegments(segments, segmentsToDrop, startMetadata, endMetadata);
      Assert.assertFalse(
          "Segment latch not initialized, did you forget to call expectPublishSegments?",
          segmentLatch == null
      );
      publishedSegments.addAll(result.getSegments());
      result.getSegments().forEach(s -> segmentLatch.countDown());
      return result;
    }
  };

  taskLockbox = new TaskLockbox(taskStorage, mdc);
  final TaskConfig taskConfig = new TaskConfig(
      directory.getPath(),
      null,
      null,
      50000,
      null,
      true,
      null,
      null,
      null,
      false,
      false,
      TaskConfig.BATCH_PROCESSING_MODE_DEFAULT.name()
  );
  final TaskActionToolbox taskActionToolbox = new TaskActionToolbox(
      taskLockbox,
      taskStorage,
      mdc,
      EMITTER,
      EasyMock.createMock(SupervisorManager.class)
  );
  final TaskActionClientFactory taskActionClientFactory = new LocalTaskActionClientFactory(
      taskStorage,
      taskActionToolbox,
      new TaskAuditLogConfig(false)
  );
  final QueryRunnerFactoryConglomerate conglomerate = new DefaultQueryRunnerFactoryConglomerate(
      ImmutableMap.of(
          TimeseriesQuery.class,
          new TimeseriesQueryRunnerFactory(
              new TimeseriesQueryQueryToolChest(),
              new TimeseriesQueryEngine(),
              (query, future) -> {
                // do nothing
              }
          )
      )
  );

  handOffCallbacks = new ConcurrentHashMap<>();
  final SegmentHandoffNotifierFactory handoffNotifierFactory = dataSource -> new SegmentHandoffNotifier()
  {
    @Override
    public boolean registerSegmentHandoffCallback(SegmentDescriptor descriptor, Executor exec, Runnable handOffRunnable)
    {
      handOffCallbacks.put(descriptor, new Pair<>(exec, handOffRunnable));
      handoffLatch.countDown();
      return true;
    }

    @Override
    public void start()
    {
      // Noop
    }

    @Override
    public void close()
    {
      // Noop
    }
  };

  final TestUtils testUtils = new TestUtils();
  taskToolboxFactory = new TaskToolboxFactory(
      taskConfig,
      new DruidNode("druid/middlemanager", "localhost", false, 8091, null, true, false),
      taskActionClientFactory,
      EMITTER,
      new TestDataSegmentPusher(),
      new TestDataSegmentKiller(),
      null, // DataSegmentMover
      null, // DataSegmentArchiver
      new TestDataSegmentAnnouncer(),
      EasyMock.createNiceMock(DataSegmentServerAnnouncer.class),
      handoffNotifierFactory,
      () -> conglomerate,
      DirectQueryProcessingPool.INSTANCE, // queryExecutorService
      NoopJoinableFactory.INSTANCE,
      () -> EasyMock.createMock(MonitorScheduler.class),
      new SegmentCacheManagerFactory(testUtils.getTestObjectMapper()),
      testUtils.getTestObjectMapper(),
      testUtils.getTestIndexIO(),
      MapCache.create(1024),
      new CacheConfig(),
      new CachePopulatorStats(),
      testUtils.getTestIndexMergerV9(),
      EasyMock.createNiceMock(DruidNodeAnnouncer.class),
      EasyMock.createNiceMock(DruidNode.class),
      new LookupNodeService("tier"),
      new DataNodeService("tier", 1000, ServerType.INDEXER_EXECUTOR, 0),
      new SingleFileTaskReportFileWriter(reportsFile),
      null,
      AuthTestUtils.TEST_AUTHORIZER_MAPPER,
      new NoopChatHandlerProvider(),
      testUtils.getRowIngestionMetersFactory(),
      new TestAppenderatorsManager(),
      new NoopIndexingServiceClient(),
      null,
      null,
      null
  );
}
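The handOffCallbacks map populated by the notifier above is what lets the test simulate segment handoff later: once handoffLatch fires, the test can look up the stored executor/runnable pair and invoke it. A hedged sketch of that pattern (descriptor stands for whichever SegmentDescriptor the test expects and is hypothetical here):

handoffLatch.await(); // wait until the task registers its handoff callback
Pair<Executor, Runnable> callback = handOffCallbacks.get(descriptor); // descriptor: hypothetical
callback.lhs.execute(callback.rhs); // run the task's handoff callback on its own executor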
Use of org.apache.druid.query.QueryRunnerFactoryConglomerate in project druid by druid-io.
The class SqlBenchmark, method setup:
@Setup(Level.Trial)
public void setup()
{
  final GeneratorSchemaInfo schemaInfo = GeneratorBasicSchemas.SCHEMA_MAP.get("basic");
  final DataSegment dataSegment = DataSegment.builder()
                                             .dataSource("foo")
                                             .interval(schemaInfo.getDataInterval())
                                             .version("1")
                                             .shardSpec(new LinearShardSpec(0))
                                             .size(0)
                                             .build();
  final PlannerConfig plannerConfig = new PlannerConfig();
  final SegmentGenerator segmentGenerator = closer.register(new SegmentGenerator());
  log.info("Starting benchmark setup using cacheDir[%s], rows[%,d].", segmentGenerator.getCacheDir(), rowsPerSegment);
  final QueryableIndex index = segmentGenerator.generate(dataSegment, schemaInfo, Granularities.NONE, rowsPerSegment);
  final QueryRunnerFactoryConglomerate conglomerate = QueryStackTests.createQueryRunnerFactoryConglomerate(closer);
  final SpecificSegmentsQuerySegmentWalker walker = new SpecificSegmentsQuerySegmentWalker(conglomerate).add(dataSegment, index);
  closer.register(walker);
  final DruidSchemaCatalog rootSchema = CalciteTests.createMockRootSchema(
      conglomerate,
      walker,
      plannerConfig,
      AuthTestUtils.TEST_AUTHORIZER_MAPPER
  );
  plannerFactory = new PlannerFactory(
      rootSchema,
      CalciteTests.createMockQueryMakerFactory(walker, conglomerate),
      createOperatorTable(),
      CalciteTests.createExprMacroTable(),
      plannerConfig,
      AuthTestUtils.TEST_AUTHORIZER_MAPPER,
      CalciteTests.getJsonMapper(),
      CalciteTests.DRUID_SCHEMA_NAME
  );
}
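A @Benchmark method then drives plannerFactory on every invocation. The sketch below follows the shape of contemporaneous Druid SQL test code; createPlannerForTesting and the plan()/run() chain are assumptions about this Druid version's planner API, not verified signatures, and the query string is illustrative:

@Benchmark
public void querySql(Blackhole blackhole) throws Exception
{
  // Assumed API: obtain a single-use planner, plan the SQL, and run it to a Sequence.
  try (final DruidPlanner planner = plannerFactory.createPlannerForTesting(
      ImmutableMap.of(),                   // query context
      "SELECT COUNT(*) FROM foo",          // illustrative query against the generated datasource
      CalciteTests.REGULAR_USER_AUTH_RESULT
  )) {
    final Sequence<Object[]> results = planner.plan().run();
    blackhole.consume(results.accumulate(null, (acc, row) -> row)); // drain to the last row
  }
}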