Use of org.apache.druid.server.metrics.NoopServiceEmitter in project druid by druid-io.
From the class SQLMetadataRuleManagerTest, method setUp:
@Before
public void setUp() {
  connector = derbyConnectorRule.getConnector();
  tablesConfig = derbyConnectorRule.metadataTablesConfigSupplier().get();

  connector.createAuditTable();
  auditManager = new SQLAuditManager(
      connector,
      Suppliers.ofInstance(tablesConfig),
      new NoopServiceEmitter(),
      mapper,
      new SQLAuditManagerConfig()
  );

  connector.createRulesTable();
  ruleManager = new SQLMetadataRuleManager(mapper, new MetadataRuleManagerConfig(), tablesConfig, connector, auditManager);

  connector.createSegmentTable();
  publisher = new SQLMetadataSegmentPublisher(jsonMapper, derbyConnectorRule.metadataTablesConfigSupplier().get(), connector);
}
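In each of these tests, NoopServiceEmitter stands in for a real metrics emitter so that components such as SQLAuditManager can be constructed without any metrics infrastructure. As a rough sketch of why that works (the super-call arguments are an assumption; consult the actual class in org.apache.druid.server.metrics), a no-op emitter simply extends ServiceEmitter and discards every event:

public class NoopServiceEmitter extends ServiceEmitter {
  public NoopServiceEmitter() {
    super("", "", null); // assumed super-call: no service name, host, or backing Emitter
  }

  @Override
  public void emit(Event event) {
    // intentionally empty: every metric and alert event is dropped
  }
}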
Use of org.apache.druid.server.metrics.NoopServiceEmitter in project druid by druid-io.
From the class QueryRunnerBasedOnClusteredClientTestBase, method setupTestBase:
@Before
public void setupTestBase() {
  segmentGenerator = new SegmentGenerator();
  httpClient = new TestHttpClient(objectMapper);
  simpleServerView = new SimpleServerView(toolChestWarehouse, objectMapper, httpClient);
  cachingClusteredClient = new CachingClusteredClient(
      toolChestWarehouse, simpleServerView, MapCache.create(0), objectMapper,
      new ForegroundCachePopulator(objectMapper, new CachePopulatorStats(), 0),
      new CacheConfig(), new DruidHttpClientConfig(),
      QueryStackTests.getProcessingConfig(USE_PARALLEL_MERGE_POOL_CONFIGURED, DruidProcessingConfig.DEFAULT_NUM_MERGE_BUFFERS),
      ForkJoinPool.commonPool(), QueryStackTests.DEFAULT_NOOP_SCHEDULER,
      new MapJoinableFactory(ImmutableSet.of(), ImmutableMap.of()),
      new NoopServiceEmitter()
  );
  servers = new ArrayList<>();
}
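Everything handed to this constructor is either a lightweight real implementation (MapCache, ForegroundCachePopulator, ForkJoinPool.commonPool()) or a test double (TestHttpClient, SimpleServerView, QueryStackTests.DEFAULT_NOOP_SCHEDULER, NoopServiceEmitter), so the clustered query path can be exercised without a live cluster or metrics backend. A subclass test would then run a query through the client roughly as follows (query and intervals are illustrative placeholders; the entry point mirrors the getQueryRunnerForSegments call in the last example below):

  // hypothetical usage in a subclass test method
  Sequence<?> results = cachingClusteredClient
      .getQueryRunnerForIntervals(query, intervals)
      .run(QueryPlus.wrap(query));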
Use of org.apache.druid.server.metrics.NoopServiceEmitter in project druid by druid-io.
From the class LookupReferencesManagerTest, method setUp:
@Before
public void setUp() throws IOException {
  // register a no-op emitter so alert paths in the code under test don't fail
  EmittingLogger.registerEmitter(new NoopServiceEmitter());
  druidLeaderClient = EasyMock.createMock(DruidLeaderClient.class);
  config = EasyMock.createMock(LookupListeningAnnouncerConfig.class);
  lookupExtractorFactory = new MapLookupExtractorFactory(ImmutableMap.of("key", "value"), true);
  container = new LookupExtractorFactoryContainer("v0", lookupExtractorFactory);
  mapper.registerSubtypes(MapLookupExtractorFactory.class);
  String temporaryPath = temporaryFolder.newFolder().getAbsolutePath();
  lookupReferencesManager = new LookupReferencesManager(
      new LookupConfig(temporaryPath), // reuse the folder created above rather than allocating a second one
      mapper,
      druidLeaderClient,
      config,
      true
  );
}
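The EmittingLogger.registerEmitter call at the top of this setUp is the most common reason NoopServiceEmitter shows up in tests: EmittingLogger routes alerts through a statically registered emitter, and code under test that fires an alert can fail if none has been registered. A reusable version of that pattern might look like this (the base-class name is hypothetical):

public abstract class NoopEmitterTestBase {
  @BeforeClass
  public static void registerNoopEmitter() {
    // register once per JVM; all alerts raised during the tests are silently discarded
    EmittingLogger.registerEmitter(new NoopServiceEmitter());
  }
}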
Use of org.apache.druid.server.metrics.NoopServiceEmitter in project druid by druid-io.
From the class DirectDruidClientTest, method testRun:
@Test
public void testRun() throws Exception {
  final URL url = new URL(StringUtils.format("http://%s/druid/v2/", hostName));

  // three expectations: one future the test completes with a result, one it
  // fails with a read timeout, and fresh unresolved futures for later calls
  SettableFuture<InputStream> futureResult = SettableFuture.create();
  Capture<Request> capturedRequest = EasyMock.newCapture();
  EasyMock.expect(
      httpClient.go(
          EasyMock.capture(capturedRequest),
          EasyMock.<HttpResponseHandler>anyObject(),
          EasyMock.anyObject(Duration.class)
      )
  ).andReturn(futureResult).times(1);

  SettableFuture<InputStream> futureException = SettableFuture.create();
  EasyMock.expect(
      httpClient.go(
          EasyMock.capture(capturedRequest),
          EasyMock.<HttpResponseHandler>anyObject(),
          EasyMock.anyObject(Duration.class)
      )
  ).andReturn(futureException).times(1);

  EasyMock.expect(
      httpClient.go(
          EasyMock.capture(capturedRequest),
          EasyMock.<HttpResponseHandler>anyObject(),
          EasyMock.anyObject(Duration.class)
      )
  ).andReturn(SettableFuture.create()).atLeastOnce();
  EasyMock.replay(httpClient);

  DirectDruidClient client2 = new DirectDruidClient(
      new ReflectionQueryToolChestWarehouse(),
      QueryRunnerTestHelper.NOOP_QUERYWATCHER,
      new DefaultObjectMapper(),
      httpClient,
      "http",
      "foo2",
      new NoopServiceEmitter()
  );
  QueryableDruidServer queryableDruidServer2 = new QueryableDruidServer(
      new DruidServer("test1", "localhost", null, 0, ServerType.HISTORICAL, DruidServer.DEFAULT_TIER, 0),
      client2
  );
  serverSelector.addServerAndUpdateSegment(queryableDruidServer2, serverSelector.getSegment());

  TimeBoundaryQuery query = Druids.newTimeBoundaryQueryBuilder().dataSource("test").build();
  query = query.withOverriddenContext(ImmutableMap.of(DirectDruidClient.QUERY_FAIL_TIME, Long.MAX_VALUE));

  Sequence s1 = client.run(QueryPlus.wrap(query));
  Assert.assertTrue(capturedRequest.hasCaptured());
  Assert.assertEquals(url, capturedRequest.getValue().getUrl());
  Assert.assertEquals(HttpMethod.POST, capturedRequest.getValue().getMethod());
  Assert.assertEquals(1, client.getNumOpenConnections());

  // simulate a read timeout: the failed connection should be released
  client.run(QueryPlus.wrap(query));
  Assert.assertEquals(2, client.getNumOpenConnections());
  futureException.setException(new ReadTimeoutException());
  Assert.assertEquals(1, client.getNumOpenConnections());

  // subsequent connections should work
  client.run(QueryPlus.wrap(query));
  client.run(QueryPlus.wrap(query));
  client.run(QueryPlus.wrap(query));
  Assert.assertEquals(4, client.getNumOpenConnections());

  // produce a result for the first connection, which then closes
  futureResult.set(new ByteArrayInputStream(StringUtils.toUtf8("[{\"timestamp\":\"2014-01-01T01:02:03Z\", \"result\": 42.0}]")));
  List<Result> results = s1.toList();
  Assert.assertEquals(1, results.size());
  Assert.assertEquals(DateTimes.of("2014-01-01T01:02:03Z"), results.get(0).getTimestamp());
  Assert.assertEquals(3, client.getNumOpenConnections());

  client2.run(QueryPlus.wrap(query));
  client2.run(QueryPlus.wrap(query));
  Assert.assertEquals(2, client2.getNumOpenConnections());
  Assert.assertEquals(serverSelector.pick(null), queryableDruidServer2);

  EasyMock.verify(httpClient);
}
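The assertions on getNumOpenConnections work because the mocked HttpClient returns Guava SettableFutures that the test resolves by hand, pausing the client mid-request. A minimal, Druid-free sketch of that pattern (class name and payload are illustrative):

import com.google.common.util.concurrent.SettableFuture;

public class SettableFutureDemo {
  public static void main(String[] args) throws Exception {
    SettableFuture<String> response = SettableFuture.create();
    // hand `response` to the code under test; while it is unresolved the
    // test can assert on intermediate state, e.g. open connection counts
    response.set("payload"); // the test decides when the response "arrives"
    System.out.println(response.get()); // completes immediately now
    // calling response.setException(new RuntimeException("read timeout"))
    // instead would drive the error path, as futureException does above
  }
}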
Use of org.apache.druid.server.metrics.NoopServiceEmitter in project druid by druid-io.
From the class CachingClusteredClientPerfTest, method testGetQueryRunnerForSegments_singleIntervalLargeSegments:
@Test(timeout = 10_000)
public void testGetQueryRunnerForSegments_singleIntervalLargeSegments() {
  final int segmentCount = 30_000;
  final Interval interval = Intervals.of("2021-02-13/2021-02-14");
  final List<SegmentDescriptor> segmentDescriptors = new ArrayList<>(segmentCount);
  final List<DataSegment> dataSegments = new ArrayList<>(segmentCount);
  final VersionedIntervalTimeline<String, ServerSelector> timeline = new VersionedIntervalTimeline<>(Ordering.natural());
  final DruidServer server = new DruidServer(
      "server", "localhost:9000", null, Long.MAX_VALUE,
      ServerType.HISTORICAL, DruidServer.DEFAULT_TIER, DruidServer.DEFAULT_PRIORITY
  );

  for (int ii = 0; ii < segmentCount; ii++) {
    segmentDescriptors.add(new SegmentDescriptor(interval, "1", ii));
    DataSegment segment = makeDataSegment("test", interval, "1", ii);
    dataSegments.add(segment);
  }

  timeline.addAll(Iterators.transform(dataSegments.iterator(), segment -> {
    ServerSelector ss = new ServerSelector(segment, new HighestPriorityTierSelectorStrategy(new RandomServerSelectorStrategy()));
    ss.addServerAndUpdateSegment(new QueryableDruidServer(server, new MockQueryRunner()), segment);
    return new VersionedIntervalTimeline.PartitionChunkEntry<>(
        segment.getInterval(), segment.getVersion(), segment.getShardSpec().createChunk(ss)
    );
  }));

  TimelineServerView serverView = Mockito.mock(TimelineServerView.class);
  QueryScheduler queryScheduler = Mockito.mock(QueryScheduler.class);
  // mock scheduler to return same sequence as argument
  Mockito.when(queryScheduler.run(any(), any())).thenAnswer(i -> i.getArgument(1));
  Mockito.when(queryScheduler.prioritizeAndLaneQuery(any(), any())).thenAnswer(i -> ((QueryPlus) i.getArgument(0)).getQuery());
  Mockito.doReturn(Optional.of(timeline)).when(serverView).getTimeline(any());
  Mockito.doReturn(new MockQueryRunner()).when(serverView).getQueryRunner(any());

  CachingClusteredClient cachingClusteredClient = new CachingClusteredClient(
      new MockQueryToolChestWareHouse(), serverView, MapCache.create(1024),
      TestHelper.makeJsonMapper(), Mockito.mock(CachePopulator.class),
      new CacheConfig(), Mockito.mock(DruidHttpClientConfig.class),
      Mockito.mock(DruidProcessingConfig.class), ForkJoinPool.commonPool(),
      queryScheduler, NoopJoinableFactory.INSTANCE, new NoopServiceEmitter()
  );

  Query<SegmentDescriptor> fakeQuery = makeFakeQuery(interval);
  QueryRunner<SegmentDescriptor> queryRunner = cachingClusteredClient.getQueryRunnerForSegments(fakeQuery, segmentDescriptors);
  Sequence<SegmentDescriptor> sequence = queryRunner.run(QueryPlus.wrap(fakeQuery));
  Assert.assertEquals(segmentDescriptors, sequence.toList());
}
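The pass-through stubbing above, thenAnswer(i -> i.getArgument(1)), makes the mocked QueryScheduler act as an identity function over the sequence it is given. The same Mockito idiom in self-contained form (the BinaryOperator target is illustrative):

import static org.mockito.ArgumentMatchers.any;

import java.util.function.BinaryOperator;
import org.mockito.Mockito;

public class PassThroughStubDemo {
  @SuppressWarnings("unchecked")
  public static void main(String[] args) {
    BinaryOperator<String> op = Mockito.mock(BinaryOperator.class);
    // echo the second argument, as the mocked queryScheduler.run(...) does above
    Mockito.when(op.apply(any(), any())).thenAnswer(i -> i.getArgument(1));
    System.out.println(op.apply("ignored", "returned")); // prints "returned"
  }
}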