Use of org.apache.druid.server.metrics.NoopServiceEmitter in project druid by druid-io.
The class RemoteTaskRunnerTest, method setUp.
@Before
public void setUp() throws Exception {
  rtrTestUtils.setUp();
  jsonMapper = rtrTestUtils.getObjectMapper();
  cf = rtrTestUtils.getCuratorFramework();
  task = TestTasks.unending("task id with spaces");
  EmittingLogger.registerEmitter(new NoopServiceEmitter());
}
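Registering the no-op emitter up front keeps EmittingLogger from failing when test code emits alerts. As a rough sketch of what the helper amounts to (an assumption, corroborated by the anonymous new ServiceEmitter("", "", null) in the testQuery example further down), it is a ServiceEmitter whose emit does nothing:

import org.apache.druid.java.util.emitter.core.Event;
import org.apache.druid.java.util.emitter.service.ServiceEmitter;

// Sketch only: assumes the helper extends ServiceEmitter and discards every event.
public class NoopServiceEmitter extends ServiceEmitter {
  public NoopServiceEmitter() {
    super("", "", null);
  }

  @Override
  public void emit(Event event) {
    // Swallow metric and alert events so tests stay quiet.
  }
}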
Use of org.apache.druid.server.metrics.NoopServiceEmitter in project druid by druid-io.
The class WorkerTaskMonitorTest, method createTaskMonitor.
private WorkerTaskMonitor createTaskMonitor() {
  final TaskConfig taskConfig = new TaskConfig(
      FileUtils.createTempDir().toString(),
      null,
      null,
      0,
      null,
      false,
      null,
      null,
      null,
      false,
      false,
      TaskConfig.BATCH_PROCESSING_MODE_DEFAULT.name()
  );
  TaskActionClientFactory taskActionClientFactory = EasyMock.createNiceMock(TaskActionClientFactory.class);
  TaskActionClient taskActionClient = EasyMock.createNiceMock(TaskActionClient.class);
  EasyMock.expect(taskActionClientFactory.create(EasyMock.anyObject())).andReturn(taskActionClient).anyTimes();
  SegmentHandoffNotifierFactory notifierFactory = EasyMock.createNiceMock(SegmentHandoffNotifierFactory.class);
  EasyMock.replay(taskActionClientFactory, taskActionClient, notifierFactory);
  return new WorkerTaskMonitor(
      jsonMapper,
      new SingleTaskBackgroundRunner(
          new TaskToolboxFactory(
              taskConfig,
              null,
              taskActionClientFactory,
              null, null, null, null, null, null, null,
              notifierFactory,
              null, null,
              NoopJoinableFactory.INSTANCE,
              null,
              new SegmentCacheManagerFactory(jsonMapper),
              jsonMapper,
              indexIO,
              null, null, null,
              indexMergerV9,
              null, null, null, null,
              new NoopTestTaskReportFileWriter(),
              null,
              AuthTestUtils.TEST_AUTHORIZER_MAPPER,
              new NoopChatHandlerProvider(),
              testUtils.getRowIngestionMetersFactory(),
              new TestAppenderatorsManager(),
              new NoopIndexingServiceClient(),
              null, null, null
          ),
          taskConfig,
          new NoopServiceEmitter(),
          DUMMY_NODE,
          new ServerConfig()
      ),
      taskConfig,
      cf,
      workerCuratorCoordinator,
      EasyMock.createNiceMock(DruidLeaderClient.class)
  );
}
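Nearly every toolbox dependency is stubbed with null or a nice mock; the NoopServiceEmitter handed to SingleTaskBackgroundRunner just silences task metrics. A hypothetical call site (assuming WorkerTaskMonitor exposes the usual lifecycle start()/stop() methods) would bracket the assertions like this:

// Hypothetical usage; start()/stop() are assumed lifecycle methods.
WorkerTaskMonitor monitor = createTaskMonitor();
monitor.start();
try {
  // ... drive a task through the curator-based worker path and assert on its status ...
} finally {
  monitor.stop();
}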
Use of org.apache.druid.server.metrics.NoopServiceEmitter in project druid by druid-io.
The class DatasourceOptimizerTest, method setupViews.
private void setupViews() throws Exception {
  baseView = new BatchServerInventoryView(zkPathsConfig, curator, jsonMapper, Predicates.alwaysTrue(), "test") {
    @Override
    public void registerSegmentCallback(Executor exec, final SegmentCallback callback) {
      super.registerSegmentCallback(exec, new SegmentCallback() {
        @Override
        public CallbackAction segmentAdded(DruidServerMetadata server, DataSegment segment) {
          return callback.segmentAdded(server, segment);
        }

        @Override
        public CallbackAction segmentRemoved(DruidServerMetadata server, DataSegment segment) {
          return callback.segmentRemoved(server, segment);
        }

        @Override
        public CallbackAction segmentViewInitialized() {
          return callback.segmentViewInitialized();
        }
      });
    }
  };
  brokerServerView = new BrokerServerView(
      EasyMock.createMock(QueryToolChestWarehouse.class),
      EasyMock.createMock(QueryWatcher.class),
      getSmileMapper(),
      EasyMock.createMock(HttpClient.class),
      baseView,
      new HighestPriorityTierSelectorStrategy(new RandomServerSelectorStrategy()),
      new NoopServiceEmitter(),
      new BrokerSegmentWatcherConfig()
  );
  baseView.start();
}
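The overridden registerSegmentCallback simply delegates, giving the test a hook into segment events. A hypothetical variation (not part of this test) latches on segmentViewInitialized so setup can block until the inventory view has loaded, assuming Execs.directExecutor() and a java.util.concurrent.CountDownLatch:

// Hypothetical: block until the inventory view reports initialization.
final CountDownLatch initLatch = new CountDownLatch(1);
baseView.registerSegmentCallback(Execs.directExecutor(), new SegmentCallback() {
  @Override
  public CallbackAction segmentAdded(DruidServerMetadata server, DataSegment segment) {
    return CallbackAction.CONTINUE;
  }

  @Override
  public CallbackAction segmentRemoved(DruidServerMetadata server, DataSegment segment) {
    return CallbackAction.CONTINUE;
  }

  @Override
  public CallbackAction segmentViewInitialized() {
    initLatch.countDown();
    return CallbackAction.CONTINUE;
  }
});
initLatch.await();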
Use of org.apache.druid.server.metrics.NoopServiceEmitter in project druid by druid-io.
The class MovingAverageQueryTest, method testQuery.
/**
* Validate that the specified query behaves correctly.
*/
@SuppressWarnings({ "unchecked", "rawtypes" })
@Test
public void testQuery() throws IOException {
  Query<?> query = jsonMapper.readValue(getQueryString(), Query.class);
  Assert.assertThat(query, IsInstanceOf.instanceOf(getExpectedQueryType()));
  List<MapBasedRow> expectedResults = jsonMapper.readValue(getExpectedResultString(), getExpectedResultType());
  Assert.assertNotNull(expectedResults);
  Assert.assertThat(expectedResults, IsInstanceOf.instanceOf(List.class));
  CachingClusteredClient baseClient = new CachingClusteredClient(
      warehouse,
      new TimelineServerView() {
        @Override
        public Optional<? extends TimelineLookup<String, ServerSelector>> getTimeline(DataSourceAnalysis analysis) {
          return Optional.empty();
        }

        @Override
        public List<ImmutableDruidServer> getDruidServers() {
          return null;
        }

        @Override
        public <T> QueryRunner<T> getQueryRunner(DruidServer server) {
          return null;
        }

        @Override
        public void registerTimelineCallback(Executor exec, TimelineCallback callback) {
        }

        @Override
        public void registerSegmentCallback(Executor exec, SegmentCallback callback) {
        }

        @Override
        public void registerServerRemovedCallback(Executor exec, ServerRemovedCallback callback) {
        }
      },
      MapCache.create(100000),
      jsonMapper,
      new ForegroundCachePopulator(jsonMapper, new CachePopulatorStats(), -1),
      new CacheConfig(),
      new DruidHttpClientConfig() {
        @Override
        public long getMaxQueuedBytes() {
          return 0L;
        }
      },
      new DruidProcessingConfig() {
        @Override
        public String getFormatString() {
          return null;
        }
      },
      ForkJoinPool.commonPool(),
      QueryStackTests.DEFAULT_NOOP_SCHEDULER,
      new MapJoinableFactory(ImmutableSet.of(), ImmutableMap.of()),
      new NoopServiceEmitter()
  );
  ClientQuerySegmentWalker walker = new ClientQuerySegmentWalker(
      new ServiceEmitter("", "", null) {
        @Override
        public void emit(Event event) {
        }
      },
      baseClient,
      null, /* local client; unused in this test, so pass in null */
      warehouse,
      new MapJoinableFactory(ImmutableSet.of(), ImmutableMap.of()),
      retryConfig,
      jsonMapper,
      serverConfig,
      null,
      new CacheConfig()
  );
  defineMocks();
  QueryPlus queryPlus = QueryPlus.wrap(query);
  final Sequence<?> res = query.getRunner(walker).run(queryPlus);
  List actualResults = new ArrayList();
  actualResults = (List<MapBasedRow>) res.accumulate(actualResults, Accumulators.list());
  expectedResults = consistentTypeCasting(expectedResults);
  actualResults = consistentTypeCasting(actualResults);
  Assert.assertEquals(expectedResults, actualResults);
}
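The walker's first argument is an inline anonymous ServiceEmitter that discards events. Assuming NoopServiceEmitter is exactly that (a ServiceEmitter("", "", null) with an empty emit, per the sketch after the first example), the construction can be written equivalently:

// Hypothetical, behavior-preserving substitution for the anonymous emitter above:
ClientQuerySegmentWalker walker = new ClientQuerySegmentWalker(
    new NoopServiceEmitter(),
    baseClient,
    null, /* local client; unused in this test, so pass in null */
    warehouse,
    new MapJoinableFactory(ImmutableSet.of(), ImmutableMap.of()),
    retryConfig,
    jsonMapper,
    serverConfig,
    null,
    new CacheConfig()
);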
Use of org.apache.druid.server.metrics.NoopServiceEmitter in project druid by druid-io.
The class KafkaSupervisorTest, method getSupervisor.
/**
* Use when you don't want generateSequenceNumber overridden
*/
private KafkaSupervisor getSupervisor(
    int replicas,
    int taskCount,
    boolean useEarliestOffset,
    String duration,
    Period lateMessageRejectionPeriod,
    Period earlyMessageRejectionPeriod,
    boolean suspended,
    String kafkaHost,
    DataSchema dataSchema,
    KafkaSupervisorTuningConfig tuningConfig
) {
  Map<String, Object> consumerProperties = new HashMap<>();
  consumerProperties.put("myCustomKey", "myCustomValue");
  consumerProperties.put("bootstrap.servers", kafkaHost);
  consumerProperties.put("isolation.level", "read_committed");
  KafkaSupervisorIOConfig kafkaSupervisorIOConfig = new KafkaSupervisorIOConfig(
      topic,
      INPUT_FORMAT,
      replicas,
      taskCount,
      new Period(duration),
      consumerProperties,
      null,
      KafkaSupervisorIOConfig.DEFAULT_POLL_TIMEOUT_MILLIS,
      new Period("P1D"),
      new Period("PT30S"),
      useEarliestOffset,
      new Period("PT30M"),
      lateMessageRejectionPeriod,
      earlyMessageRejectionPeriod,
      null
  );
  KafkaIndexTaskClientFactory taskClientFactory = new KafkaIndexTaskClientFactory(null, null) {
    @Override
    public KafkaIndexTaskClient build(TaskInfoProvider taskInfoProvider, String dataSource, int numThreads, Duration httpTimeout, long numRetries) {
      Assert.assertEquals(TEST_CHAT_THREADS, numThreads);
      Assert.assertEquals(TEST_HTTP_TIMEOUT.toStandardDuration(), httpTimeout);
      Assert.assertEquals(TEST_CHAT_RETRIES, numRetries);
      return taskClient;
    }
  };
  return new KafkaSupervisor(
      taskStorage,
      taskMaster,
      indexerMetadataStorageCoordinator,
      taskClientFactory,
      OBJECT_MAPPER,
      new KafkaSupervisorSpec(
          null,
          dataSchema,
          tuningConfig,
          kafkaSupervisorIOConfig,
          null,
          suspended,
          taskStorage,
          taskMaster,
          indexerMetadataStorageCoordinator,
          taskClientFactory,
          OBJECT_MAPPER,
          new NoopServiceEmitter(),
          new DruidMonitorSchedulerConfig(),
          rowIngestionMetersFactory,
          supervisorConfig
      ),
      rowIngestionMetersFactory
  );
}
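Everything after the task-client factory is threaded into a KafkaSupervisorSpec, with NoopServiceEmitter standing in for the production metrics pipeline. A hypothetical invocation from a test body (all literal values are illustrative; kafkaHost, dataSchema, and tuningConfig are assumed to come from the enclosing test) looks like:

// Hypothetical invocation; the literal values are illustrative, not from the test.
KafkaSupervisor supervisor = getSupervisor(
    1,        // replicas
    1,        // taskCount
    true,     // useEarliestOffset
    "PT1H",   // duration
    null,     // lateMessageRejectionPeriod
    null,     // earlyMessageRejectionPeriod
    false,    // suspended
    kafkaHost,
    dataSchema,
    tuningConfig
);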