Use of org.apache.druid.java.util.emitter.service.ServiceEmitter in project druid by druid-io.
In TaskLockboxTest, the setup method:
@Before
public void setup()
{
  objectMapper = TestHelper.makeJsonMapper();
  objectMapper.registerSubtypes(NumberedShardSpec.class, HashBasedNumberedShardSpec.class);

  final TestDerbyConnector derbyConnector = derby.getConnector();
  derbyConnector.createTaskTables();
  derbyConnector.createPendingSegmentsTable();
  derbyConnector.createSegmentTable();

  final MetadataStorageTablesConfig tablesConfig = derby.metadataTablesConfigSupplier().get();
  taskStorage = new MetadataTaskStorage(
      derbyConnector,
      new TaskStorageConfig(null),
      new DerbyMetadataStorageActionHandlerFactory(derbyConnector, tablesConfig, objectMapper)
  );

  // EmittingLogger routes alerts through a statically registered emitter,
  // so one must be in place before any code under test can log an alert.
  ServiceEmitter emitter = EasyMock.createMock(ServiceEmitter.class);
  EmittingLogger.registerEmitter(emitter);
  EasyMock.replay(emitter);

  metadataStorageCoordinator = new IndexerSQLMetadataStorageCoordinator(objectMapper, tablesConfig, derbyConnector);
  lockbox = new TaskLockbox(taskStorage, metadataStorageCoordinator);
}
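The register-before-test pattern above recurs throughout these examples, because EmittingLogger holds a single static ServiceEmitter. A minimal sketch of the two common ways to satisfy it in a test; both APIs appear elsewhere in this listing (NoopServiceEmitter is the test helper used in MovingAverageQueryTest below):

// Option 1 (used above): a default EasyMock mock. With no expectations
// recorded before replay(), any emit() call fails the test, which is fine
// when no alerts are expected.
ServiceEmitter emitter = EasyMock.createMock(ServiceEmitter.class);
EmittingLogger.registerEmitter(emitter);
EasyMock.replay(emitter);

// Option 2: a no-op emitter that silently accepts everything.
EmittingLogger.registerEmitter(new NoopServiceEmitter());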
Use of org.apache.druid.java.util.emitter.service.ServiceEmitter in project druid by druid-io.
In JvmMonitorTest, the testGcCounts method:
@Test(timeout = 60_000L)
public void testGcCounts() throws InterruptedException
{
  GcTrackingEmitter emitter = new GcTrackingEmitter();
  final ServiceEmitter serviceEmitter = new ServiceEmitter("test", "localhost", emitter);
  serviceEmitter.start();
  final JvmMonitor jvmMonitor = new JvmMonitor();
  // Skip the test if the GC counters fail to initialize with this JDK.
  Assume.assumeNotNull(jvmMonitor.gcCounters);

  while (true) {
    // Generate some garbage so the GC counters get incremented.
    @SuppressWarnings("unused")
    byte[] b = new byte[1024 * 1024 * 50];
    emitter.reset();
    jvmMonitor.doMonitor(serviceEmitter);
    if (emitter.gcSeen()) {
      return;
    }
    Thread.sleep(10);
  }
}
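GcTrackingEmitter is a test-local helper that is not shown in this excerpt. A hedged sketch of what such an emitter could look like, assuming JvmMonitor reports its GC metrics under names starting with jvm/gc/ (the metric-name prefix is an assumption here, not something this excerpt confirms):

class GcTrackingEmitter implements Emitter
{
  private volatile boolean gcSeen;

  @Override
  public void start()
  {
  }

  @Override
  public void emit(Event event)
  {
    // Only metric events carry a metric name; alerts and other events are ignored.
    if (event instanceof ServiceMetricEvent) {
      String metric = ((ServiceMetricEvent) event).getMetric();
      if (metric != null && metric.startsWith("jvm/gc/")) {
        gcSeen = true;
      }
    }
  }

  void reset()
  {
    gcSeen = false;
  }

  boolean gcSeen()
  {
    return gcSeen;
  }

  @Override
  public void flush()
  {
  }

  @Override
  public void close()
  {
  }
}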
Use of org.apache.druid.java.util.emitter.service.ServiceEmitter in project druid by druid-io.
In MovingAverageQueryTest, the testQuery method:
/**
 * Validate that the specified query behaves correctly.
 */
@SuppressWarnings({"unchecked", "rawtypes"})
@Test
public void testQuery() throws IOException
{
  Query<?> query = jsonMapper.readValue(getQueryString(), Query.class);
  Assert.assertThat(query, IsInstanceOf.instanceOf(getExpectedQueryType()));
  List<MapBasedRow> expectedResults = jsonMapper.readValue(getExpectedResultString(), getExpectedResultType());
  Assert.assertNotNull(expectedResults);
  Assert.assertThat(expectedResults, IsInstanceOf.instanceOf(List.class));

  CachingClusteredClient baseClient = new CachingClusteredClient(
      warehouse,
      new TimelineServerView()
      {
        @Override
        public Optional<? extends TimelineLookup<String, ServerSelector>> getTimeline(DataSourceAnalysis analysis)
        {
          return Optional.empty();
        }

        @Override
        public List<ImmutableDruidServer> getDruidServers()
        {
          return null;
        }

        @Override
        public <T> QueryRunner<T> getQueryRunner(DruidServer server)
        {
          return null;
        }

        @Override
        public void registerTimelineCallback(Executor exec, TimelineCallback callback)
        {
        }

        @Override
        public void registerSegmentCallback(Executor exec, SegmentCallback callback)
        {
        }

        @Override
        public void registerServerRemovedCallback(Executor exec, ServerRemovedCallback callback)
        {
        }
      },
      MapCache.create(100000),
      jsonMapper,
      new ForegroundCachePopulator(jsonMapper, new CachePopulatorStats(), -1),
      new CacheConfig(),
      new DruidHttpClientConfig()
      {
        @Override
        public long getMaxQueuedBytes()
        {
          return 0L;
        }
      },
      new DruidProcessingConfig()
      {
        @Override
        public String getFormatString()
        {
          return null;
        }
      },
      ForkJoinPool.commonPool(),
      QueryStackTests.DEFAULT_NOOP_SCHEDULER,
      new MapJoinableFactory(ImmutableSet.of(), ImmutableMap.of()),
      new NoopServiceEmitter()
  );

  ClientQuerySegmentWalker walker = new ClientQuerySegmentWalker(
      new ServiceEmitter("", "", null)
      {
        @Override
        public void emit(Event event)
        {
        }
      },
      baseClient,
      null, // local client; unused in this test, so pass in null
      warehouse,
      new MapJoinableFactory(ImmutableSet.of(), ImmutableMap.of()),
      retryConfig,
      jsonMapper,
      serverConfig,
      null,
      new CacheConfig()
  );

  defineMocks();
  QueryPlus queryPlus = QueryPlus.wrap(query);
  final Sequence<?> res = query.getRunner(walker).run(queryPlus);

  List actualResults = new ArrayList();
  actualResults = (List<MapBasedRow>) res.accumulate(actualResults, Accumulators.list());

  expectedResults = consistentTypeCasting(expectedResults);
  actualResults = consistentTypeCasting(actualResults);
  Assert.assertEquals(expectedResults, actualResults);
}
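Two ServiceEmitter idioms appear in this one test: the NoopServiceEmitter passed to CachingClusteredClient, and the anonymous subclass with an empty emit(Event) passed to ClientQuerySegmentWalker. They are interchangeable ways of discarding metrics; a minimal sketch of both:

// Anonymous subclass, as in the walker above. The null delegate is safe only
// because emit() is overridden and start()/flush()/close() are never called
// on this instance.
ServiceEmitter silent = new ServiceEmitter("", "", null)
{
  @Override
  public void emit(Event event)
  {
    // intentionally empty: drop every event
  }
};

// Equivalent test helper, as passed to CachingClusteredClient above.
ServiceEmitter alsoSilent = new NoopServiceEmitter();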
Use of org.apache.druid.java.util.emitter.service.ServiceEmitter in project druid by druid-io.
In MySQLMetadataStorageModuleTest, the createInjector method:
private Injector createInjector()
{
  MySQLMetadataStorageModule module = new MySQLMetadataStorageModule();
  Injector injector = GuiceInjectors.makeStartupInjectorWithModules(
      ImmutableList.of(
          new MetadataConfigModule(),
          new LifecycleModule(),
          module,
          new Module()
          {
            @Override
            public void configure(Binder binder)
            {
              module.createBindingChoices(binder, "mysql");
            }

            @Provides
            public ServiceEmitter getEmitter()
            {
              return new ServiceEmitter("test", "localhost", new NoopEmitter());
            }
          }
      )
  );
  ObjectMapper mapper = injector.getInstance(Key.get(ObjectMapper.class, Json.class));
  mapper.registerModules(module.getJacksonModules());
  return injector;
}
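The @Provides method makes a real ServiceEmitter (backed by a NoopEmitter, so every event goes nowhere) available to anything the injector constructs. A minimal sketch of pulling it back out of the injector and emitting through it; the metric name is made up for illustration:

ServiceEmitter emitter = injector.getInstance(ServiceEmitter.class);
// Builds a ServiceMetricEvent and hands it to the emitter; with a NoopEmitter
// behind it, the event is simply discarded.
emitter.emit(ServiceMetricEvent.builder().build("test/metric", 1));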
Use of org.apache.druid.java.util.emitter.service.ServiceEmitter in project druid by druid-io.
In RemoteTaskRunnerTest, the testRunTooMuchZKData method:
@Test
public void testRunTooMuchZKData() throws Exception
{
  ServiceEmitter emitter = EasyMock.createMock(ServiceEmitter.class);
  EmittingLogger.registerEmitter(emitter);
  EasyMock.replay(emitter);
  doSetup();
  // A task whose id is 5000 characters, producing an oversized ZK payload.
  remoteTaskRunner.run(TestTasks.unending(new String(new char[5000])));
  EasyMock.verify(emitter);
}
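Because no expectations are recorded before replay(), the default EasyMock mock fails the test on any emit() call, so verify() here effectively asserts that nothing was emitted during the run. To assert the opposite, that an alert does fire, a small counting emitter is often simpler than recording EasyMock expectations on overloaded emit methods. A hedged sketch, assuming alerts from EmittingLogger surface as AlertEvent instances (an assumption this excerpt does not confirm):

// Hypothetical variant: count alerts instead of forbidding them.
final AtomicInteger alerts = new AtomicInteger();
ServiceEmitter countingEmitter = new NoopServiceEmitter()
{
  @Override
  public void emit(Event event)
  {
    if (event instanceof AlertEvent) {
      alerts.incrementAndGet();
    }
  }
};
EmittingLogger.registerEmitter(countingEmitter);
// ... run the code under test, then assert an alert was seen:
Assert.assertTrue(alerts.get() > 0);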