Use of org.apache.druid.java.util.emitter.service.ServiceEmitter in project druid by druid-io.
From the class PendingTaskBasedProvisioningStrategyTest, method testProvisionAlert.
@Test
public void testProvisionAlert() throws Exception {
  ServiceEmitter emitter = EasyMock.createMock(ServiceEmitter.class);
  EmittingLogger.registerEmitter(emitter);
  emitter.emit(EasyMock.<ServiceEventBuilder>anyObject());
  EasyMock.expectLastCall();
  EasyMock.replay(emitter);

  EasyMock.expect(autoScaler.getMinNumWorkers()).andReturn(1).times(3);
  EasyMock.expect(autoScaler.getMaxNumWorkers()).andReturn(2).times(1);
  EasyMock.expect(autoScaler.ipToIdLookup(EasyMock.anyObject())).andReturn(new ArrayList<String>()).times(2);
  EasyMock.expect(autoScaler.terminateWithIds(EasyMock.anyObject())).andReturn(null);
  EasyMock.expect(autoScaler.provision()).andReturn(new AutoScalingData(Collections.singletonList("fake")));
  EasyMock.replay(autoScaler);

  RemoteTaskRunner runner = EasyMock.createMock(RemoteTaskRunner.class);
  EasyMock.expect(runner.getPendingTaskPayloads()).andReturn(Collections.singletonList(NoopTask.create())).times(2);
  EasyMock.expect(runner.getWorkers()).andReturn(
      Arrays.asList(
          new TestZkWorker(testTask, "http", "hi", "lo", MIN_VERSION, 1).toImmutable(),
          new TestZkWorker(testTask, "http", "h1", "n1", INVALID_VERSION).toImmutable(), // Invalid version node
          new TestZkWorker(testTask, "http", "h2", "n1", INVALID_VERSION).toImmutable()  // Invalid version node
      )
  ).times(2);
  EasyMock.expect(runner.getConfig()).andReturn(new RemoteTaskRunnerConfig());
  EasyMock.replay(runner);

  Provisioner provisioner = strategy.makeProvisioner(runner);
  boolean provisionedSomething = provisioner.doProvision();
  Assert.assertTrue(provisionedSomething);
  Assert.assertEquals(1, provisioner.getStats().toList().size());
  DateTime createdTime = provisioner.getStats().toList().get(0).getTimestamp();
  Assert.assertEquals(ScalingStats.EVENT.PROVISION, provisioner.getStats().toList().get(0).getEvent());

  Thread.sleep(2000);

  provisionedSomething = provisioner.doProvision();
  Assert.assertFalse(provisionedSomething);
  Assert.assertEquals(ScalingStats.EVENT.PROVISION, provisioner.getStats().toList().get(0).getEvent());
  DateTime anotherCreatedTime = provisioner.getStats().toList().get(0).getTimestamp();
  Assert.assertEquals(createdTime, anotherCreatedTime);

  EasyMock.verify(autoScaler);
  EasyMock.verify(emitter);
  EasyMock.verify(runner);
}
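The EasyMock expectation on emitter.emit(...) passes because anything logged through EmittingLogger routes its alerts to whichever ServiceEmitter was registered via registerEmitter. Below is a minimal standalone sketch of that wiring, not taken from the strategy under test: the class name, alert text, and use of NoopEmitter are illustrative, and the emitter classes are the same ones used in the snippets on this page.

public class AlertWiringSketch {
  // Alerts built by this logger go to whatever emitter EmittingLogger.registerEmitter received.
  private static final EmittingLogger log = new EmittingLogger(AlertWiringSketch.class);

  public static void main(String[] args) throws Exception {
    ServiceEmitter emitter = new ServiceEmitter("service", "host", new NoopEmitter());
    emitter.start();
    EmittingLogger.registerEmitter(emitter);

    // makeAlert(...).emit() builds an alert event and hands it to the registered emitter;
    // this is the emitter.emit(...) call the mock in testProvisionAlert expects.
    log.makeAlert("unable to provision enough workers").emit();

    emitter.close();
  }
}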
Use of org.apache.druid.java.util.emitter.service.ServiceEmitter in project druid by druid-io.
From the class KafkaIndexTaskTest, method setupClass.
@BeforeClass
public static void setupClass() throws Exception {
  emitter = new ServiceEmitter("service", "host", new NoopEmitter());
  emitter.start();
  EmittingLogger.registerEmitter(emitter);

  zkServer = new TestingCluster(1);
  zkServer.start();

  kafkaServer = new TestBroker(zkServer.getConnectString(), null, 1, ImmutableMap.of("num.partitions", "2"));
  kafkaServer.start();

  taskExec = MoreExecutors.listeningDecorator(Executors.newCachedThreadPool(Execs.makeThreadFactory("kafka-task-test-%d")));
}
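Here the ServiceEmitter is backed by a NoopEmitter, so events emitted while the Kafka tests run are accepted and then discarded. A minimal sketch of that behavior, with an illustrative metric name and the same emitter classes used above:

// A ServiceEmitter over NoopEmitter accepts events but drops them, which keeps test
// logging and metrics quiet without null-checking the emitter everywhere.
ServiceEmitter quietEmitter = new ServiceEmitter("test-service", "localhost", new NoopEmitter());
quietEmitter.start();
quietEmitter.emit(new ServiceMetricEvent.Builder().build("test/metric", 1)); // silently dropped
quietEmitter.close();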
Use of org.apache.druid.java.util.emitter.service.ServiceEmitter in project druid by druid-io.
From the class MetricsEmittingQueryProcessingPoolTest, method testPrioritizedExecutorDelegate.
@Test
public void testPrioritizedExecutorDelegate() {
  PrioritizedExecutorService service = Mockito.mock(PrioritizedExecutorService.class);
  Mockito.when(service.getQueueSize()).thenReturn(10);
  ExecutorServiceMonitor monitor = new ExecutorServiceMonitor();
  List<Event> events = new ArrayList<>();
  MetricsEmittingQueryProcessingPool processingPool = new MetricsEmittingQueryProcessingPool(service, monitor);
  Assert.assertSame(service, processingPool.delegate());

  ServiceEmitter serviceEmitter = new ServiceEmitter("service", "host", Mockito.mock(Emitter.class)) {
    @Override
    public void emit(Event event) {
      events.add(event);
    }
  };

  monitor.doMonitor(serviceEmitter);
  Assert.assertEquals(1, events.size());
  Assert.assertEquals(((ServiceMetricEvent) events.get(0)).getMetric(), "segment/scan/pending");
  Assert.assertEquals(((ServiceMetricEvent) events.get(0)).getValue(), 10);
}
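The anonymous ServiceEmitter subclass above is a common way to capture emitted events for assertions. A hypothetical reusable equivalent is sketched below; it is not a class that ships with Druid, the name is illustrative, and it relies on the same emitter classes used in the snippets above.

// Hypothetical helper, not part of Druid: records every event passed to emit(Event)
// so tests can assert on metric names and values afterwards.
public class CapturingServiceEmitter extends ServiceEmitter {
  private final List<Event> events = new ArrayList<>();

  public CapturingServiceEmitter(String service, String host) {
    // NoopEmitter is only a placeholder delegate; emit(Event) below never forwards to it.
    super(service, host, new NoopEmitter());
  }

  @Override
  public void emit(Event event) {
    events.add(event);
  }

  public List<Event> getEvents() {
    return events;
  }
}

With such a helper, the test body above reduces to constructing the emitter, calling monitor.doMonitor(capturingEmitter), and asserting on getEvents().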
Use of org.apache.druid.java.util.emitter.service.ServiceEmitter in project druid by druid-io.
From the class KillDatasourceMetadata, method run.
@Override
public DruidCoordinatorRuntimeParams run(DruidCoordinatorRuntimeParams params) {
  long currentTimeMillis = System.currentTimeMillis();
  if ((lastKillTime + period) < currentTimeMillis) {
    lastKillTime = currentTimeMillis;
    long timestamp = currentTimeMillis - retainDuration;
    try {
      // Datasource metadata only exists for datasources that have a supervisor.
      // To determine whether datasource metadata is still active, we check whether the
      // supervisor for that particular datasource is still active.
      Map<String, SupervisorSpec> allActiveSupervisor = metadataSupervisorManager.getLatestActiveOnly();
      Set<String> allDatasourceWithActiveSupervisor = allActiveSupervisor.values()
          .stream()
          .map(supervisorSpec -> supervisorSpec.getDataSources())
          .flatMap(Collection::stream)
          .filter(datasource -> !Strings.isNullOrEmpty(datasource))
          .collect(Collectors.toSet());
      // Datasource metadata with an active supervisor is excluded from removal
      int datasourceMetadataRemovedCount = indexerMetadataStorageCoordinator.removeDataSourceMetadataOlderThan(timestamp, allDatasourceWithActiveSupervisor);
      ServiceEmitter emitter = params.getEmitter();
      emitter.emit(new ServiceMetricEvent.Builder().build("metadata/kill/datasource/count", datasourceMetadataRemovedCount));
      log.info("Finished running KillDatasourceMetadata duty. Removed %,d datasource metadata", datasourceMetadataRemovedCount);
    } catch (Exception e) {
      log.error(e, "Failed to kill datasource metadata");
    }
  }
  return params;
}
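The duty reports its work as a single counter, metadata/kill/datasource/count. If per-datasource detail were wanted, the same Builder can also carry dimensions before build(...). The sketch below is illustrative only: the dimension value, count, and the idea of emitting one event per datasource are assumptions, not code from KillDatasourceMetadata.

// Illustrative only: ServiceMetricEvent.Builder accepts dimensions via setDimension(...)
// before build(...); "dataSource" follows Druid's usual dimension-naming convention.
int removedForDatasource = 3; // placeholder count
emitter.emit(
    new ServiceMetricEvent.Builder()
        .setDimension("dataSource", "example_datasource")
        .build("metadata/kill/datasource/count", removedForDatasource)
);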
Use of org.apache.druid.java.util.emitter.service.ServiceEmitter in project druid by druid-io.
From the class KillSupervisors, method run.
@Override
public DruidCoordinatorRuntimeParams run(DruidCoordinatorRuntimeParams params) {
  long currentTimeMillis = System.currentTimeMillis();
  if ((lastKillTime + period) < currentTimeMillis) {
    lastKillTime = currentTimeMillis;
    long timestamp = currentTimeMillis - retainDuration;
    try {
      int supervisorRemoved = metadataSupervisorManager.removeTerminatedSupervisorsOlderThan(timestamp);
      ServiceEmitter emitter = params.getEmitter();
      emitter.emit(new ServiceMetricEvent.Builder().build("metadata/kill/supervisor/count", supervisorRemoved));
      log.info("Finished running KillSupervisors duty. Removed %,d supervisor specs", supervisorRemoved);
    } catch (Exception e) {
      log.error(e, "Failed to kill terminated supervisor metadata");
    }
  }
  return params;
}
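Both kill duties share the same throttling guard: run() is invoked on every coordinator cycle, but the cleanup and the metric emission happen at most once per period, and only records older than retainDuration are removed. A stripped-down sketch of that pattern follows; it is a hypothetical class, not actual Druid code, though the field names mirror the snippets above.

// Hypothetical sketch of the guard shared by KillDatasourceMetadata and KillSupervisors.
class ThrottledKillDutySketch {
  private long lastKillTime = 0;
  private final long period;          // minimum interval between real runs, in milliseconds
  private final long retainDuration;  // how far back records are kept, in milliseconds

  ThrottledKillDutySketch(long period, long retainDuration) {
    this.period = period;
    this.retainDuration = retainDuration;
  }

  void runOnce() {
    long now = System.currentTimeMillis();
    if (lastKillTime + period < now) {
      lastKillTime = now;
      long cutoff = now - retainDuration;
      // ... delete metadata older than 'cutoff' and emit a count metric here ...
    }
  }
}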