Use of org.apache.druid.java.util.emitter.service.AlertEvent in project druid by druid-io.
The class KafkaEmitterTest, method testKafkaEmitter.
// There is a 1 second wait in the Kafka emitter before it starts sending events to the broker, so set a 5 second timeout.
@Test(timeout = 5_000)
public void testKafkaEmitter() throws InterruptedException
{
  final List<ServiceMetricEvent> serviceMetricEvents = ImmutableList.of(
      ServiceMetricEvent.builder().build("m1", 1).build("service", "host")
  );

  final List<AlertEvent> alertEvents = ImmutableList.of(
      new AlertEvent("service", "host", "description")
  );

  final List<RequestLogEvent> requestLogEvents = ImmutableList.of(
      DefaultRequestLogEventBuilderFactory.instance().createRequestLogEventBuilder(
          "requests",
          RequestLogLine.forSql("", null, DateTimes.nowUtc(), null, new QueryStats(ImmutableMap.of()))
      ).build("service", "host")
  );

  int totalEvents = serviceMetricEvents.size() + alertEvents.size() + requestLogEvents.size();
  int totalEventsExcludingRequestLogEvents = totalEvents - requestLogEvents.size();

  final CountDownLatch countDownSentEvents = new CountDownLatch(
      requestTopic == null ? totalEventsExcludingRequestLogEvents : totalEvents
  );

  final KafkaProducer<String, String> producer = mock(KafkaProducer.class);
  final KafkaEmitter kafkaEmitter = new KafkaEmitter(
      new KafkaEmitterConfig("", "metrics", "alerts", requestTopic, "test-cluster", null),
      new ObjectMapper()
  )
  {
    @Override
    protected Producer<String, String> setKafkaProducer()
    {
      // override send interval to 1 second
      sendInterval = 1;
      return producer;
    }
  };

  when(producer.send(any(), any())).then((invocation) -> {
    countDownSentEvents.countDown();
    return null;
  });

  kafkaEmitter.start();

  for (Event event : serviceMetricEvents) {
    kafkaEmitter.emit(event);
  }
  for (Event event : alertEvents) {
    kafkaEmitter.emit(event);
  }
  for (Event event : requestLogEvents) {
    kafkaEmitter.emit(event);
  }

  countDownSentEvents.await();

  Assert.assertEquals(0, kafkaEmitter.getMetricLostCount());
  Assert.assertEquals(0, kafkaEmitter.getAlertLostCount());
  Assert.assertEquals(requestTopic == null ? requestLogEvents.size() : 0, kafkaEmitter.getRequestLostCount());
  Assert.assertEquals(0, kafkaEmitter.getInvalidLostCount());
}
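Outside the test harness, emitting an AlertEvent follows the same pattern the test exercises. The sketch below is illustrative only: it reuses the KafkaEmitterConfig constructor arguments and the AlertEvent(service, host, description) constructor seen above, while the broker address, topic names, cluster name, and event values are placeholder assumptions, not values from the Druid source.

// Minimal sketch, assuming a reachable broker; addresses and topic names are placeholders.
KafkaEmitter kafkaEmitter = new KafkaEmitter(
    new KafkaEmitterConfig("localhost:9092", "druid-metrics", "druid-alerts", null, "my-cluster", null),
    new ObjectMapper()
);
kafkaEmitter.start();
// AlertEvent(service, host, description), as constructed in the test above; values are made up.
kafkaEmitter.emit(new AlertEvent("druid/broker", "broker-host:8082", "example alert description"));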
Use of org.apache.druid.java.util.emitter.service.AlertEvent in project druid by druid-io.
The class DropwizardEmitter, method emit.
@Override
public void emit(Event event)
{
  synchronized (started) {
    if (!started.get()) {
      throw new RejectedExecutionException("Dropwizard emitter Service not started.");
    }
  }
  if (event instanceof ServiceMetricEvent) {
    ServiceMetricEvent metricEvent = (ServiceMetricEvent) event;
    String host = metricEvent.getHost();
    String service = metricEvent.getService();
    String metric = metricEvent.getMetric();
    Map<String, Object> userDims = metricEvent.getUserDims();
    Number value = metricEvent.getValue();

    ImmutableList.Builder<String> nameBuilder = new ImmutableList.Builder<>();
    LinkedHashMap<String, String> dims = new LinkedHashMap<>();
    final DropwizardMetricSpec metricSpec = converter.addFilteredUserDims(service, metric, userDims, dims);

    if (metricSpec != null) {
      if (config.getPrefix() != null) {
        nameBuilder.add(config.getPrefix());
      }
      nameBuilder.add(StringUtils.format("metric=%s", metric));
      nameBuilder.add(StringUtils.format("service=%s", service));
      if (config.getIncludeHost()) {
        nameBuilder.add(StringUtils.format("hostname=%s", host));
      }
      dims.forEach((key, value1) -> nameBuilder.add(StringUtils.format("%s=%s", key, value1)));

      String fullName = StringUtils.replaceChar(Joiner.on(",").join(nameBuilder.build()), '/', ".");
      updateMetric(fullName, value, metricSpec);
    } else {
      log.debug("Service=[%s], Metric=[%s] has no mapping", service, metric);
    }
  } else if (event instanceof AlertEvent) {
    for (Emitter emitter : alertEmitters) {
      emitter.emit(event);
    }
  } else {
    throw new ISE("unknown event type [%s]", event.getClass());
  }
}
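To make the metric-name assembly concrete, the standalone sketch below reproduces the join-then-replace transformation with plain JDK calls. Only that transformation mirrors the emitter code above; the prefix, metric, service, host, and dataSource values are invented for illustration.

// Simplified recreation of the name-building logic, using only JDK classes.
import java.util.ArrayList;
import java.util.List;

public class DropwizardNameSketch
{
  public static void main(String[] args)
  {
    // Hypothetical prefix, metric, service, host, and one filtered user dimension.
    List<String> parts = new ArrayList<>();
    parts.add("druid");                        // config.getPrefix()
    parts.add("metric=" + "query/time");       // metricEvent.getMetric()
    parts.add("service=" + "druid/broker");    // metricEvent.getService()
    parts.add("hostname=" + "broker-host");    // metricEvent.getHost()
    parts.add("dataSource=" + "wikipedia");    // dimension kept by addFilteredUserDims

    // Stands in for Joiner.on(",").join(...) followed by StringUtils.replaceChar(..., '/', ".")
    String fullName = String.join(",", parts).replace('/', '.');

    // Prints: druid,metric=query.time,service=druid.broker,hostname=broker-host,dataSource=wikipedia
    System.out.println(fullName);
  }
}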