Use of com.wavefront.agent.handlers.ReportableEntityHandler in the wavefrontHQ/java project.
Class JaegerProtobufUtils, method processSpan:
/**
 * Converts a Jaeger protobuf {@code Model.Span} into a Wavefront {@code Span} (and optional
 * {@code SpanLogs}), runs the optional preprocessor, applies sampling, and reports the results.
 * When {@code wfInternalReporter} is non-null, derived RED metrics are emitted regardless of
 * the sampling decision. Indexed tag precedence: Span Level > Process Level > Proxy Level > Default.
 */
private static void processSpan(Model.Span span, String serviceName, String sourceName, String applicationName, String cluster, String shard, List<Annotation> processAnnotations, ReportableEntityHandler<Span, String> spanHandler, ReportableEntityHandler<SpanLogs, String> spanLogsHandler, @Nullable WavefrontInternalReporter wfInternalReporter, Supplier<Boolean> spanLogsDisabled, Supplier<ReportableEntityPreprocessor> preprocessorSupplier, SpanSampler sampler, Set<String> traceDerivedCustomTagKeys, Counter discardedSpansBySampler, Set<Pair<Map<String, String>, String>> discoveredHeartbeatMetrics) {
// Start from the process-level annotations; span-level tags below may add to or override them.
List<Annotation> annotations = new ArrayList<>(processAnnotations);
// serviceName is mandatory in Jaeger
annotations.add(new Annotation(SERVICE_TAG_KEY, serviceName));
String componentTagValue = NULL_TAG_VAL;
boolean isError = false;
// Promote well-known span tags (application/cluster/shard/source) out of the tag list into
// locals; all other convertible tags are kept as span annotations.
if (span.getTagsList() != null) {
for (Model.KeyValue tag : span.getTagsList()) {
// Skip explicitly ignored tags and blank string values.
if (IGNORE_TAGS.contains(tag.getKey()) || (tag.getVType() == Model.ValueType.STRING && StringUtils.isBlank(tag.getVStr()))) {
continue;
}
Annotation annotation = tagToAnnotation(tag);
if (annotation != null) {
switch(annotation.getKey()) {
case APPLICATION_TAG_KEY:
applicationName = annotation.getValue();
continue;
case CLUSTER_TAG_KEY:
cluster = annotation.getValue();
continue;
case SHARD_TAG_KEY:
shard = annotation.getValue();
continue;
case SOURCE_KEY:
// Do not add source to annotation span tag list.
sourceName = annotation.getValue();
continue;
case SERVICE_TAG_KEY:
// Do not use service tag from annotations, use field instead
continue;
case COMPONENT_TAG_KEY:
componentTagValue = annotation.getValue();
break;
case ERROR_TAG_KEY:
// only error=true is supported
isError = annotation.getValue().equals(ERROR_SPAN_TAG_VAL);
break;
}
annotations.add(annotation);
}
}
}
// Add all wavefront indexed tags. These are set based on below hierarchy.
// Span Level > Process Level > Proxy Level > Default
annotations.add(new Annotation(APPLICATION_TAG_KEY, applicationName));
annotations.add(new Annotation(CLUSTER_TAG_KEY, cluster));
annotations.add(new Annotation(SHARD_TAG_KEY, shard));
// Map Jaeger references to Wavefront lineage annotations:
// CHILD_OF -> parent, FOLLOWS_FROM -> followsFrom.
if (span.getReferencesList() != null) {
for (Model.SpanRef reference : span.getReferencesList()) {
switch(reference.getRefType()) {
case CHILD_OF:
if (!reference.getSpanId().isEmpty()) {
annotations.add(new Annotation(TraceConstants.PARENT_KEY, toStringId(reference.getSpanId())));
}
break;
case FOLLOWS_FROM:
if (!reference.getSpanId().isEmpty()) {
annotations.add(new Annotation(TraceConstants.FOLLOWS_FROM_KEY, toStringId(reference.getSpanId())));
}
// falls through to the empty default — no-op, harmless
default:
}
}
}
// NOTE(review): this gate reads spanLogsDisabled.get() directly, while the actual span-log
// report below uses isFeatureDisabled(spanLogsDisabled, ...) — presumably equivalent; confirm.
if (!spanLogsDisabled.get() && span.getLogsCount() > 0) {
annotations.add(new Annotation("_spanLogs", "true"));
}
// NOTE(review): customer is hard-coded to "dummy" here but "default" for SpanLogs below —
// presumably rewritten downstream; confirm.
Span wavefrontSpan = Span.newBuilder().setCustomer("dummy").setName(span.getOperationName()).setSource(sourceName).setSpanId(toStringId(span.getSpanId())).setTraceId(toStringId(span.getTraceId())).setStartMillis(toMillis(span.getStartTime())).setDuration(toMillis(span.getDuration())).setAnnotations(annotations).build();
// Log Jaeger spans as well as Wavefront spans for debugging purposes.
if (JAEGER_DATA_LOGGER.isLoggable(Level.FINEST)) {
JAEGER_DATA_LOGGER.info("Inbound Jaeger span: " + span.toString());
JAEGER_DATA_LOGGER.info("Converted Wavefront span: " + wavefrontSpan.toString());
}
// The preprocessor may mutate the span in place; a failed filter either rejects it
// (with a message) or silently blocks it, and nothing further is reported.
if (preprocessorSupplier != null) {
ReportableEntityPreprocessor preprocessor = preprocessorSupplier.get();
String[] messageHolder = new String[1];
preprocessor.forSpan().transform(wavefrontSpan);
if (!preprocessor.forSpan().filter(wavefrontSpan, messageHolder)) {
if (messageHolder[0] != null) {
spanHandler.reject(wavefrontSpan, messageHolder[0]);
} else {
spanHandler.block(wavefrontSpan);
}
return;
}
}
if (sampler.sample(wavefrontSpan, discardedSpansBySampler)) {
spanHandler.report(wavefrontSpan);
// Span logs are only emitted for sampled spans; each Jaeger log field is flattened
// to a string keyed by field name (BINARY fields are dropped).
if (span.getLogsCount() > 0 && !isFeatureDisabled(spanLogsDisabled, SPANLOGS_DISABLED, null)) {
SpanLogs spanLogs = SpanLogs.newBuilder().setCustomer("default").setTraceId(wavefrontSpan.getTraceId()).setSpanId(wavefrontSpan.getSpanId()).setLogs(span.getLogsList().stream().map(x -> {
Map<String, String> fields = new HashMap<>(x.getFieldsCount());
x.getFieldsList().forEach(t -> {
switch(t.getVType()) {
case STRING:
fields.put(t.getKey(), t.getVStr());
break;
case BOOL:
fields.put(t.getKey(), String.valueOf(t.getVBool()));
break;
case INT64:
fields.put(t.getKey(), String.valueOf(t.getVInt64()));
break;
case FLOAT64:
fields.put(t.getKey(), String.valueOf(t.getVFloat64()));
break;
case BINARY:
// ignore
default:
}
});
return SpanLog.newBuilder().setTimestamp(toMicros(x.getTimestamp())).setFields(fields).build();
}).collect(Collectors.toList())).build();
spanLogsHandler.report(spanLogs);
}
}
// report stats irrespective of span sampling.
if (wfInternalReporter != null) {
// Set post preprocessor rule values and report converted metrics/histograms from the span
List<Annotation> processedAnnotations = wavefrontSpan.getAnnotations();
for (Annotation processedAnnotation : processedAnnotations) {
switch(processedAnnotation.getKey()) {
case APPLICATION_TAG_KEY:
applicationName = processedAnnotation.getValue();
continue;
case SERVICE_TAG_KEY:
serviceName = processedAnnotation.getValue();
continue;
case CLUSTER_TAG_KEY:
cluster = processedAnnotation.getValue();
continue;
case SHARD_TAG_KEY:
shard = processedAnnotation.getValue();
continue;
case COMPONENT_TAG_KEY:
componentTagValue = processedAnnotation.getValue();
continue;
case ERROR_TAG_KEY:
isError = processedAnnotation.getValue().equals(ERROR_SPAN_TAG_VAL);
continue;
}
}
List<Pair<String, String>> spanTags = processedAnnotations.stream().map(a -> new Pair<>(a.getKey(), a.getValue())).collect(Collectors.toList());
discoveredHeartbeatMetrics.add(reportWavefrontGeneratedData(wfInternalReporter, wavefrontSpan.getName(), applicationName, serviceName, cluster, shard, wavefrontSpan.getSource(), componentTagValue, isError, toMicros(span.getDuration()), traceDerivedCustomTagKeys, spanTags, true));
}
}
Use of com.wavefront.agent.handlers.ReportableEntityHandler in the wavefrontHQ/java project.
Class JaegerThriftUtils, method processSpan:
/**
 * Converts a Jaeger Thrift {@code Span} into a Wavefront {@code Span} (and optional
 * {@code SpanLogs}), runs the optional preprocessor, applies sampling, and reports the
 * results. When {@code wfInternalReporter} is non-null, derived RED metrics are emitted
 * regardless of the sampling decision. Indexed tag precedence:
 * Span Level > Process Level > Proxy Level > Default.
 *
 * @param span                       inbound Jaeger Thrift span
 * @param serviceName                mandatory Jaeger service name
 * @param sourceName                 default source; overridden by a span-level "source" tag
 * @param applicationName            default application; overridden by a span-level tag
 * @param cluster                    default cluster; overridden by a span-level tag
 * @param shard                      default shard; overridden by a span-level tag
 * @param processAnnotations         process-level annotations (copied, never mutated)
 * @param spanHandler                handler receiving converted spans
 * @param spanLogsHandler            handler receiving converted span logs
 * @param wfInternalReporter         if non-null, derived metrics are reported from the span
 * @param spanLogsDisabled           feature-flag supplier for span logs
 * @param preprocessorSupplier       optional span preprocessor (transform + filter)
 * @param sampler                    span sampler; derived stats still reported when unsampled
 * @param traceDerivedCustomTagKeys  custom tag keys propagated into derived metrics
 * @param discardedSpansBySampler    counter incremented for spans dropped by the sampler
 * @param discoveredHeartbeatMetrics accumulator of heartbeat metric identities
 */
private static void processSpan(io.jaegertracing.thriftjava.Span span, String serviceName, String sourceName, String applicationName, String cluster, String shard, List<Annotation> processAnnotations, ReportableEntityHandler<Span, String> spanHandler, ReportableEntityHandler<SpanLogs, String> spanLogsHandler, @Nullable WavefrontInternalReporter wfInternalReporter, Supplier<Boolean> spanLogsDisabled, Supplier<ReportableEntityPreprocessor> preprocessorSupplier, SpanSampler sampler, Set<String> traceDerivedCustomTagKeys, Counter discardedSpansBySampler, Set<Pair<Map<String, String>, String>> discoveredHeartbeatMetrics) {
  List<Annotation> annotations = new ArrayList<>(processAnnotations);
  // Preserve the original Jaeger ids as annotations: the 128-bit trace id (high/low longs)
  // is rendered as a UUID with dashes removed and leading zeros stripped.
  String traceId = new UUID(span.getTraceIdHigh(), span.getTraceIdLow()).toString();
  String strippedTraceId = StringUtils.stripStart(traceId.replace("-", ""), "0");
  strippedTraceId = strippedTraceId.length() > 0 ? strippedTraceId : "0";
  annotations.add(new Annotation("jaegerSpanId", Long.toHexString(span.getSpanId())));
  annotations.add(new Annotation("jaegerTraceId", strippedTraceId));
  // serviceName is mandatory in Jaeger
  annotations.add(new Annotation(SERVICE_TAG_KEY, serviceName));
  long parentSpanId = span.getParentSpanId();
  if (parentSpanId != 0) {
    annotations.add(new Annotation("parent", new UUID(0, parentSpanId).toString()));
  }
  String componentTagValue = NULL_TAG_VAL;
  boolean isError = false;
  // Promote well-known span tags (application/cluster/shard/source) into locals; all other
  // convertible tags are kept as span annotations.
  if (span.getTags() != null) {
    for (Tag tag : span.getTags()) {
      if (IGNORE_TAGS.contains(tag.getKey()) || (tag.vType == TagType.STRING && StringUtils.isBlank(tag.getVStr()))) {
        continue;
      }
      Annotation annotation = tagToAnnotation(tag);
      if (annotation != null) {
        switch (annotation.getKey()) {
          case APPLICATION_TAG_KEY:
            applicationName = annotation.getValue();
            continue;
          case CLUSTER_TAG_KEY:
            cluster = annotation.getValue();
            continue;
          case SHARD_TAG_KEY:
            shard = annotation.getValue();
            continue;
          case SOURCE_KEY:
            // Do not add source to annotation span tag list.
            sourceName = annotation.getValue();
            continue;
          case SERVICE_TAG_KEY:
            // Do not use service tag from annotations, use field instead
            continue;
          case COMPONENT_TAG_KEY:
            componentTagValue = annotation.getValue();
            break;
          case ERROR_TAG_KEY:
            // only error=true is supported
            isError = annotation.getValue().equals(ERROR_SPAN_TAG_VAL);
            break;
        }
        annotations.add(annotation);
      }
    }
  }
  // Add all wavefront indexed tags. These are set based on below hierarchy.
  // Span Level > Process Level > Proxy Level > Default
  annotations.add(new Annotation(APPLICATION_TAG_KEY, applicationName));
  annotations.add(new Annotation(CLUSTER_TAG_KEY, cluster));
  annotations.add(new Annotation(SHARD_TAG_KEY, shard));
  // Map Jaeger references to Wavefront lineage annotations:
  // CHILD_OF -> parent, FOLLOWS_FROM -> followsFrom.
  if (span.getReferences() != null) {
    for (SpanRef reference : span.getReferences()) {
      switch (reference.refType) {
        case CHILD_OF:
          if (reference.getSpanId() != 0 && reference.getSpanId() != parentSpanId) {
            annotations.add(new Annotation(TraceConstants.PARENT_KEY, new UUID(0, reference.getSpanId()).toString()));
          }
          // BUGFIX: break added — previously fell through into FOLLOWS_FROM, so every
          // CHILD_OF reference was ALSO tagged as followsFrom. The protobuf twin
          // (JaegerProtobufUtils.processSpan) breaks here, confirming intent.
          break;
        case FOLLOWS_FROM:
          if (reference.getSpanId() != 0) {
            annotations.add(new Annotation(TraceConstants.FOLLOWS_FROM_KEY, new UUID(0, reference.getSpanId()).toString()));
          }
          break;
        default:
      }
    }
  }
  // NOTE(review): this gate reads spanLogsDisabled.get() directly, while the actual span-log
  // report below uses isFeatureDisabled(spanLogsDisabled, ...) — presumably equivalent; confirm.
  if (!spanLogsDisabled.get() && span.getLogs() != null && !span.getLogs().isEmpty()) {
    annotations.add(new Annotation("_spanLogs", "true"));
  }
  // Thrift timestamps/durations are in microseconds; Wavefront spans use milliseconds.
  Span wavefrontSpan = Span.newBuilder().setCustomer("dummy").setName(span.getOperationName()).setSource(sourceName).setSpanId(new UUID(0, span.getSpanId()).toString()).setTraceId(traceId).setStartMillis(span.getStartTime() / 1000).setDuration(span.getDuration() / 1000).setAnnotations(annotations).build();
  // Log Jaeger spans as well as Wavefront spans for debugging purposes.
  if (JAEGER_DATA_LOGGER.isLoggable(Level.FINEST)) {
    JAEGER_DATA_LOGGER.info("Inbound Jaeger span: " + span.toString());
    JAEGER_DATA_LOGGER.info("Converted Wavefront span: " + wavefrontSpan.toString());
  }
  // The preprocessor may mutate the span in place; a failed filter either rejects it
  // (with a message) or silently blocks it, and nothing further is reported.
  if (preprocessorSupplier != null) {
    ReportableEntityPreprocessor preprocessor = preprocessorSupplier.get();
    String[] messageHolder = new String[1];
    preprocessor.forSpan().transform(wavefrontSpan);
    if (!preprocessor.forSpan().filter(wavefrontSpan, messageHolder)) {
      if (messageHolder[0] != null) {
        spanHandler.reject(wavefrontSpan, messageHolder[0]);
      } else {
        spanHandler.block(wavefrontSpan);
      }
      return;
    }
  }
  if (sampler.sample(wavefrontSpan, discardedSpansBySampler)) {
    spanHandler.report(wavefrontSpan);
    // Span logs are only emitted for sampled spans; each Jaeger log field is flattened
    // to a string keyed by field name (BINARY fields are dropped).
    if (span.getLogs() != null && !span.getLogs().isEmpty() && !isFeatureDisabled(spanLogsDisabled, SPANLOGS_DISABLED, null)) {
      SpanLogs spanLogs = SpanLogs.newBuilder().setCustomer("default").setTraceId(wavefrontSpan.getTraceId()).setSpanId(wavefrontSpan.getSpanId()).setLogs(span.getLogs().stream().map(x -> {
        Map<String, String> fields = new HashMap<>(x.fields.size());
        x.fields.forEach(t -> {
          switch (t.vType) {
            case STRING:
              fields.put(t.getKey(), t.getVStr());
              break;
            case BOOL:
              fields.put(t.getKey(), String.valueOf(t.isVBool()));
              break;
            case LONG:
              fields.put(t.getKey(), String.valueOf(t.getVLong()));
              break;
            case DOUBLE:
              fields.put(t.getKey(), String.valueOf(t.getVDouble()));
              break;
            case BINARY:
              // ignore
            default:
          }
        });
        return SpanLog.newBuilder().setTimestamp(x.timestamp).setFields(fields).build();
      }).collect(Collectors.toList())).build();
      spanLogsHandler.report(spanLogs);
    }
  }
  // report stats irrespective of span sampling.
  if (wfInternalReporter != null) {
    // Set post preprocessor rule values and report converted metrics/histograms from the span
    List<Annotation> processedAnnotations = wavefrontSpan.getAnnotations();
    for (Annotation processedAnnotation : processedAnnotations) {
      switch (processedAnnotation.getKey()) {
        case APPLICATION_TAG_KEY:
          applicationName = processedAnnotation.getValue();
          continue;
        case SERVICE_TAG_KEY:
          serviceName = processedAnnotation.getValue();
          continue;
        case CLUSTER_TAG_KEY:
          cluster = processedAnnotation.getValue();
          continue;
        case SHARD_TAG_KEY:
          shard = processedAnnotation.getValue();
          continue;
        case COMPONENT_TAG_KEY:
          componentTagValue = processedAnnotation.getValue();
          continue;
        case ERROR_TAG_KEY:
          isError = processedAnnotation.getValue().equals(ERROR_SPAN_TAG_VAL);
          continue;
      }
    }
    List<Pair<String, String>> spanTags = processedAnnotations.stream().map(a -> new Pair<>(a.getKey(), a.getValue())).collect(Collectors.toList());
    // TODO: Modify to use new method from wavefront internal reporter.
    discoveredHeartbeatMetrics.add(reportWavefrontGeneratedData(wfInternalReporter, wavefrontSpan.getName(), applicationName, serviceName, cluster, shard, wavefrontSpan.getSource(), componentTagValue, isError, span.getDuration(), traceDerivedCustomTagKeys, spanTags, true));
  }
}
Use of com.wavefront.agent.handlers.ReportableEntityHandler in the wavefrontHQ/java project.
Class LogsIngesterTest, method setup:
/**
 * Builds the ingestion pipeline under test: validates the supplied logs-ingestion config,
 * wires mock point/histogram handlers through a mock handler factory, and starts the
 * logs/filebeat/raw-logs ingesters that the test cases drive.
 */
private void setup(LogsIngestionConfig config) throws IOException, GrokException, ConfigurationException {
logsIngestionConfig = config;
// HACK: Never call flush automatically.
logsIngestionConfig.aggregationIntervalSeconds = 10000;
logsIngestionConfig.verifyAndInit();
// EasyMock handlers; the mock factory hands back the matching handler for the
// "logs-ingester" handle, per entity type, any number of times.
mockPointHandler = createMock(ReportableEntityHandler.class);
mockHistogramHandler = createMock(ReportableEntityHandler.class);
mockFactory = createMock(ReportableEntityHandlerFactory.class);
expect((ReportableEntityHandler) mockFactory.getHandler(HandlerKey.of(ReportableEntityType.POINT, "logs-ingester"))).andReturn(mockPointHandler).anyTimes();
expect((ReportableEntityHandler) mockFactory.getHandler(HandlerKey.of(ReportableEntityType.HISTOGRAM, "logs-ingester"))).andReturn(mockHistogramHandler).anyTimes();
// Only the factory is replayed here; handler mocks are presumably replayed/verified by
// the individual tests — confirm.
replay(mockFactory);
logsIngesterUnderTest = new LogsIngester(mockFactory, () -> logsIngestionConfig, null, now::get, nanos::get);
logsIngesterUnderTest.start();
filebeatIngesterUnderTest = new FilebeatIngester(logsIngesterUnderTest, now::get);
// Raw-logs handler resolves every client to "testHost" and uses a no-op health check.
rawLogsIngesterUnderTest = new RawLogsIngesterPortUnificationHandler("12345", logsIngesterUnderTest, x -> "testHost", TokenAuthenticatorBuilder.create().build(), new NoopHealthCheckManager(), null);
}
Use of com.wavefront.agent.handlers.ReportableEntityHandler in the wavefrontHQ/java project.
Class PushAgent, method startRelayListener:
/**
 * Starts a relay (proxy-chaining) listener on the given port. When push-relay histogram
 * aggregation is enabled, HISTOGRAM traffic is diverted into a local ChronicleMap-backed
 * accumulation handler instead of being forwarded through the supplied factory; all other
 * entity types use the factory unchanged. SOURCE_TAG decoding is excluded from relay traffic.
 */
@VisibleForTesting
protected void startRelayListener(String strPort, ReportableEntityHandlerFactory handlerFactory, SharedGraphiteHostAnnotator hostAnnotator) {
final int port = Integer.parseInt(strPort);
registerPrefixFilter(strPort);
registerTimestampFilter(strPort);
if (proxyConfig.isHttpHealthCheckAllPorts())
healthCheckManager.enableHealthcheck(port);
// Delegating factory: intercepts only HISTOGRAM handler keys, everything else falls
// through to the wrapped delegate.
ReportableEntityHandlerFactory handlerFactoryDelegate = proxyConfig.isPushRelayHistogramAggregator() ? new DelegatingReportableEntityHandlerFactoryImpl(handlerFactory) {
@Override
public <T, U> ReportableEntityHandler<T, U> getHandler(HandlerKey handlerKey) {
if (handlerKey.getEntityType() == ReportableEntityType.HISTOGRAM) {
// Accumulator map sized/configured entirely from proxy settings; maxBloatFactor
// bounds how far the map may grow past its configured entry count.
ChronicleMap<HistogramKey, AgentDigest> accumulator = ChronicleMap.of(HistogramKey.class, AgentDigest.class).keyMarshaller(HistogramKeyMarshaller.get()).valueMarshaller(AgentDigestMarshaller.get()).entries(proxyConfig.getPushRelayHistogramAggregatorAccumulatorSize()).averageKeySize(proxyConfig.getHistogramDistAvgKeyBytes()).averageValueSize(proxyConfig.getHistogramDistAvgDigestBytes()).maxBloatFactor(1000).create();
// Digest compression is capped by the globally configured storage accuracy.
AgentDigestFactory agentDigestFactory = new AgentDigestFactory(() -> (short) Math.min(proxyConfig.getPushRelayHistogramAggregatorCompression(), entityProps.getGlobalProperties().getHistogramStorageAccuracy()), TimeUnit.SECONDS.toMillis(proxyConfig.getPushRelayHistogramAggregatorFlushSecs()), proxyConfig.getTimeProvider());
AccumulationCache cachedAccumulator = new AccumulationCache(accumulator, agentDigestFactory, 0, "histogram.accumulator.distributionRelay", null);
// noinspection unchecked
return (ReportableEntityHandler<T, U>) new HistogramAccumulationHandlerImpl(handlerKey, cachedAccumulator, proxyConfig.getPushBlockedSamples(), null, validationConfiguration, true, rate -> entityProps.get(ReportableEntityType.HISTOGRAM).reportReceivedRate(handlerKey.getHandle(), rate), blockedHistogramsLogger, VALID_HISTOGRAMS_LOGGER);
}
return delegate.getHandler(handlerKey);
}
} : handlerFactory;
// Relay ports never accept SOURCE_TAG entities; filter that decoder out.
Map<ReportableEntityType, ReportableEntityDecoder<?, ?>> filteredDecoders = decoderSupplier.get().entrySet().stream().filter(x -> !x.getKey().equals(ReportableEntityType.SOURCE_TAG)).collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue));
ChannelHandler channelHandler = new RelayPortUnificationHandler(strPort, tokenAuthenticator, healthCheckManager, filteredDecoders, handlerFactoryDelegate, preprocessors.get(strPort), hostAnnotator, () -> entityProps.get(ReportableEntityType.HISTOGRAM).isFeatureDisabled(), () -> entityProps.get(ReportableEntityType.TRACE).isFeatureDisabled(), () -> entityProps.get(ReportableEntityType.TRACE_SPAN_LOGS).isFeatureDisabled());
startAsManagedThread(port, new TcpIngester(createInitializer(channelHandler, port, proxyConfig.getPushListenerMaxReceivedLength(), proxyConfig.getPushListenerHttpBufferSize(), proxyConfig.getListenerIdleConnectionTimeout(), getSslContext(strPort), getCorsConfig(strPort)), port).withChildChannelOptions(childChannelOptions), "listener-relay-" + port);
}
Use of com.wavefront.agent.handlers.ReportableEntityHandler in the wavefrontHQ/java project.
Class PushAgent, method startHistogramListeners:
/**
 * Starts histogram listeners on the given ports for one granularity. Sets up a (optionally
 * persisted) ChronicleMap accumulator with an in-memory cache, schedules periodic size
 * monitoring, write-back flushes and point dispatch, registers a graceful shutdown task,
 * and binds a Wavefront port-unification handler per port backed by a histogram
 * accumulation handler factory.
 */
protected void startHistogramListeners(List<String> ports, ReportableEntityHandler<ReportPoint, String> pointHandler, SharedGraphiteHostAnnotator hostAnnotator, @Nullable Granularity granularity, int flushSecs, boolean memoryCacheEnabled, File baseDirectory, Long accumulatorSize, int avgKeyBytes, int avgDigestBytes, short compression, boolean persist, SpanSampler sampler) throws Exception {
if (ports.size() == 0)
return;
String listenerBinType = HistogramUtils.granularityToString(granularity);
// Accumulator
if (persist) {
// Check directory
checkArgument(baseDirectory.isDirectory(), baseDirectory.getAbsolutePath() + " must be a directory!");
checkArgument(baseDirectory.canWrite(), baseDirectory.getAbsolutePath() + " must be write-able!");
}
// Loads (or creates) the backing map; one accumulator file per bin granularity.
MapLoader<HistogramKey, AgentDigest, HistogramKeyMarshaller, AgentDigestMarshaller> mapLoader = new MapLoader<>(HistogramKey.class, AgentDigest.class, accumulatorSize, avgKeyBytes, avgDigestBytes, HistogramKeyMarshaller.get(), AgentDigestMarshaller.get(), persist);
File accumulationFile = new File(baseDirectory, "accumulator." + listenerBinType);
ChronicleMap<HistogramKey, AgentDigest> accumulator = mapLoader.get(accumulationFile);
// Periodic watchdog: warn at 2x the configured size, escalate to severe at 5x.
histogramExecutor.scheduleWithFixedDelay(() -> {
// as ChronicleMap starts losing efficiency
if (accumulator.size() > accumulatorSize * 5) {
logger.severe("Histogram " + listenerBinType + " accumulator size (" + accumulator.size() + ") is more than 5x higher than currently configured size (" + accumulatorSize + "), which may cause severe performance degradation issues " + "or data loss! If the data volume is expected to stay at this level, we strongly " + "recommend increasing the value for accumulator size in wavefront.conf and " + "restarting the proxy.");
} else if (accumulator.size() > accumulatorSize * 2) {
logger.warning("Histogram " + listenerBinType + " accumulator size (" + accumulator.size() + ") is more than 2x higher than currently configured size (" + accumulatorSize + "), which may cause performance issues. If the data volume is " + "expected to stay at this level, we strongly recommend increasing the value " + "for accumulator size in wavefront.conf and restarting the proxy.");
}
}, 10, 10, TimeUnit.SECONDS);
// Digest compression is capped by the globally configured storage accuracy.
AgentDigestFactory agentDigestFactory = new AgentDigestFactory(() -> (short) Math.min(compression, entityProps.getGlobalProperties().getHistogramStorageAccuracy()), TimeUnit.SECONDS.toMillis(flushSecs), proxyConfig.getTimeProvider());
Accumulator cachedAccumulator = new AccumulationCache(accumulator, agentDigestFactory, (memoryCacheEnabled ? accumulatorSize : 0), "histogram.accumulator." + HistogramUtils.granularityToString(granularity), null);
// Schedule write-backs
histogramExecutor.scheduleWithFixedDelay(cachedAccumulator::flush, proxyConfig.getHistogramAccumulatorResolveInterval(), proxyConfig.getHistogramAccumulatorResolveInterval(), TimeUnit.MILLISECONDS);
histogramFlushRunnables.add(cachedAccumulator::flush);
// Dispatcher drains ripe digests from the accumulator to the point handler; a negative
// configured max batch size means "no batch limit" (null).
PointHandlerDispatcher dispatcher = new PointHandlerDispatcher(cachedAccumulator, pointHandler, proxyConfig.getTimeProvider(), () -> entityProps.get(ReportableEntityType.HISTOGRAM).isFeatureDisabled(), proxyConfig.getHistogramAccumulatorFlushMaxBatchSize() < 0 ? null : proxyConfig.getHistogramAccumulatorFlushMaxBatchSize(), granularity);
histogramExecutor.scheduleWithFixedDelay(dispatcher, proxyConfig.getHistogramAccumulatorFlushInterval(), proxyConfig.getHistogramAccumulatorFlushInterval(), TimeUnit.MILLISECONDS);
histogramFlushRunnables.add(dispatcher);
// gracefully shutdown persisted accumulator (ChronicleMap) on proxy exit
shutdownTasks.add(() -> {
try {
logger.fine("Flushing in-flight histogram accumulator digests: " + listenerBinType);
cachedAccumulator.flush();
logger.fine("Shutting down histogram accumulator cache: " + listenerBinType);
accumulator.close();
} catch (Throwable t) {
logger.log(Level.SEVERE, "Error flushing " + listenerBinType + " accumulator, possibly unclean shutdown: ", t);
}
});
// Factory caches one accumulation handler per handler key; all ports share the same
// cached accumulator.
ReportableEntityHandlerFactory histogramHandlerFactory = new ReportableEntityHandlerFactory() {
private final Map<HandlerKey, ReportableEntityHandler<?, ?>> handlers = new ConcurrentHashMap<>();
@SuppressWarnings("unchecked")
@Override
public <T, U> ReportableEntityHandler<T, U> getHandler(HandlerKey handlerKey) {
return (ReportableEntityHandler<T, U>) handlers.computeIfAbsent(handlerKey, k -> new HistogramAccumulationHandlerImpl(handlerKey, cachedAccumulator, proxyConfig.getPushBlockedSamples(), granularity, validationConfiguration, granularity == null, null, blockedHistogramsLogger, VALID_HISTOGRAMS_LOGGER));
}
@Override
public void shutdown(@Nonnull String handle) {
// NOTE(review): shuts down ALL cached handlers regardless of the requested handle — confirm intended.
handlers.values().forEach(ReportableEntityHandler::shutdown);
}
};
// Bind one TCP listener per configured port.
ports.forEach(strPort -> {
int port = Integer.parseInt(strPort);
registerPrefixFilter(strPort);
registerTimestampFilter(strPort);
if (proxyConfig.isHttpHealthCheckAllPorts()) {
healthCheckManager.enableHealthcheck(port);
}
WavefrontPortUnificationHandler wavefrontPortUnificationHandler = new WavefrontPortUnificationHandler(strPort, tokenAuthenticator, healthCheckManager, decoderSupplier.get(), histogramHandlerFactory, hostAnnotator, preprocessors.get(strPort), () -> entityProps.get(ReportableEntityType.HISTOGRAM).isFeatureDisabled(), () -> entityProps.get(ReportableEntityType.TRACE).isFeatureDisabled(), () -> entityProps.get(ReportableEntityType.TRACE_SPAN_LOGS).isFeatureDisabled(), sampler);
startAsManagedThread(port, new TcpIngester(createInitializer(wavefrontPortUnificationHandler, port, proxyConfig.getHistogramMaxReceivedLength(), proxyConfig.getHistogramHttpBufferSize(), proxyConfig.getListenerIdleConnectionTimeout(), getSslContext(strPort), getCorsConfig(strPort)), port).withChildChannelOptions(childChannelOptions), "listener-histogram-" + port);
logger.info("listening on port: " + port + " for histogram samples, accumulating to the " + listenerBinType);
});
}
Aggregations