Use of com.wavefront.agent.channel.SharedGraphiteHostAnnotator in project java by wavefrontHQ.
Class PushAgent, method startListeners:
@Override
protected void startListeners() throws Exception {
blockedPointsLogger = Logger.getLogger(proxyConfig.getBlockedPointsLoggerName());
blockedHistogramsLogger = Logger.getLogger(proxyConfig.getBlockedHistogramsLoggerName());
blockedSpansLogger = Logger.getLogger(proxyConfig.getBlockedSpansLoggerName());
if (proxyConfig.getSoLingerTime() >= 0) {
childChannelOptions.put(ChannelOption.SO_LINGER, proxyConfig.getSoLingerTime());
}
hostnameResolver = new CachingHostnameLookupResolver(proxyConfig.isDisableRdnsLookup(), ExpectedAgentMetric.RDNS_CACHE_SIZE.metricName);
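// Choose the backing store for queued points: SQS-based queues when sqsQueueBuffer is enabled, otherwise local file-backed (optionally sharded) buffers.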
if (proxyConfig.isSqsQueueBuffer()) {
taskQueueFactory = new SQSQueueFactoryImpl(proxyConfig.getSqsQueueNameTemplate(), proxyConfig.getSqsQueueRegion(), proxyConfig.getSqsQueueIdentifier(), proxyConfig.isPurgeBuffer());
} else {
taskQueueFactory = new TaskQueueFactoryImpl(proxyConfig.getBufferFile(), proxyConfig.isPurgeBuffer(), proxyConfig.isDisableBufferSharding(), proxyConfig.getBufferShardSize());
}
remoteHostAnnotator = new SharedGraphiteHostAnnotator(proxyConfig.getCustomSourceTags(), hostnameResolver);
queueingFactory = new QueueingFactoryImpl(apiContainer, agentId, taskQueueFactory, entityProps);
senderTaskFactory = new SenderTaskFactoryImpl(apiContainer, agentId, taskQueueFactory, queueingFactory, entityProps);
if (proxyConfig.isHistogramPassthroughRecompression()) {
histogramRecompressor = new HistogramRecompressor(() -> entityProps.getGlobalProperties().getHistogramStorageAccuracy());
}
handlerFactory = new ReportableEntityHandlerFactoryImpl(senderTaskFactory, proxyConfig.getPushBlockedSamples(), validationConfiguration, blockedPointsLogger, blockedHistogramsLogger, blockedSpansLogger, histogramRecompressor, entityProps);
if (proxyConfig.isTrafficShaping()) {
new TrafficShapingRateLimitAdjuster(entityProps, proxyConfig.getTrafficShapingWindowSeconds(), proxyConfig.getTrafficShapingHeadroom()).start();
}
healthCheckManager = new HealthCheckManagerImpl(proxyConfig);
tokenAuthenticator = configureTokenAuthenticator();
shutdownTasks.add(() -> senderTaskFactory.shutdown());
shutdownTasks.add(() -> senderTaskFactory.drainBuffersToQueue(null));
// sampler for spans
rateSampler.setSamplingRate(entityProps.getGlobalProperties().getTraceSamplingRate());
Sampler durationSampler = SpanSamplerUtils.getDurationSampler(proxyConfig.getTraceSamplingDuration());
List<Sampler> samplers = SpanSamplerUtils.fromSamplers(rateSampler, durationSampler);
SpanSampler spanSampler = new SpanSampler(new CompositeSampler(samplers), () -> entityProps.getGlobalProperties().getActiveSpanSamplingPolicies());
if (proxyConfig.getAdminApiListenerPort() > 0) {
startAdminListener(proxyConfig.getAdminApiListenerPort());
}
csvToList(proxyConfig.getHttpHealthCheckPorts()).forEach(strPort -> startHealthCheckListener(Integer.parseInt(strPort)));
csvToList(proxyConfig.getPushListenerPorts()).forEach(strPort -> {
startGraphiteListener(strPort, handlerFactory, remoteHostAnnotator, spanSampler);
logger.info("listening on port: " + strPort + " for Wavefront metrics");
});
csvToList(proxyConfig.getDeltaCountersAggregationListenerPorts()).forEach(strPort -> {
startDeltaCounterListener(strPort, remoteHostAnnotator, senderTaskFactory, spanSampler);
logger.info("listening on port: " + strPort + " for Wavefront delta counter metrics");
});
{
// Histogram bootstrap.
List<String> histMinPorts = csvToList(proxyConfig.getHistogramMinuteListenerPorts());
List<String> histHourPorts = csvToList(proxyConfig.getHistogramHourListenerPorts());
List<String> histDayPorts = csvToList(proxyConfig.getHistogramDayListenerPorts());
List<String> histDistPorts = csvToList(proxyConfig.getHistogramDistListenerPorts());
int activeHistogramAggregationTypes = (histDayPorts.size() > 0 ? 1 : 0) + (histHourPorts.size() > 0 ? 1 : 0) + (histMinPorts.size() > 0 ? 1 : 0) + (histDistPorts.size() > 0 ? 1 : 0);
if (activeHistogramAggregationTypes > 0) {
// Histograms enabled
histogramExecutor = Executors.newScheduledThreadPool(1 + activeHistogramAggregationTypes, new NamedThreadFactory("histogram-service"));
histogramFlushExecutor = Executors.newScheduledThreadPool(Runtime.getRuntime().availableProcessors() / 2, new NamedThreadFactory("histogram-flush"));
managedExecutors.add(histogramExecutor);
managedExecutors.add(histogramFlushExecutor);
File baseDirectory = new File(proxyConfig.getHistogramStateDirectory());
// Central dispatch
ReportableEntityHandler<ReportPoint, String> pointHandler = handlerFactory.getHandler(HandlerKey.of(ReportableEntityType.HISTOGRAM, "histogram_ports"));
startHistogramListeners(histMinPorts, pointHandler, remoteHostAnnotator, Granularity.MINUTE, proxyConfig.getHistogramMinuteFlushSecs(), proxyConfig.isHistogramMinuteMemoryCache(), baseDirectory, proxyConfig.getHistogramMinuteAccumulatorSize(), proxyConfig.getHistogramMinuteAvgKeyBytes(), proxyConfig.getHistogramMinuteAvgDigestBytes(), proxyConfig.getHistogramMinuteCompression(), proxyConfig.isHistogramMinuteAccumulatorPersisted(), spanSampler);
startHistogramListeners(histHourPorts, pointHandler, remoteHostAnnotator, Granularity.HOUR, proxyConfig.getHistogramHourFlushSecs(), proxyConfig.isHistogramHourMemoryCache(), baseDirectory, proxyConfig.getHistogramHourAccumulatorSize(), proxyConfig.getHistogramHourAvgKeyBytes(), proxyConfig.getHistogramHourAvgDigestBytes(), proxyConfig.getHistogramHourCompression(), proxyConfig.isHistogramHourAccumulatorPersisted(), spanSampler);
startHistogramListeners(histDayPorts, pointHandler, remoteHostAnnotator, Granularity.DAY, proxyConfig.getHistogramDayFlushSecs(), proxyConfig.isHistogramDayMemoryCache(), baseDirectory, proxyConfig.getHistogramDayAccumulatorSize(), proxyConfig.getHistogramDayAvgKeyBytes(), proxyConfig.getHistogramDayAvgDigestBytes(), proxyConfig.getHistogramDayCompression(), proxyConfig.isHistogramDayAccumulatorPersisted(), spanSampler);
startHistogramListeners(histDistPorts, pointHandler, remoteHostAnnotator, null, proxyConfig.getHistogramDistFlushSecs(), proxyConfig.isHistogramDistMemoryCache(), baseDirectory, proxyConfig.getHistogramDistAccumulatorSize(), proxyConfig.getHistogramDistAvgKeyBytes(), proxyConfig.getHistogramDistAvgDigestBytes(), proxyConfig.getHistogramDistCompression(), proxyConfig.isHistogramDistAccumulatorPersisted(), spanSampler);
}
}
if (StringUtils.isNotBlank(proxyConfig.getGraphitePorts()) || StringUtils.isNotBlank(proxyConfig.getPicklePorts())) {
if (tokenAuthenticator.authRequired()) {
logger.warning("Graphite mode is not compatible with HTTP authentication, ignoring");
} else {
Preconditions.checkNotNull(proxyConfig.getGraphiteFormat(), "graphiteFormat must be supplied to enable graphite support");
Preconditions.checkNotNull(proxyConfig.getGraphiteDelimiters(), "graphiteDelimiters must be supplied to enable graphite support");
GraphiteFormatter graphiteFormatter = new GraphiteFormatter(proxyConfig.getGraphiteFormat(), proxyConfig.getGraphiteDelimiters(), proxyConfig.getGraphiteFieldsToRemove());
csvToList(proxyConfig.getGraphitePorts()).forEach(strPort -> {
preprocessors.getSystemPreprocessor(strPort).forPointLine().addTransformer(0, graphiteFormatter);
startGraphiteListener(strPort, handlerFactory, null, spanSampler);
logger.info("listening on port: " + strPort + " for graphite metrics");
});
csvToList(proxyConfig.getPicklePorts()).forEach(strPort -> startPickleListener(strPort, handlerFactory, graphiteFormatter));
}
}
csvToList(proxyConfig.getOpentsdbPorts()).forEach(strPort -> startOpenTsdbListener(strPort, handlerFactory));
if (proxyConfig.getDataDogJsonPorts() != null) {
HttpClient httpClient = HttpClientBuilder.create()
    .useSystemProperties()
    .setUserAgent(proxyConfig.getHttpUserAgent())
    .setConnectionTimeToLive(1, TimeUnit.MINUTES)
    .setMaxConnPerRoute(100)
    .setMaxConnTotal(100)
    .setRetryHandler(new DefaultHttpRequestRetryHandler(proxyConfig.getHttpAutoRetries(), true))
    .setDefaultRequestConfig(RequestConfig.custom()
        .setContentCompressionEnabled(true)
        .setRedirectsEnabled(true)
        .setConnectTimeout(proxyConfig.getHttpConnectTimeout())
        .setConnectionRequestTimeout(proxyConfig.getHttpConnectTimeout())
        .setSocketTimeout(proxyConfig.getHttpRequestTimeout())
        .build())
    .build();
csvToList(proxyConfig.getDataDogJsonPorts()).forEach(strPort -> startDataDogListener(strPort, handlerFactory, httpClient));
}
csvToList(proxyConfig.getTraceListenerPorts()).forEach(strPort -> startTraceListener(strPort, handlerFactory, spanSampler));
csvToList(proxyConfig.getCustomTracingListenerPorts()).forEach(strPort -> startCustomTracingListener(strPort, handlerFactory, new InternalProxyWavefrontClient(handlerFactory, strPort), spanSampler));
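// Distributed tracing listeners: each Jaeger (Thrift, gRPC, HTTP) and Zipkin port below registers a span-sanitizing preprocessor transformer before its listener is started.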
csvToList(proxyConfig.getTraceJaegerListenerPorts()).forEach(strPort -> {
PreprocessorRuleMetrics ruleMetrics = new PreprocessorRuleMetrics(Metrics.newCounter(new TaggedMetricName("point.spanSanitize", "count", "port", strPort)), null, null);
preprocessors.getSystemPreprocessor(strPort).forSpan().addTransformer(new SpanSanitizeTransformer(ruleMetrics));
startTraceJaegerListener(strPort, handlerFactory, new InternalProxyWavefrontClient(handlerFactory, strPort), spanSampler);
});
csvToList(proxyConfig.getTraceJaegerGrpcListenerPorts()).forEach(strPort -> {
PreprocessorRuleMetrics ruleMetrics = new PreprocessorRuleMetrics(Metrics.newCounter(new TaggedMetricName("point.spanSanitize", "count", "port", strPort)), null, null);
preprocessors.getSystemPreprocessor(strPort).forSpan().addTransformer(new SpanSanitizeTransformer(ruleMetrics));
startTraceJaegerGrpcListener(strPort, handlerFactory, new InternalProxyWavefrontClient(handlerFactory, strPort), spanSampler);
});
csvToList(proxyConfig.getTraceJaegerHttpListenerPorts()).forEach(strPort -> {
PreprocessorRuleMetrics ruleMetrics = new PreprocessorRuleMetrics(Metrics.newCounter(new TaggedMetricName("point.spanSanitize", "count", "port", strPort)), null, null);
preprocessors.getSystemPreprocessor(strPort).forSpan().addTransformer(new SpanSanitizeTransformer(ruleMetrics));
startTraceJaegerHttpListener(strPort, handlerFactory, new InternalProxyWavefrontClient(handlerFactory, strPort), spanSampler);
});
csvToList(proxyConfig.getTraceZipkinListenerPorts()).forEach(strPort -> {
PreprocessorRuleMetrics ruleMetrics = new PreprocessorRuleMetrics(Metrics.newCounter(new TaggedMetricName("point.spanSanitize", "count", "port", strPort)), null, null);
preprocessors.getSystemPreprocessor(strPort).forSpan().addTransformer(new SpanSanitizeTransformer(ruleMetrics));
startTraceZipkinListener(strPort, handlerFactory, new InternalProxyWavefrontClient(handlerFactory, strPort), spanSampler);
});
csvToList(proxyConfig.getPushRelayListenerPorts()).forEach(strPort -> startRelayListener(strPort, handlerFactory, remoteHostAnnotator));
csvToList(proxyConfig.getJsonListenerPorts()).forEach(strPort -> startJsonListener(strPort, handlerFactory));
csvToList(proxyConfig.getWriteHttpJsonListenerPorts()).forEach(strPort -> startWriteHttpJsonListener(strPort, handlerFactory));
// Logs ingestion.
if (proxyConfig.getFilebeatPort() > 0 || proxyConfig.getRawLogsPort() > 0) {
if (loadLogsIngestionConfig() != null) {
logger.info("Initializing logs ingestion");
try {
final LogsIngester logsIngester = new LogsIngester(handlerFactory, this::loadLogsIngestionConfig, proxyConfig.getPrefix());
logsIngester.start();
if (proxyConfig.getFilebeatPort() > 0) {
startLogsIngestionListener(proxyConfig.getFilebeatPort(), logsIngester);
}
if (proxyConfig.getRawLogsPort() > 0) {
startRawLogsIngestionListener(proxyConfig.getRawLogsPort(), logsIngester);
}
} catch (ConfigurationException e) {
logger.log(Level.SEVERE, "Cannot start logsIngestion", e);
}
} else {
logger.warning("Cannot start logsIngestion: invalid configuration or no config specified");
}
}
setupMemoryGuard();
}
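In startListeners above, the SharedGraphiteHostAnnotator is built once from the proxy's custom source tags and the caching hostname resolver, then shared by the Wavefront, delta counter, histogram and relay listeners so that point lines arriving without a source annotation can get one derived from the sender's resolved hostname. A minimal stand-in sketch of that idea follows; the SimpleHostAnnotator class and its apply(InetAddress, String) signature are simplifications for illustration, not the real class, which works from the connection's remote address via the hostname resolver shown above.

import java.net.InetAddress;
import java.util.List;
import java.util.function.Function;

// Simplified illustration only: appends a "source=" annotation to a point line
// when neither "source=" nor "host=" nor any configured custom source tag is present.
class SimpleHostAnnotator {
  private final List<String> customSourceTags;
  private final Function<InetAddress, String> hostnameResolver;

  SimpleHostAnnotator(List<String> customSourceTags,
                      Function<InetAddress, String> hostnameResolver) {
    this.customSourceTags = customSourceTags;
    this.hostnameResolver = hostnameResolver;
  }

  String apply(InetAddress remoteAddress, String pointLine) {
    if (pointLine.contains("source=") || pointLine.contains("host=")) {
      return pointLine;
    }
    for (String tag : customSourceTags) {
      if (pointLine.contains(tag + "=")) {
        return pointLine;
      }
    }
    // No source information found: annotate with the resolved client hostname.
    return pointLine + " source=\"" + hostnameResolver.apply(remoteAddress) + "\"";
  }

  public static void main(String[] args) {
    SimpleHostAnnotator annotator =
        new SimpleHostAnnotator(List.of("fqdn"), InetAddress::getHostName);
    System.out.println(annotator.apply(InetAddress.getLoopbackAddress(),
        "cpu.loadavg.1m 0.42 1462380000"));
  }
}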
Use of com.wavefront.agent.channel.SharedGraphiteHostAnnotator in project java by wavefrontHQ.
Class PushAgent, method startDeltaCounterListener:
@VisibleForTesting
protected void startDeltaCounterListener(String strPort, SharedGraphiteHostAnnotator hostAnnotator, SenderTaskFactory senderTaskFactory, SpanSampler sampler) {
final int port = Integer.parseInt(strPort);
registerPrefixFilter(strPort);
registerTimestampFilter(strPort);
if (proxyConfig.isHttpHealthCheckAllPorts())
healthCheckManager.enableHealthcheck(port);
if (this.deltaCounterHandlerFactory == null) {
this.deltaCounterHandlerFactory = new ReportableEntityHandlerFactory() {
private final Map<String, ReportableEntityHandler<?, ?>> handlers = new ConcurrentHashMap<>();
@Override
public <T, U> ReportableEntityHandler<T, U> getHandler(HandlerKey handlerKey) {
// noinspection unchecked
return (ReportableEntityHandler<T, U>) handlers.computeIfAbsent(handlerKey.getHandle(), k -> new DeltaCounterAccumulationHandlerImpl(handlerKey, proxyConfig.getPushBlockedSamples(), senderTaskFactory.createSenderTasks(handlerKey), validationConfiguration, proxyConfig.getDeltaCountersAggregationIntervalSeconds(), rate -> entityProps.get(ReportableEntityType.POINT).reportReceivedRate(handlerKey.getHandle(), rate), blockedPointsLogger, VALID_POINTS_LOGGER));
}
@Override
public void shutdown(@Nonnull String handle) {
if (handlers.containsKey(handle)) {
handlers.values().forEach(ReportableEntityHandler::shutdown);
}
}
};
}
shutdownTasks.add(() -> deltaCounterHandlerFactory.shutdown(strPort));
WavefrontPortUnificationHandler wavefrontPortUnificationHandler = new WavefrontPortUnificationHandler(strPort, tokenAuthenticator, healthCheckManager, decoderSupplier.get(), deltaCounterHandlerFactory, hostAnnotator, preprocessors.get(strPort), () -> false, () -> false, () -> false, sampler);
startAsManagedThread(port, new TcpIngester(createInitializer(wavefrontPortUnificationHandler, port, proxyConfig.getPushListenerMaxReceivedLength(), proxyConfig.getPushListenerHttpBufferSize(), proxyConfig.getListenerIdleConnectionTimeout(), getSslContext(strPort), getCorsConfig(strPort)), port).withChildChannelOptions(childChannelOptions), "listener-deltaCounter-" + port);
}
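The anonymous factory above lazily builds one DeltaCounterAccumulationHandlerImpl per listener port handle and caches it in a ConcurrentHashMap. The caching pattern in isolation looks roughly like the sketch below; HandlerCache and its factory argument are hypothetical names used only for illustration.

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.function.Consumer;
import java.util.function.Function;

// Hypothetical illustration of per-port handler caching: one handler instance
// is created lazily per port handle and reused by later lookups.
class HandlerCache<H> {
  private final Map<String, H> handlers = new ConcurrentHashMap<>();
  private final Function<String, H> factory;

  HandlerCache(Function<String, H> factory) {
    this.factory = factory;
  }

  H getHandler(String handle) {
    // computeIfAbsent yields a single handler per handle, even under concurrent lookups.
    return handlers.computeIfAbsent(handle, factory);
  }

  void shutdownAll(Consumer<H> shutdown) {
    handlers.values().forEach(shutdown);
  }

  public static void main(String[] args) {
    HandlerCache<String> cache = new HandlerCache<>(port -> "handler-for-" + port);
    System.out.println(cache.getHandler("2878"));
    cache.shutdownAll(h -> System.out.println("shutting down " + h));
  }
}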
Use of com.wavefront.agent.channel.SharedGraphiteHostAnnotator in project java by wavefrontHQ.
Class PushAgent, method startRelayListener:
@VisibleForTesting
protected void startRelayListener(String strPort, ReportableEntityHandlerFactory handlerFactory, SharedGraphiteHostAnnotator hostAnnotator) {
final int port = Integer.parseInt(strPort);
registerPrefixFilter(strPort);
registerTimestampFilter(strPort);
if (proxyConfig.isHttpHealthCheckAllPorts())
healthCheckManager.enableHealthcheck(port);
ReportableEntityHandlerFactory handlerFactoryDelegate = proxyConfig.isPushRelayHistogramAggregator() ? new DelegatingReportableEntityHandlerFactoryImpl(handlerFactory) {
@Override
public <T, U> ReportableEntityHandler<T, U> getHandler(HandlerKey handlerKey) {
if (handlerKey.getEntityType() == ReportableEntityType.HISTOGRAM) {
ChronicleMap<HistogramKey, AgentDigest> accumulator = ChronicleMap.of(HistogramKey.class, AgentDigest.class)
    .keyMarshaller(HistogramKeyMarshaller.get())
    .valueMarshaller(AgentDigestMarshaller.get())
    .entries(proxyConfig.getPushRelayHistogramAggregatorAccumulatorSize())
    .averageKeySize(proxyConfig.getHistogramDistAvgKeyBytes())
    .averageValueSize(proxyConfig.getHistogramDistAvgDigestBytes())
    .maxBloatFactor(1000)
    .create();
AgentDigestFactory agentDigestFactory = new AgentDigestFactory(() -> (short) Math.min(proxyConfig.getPushRelayHistogramAggregatorCompression(), entityProps.getGlobalProperties().getHistogramStorageAccuracy()), TimeUnit.SECONDS.toMillis(proxyConfig.getPushRelayHistogramAggregatorFlushSecs()), proxyConfig.getTimeProvider());
AccumulationCache cachedAccumulator = new AccumulationCache(accumulator, agentDigestFactory, 0, "histogram.accumulator.distributionRelay", null);
// noinspection unchecked
return (ReportableEntityHandler<T, U>) new HistogramAccumulationHandlerImpl(handlerKey, cachedAccumulator, proxyConfig.getPushBlockedSamples(), null, validationConfiguration, true, rate -> entityProps.get(ReportableEntityType.HISTOGRAM).reportReceivedRate(handlerKey.getHandle(), rate), blockedHistogramsLogger, VALID_HISTOGRAMS_LOGGER);
}
return delegate.getHandler(handlerKey);
}
} : handlerFactory;
Map<ReportableEntityType, ReportableEntityDecoder<?, ?>> filteredDecoders = decoderSupplier.get().entrySet().stream().filter(x -> !x.getKey().equals(ReportableEntityType.SOURCE_TAG)).collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue));
ChannelHandler channelHandler = new RelayPortUnificationHandler(strPort, tokenAuthenticator, healthCheckManager, filteredDecoders, handlerFactoryDelegate, preprocessors.get(strPort), hostAnnotator, () -> entityProps.get(ReportableEntityType.HISTOGRAM).isFeatureDisabled(), () -> entityProps.get(ReportableEntityType.TRACE).isFeatureDisabled(), () -> entityProps.get(ReportableEntityType.TRACE_SPAN_LOGS).isFeatureDisabled());
startAsManagedThread(port, new TcpIngester(createInitializer(channelHandler, port, proxyConfig.getPushListenerMaxReceivedLength(), proxyConfig.getPushListenerHttpBufferSize(), proxyConfig.getListenerIdleConnectionTimeout(), getSslContext(strPort), getCorsConfig(strPort)), port).withChildChannelOptions(childChannelOptions), "listener-relay-" + port);
}
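When pushRelayHistogramAggregator is enabled, the relay listener wraps the shared handler factory so that only HISTOGRAM handler requests are intercepted and aggregated locally through a ChronicleMap-backed accumulator, while every other entity type falls through to the delegate. A minimal sketch of that delegation pattern follows; the Factory and DelegatingFactory names are hypothetical and stand in for the ReportableEntityHandlerFactory types used above.

// Hypothetical illustration: a delegating factory overrides creation for one
// special key and forwards every other key to the wrapped factory.
interface Factory<K, V> {
  V create(K key);
}

class DelegatingFactory<K, V> implements Factory<K, V> {
  private final Factory<K, V> delegate;
  private final K specialKey;
  private final Factory<K, V> specialFactory;

  DelegatingFactory(Factory<K, V> delegate, K specialKey, Factory<K, V> specialFactory) {
    this.delegate = delegate;
    this.specialKey = specialKey;
    this.specialFactory = specialFactory;
  }

  @Override
  public V create(K key) {
    // Intercept only the special key; everything else goes to the wrapped factory.
    return key.equals(specialKey) ? specialFactory.create(key) : delegate.create(key);
  }

  public static void main(String[] args) {
    Factory<String, String> base = key -> "default handler for " + key;
    Factory<String, String> factory =
        new DelegatingFactory<>(base, "HISTOGRAM", key -> "aggregating handler for " + key);
    System.out.println(factory.create("HISTOGRAM"));
    System.out.println(factory.create("POINT"));
  }
}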
Use of com.wavefront.agent.channel.SharedGraphiteHostAnnotator in project java by wavefrontHQ.
Class PushAgent, method startHistogramListeners:
protected void startHistogramListeners(List<String> ports, ReportableEntityHandler<ReportPoint, String> pointHandler, SharedGraphiteHostAnnotator hostAnnotator, @Nullable Granularity granularity, int flushSecs, boolean memoryCacheEnabled, File baseDirectory, Long accumulatorSize, int avgKeyBytes, int avgDigestBytes, short compression, boolean persist, SpanSampler sampler) throws Exception {
if (ports.size() == 0)
return;
String listenerBinType = HistogramUtils.granularityToString(granularity);
// Accumulator
if (persist) {
// Check directory
checkArgument(baseDirectory.isDirectory(), baseDirectory.getAbsolutePath() + " must be a directory!");
checkArgument(baseDirectory.canWrite(), baseDirectory.getAbsolutePath() + " must be write-able!");
}
MapLoader<HistogramKey, AgentDigest, HistogramKeyMarshaller, AgentDigestMarshaller> mapLoader = new MapLoader<>(HistogramKey.class, AgentDigest.class, accumulatorSize, avgKeyBytes, avgDigestBytes, HistogramKeyMarshaller.get(), AgentDigestMarshaller.get(), persist);
File accumulationFile = new File(baseDirectory, "accumulator." + listenerBinType);
ChronicleMap<HistogramKey, AgentDigest> accumulator = mapLoader.get(accumulationFile);
histogramExecutor.scheduleWithFixedDelay(() -> {
// warn when the accumulator grows well beyond its configured size, as ChronicleMap starts losing efficiency
if (accumulator.size() > accumulatorSize * 5) {
logger.severe("Histogram " + listenerBinType + " accumulator size (" + accumulator.size() + ") is more than 5x higher than currently configured size (" + accumulatorSize + "), which may cause severe performance degradation issues " + "or data loss! If the data volume is expected to stay at this level, we strongly " + "recommend increasing the value for accumulator size in wavefront.conf and " + "restarting the proxy.");
} else if (accumulator.size() > accumulatorSize * 2) {
logger.warning("Histogram " + listenerBinType + " accumulator size (" + accumulator.size() + ") is more than 2x higher than currently configured size (" + accumulatorSize + "), which may cause performance issues. If the data volume is " + "expected to stay at this level, we strongly recommend increasing the value " + "for accumulator size in wavefront.conf and restarting the proxy.");
}
}, 10, 10, TimeUnit.SECONDS);
AgentDigestFactory agentDigestFactory = new AgentDigestFactory(() -> (short) Math.min(compression, entityProps.getGlobalProperties().getHistogramStorageAccuracy()), TimeUnit.SECONDS.toMillis(flushSecs), proxyConfig.getTimeProvider());
Accumulator cachedAccumulator = new AccumulationCache(accumulator, agentDigestFactory, (memoryCacheEnabled ? accumulatorSize : 0), "histogram.accumulator." + HistogramUtils.granularityToString(granularity), null);
// Schedule write-backs
histogramExecutor.scheduleWithFixedDelay(cachedAccumulator::flush, proxyConfig.getHistogramAccumulatorResolveInterval(), proxyConfig.getHistogramAccumulatorResolveInterval(), TimeUnit.MILLISECONDS);
histogramFlushRunnables.add(cachedAccumulator::flush);
PointHandlerDispatcher dispatcher = new PointHandlerDispatcher(cachedAccumulator, pointHandler, proxyConfig.getTimeProvider(), () -> entityProps.get(ReportableEntityType.HISTOGRAM).isFeatureDisabled(), proxyConfig.getHistogramAccumulatorFlushMaxBatchSize() < 0 ? null : proxyConfig.getHistogramAccumulatorFlushMaxBatchSize(), granularity);
histogramExecutor.scheduleWithFixedDelay(dispatcher, proxyConfig.getHistogramAccumulatorFlushInterval(), proxyConfig.getHistogramAccumulatorFlushInterval(), TimeUnit.MILLISECONDS);
histogramFlushRunnables.add(dispatcher);
// gracefully shutdown persisted accumulator (ChronicleMap) on proxy exit
shutdownTasks.add(() -> {
try {
logger.fine("Flushing in-flight histogram accumulator digests: " + listenerBinType);
cachedAccumulator.flush();
logger.fine("Shutting down histogram accumulator cache: " + listenerBinType);
accumulator.close();
} catch (Throwable t) {
logger.log(Level.SEVERE, "Error flushing " + listenerBinType + " accumulator, possibly unclean shutdown: ", t);
}
});
ReportableEntityHandlerFactory histogramHandlerFactory = new ReportableEntityHandlerFactory() {
private final Map<HandlerKey, ReportableEntityHandler<?, ?>> handlers = new ConcurrentHashMap<>();
@SuppressWarnings("unchecked")
@Override
public <T, U> ReportableEntityHandler<T, U> getHandler(HandlerKey handlerKey) {
return (ReportableEntityHandler<T, U>) handlers.computeIfAbsent(handlerKey, k -> new HistogramAccumulationHandlerImpl(handlerKey, cachedAccumulator, proxyConfig.getPushBlockedSamples(), granularity, validationConfiguration, granularity == null, null, blockedHistogramsLogger, VALID_HISTOGRAMS_LOGGER));
}
@Override
public void shutdown(@Nonnull String handle) {
handlers.values().forEach(ReportableEntityHandler::shutdown);
}
};
ports.forEach(strPort -> {
int port = Integer.parseInt(strPort);
registerPrefixFilter(strPort);
registerTimestampFilter(strPort);
if (proxyConfig.isHttpHealthCheckAllPorts()) {
healthCheckManager.enableHealthcheck(port);
}
WavefrontPortUnificationHandler wavefrontPortUnificationHandler = new WavefrontPortUnificationHandler(strPort, tokenAuthenticator, healthCheckManager, decoderSupplier.get(), histogramHandlerFactory, hostAnnotator, preprocessors.get(strPort), () -> entityProps.get(ReportableEntityType.HISTOGRAM).isFeatureDisabled(), () -> entityProps.get(ReportableEntityType.TRACE).isFeatureDisabled(), () -> entityProps.get(ReportableEntityType.TRACE_SPAN_LOGS).isFeatureDisabled(), sampler);
startAsManagedThread(port, new TcpIngester(createInitializer(wavefrontPortUnificationHandler, port, proxyConfig.getHistogramMaxReceivedLength(), proxyConfig.getHistogramHttpBufferSize(), proxyConfig.getListenerIdleConnectionTimeout(), getSslContext(strPort), getCorsConfig(strPort)), port).withChildChannelOptions(childChannelOptions), "listener-histogram-" + port);
logger.info("listening on port: " + port + " for histogram samples, accumulating to the " + listenerBinType);
});
}
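The scheduled task near the top of startHistogramListeners watches the accumulator's size and logs a warning once it exceeds 2x the configured size and a severe message past 5x. A standalone sketch of that watchdog pattern is shown below, using a plain LongSupplier and the hypothetical name AccumulatorSizeWatchdog instead of the ChronicleMap accumulator.

import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;
import java.util.function.LongSupplier;
import java.util.logging.Logger;

// Illustration only: periodically compares a reported size against a configured
// target and logs at the same 2x / 5x thresholds used for the histogram
// accumulator above. Class and method names are hypothetical.
class AccumulatorSizeWatchdog {
  private static final Logger logger = Logger.getLogger(AccumulatorSizeWatchdog.class.getName());

  static void schedule(ScheduledExecutorService executor, LongSupplier currentSize,
                       long configuredSize, String name) {
    executor.scheduleWithFixedDelay(() -> {
      long size = currentSize.getAsLong();
      if (size > configuredSize * 5) {
        logger.severe(name + " accumulator size " + size + " is more than 5x the configured size "
            + configuredSize + "; increase the configured accumulator size and restart the proxy.");
      } else if (size > configuredSize * 2) {
        logger.warning(name + " accumulator size " + size + " is more than 2x the configured size "
            + configuredSize + "; consider increasing the configured accumulator size.");
      }
    }, 10, 10, TimeUnit.SECONDS);
  }

  public static void main(String[] args) throws InterruptedException {
    ScheduledExecutorService executor = Executors.newSingleThreadScheduledExecutor();
    // Demo: report a size of 42,000 against a configured size of 10,000 (hits the severe path).
    schedule(executor, () -> 42_000L, 10_000L, "minute");
    Thread.sleep(11_000);
    executor.shutdownNow();
  }
}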