
Example 1 with TapeDeck

Use of com.wavefront.agent.histogram.tape.TapeDeck in project java by wavefrontHQ.

From the class PushAgent, the startListeners method, which wires up every inbound listener (Wavefront push, histograms, Graphite, OpenTSDB, pickle, HTTP JSON, and logs ingestion) from the configured ports:

@Override
protected void startListeners() {
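    // Apply the configured SO_LINGER timeout (seconds) to accepted child sockets.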
    if (soLingerTime >= 0) {
        childChannelOptions.put(ChannelOption.SO_LINGER, soLingerTime);
    }
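    // Plain Wavefront push listeners, one per configured port; the graphite
    // listeners started further down pass true as the second argument instead.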
    if (pushListenerPorts != null) {
        Iterable<String> ports = Splitter.on(",").omitEmptyStrings().trimResults().split(pushListenerPorts);
        for (String strPort : ports) {
            startGraphiteListener(strPort, false);
        }
    }
    {
        // Histogram bootstrap.
        Iterator<String> histMinPorts = Strings.isNullOrEmpty(histogramMinuteListenerPorts)
            ? Collections.emptyIterator()
            : Splitter.on(",").omitEmptyStrings().trimResults().split(histogramMinuteListenerPorts).iterator();
        Iterator<String> histHourPorts = Strings.isNullOrEmpty(histogramHourListenerPorts)
            ? Collections.emptyIterator()
            : Splitter.on(",").omitEmptyStrings().trimResults().split(histogramHourListenerPorts).iterator();
        Iterator<String> histDayPorts = Strings.isNullOrEmpty(histogramDayListenerPorts)
            ? Collections.emptyIterator()
            : Splitter.on(",").omitEmptyStrings().trimResults().split(histogramDayListenerPorts).iterator();
        Iterator<String> histDistPorts = Strings.isNullOrEmpty(histogramDistListenerPorts)
            ? Collections.emptyIterator()
            : Splitter.on(",").omitEmptyStrings().trimResults().split(histogramDistListenerPorts).iterator();
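        // Count the enabled aggregation granularities; this also sizes the
        // histogram scheduler pool created below.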
        int activeHistogramAggregationTypes =
            (histDayPorts.hasNext() ? 1 : 0) + (histHourPorts.hasNext() ? 1 : 0)
                + (histMinPorts.hasNext() ? 1 : 0) + (histDistPorts.hasNext() ? 1 : 0);
        if (activeHistogramAggregationTypes > 0) {
            /*Histograms enabled*/
            histogramExecutor = Executors.newScheduledThreadPool(1 + activeHistogramAggregationTypes, new NamedThreadFactory("histogram-service"));
            histogramFlushExecutor = Executors.newScheduledThreadPool(Runtime.getRuntime().availableProcessors() / 2, new NamedThreadFactory("histogram-flush"));
            histogramScanExecutor = Executors.newScheduledThreadPool(Runtime.getRuntime().availableProcessors() / 2, new NamedThreadFactory("histogram-scan"));
            managedExecutors.add(histogramExecutor);
            managedExecutors.add(histogramFlushExecutor);
            managedExecutors.add(histogramScanExecutor);
            File baseDirectory = new File(histogramStateDirectory);
            if (persistMessages || persistAccumulator) {
                // Check directory
                checkArgument(baseDirectory.isDirectory(), baseDirectory.getAbsolutePath() + " must be a directory!");
                checkArgument(baseDirectory.canWrite(), baseDirectory.getAbsolutePath() + " must be write-able!");
            }
            // Central dispatch
            PointHandler histogramHandler = new PointHandlerImpl("histogram ports", pushValidationLevel,
                pushBlockedSamples, prefix, getFlushTasks(Constants.PUSH_FORMAT_HISTOGRAM, "histogram ports"));
            // Input queue factory
            TapeDeck<List<String>> accumulatorDeck = new TapeDeck<>(
                persistMessagesCompression
                    ? TapeStringListConverter.getCompressionEnabledInstance()
                    : TapeStringListConverter.getDefaultInstance(),
                persistMessages);
            // Decoders
            Decoder<String> sampleDecoder = new GraphiteDecoder("unknown", customSourceTags);
            Decoder<String> distributionDecoder = new HistogramDecoder("unknown");
            if (histMinPorts.hasNext()) {
                startHistogramListeners(histMinPorts, sampleDecoder, histogramHandler, accumulatorDeck,
                    "minute", histogramMinuteFlushSecs, histogramMinuteAccumulators, histogramMinuteMemoryCache,
                    baseDirectory, histogramMinuteAccumulatorSize, histogramMinuteAvgKeyBytes,
                    histogramMinuteAvgDigestBytes, histogramMinuteCompression);
            }
            if (histHourPorts.hasNext()) {
                startHistogramListeners(histHourPorts, sampleDecoder, histogramHandler, accumulatorDeck,
                    "hour", histogramHourFlushSecs, histogramHourAccumulators, histogramHourMemoryCache,
                    baseDirectory, histogramHourAccumulatorSize, histogramHourAvgKeyBytes,
                    histogramHourAvgDigestBytes, histogramHourCompression);
            }
            if (histDayPorts.hasNext()) {
                startHistogramListeners(histDayPorts, sampleDecoder, histogramHandler, accumulatorDeck,
                    "day", histogramDayFlushSecs, histogramDayAccumulators, histogramDayMemoryCache,
                    baseDirectory, histogramDayAccumulatorSize, histogramDayAvgKeyBytes,
                    histogramDayAvgDigestBytes, histogramDayCompression);
            }
            if (histDistPorts.hasNext()) {
                startHistogramListeners(histDistPorts, distributionDecoder, histogramHandler, accumulatorDeck,
                    "distribution", histogramDistFlushSecs, histogramDistAccumulators, histogramDistMemoryCache,
                    baseDirectory, histogramDistAccumulatorSize, histogramDistAvgKeyBytes,
                    histogramDistAvgDigestBytes, histogramDistCompression);
            }
        }
    }
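    // Graphite/pickle support: a format and delimiters are mandatory, and the
    // formatter built here is reused by the pickle listeners below.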
    GraphiteFormatter graphiteFormatter = null;
    if (graphitePorts != null || picklePorts != null) {
        Preconditions.checkNotNull(graphiteFormat, "graphiteFormat must be supplied to enable graphite support");
        Preconditions.checkNotNull(graphiteDelimiters, "graphiteDelimiters must be supplied to enable graphite support");
        graphiteFormatter = new GraphiteFormatter(graphiteFormat, graphiteDelimiters, graphiteFieldsToRemove);
        // Guard against pickle-only configurations, where graphitePorts may be null.
        if (graphitePorts != null) {
            Iterable<String> ports = Splitter.on(",").omitEmptyStrings().trimResults().split(graphitePorts);
            for (String strPort : ports) {
                preprocessors.forPort(strPort).forPointLine().addTransformer(0, graphiteFormatter);
                startGraphiteListener(strPort, true);
                logger.info("listening on port: " + strPort + " for graphite metrics");
            }
        }
    }
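    // OpenTSDB listeners, one per configured port.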
    if (opentsdbPorts != null) {
        Iterable<String> ports = Splitter.on(",").omitEmptyStrings().trimResults().split(opentsdbPorts);
        for (String strPort : ports) {
            startOpenTsdbListener(strPort);
            logger.info("listening on port: " + strPort + " for OpenTSDB metrics");
        }
    }
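    // Graphite pickle-protocol listeners; these share the formatter created above.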
    if (picklePorts != null) {
        Iterable<String> ports = Splitter.on(",").omitEmptyStrings().trimResults().split(picklePorts);
        for (String strPort : ports) {
            startPickleListener(strPort, graphiteFormatter);
            logger.info("listening on port: " + strPort + " for pickle protocol metrics");
        }
    }
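    // Plaintext JSON-over-HTTP listeners, each running an embedded Jetty server
    // on its own managed thread.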
    if (httpJsonPorts != null) {
        Iterable<String> ports = Splitter.on(",").omitEmptyStrings().trimResults().split(httpJsonPorts);
        for (String strPort : ports) {
            preprocessors.forPort(strPort).forReportPoint().addFilter(new ReportPointTimestampInRangeFilter(dataBackfillCutoffHours, dataPrefillCutoffHours));
            startAsManagedThread(() -> {
                activeListeners.inc();
                try {
                    org.eclipse.jetty.server.Server server = new org.eclipse.jetty.server.Server(Integer.parseInt(strPort));
                    server.setHandler(new JsonMetricsEndpoint(strPort, hostname, prefix, pushValidationLevel, pushBlockedSamples, getFlushTasks(strPort), preprocessors.forPort(strPort)));
                    server.start();
                    server.join();
                } catch (InterruptedException e) {
                    logger.warning("Http Json server interrupted.");
                } catch (Exception e) {
                    if (e instanceof BindException) {
                        logger.severe("Unable to start listener - port " + String.valueOf(strPort) + " is already in use!");
                    }
                } finally {
                    activeListeners.dec();
                }
            }, "listener-plaintext-json-" + strPort);
        }
    }
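    // Same embedded-Jetty pattern for JSON in collectd's write_http format.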
    if (writeHttpJsonPorts != null) {
        Iterable<String> ports = Splitter.on(",").omitEmptyStrings().trimResults().split(writeHttpJsonPorts);
        for (String strPort : ports) {
            preprocessors.forPort(strPort).forReportPoint().addFilter(new ReportPointTimestampInRangeFilter(dataBackfillCutoffHours, dataPrefillCutoffHours));
            startAsManagedThread(() -> {
                activeListeners.inc();
                try {
                    org.eclipse.jetty.server.Server server = new org.eclipse.jetty.server.Server(Integer.parseInt(strPort));
                    server.setHandler(new WriteHttpJsonMetricsEndpoint(strPort, hostname, prefix, pushValidationLevel, pushBlockedSamples, getFlushTasks(strPort), preprocessors.forPort(strPort)));
                    server.start();
                    server.join();
                } catch (InterruptedException e) {
                    logger.warning("WriteHttpJson server interrupted.");
                } catch (Exception e) {
                    if (e instanceof BindException) {
                        logger.severe("Unable to start listener - port " + String.valueOf(strPort) + " is already in use!");
                    }
                } finally {
                    activeListeners.dec();
                }
            }, "listener-plaintext-writehttpjson-" + strPort);
        }
    }
    // Logs ingestion.
    if (loadLogsIngestionConfig() != null) {
        logger.info("Loading logs ingestion.");
        try {
            final LogsIngester logsIngester = new LogsIngester(
                new PointHandlerImpl("logs-ingester", pushValidationLevel, pushBlockedSamples,
                    getFlushTasks("logs-ingester")),
                this::loadLogsIngestionConfig, prefix, System::currentTimeMillis);
            logsIngester.start();
            if (filebeatPort > 0) {
                final Server filebeatServer = new Server(filebeatPort);
                filebeatServer.setMessageListener(new FilebeatIngester(logsIngester, System::currentTimeMillis));
                startAsManagedThread(() -> {
                    try {
                        activeListeners.inc();
                        filebeatServer.listen();
                    } catch (InterruptedException e) {
                        logger.log(Level.SEVERE, "Filebeat server interrupted.", e);
                    } catch (Exception e) {
                        // ChannelFuture throws undeclared checked exceptions, so we need to handle it
                        if (e instanceof BindException) {
                            logger.severe("Unable to start listener - port " + String.valueOf(rawLogsPort) + " is already in use!");
                        }
                    } finally {
                        activeListeners.dec();
                    }
                }, "listener-logs-filebeat-" + filebeatPort);
            }
            if (rawLogsPort > 0) {
                RawLogsIngester rawLogsIngester = new RawLogsIngester(logsIngester, rawLogsPort, System::currentTimeMillis);
                startAsManagedThread(() -> {
                    try {
                        activeListeners.inc();
                        rawLogsIngester.listen();
                    } catch (InterruptedException e) {
                        logger.log(Level.SEVERE, "Raw logs server interrupted.", e);
                    } catch (Exception e) {
                        // ChannelFuture throws undeclared checked exceptions, so we need to handle it
                        if (e instanceof BindException) {
                            logger.severe("Unable to start listener - port " + String.valueOf(rawLogsPort) + " is already in use!");
                        }
                    } finally {
                        activeListeners.dec();
                    }
                }, "listener-logs-raw-" + rawLogsPort);
            }
        } catch (ConfigurationException e) {
            logger.log(Level.SEVERE, "Cannot start logsIngestion", e);
        }
    } else {
        logger.info("Not loading logs ingestion -- no config specified.");
    }
}
Also used : Server (org.logstash.beats.Server) GraphiteDecoder (com.wavefront.ingester.GraphiteDecoder) PickleProtocolDecoder (com.wavefront.ingester.PickleProtocolDecoder) Decoder (com.wavefront.ingester.Decoder) OpenTSDBDecoder (com.wavefront.ingester.OpenTSDBDecoder) HistogramDecoder (com.wavefront.ingester.HistogramDecoder) LengthFieldBasedFrameDecoder (io.netty.handler.codec.LengthFieldBasedFrameDecoder) ConfigurationException (com.wavefront.agent.config.ConfigurationException) Iterator (java.util.Iterator) ReportPointTimestampInRangeFilter (com.wavefront.agent.preprocessor.ReportPointTimestampInRangeFilter) LogsIngester (com.wavefront.agent.logsharvesting.LogsIngester) RawLogsIngester (com.wavefront.agent.logsharvesting.RawLogsIngester) FilebeatIngester (com.wavefront.agent.logsharvesting.FilebeatIngester) GraphiteFormatter (com.wavefront.agent.formatter.GraphiteFormatter) TapeDeck (com.wavefront.agent.histogram.tape.TapeDeck) BindException (java.net.BindException) IOException (java.io.IOException) File (java.io.File)
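
A note on the class this example is indexed under: TapeDeck acts as a factory for the persistent input queues used by the histogram listeners, and the listing constructs exactly one accumulatorDeck shared by all of them. The converter argument decides how queued batches of lines are encoded on disk (compressed or not), and the boolean decides whether messages are persisted at all. A minimal standalone sketch of the same construction pattern follows; the getTape accessor, its ObjectQueue return type (Square's Tape library), the spool file name, and the sample line are assumptions for illustration, since the listing above shows only the constructor.

import java.io.File;
import java.util.Arrays;
import java.util.List;

import com.squareup.tape.ObjectQueue;
import com.wavefront.agent.histogram.tape.TapeDeck;
import com.wavefront.agent.histogram.tape.TapeStringListConverter;

public class TapeDeckSketch {
    public static void main(String[] args) {
        boolean compress = false;   // mirrors persistMessagesCompression above
        boolean persist = true;     // mirrors persistMessages above

        // Same pattern as the accumulatorDeck in startListeners(): the converter
        // picks the on-disk encoding, the flag enables persistence.
        TapeDeck<List<String>> deck = new TapeDeck<>(
            compress
                ? TapeStringListConverter.getCompressionEnabledInstance()
                : TapeStringListConverter.getDefaultInstance(),
            persist);

        // Assumed accessor and file name (not shown in the listing): each listener
        // port would get its own tape file backing an ObjectQueue of line batches.
        ObjectQueue<List<String>> queue = deck.getTape(new File("histogram.2878.spool"));
        queue.add(Arrays.asList("example.metric 42 source=host-1"));
    }
}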
