Example 1 with AgentDigest

Use of com.tdunning.math.stats.AgentDigest in the wavefrontHQ/java project.

The class PushAgent, method startHistogramListeners.

protected void startHistogramListeners(Iterator<String> ports, Decoder<String> decoder,
                                        PointHandler pointHandler, TapeDeck<List<String>> receiveDeck,
                                        String listenerBinType, int flushSecs, int fanout,
                                        boolean memoryCacheEnabled, File baseDirectory, Long accumulatorSize,
                                        int avgKeyBytes, int avgDigestBytes, short compression) {
    // Accumulator
    MapLoader<HistogramKey, AgentDigest, HistogramKeyMarshaller, AgentDigestMarshaller> mapLoader =
        new MapLoader<>(HistogramKey.class, AgentDigest.class, accumulatorSize, avgKeyBytes, avgDigestBytes,
            HistogramKeyMarshaller.get(), AgentDigestMarshaller.get(), persistAccumulator);
    File accumulationFile = new File(baseDirectory, "accumulator." + listenerBinType);
    ChronicleMap<HistogramKey, AgentDigest> accumulator = mapLoader.get(accumulationFile);
    histogramExecutor.scheduleWithFixedDelay(() -> {
        // warn if accumulator is more than 1.5x the original size, as ChronicleMap starts losing efficiency
        if (accumulator.size() > accumulatorSize * 1.5) {
            logger.warning("Histogram " + listenerBinType + " accumulator size (" + accumulator.size() + ") is much higher than configured size (" + accumulatorSize + "), proxy may experience performance issues or crash!");
        }
    }, 10, 10, TimeUnit.SECONDS);
    AccumulationCache cachedAccumulator = new AccumulationCache(accumulator, (memoryCacheEnabled ? accumulatorSize : 0), null);
    // Schedule write-backs
    histogramExecutor.scheduleWithFixedDelay(cachedAccumulator.getResolveTask(), histogramAccumulatorResolveInterval, histogramAccumulatorResolveInterval, TimeUnit.MILLISECONDS);
    PointHandlerDispatcher dispatcher = new PointHandlerDispatcher(cachedAccumulator, pointHandler,
        histogramAccumulatorFlushMaxBatchSize < 0 ? null : histogramAccumulatorFlushMaxBatchSize);
    histogramExecutor.scheduleWithFixedDelay(dispatcher, histogramAccumulatorFlushInterval, histogramAccumulatorFlushInterval, TimeUnit.MILLISECONDS);
    // gracefully shutdown persisted accumulator (ChronicleMap) on proxy exit
    shutdownTasks.add(() -> {
        try {
            logger.fine("Flushing in-flight histogram accumulator digests: " + listenerBinType);
            cachedAccumulator.getResolveTask().run();
            logger.fine("Shutting down histogram accumulator cache: " + listenerBinType);
            accumulator.close();
        } catch (Throwable t) {
            logger.log(Level.SEVERE, "Error flushing " + listenerBinType + " accumulator, possibly unclean shutdown: ", t);
        }
    });
    ports.forEachRemaining(port -> {
        startHistogramListener(port, decoder, pointHandler, cachedAccumulator, baseDirectory,
            (listenerBinType.equals("minute")
                ? Utils.Granularity.MINUTE
                : (listenerBinType.equals("hour") ? Utils.Granularity.HOUR : Utils.Granularity.DAY)),
            receiveDeck, TimeUnit.SECONDS.toMillis(flushSecs), fanout, compression);
        logger.info("listening on port: " + port + " for histogram samples, accumulating to the " + listenerBinType);
    });
}
Also used : HistogramKey(com.wavefront.agent.histogram.Utils.HistogramKey) AgentDigestMarshaller(com.tdunning.math.stats.AgentDigest.AgentDigestMarshaller) AccumulationCache(com.wavefront.agent.histogram.accumulator.AccumulationCache) MapLoader(com.wavefront.agent.histogram.MapLoader) PointHandlerDispatcher(com.wavefront.agent.histogram.PointHandlerDispatcher) AgentDigest(com.tdunning.math.stats.AgentDigest) HistogramKeyMarshaller(com.wavefront.agent.histogram.Utils.HistogramKeyMarshaller) File(java.io.File)
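
The wiring above schedules a periodic over-size warning, the cache write-back task, the dispatcher flush, and a flush-and-close hook for shutdown, all on the same histogram executor. Below is a minimal sketch of that scheduling pattern using only JDK types; the class name is hypothetical and a ConcurrentHashMap stands in for the persisted ChronicleMap accumulator.

import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;
import java.util.logging.Logger;

/** Hypothetical stand-in for the accumulator monitoring wired up in startHistogramListeners. */
public class AccumulatorMonitorSketch {
    private static final Logger logger = Logger.getLogger(AccumulatorMonitorSketch.class.getName());

    private final ScheduledExecutorService histogramExecutor = Executors.newSingleThreadScheduledExecutor();
    private final ConcurrentMap<String, Object> accumulator = new ConcurrentHashMap<>(); // stand-in for ChronicleMap
    private final long accumulatorSize;

    public AccumulatorMonitorSketch(long accumulatorSize) {
        this.accumulatorSize = accumulatorSize;
    }

    public void start() {
        // warn every 10 seconds once the map grows past 1.5x its configured size,
        // mirroring the check scheduled in the proxy code above
        histogramExecutor.scheduleWithFixedDelay(() -> {
            if (accumulator.size() > accumulatorSize * 1.5) {
                logger.warning("accumulator size (" + accumulator.size()
                        + ") is much higher than configured size (" + accumulatorSize + ")");
            }
        }, 10, 10, TimeUnit.SECONDS);
    }

    public void shutdown() {
        // flush-and-close on exit, analogous to the shutdownTasks entry in the proxy
        histogramExecutor.shutdown();
        accumulator.clear(); // stands in for accumulator.close() on the persisted ChronicleMap
    }
}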

Example 2 with AgentDigest

Use of com.tdunning.math.stats.AgentDigest in the wavefrontHQ/java project.

The class AccumulationCache, method put.

/**
 * Updates the {@code AgentDigest} in the cache with a double value. If no {@code AgentDigest} exists for
 * the specified key, one is created with the specified compression and ttlMillis settings.
 *
 * @param key         histogram key
 * @param value       value to be merged into the {@code AgentDigest}
 * @param compression default compression level for new bins
 * @param ttlMillis   default time-to-dispatch in milliseconds for new bins
 */
public void put(HistogramKey key, double value, short compression, long ttlMillis) {
    cache.asMap().compute(key, (k, v) -> {
        if (v == null) {
            binCreatedCounter.inc();
            AgentDigest t = new AgentDigest(compression, System.currentTimeMillis() + ttlMillis);
            keyIndex.compute(key, (k1, v1) -> (v1 != null && v1 < t.getDispatchTimeMillis() ? v1 : t.getDispatchTimeMillis()));
            t.add(value);
            return t;
        } else {
            keyIndex.compute(key, (k1, v1) -> (v1 != null && v1 < v.getDispatchTimeMillis() ? v1 : v.getDispatchTimeMillis()));
            v.add(value);
            return v;
        }
    });
}
Also used : AgentDigest(com.tdunning.math.stats.AgentDigest)
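
The method relies on ConcurrentMap.compute, which runs its remapping function atomically per key, so concurrent writers either create a new digest or merge into an existing one without losing updates. Below is a self-contained sketch of that create-or-merge idiom; because AgentDigest is internal to the proxy, a trivial hypothetical ToyDigest takes its place.

import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;

/** Toy illustration of the atomic create-or-merge idiom used by AccumulationCache.put. */
public class CreateOrMergeSketch {

    /** Trivial stand-in for AgentDigest: sums values and remembers a dispatch deadline. */
    static final class ToyDigest {
        final long dispatchTimeMillis;
        double sum;

        ToyDigest(long dispatchTimeMillis) {
            this.dispatchTimeMillis = dispatchTimeMillis;
        }

        void add(double value) {
            sum += value;
        }
    }

    private final ConcurrentMap<String, ToyDigest> cache = new ConcurrentHashMap<>();

    public void put(String key, double value, long ttlMillis) {
        // compute() is atomic per key: create the digest on first sight, otherwise merge in place
        cache.compute(key, (k, digest) -> {
            if (digest == null) {
                digest = new ToyDigest(System.currentTimeMillis() + ttlMillis);
            }
            digest.add(value);
            return digest;
        });
    }

    public static void main(String[] args) {
        CreateOrMergeSketch sketch = new CreateOrMergeSketch();
        sketch.put("cpu.load", 0.42, 60_000L);
        sketch.put("cpu.load", 0.58, 60_000L);
        System.out.println(sketch.cache.get("cpu.load").sum); // prints 1.0
    }
}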

Example 3 with AgentDigest

Use of com.tdunning.math.stats.AgentDigest in the wavefrontHQ/java project.

The class AccumulationCache, method put.

/**
 * Updates the {@code AgentDigest} in the cache with a {@code Histogram} value. If no {@code AgentDigest}
 * exists for the specified key, one is created with the specified compression and ttlMillis settings.
 *
 * @param key         histogram key
 * @param value       a {@code Histogram} to be merged into the {@code AgentDigest}
 * @param compression default compression level for new bins
 * @param ttlMillis   default time-to-dispatch in milliseconds for new bins
 */
public void put(HistogramKey key, Histogram value, short compression, long ttlMillis) {
    cache.asMap().compute(key, (k, v) -> {
        if (v == null) {
            binCreatedCounter.inc();
            AgentDigest t = new AgentDigest(compression, System.currentTimeMillis() + ttlMillis);
            keyIndex.compute(key, (k1, v1) -> (v1 != null && v1 < t.getDispatchTimeMillis() ? v1 : t.getDispatchTimeMillis()));
            mergeHistogram(t, value);
            return t;
        } else {
            keyIndex.compute(key, (k1, v1) -> (v1 != null && v1 < v.getDispatchTimeMillis() ? v1 : v.getDispatchTimeMillis()));
            mergeHistogram(v, value);
            return v;
        }
    });
}
Also used : AgentDigest(com.tdunning.math.stats.AgentDigest)
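
Both put overloads also update keyIndex so that it always records the earliest dispatch time seen for a key. The compute call in the snippet implements a "keep the minimum" update; here is a small sketch of that idiom on its own, with hypothetical names and JDK types only.

import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;

/** Sketch of the "remember the earliest dispatch time per key" idiom from AccumulationCache. */
public class EarliestDeadlineSketch {
    private final ConcurrentMap<String, Long> keyIndex = new ConcurrentHashMap<>();

    /** Records dispatchTimeMillis for the key, keeping the smaller of the old and new values. */
    public void recordDispatchTime(String key, long dispatchTimeMillis) {
        keyIndex.compute(key,
                (k, existing) -> (existing != null && existing < dispatchTimeMillis)
                        ? existing
                        : dispatchTimeMillis);
    }

    public static void main(String[] args) {
        EarliestDeadlineSketch sketch = new EarliestDeadlineSketch();
        sketch.recordDispatchTime("bin-A", 2_000L);
        sketch.recordDispatchTime("bin-A", 1_000L); // the earlier deadline wins
        System.out.println(sketch.keyIndex.get("bin-A")); // prints 1000
    }
}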

Example 4 with AgentDigest

Use of com.tdunning.math.stats.AgentDigest in the wavefrontHQ/java project.

The class MapLoaderTest, method testCorruptedFileFallsBackToInMemory.

// NOTE: Chronicle's repair attempt takes >1min for whatever reason.
@Ignore
@Test
public void testCorruptedFileFallsBackToInMemory() throws IOException {
    FileOutputStream fos = new FileOutputStream(file);
    fos.write("Nonsense".getBytes());
    fos.flush();
    ConcurrentMap<HistogramKey, AgentDigest> map = loader.get(file);
    assertThat(((VanillaChronicleMap) map).file()).isNull();
    testPutRemove(map);
}
Also used : HistogramKey(com.wavefront.agent.histogram.Utils.HistogramKey) FileOutputStream(java.io.FileOutputStream) AgentDigest(com.tdunning.math.stats.AgentDigest) VanillaChronicleMap(net.openhft.chronicle.map.VanillaChronicleMap) Ignore(org.junit.Ignore) Test(org.junit.Test)
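
The test corrupts the backing file and then expects loader.get(file) to return a purely in-memory map (its file() is null) rather than fail. A rough sketch of that load-or-fall-back behavior follows; it uses java.io serialization as a stand-in for ChronicleMap persistence, and the class and method names are hypothetical.

import java.io.File;
import java.io.FileInputStream;
import java.io.IOException;
import java.io.ObjectInputStream;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.logging.Logger;

/** Sketch of the "fall back to in-memory on a corrupted persisted file" behavior the test checks. */
public class LoadOrFallBackSketch {
    private static final Logger logger = Logger.getLogger(LoadOrFallBackSketch.class.getName());

    @SuppressWarnings("unchecked")
    public static ConcurrentMap<String, Double> load(File file) {
        try (ObjectInputStream in = new ObjectInputStream(new FileInputStream(file))) {
            // java.io serialization stands in for ChronicleMap's persisted file format
            Map<String, Double> persisted = (Map<String, Double>) in.readObject();
            return new ConcurrentHashMap<>(persisted);
        } catch (IOException | ClassNotFoundException e) {
            // a corrupted or unreadable file degrades to a purely in-memory map, as MapLoader does
            logger.warning("Could not load persisted map from " + file + ", falling back to in-memory: " + e);
            return new ConcurrentHashMap<>();
        }
    }
}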

Example 5 with AgentDigest

Use of com.tdunning.math.stats.AgentDigest in the wavefrontHQ/java project.

The class PointHandlerDispatcherTest, method setup.

@Before
public void setup() {
    timeMillis = new AtomicLong(0L);
    backingStore = new ConcurrentHashMap<>();
    in = new AccumulationCache(backingStore, 0, timeMillis::get);
    pointOut = new LinkedList<>();
    debugLineOut = new LinkedList<>();
    blockedOut = new LinkedList<>();
    digestA = new AgentDigest(COMPRESSION, 100L);
    digestB = new AgentDigest(COMPRESSION, 1000L);
    subject = new PointHandlerDispatcher(in, new PointHandler() {

        @Override
        public void reportPoint(ReportPoint point, String debugLine) {
            pointOut.add(point);
            debugLineOut.add(debugLine);
        }

        @Override
        public void reportPoints(List<ReportPoint> points) {
            pointOut.addAll(points);
        }

        @Override
        public void handleBlockedPoint(String pointLine) {
            blockedOut.add(pointLine);
        }
    }, timeMillis::get, null);
}
Also used : AtomicLong(java.util.concurrent.atomic.AtomicLong) AccumulationCache(com.wavefront.agent.histogram.accumulator.AccumulationCache) AgentDigest(com.tdunning.math.stats.AgentDigest) PointHandler(com.wavefront.agent.PointHandler) List(java.util.List) LinkedList(java.util.LinkedList) ReportPoint(wavefront.report.ReportPoint) Before(org.junit.Before)
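
The setup injects timeMillis::get as the dispatcher's clock, so the test can advance time deterministically instead of depending on System.currentTimeMillis(). A minimal sketch of that injectable-clock pattern follows, assuming a hypothetical Expirer component and a LongSupplier clock.

import java.util.concurrent.atomic.AtomicLong;
import java.util.function.LongSupplier;

/** Sketch of the injectable-clock pattern the dispatcher test uses (timeMillis::get). */
public class ClockDrivenSketch {

    /** Component that asks an injected clock for "now" instead of calling System.currentTimeMillis(). */
    static final class Expirer {
        private final LongSupplier clock;
        private final long deadlineMillis;

        Expirer(LongSupplier clock, long deadlineMillis) {
            this.clock = clock;
            this.deadlineMillis = deadlineMillis;
        }

        boolean isExpired() {
            return clock.getAsLong() >= deadlineMillis;
        }
    }

    public static void main(String[] args) {
        AtomicLong timeMillis = new AtomicLong(0L); // test-controlled clock, as in the setup above
        Expirer expirer = new Expirer(timeMillis::get, 100L);

        System.out.println(expirer.isExpired()); // false, the clock is still at 0
        timeMillis.set(100L);                    // advance virtual time
        System.out.println(expirer.isExpired()); // true
    }
}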

Aggregations

AgentDigest (com.tdunning.math.stats.AgentDigest): 7
AtomicLong (java.util.concurrent.atomic.AtomicLong): 3
Before (org.junit.Before): 3
HistogramKey (com.wavefront.agent.histogram.Utils.HistogramKey): 2
AccumulationCache (com.wavefront.agent.histogram.accumulator.AccumulationCache): 2
AgentDigestMarshaller (com.tdunning.math.stats.AgentDigest.AgentDigestMarshaller): 1
PointHandler (com.wavefront.agent.PointHandler): 1
MapLoader (com.wavefront.agent.histogram.MapLoader): 1
PointHandlerDispatcher (com.wavefront.agent.histogram.PointHandlerDispatcher): 1
HistogramKeyMarshaller (com.wavefront.agent.histogram.Utils.HistogramKeyMarshaller): 1
File (java.io.File): 1
FileOutputStream (java.io.FileOutputStream): 1
LinkedList (java.util.LinkedList): 1
List (java.util.List): 1
VanillaChronicleMap (net.openhft.chronicle.map.VanillaChronicleMap): 1
Ignore (org.junit.Ignore): 1
Test (org.junit.Test): 1
ReportPoint (wavefront.report.ReportPoint): 1