Use of com.tdunning.math.stats.AgentDigest in project java by wavefrontHQ.
The class PushAgent, method startHistogramListeners.
protected void startHistogramListeners(Iterator<String> ports, Decoder<String> decoder,
                                       PointHandler pointHandler, TapeDeck<List<String>> receiveDeck,
                                       String listenerBinType, int flushSecs, int fanout,
                                       boolean memoryCacheEnabled, File baseDirectory,
                                       Long accumulatorSize, int avgKeyBytes, int avgDigestBytes,
                                       short compression) {
  // Accumulator: a ChronicleMap persisted to disk, loaded (or created) by MapLoader.
  MapLoader<HistogramKey, AgentDigest, HistogramKeyMarshaller, AgentDigestMarshaller> mapLoader =
      new MapLoader<>(
          HistogramKey.class,
          AgentDigest.class,
          accumulatorSize,
          avgKeyBytes,
          avgDigestBytes,
          HistogramKeyMarshaller.get(),
          AgentDigestMarshaller.get(),
          persistAccumulator);
  File accumulationFile = new File(baseDirectory, "accumulator." + listenerBinType);
  ChronicleMap<HistogramKey, AgentDigest> accumulator = mapLoader.get(accumulationFile);

  // Warn if the accumulator is more than 1.5x the configured size, as ChronicleMap
  // starts losing efficiency past that point.
  histogramExecutor.scheduleWithFixedDelay(() -> {
    if (accumulator.size() > accumulatorSize * 1.5) {
      logger.warning("Histogram " + listenerBinType + " accumulator size (" + accumulator.size() +
          ") is much higher than configured size (" + accumulatorSize +
          "), proxy may experience performance issues or crash!");
    }
  }, 10, 10, TimeUnit.SECONDS);

  AccumulationCache cachedAccumulator =
      new AccumulationCache(accumulator, (memoryCacheEnabled ? accumulatorSize : 0), null);

  // Schedule write-backs from the in-memory cache to the backing ChronicleMap.
  histogramExecutor.scheduleWithFixedDelay(cachedAccumulator.getResolveTask(),
      histogramAccumulatorResolveInterval, histogramAccumulatorResolveInterval,
      TimeUnit.MILLISECONDS);

  PointHandlerDispatcher dispatcher = new PointHandlerDispatcher(cachedAccumulator, pointHandler,
      histogramAccumulatorFlushMaxBatchSize < 0 ? null : histogramAccumulatorFlushMaxBatchSize);
  histogramExecutor.scheduleWithFixedDelay(dispatcher, histogramAccumulatorFlushInterval,
      histogramAccumulatorFlushInterval, TimeUnit.MILLISECONDS);

  // Gracefully shut down the persisted accumulator (ChronicleMap) on proxy exit.
  shutdownTasks.add(() -> {
    try {
      logger.fine("Flushing in-flight histogram accumulator digests: " + listenerBinType);
      cachedAccumulator.getResolveTask().run();
      logger.fine("Shutting down histogram accumulator cache: " + listenerBinType);
      accumulator.close();
    } catch (Throwable t) {
      logger.log(Level.SEVERE, "Error flushing " + listenerBinType +
          " accumulator, possibly unclean shutdown: ", t);
    }
  });

  ports.forEachRemaining(port -> {
    startHistogramListener(port, decoder, pointHandler, cachedAccumulator, baseDirectory,
        (listenerBinType.equals("minute")
            ? Utils.Granularity.MINUTE
            : (listenerBinType.equals("hour") ? Utils.Granularity.HOUR : Utils.Granularity.DAY)),
        receiveDeck, TimeUnit.SECONDS.toMillis(flushSecs), fanout, compression);
    logger.info("listening on port: " + port + " for histogram samples, accumulating to the " +
        listenerBinType);
  });
}
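For orientation, here is a minimal, self-contained sketch of the accumulate-then-dispatch cycle this method wires up, using only the AgentDigest calls that appear in these snippets; the single-threaded executor and the compression/TTL constants are stand-ins for histogramExecutor and the proxy's real configuration, not taken from the source.

import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;
import com.tdunning.math.stats.AgentDigest;

public class AccumulateDispatchSketch {
  public static void main(String[] args) throws InterruptedException {
    ScheduledExecutorService executor = Executors.newSingleThreadScheduledExecutor();
    // One digest per (metric, bin) in the real proxy; compression 100 and a
    // 1-second time-to-dispatch are arbitrary illustrative values.
    AgentDigest digest = new AgentDigest((short) 100, System.currentTimeMillis() + 1_000L);
    digest.add(42.0); // accumulate a sample into the digest
    executor.scheduleWithFixedDelay(() -> {
      // Dispatch once the digest's time-to-dispatch has passed.
      if (digest.getDispatchTimeMillis() <= System.currentTimeMillis()) {
        System.out.println("digest is ripe; a real dispatcher would flush it here");
        executor.shutdown();
      }
    }, 100, 100, TimeUnit.MILLISECONDS);
    executor.awaitTermination(5, TimeUnit.SECONDS);
  }
}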
Use of com.tdunning.math.stats.AgentDigest in project java by wavefrontHQ.
The class AccumulationCache, method put.
/**
 * Updates the {@code AgentDigest} in the cache with a double value. If no {@code AgentDigest}
 * exists for the specified key, one is created with the specified compression and TTL settings.
 *
 * @param key         histogram key
 * @param value       value to be merged into the {@code AgentDigest}
 * @param compression default compression level for new bins
 * @param ttlMillis   default time-to-dispatch in milliseconds for new bins
 */
public void put(HistogramKey key, double value, short compression, long ttlMillis) {
  cache.asMap().compute(key, (k, v) -> {
    if (v == null) {
      binCreatedCounter.inc();
      AgentDigest t = new AgentDigest(compression, System.currentTimeMillis() + ttlMillis);
      // Track the earliest dispatch time seen for this key.
      keyIndex.compute(key, (k1, v1) ->
          (v1 != null && v1 < t.getDispatchTimeMillis() ? v1 : t.getDispatchTimeMillis()));
      t.add(value);
      return t;
    } else {
      keyIndex.compute(key, (k1, v1) ->
          (v1 != null && v1 < v.getDispatchTimeMillis() ? v1 : v.getDispatchTimeMillis()));
      v.add(value);
      return v;
    }
  });
}
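The keyIndex.compute lambda above implements a min-merge: for each key it retains the earliest dispatch time seen so far. A self-contained sketch of that rule with a plain ConcurrentHashMap (the map contents and key name are illustrative):

import java.util.concurrent.ConcurrentHashMap;

public class KeyIndexSketch {
  public static void main(String[] args) {
    ConcurrentHashMap<String, Long> keyIndex = new ConcurrentHashMap<>();
    long first = 1_000L;
    long later = 2_000L;
    // Same merge rule as in AccumulationCache.put: keep the smaller (earlier) time.
    keyIndex.compute("some-key", (k, v) -> (v != null && v < first) ? v : first);
    keyIndex.compute("some-key", (k, v) -> (v != null && v < later) ? v : later);
    System.out.println(keyIndex.get("some-key")); // prints 1000
  }
}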
Use of com.tdunning.math.stats.AgentDigest in project java by wavefrontHQ.
The class AccumulationCache, method put.
/**
 * Updates the {@code AgentDigest} in the cache with a {@code Histogram} value. If no
 * {@code AgentDigest} exists for the specified key, one is created with the specified compression
 * and TTL settings.
 *
 * @param key         histogram key
 * @param value       a {@code Histogram} to be merged into the {@code AgentDigest}
 * @param compression default compression level for new bins
 * @param ttlMillis   default time-to-dispatch in milliseconds for new bins
 */
public void put(HistogramKey key, Histogram value, short compression, long ttlMillis) {
  cache.asMap().compute(key, (k, v) -> {
    if (v == null) {
      binCreatedCounter.inc();
      AgentDigest t = new AgentDigest(compression, System.currentTimeMillis() + ttlMillis);
      keyIndex.compute(key, (k1, v1) ->
          (v1 != null && v1 < t.getDispatchTimeMillis() ? v1 : t.getDispatchTimeMillis()));
      mergeHistogram(t, value);
      return t;
    } else {
      keyIndex.compute(key, (k1, v1) ->
          (v1 != null && v1 < v.getDispatchTimeMillis() ? v1 : v.getDispatchTimeMillis()));
      mergeHistogram(v, value);
      return v;
    }
  });
}
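mergeHistogram itself is not shown in these snippets. A plausible sketch follows, under two assumptions not confirmed by the excerpts above: that Histogram exposes parallel getBins()/getCounts() lists, and that AgentDigest inherits a weighted add(double, int) from the t-digest base class.

import java.util.List;
import com.tdunning.math.stats.AgentDigest;

// Hypothetical reconstruction; the accessor names and weighted add are assumptions.
class MergeSketch {
  static void mergeHistogram(AgentDigest target, Histogram source) {
    List<Double> bins = source.getBins();
    List<Integer> counts = source.getCounts();
    for (int i = 0; i < bins.size(); i++) {
      target.add(bins.get(i), counts.get(i)); // merge each centroid with its weight
    }
  }
}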
Use of com.tdunning.math.stats.AgentDigest in project java by wavefrontHQ.
The class MapLoaderTest, method testCorruptedFileFallsBackToInMemory.
// NOTE: Chronicle's repair attempt takes >1min for whatever reason.
@Ignore
@Test
public void testCorruptedFileFallsBackToInMemory() throws IOException {
  // Corrupt the backing file so ChronicleMap cannot load it.
  try (FileOutputStream fos = new FileOutputStream(file)) {
    fos.write("Nonsense".getBytes());
    fos.flush();
  }
  ConcurrentMap<HistogramKey, AgentDigest> map = loader.get(file);
  // The returned map should be in-memory, i.e. not backed by the corrupted file.
  assertThat(((VanillaChronicleMap) map).file()).isNull();
  testPutRemove(map);
}
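The fallback being tested plausibly looks like the following inside MapLoader.get. This is a sketch only: the builder parameters mirror the MapLoader constructor arguments shown earlier, and the actual exception handling in the real class may differ.

import java.io.File;
import java.io.IOException;
import net.openhft.chronicle.map.ChronicleMap;
import net.openhft.chronicle.map.ChronicleMapBuilder;

// Hypothetical sketch of MapLoader.get(file); not the actual implementation.
static ChronicleMap<HistogramKey, AgentDigest> load(File file, long accumulatorSize,
                                                    int avgKeyBytes, int avgDigestBytes) {
  ChronicleMapBuilder<HistogramKey, AgentDigest> builder = ChronicleMapBuilder
      .of(HistogramKey.class, AgentDigest.class)
      .entries(accumulatorSize)
      .averageKeySize(avgKeyBytes)
      .averageValueSize(avgDigestBytes);
  try {
    return builder.createPersistedTo(file); // normal, persisted path
  } catch (IOException e) {
    return builder.create();                // corrupted file: fall back to in-memory
  }
}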
Use of com.tdunning.math.stats.AgentDigest in project java by wavefrontHQ.
The class PointHandlerDispatcherTest, method setup.
@Before
public void setup() {
  timeMillis = new AtomicLong(0L);
  backingStore = new ConcurrentHashMap<>();
  in = new AccumulationCache(backingStore, 0, timeMillis::get);
  pointOut = new LinkedList<>();
  debugLineOut = new LinkedList<>();
  blockedOut = new LinkedList<>();
  digestA = new AgentDigest(COMPRESSION, 100L);
  digestB = new AgentDigest(COMPRESSION, 1000L);
  subject = new PointHandlerDispatcher(in, new PointHandler() {
    @Override
    public void reportPoint(ReportPoint point, String debugLine) {
      pointOut.add(point);
      debugLineOut.add(debugLine);
    }

    @Override
    public void reportPoints(List<ReportPoint> points) {
      pointOut.addAll(points);
    }

    @Override
    public void handleBlockedPoint(String pointLine) {
      blockedOut.add(pointLine);
    }
  }, timeMillis::get, null);
}
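Note the injected clock: both the AccumulationCache and the PointHandlerDispatcher read time from timeMillis::get, so a test can control dispatch deterministically. A hedged sketch of how a test body might use this setup; keyA is a hypothetical HistogramKey built by the test, and the assumption that the dispatcher flushes digests whose dispatch time has passed is inferred from the snippets above, not confirmed.

// Illustrative only; keyA is a hypothetical key, not from the actual test.
backingStore.put(keyA, digestA); // digestA was created with dispatch time 100L
timeMillis.set(101L);            // advance the injected clock past that time
subject.run();                   // dispatcher flushes ripe digests to the PointHandler
// pointOut should now contain the point(s) dispatched from digestA.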