Usage example of net.i2p.stat.RateStat from project i2p.i2p: class Router, method get1mRate.
/**
* When outboundOnly is false, outbound rate in bytes per second.
* When true, max of inbound and outbound rate in bytes per second.
*/
/**
 * One-minute bandwidth rate in bytes per second.
 * When outboundOnly is true, the outbound rate only.
 * When false, the max of the inbound and outbound rates.
 *
 * @param outboundOnly true for outbound only, false for max of in/out
 * @return rate in bytes per second, or 0 if the stat is unavailable
 */
public int get1mRate(boolean outboundOnly) {
    int send = 0;
    StatManager mgr = _context.statManager();
    RateStat rs = mgr.getRate("bw.sendRate");
    if (rs != null) {
        // guard: the 1-minute period may not be tracked for this stat
        Rate r = rs.getRate(60 * 1000);
        if (r != null)
            send = (int) r.getAverageValue();
    }
    if (outboundOnly)
        return send;
    int recv = 0;
    rs = mgr.getRate("bw.recvRate");
    if (rs != null) {
        Rate r = rs.getRate(60 * 1000);
        if (r != null)
            recv = (int) r.getAverageValue();
    }
    return Math.max(send, recv);
}
Usage example of net.i2p.stat.RateStat from project i2p.i2p: class RouterThrottleImpl, method acceptTunnelRequest.
/**
* If we should send a reject, return a nonzero reject code.
* Anything that causes us to drop a request instead of rejecting it
* must go in BuildHandler.handleInboundRequest(), not here.
*
* @return 0 for accept or nonzero reject code
*/
/**
 * Decide whether to accept a request to participate in a tunnel.
 * If we should send a reject, return a nonzero reject code.
 * Anything that causes us to drop a request instead of rejecting it
 * must go in BuildHandler.handleInboundRequest(), not here.
 *
 * @return 0 (TUNNEL_ACCEPT) for accept or a nonzero reject code
 */
public int acceptTunnelRequest() {
    // Refuse everything once a graceful shutdown has started.
    if (_context.router().gracefulShutdownInProgress()) {
        if (_log.shouldLog(Log.WARN))
            _log.warn("Refusing tunnel request since we are shutting down ASAP");
        setShutdownStatus();
        // Don't use CRIT because this tells everybody we are shutting down
        return TunnelHistory.TUNNEL_REJECT_BANDWIDTH;
    }
    // Don't use CRIT because we don't want peers to think we're failing
    if (_context.router().getUptime() < DEFAULT_REJECT_STARTUP_TIME) {
        setTunnelStatus(_x("Rejecting tunnels: Starting up"));
        return TunnelHistory.TUNNEL_REJECT_BANDWIDTH;
    }
    // NOTE: the job-lag rejection that used to live here was moved to BuildHandler.
    RateAverages ra = RateAverages.getTemp();
    // TODO
    // This stat is highly dependent on transport mix.
    // For NTCP, it is queueing delay only, ~25ms
    // For SSU it is queueing + ack time, ~1000 ms.
    // (SSU acks may be delayed so it is much more than just RTT... and the delay may
    // counterintuitively be more when there is low traffic)
    // Change the stat or pick a better stat.
    RateStat rs = _context.statManager().getRate("transport.sendProcessingTime");
    Rate r = null;
    if (rs != null)
        r = rs.getRate(60 * 1000);
    // Reject tunnels if the time to process messages and send them is too large.
    // Too much time implies congestion.
    if (r != null) {
        r.computeAverages(ra, false);
        int maxProcessingTime = _context.getProperty(PROP_MAX_PROCESSINGTIME, DEFAULT_MAX_PROCESSINGTIME);
        // Set throttling if necessary
        if ((ra.getAverage() > maxProcessingTime * 0.9 || ra.getCurrent() > maxProcessingTime || ra.getLast() > maxProcessingTime)) {
            if (_log.shouldLog(Log.WARN)) {
                _log.warn("Refusing tunnel request due to sendProcessingTime " + ((int) ra.getCurrent()) + " / " + ((int) ra.getLast()) + " / " + ((int) ra.getAverage()) + " / " + maxProcessingTime + " current/last/avg/max ms");
            }
            setTunnelStatus(_x("Rejecting tunnels: High message delay"));
            return TunnelHistory.TUNNEL_REJECT_BANDWIDTH;
        }
    }
    // Hard cap on concurrent participating tunnels.
    int numTunnels = _context.tunnelManager().getParticipatingCount();
    int maxTunnels = _context.getProperty(PROP_MAX_TUNNELS, DEFAULT_MAX_TUNNELS);
    if (numTunnels >= maxTunnels) {
        if (_log.shouldLog(Log.WARN))
            _log.warn("Refusing tunnel request since we are already participating in " + numTunnels + " (our max is " + maxTunnels + ")");
        _context.statManager().addRateData("router.throttleTunnelMaxExceeded", numTunnels);
        setTunnelStatus(_x("Rejecting tunnels: Limit reached"));
        return TunnelHistory.TUNNEL_REJECT_BANDWIDTH;
    }
    /*
     * Throttle if we go above a minimum level of tunnels AND the maximum participating
     * tunnels is default or lower.
     *
     * Lag based statistics use a moving average window (of for example 10 minutes), they are therefore
     * sensitive to sudden rapid growth of load, which are not instantly detected by these metrics.
     * Reduce tunnel growth if we are growing faster than the lag based metrics can detect reliably.
     */
    if ((numTunnels > getMinThrottleTunnels()) && (DEFAULT_MAX_TUNNELS >= maxTunnels)) {
        // FIX: null-check the RateStat before dereferencing, consistent with the
        // sendProcessingTime lookup above; previously this could NPE if the stat
        // was not registered.
        RateStat partStat = _context.statManager().getRate("tunnel.participatingTunnels");
        Rate avgTunnels = (partStat != null) ? partStat.getRate(10 * 60 * 1000) : null;
        if (avgTunnels != null) {
            double avg = avgTunnels.getAvgOrLifetimeAvg();
            double tunnelGrowthFactor = getTunnelGrowthFactor();
            int min = getMinThrottleTunnels();
            if (avg < min)
                avg = min;
            // if the current tunnel count is higher than growthFactor * the average...
            if ((avg > 0) && (avg * tunnelGrowthFactor < numTunnels)) {
                // we're accelerating, lets try not to take on too much too fast
                double probAccept = (avg * tunnelGrowthFactor) / numTunnels;
                // square the decelerator for tunnel counts
                probAccept *= probAccept;
                int v = _context.random().nextInt(100);
                if (v < probAccept * 100) {
                    // ok
                    if (_log.shouldLog(Log.INFO))
                        _log.info("Probabalistically accept tunnel request (p=" + probAccept + " v=" + v + " avg=" + avg + " current=" + numTunnels + ")");
                } else {
                    if (_log.shouldLog(Log.WARN))
                        _log.warn("Probabalistically refusing tunnel request (avg=" + avg + " current=" + numTunnels + ")");
                    _context.statManager().addRateData("router.throttleTunnelProbTooFast", (long) (numTunnels - avg));
                    if (probAccept <= 0.5)
                        setTunnelStatus(_x("Rejecting most tunnels: High number of requests"));
                    else if (probAccept <= 0.9)
                        setTunnelStatus(_x("Accepting most tunnels"));
                    else
                        setTunnelStatus(_x("Accepting tunnels"));
                    return TunnelHistory.TUNNEL_REJECT_PROBABALISTIC_REJECT;
                }
            } else {
                if (_log.shouldLog(Log.INFO))
                    _log.info("Accepting tunnel request, since the tunnel count average is " + avg + " and we only have " + numTunnels + ")");
            }
        }
    }
    // Throttle probabilistically if the recent tunnel test time has grown
    // much faster than the long-term test-time average (implies congestion).
    double tunnelTestTimeGrowthFactor = getTunnelTestTimeGrowthFactor();
    // FIX: null-check the RateStat before dereferencing; previously an
    // unregistered "tunnel.testSuccessTime" stat would cause an NPE here.
    RateStat testStat = _context.statManager().getRate("tunnel.testSuccessTime");
    Rate tunnelTestTime1m = (testStat != null) ? testStat.getRate(1 * 60 * 1000) : null;
    Rate tunnelTestTime10m = (testStat != null) ? testStat.getRate(10 * 60 * 1000) : null;
    if ((tunnelTestTime1m != null) && (tunnelTestTime10m != null) && (tunnelTestTime1m.getLastEventCount() > 0)) {
        double avg1m = tunnelTestTime1m.getAverageValue();
        double avg10m = tunnelTestTime10m.getAvgOrLifetimeAvg();
        if (avg10m < 5000)
            // minimum before complaining
            avg10m = 5000;
        if ((avg10m > 0) && (avg1m > avg10m * tunnelTestTimeGrowthFactor)) {
            double probAccept = (avg10m * tunnelTestTimeGrowthFactor) / avg1m;
            // square the decelerator for test times
            probAccept = probAccept * probAccept;
            int v = _context.random().nextInt(100);
            if (v < probAccept * 100) {
                // ok; the rejection branch here is intentionally disabled
                if (_log.shouldLog(Log.INFO))
                    _log.info("Probabalistically accept tunnel request (p=" + probAccept + " v=" + v + " test time avg 1m=" + avg1m + " 10m=" + avg10m + ")");
            }
        }
        // else: accepting; logging intentionally disabled
    }
    // ok, we're not hosed, but can we handle the bandwidth requirements
    // of another tunnel?
    rs = _context.statManager().getRate("tunnel.participatingMessageCountAvgPerTunnel");
    r = null;
    double messagesPerTunnel = DEFAULT_MESSAGES_PER_TUNNEL_ESTIMATE;
    if (rs != null) {
        r = rs.getRate(60 * 1000);
        if (r != null)
            messagesPerTunnel = r.computeAverages(ra, true).getAverage();
    }
    // never estimate below the configured floor
    if (messagesPerTunnel < DEFAULT_MESSAGES_PER_TUNNEL_ESTIMATE)
        messagesPerTunnel = DEFAULT_MESSAGES_PER_TUNNEL_ESTIMATE;
    double bytesAllocated = messagesPerTunnel * numTunnels * PREPROCESSED_SIZE;
    if (!allowTunnel(bytesAllocated, numTunnels)) {
        _context.statManager().addRateData("router.throttleTunnelBandwidthExceeded", (long) bytesAllocated);
        return TunnelHistory.TUNNEL_REJECT_BANDWIDTH;
    }
    // NOTE: a queued-request overload check used to live here; it was removed
    // because we drop on overload instead - rejecting would be overkill,
    // especially since we've done the cpu-heavy lifting to figure out whats up.
    // ok, all is well, let 'er in
    _context.statManager().addRateData("tunnel.bytesAllocatedAtAccept", (long) bytesAllocated, 60 * 10 * 1000);
    return TUNNEL_ACCEPT;
}
Usage example of net.i2p.stat.RateStat from project i2p.i2p: class RouterThrottleImpl, method getInboundRateDelta.
/**
 * Difference between the current (1-minute) message-size rate and the
 * longest-period rate for which we have data, in bytes per second.
 *
 * NOTE(review): despite the method and variable names suggesting "inbound",
 * this reads the "transport.sendMessageSize" stat - confirm intent.
 *
 * @return rate delta in Bps, or 0 if no data is available
 */
public double getInboundRateDelta() {
    RateStat stat = _context.statManager().getRate("transport.sendMessageSize");
    if (stat == null)
        return 0;
    // Sample every window up front, then compare against the longest one populated.
    double current = getBps(stat.getRate(60 * 1000));
    double fiveMin = getBps(stat.getRate(5 * 60 * 1000));
    double hourly = getBps(stat.getRate(60 * 60 * 1000));
    double daily = getBps(stat.getRate(24 * 60 * 60 * 1000));
    if (current < 0)
        return 0;
    if (daily > 0)
        return current - daily;
    if (hourly > 0)
        return current - hourly;
    if (fiveMin > 0)
        return current - fiveMin;
    return 0;
}
Usage example of net.i2p.stat.RateStat from project i2p.i2p: class RouterThrottleImpl, method getMessageDelay.
/**
 * Average time to process and send a message over the last minute.
 *
 * @return average delay in milliseconds, or 0 if the stat is unavailable
 */
public long getMessageDelay() {
    RateStat rs = _context.statManager().getRate("transport.sendProcessingTime");
    if (rs == null)
        return 0;
    Rate delayRate = rs.getRate(60 * 1000);
    // guard: the 1-minute period may not be tracked; previously this would NPE
    if (delayRate == null)
        return 0;
    return (long) delayRate.getAverageValue();
}
Usage example of net.i2p.stat.RateStat from project i2p.i2p: class StatisticsManager, method includeTunnelRates.
/**
* Add tunnel build rates with some mods to hide absolute quantities
* In particular, report counts normalized to 100 (i.e. a percentage)
*/
/**
 * Add tunnel build rates with some mods to hide absolute quantities.
 * Each stat's event count is reported normalized to 100 (i.e. a percentage
 * of the total events across all build stats for this tunnel type).
 *
 * @param tunnelType     infix of the stat name ("tunnel.build" + type + stat)
 * @param stats          destination properties, keyed "stat_" + name + '.' + period
 * @param selectedPeriod the rate period to publish
 */
private void includeTunnelRates(String tunnelType, Properties stats, long selectedPeriod) {
    // Pass 1: sum the event counts so we can normalize below.
    long total = 0;
    for (String suffix : tunnelStats) {
        RateStat rateStat = _context.statManager().getRate("tunnel.build" + tunnelType + suffix);
        if (rateStat == null)
            continue;
        Rate rate = rateStat.getRate(selectedPeriod);
        if (rate != null)
            total += rate.getLastEventCount();
    }
    // Nothing to report (and avoid dividing by zero).
    if (total <= 0)
        return;
    // Pass 2: publish each stat with its count scaled to a percentage of the total.
    for (String suffix : tunnelStats) {
        String rateName = "tunnel.build" + tunnelType + suffix;
        RateStat rateStat = _context.statManager().getRate(rateName);
        if (rateStat == null)
            continue;
        Rate rate = rateStat.getRate(selectedPeriod);
        if (rate == null)
            continue;
        double scaledCount = 100.0d * rate.getLastEventCount() / total;
        stats.setProperty("stat_" + rateName + '.' + getPeriod(rate), renderRate(rate, scaledCount));
    }
}
Aggregations: end of net.i2p.stat.RateStat usage examples.