Use of net.i2p.stat.RateStat in project i2p.i2p by i2p:
Class BuildExecutor, method allowed().
/**
 * Computes how many concurrent tunnel build requests we may have in flight
 * right now, based on outbound bandwidth, recent build-request latency, job
 * queue lag, and the number of builds already outstanding. Also performs
 * housekeeping: expires stale entries from the recently/currently-building
 * maps and penalizes peers in builds that timed out.
 *
 * @return the number of additional builds allowed (may be 0); already
 *         reduced by the count of currently outstanding builds
 */
private int allowed() {
CommSystemFacade csf = _context.commSystem();
// No connectivity at all - don't waste effort building
if (csf.getStatus() == Status.DISCONNECTED)
return 0;
// Dummy comm system with no established peers - nothing to build through
if (csf.isDummy() && csf.getEstablished().size() <= 0)
return 0;
int maxKBps = _context.bandwidthLimiter().getOutboundKBytesPerSecond();
// Max. 1 concurrent build per 6 KB/s outbound
int allowed = maxKBps / 6;
RateStat rs = _context.statManager().getRate("tunnel.buildRequestTime");
if (rs != null) {
// Prefer the 1-minute average; fall back to lifetime if no recent samples
Rate r = rs.getRate(60 * 1000);
double avg = 0;
if (r != null)
avg = r.getAverageValue();
if (avg <= 0)
avg = rs.getLifetimeAverageValue();
if (avg > 1) {
// If builds take more than 75 ms, start throttling
int throttle = (int) (75 * MAX_CONCURRENT_BUILDS / avg);
if (throttle < allowed) {
allowed = throttle;
if (allowed < MAX_CONCURRENT_BUILDS && _log.shouldLog(Log.INFO))
_log.info("Throttling max builds to " + allowed + " due to avg build time of " + ((int) avg) + " ms");
}
}
}
// Clamp to [2, MAX_CONCURRENT_BUILDS] before the config override below
if (allowed < 2)
// Never choke below 2 builds (but congestion may)
allowed = 2;
else if (allowed > MAX_CONCURRENT_BUILDS)
allowed = MAX_CONCURRENT_BUILDS;
// Explicit config property overrides the computed limit (and the clamp above)
allowed = _context.getProperty("router.tunnelConcurrentBuilds", allowed);
// expire any REALLY old requests
long expireBefore = _context.clock().now() + 10 * 60 * 1000 - BuildRequestor.REQUEST_TIMEOUT - GRACE_PERIOD;
for (Iterator<PooledTunnelCreatorConfig> iter = _recentlyBuildingMap.values().iterator(); iter.hasNext(); ) {
PooledTunnelCreatorConfig cfg = iter.next();
if (cfg.getExpiration() <= expireBefore) {
iter.remove();
}
}
// expire any old requests
List<PooledTunnelCreatorConfig> expired = null;
int concurrent = 0;
// Todo: Make expiration variable
expireBefore = _context.clock().now() + 10 * 60 * 1000 - BuildRequestor.REQUEST_TIMEOUT;
for (Iterator<PooledTunnelCreatorConfig> iter = _currentlyBuildingMap.values().iterator(); iter.hasNext(); ) {
PooledTunnelCreatorConfig cfg = iter.next();
if (cfg.getExpiration() <= expireBefore) {
// save them for another minute
_recentlyBuildingMap.putIfAbsent(Long.valueOf(cfg.getReplyMessageId()), cfg);
iter.remove();
// Collect lazily - most calls see no expirations
if (expired == null)
expired = new ArrayList<PooledTunnelCreatorConfig>();
expired.add(cfg);
}
}
// Count outstanding builds AFTER the expiry sweep, then subtract from quota
concurrent = _currentlyBuildingMap.size();
allowed -= concurrent;
if (expired != null) {
for (int i = 0; i < expired.size(); i++) {
PooledTunnelCreatorConfig cfg = expired.get(i);
if (_log.shouldLog(Log.INFO))
_log.info("Timed out waiting for reply asking for " + cfg);
// Also note the fact that this tunnel request timed out in the peers' profiles.
for (int iPeer = 0; iPeer < cfg.getLength(); iPeer++) {
// Look up peer
Hash peer = cfg.getPeer(iPeer);
// Avoid recording ourselves
if (peer.equals(_context.routerHash()))
continue;
// Look up routerInfo
RouterInfo ri = _context.netDb().lookupRouterInfoLocally(peer);
// Default and detect bandwidth tier
String bwTier = "Unknown";
// Returns "Unknown" if none recognized
if (ri != null)
bwTier = ri.getBandwidthTier();
// Record that a peer of the given tier expired
_context.statManager().addRateData("tunnel.tierExpire" + bwTier, 1);
didNotReply(cfg.getReplyMessageId(), peer);
// Blame everybody since we don't know whose fault it is.
// (it could be our exploratory tunnel's fault too...)
_context.profileManager().tunnelTimedOut(peer);
}
// Tell the owning pool the attempt is over so it can retry
TunnelPool pool = cfg.getTunnelPool();
if (pool != null)
pool.buildComplete(cfg);
// Null destination means an exploratory (router-owned) tunnel
if (cfg.getDestination() == null) {
_context.statManager().addRateData("tunnel.buildExploratoryExpire", 1);
// if (cfg.isInbound())
// _context.statManager().addRateData("tunnel.buildExploratoryExpireIB", 1);
// else
// _context.statManager().addRateData("tunnel.buildExploratoryExpireOB", 1);
} else {
_context.statManager().addRateData("tunnel.buildClientExpire", 1);
// if (cfg.isInbound())
// _context.statManager().addRateData("tunnel.buildClientExpireIB", 1);
// else
// _context.statManager().addRateData("tunnel.buildClientExpireOB", 1);
}
}
}
_context.statManager().addRateData("tunnel.concurrentBuilds", concurrent, 0);
// Back off completely when the job queue is badly lagged (after warmup)
long lag = _context.jobQueue().getMaxLag();
if ((lag > 2000) && (_context.router().getUptime() > 5 * 60 * 1000)) {
if (_log.shouldLog(Log.WARN))
_log.warn("Too lagged [" + lag + "], don't allow building");
_context.statManager().addRateData("tunnel.concurrentBuildsLagged", concurrent, lag);
// if we have a job heavily blocking our jobqueue, ssllloowww dddooowwwnnn
return 0;
}
return allowed;
}
Use of net.i2p.stat.RateStat in project i2p.i2p by i2p:
Class TunnelHistory, method createRates().
/**
 * Builds the reject and fail RateStats for this tunnel history under the
 * given stat group and wires both to the context's stat log.
 *
 * @param statGroup the group name both rates are registered under
 */
private void createRates(String statGroup) {
    // Averaging windows: 10 min, 30 min, 1 hour, 24 hours
    final long[] periods = { 10 * 60 * 1000l, 30 * 60 * 1000l, 60 * 60 * 1000l, 24 * 60 * 60 * 1000l };
    // clone() so each RateStat owns a distinct period array, as before
    _rejectRate = new RateStat("tunnelHistory.rejectRate", "How often does this peer reject a tunnel request?", statGroup, periods.clone());
    _failRate = new RateStat("tunnelHistory.failRate", "How often do tunnels this peer accepts fail?", statGroup, periods.clone());
    _rejectRate.setStatLog(_context.statManager().getStatLog());
    _failRate.setStatLog(_context.statManager().getStatLog());
}
Use of net.i2p.stat.RateStat in project i2p.i2p by i2p:
Class CoalesceStatsEvent, method timeReached().
/**
 * Periodic stat-coalescing pass: publishes peer counts, bandwidth,
 * tunnel backlog and memory usage, clears caches when memory is low,
 * coalesces all stats, then derives the per-second bandwidth rates
 * from the last minute of transport message-size totals.
 */
public void timeReached() {
    int active = getContext().commSystem().countActivePeers();
    getContext().statManager().addRateData("router.activePeers", active, 60 * 1000);
    int activeSend = getContext().commSystem().countActiveSendPeers();
    getContext().statManager().addRateData("router.activeSendPeers", activeSend, 60 * 1000);
    int fast = getContext().profileOrganizer().countFastPeers();
    getContext().statManager().addRateData("router.fastPeers", fast, 60 * 1000);
    int highCap = getContext().profileOrganizer().countHighCapacityPeers();
    getContext().statManager().addRateData("router.highCapacityPeers", highCap, 60 * 1000);
    // 'f' capability = floodfill, i.e. integrated into the netDb
    int integrated = getContext().peerManager().getPeersByCapability('f').size();
    getContext().statManager().addRateData("router.integratedPeers", integrated, 60 * 1000);
    getContext().statManager().addRateData("bw.sendRate", (long) getContext().bandwidthLimiter().getSendBps());
    getContext().statManager().addRateData("bw.recvRate", (long) getContext().bandwidthLimiter().getReceiveBps());
    getContext().statManager().addRateData("router.tunnelBacklog", getContext().tunnelManager().getInboundBuildQueueSize(), 60 * 1000);
    long used = Runtime.getRuntime().totalMemory() - Runtime.getRuntime().freeMemory();
    getContext().statManager().addRateData("router.memoryUsed", used);
    // Proactively clear caches before we hit an OOM
    if (_maxMemory - used < LOW_MEMORY_THRESHOLD)
        Router.clearCaches();
    getContext().tunnelDispatcher().updateParticipatingStats(Router.COALESCE_TIME);
    getContext().statManager().coalesceStats();
    // Must run AFTER coalesceStats() so getLastTotalValue() sees fresh totals
    publishBps("transport.receiveMessageSize", "bw.receiveBps");
    publishBps("transport.sendMessageSize", "bw.sendBps");
}

/**
 * Converts the last 1-minute byte total of the source message-size stat
 * into a bytes-per-second rate and publishes it under the destination stat.
 * Silently does nothing if the source stat or its 1-minute rate is absent.
 *
 * @param srcStat  name of the message-size RateStat to read
 * @param destStat name of the Bps stat to publish
 */
private void publishBps(String srcStat, String destStat) {
    RateStat stat = getContext().statManager().getRate(srcStat);
    if (stat == null)
        return;
    Rate rate = stat.getRate(60 * 1000);
    if (rate == null)
        return;
    double bytes = rate.getLastTotalValue();
    // Scale the period total to a per-second rate
    double bps = (bytes * 1000.0d) / rate.getPeriod();
    getContext().statManager().addRateData(destStat, (long) bps, 60 * 1000);
}
Use of net.i2p.stat.RateStat in project i2p.i2p by i2p:
Class RouterWatchdog, method dumpStatus().
/**
 * Logs an error-level snapshot of router health: job queue state,
 * participating tunnel count, send processing time, outbound bandwidth,
 * and memory usage. On the first consecutive error it additionally logs
 * a CRIT "watchdog barking" message, records a watchdog event, and
 * (rate-limited) dumps all thread stacks.
 */
private void dumpStatus() {
    if (!_log.shouldLog(Log.ERROR))
        return;
    _log.error("Ready and waiting jobs: " + _context.jobQueue().getReadyCount());
    _log.error("Job lag: " + _context.jobQueue().getMaxLag());
    _log.error("Participating tunnel count: " + _context.tunnelManager().getParticipatingCount());
    double processTime = oneMinuteAverage("transport.sendProcessingTime");
    _log.error("1 minute send processing time: " + DataHelper.formatDuration((long) processTime));
    double bps = oneMinuteAverage("bw.sendBps");
    _log.error("Outbound send rate: " + DataHelper.formatSize((long) bps) + "Bps");
    Runtime rt = Runtime.getRuntime();
    long max = rt.maxMemory();
    long used = rt.totalMemory() - rt.freeMemory();
    _log.error("Memory: " + DataHelper.formatSize(used) + "B / " + DataHelper.formatSize(max) + 'B');
    if (_consecutiveErrors == 1) {
        _log.log(Log.CRIT, "Router appears hung, or there is severe network congestion. Watchdog starts barking!");
        _context.router().eventLog().addEvent(EventLog.WATCHDOG);
        // This works on linux...
        // It won't on windows, and we can't call i2prouter.bat either, it does something
        // completely different...
        long now = _context.clock().now();
        // Rate-limit thread dumps to at most one per MIN_DUMP_INTERVAL
        if (now - _lastDump > MIN_DUMP_INTERVAL) {
            _lastDump = now;
            ThreadDump.dump(_context, 10);
        }
    }
}

/**
 * @param stat the stat name to look up
 * @return the 1-minute average of the named rate, or 0 if the stat or
 *         its 1-minute period is unavailable
 */
private double oneMinuteAverage(String stat) {
    RateStat rs = _context.statManager().getRate(stat);
    Rate r = (rs != null) ? rs.getRate(60 * 1000) : null;
    return (r != null) ? r.getAverageValue() : 0;
}
Use of net.i2p.stat.RateStat in project i2p.i2p by i2p:
Class ProfileOrganizerRenderer, method davg().
/**
 * Renders the peer's failed-lookup rate for the given averaging period
 * as a rounded whole percentage, e.g. "12%".
 *
 * @param dbh  the peer's DB history to read the failed-lookup rate from
 * @param rate the averaging period in milliseconds
 * @param ra   scratch RateAverages, overwritten by this call
 * @return the percentage string; "0%" when no data is available
 */
private String davg(DBHistory dbh, long rate, RateAverages ra) {
    RateStat failedLookups = dbh.getFailedLookupRate();
    Rate r = (failedLookups != null) ? failedLookups.getRate(rate) : null;
    if (r == null)
        return "0%";
    r.computeAverages(ra, false);
    if (ra.getTotalEventCount() <= 0)
        return "0%";
    // +0.5 rounds to the nearest whole percent on the cast
    int pct = (int) (0.5 + 100 * ra.getAverage());
    return pct + "%";
}
Aggregations