Search in sources :

Example 6 with RateAverages

Usage of net.i2p.stat.RateAverages in the project i2p.i2p (by i2p).

From the class RouterThrottleImpl, method acceptTunnelRequest.

/**
 *  Decide whether this router should accept a request to participate in a tunnel.
 *
 *  If we should send a reject, return a nonzero reject code.
 *  Anything that causes us to drop a request instead of rejecting it
 *  must go in BuildHandler.handleInboundRequest(), not here.
 *
 *  Checks are applied in order: graceful shutdown, startup grace period,
 *  transport send-processing delay, hard participating-tunnel cap,
 *  probabilistic throttle on rapid tunnel-count growth, probabilistic
 *  throttle on rising tunnel test times (currently accept-only — the
 *  reject branch is commented out), and finally the bandwidth check
 *  via allowTunnel().
 *
 *  Side effects: updates the human-readable tunnel status string and
 *  records several router.throttle* stats on the reject paths.
 *
 *  @return 0 (TUNNEL_ACCEPT) for accept or nonzero reject code
 */
public int acceptTunnelRequest() {
    // Shutting down: refuse everything, since tunnels we accept now would
    // outlive us.
    if (_context.router().gracefulShutdownInProgress()) {
        if (_log.shouldLog(Log.WARN))
            _log.warn("Refusing tunnel request since we are shutting down ASAP");
        setShutdownStatus();
        // Don't use CRIT because this tells everybody we are shutting down
        return TunnelHistory.TUNNEL_REJECT_BANDWIDTH;
    }
    // Reject during the startup grace period — stats aren't warmed up yet.
    // Don't use CRIT because we don't want peers to think we're failing
    if (_context.router().getUptime() < DEFAULT_REJECT_STARTUP_TIME) {
        setTunnelStatus(_x("Rejecting tunnels: Starting up"));
        return TunnelHistory.TUNNEL_REJECT_BANDWIDTH;
    }
    /**
     ** Moved to BuildHandler
     *        long lag = _context.jobQueue().getMaxLag();
     *        if (lag > JOB_LAG_LIMIT_TUNNEL) {
     *            if (_log.shouldLog(Log.WARN))
     *                _log.warn("Refusing tunnel request, as the job lag is " + lag);
     *            _context.statManager().addRateData("router.throttleTunnelCause", lag);
     *            setTunnelStatus(_x("Rejecting tunnels: High job lag"));
     *            return TunnelHistory.TUNNEL_REJECT_BANDWIDTH;
     *        }
     ***
     */
    // Reusable scratch object for computing rate averages below
    // (avoids allocating a new RateAverages per call).
    RateAverages ra = RateAverages.getTemp();
    // TODO
    // This stat is highly dependent on transport mix.
    // For NTCP, it is queueing delay only, ~25ms
    // For SSU it is queueing + ack time, ~1000 ms.
    // (SSU acks may be delayed so it is much more than just RTT... and the delay may
    // counterintuitively be more when there is low traffic)
    // Change the stat or pick a better stat.
    RateStat rs = _context.statManager().getRate("transport.sendProcessingTime");
    Rate r = null;
    if (rs != null)
        r = rs.getRate(60 * 1000);
    // Reject tunnels if the time to process messages and send them is too large. Too much time implies congestion.
    if (r != null) {
        r.computeAverages(ra, false);
        int maxProcessingTime = _context.getProperty(PROP_MAX_PROCESSINGTIME, DEFAULT_MAX_PROCESSINGTIME);
        // Set throttling if necessary: trip if the average is within 90% of
        // the limit, or either the current or the previous period exceeds it.
        if ((ra.getAverage() > maxProcessingTime * 0.9 || ra.getCurrent() > maxProcessingTime || ra.getLast() > maxProcessingTime)) {
            if (_log.shouldLog(Log.WARN)) {
                _log.warn("Refusing tunnel request due to sendProcessingTime " + ((int) ra.getCurrent()) + " / " + ((int) ra.getLast()) + " / " + ((int) ra.getAverage()) + " / " + maxProcessingTime + " current/last/avg/max ms");
            }
            setTunnelStatus(_x("Rejecting tunnels: High message delay"));
            return TunnelHistory.TUNNEL_REJECT_BANDWIDTH;
        }
    }
    // Hard cap on the number of tunnels we will participate in at once.
    int numTunnels = _context.tunnelManager().getParticipatingCount();
    int maxTunnels = _context.getProperty(PROP_MAX_TUNNELS, DEFAULT_MAX_TUNNELS);
    if (numTunnels >= maxTunnels) {
        if (_log.shouldLog(Log.WARN))
            _log.warn("Refusing tunnel request since we are already participating in " + numTunnels + " (our max is " + maxTunnels + ")");
        _context.statManager().addRateData("router.throttleTunnelMaxExceeded", numTunnels);
        setTunnelStatus(_x("Rejecting tunnels: Limit reached"));
        return TunnelHistory.TUNNEL_REJECT_BANDWIDTH;
    }
    /*
         * Throttle if we go above a minimum level of tunnels AND the maximum participating
         * tunnels is default or lower.
         *
         * Lag based statistics use a moving average window (of for example 10 minutes), they are therefore
         * sensitive to sudden rapid growth of load, which are not instantly detected by these metrics.
         * Reduce tunnel growth if we are growing faster than the lag based metrics can detect reliably.
         */
    // Note: "DEFAULT_MAX_TUNNELS >= maxTunnels" means this growth throttle is
    // skipped when the operator has raised the limit above the default.
    if ((numTunnels > getMinThrottleTunnels()) && (DEFAULT_MAX_TUNNELS >= maxTunnels)) {
        Rate avgTunnels = _context.statManager().getRate("tunnel.participatingTunnels").getRate(10 * 60 * 1000);
        if (avgTunnels != null) {
            double avg = avgTunnels.getAvgOrLifetimeAvg();
            double tunnelGrowthFactor = getTunnelGrowthFactor();
            int min = getMinThrottleTunnels();
            // Floor the average at the minimum so a near-zero history doesn't
            // make any small count look like explosive growth.
            if (avg < min)
                avg = min;
            // if the current tunnel count is higher than 1.3 * the average...
            if ((avg > 0) && (avg * tunnelGrowthFactor < numTunnels)) {
                // we're accelerating, lets try not to take on too much too fast
                double probAccept = (avg * tunnelGrowthFactor) / numTunnels;
                // square the decelerator for tunnel counts
                probAccept *= probAccept;
                // Accept with probability probAccept (compared against a
                // uniform draw in [0, 100)).
                int v = _context.random().nextInt(100);
                if (v < probAccept * 100) {
                    // ok
                    if (_log.shouldLog(Log.INFO))
                        _log.info("Probabalistically accept tunnel request (p=" + probAccept + " v=" + v + " avg=" + avg + " current=" + numTunnels + ")");
                } else {
                    if (_log.shouldLog(Log.WARN))
                        _log.warn("Probabalistically refusing tunnel request (avg=" + avg + " current=" + numTunnels + ")");
                    _context.statManager().addRateData("router.throttleTunnelProbTooFast", (long) (numTunnels - avg));
                    // setTunnelStatus("Rejecting " + (100 - (int) probAccept*100) + "% of tunnels: High number of requests");
                    // Status text reflects how aggressively we are rejecting.
                    if (probAccept <= 0.5)
                        setTunnelStatus(_x("Rejecting most tunnels: High number of requests"));
                    else if (probAccept <= 0.9)
                        setTunnelStatus(_x("Accepting most tunnels"));
                    else
                        setTunnelStatus(_x("Accepting tunnels"));
                    return TunnelHistory.TUNNEL_REJECT_PROBABALISTIC_REJECT;
                }
            } else {
                if (_log.shouldLog(Log.INFO))
                    _log.info("Accepting tunnel request, since the tunnel count average is " + avg + " and we only have " + numTunnels + ")");
            }
        }
    }
    // Throttle on tunnel test time growth: compare the 1-minute average
    // against the 10-minute baseline. Currently only the accept branch is
    // live — the probabilistic reject below is commented out.
    double tunnelTestTimeGrowthFactor = getTunnelTestTimeGrowthFactor();
    Rate tunnelTestTime1m = _context.statManager().getRate("tunnel.testSuccessTime").getRate(1 * 60 * 1000);
    Rate tunnelTestTime10m = _context.statManager().getRate("tunnel.testSuccessTime").getRate(10 * 60 * 1000);
    if ((tunnelTestTime1m != null) && (tunnelTestTime10m != null) && (tunnelTestTime1m.getLastEventCount() > 0)) {
        double avg1m = tunnelTestTime1m.getAverageValue();
        double avg10m = tunnelTestTime10m.getAvgOrLifetimeAvg();
        if (avg10m < 5000)
            // minimum before complaining
            avg10m = 5000;
        if ((avg10m > 0) && (avg1m > avg10m * tunnelTestTimeGrowthFactor)) {
            double probAccept = (avg10m * tunnelTestTimeGrowthFactor) / avg1m;
            // square the decelerator for test times
            probAccept = probAccept * probAccept;
            int v = _context.random().nextInt(100);
            if (v < probAccept * 100) {
                // ok
                if (_log.shouldLog(Log.INFO))
                    _log.info("Probabalistically accept tunnel request (p=" + probAccept + " v=" + v + " test time avg 1m=" + avg1m + " 10m=" + avg10m + ")");
            // } else if (false) {
            // if (_log.shouldLog(Log.WARN))
            // _log.warn("Probabalistically refusing tunnel request (test time avg 1m=" + avg1m
            // + " 10m=" + avg10m + ")");
            // _context.statManager().addRateData("router.throttleTunnelProbTestSlow", (long)(avg1m-avg10m), 0);
            // setTunnelStatus("Rejecting " + ((int) probAccept*100) + "% of tunnels: High test time");
            // return TunnelHistory.TUNNEL_REJECT_PROBABALISTIC_REJECT;
            }
        } else {
        // not yet...
        // if (_log.shouldLog(Log.INFO))
        // _log.info("Accepting tunnel request, since 60m test time average is " + avg10m
        // + " and past 1m only has " + avg1m + ")");
        }
    }
    // ok, we're not hosed, but can we handle the bandwidth requirements
    // of another tunnel?
    rs = _context.statManager().getRate("tunnel.participatingMessageCountAvgPerTunnel");
    r = null;
    double messagesPerTunnel = DEFAULT_MESSAGES_PER_TUNNEL_ESTIMATE;
    if (rs != null) {
        r = rs.getRate(60 * 1000);
        if (r != null)
            messagesPerTunnel = r.computeAverages(ra, true).getAverage();
    }
    // Never assume less traffic than the default estimate.
    if (messagesPerTunnel < DEFAULT_MESSAGES_PER_TUNNEL_ESTIMATE)
        messagesPerTunnel = DEFAULT_MESSAGES_PER_TUNNEL_ESTIMATE;
    // Estimated total bytes committed across all participating tunnels,
    // assuming every message is a full PREPROCESSED_SIZE payload.
    double bytesAllocated = messagesPerTunnel * numTunnels * PREPROCESSED_SIZE;
    if (!allowTunnel(bytesAllocated, numTunnels)) {
        // allowTunnel() sets its own status message on rejection —
        // presumably; verify in its implementation.
        _context.statManager().addRateData("router.throttleTunnelBandwidthExceeded", (long) bytesAllocated);
        return TunnelHistory.TUNNEL_REJECT_BANDWIDTH;
    }
    /**
     *        int queuedRequests = _context.tunnelManager().getInboundBuildQueueSize();
     *        int timePerRequest = 1000;
     *        rs = _context.statManager().getRate("tunnel.decryptRequestTime");
     *        if (rs != null) {
     *            r = rs.getRate(60*1000);
     *            if (r.getLastEventCount() > 0)
     *                timePerRequest = (int)r.getAverageValue();
     *            else
     *                timePerRequest = (int)rs.getLifetimeAverageValue();
     *        }
     *        float pctFull = (queuedRequests * timePerRequest) / (4*1000f);
     *        double pReject = Math.pow(pctFull, 16); //1 - ((1-pctFull) * (1-pctFull));
     **
     */
    // let it in because we drop overload- rejecting may be overkill,
    // especially since we've done the cpu-heavy lifting to figure out
    // whats up
    /*
        if ( (pctFull >= 1) || (pReject >= _context.random().nextFloat()) ) {
            if (_log.shouldLog(Log.WARN))
                _log.warn("Rejecting a new tunnel request because we have too many pending requests (" + queuedRequests 
                          + " at " + timePerRequest + "ms each, %full = " + pctFull);
            _context.statManager().addRateData("router.throttleTunnelQueueOverload", queuedRequests, timePerRequest);
            return TunnelHistory.TUNNEL_REJECT_TRANSIENT_OVERLOAD;
        }
        */
    // ok, all is well, let 'er in
    _context.statManager().addRateData("tunnel.bytesAllocatedAtAccept", (long) bytesAllocated, 60 * 10 * 1000);
    // + " tunnels with lag of " + lag + ")");
    return TUNNEL_ACCEPT;
}
Also used : RateStat(net.i2p.stat.RateStat) Rate(net.i2p.stat.Rate) RateAverages(net.i2p.stat.RateAverages)

Aggregations

Rate (net.i2p.stat.Rate)6 RateAverages (net.i2p.stat.RateAverages)6 RateStat (net.i2p.stat.RateStat)4 Hash (net.i2p.data.Hash)2 RouterInfo (net.i2p.data.router.RouterInfo)2 DBHistory (net.i2p.router.peermanager.DBHistory)2 PeerProfile (net.i2p.router.peermanager.PeerProfile)2 TreeSet (java.util.TreeSet)1 ConvertToHash (net.i2p.util.ConvertToHash)1