
Example 6 with Job

Use of net.i2p.router.Job in project i2p.i2p by i2p.

From the class FloodfillMonitorJob, method runJob:

public synchronized void runJob() {
    boolean wasFF = _facade.floodfillEnabled();
    boolean ff = shouldBeFloodfill();
    _facade.setFloodfillEnabledFromMonitor(ff);
    if (ff != wasFF) {
        if (ff) {
            getContext().router().eventLog().addEvent(EventLog.BECAME_FLOODFILL);
        } else {
            getContext().router().eventLog().addEvent(EventLog.NOT_FLOODFILL);
        }
        getContext().router().rebuildRouterInfo(true);
        Job routerInfoFlood = new FloodfillRouterInfoFloodJob(getContext(), _facade);
        if (getContext().router().getUptime() < 5 * 60 * 1000) {
            if (!_deferredFlood) {
                // Needed to prevent race if router.floodfillParticipant=true (not auto)
                // Don't queue multiples
                _deferredFlood = true;
                routerInfoFlood.getTiming().setStartAfter(getContext().clock().now() + 5 * 60 * 1000);
                getContext().jobQueue().addJob(routerInfoFlood);
                if (_log.shouldLog(Log.DEBUG))
                    _log.logAlways(Log.DEBUG, "Deferring our FloodfillRouterInfoFloodJob run because of low uptime.");
            }
        } else {
            routerInfoFlood.runJob();
            if (_log.shouldLog(Log.DEBUG)) {
                _log.logAlways(Log.DEBUG, "Running FloodfillRouterInfoFloodJob");
            }
        }
    }
    if (_log.shouldLog(Log.INFO))
        _log.info("Should we be floodfill? " + ff);
    int delay = (REQUEUE_DELAY / 2) + getContext().random().nextInt(REQUEUE_DELAY);
    // TODO: somehow assess the size of the network to make this adaptive?
    if (!ff)
        // this was 7, reduced for moar FFs --zab
        delay *= 4;
    requeue(delay);
}
Also used : Job(net.i2p.router.Job)
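
The examples on this page all revolve around net.i2p.router.Job and the router's job queue. For orientation, here is a minimal sketch of how such a job is typically defined and scheduled, assuming the JobImpl convenience base class from net.i2p.router; the class name LogUptimeJob and the one-minute delay are purely illustrative:

import net.i2p.router.JobImpl;
import net.i2p.router.RouterContext;

/** Illustrative only: logs the router uptime once, about a minute from now. */
class LogUptimeJob extends JobImpl {
    public LogUptimeJob(RouterContext ctx) {
        super(ctx);
    }

    public String getName() {
        return "Log uptime";
    }

    public void runJob() {
        // executed on a job queue thread once the start time has passed
        System.out.println("Uptime: " + getContext().router().getUptime() + " ms");
    }
}

Scheduling then mirrors the pattern in runJob() above:

Job j = new LogUptimeJob(ctx);
j.getTiming().setStartAfter(ctx.clock().now() + 60 * 1000);
ctx.jobQueue().addJob(j);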

Example 7 with Job

Use of net.i2p.router.Job in project i2p.i2p by i2p.

From the class FloodfillNetworkDatabaseFacade, method flood:

/**
 *  Send to a subset of all floodfill peers.
 *  We do this to implement Kademlia within the floodfills, i.e.
 *  we flood to those closest to the key.
 */
public void flood(DatabaseEntry ds) {
    Hash key = ds.getHash();
    RouterKeyGenerator gen = _context.routerKeyGenerator();
    Hash rkey = gen.getRoutingKey(key);
    FloodfillPeerSelector sel = (FloodfillPeerSelector) getPeerSelector();
    List<Hash> peers = sel.selectFloodfillParticipants(rkey, MAX_TO_FLOOD, getKBuckets());
    // todo key cert skip?
    long until = gen.getTimeTillMidnight();
    if (until < NEXT_RKEY_LS_ADVANCE_TIME || (ds.getType() == DatabaseEntry.KEY_TYPE_ROUTERINFO && until < NEXT_RKEY_RI_ADVANCE_TIME)) {
        // to avoid lookup failures after midnight, also flood to some closest to the
        // next routing key for a period of time before midnight.
        Hash nkey = gen.getNextRoutingKey(key);
        List<Hash> nextPeers = sel.selectFloodfillParticipants(nkey, NEXT_FLOOD_QTY, getKBuckets());
        int i = 0;
        for (Hash h : nextPeers) {
            // Skip the router the entry itself describes; it floods its own RouterInfo.
            // But other implementations may not...
            if (h.equals(key))
                continue;
            // todo key cert skip?
            if (!peers.contains(h)) {
                peers.add(h);
                i++;
            }
        }
        if (i > 0 && _log.shouldLog(Log.INFO))
            _log.info("Flooding the entry for " + key + " to " + i + " more, just before midnight");
    }
    int flooded = 0;
    for (int i = 0; i < peers.size(); i++) {
        Hash peer = peers.get(i);
        RouterInfo target = lookupRouterInfoLocally(peer);
        if ((target == null) || (_context.banlist().isBanlisted(peer)))
            continue;
        // Don't flood a RouterInfo back to the router it describes; it floods its own RI.
        // But other implementations may not...
        if (ds.getType() == DatabaseEntry.KEY_TYPE_ROUTERINFO && peer.equals(key))
            continue;
        if (peer.equals(_context.routerHash()))
            continue;
        DatabaseStoreMessage msg = new DatabaseStoreMessage(_context);
        msg.setEntry(ds);
        OutNetMessage m = new OutNetMessage(_context, msg, _context.clock().now() + FLOOD_TIMEOUT, FLOOD_PRIORITY, target);
        Job floodFail = new FloodFailedJob(_context, peer);
        m.setOnFailedSendJob(floodFail);
        // we want to give credit on success, even if we aren't sure,
        // because otherwise no use noting failure
        Job floodGood = new FloodSuccessJob(_context, peer);
        m.setOnSendJob(floodGood);
        _context.commSystem().processMessage(m);
        flooded++;
        if (_log.shouldLog(Log.INFO))
            _log.info("Flooding the entry for " + key.toBase64() + " to " + peer.toBase64());
    }
    if (_log.shouldLog(Log.INFO))
        _log.info("Flooded the data to " + flooded + " of " + peers.size() + " peers");
}
Also used : OutNetMessage(net.i2p.router.OutNetMessage) RouterInfo(net.i2p.data.router.RouterInfo) RouterKeyGenerator(net.i2p.data.router.RouterKeyGenerator) DatabaseStoreMessage(net.i2p.data.i2np.DatabaseStoreMessage) Hash(net.i2p.data.Hash) Job(net.i2p.router.Job)
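
The selection of peers "closest to the key" described in the comment above is done by FloodfillPeerSelector, whose implementation is not shown here. The underlying idea is Kademlia's XOR metric: the distance between the routing key and a peer's hash is their bitwise XOR, read as an unsigned integer, and the selector prefers the smallest distances. A rough, self-contained sketch of that ordering, not the actual i2p.i2p selector:

import java.math.BigInteger;
import java.util.Comparator;
import java.util.List;

import net.i2p.data.Hash;

/** Illustrative only: order candidate peers by XOR distance from a routing key. */
class XorCloseness {

    static BigInteger distance(Hash a, Hash b) {
        byte[] x = a.getData();
        byte[] y = b.getData();
        byte[] d = new byte[x.length];
        for (int i = 0; i < x.length; i++)
            d[i] = (byte) (x[i] ^ y[i]);
        // signum 1: treat the XOR result as an unsigned magnitude
        return new BigInteger(1, d);
    }

    static void sortByCloseness(List<Hash> peers, final Hash rkey) {
        peers.sort(Comparator.comparing((Hash h) -> distance(rkey, h)));
    }
}

The real selector applies further filtering on top of this ordering; note, for example, that the banlist check happens separately inside flood() itself.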

Example 8 with Job

Use of net.i2p.router.Job in project i2p.i2p by i2p.

From the class FloodfillNetworkDatabaseFacade, method startup:

@Override
public synchronized void startup() {
    super.startup();
    _context.jobQueue().addJob(_ffMonitor);
    _lookupThrottler = new LookupThrottler();
    // refresh old routers
    Job rrj = new RefreshRoutersJob(_context, this);
    rrj.getTiming().setStartAfter(_context.clock().now() + 5 * 60 * 1000);
    _context.jobQueue().addJob(rrj);
}
Also used : Job(net.i2p.router.Job)

Example 9 with Job

Use of net.i2p.router.Job in project i2p.i2p by i2p.

From the class TransportImpl, method afterSend:

/**
 * The transport is done sending this message.  This is the method that actually
 * does all of the cleanup - firing off jobs, requeueing, updating stats, etc.
 *
 * @param msg message in question
 * @param sendSuccessful true if the peer received it
 * @param msToSend how long it took to transfer the data to the peer
 * @param allowRequeue true if we should try other transports if available
 */
protected void afterSend(OutNetMessage msg, boolean sendSuccessful, boolean allowRequeue, long msToSend) {
    if (msg.getTarget() == null) {
        // Bail out now as it will NPE in a dozen places below.
        return;
    }
    boolean log = false;
    if (sendSuccessful)
        msg.timestamp("afterSend(successful)");
    else
        msg.timestamp("afterSend(failed)");
    if (!sendSuccessful)
        msg.transportFailed(getStyle());
    if (msToSend > 1500) {
        if (_log.shouldLog(Log.INFO))
            _log.info(getStyle() + " afterSend slow: " + (sendSuccessful ? "success " : "FAIL ") + msg.getMessageSize() + " byte " + msg.getMessageType() + ' ' + msg.getMessageId() + " to " + msg.getTarget().getIdentity().calculateHash().toBase64().substring(0, 6) + " took " + msToSend + " ms");
    }
    // if (true)
    // _log.error("(not error) I2NP message sent? " + sendSuccessful + " " + msg.getMessageId() + " after " + msToSend + "/" + msg.getTransmissionTime());
    long lifetime = msg.getLifetime();
    if (lifetime > 3000) {
        int level = Log.INFO;
        if (!sendSuccessful)
            level = Log.DEBUG;
        if (_log.shouldLog(level))
            _log.log(level, getStyle() + " afterSend slow (" + (sendSuccessful ? "success " : "FAIL ") + lifetime + "/" + msToSend + "): " + msg.getMessageSize() + " byte " + msg.getMessageType() + " " + msg.getMessageId() + " from " + _context.routerHash().toBase64().substring(0, 6) + " to " + msg.getTarget().getIdentity().calculateHash().toBase64().substring(0, 6) + ": " + msg.toString());
    } else {
        if (_log.shouldLog(Log.INFO))
            _log.info(getStyle() + " afterSend: " + (sendSuccessful ? "success " : "FAIL ") + msg.getMessageSize() + " byte " + msg.getMessageType() + " " + msg.getMessageId() + " from " + _context.routerHash().toBase64().substring(0, 6) + " to " + msg.getTarget().getIdentity().calculateHash().toBase64().substring(0, 6) + "\n" + msg.toString());
    }
    if (sendSuccessful) {
        if (_log.shouldLog(Log.DEBUG))
            _log.debug(getStyle() + " Sent " + msg.getMessageType() + " successfully to " + msg.getTarget().getIdentity().getHash().toBase64());
        Job j = msg.getOnSendJob();
        if (j != null)
            _context.jobQueue().addJob(j);
        log = true;
        msg.discardData();
    } else {
        if (_log.shouldLog(Log.INFO))
            _log.info(getStyle() + " Failed to send " + msg.getMessageType() + " to " + msg.getTarget().getIdentity().getHash().toBase64() + " (details: " + msg + ')');
        if (msg.getExpiration() < _context.clock().now())
            _context.statManager().addRateData("transport.expiredOnQueueLifetime", lifetime);
        if (allowRequeue) {
            if (((msg.getExpiration() <= 0) || (msg.getExpiration() > _context.clock().now())) && (msg.getMessage() != null)) {
                // this may not be the last transport available - keep going
                _context.outNetMessagePool().add(msg);
            // don't discard the data yet!
            } else {
                if (_log.shouldLog(Log.INFO))
                    _log.info("No more time left (" + new Date(msg.getExpiration()) + ", expiring without sending successfully the " + msg.getMessageType());
                if (msg.getOnFailedSendJob() != null)
                    _context.jobQueue().addJob(msg.getOnFailedSendJob());
                MessageSelector selector = msg.getReplySelector();
                if (selector != null) {
                    _context.messageRegistry().unregisterPending(msg);
                }
                log = true;
                msg.discardData();
            }
        } else {
            MessageSelector selector = msg.getReplySelector();
            if (_log.shouldLog(Log.INFO))
                _log.info("Failed and no requeue allowed for a " + msg.getMessageSize() + " byte " + msg.getMessageType() + " message with selector " + selector, new Exception("fail cause"));
            if (msg.getOnFailedSendJob() != null)
                _context.jobQueue().addJob(msg.getOnFailedSendJob());
            if (msg.getOnFailedReplyJob() != null)
                _context.jobQueue().addJob(msg.getOnFailedReplyJob());
            if (selector != null)
                _context.messageRegistry().unregisterPending(msg);
            log = true;
            msg.discardData();
        }
    }
    if (log) {
    /*
            String type = msg.getMessageType();
            // the udp transport logs some further details
            _context.messageHistory().sendMessage(type, msg.getMessageId(),
                                                  msg.getExpiration(),
                                                  msg.getTarget().getIdentity().getHash(),
                                                  sendSuccessful);
             */
    }
    long now = _context.clock().now();
    long sendTime = now - msg.getSendBegin();
    long allTime = now - msg.getCreated();
    if (allTime > 5 * 1000) {
        if (_log.shouldLog(Log.INFO))
            _log.info("Took too long from preparation to afterSend(ok? " + sendSuccessful + "): " + allTime + "ms/" + sendTime + "ms after failing on: " + msg.getFailedTransports() + " and succeeding on " + getStyle());
        if ((allTime > 60 * 1000) && (sendSuccessful)) {
            // VERY slow
            if (_log.shouldLog(Log.WARN))
                _log.warn("Severe latency? More than a minute slow? " + msg.getMessageType() + " of id " + msg.getMessageId() + " (send begin on " + new Date(msg.getSendBegin()) + " / created on " + new Date(msg.getCreated()) + "): " + msg);
            _context.messageHistory().messageProcessingError(msg.getMessageId(), msg.getMessageType(), "Took too long to send [" + allTime + "ms]");
        }
    }
    if (sendSuccessful) {
        // TODO fix this stat for SSU ticket #698
        _context.statManager().addRateData("transport.sendProcessingTime", lifetime);
        // object churn. 33 ms for NTCP and 788 for SSU, but meaningless due to
        // differences in how it's computed (immediate vs. round trip)
        // _context.statManager().addRateData("transport.sendProcessingTime." + getStyle(), lifetime, 0);
        _context.profileManager().messageSent(msg.getTarget().getIdentity().getHash(), getStyle(), sendTime, msg.getMessageSize());
        _context.statManager().addRateData("transport.sendMessageSize", msg.getMessageSize(), sendTime);
    } else {
        _context.profileManager().messageFailed(msg.getTarget().getIdentity().getHash(), getStyle());
        _context.statManager().addRateData("transport.sendMessageFailureLifetime", lifetime);
    }
}
Also used : MessageSelector(net.i2p.router.MessageSelector) Job(net.i2p.router.Job) Date(java.util.Date) IOException(java.io.IOException)
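
The onSend and onFailedSend jobs that afterSend() queues here are the callback jobs callers attach before handing a message to the comm system, as Example 7 does with FloodSuccessJob and FloodFailedJob. A condensed sketch of that caller-side wiring, combining the two examples; JobImpl is assumed as the usual base class, and the stat name, ten-second timeout, and priority value are illustrative:

import net.i2p.data.i2np.I2NPMessage;
import net.i2p.data.router.RouterInfo;
import net.i2p.router.Job;
import net.i2p.router.JobImpl;
import net.i2p.router.OutNetMessage;
import net.i2p.router.RouterContext;

/** Illustrative only: attach a success-callback job that afterSend() will later queue. */
class SendWithCallback {

    static void send(final RouterContext ctx, I2NPMessage msg, RouterInfo target) {
        OutNetMessage m = new OutNetMessage(ctx, msg, ctx.clock().now() + 10 * 1000, 300, target);
        Job onSuccess = new JobImpl(ctx) {
            public String getName() { return "Note send success"; }
            public void runJob() {
                // queued by afterSend() once the transport reports a successful send
                ctx.statManager().addRateData("example.sendSuccess", 1); // illustrative stat name
            }
        };
        m.setOnSendJob(onSuccess);
        ctx.commSystem().processMessage(m);
    }
}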

Example 10 with Job

Use of net.i2p.router.Job in project i2p.i2p by i2p.

From the class ClientManager, method distributeMessage:

/**
 * Distribute message to a local or remote destination.
 * @param msgId the router's ID for this message
 * @param messageNonce the client's ID for this message
 * @param flags ignored for local
 */
void distributeMessage(Destination fromDest, Destination toDest, Payload payload, MessageId msgId, long messageNonce, long expiration, int flags) {
    // check if there is a runner for it
    ClientConnectionRunner runner = getRunner(toDest);
    if (runner != null) {
        if (_log.shouldLog(Log.DEBUG))
            _log.debug("Message " + msgId + " is targeting a local destination.  distribute it as such");
        ClientConnectionRunner sender = getRunner(fromDest);
        if (sender == null) {
            // sender went away
            return;
        }
        // run this inline so we don't clog up the job queue
        Job j = new DistributeLocal(toDest, runner, sender, fromDest, payload, msgId, messageNonce);
        // _ctx.jobQueue().addJob(j);
        j.runJob();
    } else {
        // remote.  w00t
        if (_log.shouldLog(Log.DEBUG))
            _log.debug("Message " + msgId + " is targeting a REMOTE destination!  Added to the client message pool");
        runner = getRunner(fromDest);
        if (runner == null) {
            // sender went away
            return;
        }
        SessionConfig config = runner.getConfig(fromDest.calculateHash());
        if (config == null)
            return;
        ClientMessage msg = new ClientMessage(toDest, payload, config, fromDest, msgId, messageNonce, expiration, flags);
        _ctx.clientMessagePool().add(msg, true);
    }
}
Also used : SessionConfig(net.i2p.data.i2cp.SessionConfig) ClientMessage(net.i2p.router.ClientMessage) Job(net.i2p.router.Job)
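
The branch above reflects a small but deliberate design choice: local delivery is cheap, so DistributeLocal is run inline with j.runJob() rather than paying a job-queue round trip, while remote messages go to the client message pool. Reduced to its core, the pattern looks roughly like this; the class and parameter names are illustrative:

import net.i2p.router.Job;
import net.i2p.router.RouterContext;

/** Illustrative only: run cheap work inline, queue everything else. */
class Dispatch {

    static void deliver(RouterContext ctx, Job j, boolean cheapLocalWork) {
        if (cheapLocalWork) {
            // run on the caller's thread; avoids job-queue latency and churn
            j.runJob();
        } else {
            // let a job-queue thread pick it up later
            ctx.jobQueue().addJob(j);
        }
    }
}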

Aggregations

Job (net.i2p.router.Job): 20
Hash (net.i2p.data.Hash): 9
ReplyJob (net.i2p.router.ReplyJob): 7
RouterInfo (net.i2p.data.router.RouterInfo): 4
TunnelInfo (net.i2p.router.TunnelInfo): 3
IOException (java.io.IOException): 2
ArrayList (java.util.ArrayList): 2
TunnelId (net.i2p.data.TunnelId): 2
DatabaseLookupMessage (net.i2p.data.i2np.DatabaseLookupMessage): 2
DatabaseStoreMessage (net.i2p.data.i2np.DatabaseStoreMessage): 2
I2NPMessage (net.i2p.data.i2np.I2NPMessage): 2
MessageSelector (net.i2p.router.MessageSelector): 2
OutNetMessage (net.i2p.router.OutNetMessage): 2
SendMessageDirectJob (net.i2p.router.message.SendMessageDirectJob): 2
ReadConfigJob (net.i2p.router.tasks.ReadConfigJob): 2
Date (java.util.Date): 1
SessionConfig (net.i2p.data.i2cp.SessionConfig): 1
DeliveryStatusMessage (net.i2p.data.i2np.DeliveryStatusMessage): 1
TunnelGatewayMessage (net.i2p.data.i2np.TunnelGatewayMessage): 1
RouterKeyGenerator (net.i2p.data.router.RouterKeyGenerator): 1