Search in sources :

Example 6 with TunnelInfo

use of net.i2p.router.TunnelInfo in project i2p.i2p by i2p.

This example shows the clearCaches method of the OutboundCache class.

/**
 * Invalidate our cached routing state for this destination pair after a send
 * failure, improving the odds that the next attempt picks fresh leases/tunnels.
 * This is best-effort: the failure is typically reported long after the fact,
 * and a random re-pick may land on the same bad entries again.
 *
 * @param hashPair cache key for the (from, to) destination pair
 * @param lease may be null
 * @param inTunnel may be null
 * @param outTunnel may be null
 */
void clearCaches(HashPair hashPair, Lease lease, TunnelInfo inTunnel, TunnelInfo outTunnel) {
    // An inbound tunnel means we wanted an ack, so our lease set rode along;
    // drop the cached copy so it is re-bundled next time.
    if (inTunnel != null)
        leaseSetCache.remove(hashPair);
    // Two-arg remove: evict only if the mapping still points at this exact
    // lease (another thread may have already replaced it).
    if (lease != null)
        leaseCache.remove(hashPair, lease);
    if (outTunnel == null)
        return;
    synchronized (tunnelCache) {
        TunnelInfo cached = backloggedTunnelCache.get(hashPair);
        if (cached != null && cached.equals(outTunnel))
            backloggedTunnelCache.remove(hashPair);
        cached = tunnelCache.get(hashPair);
        if (cached != null && cached.equals(outTunnel))
            tunnelCache.remove(hashPair);
    }
}
Also used : TunnelInfo(net.i2p.router.TunnelInfo)

Example 7 with TunnelInfo

use of net.i2p.router.TunnelInfo in project i2p.i2p by i2p.

This example shows the configureNewTunnel method of the TunnelPool class.

/**
 *  Build the creator config for a new tunnel in this pool: pick the peers
 *  (possibly by reusing the peer list of an expiring tunnel), set per-hop
 *  keys and expirations, and register the config as in-progress.
 *
 *  @param forceZeroHop if true, build a tunnel containing only ourselves
 *  @return the new config (also added to _inProgress), or null on failure
 */
private PooledTunnelCreatorConfig configureNewTunnel(boolean forceZeroHop) {
    TunnelPoolSettings settings = getSettings();
    // peers for new tunnel, including us, ENDPOINT FIRST
    List<Hash> peers = null;
    long now = _context.clock().now();
    long expiration = now + TunnelPoolSettings.DEFAULT_DURATION;
    if (!forceZeroHop) {
        int len = settings.getLengthOverride();
        if (len < 0)
            len = settings.getLength();
        // For client (non-exploratory) pools, half the time attempt to rebuild
        // with the peers of an existing tunnel that is expiring soon.
        if (len > 0 && (!settings.isExploratory()) && _context.random().nextBoolean()) {
            // look for a tunnel to reuse, if the right length and expiring soon
            // ignore variance for now.
            // us
            len++;
            synchronized (_tunnels) {
                for (TunnelInfo ti : _tunnels) {
                    // candidate: long enough, expiring within 3 minutes,
                    // and whose peers have not already been reused
                    if (ti.getLength() >= len && ti.getExpiration() < now + 3 * 60 * 1000 && !ti.wasReused()) {
                        // flag so no concurrent build reuses the same peers
                        ti.setReused();
                        len = ti.getLength();
                        peers = new ArrayList<Hash>(len);
                        // peers list is ordered endpoint first, but cfg.getPeer() is ordered gateway first
                        for (int i = len - 1; i >= 0; i--) {
                            peers.add(ti.getPeer(i));
                        }
                        break;
                    }
                }
            }
        }
        if (peers == null) {
            // no reusable tunnel found (or reuse not attempted): select fresh peers
            setLengthOverride();
            peers = _peerSelector.selectPeers(settings);
        }
        if ((peers == null) || (peers.isEmpty())) {
            // the pool is refusing 0 hop tunnels
            if (peers == null) {
                if (_log.shouldLog(Log.WARN))
                    _log.warn("No peers to put in the new tunnel! selectPeers returned null!  boo, hiss!");
            } else {
                if (_log.shouldLog(Log.WARN))
                    _log.warn("No peers to put in the new tunnel! selectPeers returned an empty list?!");
            }
            return null;
        }
    } else {
        // zero hop: the "tunnel" consists of just us
        peers = Collections.singletonList(_context.routerHash());
    }
    PooledTunnelCreatorConfig cfg = new PooledTunnelCreatorConfig(_context, peers.size(), settings.isInbound(), settings.getDestination());
    cfg.setTunnelPool(this);
    // peers list is ordered endpoint first, but cfg.getPeer() is ordered gateway first
    for (int i = 0; i < peers.size(); i++) {
        int j = peers.size() - 1 - i;
        cfg.setPeer(j, peers.get(i));
        HopConfig hop = cfg.getConfig(j);
        hop.setCreation(now);
        hop.setExpiration(expiration);
        hop.setIVKey(_context.keyGenerator().generateSessionKey());
        hop.setLayerKey(_context.keyGenerator().generateSessionKey());
    // tunnelIds will be updated during building, and as the creator, we
    // don't need to worry about prev/next hop
    }
    // note that this will be adjusted by expire job
    cfg.setExpiration(expiration);
    if (!settings.isInbound())
        cfg.setPriority(settings.getPriority());
    if (_log.shouldLog(Log.DEBUG))
        _log.debug("Config contains " + peers + ": " + cfg);
    synchronized (_inProgress) {
        _inProgress.add(cfg);
    }
    return cfg;
}
Also used : TunnelPoolSettings(net.i2p.router.TunnelPoolSettings) TunnelInfo(net.i2p.router.TunnelInfo) HopConfig(net.i2p.router.tunnel.HopConfig) Hash(net.i2p.data.Hash)

Example 8 with TunnelInfo

use of net.i2p.router.TunnelInfo in project i2p.i2p by i2p.

This example shows the needFallback method of the TunnelPool class.

/**
 * Decide whether this pool still needs more fallback (zero-hop) tunnels.
 * Counts existing tunnels of length &lt;= 1 and stops as soon as the count
 * reaches the adjusted quantity target, preventing unbounded fallback builds.
 *
 * @return true if we have fewer short tunnels than the pool's target quantity
 */
boolean needFallback() {
    int wanted = getAdjustedTotalQuantity();
    int shortTunnels = 0;
    synchronized (_tunnels) {
        for (TunnelInfo info : _tunnels) {
            // length <= 1 marks a fallback tunnel; bail out once we have enough
            if (info.getLength() <= 1 && ++shortTunnels >= wanted)
                return false;
        }
    }
    return true;
}
Also used : TunnelInfo(net.i2p.router.TunnelInfo)

Example 9 with TunnelInfo

use of net.i2p.router.TunnelInfo in project i2p.i2p by i2p.

This example shows the distribute method of the InboundMessageDistributor class.

/**
 * Distribute a message that arrived at our inbound tunnel endpoint.
 * First filters by message type — client tunnels accept a narrower set of
 * types than exploratory tunnels, and dangerous types are dropped with a
 * stat update — then either processes the message locally or re-dispatches
 * it out one of our own outbound tunnels toward the requested target.
 *
 * @param msg the message that came down the tunnel
 * @param target router to forward to; null (or ourselves with a null tunnel) means handle locally
 * @param tunnel tunnel ID on the target to forward into, may be null
 */
public void distribute(I2NPMessage msg, Hash target, TunnelId tunnel) {
    if (_log.shouldLog(Log.DEBUG))
        _log.debug("IBMD for " + _client + " to " + target + " / " + tunnel + " : " + msg);
    // allow messages on client tunnels even after client disconnection, as it may
    // include e.g. test messages, etc.  DataMessages will be dropped anyway
    /*
        if ( (_client != null) && (!_context.clientManager().isLocal(_client)) ) {
            if (_log.shouldLog(Log.INFO))
                _log.info("Not distributing a message, as it came down a client's tunnel (" 
                          + _client.toBase64() + ") after the client disconnected: " + msg);
            return;
        }
        */
    int type = msg.getType();
    // if the message came down a client tunnel:
    if (_client != null) {
        switch(type) {
            case DatabaseSearchReplyMessage.MESSAGE_TYPE:
                /**
                 **
                 *                     DatabaseSearchReplyMessage orig = (DatabaseSearchReplyMessage) msg;
                 *                     if (orig.getNumReplies() > 0) {
                 *                         if (_log.shouldLog(Log.INFO))
                 *                             _log.info("Removing replies from a DSRM down a tunnel for " + _client + ": " + msg);
                 *                         DatabaseSearchReplyMessage newMsg = new DatabaseSearchReplyMessage(_context);
                 *                         newMsg.setFromHash(orig.getFromHash());
                 *                         newMsg.setSearchKey(orig.getSearchKey());
                 *                         msg = newMsg;
                 *                     }
                 ***
                 */
                break;
            case DatabaseStoreMessage.MESSAGE_TYPE:
                DatabaseStoreMessage dsm = (DatabaseStoreMessage) msg;
                if (dsm.getEntry().getType() == DatabaseEntry.KEY_TYPE_ROUTERINFO) {
                    // RouterInfo stores are never accepted down a client tunnel;
                    // at most we harvest the capabilities after validation below.
                    // Todo: if peer was ff and RI is not ff, queue for exploration in netdb (but that isn't part of the facade now)
                    if (_log.shouldLog(Log.WARN))
                        _log.warn("Dropping DSM down a tunnel for " + _client + ": " + msg);
                    // Handle safely by just updating the caps table, after doing basic validation
                    Hash key = dsm.getKey();
                    // never accept an RI claiming to be us
                    if (_context.routerHash().equals(key))
                        return;
                    RouterInfo ri = (RouterInfo) dsm.getEntry();
                    // key must match the identity inside the RI, and the RI must verify
                    if (!key.equals(ri.getIdentity().getHash()))
                        return;
                    if (!ri.isValid())
                        return;
                    RouterInfo oldri = _context.netDb().lookupRouterInfoLocally(key);
                    // only update if RI is newer and non-ff
                    if (oldri != null && oldri.getPublished() < ri.getPublished() && !FloodfillNetworkDatabaseFacade.isFloodfill(ri)) {
                        if (_log.shouldLog(Log.WARN))
                            _log.warn("Updating caps for RI " + key + " from \"" + oldri.getCapabilities() + "\" to \"" + ri.getCapabilities() + '"');
                        _context.peerManager().setCapabilities(key, ri.getCapabilities());
                    }
                    return;
                } else if (dsm.getReplyToken() != 0) {
                    // a reply token on a LeaseSet store down a client tunnel is
                    // an attack vector, never legitimate — drop and count it
                    _context.statManager().addRateData("tunnel.dropDangerousClientTunnelMessage", 1, type);
                    _log.error("Dropping LS DSM w/ reply token down a tunnel for " + _client + ": " + msg);
                    return;
                } else {
                    // allow DSM of our own key (used by FloodfillVerifyStoreJob)
                    // or other keys (used by IterativeSearchJob)
                    // as long as there's no reply token (we will never set a reply token but an attacker might)
                    ((LeaseSet) dsm.getEntry()).setReceivedAsReply();
                }
                break;
            case DeliveryStatusMessage.MESSAGE_TYPE:
            case GarlicMessage.MESSAGE_TYPE:
            case TunnelBuildReplyMessage.MESSAGE_TYPE:
            case VariableTunnelBuildReplyMessage.MESSAGE_TYPE:
                // these are safe, handled below
                break;
            default:
                // drop it, since we should only get the above message types down
                // client tunnels
                _context.statManager().addRateData("tunnel.dropDangerousClientTunnelMessage", 1, type);
                _log.error("Dropped dangerous message down a tunnel for " + _client + ": " + msg, new Exception("cause"));
                return;
        }
    // switch
    } else {
        // expl. tunnel
        switch(type) {
            case DatabaseStoreMessage.MESSAGE_TYPE:
                DatabaseStoreMessage dsm = (DatabaseStoreMessage) msg;
                if (dsm.getReplyToken() != 0) {
                    _context.statManager().addRateData("tunnel.dropDangerousExplTunnelMessage", 1, type);
                    _log.error("Dropping DSM w/ reply token down a expl. tunnel: " + msg);
                    return;
                }
                if (dsm.getEntry().getType() == DatabaseEntry.KEY_TYPE_LEASESET)
                    ((LeaseSet) dsm.getEntry()).setReceivedAsReply();
                break;
            case DatabaseSearchReplyMessage.MESSAGE_TYPE:
            case DeliveryStatusMessage.MESSAGE_TYPE:
            case GarlicMessage.MESSAGE_TYPE:
            case TunnelBuildReplyMessage.MESSAGE_TYPE:
            case VariableTunnelBuildReplyMessage.MESSAGE_TYPE:
                // these are safe, handled below
                break;
            default:
                _context.statManager().addRateData("tunnel.dropDangerousExplTunnelMessage", 1, type);
                _log.error("Dropped dangerous message down expl tunnel: " + msg, new Exception("cause"));
                return;
        }
    // switch
    }
    // Delivery phase: handle locally, or re-dispatch out one of our outbound tunnels.
    if ((target == null) || ((tunnel == null) && (_context.routerHash().equals(target)))) {
        // make sure we don't honor any remote requests directly (garlic instructions, etc)
        if (type == GarlicMessage.MESSAGE_TYPE) {
            // in case we're looking for replies to a garlic message (cough load tests cough)
            _context.inNetMessagePool().handleReplies(msg);
            // if (_log.shouldLog(Log.DEBUG))
            // _log.debug("received garlic message in the tunnel, parse it out");
            _receiver.receive((GarlicMessage) msg);
        } else {
            if (_log.shouldLog(Log.INFO))
                _log.info("distributing inbound tunnel message into our inNetMessagePool: " + msg);
            _context.inNetMessagePool().add(msg, null, null);
        }
    /**
     **** latency measuring attack?
     *        } else if (_context.routerHash().equals(target)) {
     *            // the want to send it to a tunnel, except we are also that tunnel's gateway
     *            // dispatch it directly
     *            if (_log.shouldLog(Log.INFO))
     *                _log.info("distributing inbound tunnel message back out, except we are the gateway");
     *            TunnelGatewayMessage gw = new TunnelGatewayMessage(_context);
     *            gw.setMessage(msg);
     *            gw.setTunnelId(tunnel);
     *            gw.setMessageExpiration(_context.clock().now()+10*1000);
     *            gw.setUniqueId(_context.random().nextLong(I2NPMessage.MAX_ID_VALUE));
     *            _context.tunnelDispatcher().dispatch(gw);
     *****
     */
    } else {
        // ok, they want us to send it remotely, but that'd bust our anonymity,
        // so we send it out a tunnel first
        // TODO use the OCMOSJ cache to pick OB tunnel we are already using?
        TunnelInfo out = _context.tunnelManager().selectOutboundTunnel(_client, target);
        if (out == null) {
            if (_log.shouldLog(Log.WARN))
                _log.warn("no outbound tunnel to send the client message for " + _client + ": " + msg);
            return;
        }
        if (_log.shouldLog(Log.DEBUG))
            _log.debug("distributing IB tunnel msg type " + type + " back out " + out + " targetting " + target);
        TunnelId outId = out.getSendTunnelId(0);
        if (outId == null) {
            if (_log.shouldLog(Log.ERROR))
                _log.error("strange? outbound tunnel has no outboundId? " + out + " failing to distribute " + msg);
            return;
        }
        // extend very-short expirations so the message survives the extra hop out
        long exp = _context.clock().now() + 20 * 1000;
        if (msg.getMessageExpiration() < exp)
            msg.setMessageExpiration(exp);
        _context.tunnelDispatcher().dispatchOutbound(msg, outId, tunnel, target);
    }
}
Also used : RouterInfo(net.i2p.data.router.RouterInfo) DatabaseStoreMessage(net.i2p.data.i2np.DatabaseStoreMessage) TunnelInfo(net.i2p.router.TunnelInfo) Hash(net.i2p.data.Hash) TunnelId(net.i2p.data.TunnelId)

Example 10 with TunnelInfo

use of net.i2p.router.TunnelInfo in project i2p.i2p by i2p.

This example shows the testPeer method of the PeerTestJob class.

/**
 * Fire off one test of the given peer: a DatabaseStoreMessage containing the
 * peer's own RouterInfo with a reply token, sent out one of our outbound
 * tunnels, with the reply routed back through one of our inbound tunnels.
 * Registers matching reply-found and timeout jobs before dispatching.
 *
 * @param peer the router to test
 */
private void testPeer(RouterInfo peer) {
    TunnelInfo replyTunnel = getInboundTunnelId();
    if (replyTunnel == null) {
        _log.warn("No tunnels to get peer test replies through!");
        return;
    }
    TunnelId replyTunnelId = replyTunnel.getReceiveTunnelId(0);
    // hop 0 of an inbound tunnel is its gateway
    RouterInfo replyGateway = getContext().netDb().lookupRouterInfoLocally(replyTunnel.getPeer(0));
    if (replyGateway == null) {
        if (_log.shouldLog(Log.WARN)) {
            _log.warn("We can't find the gateway to our inbound tunnel?! Impossible?");
        }
        return;
    }
    int timeoutMs = getTestTimeout();
    long expiration = getContext().clock().now() + timeoutMs;
    // nonce in [1, MAX_ID_VALUE] ties the reply back to this test
    long nonce = 1 + getContext().random().nextLong(I2NPMessage.MAX_ID_VALUE - 1);
    DatabaseStoreMessage storeMsg = buildMessage(peer, replyTunnelId, replyGateway.getIdentity().getHash(), nonce, expiration);
    TunnelInfo sendTunnel = getOutboundTunnelId();
    if (sendTunnel == null) {
        _log.warn("No tunnels to send search out through! Something is wrong...");
        return;
    }
    TunnelId sendTunnelId = sendTunnel.getSendTunnelId(0);
    if (_log.shouldLog(Log.DEBUG)) {
        _log.debug(getJobId() + ": Sending peer test to " + peer.getIdentity().getHash().toBase64() + " out " + sendTunnel + " w/ replies through " + replyTunnel);
    }
    ReplySelector selector = new ReplySelector(peer.getIdentity().getHash(), nonce, expiration);
    PeerReplyFoundJob onReply = new PeerReplyFoundJob(getContext(), peer, replyTunnel, sendTunnel);
    PeerReplyTimeoutJob onTimeout = new PeerReplyTimeoutJob(getContext(), peer, replyTunnel, sendTunnel, selector);
    getContext().messageRegistry().registerPending(selector, onReply, onTimeout);
    getContext().tunnelDispatcher().dispatchOutbound(storeMsg, sendTunnelId, null, peer.getIdentity().getHash());
}
Also used : RouterInfo(net.i2p.data.router.RouterInfo) DatabaseStoreMessage(net.i2p.data.i2np.DatabaseStoreMessage) TunnelInfo(net.i2p.router.TunnelInfo) TunnelId(net.i2p.data.TunnelId)

Aggregations

TunnelInfo (net.i2p.router.TunnelInfo)30 Hash (net.i2p.data.Hash)15 TunnelId (net.i2p.data.TunnelId)8 RouterInfo (net.i2p.data.router.RouterInfo)6 ArrayList (java.util.ArrayList)4 DatabaseLookupMessage (net.i2p.data.i2np.DatabaseLookupMessage)4 DatabaseStoreMessage (net.i2p.data.i2np.DatabaseStoreMessage)4 I2NPMessage (net.i2p.data.i2np.I2NPMessage)4 TunnelManagerFacade (net.i2p.router.TunnelManagerFacade)4 Job (net.i2p.router.Job)3 TunnelPoolSettings (net.i2p.router.TunnelPoolSettings)3 HashSet (java.util.HashSet)2 OutNetMessage (net.i2p.router.OutNetMessage)2 ReplyJob (net.i2p.router.ReplyJob)2 Date (java.util.Date)1 HashMap (java.util.HashMap)1 List (java.util.List)1 Map (java.util.Map)1 Set (java.util.Set)1 TreeSet (java.util.TreeSet)1