Search in sources:

Example 51 with Hash

use of net.i2p.data.Hash in project i2p.i2p by i2p.

From class IterativeSearchJob, method failed().

/**
 *  Total failure
 *
 *  Runs its side effects exactly once: the synchronized {@code _dead} check
 *  turns any later or concurrent call into a no-op. Notifies the facade,
 *  penalizes the profiles of peers we never heard from, records failure
 *  stats, and queues every registered {@code _onFailed} job.
 */
@Override
void failed() {
    // first caller wins; subsequent completions are ignored
    synchronized (this) {
        if (_dead)
            return;
        _dead = true;
    }
    _facade.complete(_key);
    // skip lookupFailed() while fully disconnected -- presumably so offline
    // periods don't count against lookup stats; confirm against facade impl
    if (getContext().commSystem().getStatus() != Status.DISCONNECTED)
        _facade.lookupFailed(_key);
    getContext().messageRegistry().unregisterPending(_out);
    int tries;
    final List<Hash> unheard;
    // snapshot the peer sets under the lock, then update profiles outside it
    synchronized (this) {
        tries = _unheardFrom.size() + _failedPeers.size();
        unheard = new ArrayList<Hash>(_unheardFrom);
    }
    // blame the unheard-from (others already blamed in failed() above)
    for (Hash h : unheard) {
        getContext().profileManager().dbLookupFailed(h);
    }
    long time = System.currentTimeMillis() - _created;
    if (_log.shouldLog(Log.INFO)) {
        long timeRemaining = _expiration - getContext().clock().now();
        _log.info(getJobId() + ": ISJ for " + _key + " failed with " + timeRemaining + " remaining after " + time + ", peers queried: " + tries);
    }
    if (tries > 0) {
        // don't bias the stats with immediate fails
        getContext().statManager().addRateData("netDb.failedTime", time);
        getContext().statManager().addRateData("netDb.failedRetries", tries - 1);
    }
    // hand each failure callback to the job queue, then drop our references
    for (Job j : _onFailed) {
        getContext().jobQueue().addJob(j);
    }
    _onFailed.clear();
}
Also used : Hash(net.i2p.data.Hash) ReplyJob(net.i2p.router.ReplyJob) Job(net.i2p.router.Job)

Example 52 with Hash

use of net.i2p.data.Hash in project i2p.i2p by i2p.

From class FloodOnlySearchJob, method success().

/**
 *  Lookup succeeded. Runs at most once (guarded by {@code _dead}).
 *  Credits the responder's profile only when it is unambiguous which peer
 *  answered, completes the facade entry, records the success-time stat,
 *  and queues every registered {@code _onFind} job.
 */
@Override
void success() {
    // first caller wins; super.success() runs under the same lock
    synchronized (this) {
        if (_dead)
            return;
        _dead = true;
        super.success();
    }
    if (_log.shouldLog(Log.INFO))
        _log.info(getJobId() + ": Floodfill search for " + _key + " successful");
    // Sadly, we don't know which of the two replied, unless the first one sent a DSRM
    // before the second one sent the answer, which isn't that likely.
    // Would be really nice to fix this, but it isn't clear how unless CONCURRENT_SEARCHES == 1.
    // Maybe don't unregister the msg from the Registry for a while and see if we get a 2nd reply?
    // Or delay the 2nd search for a few seconds?
    // We'll have to rely primarily on other searches (ExploreJob which calls SearchJob,
    // and FloodfillVerifyStoreJob) to record successful searches for now.
    // StoreJob also calls dbStoreSent() which updates the lastHeardFrom timer - this also helps.
    long time = System.currentTimeMillis() - _created;
    synchronized (_unheardFrom) {
        // exactly one unheard-from peer => the responder is unambiguous,
        // so it's safe to credit that peer's profile
        if (_unheardFrom.size() == 1) {
            Hash peer = _unheardFrom.iterator().next();
            getContext().profileManager().dbLookupSuccessful(peer, time);
        }
    }
    _facade.complete(_key);
    getContext().statManager().addRateData("netDb.successTime", time);
    for (Job j : _onFind) {
        getContext().jobQueue().addJob(j);
    }
}
Also used : Hash(net.i2p.data.Hash) ReplyJob(net.i2p.router.ReplyJob) Job(net.i2p.router.Job)

Example 53 with Hash

use of net.i2p.data.Hash in project i2p.i2p by i2p.

From class FloodfillNetworkDatabaseFacade, method flood().

/**
 *  Send to a subset of all floodfill peers.
 *  We do this to implement Kademlia within the floodfills, i.e.
 *  we flood to those closest to the key.
 *
 *  @param ds the database entry to flood; its hash determines the routing key
 */
public void flood(DatabaseEntry ds) {
    Hash key = ds.getHash();
    RouterKeyGenerator gen = _context.routerKeyGenerator();
    Hash rkey = gen.getRoutingKey(key);
    FloodfillPeerSelector sel = (FloodfillPeerSelector) getPeerSelector();
    List<Hash> peers = sel.selectFloodfillParticipants(rkey, MAX_TO_FLOOD, getKBuckets());
    // todo key cert skip?
    long until = gen.getTimeTillMidnight();
    if (until < NEXT_RKEY_LS_ADVANCE_TIME || (ds.getType() == DatabaseEntry.KEY_TYPE_ROUTERINFO && until < NEXT_RKEY_RI_ADVANCE_TIME)) {
        // to avoid lookup failures after midnight, also flood to some closest to the
        // next routing key for a period of time before midnight.
        Hash nkey = gen.getNextRoutingKey(key);
        List<Hash> nextPeers = sel.selectFloodfillParticipants(nkey, NEXT_FLOOD_QTY, getKBuckets());
        // counts next-key peers actually added beyond the primary set
        int i = 0;
        for (Hash h : nextPeers) {
            // skip a peer whose hash equals the entry's key
            // But other implementations may not...
            if (h.equals(key))
                continue;
            // todo key cert skip?
            if (!peers.contains(h)) {
                peers.add(h);
                i++;
            }
        }
        if (i > 0 && _log.shouldLog(Log.INFO))
            _log.info("Flooding the entry for " + key + " to " + i + " more, just before midnight");
    }
    int flooded = 0;
    for (int i = 0; i < peers.size(); i++) {
        Hash peer = peers.get(i);
        RouterInfo target = lookupRouterInfoLocally(peer);
        // need a locally-known, non-banlisted RouterInfo to address the message
        if ((target == null) || (_context.banlist().isBanlisted(peer)))
            continue;
        // don't send a RouterInfo to the router it describes
        // But other implementations may not...
        if (ds.getType() == DatabaseEntry.KEY_TYPE_ROUTERINFO && peer.equals(key))
            continue;
        // never flood to ourselves
        if (peer.equals(_context.routerHash()))
            continue;
        DatabaseStoreMessage msg = new DatabaseStoreMessage(_context);
        msg.setEntry(ds);
        OutNetMessage m = new OutNetMessage(_context, msg, _context.clock().now() + FLOOD_TIMEOUT, FLOOD_PRIORITY, target);
        Job floodFail = new FloodFailedJob(_context, peer);
        m.setOnFailedSendJob(floodFail);
        // we want to give credit on success, even if we aren't sure,
        // because otherwise no use noting failure
        Job floodGood = new FloodSuccessJob(_context, peer);
        m.setOnSendJob(floodGood);
        _context.commSystem().processMessage(m);
        flooded++;
        if (_log.shouldLog(Log.INFO))
            _log.info("Flooding the entry for " + key.toBase64() + " to " + peer.toBase64());
    }
    if (_log.shouldLog(Log.INFO))
        _log.info("Flooded the data to " + flooded + " of " + peers.size() + " peers");
}
Also used : OutNetMessage(net.i2p.router.OutNetMessage) RouterInfo(net.i2p.data.router.RouterInfo) RouterKeyGenerator(net.i2p.data.router.RouterKeyGenerator) DatabaseStoreMessage(net.i2p.data.i2np.DatabaseStoreMessage) Hash(net.i2p.data.Hash) Job(net.i2p.router.Job)

Example 54 with Hash

use of net.i2p.data.Hash in project i2p.i2p by i2p.

From class FloodfillPeerSelector, method selectNearest().

/**
 * Floodfill peers only. Used only by HandleDatabaseLookupMessageJob to populate the DSRM.
 * UNLESS peersToIgnore contains Hash.FAKE_HASH (all zeros), in which case this is an exploratory
 * lookup, and the response should not include floodfills.
 * List MAY INCLUDE our own router - add to peersToIgnore if you don't want
 *
 * @param key the original key (NOT the routing key)
 * @param peersToIgnore can be null
 * @return List of Hash for the peers selected, ordered
 */
/**
 * Floodfill peers only. Used only by HandleDatabaseLookupMessageJob to populate the DSRM.
 * UNLESS peersToIgnore contains Hash.FAKE_HASH (all zeros), in which case this is an exploratory
 * lookup, and the response should not include floodfills.
 * List MAY INCLUDE our own router - add to peersToIgnore if you don't want
 *
 * @param key the original key (NOT the routing key)
 * @param peersToIgnore can be null
 * @return List of Hash for the peers selected, ordered
 */
@Override
List<Hash> selectNearest(Hash key, int maxNumRouters, Set<Hash> peersToIgnore, KBucketSet<Hash> kbuckets) {
    Hash routingKey = _context.routingKeyGenerator().getRoutingKey(key);
    boolean exploratory = peersToIgnore != null && peersToIgnore.contains(Hash.FAKE_HASH);
    if (!exploratory) {
        // normal lookup: answer with floodfills (possibly including us)
        return selectFloodfillParticipantsIncludingUs(routingKey, maxNumRouters, peersToIgnore, kbuckets);
    }
    // exploratory lookup: fold every floodfill into the ignore set so the
    // collected response contains only non-floodfill routers
    peersToIgnore.addAll(selectFloodfillParticipants(peersToIgnore, kbuckets));
    // TODO this is very slow
    FloodfillSelectionCollector collector = new FloodfillSelectionCollector(routingKey, peersToIgnore, maxNumRouters);
    kbuckets.getAll(collector);
    return collector.get(maxNumRouters);
}
Also used : Hash(net.i2p.data.Hash)

Example 55 with Hash

use of net.i2p.data.Hash in project i2p.i2p by i2p.

From class FloodfillPeerSelector, method selectFloodfillParticipants().

/**
 *  @param kbuckets now unused
 *  @param toIgnore can be null
 *  @return all floodfills not banlisted forever.
 *  List MAY INCLUDE our own hash.
 *  List is not sorted and not shuffled.
 */
/**
 *  @param toIgnore can be null
 *  @param kbuckets now unused (an older kbucket-scan implementation was
 *         removed; peers now come from the profile capability index)
 *  @return all floodfills not banlisted forever.
 *  List MAY INCLUDE our own hash.
 *  List is not sorted and not shuffled.
 */
private List<Hash> selectFloodfillParticipants(Set<Hash> toIgnore, KBucketSet<Hash> kbuckets) {
    Set<Hash> floodfills = _context.peerManager().getPeersByCapability(FloodfillNetworkDatabaseFacade.CAPABILITY_FLOODFILL);
    List<Hash> rv = new ArrayList<Hash>(floodfills.size());
    for (Hash candidate : floodfills) {
        boolean ignored = toIgnore != null && toIgnore.contains(candidate);
        if (!ignored && !_context.banlist().isBanlistedForever(candidate))
            rv.add(candidate);
    }
    return rv;
}
Also used : ArrayList(java.util.ArrayList) Hash(net.i2p.data.Hash)

Aggregations

Hash (net.i2p.data.Hash)235 RouterInfo (net.i2p.data.router.RouterInfo)45 ArrayList (java.util.ArrayList)29 TunnelId (net.i2p.data.TunnelId)20 Destination (net.i2p.data.Destination)18 HashSet (java.util.HashSet)17 ConvertToHash (net.i2p.util.ConvertToHash)17 IOException (java.io.IOException)16 TunnelInfo (net.i2p.router.TunnelInfo)15 DataFormatException (net.i2p.data.DataFormatException)14 Properties (java.util.Properties)13 Date (java.util.Date)12 DatabaseEntry (net.i2p.data.DatabaseEntry)11 SessionKey (net.i2p.data.SessionKey)11 RouterAddress (net.i2p.data.router.RouterAddress)11 DatabaseStoreMessage (net.i2p.data.i2np.DatabaseStoreMessage)9 I2NPMessage (net.i2p.data.i2np.I2NPMessage)9 Job (net.i2p.router.Job)9 OutNetMessage (net.i2p.router.OutNetMessage)9 TunnelPoolSettings (net.i2p.router.TunnelPoolSettings)8