Search in sources :

Example 6 with DatabaseEntry

use of net.i2p.data.DatabaseEntry in project i2p.i2p by i2p.

From the class KademliaNetworkDatabaseFacade, the method lookupLeaseSetLocally.

/**
 *  Look up a LeaseSet in the local data store only (no network query).
 *  Returns the lease set only if it is still current; an expired one is
 *  dropped and its key queued for exploration.
 *
 *  Use lookupDestination() if you don't need the LS or don't need it validated.
 *
 *  @param key hash of the destination whose lease set is wanted
 *  @return the current LeaseSet, or null if absent, expired, or not a lease set
 */
public LeaseSet lookupLeaseSetLocally(Hash key) {
    if (!_initialized)
        return null;
    DatabaseEntry entry = _ds.get(key);
    if (entry == null)
        return null;
    if (entry.getType() != DatabaseEntry.KEY_TYPE_LEASESET) {
        // Something is stored under this key, but it isn't a lease set
        // (most likely a RouterInfo) - treat as not found.
        return null;
    }
    LeaseSet leaseSet = (LeaseSet) entry;
    if (!leaseSet.isCurrent(Router.CLOCK_FUDGE_FACTOR)) {
        // Expired: drop it, but this was an interesting key, so either
        // refetch it or simply explore with it.
        fail(key);
        _exploreKeys.add(key);
        return null;
    }
    return leaseSet;
}
Also used : LeaseSet(net.i2p.data.LeaseSet) DatabaseEntry(net.i2p.data.DatabaseEntry)

Example 7 with DatabaseEntry

use of net.i2p.data.DatabaseEntry in project i2p.i2p by i2p.

From the class KademliaNetworkDatabaseFacade, the method fail.

/**
 *   Final remove for a leaseset.
 *   For a router info, will look up in the network before dropping.
 *
 *   @param dbEntry hash of the entry to fail/remove
 */
public void fail(Hash dbEntry) {
    if (!_initialized)
        return;
    DatabaseEntry entry = _ds.get(dbEntry);
    if (entry == null) {
        // Unknown key: still purge routing-table and capability state,
        // to make sure it isn't a now-dead peer lingering there.
        _kb.remove(dbEntry);
        _context.peerManager().removeCapabilities(dbEntry);
        return;
    }
    if (entry.getType() == DatabaseEntry.KEY_TYPE_ROUTERINFO) {
        // Router infos get a network lookup before being dropped.
        lookupBeforeDropping(dbEntry, (RouterInfo) entry);
        return;
    }
    // It's a lease set - remove it immediately
    // (second arg false: presumably skips persisting the removal - TODO confirm).
    if (_log.shouldLog(Log.INFO))
        _log.info("Dropping a lease: " + dbEntry);
    _ds.remove(dbEntry, false);
}
Also used : DatabaseEntry(net.i2p.data.DatabaseEntry)

Example 8 with DatabaseEntry

use of net.i2p.data.DatabaseEntry in project i2p.i2p by i2p.

From the class SearchUpdateReplyFoundJob, the method runJob.

/**
 *  Process a reply that matched one of our outstanding searches:
 *  credit the peers along the reply/outbound tunnels, then either store
 *  the returned DatabaseEntry, hand a DatabaseSearchReplyMessage to the
 *  parent job, or log an unexpected message type, and finally advance
 *  the search via searchNext().
 */
public void runJob() {
    // The responder no longer counts against the job's limit on
    // outstanding floodfill queries.
    if (_isFloodfillPeer)
        _job.decrementOutstandingFloodfillSearches();
    I2NPMessage message = _message;
    if (_log.shouldLog(Log.INFO))
        _log.info(getJobId() + ": Reply from " + _peer.toBase64() + " with message " + message.getClass().getSimpleName());
    // Round-trip time since the request was sent.
    long howLong = System.currentTimeMillis() - _sentOn;
    // assume requests are 1KB (they're almost always much smaller, but tunnels have a fixed size)
    int msgSize = 1024;
    // Credit every hop in both tunnels with having pushed the data.
    if (_replyTunnel != null) {
        for (int i = 0; i < _replyTunnel.getLength(); i++) getContext().profileManager().tunnelDataPushed(_replyTunnel.getPeer(i), howLong, msgSize);
        _replyTunnel.incrementVerifiedBytesTransferred(msgSize);
    }
    if (_outTunnel != null) {
        for (int i = 0; i < _outTunnel.getLength(); i++) getContext().profileManager().tunnelDataPushed(_outTunnel.getPeer(i), howLong, msgSize);
        _outTunnel.incrementVerifiedBytesTransferred(msgSize);
    }
    if (message instanceof DatabaseStoreMessage) {
        // The peer returned the data itself - try to store it locally.
        long timeToReply = _state.dataFound(_peer);
        DatabaseStoreMessage msg = (DatabaseStoreMessage) message;
        DatabaseEntry entry = msg.getEntry();
        try {
            _facade.store(msg.getKey(), entry);
            getContext().profileManager().dbLookupSuccessful(_peer, timeToReply);
        } catch (UnsupportedCryptoException iae) {
            // don't blame the peer
            getContext().profileManager().dbLookupSuccessful(_peer, timeToReply);
            _state.abort();
        // searchNext() will call fail()
        } catch (IllegalArgumentException iae) {
            if (_log.shouldLog(Log.WARN))
                _log.warn("Peer " + _peer + " sent us invalid data: ", iae);
            // blame the peer
            getContext().profileManager().dbLookupReply(_peer, 0, 0, 1, 0, timeToReply);
        }
    } else if (message instanceof DatabaseSearchReplyMessage) {
        // The peer sent a list of closer peers instead of the data.
        _job.replyFound((DatabaseSearchReplyMessage) message, _peer);
    } else {
        if (_log.shouldLog(Log.ERROR))
            _log.error(getJobId() + ": What?! Reply job matched a strange message: " + message);
        return;
    }
    _job.searchNext();
}
Also used : I2NPMessage(net.i2p.data.i2np.I2NPMessage) DatabaseStoreMessage(net.i2p.data.i2np.DatabaseStoreMessage) DatabaseSearchReplyMessage(net.i2p.data.i2np.DatabaseSearchReplyMessage) DatabaseEntry(net.i2p.data.DatabaseEntry)

Example 9 with DatabaseEntry

use of net.i2p.data.DatabaseEntry in project i2p.i2p by i2p.

From the class SearchJob, the method continueSearch.

/**
 * Send a series of searches to the next available peers as selected by
 * the routing table, but making sure no more than SEARCH_BREDTH are outstanding
 * at any time.
 *
 * Loops until at least one new query has been dispatched, or until there
 * are no candidate routers left (in which case the search fails or waits
 * on the currently pending queries).
 */
protected void continueSearch() {
    if (_state.completed()) {
        if (_log.shouldLog(Log.DEBUG))
            _log.debug(getJobId() + ": Search already completed", new Exception("already completed"));
        return;
    }
    // How many additional queries we may have in flight right now.
    int toCheck = getBredth() - _state.getPending().size();
    if (toCheck <= 0) {
        // too many already pending
        if (_log.shouldLog(Log.INFO))
            _log.info(getJobId() + ": Too many searches already pending (pending: " + _state.getPending().size() + " max: " + getBredth() + ")");
        requeuePending();
        return;
    }
    int sent = 0;
    Set<Hash> attempted = _state.getAttempted();
    // Keep selecting candidates until we actually queue at least one query.
    while (sent <= 0) {
        // boolean onlyFloodfill = onlyQueryFloodfillPeers(getContext());
        boolean onlyFloodfill = true;
        // All known floodfills tried, restricted to floodfills, and nothing
        // outstanding that could still answer: give up.
        if (_floodfillPeersExhausted && onlyFloodfill && _state.getPending().isEmpty()) {
            if (_log.shouldLog(Log.WARN))
                _log.warn(getJobId() + ": no non-floodfill peers left, and no more pending.  Searched: " + _state.getAttempted().size() + " failed: " + _state.getFailed().size());
            fail();
            return;
        }
        // Next batch of closest untried routers to the target key.
        List<Hash> closestHashes = getClosestRouters(_state.getTarget(), toCheck, attempted);
        if ((closestHashes == null) || (closestHashes.isEmpty())) {
            if (_state.getPending().isEmpty()) {
                // we tried to find some peers, but there weren't any and no one else is going to answer
                if (_log.shouldLog(Log.INFO))
                    _log.info(getJobId() + ": No peers left, and none pending!  Already searched: " + _state.getAttempted().size() + " failed: " + _state.getFailed().size());
                fail();
            } else {
                // no more to try, but we might get data or close peers from some outstanding requests
                if (_log.shouldLog(Log.INFO))
                    _log.info(getJobId() + ": No peers left, but some are pending!  Pending: " + _state.getPending().size() + " attempted: " + _state.getAttempted().size() + " failed: " + _state.getFailed().size());
                requeuePending();
            }
            return;
        } else {
            // Mark the whole batch as attempted up front so the next
            // getClosestRouters() call never re-selects them.
            attempted.addAll(closestHashes);
            for (Hash peer : closestHashes) {
                DatabaseEntry ds = _facade.getDataStore().get(peer);
                if (ds == null) {
                    // We only know the hash, not the RouterInfo - fetch it first.
                    if (_log.shouldLog(Log.INFO))
                        _log.info("Next closest peer " + peer + " was only recently referred to us, sending a search for them");
                    getContext().netDb().lookupRouterInfo(peer, null, null, _timeoutMs);
                } else if (!(ds.getType() == DatabaseEntry.KEY_TYPE_ROUTERINFO)) {
                    // Candidate hash resolved to a lease set, not a router.
                    if (_log.shouldLog(Log.WARN))
                        _log.warn(getJobId() + ": Error selecting closest hash that wasnt a router! " + peer + " : " + ds.getClass().getName());
                    _state.replyTimeout(peer);
                } else {
                    RouterInfo ri = (RouterInfo) ds;
                    if (!FloodfillNetworkDatabaseFacade.isFloodfill(ri)) {
                        // Remember that we've started hitting non-floodfills;
                        // skip them while we're in floodfill-only mode.
                        _floodfillPeersExhausted = true;
                        if (onlyFloodfill)
                            continue;
                    }
                    if (ri.isHidden()) {
                    // || // allow querying banlisted, since its indirect
                    // getContext().banlist().isBanlisted(peer)) {
                    // dont bother
                    } else {
                        _state.addPending(peer);
                        sendSearch((RouterInfo) ds);
                        sent++;
                    }
                }
            }
        /*
                if (sent <= 0) {
                    // the (potentially) last peers being searched for could not be,
                    // er, searched for, so lets retry ASAP (causing either another 
                    // peer to be selected, or the whole search to fail)
                    if (_log.shouldLog(Log.INFO))
                        _log.info(getJobId() + ": No new peer queued up, so we are going to requeue " +
                                  "ourselves in our search for " + _state.getTarget().toBase64());
                    requeuePending(0);
                }
                 */
        }
    }
}
Also used : RouterInfo(net.i2p.data.router.RouterInfo) DatabaseEntry(net.i2p.data.DatabaseEntry) Hash(net.i2p.data.Hash)

Example 10 with DatabaseEntry

use of net.i2p.data.DatabaseEntry in project i2p.i2p by i2p.

From the class StoreJob, the method continueSending.

/**
 * Send a series of searches to the next available peers as selected by
 * the routing table, but making sure no more than PARALLELIZATION are outstanding
 * at any time
 *
 * Caller should synchronize to enforce parallelization limits and prevent dups
 */
private synchronized void continueSending() {
    if (_state.completed())
        return;
    // How many additional store messages we may have in flight right now.
    int toCheck = getParallelization() - _state.getPending().size();
    if (toCheck <= 0) {
        // too many already pending
        if (_log.shouldLog(Log.DEBUG))
            _log.debug(getJobId() + ": Too many store messages pending");
        return;
    }
    // Defensive clamp: never request more candidates than the
    // parallelization limit allows.
    if (toCheck > getParallelization())
        toCheck = getParallelization();
    // We are going to send the RouterInfo directly, rather than through a lease,
    // so select a floodfill peer we are already connected to.
    // This will help minimize active connections for floodfill peers and allow
    // the network to scale.
    // Perhaps the ultimate solution is to send RouterInfos through a lease also.
    List<Hash> closestHashes;
    // if (_state.getData() instanceof RouterInfo)
    // closestHashes = getMostReliableRouters(_state.getTarget(), toCheck, _state.getAttempted());
    // else
    // closestHashes = getClosestRouters(_state.getTarget(), toCheck, _state.getAttempted());
    closestHashes = getClosestFloodfillRouters(_state.getTarget(), toCheck, _state.getAttempted());
    if ((closestHashes == null) || (closestHashes.isEmpty())) {
        if (_state.getPending().isEmpty()) {
            // Nothing pending and no candidates left - the store has failed.
            if (_log.shouldLog(Log.INFO))
                _log.info(getJobId() + ": No more peers left and none pending");
            fail();
        } else {
            if (_log.shouldLog(Log.INFO))
                _log.info(getJobId() + ": No more peers left but some are pending, so keep waiting");
            return;
        }
    } else {
        // _state.addPending(closestHashes);
        int queued = 0;
        int skipped = 0;
        for (Hash peer : closestHashes) {
            DatabaseEntry ds = _facade.getDataStore().get(peer);
            // Skip candidates we have no RouterInfo for (unknown or a lease set).
            if ((ds == null) || !(ds.getType() == DatabaseEntry.KEY_TYPE_ROUTERINFO)) {
                if (_log.shouldLog(Log.INFO))
                    _log.info(getJobId() + ": Error selecting closest hash that wasnt a router! " + peer + " : " + ds);
                _state.addSkipped(peer);
                skipped++;
            } else if (!shouldStoreTo((RouterInfo) ds)) {
                // Router version/capabilities deemed unsuitable for this store.
                if (_log.shouldLog(Log.INFO))
                    _log.info(getJobId() + ": Skipping old router " + peer);
                _state.addSkipped(peer);
                skipped++;
            /**
             **
             *   above shouldStoreTo() check is newer than these two checks, so we're covered
             *
             *                } else if (_state.getData().getType() == DatabaseEntry.KEY_TYPE_LEASESET &&
             *                           !supportsCert((RouterInfo)ds,
             *                                         ((LeaseSet)_state.getData()).getDestination().getCertificate())) {
             *                    if (_log.shouldLog(Log.INFO))
             *                        _log.info(getJobId() + ": Skipping router that doesn't support key certs " + peer);
             *                    _state.addSkipped(peer);
             *                    skipped++;
             *                } else if (_state.getData().getType() == DatabaseEntry.KEY_TYPE_LEASESET &&
             *                           ((LeaseSet)_state.getData()).getLeaseCount() > 6 &&
             *                           !supportsBigLeaseSets((RouterInfo)ds)) {
             *                    if (_log.shouldLog(Log.INFO))
             *                        _log.info(getJobId() + ": Skipping router that doesn't support big leasesets " + peer);
             *                    _state.addSkipped(peer);
             *                    skipped++;
             ***
             */
            } else {
                int peerTimeout = _facade.getPeerTimeout(peer);
                // if (!((RouterInfo)ds).isHidden()) {
                if (_log.shouldLog(Log.INFO))
                    _log.info(getJobId() + ": Continue sending key " + _state.getTarget() + " after " + _state.getAttempted().size() + " tries to " + closestHashes);
                _state.addPending(peer);
                sendStore((RouterInfo) ds, peerTimeout);
                queued++;
            // }
            }
        }
        // Everything in this batch was skipped and nothing is in flight:
        // try again via the job queue rather than recursing.
        if (queued == 0 && _state.getPending().isEmpty()) {
            if (_log.shouldLog(Log.INFO))
                _log.info(getJobId() + ": No more peers left after skipping " + skipped + " and none pending");
            // queue a job to go around again rather than recursing
            getContext().jobQueue().addJob(new WaitJob(getContext()));
        }
    }
}
Also used : RouterInfo(net.i2p.data.router.RouterInfo) DatabaseEntry(net.i2p.data.DatabaseEntry) Hash(net.i2p.data.Hash)

Aggregations

DatabaseEntry (net.i2p.data.DatabaseEntry)16 Hash (net.i2p.data.Hash)11 RouterInfo (net.i2p.data.router.RouterInfo)9 LeaseSet (net.i2p.data.LeaseSet)4 Date (java.util.Date)2 HashSet (java.util.HashSet)2 DatabaseStoreMessage (net.i2p.data.i2np.DatabaseStoreMessage)2 ArrayList (java.util.ArrayList)1 Collection (java.util.Collection)1 Map (java.util.Map)1 NoSuchElementException (java.util.NoSuchElementException)1 Set (java.util.Set)1 ConcurrentHashMap (java.util.concurrent.ConcurrentHashMap)1 DatabaseLookupMessage (net.i2p.data.i2np.DatabaseLookupMessage)1 DatabaseSearchReplyMessage (net.i2p.data.i2np.DatabaseSearchReplyMessage)1 I2NPMessage (net.i2p.data.i2np.I2NPMessage)1