Usage of net.i2p.data.router.RouterInfo in the i2p.i2p project (by i2p):
class SearchReplyJob, method runJob().
// Processes one entry of a DatabaseSearchReplyMessage per invocation,
// rescheduling itself with requeue(0) until all entries have been walked;
// once done, it records the reply quality in the sending peer's profile.
public void runJob() {
if (_curIndex >= _msg.getNumReplies()) {
// All reply entries have been processed.
if (_log.shouldLog(Log.DEBUG) && _msg.getNumReplies() == 0)
_log.debug(getJobId() + ": dbSearchReply received with no routers referenced");
if (_repliesPendingVerification > 0) {
// we received new references from the peer, but still
// haven't verified all of them, so lets give it more time
requeue(_searchJob.timeoutMs());
} else {
// either they didn't tell us anything new or we have verified
// (or failed to verify) all of them. we're done
// Feed the per-category counters into _peer's profile, then let the
// search job know about any newly discovered peers.
getContext().profileManager().dbLookupReply(_peer, _newPeers, _seenPeers, _invalidPeers, _duplicatePeers, _duration);
if (_newPeers > 0)
_searchJob.newPeersFound(_newPeers);
}
} else {
// Process the single reply entry at _curIndex.
Hash peer = _msg.getReply(_curIndex);
boolean shouldAdd = false;
RouterInfo info = getContext().netDb().lookupRouterInfoLocally(peer);
if (info == null) {
// We don't have this router locally.
// if the peer is giving us lots of bad peer references,
// dont try to fetch them.
boolean sendsBadInfo = getContext().profileOrganizer().peerSendsBadReplies(_peer);
if (!sendsBadInfo) {
// This keeps us from continually chasing blocklisted floodfills
if (getContext().banlist().isBanlisted(peer)) {
// Banlisted: deliberately do nothing (shouldAdd stays false).
// if (_log.shouldLog(Log.INFO))
// _log.info("Not looking for a banlisted peer...");
// getContext().statManager().addRateData("netDb.searchReplyValidationSkipped", 1, 0);
} else {
// Earlier versions kicked off a lookup/verification here (see the
// commented-out lines); now the peer is simply offered to the search.
// getContext().netDb().lookupRouterInfo(peer, new ReplyVerifiedJob(getContext(), peer), new ReplyNotVerifiedJob(getContext(), peer), _timeoutMs);
// _repliesPendingVerification++;
shouldAdd = true;
}
} else {
if (_log.shouldLog(Log.INFO))
_log.info("Peer " + _peer.toBase64() + " sends us bad replies, so not verifying " + peer.toBase64());
getContext().statManager().addRateData("netDb.searchReplyValidationSkipped", 1);
}
}
// NOTE(review): counted as a duplicate even when the router is already
// known locally (info != null) — appears intentional, as the search did
// already attempt this peer.
if (_searchJob.wasAttempted(peer)) {
_duplicatePeers++;
}
if (_log.shouldLog(Log.DEBUG))
_log.debug(getJobId() + ": dbSearchReply received on search referencing router " + peer + " already known? " + (info != null));
if (shouldAdd) {
// add() returning true presumably means the peer was not previously
// queued by the search — count it as new, otherwise as already seen.
if (_searchJob.add(peer))
_newPeers++;
else
_seenPeers++;
}
// Advance to the next entry and reschedule immediately.
_curIndex++;
requeue(0);
}
}
Usage of net.i2p.data.router.RouterInfo in the i2p.i2p project (by i2p):
class StartExplorersJob, method getNextRunDelay().
/**
 * Compute the delay before the next exploration run.
 * The delay scales with the time elapsed since we last learned something
 * new, clamped to [MIN_RERUN_DELAY_MS, MAX_RERUN_DELAY_MS], with several
 * special cases that force the minimum or the maximum.
 *
 * @return delay in milliseconds
 */
private long getNextRunDelay() {
    // Floodfills never explore; back off to the maximum.
    if (_facade.floodfillEnabled())
        return MAX_RERUN_DELAY_MS;
    // Freshly started, hidden, or data-starved routers explore aggressively.
    // DataStore.size() (which also counts leasesets) is used because it's faster.
    boolean exploreAggressively = getContext().router().getUptime() < STARTUP_TIME
                                  || _facade.getDataStore().size() < MIN_ROUTERS
                                  || getContext().router().isHidden();
    if (exploreAggressively)
        return MIN_RERUN_DELAY_MS;
    // Low-bandwidth (BW12-class) routers also stay at the minimum.
    RouterInfo ours = getContext().router().getRouterInfo();
    if (ours != null && ours.getCapabilities().indexOf(Router.CAPABILITY_BW12) >= 0)
        return MIN_RERUN_DELAY_MS;
    // Plenty of routers known already: explore rarely.
    if (_facade.getDataStore().size() > MAX_ROUTERS)
        return MAX_RERUN_DELAY_MS;
    // Otherwise wait as long as it's been since the last successful find.
    long sinceLastFind = getContext().clock().now() - _facade.getLastExploreNewDate();
    return Math.max(MIN_RERUN_DELAY_MS, Math.min(MAX_RERUN_DELAY_MS, sinceLastFind));
}
Usage of net.i2p.data.router.RouterInfo in the i2p.i2p project (by i2p):
class StoreJob, method continueSending().
/**
 * Send a series of searches to the next available peers as selected by
 * the routing table, but making sure no more than PARALLELIZATION are outstanding
 * at any time
 *
 * Caller should synchronize to enforce parallelization limits and prevent dups
 */
private synchronized void continueSending() {
if (_state.completed())
return;
// How many more sends we may start without exceeding the parallelization cap.
int toCheck = getParallelization() - _state.getPending().size();
if (toCheck <= 0) {
// too many already pending
if (_log.shouldLog(Log.DEBUG))
_log.debug(getJobId() + ": Too many store messages pending");
return;
}
// Defensive clamp; only relevant if getPending() could report a negative-looking
// state — keeps toCheck within [1, parallelization].
if (toCheck > getParallelization())
toCheck = getParallelization();
// We are going to send the RouterInfo directly, rather than through a lease,
// so select a floodfill peer we are already connected to.
// This will help minimize active connections for floodfill peers and allow
// the network to scale.
// Perhaps the ultimate solution is to send RouterInfos through a lease also.
List<Hash> closestHashes;
// if (_state.getData() instanceof RouterInfo)
// closestHashes = getMostReliableRouters(_state.getTarget(), toCheck, _state.getAttempted());
// else
// closestHashes = getClosestRouters(_state.getTarget(), toCheck, _state.getAttempted());
closestHashes = getClosestFloodfillRouters(_state.getTarget(), toCheck, _state.getAttempted());
if ((closestHashes == null) || (closestHashes.isEmpty())) {
// No candidates at all: fail only if nothing is still in flight.
if (_state.getPending().isEmpty()) {
if (_log.shouldLog(Log.INFO))
_log.info(getJobId() + ": No more peers left and none pending");
fail();
} else {
if (_log.shouldLog(Log.INFO))
_log.info(getJobId() + ": No more peers left but some are pending, so keep waiting");
return;
}
} else {
// _state.addPending(closestHashes);
// Walk the candidates: skip anything that isn't a usable RouterInfo,
// send the store to the rest.
int queued = 0;
int skipped = 0;
for (Hash peer : closestHashes) {
DatabaseEntry ds = _facade.getDataStore().get(peer);
if ((ds == null) || !(ds.getType() == DatabaseEntry.KEY_TYPE_ROUTERINFO)) {
// Selection returned a key we don't have a RouterInfo for.
if (_log.shouldLog(Log.INFO))
_log.info(getJobId() + ": Error selecting closest hash that wasnt a router! " + peer + " : " + ds);
_state.addSkipped(peer);
skipped++;
} else if (!shouldStoreTo((RouterInfo) ds)) {
// Router version/state deemed too old to store to.
if (_log.shouldLog(Log.INFO))
_log.info(getJobId() + ": Skipping old router " + peer);
_state.addSkipped(peer);
skipped++;
/**
**
* above shouldStoreTo() check is newer than these two checks, so we're covered
*
* } else if (_state.getData().getType() == DatabaseEntry.KEY_TYPE_LEASESET &&
* !supportsCert((RouterInfo)ds,
* ((LeaseSet)_state.getData()).getDestination().getCertificate())) {
* if (_log.shouldLog(Log.INFO))
* _log.info(getJobId() + ": Skipping router that doesn't support key certs " + peer);
* _state.addSkipped(peer);
* skipped++;
* } else if (_state.getData().getType() == DatabaseEntry.KEY_TYPE_LEASESET &&
* ((LeaseSet)_state.getData()).getLeaseCount() > 6 &&
* !supportsBigLeaseSets((RouterInfo)ds)) {
* if (_log.shouldLog(Log.INFO))
* _log.info(getJobId() + ": Skipping router that doesn't support big leasesets " + peer);
* _state.addSkipped(peer);
* skipped++;
***
*/
} else {
// Usable candidate: mark pending and dispatch the store message.
int peerTimeout = _facade.getPeerTimeout(peer);
// if (!((RouterInfo)ds).isHidden()) {
if (_log.shouldLog(Log.INFO))
_log.info(getJobId() + ": Continue sending key " + _state.getTarget() + " after " + _state.getAttempted().size() + " tries to " + closestHashes);
_state.addPending(peer);
sendStore((RouterInfo) ds, peerTimeout);
queued++;
// }
}
}
if (queued == 0 && _state.getPending().isEmpty()) {
// Every candidate was skipped and nothing is in flight:
if (_log.shouldLog(Log.INFO))
_log.info(getJobId() + ": No more peers left after skipping " + skipped + " and none pending");
// queue a job to go around again rather than recursing
getContext().jobQueue().addJob(new WaitJob(getContext()));
}
}
}
Usage of net.i2p.data.router.RouterInfo in the i2p.i2p project (by i2p):
class PeerTestJob, method runJob().
/**
 * Pick a batch of peers and fire off a test to each, then reschedule
 * this job. Returns immediately once testing has been switched off.
 */
public void runJob() {
    if (!_keepTesting)
        return;
    Set<RouterInfo> candidates = selectPeersToTest();
    if (_log.shouldLog(Log.DEBUG))
        _log.debug("Testing " + candidates.size() + " peers");
    for (RouterInfo candidate : candidates) {
        if (_log.shouldLog(Log.DEBUG))
            _log.debug("Testing peer " + candidate.getIdentity().getHash().toBase64());
        testPeer(candidate);
    }
    requeue(getPeerTestDelay());
}
Usage of net.i2p.data.router.RouterInfo in the i2p.i2p project (by i2p):
class PeerTestJob, method selectPeersToTest().
/**
 * Retrieve a group of 0 or more peers that we want to test.
 * Returned list will not include ourselves.
 *
 * @return set of RouterInfo structures
 */
private Set<RouterInfo> selectPeersToTest() {
    // Ask the peer manager for exactly getTestConcurrency() candidates.
    PeerSelectionCriteria criteria = new PeerSelectionCriteria();
    criteria.setMinimumRequired(getTestConcurrency());
    criteria.setMaximumRequired(getTestConcurrency());
    criteria.setPurpose(PeerSelectionCriteria.PURPOSE_TEST);
    List<Hash> selected = _manager.selectPeers(criteria);
    if (_log.shouldLog(Log.DEBUG))
        _log.debug("Peer selection found " + selected.size() + " peers");
    // Resolve each hash to a locally-known RouterInfo, dropping unknowns.
    Set<RouterInfo> resolved = new HashSet<RouterInfo>(selected.size());
    for (Hash candidate : selected) {
        RouterInfo ri = getContext().netDb().lookupRouterInfoLocally(candidate);
        if (ri == null) {
            if (_log.shouldLog(Log.WARN))
                _log.warn("Test peer " + candidate.toBase64() + " had no local routerInfo?");
        } else {
            resolved.add(ri);
        }
    }
    return resolved;
}
Aggregations — end of the collected RouterInfo usage examples.