Usage example of net.i2p.data.i2np.DatabaseLookupMessage in project i2p.i2p: class FloodOnlySearchJob, method runJob().
/**
 * Run one round of a flood-only netDb search: select floodfill peers closest
 * to the routing key of {@code _key}, register a reply selector, and send a
 * DatabaseLookupMessage to each selected peer through an outbound tunnel,
 * keeping at most CONCURRENT_SEARCHES lookups in flight. Calls failed() and
 * returns early if no peers or no tunnels are available.
 */
@Override
public void runJob() {
// pick some floodfill peers and send out the searches
// old
// List<Hash> floodfillPeers = _facade.getFloodfillPeers();
// new
List<Hash> floodfillPeers;
KBucketSet<Hash> ks = _facade.getKBuckets();
if (ks != null) {
// select by closeness to the *routing* key derived from _key, not _key itself
Hash rkey = getContext().routingKeyGenerator().getRoutingKey(_key);
// Ideally we would add the key to an exclude list, so we don't try to query a ff peer for itself,
// but we're passing the rkey not the key, so we do it below instead in certain cases.
floodfillPeers = ((FloodfillPeerSelector) _facade.getPeerSelector()).selectFloodfillParticipants(rkey, MIN_FOR_NO_DSRM, ks);
} else {
// no local buckets at all; the empty-list recovery path below will handle it
floodfillPeers = Collections.emptyList();
}
// If we don't know enough floodfills,
// or the global network routing key just changed (which is set at startup,
// so this includes the first few minutes of uptime),
// ask peers to include DatabaseSearchReplyMessages so we can learn more floodfills
_shouldProcessDSRM = floodfillPeers.size() < MIN_FOR_NO_DSRM || getContext().routingKeyGenerator().getLastChanged() > getContext().clock().now() - 60 * 60 * 1000;
if (floodfillPeers.isEmpty()) {
// no floodfills known — fall back to any routers we do know;
// so this situation should be temporary
if (_log.shouldLog(Log.WARN))
_log.warn("Running netDb searches against the floodfill peers, but we don't know any");
floodfillPeers = new ArrayList<Hash>(_facade.getAllRouters());
if (floodfillPeers.isEmpty()) {
// nothing to query at all — the search cannot proceed
if (_log.shouldLog(Log.ERROR))
_log.error("We don't know any peers at all");
failed();
return;
}
// randomize so we don't always hit the same fallback routers
Collections.shuffle(floodfillPeers, getContext().random());
}
// This OutNetMessage is never used or sent (setMessage() is never called), it's only
// so we can register a reply selector.
_out = getContext().messageRegistry().registerPending(_replySelector, _onReply, _onTimeout);
/**
******
* // We need to randomize our ff selection, else we stay with the same ones since
* // getFloodfillPeers() is sorted by closest distance. Always using the same
* // ones didn't help reliability.
* // Also, query the unheard-from, unprofiled, failing, unreachable and banlisted ones last.
* // We should hear from floodfills pretty frequently so set a 30m time limit.
* // If unprofiled we haven't talked to them in a long time.
* // We aren't contacting the peer directly, so banlist doesn't strictly matter,
* // but it's a bad sign, and we often banlist a peer before we fail it...
* if (floodfillPeers.size() > CONCURRENT_SEARCHES) {
* Collections.shuffle(floodfillPeers, getContext().random());
* List ffp = new ArrayList(floodfillPeers.size());
* int failcount = 0;
* long before = getContext().clock().now() - 30*60*1000;
* for (int i = 0; i < floodfillPeers.size(); i++) {
* Hash peer = (Hash)floodfillPeers.get(i);
* PeerProfile profile = getContext().profileOrganizer().getProfile(peer);
* if (profile == null || profile.getLastHeardFrom() < before ||
* profile.getIsFailing() || getContext().banlist().isBanlisted(peer) ||
* getContext().commSystem().wasUnreachable(peer)) {
* failcount++;
* ffp.add(peer);
* } else
* ffp.add(0, peer);
* }
* // This will help us recover if the router just started and all the floodfills
* // have changed since the last time we were running
* if (floodfillPeers.size() - failcount <= 2)
* _shouldProcessDSRM = true;
* if (_log.shouldLog(Log.INFO) && failcount > 0)
* _log.info(getJobId() + ": " + failcount + " of " + floodfillPeers.size() + " floodfills are not heard from, unprofiled, failing, unreachable or banlisted");
* floodfillPeers = ffp;
* } else {
* _shouldProcessDSRM = true;
* }
*******
*/
// keep a separate count since _lookupsRemaining could be decremented elsewhere
int count = 0;
for (int i = 0; _lookupsRemaining.get() < CONCURRENT_SEARCHES && i < floodfillPeers.size(); i++) {
Hash peer = floodfillPeers.get(i);
// never send a lookup to ourselves
if (peer.equals(getContext().routerHash()))
continue;
DatabaseLookupMessage dlm = new DatabaseLookupMessage(getContext(), true);
TunnelInfo replyTunnel = getContext().tunnelManager().selectInboundTunnel();
TunnelInfo outTunnel = getContext().tunnelManager().selectOutboundTunnel();
if ((replyTunnel == null) || (outTunnel == null)) {
// no tunnels to send through or receive the reply on — abort the whole search
failed();
return;
}
// NOTE(review): the original comment here appears truncated. As written, this skips
// asking a floodfill about its own key over a short (<= 1 hop) outbound tunnel when
// there is at least one other candidate, presumably to avoid the risk of
// not being able to send to the floodfill, if we don't have an older netdb entry.
if (outTunnel.getLength() <= 1 && peer.equals(_key) && floodfillPeers.size() > 1)
continue;
// track peers we queried but haven't heard back from; _unheardFrom is shared, so lock
synchronized (_unheardFrom) {
_unheardFrom.add(peer);
}
dlm.setFrom(replyTunnel.getPeer(0));
dlm.setMessageExpiration(getContext().clock().now() + SINGLE_SEARCH_MSG_TIME);
dlm.setReplyTunnel(replyTunnel.getReceiveTunnelId(0));
dlm.setSearchKey(_key);
if (_log.shouldLog(Log.INFO))
_log.info(getJobId() + ": Floodfill search for " + _key + " to " + peer);
getContext().tunnelDispatcher().dispatchOutbound(dlm, outTunnel.getSendTunnelId(0), peer);
count++;
_lookupsRemaining.incrementAndGet();
}
if (count <= 0) {
if (_log.shouldLog(Log.INFO))
_log.info(getJobId() + ": Floodfill search for " + _key + " had no peers to send to");
// no floodfill peers, fail
failed();
}
}
Usage example of net.i2p.data.i2np.DatabaseLookupMessage in project i2p.i2p: class FloodfillDatabaseLookupMessageHandler, method createJob().
/**
 * Create the job that answers an incoming netDb lookup, or drop the lookup
 * if the facade says it should be throttled.
 *
 * Always increments the "netDb.lookupsReceived" stat; increments
 * "netDb.lookupsDropped" when the request is throttled.
 *
 * @param receivedMessage must be a DatabaseLookupMessage
 *                        (a ClassCastException propagates otherwise)
 * @param from            router identity of the direct sender; passed through
 *                        to the handler job
 * @param fromHash        hash of the direct sender; passed through to the
 *                        handler job
 * @return a HandleFloodfillDatabaseLookupMessageJob, or null if the lookup
 *         was dropped due to throttling
 */
public Job createJob(I2NPMessage receivedMessage, RouterIdentity from, Hash fromHash) {
    _context.statManager().addRateData("netDb.lookupsReceived", 1);
    DatabaseLookupMessage dlm = (DatabaseLookupMessage) receivedMessage;
    // Guard clause: drop throttled lookups early, then fall through to the normal path.
    if (_facade.shouldThrottleLookup(dlm.getFrom(), dlm.getReplyTunnel())) {
        if (_log.shouldLog(Log.WARN))
            _log.warn("Dropping lookup request for " + dlm.getSearchKey() + " (throttled), reply was to: " + dlm.getFrom() + " tunnel: " + dlm.getReplyTunnel());
        _context.statManager().addRateData("netDb.lookupsDropped", 1, 1);
        return null;
    }
    return new HandleFloodfillDatabaseLookupMessageJob(_context, dlm, from, fromHash);
}
Aggregations