Use of net.i2p.data.router.RouterInfo in project i2p.i2p by i2p.
The class IterativeLookupJob, method runJob:
public void runJob() {
    Hash from = _dsrm.getFromHash();
    // those we sent the search to in _search
    if (!_search.wasQueried(from)) {
        if (_log.shouldLog(Log.WARN))
            _log.warn(_search.getJobId() + ": ILJ DSRM from unqueried peer: " + _dsrm);
        return;
    }
    // Chase the hashes from the reply
    // 255 max, see comments in SingleLookupJob
    int limit = Math.min(_dsrm.getNumReplies(), SingleLookupJob.MAX_TO_FOLLOW);
    int newPeers = 0;
    int oldPeers = 0;
    int invalidPeers = 0;
    for (int i = 0; i < limit; i++) {
        Hash peer = _dsrm.getReply(i);
        if (peer.equals(getContext().routerHash())) {
            // us
            oldPeers++;
            continue;
        }
        if (peer.equals(from)) {
            // unusual
            invalidPeers++;
            continue;
        }
        if (getContext().banlist().isBanlistedForever(peer)) {
            oldPeers++;
            continue;
        }
        RouterInfo ri = getContext().netDb().lookupRouterInfoLocally(peer);
        if (ri == null) {
            // Take it on faith that it's ff to speed things up, we don't need the RI
            // to query it.
            // Zero-hop outbound tunnel will be failed in ISJ.sendQuery()
            _search.newPeerToTry(peer);
            if (_search.getFromHash() == null) {
                // get the RI from the peer that told us about it
                // Only if original search used expl. tunnels
                getContext().jobQueue().addJob(new SingleSearchJob(getContext(), peer, from));
            } else {
                // don't look it up as we don't have a good way to do it securely...
                // add to expl. queue to look up later? no, probably not safe either
            }
            newPeers++;
        } else if (ri.getPublished() < getContext().clock().now() - 60 * 60 * 1000 ||
                   !FloodfillNetworkDatabaseFacade.isFloodfill(ri)) {
            // Only if original search used expl. tunnels
            if (_search.getFromHash() == null) {
                getContext().jobQueue().addJob(new IterativeFollowupJob(getContext(), peer, peer, _search));
            } else {
                // for now, don't believe him, don't call newPeerToTry()
                // is IFJ safe if we use the client tunnels?
            }
            oldPeers++;
        } else {
            // add it to the sorted queue
            // this will check if we have already tried it
            // should we really add? if we know about it but skipped it,
            // it was for some reason?
            _search.newPeerToTry(peer);
            oldPeers++;
        }
    }
    if (_log.shouldLog(Log.INFO))
        _log.info(_search.getJobId() + ": ILJ DSRM processed " + newPeers + '/' + oldPeers + '/' + invalidPeers + " new/old/invalid hashes");
    long timeSent = _search.timeSent(from);
    // assume 0 dup
    if (timeSent > 0) {
        getContext().profileManager().dbLookupReply(from, newPeers, oldPeers, invalidPeers, 0, getContext().clock().now() - timeSent);
    }
    _search.failed(_dsrm.getFromHash(), false);
}
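The heart of runJob() is the three-way triage of each reply hash into new, old, or invalid. Below is a minimal, self-contained sketch of that triage, using plain strings in place of I2P's Hash type and simple sets standing in for the netdb and banlist; all names here are hypothetical and for illustration only.

import java.util.Arrays;
import java.util.HashSet;
import java.util.List;
import java.util.Set;

// Hypothetical, self-contained model of the DSRM reply triage above:
// "new" = unknown peers worth querying, "old" = ourselves, banlisted, or
// already known, "invalid" = the responder referring us back to itself.
public class ReplyTriage {
    public static void main(String[] args) {
        String us = "ourRouter";
        String from = "responder";
        Set<String> known = new HashSet<>(Arrays.asList("peerA", "peerB"));
        Set<String> banned = new HashSet<>(Arrays.asList("peerB"));
        List<String> replies = Arrays.asList("peerA", "peerB", "peerC", from, us);

        int newPeers = 0, oldPeers = 0, invalidPeers = 0;
        for (String peer : replies) {
            if (peer.equals(us)) {
                oldPeers++;          // ourselves: nothing to chase
            } else if (peer.equals(from)) {
                invalidPeers++;      // responder named itself, never useful
            } else if (banned.contains(peer)) {
                oldPeers++;          // banlisted forever
            } else if (!known.contains(peer)) {
                newPeers++;          // unknown: assume floodfill, queue a try
            } else {
                oldPeers++;          // already in the local netdb
            }
        }
        System.out.println(newPeers + "/" + oldPeers + "/" + invalidPeers
                           + " new/old/invalid");
    }
}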
Use of net.i2p.data.router.RouterInfo in project i2p.i2p by i2p.
The class IterativeSearchJob, method sendQuery:
/**
 * Send a DLM to the peer
 */
private void sendQuery(Hash peer) {
    TunnelManagerFacade tm = getContext().tunnelManager();
    RouterInfo ri = getContext().netDb().lookupRouterInfoLocally(peer);
    if (ri != null) {
        // Now that most of the netdb is Ed RIs and EC LSs, don't even bother
        // querying old floodfills that don't know about those sig types.
        // This is also more recent than the version that supports encrypted replies,
        // so we won't request unencrypted replies anymore either.
        String v = ri.getVersion();
        String since = MIN_QUERY_VERSION;
        if (VersionComparator.comp(v, since) < 0) {
            failed(peer, false);
            if (_log.shouldLog(Log.WARN))
                _log.warn(getJobId() + ": not sending query to old version " + v + ": " + peer);
            return;
        }
    }
    TunnelInfo outTunnel;
    TunnelInfo replyTunnel;
    boolean isClientReplyTunnel;
    boolean isDirect;
    if (_fromLocalDest != null) {
        outTunnel = tm.selectOutboundTunnel(_fromLocalDest, peer);
        if (outTunnel == null)
            outTunnel = tm.selectOutboundExploratoryTunnel(peer);
        replyTunnel = tm.selectInboundTunnel(_fromLocalDest, peer);
        isClientReplyTunnel = replyTunnel != null;
        if (!isClientReplyTunnel)
            replyTunnel = tm.selectInboundExploratoryTunnel(peer);
        isDirect = false;
    } else if ((!_isLease) && ri != null && getContext().commSystem().isEstablished(peer)) {
        // If it's a RI lookup, not from a client, and we're already connected, just ask directly.
        // This also saves the ElG encryption for us and the decryption for the ff.
        // There's no anonymity reason to use an expl. tunnel... the main reason
        // is to limit connections to the ffs. But if we're already connected,
        // do it the fast and easy way.
        outTunnel = null;
        replyTunnel = null;
        isClientReplyTunnel = false;
        isDirect = true;
        getContext().statManager().addRateData("netDb.RILookupDirect", 1);
    } else {
        outTunnel = tm.selectOutboundExploratoryTunnel(peer);
        replyTunnel = tm.selectInboundExploratoryTunnel(peer);
        isClientReplyTunnel = false;
        isDirect = false;
        getContext().statManager().addRateData("netDb.RILookupDirect", 0);
    }
    if ((!isDirect) && (replyTunnel == null || outTunnel == null)) {
        failed();
        return;
    }
    // With a zero-hop outbound tunnel we would contact the floodfill directly,
    // and we risk not being able to send to it if we don't have an older netdb entry.
    if (outTunnel != null && outTunnel.getLength() <= 1) {
        if (peer.equals(_key)) {
            failed(peer, false);
            if (_log.shouldLog(Log.WARN))
                _log.warn(getJobId() + ": not doing zero-hop self-lookup of " + peer);
            return;
        }
        if (_facade.lookupLocallyWithoutValidation(peer) == null) {
            failed(peer, false);
            if (_log.shouldLog(Log.WARN))
                _log.warn(getJobId() + ": not doing zero-hop lookup to unknown " + peer);
            return;
        }
    }
    DatabaseLookupMessage dlm = new DatabaseLookupMessage(getContext(), true);
    if (isDirect) {
        dlm.setFrom(getContext().routerHash());
    } else {
        dlm.setFrom(replyTunnel.getPeer(0));
        dlm.setReplyTunnel(replyTunnel.getReceiveTunnelId(0));
    }
    dlm.setMessageExpiration(getContext().clock().now() + SINGLE_SEARCH_MSG_TIME);
    dlm.setSearchKey(_key);
    dlm.setSearchType(_isLease ? DatabaseLookupMessage.Type.LS : DatabaseLookupMessage.Type.RI);
    if (_log.shouldLog(Log.INFO)) {
        int tries;
        synchronized (this) {
            tries = _unheardFrom.size() + _failedPeers.size();
        }
        _log.info(getJobId() + ": ISJ try " + tries + " for " + (_isLease ? "LS " : "RI ") + _key +
                  " to " + peer + " direct? " + isDirect + " reply via client tunnel? " + isClientReplyTunnel);
    }
    long now = getContext().clock().now();
    _sentTime.put(peer, Long.valueOf(now));
    I2NPMessage outMsg = null;
    if (isDirect) {
        // never wrap
    } else if (_isLease ||
               (getContext().getProperty(PROP_ENCRYPT_RI, DEFAULT_ENCRYPT_RI) &&
                getContext().jobQueue().getMaxLag() < 300)) {
        // if we have the ff RI, garlic encrypt it
        if (ri != null) {
            // if (DatabaseLookupMessage.supportsEncryptedReplies(ri)) {
            if (true) {
                MessageWrapper.OneTimeSession sess;
                if (isClientReplyTunnel)
                    sess = MessageWrapper.generateSession(getContext(), _fromLocalDest);
                else
                    sess = MessageWrapper.generateSession(getContext());
                if (sess != null) {
                    if (_log.shouldLog(Log.INFO))
                        _log.info(getJobId() + ": Requesting encrypted reply from " + peer + ' ' + sess.key + ' ' + sess.tag);
                    dlm.setReplySession(sess.key, sess.tag);
                }
                // else client went away, but send it anyway
            }
            outMsg = MessageWrapper.wrap(getContext(), dlm, ri);
            // Wrapping can take a while, so do a final check before we send it;
            // a response may have come in.
            if (_dead) {
                if (_log.shouldLog(Log.DEBUG))
                    _log.debug(getJobId() + ": aborting send, finished while wrapping msg to " + peer);
                return;
            }
            if (_log.shouldLog(Log.DEBUG))
                _log.debug(getJobId() + ": Encrypted DLM for " + _key + " to " + peer);
        }
    }
    if (outMsg == null)
        outMsg = dlm;
    if (isDirect) {
        OutNetMessage m = new OutNetMessage(getContext(), outMsg, outMsg.getMessageExpiration(),
                                            OutNetMessage.PRIORITY_MY_NETDB_LOOKUP, ri);
        // Should always succeed, we are connected already
        // m.setOnFailedReplyJob(onFail);
        // m.setOnFailedSendJob(onFail);
        // m.setOnReplyJob(onReply);
        // m.setReplySelector(selector);
        // getContext().messageRegistry().registerPending(m);
        getContext().commSystem().processMessage(m);
    } else {
        getContext().tunnelDispatcher().dispatchOutbound(outMsg, outTunnel.getSendTunnelId(0), peer);
    }
    // The timeout job is always run (never cancelled)
    // Note that the timeout is much shorter than the message expiration (see above)
    Job j = new IterativeTimeoutJob(getContext(), peer, this);
    long expire = Math.min(_expiration, now + _singleSearchTime);
    j.getTiming().setStartAfter(expire);
    getContext().jobQueue().addJob(j);
}
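Before it builds the DatabaseLookupMessage, sendQuery() settles on one of three transport paths. The sketch below isolates just that decision as a pure function; the boolean parameters are placeholders for the client-destination, lease-lookup, local-RI, and connection checks, not the I2P API.

// Hypothetical sketch of the three-way path choice in sendQuery(): client
// tunnels when the lookup came from a local destination, a direct send for
// a RouterInfo lookup to an already-connected floodfill, and exploratory
// tunnels otherwise.
enum LookupPath { CLIENT_TUNNELS, DIRECT, EXPLORATORY_TUNNELS }

public class PathChoice {
    static LookupPath choose(boolean fromLocalDest, boolean isLease,
                             boolean haveRI, boolean connected) {
        if (fromLocalDest)
            return LookupPath.CLIENT_TUNNELS;   // keep client traffic in client tunnels
        if (!isLease && haveRI && connected)
            return LookupPath.DIRECT;           // fast path: no tunnels, no wrapping
        return LookupPath.EXPLORATORY_TUNNELS;  // default anonymous path
    }

    public static void main(String[] args) {
        System.out.println(choose(false, false, true, true));   // DIRECT
        System.out.println(choose(true, true, true, true));     // CLIENT_TUNNELS
        System.out.println(choose(false, true, false, false));  // EXPLORATORY_TUNNELS
    }
}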
Use of net.i2p.data.router.RouterInfo in project i2p.i2p by i2p.
The class KademliaNetworkDatabaseFacade, method startup:
public synchronized void startup() {
    _log.info("Starting up the kademlia network database");
    RouterInfo ri = _context.router().getRouterInfo();
    String dbDir = _context.getProperty(PROP_DB_DIR, DEFAULT_DB_DIR);
    _kb = new KBucketSet<Hash>(_context, ri.getIdentity().getHash(), BUCKET_SIZE, KAD_B, new RejectTrimmer<Hash>());
    try {
        _ds = new PersistentDataStore(_context, dbDir, this);
    } catch (IOException ioe) {
        throw new RuntimeException("Unable to initialize netdb storage", ioe);
    }
    // _ds = new TransientDataStore();
    // _exploreKeys = new HashSet(64);
    _dbDir = dbDir;
    _negativeCache = new NegativeLookupCache(_context);
    createHandlers();
    _initialized = true;
    _started = System.currentTimeMillis();
    // expire old leases
    Job elj = new ExpireLeasesJob(_context, this);
    elj.getTiming().setStartAfter(_context.clock().now() + 2 * 60 * 1000);
    _context.jobQueue().addJob(elj);
    // Don't run until after RefreshRoutersJob has run, and after validate() will return invalid for old routers.
    if (!_context.commSystem().isDummy()) {
        Job erj = new ExpireRoutersJob(_context, this);
        erj.getTiming().setStartAfter(_context.clock().now() + ROUTER_INFO_EXPIRATION_FLOODFILL + 10 * 60 * 1000);
        _context.jobQueue().addJob(erj);
    }
    if (!QUIET) {
        // _context.jobQueue().addJob(new ExploreKeySelectorJob(_context, this));
        if (_exploreJob == null)
            _exploreJob = new StartExplorersJob(_context, this);
        // fire off a group of searches from the explore pool
        // Don't start it right away, so we don't send searches for random keys
        // out our 0-hop exploratory tunnels (generating direct connections to
        // one or more floodfill peers within seconds of startup).
        // We're trying to minimize the ff connections to lessen the load on the
        // floodfills, and in any case let's try to build some real expl. tunnels first.
        // No rush, it only runs every 30m.
        _exploreJob.getTiming().setStartAfter(_context.clock().now() + EXPLORE_JOB_DELAY);
        _context.jobQueue().addJob(_exploreJob);
    } else {
        _log.warn("Operating in quiet mode - not exploring or pushing data proactively, simply reactively");
        _log.warn("This should NOT be used in production");
    }
    // periodically update and resign the router's 'published date', which basically
    // serves as a version
    Job plrij = new PublishLocalRouterInfoJob(_context);
    // do not delay this, as this creates the RI too, and we need a good local routerinfo right away
    // plrij.getTiming().setStartAfter(_context.clock().now() + PUBLISH_JOB_DELAY);
    _context.jobQueue().addJob(plrij);
    // plrij calls publish() for us
    // try {
    //     publish(ri);
    // } catch (IllegalArgumentException iae) {
    //     _context.router().rebuildRouterInfo(true);
    //     //_log.log(Log.CRIT, "Our local router info is b0rked, clearing from scratch", iae);
    //     //_context.router().rebuildNewIdentity();
    // }
}
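startup() deliberately staggers its maintenance jobs instead of firing them all at once. The same pattern looks like this outside the I2P job queue, using a plain ScheduledExecutorService; the delays are illustrative placeholders (the real constants range from minutes to over an hour), not the actual I2P values.

import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

// Illustrative equivalent of the staggered startup scheduling: publish the
// local RouterInfo immediately, expire leases soon, delay exploration until
// real tunnels can be built, and expire routers much later.
public class StaggeredStartup {
    public static void main(String[] args) {
        ScheduledExecutorService q = Executors.newSingleThreadScheduledExecutor();
        q.execute(() -> System.out.println("publish local RouterInfo")); // no delay
        q.schedule(() -> System.out.println("expire leases"), 2, TimeUnit.SECONDS);
        q.schedule(() -> System.out.println("start explorers"), 10, TimeUnit.SECONDS);
        q.schedule(() -> System.out.println("expire routers"), 70, TimeUnit.SECONDS);
        q.schedule(q::shutdown, 80, TimeUnit.SECONDS); // let the demo exit
    }
}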
Use of net.i2p.data.router.RouterInfo in project i2p.i2p by i2p.
The class KademliaNetworkDatabaseFacade, method store:
/**
 * Store the routerInfo.
 *
 * If the store fails due to unsupported crypto, it will banlist
 * the router hash until restart and then throw UnsupportedCryptoException.
 *
 * @throws IllegalArgumentException if the routerInfo is not valid
 * @throws UnsupportedCryptoException if that's why it failed.
 * @return previous entry or null
 */
RouterInfo store(Hash key, RouterInfo routerInfo, boolean persist) throws IllegalArgumentException {
    if (!_initialized)
        return null;
    RouterInfo rv = null;
    try {
        rv = (RouterInfo) _ds.get(key, persist);
        if ((rv != null) && (rv.equals(routerInfo))) {
            // no need to validate
            return rv;
        }
    } catch (ClassCastException cce) {
        throw new IllegalArgumentException("Attempt to replace LS with " + routerInfo);
    }
    // todo allow non-exp to overwrite exp
    if (rv != null && !routerInfo.getIdentity().equals(rv.getIdentity()))
        throw new IllegalArgumentException("RI Hash collision");
    String err = validate(key, routerInfo);
    if (err != null)
        throw new IllegalArgumentException("Invalid store attempt - " + err);
    // if (_log.shouldLog(Log.DEBUG))
    //     _log.debug("RouterInfo " + key.toBase64() + " is stored with "
    //                + routerInfo.getOptionsMap().size() + " options on "
    //                + new Date(routerInfo.getPublished()));
    _context.peerManager().setCapabilities(key, routerInfo.getCapabilities());
    _ds.put(key, routerInfo, persist);
    if (rv == null)
        _kb.add(key);
    return rv;
}
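store() is a check-then-replace pattern: short-circuit on an identical entry, refuse identity mismatches, validate, then put and return the previous value. Below is a reduced sketch of that contract over a plain map; the Entry record (Java 16+) and the published-date check are made-up stand-ins for RouterInfo and validate().

import java.util.HashMap;
import java.util.Map;

// Reduced sketch of the store() contract: return the previous entry,
// short-circuit when the stored value is identical, and refuse to replace
// an entry whose identity doesn't match.
public class TinyStore {
    record Entry(String identity, long published) {}

    private final Map<String, Entry> ds = new HashMap<>();

    Entry store(String key, Entry e) {
        Entry prev = ds.get(key);
        if (prev != null && prev.equals(e))
            return prev;                    // identical: no need to validate
        if (prev != null && !prev.identity().equals(e.identity()))
            throw new IllegalArgumentException("hash collision");
        if (e.published() <= 0)             // stand-in for validate()
            throw new IllegalArgumentException("invalid store attempt");
        ds.put(key, e);
        return prev;
    }

    public static void main(String[] args) {
        TinyStore s = new TinyStore();
        System.out.println(s.store("k", new Entry("id1", 1)));  // null
        System.out.println(s.store("k", new Entry("id1", 2)));  // previous entry
    }
}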
Use of net.i2p.data.router.RouterInfo in project i2p.i2p by i2p.
The class SearchJob, method resend:
/**
 * After we get the data we were searching for, rebroadcast it to the peers
 * we would query first if we were to search for it again (healing the network).
 */
private void resend() {
    DatabaseEntry ds = _facade.lookupLeaseSetLocally(_state.getTarget());
    if (ds == null) {
        if (SHOULD_RESEND_ROUTERINFO) {
            ds = _facade.lookupRouterInfoLocally(_state.getTarget());
            if (ds != null)
                _facade.sendStore(_state.getTarget(), ds, null, null, RESEND_TIMEOUT, _state.getSuccessful());
        }
    } else {
        // _state.getFailed();
        Set<Hash> sendTo = _state.getRepliedPeers();
        sendTo.addAll(_state.getPending());
        int numSent = 0;
        for (Hash peer : sendTo) {
            RouterInfo peerInfo = _facade.lookupRouterInfoLocally(peer);
            if (peerInfo == null)
                continue;
            if (resend(peerInfo, (LeaseSet) ds))
                numSent++;
            if (numSent >= MAX_LEASE_RESEND)
                break;
        }
        getContext().statManager().addRateData("netDb.republishQuantity", numSent, numSent);
    }
}
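The rebroadcast in resend() is deliberately capped. The loop below shows that capping in isolation, with strings for peers, a hard-coded skip standing in for the missing-RouterInfo check, and a placeholder cap; none of these names are from the I2P API.

import java.util.Arrays;
import java.util.LinkedHashSet;
import java.util.Set;

// Isolated sketch of the capped rebroadcast loop in resend(): merge the
// peers that replied with those still pending, skip any peer we have no
// RouterInfo for, and stop after MAX_LEASE_RESEND successful sends.
public class CappedResend {
    static final int MAX_LEASE_RESEND = 10;

    public static void main(String[] args) {
        Set<String> sendTo = new LinkedHashSet<>(Arrays.asList("a", "b", "c"));
        sendTo.addAll(Arrays.asList("d", "e"));   // still-pending peers
        int numSent = 0;
        for (String peer : sendTo) {
            if (peer.equals("b"))
                continue;                         // no local RouterInfo: skip
            numSent++;                            // stub for a successful resend
            if (numSent >= MAX_LEASE_RESEND)
                break;
        }
        System.out.println("resent to " + numSent + " peers");
    }
}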