Use of net.i2p.data.router.RouterInfo in project i2p.i2p by i2p.
Class TunnelPeerSelector, method getExclude.
/**
* Pick peers that we want to avoid
*/
public Set<Hash> getExclude(boolean isInbound, boolean isExploratory) {
    // We may want to update this to skip 'hidden' or 'unreachable' peers, but that
    // isn't safe, since they may publish one set of routerInfo to us and another to
    // other peers. The default for filterUnreachable has always been to return false,
    // but might as well make it explicit with a "false &&"
    //
    // Unreachable peers at the inbound gateway are a major cause of problems.
    // Due to a bug in SSU peer testing in 0.6.1.32 and earlier, peers don't know
    // if they are unreachable, so the netdb indication won't help much.
    // As of 0.6.1.33 we should have lots of unreachables, so enable this for now.
    // Also (and more effectively) exclude peers we detect are unreachable;
    // this should be much more effective, especially on a router that has been
    // up a few hours.
    //
    // We could just try and exclude them as the inbound gateway, but that's harder
    // (and even worse for anonymity?).
    //
    // Defaults changed to true for inbound only in filterUnreachable below.
    Set<Hash> peers = new HashSet<Hash>(8);
    peers.addAll(ctx.profileOrganizer().selectPeersRecentlyRejecting());
    peers.addAll(ctx.tunnelManager().selectPeersInTooManyTunnels());
    // if (false && filterUnreachable(ctx, isInbound, isExploratory)) {
    if (filterUnreachable(isInbound, isExploratory)) {
        // NOTE: filterUnreachable returns true for inbound, false for outbound
        // This is the only use for getPeersByCapability? And the whole set of datastructures in PeerManager?
        Collection<Hash> caps = ctx.peerManager().getPeersByCapability(Router.CAPABILITY_UNREACHABLE);
        if (caps != null)
            peers.addAll(caps);
        caps = ctx.profileOrganizer().selectPeersLocallyUnreachable();
        if (caps != null)
            peers.addAll(caps);
    }
    if (filterSlow(isInbound, isExploratory)) {
        // NOTE: filterSlow always returns true
        char[] excl = getExcludeCaps(ctx);
        if (excl != null) {
            FloodfillNetworkDatabaseFacade fac = (FloodfillNetworkDatabaseFacade) ctx.netDb();
            List<RouterInfo> known = fac.getKnownRouterData();
            if (known != null) {
                for (int i = 0; i < known.size(); i++) {
                    RouterInfo peer = known.get(i);
                    boolean shouldExclude = shouldExclude(peer, excl);
                    if (shouldExclude) {
                        peers.add(peer.getIdentity().calculateHash());
                        continue;
                    }
                    /*
                    String cap = peer.getCapabilities();
                    if (cap == null) {
                        peers.add(peer.getIdentity().calculateHash());
                        continue;
                    }
                    for (int j = 0; j < excl.length; j++) {
                        if (cap.indexOf(excl[j]) >= 0) {
                            peers.add(peer.getIdentity().calculateHash());
                            continue;
                        }
                    }
                    int maxLen = 0;
                    if (cap.indexOf(FloodfillNetworkDatabaseFacade.CAPACITY_FLOODFILL) >= 0)
                        maxLen++;
                    if (cap.indexOf(Router.CAPABILITY_REACHABLE) >= 0)
                        maxLen++;
                    if (cap.indexOf(Router.CAPABILITY_UNREACHABLE) >= 0)
                        maxLen++;
                    if (cap.length() <= maxLen)
                        peers.add(peer.getIdentity().calculateHash());
                    // otherwise, it contains flags we aren't trying to focus on,
                    // so don't exclude it based on published capacity
                    if (filterUptime(ctx, isInbound, isExploratory)) {
                        Properties opts = peer.getOptions();
                        if (opts != null) {
                            String val = opts.getProperty("stat_uptime");
                            long uptimeMs = 0;
                            if (val != null) {
                                long factor = 1;
                                if (val.endsWith("ms")) {
                                    factor = 1;
                                    val = val.substring(0, val.length() - 2);
                                } else if (val.endsWith("s")) {
                                    factor = 1000L;
                                    val = val.substring(0, val.length() - 1);
                                } else if (val.endsWith("m")) {
                                    factor = 60 * 1000L;
                                    val = val.substring(0, val.length() - 1);
                                } else if (val.endsWith("h")) {
                                    factor = 60 * 60 * 1000L;
                                    val = val.substring(0, val.length() - 1);
                                } else if (val.endsWith("d")) {
                                    factor = 24 * 60 * 60 * 1000L;
                                    val = val.substring(0, val.length() - 1);
                                }
                                try { uptimeMs = Long.parseLong(val); } catch (NumberFormatException nfe) {}
                                uptimeMs *= factor;
                            } else {
                                // not publishing an uptime, so exclude it
                                peers.add(peer.getIdentity().calculateHash());
                                continue;
                            }
                            long infoAge = ctx.clock().now() - peer.getPublished();
                            if (infoAge < 0) {
                                infoAge = 0;
                            } else if (infoAge > 24 * 60 * 60 * 1000) {
                                // Only exclude long-unseen peers if we haven't just started up
                                long DONT_EXCLUDE_PERIOD = 15 * 60 * 1000;
                                if (ctx.router().getUptime() < DONT_EXCLUDE_PERIOD) {
                                    if (log.shouldLog(Log.DEBUG))
                                        log.debug("Not excluding a long-unseen peer, since we just started up.");
                                } else {
                                    if (log.shouldLog(Log.DEBUG))
                                        log.debug("Excluding a long-unseen peer.");
                                    peers.add(peer.getIdentity().calculateHash());
                                }
                                //peers.add(peer.getIdentity().calculateHash());
                                continue;
                            } else {
                                if (infoAge + uptimeMs < 2 * 60 * 60 * 1000) {
                                    // up for less than 2 hours, so exclude it
                                    peers.add(peer.getIdentity().calculateHash());
                                }
                            }
                        } else {
                            // not publishing stats, so exclude it
                            peers.add(peer.getIdentity().calculateHash());
                            continue;
                        }
                    }
                    */
                }
            }
            /*
            for (int i = 0; i < excludeCaps.length(); i++) {
                List matches = ctx.peerManager().getPeersByCapability(excludeCaps.charAt(i));
                if (log.shouldLog(Log.INFO))
                    log.info("Filtering out " + matches.size() + " peers with capability " + excludeCaps.charAt(i));
                peers.addAll(matches);
            }
            */
        }
    }
    return peers;
}
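For illustration only, here is a minimal sketch of how the exclusion set returned above might be applied when assembling a hop list. pickUsable and its parameters are hypothetical stand-ins, not the router's actual selection API:

import java.util.ArrayList;
import java.util.List;
import java.util.Set;
import net.i2p.data.Hash;

class ExcludeSketch {
    // Hypothetical helper (not the actual selector API): apply the exclusion
    // set built by getExclude() to an ordered candidate list.
    static List<Hash> pickUsable(List<Hash> candidates, Set<Hash> exclude, int needed) {
        List<Hash> rv = new ArrayList<Hash>(needed);
        for (Hash h : candidates) {
            // skip peers flagged above: recently rejecting, in too many tunnels,
            // unreachable, or matching an excluded capability
            if (exclude.contains(h))
                continue;
            rv.add(h);
            if (rv.size() >= needed)
                break;
        }
        return rv;
    }
}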
Use of net.i2p.data.router.RouterInfo in project i2p.i2p by i2p.
Class TunnelPeerSelector, method getClosestHopExclude.
/**
* Pick peers that we want to avoid for the first OB hop or last IB hop.
* There are several cases of importance:
* <ol><li>Inbound and we are hidden -
* Exclude all unless connected.
* This is taken care of in ClientPeerSelector and TunnelPeerSelector selectPeers(), not here.
*
* <li>We are IPv6-only.
* Exclude all v4-only peers, unless connected.
* This is taken care of here.
*
* <li>We have NTCP or SSU disabled.
* Exclude all incompatible peers, unless connected.
* This is taken care of here.
*
* <li>Minimum version check, if we are some brand-new sig type,
* or are using some new tunnel build method.
* Not currently used, but this is where to implement the checks if needed.
* Make sure that ClientPeerSelector and TunnelPeerSelector selectPeers() call this when needed.
* </ol>
*
* Don't call this unless you need to.
* See ClientPeerSelector and TunnelPeerSelector selectPeers().
*
* @param isInbound
* @return null if none
* @since 0.9.17
*/
protected Set<Hash> getClosestHopExclude(boolean isInbound) {
    RouterInfo ri = ctx.router().getRouterInfo();
    if (ri == null)
        return null;
    // we can skip this check now, uncomment if we have some new sigtype
    // SigType type = ri.getIdentity().getSigType();
    // if (type == SigType.DSA_SHA1)
    //     return null;
    int ourMask = isInbound ? getInboundMask(ri) : getOutboundMask(ri);
    Set<Hash> connected = ctx.commSystem().getEstablished();
    Set<Hash> rv = new HashSet<Hash>(256);
    FloodfillNetworkDatabaseFacade fac = (FloodfillNetworkDatabaseFacade) ctx.netDb();
    List<RouterInfo> known = fac.getKnownRouterData();
    if (known != null) {
        for (int i = 0; i < known.size(); i++) {
            RouterInfo peer = known.get(i);
            // we can skip this check now, uncomment if we have some breaking change
            // String v = peer.getVersion();
            // RI sigtypes added in 0.9.16
            // SSU inbound connection bug fixed in 0.9.17, but it won't bid, so NTCP only,
            // no need to check
            // if (VersionComparator.comp(v, "0.9.16") < 0)
            //     rv.add(peer.getIdentity().calculateHash());
            Hash h = peer.getIdentity().calculateHash();
            if (connected.contains(h))
                continue;
            boolean canConnect = isInbound ? canConnect(peer, ourMask) : canConnect(ourMask, peer);
            if (!canConnect)
                rv.add(h);
        }
    }
    return rv;
}
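The canConnect() checks reduce each router's published addresses to a compatibility mask. A hedged sketch of that idea follows; the mask names are assumptions for this sketch, and the real masks in TunnelPeerSelector are more detailed:

// Illustrative transport/address-family bitmask check; the constants here
// are assumed names, not the real TunnelPeerSelector values.
final class ConnectMaskSketch {
    static final int NTCP_V4 = 1 << 0;
    static final int NTCP_V6 = 1 << 1;
    static final int SSU_V4  = 1 << 2;
    static final int SSU_V6  = 1 << 3;

    // A peer is usable as our closest hop only if the two routers share at
    // least one transport/IP-version combination.
    static boolean canConnect(int fromMask, int toMask) {
        return (fromMask & toMask) != 0;
    }
}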
Use of net.i2p.data.router.RouterInfo in project i2p.i2p by i2p.
Class BuildHandler, method handleRequest.
/**
* Decrypt the request, look up the RI locally,
* and call handleReq() if found, or queue a lookup job.
*
* @return handle time or -1 if it wasn't completely handled
*/
private long handleRequest(BuildMessageState state) {
    long timeSinceReceived = _context.clock().now() - state.recvTime;
    // if (_log.shouldLog(Log.DEBUG))
    //     _log.debug(state.msg.getUniqueId() + ": handling request after " + timeSinceReceived);
    Hash from = state.fromHash;
    if (from == null && state.from != null)
        from = state.from.calculateHash();
    if (timeSinceReceived > (BuildRequestor.REQUEST_TIMEOUT * 3)) {
        // don't even bother, since we are so overloaded locally
        _context.throttle().setTunnelStatus(_x("Dropping tunnel requests: Overloaded"));
        if (_log.shouldLog(Log.WARN))
            _log.warn("Not even trying to handle/decrypt the request " + state.msg.getUniqueId() + ", since we received it a long time ago: " + timeSinceReceived);
        _context.statManager().addRateData("tunnel.dropLoadDelay", timeSinceReceived);
        if (from != null)
            _context.commSystem().mayDisconnect(from);
        return -1;
    }
    // ok, this is not our own tunnel, so we need to do some heavy lifting
    // this not only decrypts the current hop's record, but encrypts the other records
    // with the enclosed reply key
    long beforeDecrypt = System.currentTimeMillis();
    BuildRequestRecord req = _processor.decrypt(state.msg, _context.routerHash(), _context.keyManager().getPrivateKey());
    long decryptTime = System.currentTimeMillis() - beforeDecrypt;
    _context.statManager().addRateData("tunnel.decryptRequestTime", decryptTime);
    if (decryptTime > 500 && _log.shouldLog(Log.WARN))
        _log.warn("Took too long to decrypt the request: " + decryptTime + " for message " + state.msg.getUniqueId() + " received " + (timeSinceReceived + decryptTime) + " ago");
    if (req == null) {
        // no records matched, or the decryption failed. bah
        if (_log.shouldLog(Log.WARN)) {
            _log.warn("The request " + state.msg.getUniqueId() + " could not be decrypted from: " + from);
        }
        _context.statManager().addRateData("tunnel.dropDecryptFail", 1);
        if (from != null)
            _context.commSystem().mayDisconnect(from);
        return -1;
    }
    long beforeLookup = System.currentTimeMillis();
    Hash nextPeer = req.readNextIdentity();
    long readPeerTime = System.currentTimeMillis() - beforeLookup;
    RouterInfo nextPeerInfo = _context.netDb().lookupRouterInfoLocally(nextPeer);
    long lookupTime = System.currentTimeMillis() - beforeLookup;
    if (lookupTime > 500 && _log.shouldLog(Log.WARN))
        _log.warn("Took too long to lookup the request: " + lookupTime + "/" + readPeerTime + " for " + req);
    if (nextPeerInfo == null) {
        // limit concurrent next-hop lookups to prevent job queue overload attacks
        int numTunnels = _context.tunnelManager().getParticipatingCount();
        int limit = Math.max(MIN_LOOKUP_LIMIT, Math.min(MAX_LOOKUP_LIMIT, numTunnels * PERCENT_LOOKUP_LIMIT / 100));
        int current;
        // leaky counter, since it isn't reliable
        if (_context.random().nextInt(16) > 0)
            current = _currentLookups.incrementAndGet();
        else
            current = 1;
        if (current <= limit) {
            // don't let it go negative
            if (current <= 0)
                _currentLookups.set(1);
            if (_log.shouldLog(Log.DEBUG))
                _log.debug("Request " + req + " handled, lookup next peer " + nextPeer + " lookups: " + current + '/' + limit);
            _context.netDb().lookupRouterInfo(nextPeer, new HandleReq(_context, state, req, nextPeer), new TimeoutReq(_context, state, req, nextPeer), NEXT_HOP_LOOKUP_TIMEOUT);
        } else {
            _currentLookups.decrementAndGet();
            if (_log.shouldLog(Log.WARN))
                _log.warn("Drop next hop lookup, limit " + limit + ": " + req);
            _context.statManager().addRateData("tunnel.dropLookupThrottle", 1);
            if (from != null)
                _context.commSystem().mayDisconnect(from);
        }
        return -1;
    } else {
        long beforeHandle = System.currentTimeMillis();
        handleReq(nextPeerInfo, state, req, nextPeer);
        long handleTime = System.currentTimeMillis() - beforeHandle;
        if (_log.shouldLog(Log.DEBUG))
            _log.debug("Request " + req + " handled and we know the next peer " + nextPeer + " after " + handleTime + "/" + decryptTime + "/" + lookupTime + "/" + timeSinceReceived);
        return handleTime;
    }
}
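The throttle in the nextPeerInfo == null branch uses a deliberately lossy counter: the decrements happen elsewhere (in HandleReq/TimeoutReq) and can be missed, so roughly one in sixteen lookups skips the increment and treats the count as 1, letting stale counts bleed away. A self-contained sketch of that pattern; the class and method names are illustrative, not from the I2P source:

import java.util.Random;
import java.util.concurrent.atomic.AtomicInteger;

// Sketch of the "leaky counter" throttle used above.
class LeakyLookupThrottle {
    private final AtomicInteger current = new AtomicInteger();
    private final Random random = new Random();

    // Returns true if a lookup may proceed under the given limit.
    boolean tryAcquire(int limit) {
        int now;
        if (random.nextInt(16) > 0)
            now = current.incrementAndGet();
        else
            now = 1; // ~1 in 16: skip the increment so missed releases leak away
        if (now <= limit) {
            if (now <= 0)
                current.set(1); // don't let it go negative
            return true;
        }
        current.decrementAndGet(); // back out before rejecting
        return false;
    }

    // Called when the lookup completes or times out (HandleReq/TimeoutReq above).
    void release() {
        current.decrementAndGet();
    }
}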
Use of net.i2p.data.router.RouterInfo in project i2p.i2p by i2p.
Class BuildHandler, method handleReply.
/**
* Blocking call to handle a single inbound reply
*/
private void handleReply(TunnelBuildReplyMessage msg, PooledTunnelCreatorConfig cfg, long delay) {
    long requestedOn = cfg.getExpiration() - 10 * 60 * 1000;
    long rtt = _context.clock().now() - requestedOn;
    if (_log.shouldLog(Log.INFO))
        _log.info(msg.getUniqueId() + ": Handling the reply after " + rtt + ", delayed " + delay + " waiting for " + cfg);
    List<Integer> order = cfg.getReplyOrder();
    int[] statuses = _buildReplyHandler.decrypt(msg, cfg, order);
    if (statuses != null) {
        boolean allAgree = true;
        // For each peer in the tunnel
        for (int i = 0; i < cfg.getLength(); i++) {
            Hash peer = cfg.getPeer(i);
            // Why must we save a slot for ourselves anyway?
            if (peer.equals(_context.routerHash()))
                continue;
            int record = order.indexOf(Integer.valueOf(i));
            if (record < 0) {
                _log.error("Bad status index " + i);
                // don't leak
                _exec.buildComplete(cfg, cfg.getTunnelPool());
                return;
            }
            int howBad = statuses[record];
            // Look up routerInfo
            RouterInfo ri = _context.netDb().lookupRouterInfoLocally(peer);
            // Default and detect bandwidth tier;
            // getBandwidthTier() returns "Unknown" if none recognized
            String bwTier = "Unknown";
            if (ri != null)
                bwTier = ri.getBandwidthTier();
            else if (_log.shouldLog(Log.WARN))
                _log.warn("Failed detecting bwTier, null routerInfo for: " + peer);
            // Record that a peer of the given tier agreed or rejected
            if (howBad == 0) {
                _context.statManager().addRateData("tunnel.tierAgree" + bwTier, 1);
            } else {
                _context.statManager().addRateData("tunnel.tierReject" + bwTier, 1);
            }
            if (_log.shouldLog(Log.INFO))
                _log.info(msg.getUniqueId() + ": Peer " + peer + " replied with status " + howBad);
            if (howBad == 0) {
                // w3wt
                _context.profileManager().tunnelJoined(peer, rtt);
            } else {
                allAgree = false;
                switch (howBad) {
                    case TunnelHistory.TUNNEL_REJECT_BANDWIDTH:
                        _context.statManager().addRateData("tunnel.receiveRejectionBandwidth", 1);
                        break;
                    case TunnelHistory.TUNNEL_REJECT_TRANSIENT_OVERLOAD:
                        _context.statManager().addRateData("tunnel.receiveRejectionTransient", 1);
                        break;
                    case TunnelHistory.TUNNEL_REJECT_PROBABALISTIC_REJECT:
                        _context.statManager().addRateData("tunnel.receiveRejectionProbabalistic", 1);
                        break;
                    case TunnelHistory.TUNNEL_REJECT_CRIT:
                    default:
                        _context.statManager().addRateData("tunnel.receiveRejectionCritical", 1);
                }
                // penalize peer based on their reported error level
                _context.profileManager().tunnelRejected(peer, rtt, howBad);
                _context.messageHistory().tunnelParticipantRejected(peer, "peer rejected after " + rtt + " with " + howBad + ": " + cfg.toString());
            }
        }
        if (allAgree) {
            // wikked, completely built
            boolean success;
            if (cfg.isInbound())
                success = _context.tunnelDispatcher().joinInbound(cfg);
            else
                success = _context.tunnelDispatcher().joinOutbound(cfg);
            if (!success) {
                // This will happen very rarely. We check for dups when
                // creating the config, but we don't track IDs for builds in progress.
                _context.statManager().addRateData("tunnel.ownDupID", 1);
                _exec.buildComplete(cfg, cfg.getTunnelPool());
                if (_log.shouldLog(Log.WARN))
                    _log.warn("Dup ID for our own tunnel " + cfg);
                return;
            }
            // self.self.self.foo!
            cfg.getTunnelPool().addTunnel(cfg);
            // call buildComplete() after addTunnel() so we don't try another build.
            _exec.buildComplete(cfg, cfg.getTunnelPool());
            _exec.buildSuccessful(cfg);
            if (cfg.getTunnelPool().getSettings().isExploratory()) {
                // Notify router that exploratory tunnels are ready
                boolean isIn = cfg.isInbound();
                synchronized (_startupLock) {
                    switch (_explState) {
                        case NONE:
                            if (isIn)
                                _explState = ExplState.IB;
                            else
                                _explState = ExplState.OB;
                            break;
                        case IB:
                            if (!isIn) {
                                _explState = ExplState.BOTH;
                                _context.router().setExplTunnelsReady();
                            }
                            break;
                        case OB:
                            if (isIn) {
                                _explState = ExplState.BOTH;
                                _context.router().setExplTunnelsReady();
                            }
                            break;
                        case BOTH:
                            break;
                    }
                }
            }
            ExpireJob expireJob = new ExpireJob(_context, cfg, cfg.getTunnelPool());
            cfg.setExpireJob(expireJob);
            _context.jobQueue().addJob(expireJob);
            if (cfg.getDestination() == null)
                _context.statManager().addRateData("tunnel.buildExploratorySuccess", rtt);
            else
                _context.statManager().addRateData("tunnel.buildClientSuccess", rtt);
        } else {
            // someone is no fun
            _exec.buildComplete(cfg, cfg.getTunnelPool());
            if (cfg.getDestination() == null)
                _context.statManager().addRateData("tunnel.buildExploratoryReject", rtt);
            else
                _context.statManager().addRateData("tunnel.buildClientReject", rtt);
        }
    } else {
        if (_log.shouldLog(Log.WARN))
            _log.warn(msg.getUniqueId() + ": Tunnel reply could not be decrypted for tunnel " + cfg);
        _context.statManager().addRateData("tunnel.corruptBuildReply", 1);
        // don't leak
        _exec.buildComplete(cfg, cfg.getTunnelPool());
    }
}
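The synchronized block above implements a small state machine for exploratory-tunnel readiness: the router is signalled only once both an inbound and an outbound exploratory tunnel exist. A standalone sketch of the same logic; ExplState mirrors the enum used in BuildHandler, while ExplTracker and onReady are illustrative stand-ins for the handler fields and _context.router().setExplTunnelsReady():

// Standalone sketch of the exploratory-readiness state machine above.
class ExplTracker {
    enum ExplState { NONE, IB, OB, BOTH }

    private ExplState state = ExplState.NONE;

    // Called once per successfully built exploratory tunnel; fires onReady
    // exactly once, when both directions exist.
    synchronized void tunnelBuilt(boolean isInbound, Runnable onReady) {
        switch (state) {
            case NONE:
                state = isInbound ? ExplState.IB : ExplState.OB;
                break;
            case IB:
                if (!isInbound) { state = ExplState.BOTH; onReady.run(); }
                break;
            case OB:
                if (isInbound) { state = ExplState.BOTH; onReady.run(); }
                break;
            case BOTH:
                break;
        }
    }
}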
Use of net.i2p.data.router.RouterInfo in project i2p.i2p by i2p.
Class BuildExecutor, method allowed.
private int allowed() {
    CommSystemFacade csf = _context.commSystem();
    if (csf.getStatus() == Status.DISCONNECTED)
        return 0;
    if (csf.isDummy() && csf.getEstablished().size() <= 0)
        return 0;
    int maxKBps = _context.bandwidthLimiter().getOutboundKBytesPerSecond();
    // Max. 1 concurrent build per 6 KB/s outbound
    int allowed = maxKBps / 6;
    RateStat rs = _context.statManager().getRate("tunnel.buildRequestTime");
    if (rs != null) {
        Rate r = rs.getRate(60 * 1000);
        double avg = 0;
        if (r != null)
            avg = r.getAverageValue();
        if (avg <= 0)
            avg = rs.getLifetimeAverageValue();
        if (avg > 1) {
            // If builds take more than 75 ms, start throttling
            int throttle = (int) (75 * MAX_CONCURRENT_BUILDS / avg);
            if (throttle < allowed) {
                allowed = throttle;
                if (allowed < MAX_CONCURRENT_BUILDS && _log.shouldLog(Log.INFO))
                    _log.info("Throttling max builds to " + allowed + " due to avg build time of " + ((int) avg) + " ms");
            }
        }
    }
    if (allowed < 2)
        // Never choke below 2 builds (but congestion may)
        allowed = 2;
    else if (allowed > MAX_CONCURRENT_BUILDS)
        allowed = MAX_CONCURRENT_BUILDS;
    allowed = _context.getProperty("router.tunnelConcurrentBuilds", allowed);
    // expire any REALLY old requests
    long expireBefore = _context.clock().now() + 10 * 60 * 1000 - BuildRequestor.REQUEST_TIMEOUT - GRACE_PERIOD;
    for (Iterator<PooledTunnelCreatorConfig> iter = _recentlyBuildingMap.values().iterator(); iter.hasNext(); ) {
        PooledTunnelCreatorConfig cfg = iter.next();
        if (cfg.getExpiration() <= expireBefore) {
            iter.remove();
        }
    }
    // expire any old requests
    List<PooledTunnelCreatorConfig> expired = null;
    int concurrent = 0;
    // Todo: Make expiration variable
    expireBefore = _context.clock().now() + 10 * 60 * 1000 - BuildRequestor.REQUEST_TIMEOUT;
    for (Iterator<PooledTunnelCreatorConfig> iter = _currentlyBuildingMap.values().iterator(); iter.hasNext(); ) {
        PooledTunnelCreatorConfig cfg = iter.next();
        if (cfg.getExpiration() <= expireBefore) {
            // save them for another minute
            _recentlyBuildingMap.putIfAbsent(Long.valueOf(cfg.getReplyMessageId()), cfg);
            iter.remove();
            if (expired == null)
                expired = new ArrayList<PooledTunnelCreatorConfig>();
            expired.add(cfg);
        }
    }
    concurrent = _currentlyBuildingMap.size();
    allowed -= concurrent;
    if (expired != null) {
        for (int i = 0; i < expired.size(); i++) {
            PooledTunnelCreatorConfig cfg = expired.get(i);
            if (_log.shouldLog(Log.INFO))
                _log.info("Timed out waiting for reply asking for " + cfg);
            // Also note the fact that this tunnel request timed out in the peers' profiles.
            for (int iPeer = 0; iPeer < cfg.getLength(); iPeer++) {
                // Look up peer
                Hash peer = cfg.getPeer(iPeer);
                // Avoid recording ourselves
                if (peer.equals(_context.routerHash()))
                    continue;
                // Look up routerInfo
                RouterInfo ri = _context.netDb().lookupRouterInfoLocally(peer);
                // Default and detect bandwidth tier
                String bwTier = "Unknown";
                // Returns "Unknown" if none recognized
                if (ri != null)
                    bwTier = ri.getBandwidthTier();
                // Record that a peer of the given tier expired
                _context.statManager().addRateData("tunnel.tierExpire" + bwTier, 1);
                didNotReply(cfg.getReplyMessageId(), peer);
                // Blame everybody since we don't know whose fault it is.
                // (it could be our exploratory tunnel's fault too...)
                _context.profileManager().tunnelTimedOut(peer);
            }
            TunnelPool pool = cfg.getTunnelPool();
            if (pool != null)
                pool.buildComplete(cfg);
            if (cfg.getDestination() == null) {
                _context.statManager().addRateData("tunnel.buildExploratoryExpire", 1);
                // if (cfg.isInbound())
                //     _context.statManager().addRateData("tunnel.buildExploratoryExpireIB", 1);
                // else
                //     _context.statManager().addRateData("tunnel.buildExploratoryExpireOB", 1);
            } else {
                _context.statManager().addRateData("tunnel.buildClientExpire", 1);
                // if (cfg.isInbound())
                //     _context.statManager().addRateData("tunnel.buildClientExpireIB", 1);
                // else
                //     _context.statManager().addRateData("tunnel.buildClientExpireOB", 1);
            }
        }
    }
    _context.statManager().addRateData("tunnel.concurrentBuilds", concurrent, 0);
    long lag = _context.jobQueue().getMaxLag();
    if ((lag > 2000) && (_context.router().getUptime() > 5 * 60 * 1000)) {
        if (_log.shouldLog(Log.WARN))
            _log.warn("Too lagged [" + lag + "], don't allow building");
        _context.statManager().addRateData("tunnel.concurrentBuildsLagged", concurrent, lag);
        // if we have a job heavily blocking our jobqueue, ssllloowww dddooowwwnnn
        return 0;
    }
    return allowed;
}
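A worked example of the throttle arithmetic above, with made-up inputs. MAX_CONCURRENT_BUILDS is defined in BuildExecutor; the value 13 here is only an assumption for illustration:

// Worked example of the throttle math in allowed(), with hypothetical inputs.
int MAX_CONCURRENT_BUILDS = 13;    // assumed constant value; see BuildExecutor
int maxKBps = 120;                 // hypothetical outbound bandwidth limit
int allowed = maxKBps / 6;         // bandwidth allows 20 concurrent builds
double avg = 150.0;                // hypothetical avg tunnel.buildRequestTime, in ms
if (avg > 1) {
    int throttle = (int) (75 * MAX_CONCURRENT_BUILDS / avg); // 975 / 150 = 6
    if (throttle < allowed)
        allowed = throttle;        // builds are slow: back off from 20 to 6
}
if (allowed < 2)
    allowed = 2;                   // floor of 2
else if (allowed > MAX_CONCURRENT_BUILDS)
    allowed = MAX_CONCURRENT_BUILDS; // cap at 13; final result here: 6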