Use of net.i2p.router.peermanager.PeerProfile in project i2p.i2p by i2p.
The class KademliaNetworkDatabaseFacade, method getPeerTimeout:
/**
 * todo: does this need more tuning?
 */
public int getPeerTimeout(Hash peer) {
    PeerProfile prof = _context.profileOrganizer().getProfile(peer);
    double responseTime = MAX_PER_PEER_TIMEOUT;
    if (prof != null && prof.getIsExpandedDB()) {
        responseTime = prof.getDbResponseTime().getRate(24 * 60 * 60 * 1000l).getAverageValue();
        // if 0 then there is no data, set to max.
        if (responseTime <= 0 || responseTime > MAX_PER_PEER_TIMEOUT)
            responseTime = MAX_PER_PEER_TIMEOUT;
        else if (responseTime < MIN_PER_PEER_TIMEOUT)
            responseTime = MIN_PER_PEER_TIMEOUT;
    }
    // give it up to 3x the average response time
    return TIMEOUT_MULTIPLIER * (int) responseTime;
}
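
The clamping behavior can be isolated into a small stand-alone sketch. The constant values below (2s floor, 5s ceiling, 3x multiplier) are illustrative assumptions, not necessarily the values defined in KademliaNetworkDatabaseFacade; the multiplier of 3 matches the "up to 3x" comment above.

// Stand-alone sketch of the per-peer timeout clamping; constant values are assumptions.
public class PeerTimeoutSketch {
    static final int MIN_PER_PEER_TIMEOUT = 2 * 1000;   // assumed 2s floor
    static final int MAX_PER_PEER_TIMEOUT = 5 * 1000;   // assumed 5s ceiling
    static final int TIMEOUT_MULTIPLIER = 3;            // "up to 3x the average response time"

    // avgResponseMs <= 0 means "no data": fall back to the maximum.
    static int timeoutFor(double avgResponseMs) {
        double responseTime = avgResponseMs;
        if (responseTime <= 0 || responseTime > MAX_PER_PEER_TIMEOUT)
            responseTime = MAX_PER_PEER_TIMEOUT;
        else if (responseTime < MIN_PER_PEER_TIMEOUT)
            responseTime = MIN_PER_PEER_TIMEOUT;
        return TIMEOUT_MULTIPLIER * (int) responseTime;
    }

    public static void main(String[] args) {
        System.out.println(timeoutFor(0));      // no data -> 3 * 5000 = 15000
        System.out.println(timeoutFor(1200));   // below floor -> 3 * 2000 = 6000
        System.out.println(timeoutFor(3400));   // in range -> 3 * 3400 = 10200
    }
}
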
Use of net.i2p.router.peermanager.PeerProfile in project i2p.i2p by i2p.
The class FloodfillMonitorJob, method shouldBeFloodfill:
private boolean shouldBeFloodfill() {
    if (!SigType.ECDSA_SHA256_P256.isAvailable())
        return false;
    // Hidden trumps netDb.floodfillParticipant=true
    if (getContext().router().isHidden())
        return false;
    String enabled = getContext().getProperty(PROP_FLOODFILL_PARTICIPANT, "auto");
    if ("true".equals(enabled))
        return true;
    if ("false".equals(enabled))
        return false;
    // Only if not shutting down...
    if (getContext().router().gracefulShutdownInProgress())
        return false;
    // ARM ElG decrypt is too slow
    if (SystemVersion.isARM() || SystemVersion.isAndroid())
        return false;
    if (getContext().getBooleanProperty(UDPTransport.PROP_LAPTOP_MODE))
        return false;
    // need IPv4 - The setting is the same for both SSU and NTCP, so just take the SSU one
    if (TransportUtil.getIPv6Config(getContext(), "SSU") == TransportUtil.IPv6Config.IPV6_ONLY)
        return false;
    // need both transports
    if (!TransportManager.isNTCPEnabled(getContext()))
        return false;
    if (!getContext().getBooleanPropertyDefaultTrue(TransportManager.PROP_ENABLE_UDP))
        return false;
    if (getContext().commSystem().isInBadCountry())
        return false;
    String country = getContext().commSystem().getOurCountry();
    // anonymous proxy, satellite provider (not in bad country list)
    if ("a1".equals(country) || "a2".equals(country))
        return false;
    // Only if up a while...
    if (getContext().router().getUptime() < MIN_UPTIME)
        return false;
    RouterInfo ri = getContext().router().getRouterInfo();
    if (ri == null)
        return false;
    char bw = ri.getBandwidthTier().charAt(0);
    // Only if class M, N, O, P, X
    if (bw != Router.CAPABILITY_BW64 && bw != Router.CAPABILITY_BW128 && bw != Router.CAPABILITY_BW256 &&
        bw != Router.CAPABILITY_BW512 && bw != Router.CAPABILITY_BW_UNLIMITED)
        return false;
    // This list will not include ourselves...
    List<Hash> floodfillPeers = _facade.getFloodfillPeers();
    long now = getContext().clock().now();
    // We know none at all! Must be our turn...
    if (floodfillPeers == null || floodfillPeers.isEmpty()) {
        _lastChanged = now;
        return true;
    }
    // Only change status every so often
    boolean wasFF = _facade.floodfillEnabled();
    if (_lastChanged + MIN_CHANGE_DELAY > now)
        return wasFF;
    // This is similar to the qualification we do in FloodOnlySearchJob.runJob().
    // Count the "good" ff peers.
    //
    // Who's not good?
    // the unheard-from, unprofiled, failing, unreachable and banlisted ones.
    // We should hear from floodfills pretty frequently so set a 60m time limit.
    // If unprofiled we haven't talked to them in a long time.
    // We aren't contacting the peer directly, so banlist doesn't strictly matter,
    // but it's a bad sign, and we often banlist a peer before we fail it...
    //
    // Future: use Integration calculation
    //
    int ffcount = floodfillPeers.size();
    int failcount = 0;
    long before = now - 60 * 60 * 1000;
    for (Hash peer : floodfillPeers) {
        PeerProfile profile = getContext().profileOrganizer().getProfile(peer);
        if (profile == null || profile.getLastHeardFrom() < before ||
            profile.getIsFailing() ||
            getContext().banlist().isBanlisted(peer) ||
            getContext().commSystem().wasUnreachable(peer))
            failcount++;
    }
    if (wasFF)
        ffcount++;
    int good = ffcount - failcount;
    boolean happy = getContext().router().getRouterInfo().getCapabilities().indexOf('R') >= 0;
    // TODO - limit may still be too high
    // For reference, the avg lifetime job lag on my Pi is 6.
    // Should we consider avg. dropped ff jobs?
    RateStat lagStat = getContext().statManager().getRate("jobQueue.jobLag");
    RateStat queueStat = getContext().statManager().getRate("router.tunnelBacklog");
    happy = happy && lagStat.getRate(60 * 60 * 1000L).getAvgOrLifetimeAvg() < 25;
    happy = happy && queueStat.getRate(60 * 60 * 1000L).getAvgOrLifetimeAvg() < 5;
    // Only if we're pretty well integrated...
    happy = happy && _facade.getKnownRouters() >= 400;
    happy = happy && getContext().commSystem().countActivePeers() >= 50;
    happy = happy && getContext().tunnelManager().getParticipatingCount() >= 25;
    happy = happy && Math.abs(getContext().clock().getOffset()) < 10 * 1000;
    // We need an address and no introducers
    if (happy) {
        RouterAddress ra = getContext().router().getRouterInfo().getTargetAddress("SSU");
        if (ra == null)
            happy = false;
        else {
            if (ra.getOption("ihost0") != null)
                happy = false;
        }
    }
    double elG = 0;
    RateStat stat = getContext().statManager().getRate("crypto.elGamal.decrypt");
    if (stat != null) {
        Rate rate = stat.getRate(60 * 60 * 1000L);
        if (rate != null) {
            elG = rate.getAvgOrLifetimeAvg();
            happy = happy && elG <= 40.0d;
        }
    }
    if (_log.shouldLog(Log.DEBUG)) {
        final RouterContext rc = getContext();
        final String log = String.format("FF criteria breakdown: happy=%b, capabilities=%s, maxLag=%d, known=%d, " +
                                         "active=%d, participating=%d, offset=%d, ssuAddr=%s ElG=%f",
                                         happy, rc.router().getRouterInfo().getCapabilities(),
                                         rc.jobQueue().getMaxLag(), _facade.getKnownRouters(),
                                         rc.commSystem().countActivePeers(),
                                         rc.tunnelManager().getParticipatingCount(),
                                         Math.abs(rc.clock().getOffset()),
                                         rc.router().getRouterInfo().getTargetAddress("SSU").toString(), elG);
        _log.debug(log);
    }
    // Too few, and we're reachable, let's volunteer
    if (good < MIN_FF && happy) {
        if (!wasFF) {
            _lastChanged = now;
            _log.logAlways(Log.INFO, "Only " + good + " ff peers and we want " + MIN_FF +
                           " so we are becoming floodfill");
        }
        return true;
    }
    // Too many, or we aren't reachable, let's stop
    if (good > MAX_FF || (good > MIN_FF && !happy)) {
        if (wasFF) {
            _lastChanged = now;
            _log.logAlways(Log.INFO, "Have " + good + " ff peers and we need only " + MIN_FF + " to " + MAX_FF +
                           " so we are disabling floodfill; reachable? " + happy);
        }
        return false;
    }
    if (_log.shouldLog(Log.INFO))
        _log.info("Have " + good + " ff peers, not changing, enabled? " + wasFF + "; reachable? " + happy);
    return wasFF;
}
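
The core of the decision above is a hysteresis rule: volunteer when the network looks short of healthy floodfills and this router is "happy" (reachable and well integrated), withdraw when there are plenty of floodfills or the router is unhappy, and otherwise keep the previous state. Below is a minimal stand-alone sketch of just that rule; the values chosen for MIN_FF and MAX_FF are illustrative assumptions, not the real constants defined in FloodfillMonitorJob.

// Minimal sketch of the volunteer/withdraw hysteresis above.
// MIN_FF and MAX_FF values here are assumptions for illustration only.
public class FloodfillDecisionSketch {
    static final int MIN_FF = 5000;
    static final int MAX_FF = 999999;

    static boolean decide(int goodFloodfills, boolean happy, boolean wasFloodfill) {
        // Too few healthy floodfills and we're in good shape: volunteer.
        if (goodFloodfills < MIN_FF && happy)
            return true;
        // Too many, or more than enough while we're unhealthy: withdraw.
        if (goodFloodfills > MAX_FF || (goodFloodfills > MIN_FF && !happy))
            return false;
        // In between: keep whatever we were doing (hysteresis).
        return wasFloodfill;
    }

    public static void main(String[] args) {
        System.out.println(decide(1200, true, false));   // true  -> volunteer
        System.out.println(decide(6000, false, true));   // false -> withdraw
        System.out.println(decide(6000, true, false));   // false -> keep current state
    }
}
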
Use of net.i2p.router.peermanager.PeerProfile in project i2p.i2p by i2p.
The class FloodfillPeerSelector, method selectFloodfillParticipantsIncludingUs:
/**
 * See above for description.
 * The returned list MAY CONTAIN our own hash unless it is included in toIgnore.
 * @param key the ROUTING key (NOT the original key)
 * @param howMany how many peers to select
 * @param toIgnore can be null
 * @param kbuckets now unused
 */
private List<Hash> selectFloodfillParticipantsIncludingUs(Hash key, int howMany, Set<Hash> toIgnore, KBucketSet<Hash> kbuckets) {
    List<Hash> ffs = selectFloodfillParticipants(toIgnore, kbuckets);
    TreeSet<Hash> sorted = new TreeSet<Hash>(new XORComparator<Hash>(key));
    sorted.addAll(ffs);
    List<Hash> rv = new ArrayList<Hash>(howMany);
    List<Hash> okff = new ArrayList<Hash>(ffs.size());
    List<Hash> badff = new ArrayList<Hash>(ffs.size());
    int found = 0;
    long now = _context.clock().now();
    long installed = _context.getProperty("router.firstInstalled", 0L);
    boolean enforceHeard = installed > 0 && (now - installed) > INSTALL_AGE;
    double maxFailRate = 100;
    if (_context.router().getUptime() > 60 * 60 * 1000) {
        RateStat rs = _context.statManager().getRate("peer.failedLookupRate");
        if (rs != null) {
            Rate r = rs.getRate(60 * 60 * 1000);
            if (r != null) {
                double currentFailRate = r.getAverageValue();
                maxFailRate = Math.max(0.20d, 1.5d * currentFailRate);
            }
        }
    }
    // 5 == FNDF.MAX_TO_FLOOD + 1
    int limit = Math.max(5, howMany);
    limit = Math.min(limit, ffs.size());
    MaskedIPSet maskedIPs = new MaskedIPSet(limit * 3);
    // split sorted list into 3 sorted lists
    for (int i = 0; found < howMany && i < limit; i++) {
        Hash entry = sorted.first();
        if (entry == null)
            // shouldn't happen
            break;
        sorted.remove(entry);
        // put anybody in the same /16 at the end
        RouterInfo info = _context.netDb().lookupRouterInfoLocally(entry);
        MaskedIPSet entryIPs = new MaskedIPSet(_context, entry, info, 2);
        boolean sameIP = false;
        for (String ip : entryIPs) {
            if (!maskedIPs.add(ip))
                sameIP = true;
        }
        if (sameIP) {
            badff.add(entry);
            if (_log.shouldLog(Log.DEBUG))
                _log.debug("Same /16, family, or port: " + entry);
        } else if (info != null && now - info.getPublished() > 3 * 60 * 60 * 1000) {
            badff.add(entry);
            if (_log.shouldLog(Log.DEBUG))
                _log.debug("Old: " + entry);
        } else if (info != null && _context.commSystem().isInBadCountry(info)) {
            badff.add(entry);
            if (_log.shouldLog(Log.DEBUG))
                _log.debug("Bad country: " + entry);
        } else if (info != null && info.getBandwidthTier().equals("L")) {
            badff.add(entry);
            if (_log.shouldLog(Log.DEBUG))
                _log.debug("Slow: " + entry);
        } else {
            PeerProfile prof = _context.profileOrganizer().getProfile(entry);
            double maxGoodRespTime = MAX_GOOD_RESP_TIME;
            RateStat ttst = _context.statManager().getRate("tunnel.testSuccessTime");
            if (ttst != null) {
                Rate tunnelTestTime = ttst.getRate(10 * 60 * 1000);
                if (tunnelTestTime != null && tunnelTestTime.getAverageValue() > 500)
                    maxGoodRespTime = 2 * tunnelTestTime.getAverageValue();
            }
            if (prof != null) {
                if (enforceHeard && prof.getFirstHeardAbout() > now - HEARD_AGE) {
                    if (_log.shouldLog(Log.DEBUG))
                        _log.debug("Bad (new): " + entry);
                    badff.add(entry);
                } else if (prof.getDBHistory() != null) {
                    if (prof.getDbResponseTime().getRate(10 * 60 * 1000).getAverageValue() < maxGoodRespTime &&
                        prof.getDBHistory().getLastStoreFailed() < now - NO_FAIL_STORE_GOOD &&
                        prof.getDBHistory().getLastLookupFailed() < now - NO_FAIL_LOOKUP_GOOD &&
                        prof.getDBHistory().getFailedLookupRate().getRate(60 * 60 * 1000).getAverageValue() < maxFailRate) {
                        // good
                        if (_log.shouldLog(Log.DEBUG))
                            _log.debug("Good: " + entry);
                        rv.add(entry);
                        found++;
                    } else if (prof.getDBHistory().getLastStoreFailed() <= prof.getDBHistory().getLastStoreSuccessful() ||
                               prof.getDBHistory().getLastLookupFailed() <= prof.getDBHistory().getLastLookupSuccessful() ||
                               (prof.getDBHistory().getLastStoreFailed() < now - NO_FAIL_STORE_OK &&
                                prof.getDBHistory().getLastLookupFailed() < now - NO_FAIL_LOOKUP_OK)) {
                        if (_log.shouldLog(Log.DEBUG))
                            _log.debug("OK: " + entry);
                        okff.add(entry);
                    } else {
                        if (_log.shouldLog(Log.DEBUG))
                            _log.debug("Bad (DB): " + entry);
                        badff.add(entry);
                    }
                } else {
                    // no DBHistory
                    if (_log.shouldLog(Log.DEBUG))
                        _log.debug("Bad (no hist): " + entry);
                    badff.add(entry);
                }
            } else {
                // no profile
                if (_log.shouldLog(Log.DEBUG))
                    _log.debug("Bad (no prof): " + entry);
                badff.add(entry);
            }
        }
    }
    if (_log.shouldLog(Log.INFO))
        _log.info("Good: " + rv + " OK: " + okff + " Bad: " + badff);
    // Put the ok floodfills after the good floodfills
    for (int i = 0; found < howMany && i < okff.size(); i++) {
        rv.add(okff.get(i));
        found++;
    }
    // Put the "bad" floodfills after the ok floodfills
    for (int i = 0; found < howMany && i < badff.size(); i++) {
        rv.add(badff.get(i));
        found++;
    }
    return rv;
}
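
The overall shape of the selection is: sort all candidates by XOR distance to the routing key, classify each into good/OK/bad tiers, then fill the result with good peers first and fall back to the OK and bad tiers until howMany is reached. Below is a stand-alone sketch of that pattern, using plain byte arrays instead of Hash and a caller-supplied classifier in place of the profile and IP checks; it is a simplification under those assumptions, not the selector's actual implementation.

import java.util.ArrayList;
import java.util.Comparator;
import java.util.List;
import java.util.TreeSet;
import java.util.function.Predicate;

// Sketch of "sort by XOR distance, then fill from good/OK/bad tiers".
// Uses byte[] keys of equal length and hypothetical isGood/isOk classifiers.
public class TieredXorSelectionSketch {

    // Compare candidates by XOR distance to a fixed target key (closest first).
    static Comparator<byte[]> xorCloseness(byte[] target) {
        return (a, b) -> {
            for (int i = 0; i < target.length; i++) {
                int da = (a[i] ^ target[i]) & 0xff;
                int db = (b[i] ^ target[i]) & 0xff;
                if (da != db)
                    return da - db;
            }
            return 0;
        };
    }

    static List<byte[]> select(byte[] key, List<byte[]> candidates, int howMany,
                               Predicate<byte[]> isGood, Predicate<byte[]> isOk) {
        // Sort every candidate by closeness to the routing key.
        TreeSet<byte[]> sorted = new TreeSet<>(xorCloseness(key));
        sorted.addAll(candidates);
        List<byte[]> good = new ArrayList<>();
        List<byte[]> ok = new ArrayList<>();
        List<byte[]> bad = new ArrayList<>();
        for (byte[] entry : sorted) {
            if (isGood.test(entry)) good.add(entry);
            else if (isOk.test(entry)) ok.add(entry);
            else bad.add(entry);
        }
        // Fill the result good-first, then OK, then bad, until howMany is reached.
        List<byte[]> rv = new ArrayList<>(howMany);
        for (List<byte[]> tier : List.of(good, ok, bad)) {
            for (byte[] entry : tier) {
                if (rv.size() >= howMany)
                    return rv;
                rv.add(entry);
            }
        }
        return rv;
    }
}
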