Use of net.i2p.data.Hash in project i2p.i2p by i2p.
The class IterativeSearchJob, method failed().
/**
 * Total failure
 */
@Override
void failed() {
    synchronized (this) {
        if (_dead)
            return;
        _dead = true;
    }
    _facade.complete(_key);
    if (getContext().commSystem().getStatus() != Status.DISCONNECTED)
        _facade.lookupFailed(_key);
    getContext().messageRegistry().unregisterPending(_out);
    int tries;
    final List<Hash> unheard;
    synchronized (this) {
        tries = _unheardFrom.size() + _failedPeers.size();
        unheard = new ArrayList<Hash>(_unheardFrom);
    }
    // blame the unheard-from (others already blamed in failed() above)
    for (Hash h : unheard) {
        getContext().profileManager().dbLookupFailed(h);
    }
    long time = System.currentTimeMillis() - _created;
    if (_log.shouldLog(Log.INFO)) {
        long timeRemaining = _expiration - getContext().clock().now();
        _log.info(getJobId() + ": ISJ for " + _key + " failed with " + timeRemaining + " remaining after " + time + ", peers queried: " + tries);
    }
    if (tries > 0) {
        // don't bias the stats with immediate fails
        getContext().statManager().addRateData("netDb.failedTime", time);
        getContext().statManager().addRateData("netDb.failedRetries", tries - 1);
    }
    for (Job j : _onFailed) {
        getContext().jobQueue().addJob(j);
    }
    _onFailed.clear();
}
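The synchronized check-and-set of _dead at the top is what makes failed() (and the matching success() path) run its body at most once, even when timeout and reply callbacks race on different job threads; the snapshot of _unheardFrom is taken under the same lock so the blame loop can run without holding it. A minimal, self-contained sketch of that pattern, with illustrative names that are not from the I2P sources:

import java.util.ArrayList;
import java.util.HashSet;
import java.util.List;
import java.util.Set;

// Illustrative only: a search that must report failure (or success) exactly once,
// even when timeout and reply callbacks race on different threads.
class OneShotSearch {

    private boolean _dead;                                  // guarded by synchronized (this)
    private final Set<String> _unheardFrom = new HashSet<String>();

    /** Returns true only for the first caller; every later caller sees _dead == true and backs off. */
    private boolean markDead() {
        synchronized (this) {
            if (_dead)
                return false;
            _dead = true;
            return true;
        }
    }

    void failed() {
        if (!markDead())
            return;                                         // another thread already completed this job
        // Snapshot the shared state under the lock, then do the slow work (profile updates,
        // stat counters, queueing the onFailed jobs) without holding it.
        List<String> unheard;
        synchronized (this) {
            unheard = new ArrayList<String>(_unheardFrom);
        }
        for (String peer : unheard) {
            System.out.println("blaming unresponsive peer " + peer);
        }
    }
}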
Use of net.i2p.data.Hash in project i2p.i2p by i2p.
The class FloodOnlySearchJob, method success().
@Override
void success() {
    synchronized (this) {
        if (_dead)
            return;
        _dead = true;
        super.success();
    }
    if (_log.shouldLog(Log.INFO))
        _log.info(getJobId() + ": Floodfill search for " + _key + " successful");
    // Sadly, we don't know which of the two replied, unless the first one sent a DSRM
    // before the second one sent the answer, which isn't that likely.
    // Would be really nice to fix this, but it isn't clear how unless CONCURRENT_SEARCHES == 1.
    // Maybe don't unregister the msg from the Registry for a while and see if we get a 2nd reply?
    // Or delay the 2nd search for a few seconds?
    // We'll have to rely primarily on other searches (ExploreJob which calls SearchJob,
    // and FloodfillVerifyStoreJob) to record successful searches for now.
    // StoreJob also calls dbStoreSent() which updates the lastHeardFrom timer - this also helps.
    long time = System.currentTimeMillis() - _created;
    synchronized (_unheardFrom) {
        if (_unheardFrom.size() == 1) {
            Hash peer = _unheardFrom.iterator().next();
            getContext().profileManager().dbLookupSuccessful(peer, time);
        }
    }
    _facade.complete(_key);
    getContext().statManager().addRateData("netDb.successTime", time);
    for (Job j : _onFind) {
        getContext().jobQueue().addJob(j);
    }
}
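Because the concurrent lookups are not matched to their replies, the code can only credit a peer when the ambiguity disappears, i.e. when exactly one queried peer sits in _unheardFrom. A small illustrative sketch of that rule (names are not from the I2P sources):

import java.util.HashSet;
import java.util.Set;

// Illustrative only: with concurrent lookups the responder can only be credited
// when exactly one queried peer remains a candidate.
class LookupCredit {

    private final Set<String> _unheardFrom = new HashSet<String>();   // peers the lookup was sent to
    private final long _created = System.currentTimeMillis();

    void success() {
        long time = System.currentTimeMillis() - _created;
        synchronized (_unheardFrom) {
            if (_unheardFrom.size() == 1) {
                // The lookup went to exactly one peer, so the reply must have come from it.
                String peer = _unheardFrom.iterator().next();
                System.out.println("crediting " + peer + " with a successful lookup in " + time + " ms");
            }
            // With two or more candidates we cannot tell who answered, so nobody gets credit here.
        }
    }
}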
Use of net.i2p.data.Hash in project i2p.i2p by i2p.
The class FloodfillNetworkDatabaseFacade, method flood().
/**
 * Send to a subset of all floodfill peers.
 * We do this to implement Kademlia within the floodfills, i.e.
 * we flood to those closest to the key.
 */
public void flood(DatabaseEntry ds) {
    Hash key = ds.getHash();
    RouterKeyGenerator gen = _context.routerKeyGenerator();
    Hash rkey = gen.getRoutingKey(key);
    FloodfillPeerSelector sel = (FloodfillPeerSelector) getPeerSelector();
    List<Hash> peers = sel.selectFloodfillParticipants(rkey, MAX_TO_FLOOD, getKBuckets());
    // todo key cert skip?
    long until = gen.getTimeTillMidnight();
    if (until < NEXT_RKEY_LS_ADVANCE_TIME || (ds.getType() == DatabaseEntry.KEY_TYPE_ROUTERINFO && until < NEXT_RKEY_RI_ADVANCE_TIME)) {
        // to avoid lookup failures after midnight, also flood to some closest to the
        // next routing key for a period of time before midnight.
        Hash nkey = gen.getNextRoutingKey(key);
        List<Hash> nextPeers = sel.selectFloodfillParticipants(nkey, NEXT_FLOOD_QTY, getKBuckets());
        int i = 0;
        for (Hash h : nextPeers) {
            // don't flood an entry back to the router it describes; a floodfill would
            // do its own flooding, but other implementations may not...
            if (h.equals(key))
                continue;
            // todo key cert skip?
            if (!peers.contains(h)) {
                peers.add(h);
                i++;
            }
        }
        if (i > 0 && _log.shouldLog(Log.INFO))
            _log.info("Flooding the entry for " + key + " to " + i + " more, just before midnight");
    }
    int flooded = 0;
    for (int i = 0; i < peers.size(); i++) {
        Hash peer = peers.get(i);
        RouterInfo target = lookupRouterInfoLocally(peer);
        if ((target == null) || (_context.banlist().isBanlisted(peer)))
            continue;
        // don't flood a RouterInfo back to the router it describes; a floodfill would
        // do its own flooding, but other implementations may not...
        if (ds.getType() == DatabaseEntry.KEY_TYPE_ROUTERINFO && peer.equals(key))
            continue;
        if (peer.equals(_context.routerHash()))
            continue;
        DatabaseStoreMessage msg = new DatabaseStoreMessage(_context);
        msg.setEntry(ds);
        OutNetMessage m = new OutNetMessage(_context, msg, _context.clock().now() + FLOOD_TIMEOUT, FLOOD_PRIORITY, target);
        Job floodFail = new FloodFailedJob(_context, peer);
        m.setOnFailedSendJob(floodFail);
        // we want to give credit on success, even if we aren't sure,
        // because otherwise there is no use in noting failure
        Job floodGood = new FloodSuccessJob(_context, peer);
        m.setOnSendJob(floodGood);
        _context.commSystem().processMessage(m);
        flooded++;
        if (_log.shouldLog(Log.INFO))
            _log.info("Flooding the entry for " + key.toBase64() + " to " + peer.toBase64());
    }
    if (_log.shouldLog(Log.INFO))
        _log.info("Flooded the data to " + flooded + " of " + peers.size() + " peers");
}
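Two ideas drive flood(): the routing key is derived from the entry's key plus the current UTC date, so it changes at midnight (hence the extra flood toward the next day's key shortly before the rollover), and the targets are the floodfills whose hashes are closest to that routing key by XOR distance, Kademlia-style. The sketch below illustrates both; the derivation shown is a simplified stand-in for RouterKeyGenerator, not its exact algorithm, and all class and method names are illustrative:

import java.math.BigInteger;
import java.nio.charset.StandardCharsets;
import java.security.MessageDigest;
import java.text.SimpleDateFormat;
import java.util.ArrayList;
import java.util.Comparator;
import java.util.Date;
import java.util.List;
import java.util.TimeZone;

// Illustrative sketch only; the real RouterKeyGenerator and FloodfillPeerSelector differ in detail.
class FloodTargetSketch {

    /** Simplified daily routing key: SHA-256 over (key bytes || UTC date string), so it changes at midnight UTC. */
    static byte[] routingKey(byte[] keyBytes, Date when) throws Exception {
        SimpleDateFormat fmt = new SimpleDateFormat("yyyyMMdd");
        fmt.setTimeZone(TimeZone.getTimeZone("UTC"));
        MessageDigest sha = MessageDigest.getInstance("SHA-256");
        sha.update(keyBytes);
        sha.update(fmt.format(when).getBytes(StandardCharsets.US_ASCII));
        return sha.digest();
    }

    /** Kademlia-style selection: the 'max' peer hashes closest to rkey by XOR distance. */
    static List<byte[]> closest(final byte[] rkey, List<byte[]> peerHashes, int max) {
        List<byte[]> sorted = new ArrayList<byte[]>(peerHashes);
        sorted.sort(Comparator.comparing((byte[] peer) -> new BigInteger(1, xor(peer, rkey))));
        return sorted.subList(0, Math.min(max, sorted.size()));
    }

    private static byte[] xor(byte[] a, byte[] b) {
        byte[] out = new byte[a.length];
        for (int i = 0; i < a.length; i++)
            out[i] = (byte) (a[i] ^ b[i]);
        return out;
    }
}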
Use of net.i2p.data.Hash in project i2p.i2p by i2p.
The class FloodfillPeerSelector, method selectNearest().
/**
 * Floodfill peers only. Used only by HandleDatabaseLookupMessageJob to populate the DSRM.
 * UNLESS peersToIgnore contains Hash.FAKE_HASH (all zeros), in which case this is an exploratory
 * lookup, and the response should not include floodfills.
 * List MAY INCLUDE our own router - add it to peersToIgnore if you don't want that.
 *
 * @param key the original key (NOT the routing key)
 * @param peersToIgnore can be null
 * @return List of Hash for the peers selected, ordered
 */
@Override
List<Hash> selectNearest(Hash key, int maxNumRouters, Set<Hash> peersToIgnore, KBucketSet<Hash> kbuckets) {
    Hash rkey = _context.routingKeyGenerator().getRoutingKey(key);
    if (peersToIgnore != null && peersToIgnore.contains(Hash.FAKE_HASH)) {
        // return non-ff
        peersToIgnore.addAll(selectFloodfillParticipants(peersToIgnore, kbuckets));
        // TODO this is very slow
        FloodfillSelectionCollector matches = new FloodfillSelectionCollector(rkey, peersToIgnore, maxNumRouters);
        kbuckets.getAll(matches);
        return matches.get(maxNumRouters);
    } else {
        // return ff
        return selectFloodfillParticipantsIncludingUs(rkey, maxNumRouters, peersToIgnore, kbuckets);
    }
}
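The Hash.FAKE_HASH sentinel is the only way a caller signals "give me non-floodfills" here, so a caller such as HandleDatabaseLookupMessageJob has to seed peersToIgnore before calling. A hypothetical caller sketch follows; the DsrmPeerChooser class, its placement in this package, and its parameter names are assumptions made for illustration, not part of the I2P sources:

package net.i2p.router.networkdb.kademlia;   // hypothetical location, so the package-private method is reachable

import java.util.HashSet;
import java.util.List;
import java.util.Set;
import net.i2p.data.Hash;
import net.i2p.kademlia.KBucketSet;
import net.i2p.router.RouterContext;

// Illustrative only; not part of the I2P sources.
class DsrmPeerChooser {

    /** Pick up to max referral peers for a DSRM reply. */
    static List<Hash> choose(RouterContext ctx, FloodfillPeerSelector selector, KBucketSet<Hash> kbuckets,
                             Hash key, Hash fromPeer, boolean exploratory, int max) {
        Set<Hash> ignore = new HashSet<Hash>();
        ignore.add(fromPeer);               // never refer the asker back to itself
        ignore.add(ctx.routerHash());       // the result MAY INCLUDE our own router unless we ignore it
        if (exploratory)
            ignore.add(Hash.FAKE_HASH);     // all-zeros sentinel: reply with non-floodfill peers instead
        return selector.selectNearest(key, max, ignore, kbuckets);
    }
}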
Use of net.i2p.data.Hash in project i2p.i2p by i2p.
The class FloodfillPeerSelector, method selectFloodfillParticipants().
/**
 * @param kbuckets now unused
 * @param toIgnore can be null
 * @return all floodfills not banlisted forever.
 * List MAY INCLUDE our own hash.
 * List is not sorted and not shuffled.
 */
private List<Hash> selectFloodfillParticipants(Set<Hash> toIgnore, KBucketSet<Hash> kbuckets) {
    /*****
    if (kbuckets == null) return Collections.EMPTY_LIST;
    // TODO this is very slow - use profile getPeersByCapability('f') instead
    _context.statManager().addRateData("netDb.newFSC", 0, 0);
    FloodfillSelectionCollector matches = new FloodfillSelectionCollector(null, toIgnore, 0);
    kbuckets.getAll(matches);
    return matches.getFloodfillParticipants();
    *****/
    Set<Hash> set = _context.peerManager().getPeersByCapability(FloodfillNetworkDatabaseFacade.CAPABILITY_FLOODFILL);
    List<Hash> rv = new ArrayList<Hash>(set.size());
    for (Hash h : set) {
        if ((toIgnore != null && toIgnore.contains(h)) || _context.banlist().isBanlistedForever(h))
            continue;
        rv.add(h);
    }
    return rv;
}
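Since the returned list is unsorted, unshuffled, and may contain our own hash, callers that want a random subset have to post-process it themselves. A hypothetical fragment as it might appear inside FloodfillPeerSelector (the variable names and the sample size of 8 are illustrative):

Set<Hash> none = java.util.Collections.emptySet();
List<Hash> ffs = selectFloodfillParticipants(none, null);     // the kbuckets parameter is now unused
ffs.remove(_context.routerHash());                            // drop ourselves if we happen to be a floodfill
java.util.Collections.shuffle(ffs, _context.random());        // the method itself does not randomize the order
List<Hash> sample = ffs.subList(0, Math.min(8, ffs.size()));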