Usage example of net.i2p.router.ReplyJob in project i2p.i2p (by i2p):
the runJob method of the BuildTestMessageJob class.
/**
 * Runs the reachability test for {@code _target}.
 * <p>
 * If the peer is already known to be reachable, the success callback
 * ({@code _onSend}) is queued immediately. Otherwise a garlic message is
 * built and sent via a {@link SendGarlicJob}; the routed reply (peer then
 * back to us) triggers {@code _onSend}, while a failure or timeout triggers
 * {@code _onSendFailed}.
 */
public void runJob() {
// Fast path: nothing to test, just fire the success callback.
if (alreadyKnownReachable()) {
getContext().jobQueue().addJob(_onSend);
return;
}
// first goes to the peer then back to us.
if (_log.shouldLog(Log.DEBUG))
_log.debug("Building garlic message to test " + _target.getIdentity().getHash().toBase64());
GarlicConfig cloveConfig = buildGarlicCloveConfig();
// TODO: make the last params on this specify the correct sessionKey and tags used
ReplyJob onReply = new JobReplyJob(getContext(), _onSend, cloveConfig.getRecipient().getIdentity().getPublicKey(), cloveConfig.getId(), null, new HashSet<SessionTag>());
MessageSelector selector = buildMessageSelector();
// Same failure callback for both send failure and reply timeout.
SendGarlicJob sendJob = new SendGarlicJob(getContext(), cloveConfig, null, _onSendFailed, onReply, _onSendFailed, _timeoutMs, _priority, selector);
getContext().jobQueue().addJob(sendJob);
}
Usage example of net.i2p.router.ReplyJob in project i2p.i2p (by i2p):
the runJob method of the IterativeSearchJob class.
/**
 * Entry point for the iterative netDb search for {@code _key}.
 * Selects candidate floodfill peers (falling back to random known routers
 * when no floodfills are known), registers a reply selector with the
 * message registry, and then starts the first round of queries via
 * {@code retry()}. Bails out early (calling {@code failed()}) when the key
 * is negative-cached or no candidate peers exist at all.
 */
@Override
public void runJob() {
// Don't waste effort on a key we recently failed to find.
if (_facade.isNegativeCached(_key)) {
if (_log.shouldLog(Log.WARN))
_log.warn("Negative cached, not searching: " + _key);
failed();
return;
}
// pick some floodfill peers and send out the searches
List<Hash> floodfillPeers;
KBucketSet<Hash> ks = _facade.getKBuckets();
if (ks != null) {
// Ideally we would add the key to an exclude list, so we don't try to query a ff peer for itself,
// but we're passing the rkey not the key, so we do it below instead in certain cases.
floodfillPeers = ((FloodfillPeerSelector) _facade.getPeerSelector()).selectFloodfillParticipants(_rkey, _totalSearchLimit + EXTRA_PEERS, ks);
} else {
// No kbuckets available yet - start with an empty candidate list.
floodfillPeers = new ArrayList<Hash>(_totalSearchLimit);
}
// For testing or local networks... we will
// pretend that the specified router is floodfill, and always closest-to-the-key.
// May be set after startup but can't be changed or unset later.
// Warning - experts only!
String alwaysQuery = getContext().getProperty("netDb.alwaysQuery");
if (alwaysQuery != null) {
// Lazily decode and cache the configured hash on first use.
if (_alwaysQueryHash == null) {
byte[] b = Base64.decode(alwaysQuery);
if (b != null && b.length == Hash.HASH_LENGTH)
_alwaysQueryHash = Hash.create(b);
}
}
if (floodfillPeers.isEmpty()) {
// so this situation should be temporary
if (_log.shouldLog(Log.WARN))
_log.warn("Running netDb searches against the floodfill peers, but we don't know any");
// Fall back to a capped number of random non-floodfill routers we do know.
List<Hash> all = new ArrayList<Hash>(_facade.getAllRouters());
if (all.isEmpty()) {
if (_log.shouldLog(Log.ERROR))
_log.error("We don't know any peers at all");
failed();
return;
}
Iterator<Hash> iter = new RandomIterator<Hash>(all);
// so once we get some FFs we want to be sure to query them
for (int i = 0; iter.hasNext() && i < MAX_NON_FF; i++) {
floodfillPeers.add(iter.next());
}
}
final boolean empty;
// outside sync to avoid deadlock
final Hash us = getContext().routerHash();
synchronized (this) {
_toTry.addAll(floodfillPeers);
// don't ask ourselves or the target
_toTry.remove(us);
_toTry.remove(_key);
empty = _toTry.isEmpty();
}
if (empty) {
if (_log.shouldLog(Log.WARN))
_log.warn(getJobId() + ": ISJ for " + _key + " had no peers to send to");
// no floodfill peers, fail
failed();
return;
}
// This OutNetMessage is never used or sent (setMessage() is never called), it's only
// so we can register a reply selector.
MessageSelector replySelector = new IterativeLookupSelector(getContext(), this);
ReplyJob onReply = new FloodOnlyLookupMatchJob(getContext(), this);
Job onTimeout = new FloodOnlyLookupTimeoutJob(getContext(), this);
_out = getContext().messageRegistry().registerPending(replySelector, onReply, onTimeout);
if (_log.shouldLog(Log.INFO))
_log.info(getJobId() + ": New ISJ for " + (_isLease ? "LS " : "RI ") + _key + " (rkey " + _rkey + ") timeout " + DataHelper.formatDuration(_timeoutMs) + " toTry: " + DataHelper.toString(_toTry));
// Kick off the first round of lookups; retry() drives the iteration.
retry();
}
Aggregations