Use of com.biglybt.pif.peers.PeerManager in project BiglyBT by BiglySoftware.
Class ExternalSeedPlugin, method addPeers.
// Registers a batch of external (web) seeds against a download. Duplicates of
// seeds already tracked for that download are dropped. The first time a
// download acquires external seeds, a peer listener is installed so the seeds
// get attached to (and detached from) the download's PeerManager as it comes
// and goes; on subsequent calls the new seeds are attached to the current
// PeerManager directly.
protected void addPeers(final Download download, List _peers) {
// Work on a private copy so the caller's list is never mutated (it.remove below).
final List peers = new ArrayList();
peers.addAll(_peers);
if (peers.size() > 0) {
boolean add_listener = false;
try {
download_mon.enter();
List existing_peers = (List) download_map.get(download);
if (existing_peers == null) {
// First external seeds for this download - a listener will be needed.
add_listener = true;
existing_peers = new ArrayList();
download_map.put(download, existing_peers);
}
Iterator it = peers.iterator();
while (it.hasNext()) {
ExternalSeedPeer peer = (ExternalSeedPeer) it.next();
// Drop any seed we already track for this download (sameAs match).
boolean skip = false;
for (int j = 0; j < existing_peers.size(); j++) {
ExternalSeedPeer existing_peer = (ExternalSeedPeer) existing_peers.get(j);
if (existing_peer.sameAs(peer)) {
skip = true;
break;
}
}
if (skip) {
it.remove();
} else {
log(download.getName() + " found seed " + peer.getName());
existing_peers.add(peer);
}
}
setStatus("Running: Downloads with external seeds = " + download_map.size());
} finally {
download_mon.exit();
}
if (add_listener) {
// Attach/detach all known seeds whenever this download's PeerManager
// appears or disappears.
download.addPeerListener(new DownloadPeerListener() {
@Override
public void peerManagerAdded(Download download, PeerManager peer_manager) {
List existing_peers = getPeers();
if (existing_peers == null) {
return;
}
for (int i = 0; i < existing_peers.size(); i++) {
ExternalSeedPeer peer = (ExternalSeedPeer) existing_peers.get(i);
peer.setManager(peer_manager);
}
}
@Override
public void peerManagerRemoved(Download download, PeerManager peer_manager) {
List existing_peers = getPeers();
if (existing_peers == null) {
return;
}
for (int i = 0; i < existing_peers.size(); i++) {
ExternalSeedPeer peer = (ExternalSeedPeer) existing_peers.get(i);
peer.setManager(null);
}
}
// Snapshot of this download's current seed list, copied under the
// monitor so callers can iterate it without holding the lock.
protected List getPeers() {
List existing_peers = null;
try {
download_mon.enter();
List temp = (List) download_map.get(download);
if (temp != null) {
existing_peers = new ArrayList(temp.size());
existing_peers.addAll(temp);
}
} finally {
download_mon.exit();
}
return (existing_peers);
}
});
} else {
// Listener already installed: fix up newly added peers to the current
// peer manager (the listener only fires on manager add/remove).
PeerManager existing_pm = download.getPeerManager();
if (existing_pm != null) {
for (int i = 0; i < peers.size(); i++) {
ExternalSeedPeer peer = (ExternalSeedPeer) peers.get(i);
if (peer.getManager() == null) {
peer.setManager(existing_pm);
}
}
}
}
}
}
Use of com.biglybt.pif.peers.PeerManager in project BiglyBT by BiglySoftware.
Class DefaultRankCalculator, method _recalcSeedingRankSupport.
/**
 * Recalculates the seeding rank (SR) for this download.
 * <p>
 * Incomplete downloads always rank above completed ones so they can start
 * first. For seeding downloads the "ignore" rules (share ratio met, zero
 * peers, seed count met, peer:seed ratio met) are checked first - unless the
 * download is First Priority - and then a rank is derived according to the
 * configured rank type (timed / peer count / seed count / S:P ratio).
 * Debug explanations are accumulated into {@code sExplainSR} when
 * {@code rules.bDebugLog} is set.
 * <p>
 * Fixes in this revision (debug output only, ranking behaviour unchanged):
 * the P:S ignore message previously reported {@code iIgnoreRatioPeers_SeedStart}
 * as the threshold although the value actually compared is
 * {@code iIgnoreRatioPeers}, and the seed-count message was missing a ')'.
 *
 * @param oldSR the previous seeding rank, used to detect rank-band
 *              transitions in the timed rank type
 * @return the new seeding rank, or one of the SR_* sentinel codes
 */
private int _recalcSeedingRankSupport(int oldSR) {
	sExplainSR = "";
	DownloadStats stats = dl.getStats();
	int newSR = 0;
	// Make undownloaded torrents sort to the top so they can start first.
	if (!dl.isComplete()) {
		newSR = SR_COMPLETE_STARTS_AT + (10000 - dl.getPosition());
		// Make sure we capture FP being turned off when the torrent goes from
		// complete to incomplete.
		isFirstPriority();
		if (rules.bDebugLog) {
			sExplainSR += " not complete. SetSR " + newSR + "\n";
		}
		return newSR;
	}
	// From here on we are seeding.
	lastModifiedShareRatio = stats.getShareRatio();
	DownloadScrapeResult sr = dl.getAggregatedScrapeResult();
	lastModifiedScrapeResultPeers = rules.calcPeersNoUs(dl, sr);
	lastModifiedScrapeResultSeeds = rules.calcSeedsNoUs(dl, sr);
	// Scrape counts are trusted only when at least one is positive (or the
	// last scrape was OK) and neither count is negative.
	boolean bScrapeResultsOk = (lastModifiedScrapeResultPeers > 0 || lastModifiedScrapeResultSeeds > 0 || lastScrapeResultOk) && (lastModifiedScrapeResultPeers >= 0 && lastModifiedScrapeResultSeeds >= 0);
	if (!isFirstPriority()) {
		// Check the ignore rules. They are never applied to First Priority
		// matches (we don't want leechers circumventing the 0.5 rule).
		// A value of 0 means unlimited.
		int activeMaxSR = dlSpecificMaxShareRatio;
		if (activeMaxSR <= 0) {
			activeMaxSR = iIgnoreShareRatio;
		}
		if (activeMaxSR != 0 && lastModifiedShareRatio >= activeMaxSR && (lastModifiedScrapeResultSeeds >= iIgnoreShareRatio_SeedStart || !bScrapeResultsOk) && lastModifiedShareRatio != -1) {
			if (rules.bDebugLog)
				sExplainSR += " shareratio met: shareRatio(" + lastModifiedShareRatio + ") >= " + activeMaxSR + "\n";
			return SR_SHARERATIOMET;
		} else if (rules.bDebugLog && activeMaxSR != 0 && lastModifiedShareRatio >= activeMaxSR) {
			sExplainSR += " shareratio NOT met: ";
			if (lastModifiedScrapeResultSeeds >= iIgnoreShareRatio_SeedStart)
				sExplainSR += lastModifiedScrapeResultSeeds + " below seed threshold of " + iIgnoreShareRatio_SeedStart;
			sExplainSR += "\n";
		}
		if (lastModifiedScrapeResultPeers == 0 && bScrapeResultsOk) {
			// We have to use the normal SR_0PEERS in case it isn't FP.
			if (bIgnore0Peers) {
				if (rules.bDebugLog)
					sExplainSR += " Ignore 0 Peers criteria met\n";
				return SR_0PEERS;
			}
		} else if (rules.bDebugLog && lastModifiedScrapeResultPeers == 0) {
			sExplainSR += " 0 Peer Ignore rule NOT applied: Scrape invalid\n";
		}
		// 0 means disabled.
		if ((iIgnoreSeedCount != 0) && (lastModifiedScrapeResultSeeds >= iIgnoreSeedCount)) {
			if (rules.bDebugLog)
				// FIX: added the missing ')' after the seed count.
				sExplainSR += " SeedCount Ignore rule met. numSeeds(" + lastModifiedScrapeResultSeeds + ") >= iIgnoreSeedCount(" + iIgnoreSeedCount + ")\n";
			return SR_NUMSEEDSMET;
		}
		// 0 means never stop.
		if (iIgnoreRatioPeers != 0 && lastModifiedScrapeResultSeeds != 0) {
			float ratio = (float) lastModifiedScrapeResultPeers / lastModifiedScrapeResultSeeds;
			if (ratio <= iIgnoreRatioPeers && lastModifiedScrapeResultSeeds >= iIgnoreRatioPeers_SeedStart) {
				if (rules.bDebugLog)
					// FIX: report the threshold actually compared (iIgnoreRatioPeers),
					// not the seed-start gate (iIgnoreRatioPeers_SeedStart).
					sExplainSR += " P:S Ignore rule met. ratio(" + ratio + " <= threshold(" + iIgnoreRatioPeers + ")\n";
				return SR_RATIOMET;
			}
		}
	}
	// Never do anything with rank type of none.
	if (iRankType == StartStopRulesDefaultPlugin.RANK_NONE) {
		if (rules.bDebugLog)
			sExplainSR += " Ranking Type set to none.. blanking seeding rank\n";
		// Everything is OK - the rank is simply blanked.
		return newSR;
	}
	if (iRankType == StartStopRulesDefaultPlugin.RANK_TIMED) {
		if (bIsFirstPriority) {
			newSR += SR_TIMED_QUEUED_ENDS_AT + 1;
			return newSR;
		}
		int state = dl.getState();
		if (state == Download.ST_STOPPING || state == Download.ST_STOPPED || state == Download.ST_ERROR) {
			if (rules.bDebugLog)
				sExplainSR += " Download stopping, stopped or in error\n";
			return SR_NOTQUEUED;
		} else if (state == Download.ST_SEEDING || state == Download.ST_READY || state == Download.ST_WAITING || state == Download.ST_PREPARING) {
			// Active (or about to be): force sort to top.
			long lMsElapsed = 0;
			long lMsTimeToSeedFor = minTimeAlive;
			if (state == Download.ST_SEEDING && !dl.isForceStart()) {
				lMsElapsed = (SystemTime.getCurrentTime() - stats.getTimeStartedSeeding());
				// When leechers are connected, the (usually shorter) with-peers
				// minimum seeding time applies.
				if (iTimed_MinSeedingTimeWithPeers > 0) {
					PeerManager peerManager = dl.getPeerManager();
					if (peerManager != null) {
						int connectedLeechers = peerManager.getStats().getConnectedLeechers();
						if (connectedLeechers > 0) {
							lMsTimeToSeedFor = iTimed_MinSeedingTimeWithPeers;
						}
					}
				}
			}
			if (lMsElapsed >= lMsTimeToSeedFor) {
				// Seeding time is up: drop to the bottom rank and request a
				// process cycle on the transition out of the active band.
				newSR = 1;
				if (oldSR > SR_TIMED_QUEUED_ENDS_AT) {
					rules.requestProcessCycle(null);
					if (rules.bDebugLog)
						rules.log.log(dl.getTorrent(), LoggerChannel.LT_INFORMATION, "somethingChanged: TimeUp");
				}
			} else {
				newSR = SR_TIMED_QUEUED_ENDS_AT + 1 + (int) (lMsElapsed / 1000);
				if (oldSR <= SR_TIMED_QUEUED_ENDS_AT) {
					rules.requestProcessCycle(null);
					if (rules.bDebugLog)
						rules.log.log(dl.getTorrent(), LoggerChannel.LT_INFORMATION, "somethingChanged: strange timer change");
				}
			}
			return newSR;
		} else {
			// ST_QUEUED.
			// Priority goes to the ones that haven't seeded for long;
			// maybe share ratio might work well too.
			long diff;
			if (dlLastActiveTime == 0) {
				diff = dl.getStats().getSecondsOnlySeeding();
				if (diff > SR_TIMED_QUEUED_ENDS_AT - 100000) {
					// Close to overrunning... so base off position.
					diff = SR_TIMED_QUEUED_ENDS_AT - 100000 + dl.getPosition();
				}
				newSR = SR_TIMED_QUEUED_ENDS_AT - (int) diff;
			} else {
				diff = ((System.currentTimeMillis() / 1000) - (dlLastActiveTime / 1000));
				if (diff >= SR_TIMED_QUEUED_ENDS_AT) {
					newSR = SR_TIMED_QUEUED_ENDS_AT - 1;
				} else {
					newSR = (int) diff;
				}
			}
			return newSR;
		}
	}
	// SeedCount and SPRatio require scrape results.
	if (bScrapeResultsOk) {
		if (iRankType == StartStopRulesDefaultPlugin.RANK_PEERCOUNT) {
			if (lastModifiedScrapeResultPeers > lastModifiedScrapeResultSeeds * 10)
				newSR = 100 * lastModifiedScrapeResultPeers * 10;
			else
				newSR = (int) ((long) 100 * lastModifiedScrapeResultPeers * lastModifiedScrapeResultPeers / (lastModifiedScrapeResultSeeds + 1));
		} else if ((iRankType == StartStopRulesDefaultPlugin.RANK_SEEDCOUNT) && (iRankTypeSeedFallback == 0 || iRankTypeSeedFallback > lastModifiedScrapeResultSeeds)) {
			if (lastModifiedScrapeResultSeeds < 10000)
				newSR = 10000 - lastModifiedScrapeResultSeeds;
			else
				newSR = 1;
			// Shift over to make way for the fallback rank.
			newSR *= SEEDONLY_SHIFT;
		} else {
			// iRankType == RANK_SPRATIO, or we are falling back.
			if (lastModifiedScrapeResultPeers != 0) {
				if (lastModifiedScrapeResultSeeds == 0) {
					if (lastModifiedScrapeResultPeers >= minPeersToBoostNoSeeds)
						newSR += SPRATIO_BASE_LIMIT;
				} else {
					// numSeeds != 0 && numPeers != 0.
					float x = (float) lastModifiedScrapeResultSeeds / lastModifiedScrapeResultPeers;
					newSR += SPRATIO_BASE_LIMIT / ((x + 1) * (x + 1));
				}
			}
		}
	} else {
		if (rules.bDebugLog)
			sExplainSR += " Can't calculate SR, no scrape results\n";
	}
	if (staleCDOffset > 0) {
		// Every 10 minutes of non-activity subtracts one from the SR.
		if (newSR > staleCDOffset) {
			newSR -= staleCDOffset;
			sExplainSR += " subtracted " + staleCDOffset + " due to non-activeness\n";
		} else {
			staleCDOffset = 0;
		}
	}
	if (newSR < 0)
		newSR = 1;
	return newSR;
}
Use of com.biglybt.pif.peers.PeerManager in project BiglyBT by BiglySoftware.
Class SeedingRankColumnListener, method refresh.
// Renders the seeding-rank table cell for a download: a First Priority tag,
// the remaining seed time when the timed rank mode is active, the raw rank
// otherwise, or a localized reason string for the negative SR_* sentinel
// codes. A leading "*" marks downloads still inside minTimeAlive.
@Override
public void refresh(TableCell cell) {
Download dl = (Download) cell.getDataSource();
if (dl == null)
return;
DefaultRankCalculator dlData = null;
Object o = cell.getSortValue();
if (o instanceof DefaultRankCalculator)
dlData = (DefaultRankCalculator) o;
else {
// Cache the calculator as the cell's sort value for future refreshes.
dlData = (DefaultRankCalculator) downloadDataMap.get(dl);
cell.setSortValue(dlData);
}
if (dlData == null)
return;
long sr = dl.getSeedingRank();
String sText = "";
if (sr >= 0) {
if (dlData.getCachedIsFP())
sText += MessageText.getString("StartStopRules.firstPriority") + " ";
if (iRankType == StartStopRulesDefaultPlugin.RANK_TIMED) {
// sText += "" + sr + " ";
// In timed mode, ranks above SR_TIMED_QUEUED_ENDS_AT mean actively
// seeding: show the time left to seed for.
if (sr > DefaultRankCalculator.SR_TIMED_QUEUED_ENDS_AT) {
long timeStarted = dl.getStats().getTimeStartedSeeding();
long timeLeft;
long lMsTimeToSeedFor = minTimeAlive;
// Use the with-peers minimum seeding time when leechers are connected.
if (iTimed_MinSeedingTimeWithPeers > 0) {
PeerManager peerManager = dl.getPeerManager();
if (peerManager != null) {
int connectedLeechers = peerManager.getStats().getConnectedLeechers();
if (connectedLeechers > 0) {
lMsTimeToSeedFor = iTimed_MinSeedingTimeWithPeers;
}
}
}
if (dl.isForceStart())
timeLeft = Constants.CRAPPY_INFINITY_AS_INT;
else if (timeStarted <= 0)
timeLeft = lMsTimeToSeedFor;
else
timeLeft = (lMsTimeToSeedFor - (SystemTime.getCurrentTime() - timeStarted));
sText += TimeFormatter.format(timeLeft / 1000);
} else if (sr > 0) {
sText += MessageText.getString("StartStopRules.waiting");
}
} else if (sr > 0) {
sText += String.valueOf(sr);
}
} else if (sr == DefaultRankCalculator.SR_FP0PEERS)
sText = MessageText.getString("StartStopRules.FP0Peers");
else if (sr == DefaultRankCalculator.SR_FP_SPRATIOMET)
sText = MessageText.getString("StartStopRules.SPratioMet");
else if (sr == DefaultRankCalculator.SR_RATIOMET)
sText = MessageText.getString("StartStopRules.ratioMet");
else if (sr == DefaultRankCalculator.SR_NUMSEEDSMET)
sText = MessageText.getString("StartStopRules.numSeedsMet");
else if (sr == DefaultRankCalculator.SR_NOTQUEUED)
sText = "";
else if (sr == DefaultRankCalculator.SR_0PEERS)
sText = MessageText.getString("StartStopRules.0Peers");
else if (sr == DefaultRankCalculator.SR_SHARERATIOMET)
sText = MessageText.getString("StartStopRules.shareRatioMet");
else {
// Unknown negative sentinel: surface it for diagnosis.
sText = "ERR" + sr;
}
// Add a Star if it's before minTimeAlive
if (SystemTime.getCurrentTime() - dl.getStats().getTimeStartedSeeding() < minTimeAlive)
sText = "* " + sText;
cell.setText(sText);
if (bDebugLog) {
cell.setToolTip("FP:\n" + dlData.sExplainFP + "\n" + "SR:" + dlData.sExplainSR + "\n" + "TRACE:\n" + dlData.sTrace);
} else {
cell.setToolTip(null);
}
}
Use of com.biglybt.pif.peers.PeerManager in project BiglyBT by BiglySoftware.
Class DHTTrackerPlugin, method processRegistrations.
// Periodically (re)registers running downloads with the distributed (DHT)
// tracker, unregisters downloads that are no longer running, and fires DHT
// announce ("get") queries for downloads whose next query time is due.
// When full_processing is set, the derived-tracking metric of every running
// download is also recomputed and scaled so only the top downloads keep a
// non-zero metric.
protected void processRegistrations(boolean full_processing) {
int tcp_port = plugin_interface.getPluginconfig().getUnsafeIntParameter("TCP.Listen.Port");
String port_override = COConfigurationManager.getStringParameter("TCP.Listen.Port.Override");
if (!port_override.equals("")) {
try {
tcp_port = Integer.parseInt(port_override);
} catch (Throwable e) {
// Deliberately ignored: a malformed override leaves the configured port.
}
}
if (tcp_port == 0) {
log.log("TCP port=0, registration not performed");
return;
}
String override_ips = COConfigurationManager.getStringParameter("Override Ip", "");
String override_ip = null;
if (override_ips.length() > 0) {
// gotta select an appropriate override based on network type: take the
// first entry that classifies as a public address.
StringTokenizer tok = new StringTokenizer(override_ips, ";");
while (tok.hasMoreTokens()) {
// NOTE(review): the (String) cast is redundant - trim() already returns String.
String this_address = (String) tok.nextToken().trim();
if (this_address.length() > 0) {
String cat = AENetworkClassifier.categoriseAddress(this_address);
if (cat == AENetworkClassifier.AT_PUBLIC) {
override_ip = this_address;
break;
}
}
}
}
if (override_ip != null) {
try {
// Resolve a host-name override to a dotted IP; drop it if unresolvable.
override_ip = PRHelpers.DNSToIPAddress(override_ip);
} catch (UnknownHostException e) {
log.log(" Can't resolve IP override '" + override_ip + "'");
override_ip = null;
}
}
// Snapshot the running-download set under the monitor.
ArrayList<Download> rds;
try {
this_mon.enter();
rds = new ArrayList<>(running_downloads.keySet());
} finally {
this_mon.exit();
}
long now = SystemTime.getCurrentTime();
if (full_processing) {
// Recompute the derived-track metric for every registered download,
// then rank them and scale the metric: top DL_DERIVED_MIN_TRACK keep
// their metric, the next keep a linearly scaled one, the rest get none.
Iterator<Download> rds_it = rds.iterator();
List<Object[]> interesting = new ArrayList<>();
while (rds_it.hasNext()) {
Download dl = rds_it.next();
int reg_type = REG_TYPE_NONE;
try {
this_mon.enter();
int[] run_data = running_downloads.get(dl);
if (run_data != null) {
reg_type = run_data[0];
}
} finally {
this_mon.exit();
}
if (reg_type == REG_TYPE_NONE) {
continue;
}
long metric = getDerivedTrackMetric(dl);
interesting.add(new Object[] { dl, new Long(metric) });
}
// Sort by metric, highest first.
Collections.sort(interesting, new Comparator<Object[]>() {
@Override
public int compare(Object[] entry1, Object[] entry2) {
long res = ((Long) entry2[1]).longValue() - ((Long) entry1[1]).longValue();
if (res < 0) {
return (-1);
} else if (res > 0) {
return (1);
} else {
return (0);
}
}
});
Iterator<Object[]> it = interesting.iterator();
int num = 0;
while (it.hasNext()) {
Object[] entry = it.next();
Download dl = (Download) entry[0];
long metric = ((Long) entry[1]).longValue();
num++;
if (metric > 0) {
if (num <= DL_DERIVED_MIN_TRACK) {
// leave as is
} else if (num <= DL_DERIVED_MAX_TRACK) {
// scale metric between limits
metric = (metric * (DL_DERIVED_MAX_TRACK - num)) / (DL_DERIVED_MAX_TRACK - DL_DERIVED_MIN_TRACK);
} else {
metric = 0;
}
}
if (metric > 0) {
dl.setUserData(DL_DERIVED_METRIC_KEY, new Long(metric));
} else {
dl.setUserData(DL_DERIVED_METRIC_KEY, null);
}
}
}
// First pass: (re)register every running download whose put details changed.
Iterator<Download> rds_it = rds.iterator();
while (rds_it.hasNext()) {
Download dl = rds_it.next();
int reg_type = REG_TYPE_NONE;
try {
this_mon.enter();
int[] run_data = running_downloads.get(dl);
if (run_data != null) {
reg_type = run_data[0];
}
} finally {
this_mon.exit();
}
if (reg_type == REG_TYPE_NONE) {
continue;
}
// format is [ip_override:]tcp_port[;CI...][;udp_port]
String value_to_put = override_ip == null ? "" : (override_ip + ":");
value_to_put += tcp_port;
String put_flags = ";";
if (NetworkManager.REQUIRE_CRYPTO_HANDSHAKE) {
// "C" flag: we require encrypted connections.
put_flags += "C";
}
String[] networks = dl.getListAttribute(ta_networks);
boolean i2p = false;
if (networks != null) {
for (String net : networks) {
if (net == AENetworkClassifier.AT_I2P) {
if (I2PHelpers.isI2PInstalled()) {
// "I" flag: we are reachable over I2P.
put_flags += "I";
}
i2p = true;
break;
}
}
}
if (put_flags.length() > 1) {
value_to_put += put_flags;
}
int udp_port = plugin_interface.getPluginconfig().getUnsafeIntParameter("UDP.Listen.Port");
int dht_port = dht.getLocalAddress().getAddress().getPort();
// Only append the UDP port when it differs from the DHT port (the default).
if (udp_port != dht_port) {
value_to_put += ";" + udp_port;
}
putDetails put_details = new putDetails(value_to_put, override_ip, tcp_port, udp_port, i2p);
byte dht_flags = isComplete(dl) ? DHTPlugin.FLAG_SEEDING : DHTPlugin.FLAG_DOWNLOADING;
RegistrationDetails registration = (RegistrationDetails) registered_downloads.get(dl);
boolean do_it = false;
if (registration == null) {
log(dl, "Registering download as " + (dht_flags == DHTPlugin.FLAG_SEEDING ? "Seeding" : "Downloading"));
registration = new RegistrationDetails(dl, reg_type, put_details, dht_flags);
registered_downloads.put(dl, registration);
do_it = true;
} else {
boolean targets_changed = false;
if (full_processing) {
targets_changed = registration.updateTargets(dl, reg_type);
}
if (targets_changed || registration.getFlags() != dht_flags || !registration.getPutDetails().sameAs(put_details)) {
// NOTE(review): registration is always non-null in this branch, so the
// "Registering" arm of the conditional below is dead code.
log(dl, (registration == null ? "Registering" : "Re-registering") + " download as " + (dht_flags == DHTPlugin.FLAG_SEEDING ? "Seeding" : "Downloading"));
registration.update(put_details, dht_flags);
do_it = true;
}
}
if (do_it) {
try {
this_mon.enter();
query_map.put(dl, new Long(now));
} finally {
this_mon.exit();
}
trackerPut(dl, registration);
}
}
// second any removals
Iterator<Map.Entry<Download, RegistrationDetails>> rd_it = registered_downloads.entrySet().iterator();
while (rd_it.hasNext()) {
Map.Entry<Download, RegistrationDetails> entry = rd_it.next();
final Download dl = entry.getKey();
boolean unregister;
try {
this_mon.enter();
unregister = !running_downloads.containsKey(dl);
} finally {
this_mon.exit();
}
if (unregister) {
log(dl, "Unregistering download");
rd_it.remove();
try {
this_mon.enter();
query_map.remove(dl);
} finally {
this_mon.exit();
}
trackerRemove(dl, entry.getValue());
}
}
// lastly gets
rds_it = rds.iterator();
while (rds_it.hasNext()) {
final Download dl = (Download) rds_it.next();
Long next_time;
try {
this_mon.enter();
next_time = (Long) query_map.get(dl);
} finally {
this_mon.exit();
}
if (next_time != null && now >= next_time.longValue()) {
int reg_type = REG_TYPE_NONE;
try {
this_mon.enter();
query_map.remove(dl);
int[] run_data = running_downloads.get(dl);
if (run_data != null) {
reg_type = run_data[0];
}
} finally {
this_mon.exit();
}
final long start = SystemTime.getCurrentTime();
// if we're already connected to > NUM_WANT peers then don't bother with the main announce
PeerManager pm = dl.getPeerManager();
// don't query if this download already has an active DHT operation
boolean skip = isActive(dl) || reg_type == REG_TYPE_NONE;
if (skip) {
log(dl, "Deferring announce as activity outstanding");
}
RegistrationDetails registration = (RegistrationDetails) registered_downloads.get(dl);
if (registration == null) {
Debug.out("Inconsistent, registration should be non-null");
continue;
}
boolean derived_only = false;
if (pm != null && !skip) {
int con = pm.getStats().getConnectedLeechers() + pm.getStats().getConnectedSeeds();
derived_only = con >= NUM_WANT;
}
if (!skip) {
// trackerGet returns the number of lookups started; 0 => nothing to do.
skip = trackerGet(dl, registration, derived_only) == 0;
}
if (skip) {
try {
this_mon.enter();
if (running_downloads.containsKey(dl)) {
// use "min" here as we're just deferring it
query_map.put(dl, new Long(start + ANNOUNCE_MIN_DEFAULT));
}
} finally {
this_mon.exit();
}
}
}
}
}
Use of com.biglybt.pif.peers.PeerManager in project BiglyBT by BiglySoftware.
Class DHTTrackerPlugin, method trackerGet.
// Issues a DHT "get" (announce lookup) for each of the registration's targets.
// Discovered peers are parsed from the stored value format
// "[ip_override:]tcp_port[;flags][;udp_port]", fed back to the download as a
// DownloadAnnounceResult and, where appropriate, a synthetic scrape result is
// injected. Returns the number of lookups actually started (0 => all skipped).
protected int trackerGet(final Download download, final RegistrationDetails details, final boolean derived_only) {
final long start = SystemTime.getCurrentTime();
final Torrent torrent = download.getTorrent();
final URL url_to_report = torrent.isDecentralised() ? torrent.getAnnounceURL() : DEFAULT_URL;
trackerTarget[] targets = details.getTargets(false);
// Shared across the per-target listeners to coordinate next-query scheduling.
final long[] max_retry = { 0 };
// Alternative-network lookup only for downloads that aren't low-noise/light-weight.
final boolean do_alt = alt_lookup_handler != null && (!(download.getFlag(Download.FLAG_LOW_NOISE) || download.getFlag(Download.FLAG_LIGHT_WEIGHT)));
int num_done = 0;
for (int i = 0; i < targets.length; i++) {
final trackerTarget target = targets[i];
int target_type = target.getType();
// Skip full announces when only derived tracking is wanted, and skip
// derived announces while the DHT is sleeping.
if (target_type == REG_TYPE_FULL && derived_only) {
continue;
} else if (target_type == REG_TYPE_DERIVED && dht.isSleeping()) {
continue;
}
increaseActive(download);
num_done++;
final boolean is_complete = isComplete(download);
dht.get(target.getHash(), "Tracker announce for '" + download.getName() + "'" + target.getDesc(), is_complete ? DHTPlugin.FLAG_SEEDING : DHTPlugin.FLAG_DOWNLOADING, NUM_WANT, target_type == REG_TYPE_FULL ? ANNOUNCE_TIMEOUT : ANNOUNCE_DERIVED_TIMEOUT, false, false, new DHTPluginOperationListener() {
// Peer details accumulated from the DHT values read; all guarded by
// synchronized(this).
List<String> addresses = new ArrayList<>();
List<Integer> ports = new ArrayList<>();
List<Integer> udp_ports = new ArrayList<>();
List<Boolean> is_seeds = new ArrayList<>();
List<String> flags = new ArrayList<>();
int seed_count;
int leecher_count;
int i2p_seed_count;
int i2p_leecher_count;
volatile boolean complete;
// Instance initializer: kick off the alternative-network lookup in
// parallel, feeding its results into the same accumulators.
{
if (do_alt) {
alt_lookup_handler.get(target.getHash(), is_complete, new DHTTrackerPluginAlt.LookupListener() {
@Override
public void foundPeer(InetSocketAddress address) {
alternativePeerRead(address);
}
@Override
public boolean isComplete() {
// Stop the alternative lookup once the main one finished
// with a reasonable number of addresses.
return (complete && addresses.size() > 5);
}
@Override
public void completed() {
}
});
}
}
@Override
public boolean diversified() {
return (true);
}
@Override
public void starts(byte[] key) {
}
// Peers from the alternative lookup: recorded while the main lookup is
// still running, or directly injected into the peer manager if the main
// lookup already completed with few results.
private void alternativePeerRead(InetSocketAddress peer) {
boolean try_injection = false;
synchronized (this) {
if (complete) {
try_injection = addresses.size() < 5;
} else {
try {
addresses.add(peer.getAddress().getHostAddress());
ports.add(peer.getPort());
udp_ports.add(0);
flags.add(null);
is_seeds.add(false);
leecher_count++;
} catch (Throwable e) {
}
}
}
if (try_injection) {
PeerManager pm = download.getPeerManager();
if (pm != null) {
pm.peerDiscovered(PEPeerSource.PS_DHT, peer.getAddress().getHostAddress(), peer.getPort(), 0, NetworkManager.getCryptoRequired(NetworkManager.CRYPTO_OVERRIDE_NONE));
}
}
}
// Parses one stored DHT value of the form
// "[ip:]tcp_port[;flags][;udp_port]" into the peer accumulators.
@Override
public void valueRead(DHTPluginContact originator, DHTPluginValue value) {
synchronized (this) {
if (complete) {
return;
}
try {
String[] tokens = new String(value.getValue()).split(";");
String tcp_part = tokens[0].trim();
int sep = tcp_part.indexOf(':');
String ip_str = null;
String tcp_port_str;
if (sep == -1) {
tcp_port_str = tcp_part;
} else {
ip_str = tcp_part.substring(0, sep);
tcp_port_str = tcp_part.substring(sep + 1);
}
int tcp_port = Integer.parseInt(tcp_port_str);
if (tcp_port > 0 && tcp_port < 65536) {
String flag_str = null;
int udp_port = -1;
boolean has_i2p = false;
try {
// Remaining tokens: a numeric one is the UDP port, anything
// else is the flags string ("C" crypto, "I" i2p).
for (int i = 1; i < tokens.length; i++) {
String token = tokens[i].trim();
if (token.length() > 0) {
if (Character.isDigit(token.charAt(0))) {
udp_port = Integer.parseInt(token);
if (udp_port <= 0 || udp_port >= 65536) {
udp_port = -1;
}
} else {
flag_str = token;
if (flag_str.contains("I")) {
has_i2p = true;
}
}
}
}
} catch (Throwable e) {
}
// Fall back to the originating contact's address/port when the
// value didn't carry them.
addresses.add(ip_str == null ? originator.getAddress().getAddress().getHostAddress() : ip_str);
ports.add(new Integer(tcp_port));
udp_ports.add(new Integer(udp_port == -1 ? originator.getAddress().getPort() : udp_port));
flags.add(flag_str);
// NOTE(review): this tests "& FLAG_DOWNLOADING == 1", which assumes
// FLAG_DOWNLOADING has the value 1; "!= 0" would be safe for any
// single-bit flag - confirm the constant's value before changing.
if ((value.getFlags() & DHTPlugin.FLAG_DOWNLOADING) == 1) {
leecher_count++;
is_seeds.add(Boolean.FALSE);
if (has_i2p) {
i2p_leecher_count++;
}
} else {
is_seeds.add(Boolean.TRUE);
seed_count++;
if (has_i2p) {
i2p_seed_count++;
}
}
}
} catch (Throwable e) {
// in case we get crap back (someone spamming the DHT) just
// silently ignore
}
}
}
@Override
public void valueWritten(DHTPluginContact target, DHTPluginValue value) {
}
// Lookup finished (or timed out): schedule the next query, publish the
// announce result and optionally inject peers/scrape data.
@Override
public void complete(byte[] key, boolean timeout_occurred) {
synchronized (this) {
if (complete) {
return;
}
complete = true;
}
if (target.getType() == REG_TYPE_FULL || (target.getType() == REG_TYPE_DERIVED && seed_count + leecher_count > 1)) {
log(download, "Get of '" + target.getDesc() + "' completed (elapsed=" + TimeFormatter.formatColonMillis(SystemTime.getCurrentTime() - start) + "), addresses=" + addresses.size() + ", seeds=" + seed_count + ", leechers=" + leecher_count);
}
decreaseActive(download);
int peers_found = addresses.size();
List<DownloadAnnounceResultPeer> peers_for_announce = new ArrayList<>();
// scale min and max based on number of active torrents
// we don't want more than a few announces a minute
int announce_per_min = 4;
int num_active = query_map.size();
int announce_min = Math.max(ANNOUNCE_MIN_DEFAULT, (num_active / announce_per_min) * 60 * 1000);
int announce_max = derived_only ? ANNOUNCE_MAX_DERIVED_ONLY : ANNOUNCE_MAX;
announce_min = Math.min(announce_min, announce_max);
current_announce_interval = announce_min;
// The fewer peers found, the sooner we re-announce (linear in peers_found).
final long retry = announce_min + peers_found * (long) (announce_max - announce_min) / NUM_WANT;
int download_state = download.getState();
boolean we_are_seeding = download_state == Download.ST_SEEDING;
try {
this_mon.enter();
int[] run_data = running_downloads.get(download);
if (run_data != null) {
boolean full = target.getType() == REG_TYPE_FULL;
int peer_count = we_are_seeding ? leecher_count : (seed_count + leecher_count);
// A full announce overwrites the cached counts; a derived one only raises them.
run_data[1] = full ? seed_count : Math.max(run_data[1], seed_count);
run_data[2] = full ? leecher_count : Math.max(run_data[2], leecher_count);
run_data[3] = full ? peer_count : Math.max(run_data[3], peer_count);
run_data[4] = (int) (SystemTime.getCurrentTime() / 1000);
long absolute_retry = SystemTime.getCurrentTime() + retry;
if (absolute_retry > max_retry[0]) {
// only update next query time if none set yet
// or we appear to have set the existing one. If we
// don't do this then we'll overwrite any rescheduled
// announces
Long existing = (Long) query_map.get(download);
if (existing == null || existing.longValue() == max_retry[0]) {
max_retry[0] = absolute_retry;
query_map.put(download, new Long(absolute_retry));
}
}
}
} finally {
this_mon.exit();
}
putDetails put_details = details.getPutDetails();
String ext_address = put_details.getIPOverride();
if (ext_address == null) {
ext_address = dht.getLocalAddress().getAddress().getAddress().getHostAddress();
}
// Discount ourselves from the I2P seed/leecher counts.
if (put_details.hasI2P()) {
if (we_are_seeding) {
if (i2p_seed_count > 0) {
i2p_seed_count--;
}
} else {
if (i2p_leecher_count > 0) {
i2p_leecher_count--;
}
}
}
if (i2p_seed_count + i2p_leecher_count > 0) {
download.setUserData(DOWNLOAD_USER_DATA_I2P_SCRAPE_KEY, new int[] { i2p_seed_count, i2p_leecher_count });
} else {
download.setUserData(DOWNLOAD_USER_DATA_I2P_SCRAPE_KEY, null);
}
for (int i = 0; i < addresses.size(); i++) {
// When seeding we only care about leechers.
if (we_are_seeding && ((Boolean) is_seeds.get(i)).booleanValue()) {
continue;
}
// remove ourselves
String ip = (String) addresses.get(i);
if (ip.equals(ext_address)) {
if (((Integer) ports.get(i)).intValue() == put_details.getTCPPort() && ((Integer) udp_ports.get(i)).intValue() == put_details.getUDPPort()) {
continue;
}
}
final int f_i = i;
peers_for_announce.add(new DownloadAnnounceResultPeer() {
@Override
public String getSource() {
return (PEPeerSource.PS_DHT);
}
@Override
public String getAddress() {
return ((String) addresses.get(f_i));
}
@Override
public int getPort() {
return (((Integer) ports.get(f_i)).intValue());
}
@Override
public int getUDPPort() {
return (((Integer) udp_ports.get(f_i)).intValue());
}
@Override
public byte[] getPeerID() {
return (null);
}
@Override
public short getProtocol() {
String flag = (String) flags.get(f_i);
short protocol = DownloadAnnounceResultPeer.PROTOCOL_NORMAL;
if (flag != null) {
if (flag.contains("C")) {
protocol = DownloadAnnounceResultPeer.PROTOCOL_CRYPT;
}
}
return (protocol);
}
});
}
if (target.getType() == REG_TYPE_DERIVED && peers_for_announce.size() > 0) {
PeerManager pm = download.getPeerManager();
if (pm != null) {
// try some limited direct injection
List<DownloadAnnounceResultPeer> temp = new ArrayList<>(peers_for_announce);
Random rand = new Random();
for (int i = 0; i < DIRECT_INJECT_PEER_MAX && temp.size() > 0; i++) {
DownloadAnnounceResultPeer peer = temp.remove(rand.nextInt(temp.size()));
log(download, "Injecting derived peer " + peer.getAddress() + " into " + download.getName());
Map<Object, Object> user_data = new HashMap<>();
user_data.put(Peer.PR_PRIORITY_CONNECTION, Boolean.TRUE);
pm.addPeer(peer.getAddress(), peer.getPort(), peer.getUDPPort(), peer.getProtocol() == DownloadAnnounceResultPeer.PROTOCOL_CRYPT, user_data);
}
}
}
if (download_state == Download.ST_DOWNLOADING || download_state == Download.ST_SEEDING) {
final DownloadAnnounceResultPeer[] peers = new DownloadAnnounceResultPeer[peers_for_announce.size()];
peers_for_announce.toArray(peers);
download.setAnnounceResult(new DownloadAnnounceResult() {
@Override
public Download getDownload() {
return (download);
}
@Override
public int getResponseType() {
return (DownloadAnnounceResult.RT_SUCCESS);
}
@Override
public int getReportedPeerCount() {
return (peers.length);
}
@Override
public int getSeedCount() {
return (seed_count);
}
@Override
public int getNonSeedCount() {
return (leecher_count);
}
@Override
public String getError() {
return (null);
}
@Override
public URL getURL() {
return (url_to_report);
}
@Override
public DownloadAnnounceResultPeer[] getPeers() {
return (peers);
}
@Override
public long getTimeToWait() {
return (retry / 1000);
}
@Override
public Map getExtensions() {
return (null);
}
});
}
// only inject the scrape result if the torrent is decentralised. If we do this for
// "normal" torrents then it can have unwanted side-effects, such as stopping the torrent
// due to ignore rules if there are no downloaders in the DHT - bthub backup, for example,
// isn't scrapable...
// hmm, ok, try being a bit more relaxed about this, inject the scrape if
// we have any peers.
boolean inject_scrape = leecher_count > 0;
DownloadScrapeResult result = download.getLastScrapeResult();
if (result == null || result.getResponseType() == DownloadScrapeResult.RT_ERROR) {
} else {
// Also inject when the existing scrape matches our previous injection
// (i.e. we were the last to set it).
synchronized (scrape_injection_map) {
int[] prev = (int[]) scrape_injection_map.get(download);
if (prev != null && prev[0] == result.getSeedCount() && prev[1] == result.getNonSeedCount()) {
inject_scrape = true;
}
}
}
if (torrent.isDecentralised() || inject_scrape) {
// make sure that the injected scrape values are consistent
// with our currently connected peers
PeerManager pm = download.getPeerManager();
int local_seeds = 0;
int local_leechers = 0;
if (pm != null) {
Peer[] dl_peers = pm.getPeers();
for (int i = 0; i < dl_peers.length; i++) {
Peer dl_peer = dl_peers[i];
if (dl_peer.getPercentDoneInThousandNotation() == 1000) {
local_seeds++;
} else {
local_leechers++;
}
}
}
final int f_adj_seeds = Math.max(seed_count, local_seeds);
final int f_adj_leechers = Math.max(leecher_count, local_leechers);
synchronized (scrape_injection_map) {
scrape_injection_map.put(download, new int[] { f_adj_seeds, f_adj_leechers });
}
try {
this_mon.enter();
int[] run_data = running_downloads.get(download);
if (run_data == null) {
run_data = run_data_cache.get(download);
}
if (run_data != null) {
run_data[1] = f_adj_seeds;
run_data[2] = f_adj_leechers;
run_data[4] = (int) (SystemTime.getCurrentTime() / 1000);
}
} finally {
this_mon.exit();
}
download.setScrapeResult(new DownloadScrapeResult() {
@Override
public Download getDownload() {
return (download);
}
@Override
public int getResponseType() {
return (DownloadScrapeResult.RT_SUCCESS);
}
@Override
public int getSeedCount() {
return (f_adj_seeds);
}
@Override
public int getNonSeedCount() {
return (f_adj_leechers);
}
@Override
public long getScrapeStartTime() {
return (start);
}
@Override
public void setNextScrapeStartTime(long nextScrapeStartTime) {
}
@Override
public long getNextScrapeStartTime() {
return (SystemTime.getCurrentTime() + retry);
}
@Override
public String getStatus() {
return ("OK");
}
@Override
public URL getURL() {
return (url_to_report);
}
});
}
}
});
}
return (num_done);
}
Aggregations