Use of com.biglybt.plugin.dht.DHTPluginOperationListener in project BiglyBT by BiglySoftware.
From the class DHTTrackerPlugin, method getTrackerPeerSources:
public TrackerPeerSource[] getTrackerPeerSources(final Torrent torrent) {
TrackerPeerSource vuze_dht = new TrackerPeerSourceAdapter() {
private volatile boolean query_done;
private volatile int status = TrackerPeerSource.ST_INITIALISING;
private volatile int seeds = 0;
private volatile int leechers = 0;
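// fixup() lazily performs the DHT availability lookup: it runs at most once (guarded by query_done)
// and only after the plugin has initialised; it is driven by the getStatus()/getSeedCount()/getLeecherCount() calls below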
private void fixup() {
if (initialised_sem.isReleasedForever()) {
synchronized (this) {
if (query_done) {
return;
}
query_done = true;
status = TrackerPeerSource.ST_UPDATING;
}
dht.get(torrent.getHash(), "Availability lookup for '" + torrent.getName() + "'", DHTPlugin.FLAG_DOWNLOADING, NUM_WANT, ANNOUNCE_DERIVED_TIMEOUT, false, true, new DHTPluginOperationListener() {
@Override
public void starts(byte[] key) {
}
@Override
public boolean diversified() {
return (true);
}
@Override
public void valueRead(DHTPluginContact originator, DHTPluginValue value) {
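// a stored value with the FLAG_DOWNLOADING bit set was published by a peer that is still downloading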
if ((value.getFlags() & DHTPlugin.FLAG_DOWNLOADING) == 1) {
leechers++;
} else {
seeds++;
}
}
@Override
public void valueWritten(DHTPluginContact target, DHTPluginValue value) {
}
@Override
public void complete(byte[] key, boolean timeout_occurred) {
status = TrackerPeerSource.ST_ONLINE;
}
});
}
}
@Override
public int getType() {
return (TrackerPeerSource.TP_DHT);
}
@Override
public String getName() {
return ("Vuze DHT");
}
@Override
public int getStatus() {
fixup();
return (status);
}
@Override
public int getSeedCount() {
fixup();
int result = seeds;
if (result == 0 && status != TrackerPeerSource.ST_ONLINE) {
return (-1);
}
return (result);
}
@Override
public int getLeecherCount() {
fixup();
int result = leechers;
if (result == 0 && status != TrackerPeerSource.ST_ONLINE) {
return (-1);
}
return (result);
}
@Override
public int getPeers() {
return (-1);
}
@Override
public boolean isUpdating() {
return (status == TrackerPeerSource.ST_UPDATING);
}
};
if (alt_lookup_handler != null) {
TrackerPeerSource alt_dht = new TrackerPeerSourceAdapter() {
private volatile int status = TrackerPeerSource.ST_UPDATING;
private volatile int peers = 0;
{
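// instance initialiser: the alternative (mainline) DHT lookup is started as soon as this peer source is created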
alt_lookup_handler.get(torrent.getHash(), false, new DHTTrackerPluginAlt.LookupListener() {
@Override
public void foundPeer(InetSocketAddress address) {
peers++;
}
@Override
public boolean isComplete() {
return (false);
}
@Override
public void completed() {
status = TrackerPeerSource.ST_ONLINE;
}
});
}
@Override
public int getType() {
return (TrackerPeerSource.TP_DHT);
}
@Override
public String getName() {
return ("Mainline DHT");
}
@Override
public int getStatus() {
return (status);
}
@Override
public int getPeers() {
int result = peers;
if (result == 0 && status != TrackerPeerSource.ST_ONLINE) {
return (-1);
}
return (result);
}
@Override
public boolean isUpdating() {
return (status == TrackerPeerSource.ST_UPDATING);
}
};
return (new TrackerPeerSource[] { vuze_dht, alt_dht });
} else {
return (new TrackerPeerSource[] { vuze_dht });
}
}
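A minimal usage sketch of the method above (not taken from the BiglyBT source): plugin and torrent are assumed references to a DHTTrackerPlugin instance and a Torrent obtained elsewhere. The first call to getStatus(), getSeedCount() or getLeecherCount() on the "Vuze DHT" source triggers the lookup via fixup(), and the counts return -1 until that lookup reaches ST_ONLINE.
TrackerPeerSource[] sources = plugin.getTrackerPeerSources(torrent); // 'plugin' and 'torrent' are assumed references
for (TrackerPeerSource source : sources) {
    System.out.println(
        source.getName()
        + ": status=" + source.getStatus()       // first call kicks off the DHT lookup
        + ", seeds=" + source.getSeedCount()     // -1 while the lookup is still pending
        + ", leechers=" + source.getLeecherCount()
        + ", updating=" + source.isUpdating());
}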
From the class DHTTrackerPlugin, method trackerGet:
protected int trackerGet(final Download download, final RegistrationDetails details, final boolean derived_only) {
final long start = SystemTime.getCurrentTime();
final Torrent torrent = download.getTorrent();
final URL url_to_report = torrent.isDecentralised() ? torrent.getAnnounceURL() : DEFAULT_URL;
trackerTarget[] targets = details.getTargets(false);
final long[] max_retry = { 0 };
final boolean do_alt = alt_lookup_handler != null && (!(download.getFlag(Download.FLAG_LOW_NOISE) || download.getFlag(Download.FLAG_LIGHT_WEIGHT)));
int num_done = 0;
for (int i = 0; i < targets.length; i++) {
final trackerTarget target = targets[i];
int target_type = target.getType();
if (target_type == REG_TYPE_FULL && derived_only) {
continue;
} else if (target_type == REG_TYPE_DERIVED && dht.isSleeping()) {
continue;
}
increaseActive(download);
num_done++;
final boolean is_complete = isComplete(download);
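// lookup against the target hash, flagged FLAG_SEEDING or FLAG_DOWNLOADING according to our own completion state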
dht.get(target.getHash(), "Tracker announce for '" + download.getName() + "'" + target.getDesc(), is_complete ? DHTPlugin.FLAG_SEEDING : DHTPlugin.FLAG_DOWNLOADING, NUM_WANT, target_type == REG_TYPE_FULL ? ANNOUNCE_TIMEOUT : ANNOUNCE_DERIVED_TIMEOUT, false, false, new DHTPluginOperationListener() {
List<String> addresses = new ArrayList<>();
List<Integer> ports = new ArrayList<>();
List<Integer> udp_ports = new ArrayList<>();
List<Boolean> is_seeds = new ArrayList<>();
List<String> flags = new ArrayList<>();
int seed_count;
int leecher_count;
int i2p_seed_count;
int i2p_leecher_count;
volatile boolean complete;
{
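// instance initialiser: for downloads that are neither low-noise nor light-weight, also run the alternative (mainline) DHT lookup in parallel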
if (do_alt) {
alt_lookup_handler.get(target.getHash(), is_complete, new DHTTrackerPluginAlt.LookupListener() {
@Override
public void foundPeer(InetSocketAddress address) {
alternativePeerRead(address);
}
@Override
public boolean isComplete() {
return (complete && addresses.size() > 5);
}
@Override
public void completed() {
}
});
}
}
@Override
public boolean diversified() {
return (true);
}
@Override
public void starts(byte[] key) {
}
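// peers reported by the alternative DHT: added to the results as leechers while the main lookup is still
// running, otherwise injected directly into the peer manager while fewer than 5 addresses have been collected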
private void alternativePeerRead(InetSocketAddress peer) {
boolean try_injection = false;
synchronized (this) {
if (complete) {
try_injection = addresses.size() < 5;
} else {
try {
addresses.add(peer.getAddress().getHostAddress());
ports.add(peer.getPort());
udp_ports.add(0);
flags.add(null);
is_seeds.add(false);
leecher_count++;
} catch (Throwable e) {
}
}
}
if (try_injection) {
PeerManager pm = download.getPeerManager();
if (pm != null) {
pm.peerDiscovered(PEPeerSource.PS_DHT, peer.getAddress().getHostAddress(), peer.getPort(), 0, NetworkManager.getCryptoRequired(NetworkManager.CRYPTO_OVERRIDE_NONE));
}
}
}
@Override
public void valueRead(DHTPluginContact originator, DHTPluginValue value) {
synchronized (this) {
if (complete) {
return;
}
try {
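// stored value format as parsed here: "[ip:]tcp_port" optionally followed by ";"-separated tokens,
// where a numeric token is a UDP port and a non-numeric token is a flag string ('I' marks an I2P peer)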
String[] tokens = new String(value.getValue()).split(";");
String tcp_part = tokens[0].trim();
int sep = tcp_part.indexOf(':');
String ip_str = null;
String tcp_port_str;
if (sep == -1) {
tcp_port_str = tcp_part;
} else {
ip_str = tcp_part.substring(0, sep);
tcp_port_str = tcp_part.substring(sep + 1);
}
int tcp_port = Integer.parseInt(tcp_port_str);
if (tcp_port > 0 && tcp_port < 65536) {
String flag_str = null;
int udp_port = -1;
boolean has_i2p = false;
try {
for (int i = 1; i < tokens.length; i++) {
String token = tokens[i].trim();
if (token.length() > 0) {
if (Character.isDigit(token.charAt(0))) {
udp_port = Integer.parseInt(token);
if (udp_port <= 0 || udp_port >= 65536) {
udp_port = -1;
}
} else {
flag_str = token;
if (flag_str.contains("I")) {
has_i2p = true;
}
}
}
}
} catch (Throwable e) {
}
addresses.add(ip_str == null ? originator.getAddress().getAddress().getHostAddress() : ip_str);
ports.add(new Integer(tcp_port));
udp_ports.add(new Integer(udp_port == -1 ? originator.getAddress().getPort() : udp_port));
flags.add(flag_str);
if ((value.getFlags() & DHTPlugin.FLAG_DOWNLOADING) == 1) {
leecher_count++;
is_seeds.add(Boolean.FALSE);
if (has_i2p) {
i2p_leecher_count++;
}
} else {
is_seeds.add(Boolean.TRUE);
seed_count++;
if (has_i2p) {
i2p_seed_count++;
}
}
}
} catch (Throwable e) {
// in case we get crap back (someone spamming the DHT) just
// silently ignore
}
}
}
@Override
public void valueWritten(DHTPluginContact target, DHTPluginValue value) {
}
@Override
public void complete(byte[] key, boolean timeout_occurred) {
synchronized (this) {
if (complete) {
return;
}
complete = true;
}
if (target.getType() == REG_TYPE_FULL || (target.getType() == REG_TYPE_DERIVED && seed_count + leecher_count > 1)) {
log(download, "Get of '" + target.getDesc() + "' completed (elapsed=" + TimeFormatter.formatColonMillis(SystemTime.getCurrentTime() - start) + "), addresses=" + addresses.size() + ", seeds=" + seed_count + ", leechers=" + leecher_count);
}
decreaseActive(download);
int peers_found = addresses.size();
List<DownloadAnnounceResultPeer> peers_for_announce = new ArrayList<>();
// scale min and max based on number of active torrents
// we don't want more than a few announces a minute
int announce_per_min = 4;
int num_active = query_map.size();
int announce_min = Math.max(ANNOUNCE_MIN_DEFAULT, (num_active / announce_per_min) * 60 * 1000);
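// e.g. 40 active torrents => announce_min = max(ANNOUNCE_MIN_DEFAULT, (40/4)*60*1000), i.e. at least 10 minutes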
int announce_max = derived_only ? ANNOUNCE_MAX_DERIVED_ONLY : ANNOUNCE_MAX;
announce_min = Math.min(announce_min, announce_max);
current_announce_interval = announce_min;
final long retry = announce_min + peers_found * (long) (announce_max - announce_min) / NUM_WANT;
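// retry interval scales linearly from announce_min (no peers found) up towards announce_max (NUM_WANT peers found)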
int download_state = download.getState();
boolean we_are_seeding = download_state == Download.ST_SEEDING;
try {
this_mon.enter();
int[] run_data = running_downloads.get(download);
if (run_data != null) {
boolean full = target.getType() == REG_TYPE_FULL;
int peer_count = we_are_seeding ? leecher_count : (seed_count + leecher_count);
run_data[1] = full ? seed_count : Math.max(run_data[1], seed_count);
run_data[2] = full ? leecher_count : Math.max(run_data[2], leecher_count);
run_data[3] = full ? peer_count : Math.max(run_data[3], peer_count);
run_data[4] = (int) (SystemTime.getCurrentTime() / 1000);
long absolute_retry = SystemTime.getCurrentTime() + retry;
if (absolute_retry > max_retry[0]) {
// only update next query time if none set yet
// or we appear to have set the existing one. If we
// don't do this then we'll overwrite any rescheduled
// announces
Long existing = (Long) query_map.get(download);
if (existing == null || existing.longValue() == max_retry[0]) {
max_retry[0] = absolute_retry;
query_map.put(download, new Long(absolute_retry));
}
}
}
} finally {
this_mon.exit();
}
putDetails put_details = details.getPutDetails();
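// work out our own external address so we can filter ourselves out of the announce peer list below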
String ext_address = put_details.getIPOverride();
if (ext_address == null) {
ext_address = dht.getLocalAddress().getAddress().getAddress().getHostAddress();
}
if (put_details.hasI2P()) {
if (we_are_seeding) {
if (i2p_seed_count > 0) {
i2p_seed_count--;
}
} else {
if (i2p_leecher_count > 0) {
i2p_leecher_count--;
}
}
}
if (i2p_seed_count + i2p_leecher_count > 0) {
download.setUserData(DOWNLOAD_USER_DATA_I2P_SCRAPE_KEY, new int[] { i2p_seed_count, i2p_leecher_count });
} else {
download.setUserData(DOWNLOAD_USER_DATA_I2P_SCRAPE_KEY, null);
}
for (int i = 0; i < addresses.size(); i++) {
if (we_are_seeding && ((Boolean) is_seeds.get(i)).booleanValue()) {
continue;
}
// remove ourselves
String ip = (String) addresses.get(i);
if (ip.equals(ext_address)) {
if (((Integer) ports.get(i)).intValue() == put_details.getTCPPort() && ((Integer) udp_ports.get(i)).intValue() == put_details.getUDPPort()) {
continue;
}
}
final int f_i = i;
peers_for_announce.add(new DownloadAnnounceResultPeer() {
@Override
public String getSource() {
return (PEPeerSource.PS_DHT);
}
@Override
public String getAddress() {
return ((String) addresses.get(f_i));
}
@Override
public int getPort() {
return (((Integer) ports.get(f_i)).intValue());
}
@Override
public int getUDPPort() {
return (((Integer) udp_ports.get(f_i)).intValue());
}
@Override
public byte[] getPeerID() {
return (null);
}
@Override
public short getProtocol() {
String flag = (String) flags.get(f_i);
short protocol = DownloadAnnounceResultPeer.PROTOCOL_NORMAL;
if (flag != null) {
if (flag.contains("C")) {
protocol = DownloadAnnounceResultPeer.PROTOCOL_CRYPT;
}
}
return (protocol);
}
});
}
if (target.getType() == REG_TYPE_DERIVED && peers_for_announce.size() > 0) {
PeerManager pm = download.getPeerManager();
if (pm != null) {
// try some limited direct injection
List<DownloadAnnounceResultPeer> temp = new ArrayList<>(peers_for_announce);
Random rand = new Random();
for (int i = 0; i < DIRECT_INJECT_PEER_MAX && temp.size() > 0; i++) {
DownloadAnnounceResultPeer peer = temp.remove(rand.nextInt(temp.size()));
log(download, "Injecting derived peer " + peer.getAddress() + " into " + download.getName());
Map<Object, Object> user_data = new HashMap<>();
user_data.put(Peer.PR_PRIORITY_CONNECTION, Boolean.TRUE);
pm.addPeer(peer.getAddress(), peer.getPort(), peer.getUDPPort(), peer.getProtocol() == DownloadAnnounceResultPeer.PROTOCOL_CRYPT, user_data);
}
}
}
if (download_state == Download.ST_DOWNLOADING || download_state == Download.ST_SEEDING) {
final DownloadAnnounceResultPeer[] peers = new DownloadAnnounceResultPeer[peers_for_announce.size()];
peers_for_announce.toArray(peers);
download.setAnnounceResult(new DownloadAnnounceResult() {
@Override
public Download getDownload() {
return (download);
}
@Override
public int getResponseType() {
return (DownloadAnnounceResult.RT_SUCCESS);
}
@Override
public int getReportedPeerCount() {
return (peers.length);
}
@Override
public int getSeedCount() {
return (seed_count);
}
@Override
public int getNonSeedCount() {
return (leecher_count);
}
@Override
public String getError() {
return (null);
}
@Override
public URL getURL() {
return (url_to_report);
}
@Override
public DownloadAnnounceResultPeer[] getPeers() {
return (peers);
}
@Override
public long getTimeToWait() {
return (retry / 1000);
}
@Override
public Map getExtensions() {
return (null);
}
});
}
// only inject the scrape result if the torrent is decentralised. If we do this for
// "normal" torrents then it can have unwanted side-effects, such as stopping the torrent
// due to ignore rules if there are no downloaders in the DHT - bthub backup, for example,
// isn't scrapable...
// hmm, ok, try being a bit more relaxed about this, inject the scrape if
// we have any peers.
boolean inject_scrape = leecher_count > 0;
DownloadScrapeResult result = download.getLastScrapeResult();
if (result == null || result.getResponseType() == DownloadScrapeResult.RT_ERROR) {
} else {
synchronized (scrape_injection_map) {
int[] prev = (int[]) scrape_injection_map.get(download);
if (prev != null && prev[0] == result.getSeedCount() && prev[1] == result.getNonSeedCount()) {
inject_scrape = true;
}
}
}
if (torrent.isDecentralised() || inject_scrape) {
// make sure that the injected scrape values are consistent
// with our currently connected peers
PeerManager pm = download.getPeerManager();
int local_seeds = 0;
int local_leechers = 0;
if (pm != null) {
Peer[] dl_peers = pm.getPeers();
for (int i = 0; i < dl_peers.length; i++) {
Peer dl_peer = dl_peers[i];
if (dl_peer.getPercentDoneInThousandNotation() == 1000) {
local_seeds++;
} else {
local_leechers++;
}
}
}
final int f_adj_seeds = Math.max(seed_count, local_seeds);
final int f_adj_leechers = Math.max(leecher_count, local_leechers);
synchronized (scrape_injection_map) {
scrape_injection_map.put(download, new int[] { f_adj_seeds, f_adj_leechers });
}
try {
this_mon.enter();
int[] run_data = running_downloads.get(download);
if (run_data == null) {
run_data = run_data_cache.get(download);
}
if (run_data != null) {
run_data[1] = f_adj_seeds;
run_data[2] = f_adj_leechers;
run_data[4] = (int) (SystemTime.getCurrentTime() / 1000);
}
} finally {
this_mon.exit();
}
download.setScrapeResult(new DownloadScrapeResult() {
@Override
public Download getDownload() {
return (download);
}
@Override
public int getResponseType() {
return (DownloadScrapeResult.RT_SUCCESS);
}
@Override
public int getSeedCount() {
return (f_adj_seeds);
}
@Override
public int getNonSeedCount() {
return (f_adj_leechers);
}
@Override
public long getScrapeStartTime() {
return (start);
}
@Override
public void setNextScrapeStartTime(long nextScrapeStartTime) {
}
@Override
public long getNextScrapeStartTime() {
return (SystemTime.getCurrentTime() + retry);
}
@Override
public String getStatus() {
return ("OK");
}
@Override
public URL getURL() {
return (url_to_report);
}
});
}
}
});
}
return (num_done);
}
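For reference, both methods above supply the same DHTPluginOperationListener callbacks to dht.get. The following is a stripped-down, illustrative sketch of that contract (not taken from the BiglyBT source); it overrides exactly the methods used above and mirrors their seed/leecher counting.
DHTPluginOperationListener listener = new DHTPluginOperationListener() {
    private volatile int seeds;
    private volatile int leechers;

    @Override
    public void starts(byte[] key) {
        // lookup for 'key' has started
    }

    @Override
    public boolean diversified() {
        return (true); // both examples above return true here
    }

    @Override
    public void valueRead(DHTPluginContact originator, DHTPluginValue value) {
        // a value flagged FLAG_DOWNLOADING was published by a leecher, anything else by a seed
        if ((value.getFlags() & DHTPlugin.FLAG_DOWNLOADING) == 1) {
            leechers++;
        } else {
            seeds++;
        }
    }

    @Override
    public void valueWritten(DHTPluginContact target, DHTPluginValue value) {
        // not used for read-only lookups
    }

    @Override
    public void complete(byte[] key, boolean timeout_occurred) {
        // all available values have been read (or the lookup timed out)
    }
};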