
Example 1 with TRTrackerScraperResponseImpl

use of com.biglybt.core.tracker.client.impl.TRTrackerScraperResponseImpl in project BiglyBT by BiglySoftware.

the class TRTrackerBTScraperImpl method setScrape.

public void setScrape(TOTorrent torrent, URL url, DownloadScrapeResult result) {
    if (torrent != null && result != null) {
        TRTrackerScraperResponseImpl resp = tracker_checker.getHashData(torrent, url);
        URL result_url = result.getURL();
        boolean update_is_dht = TorrentUtils.isDecentralised(result_url);
        if (resp != null && (resp.getStatus() == TRTrackerScraperResponse.ST_ERROR || (resp.isDHTBackup() && update_is_dht))) {
            resp.setDHTBackup(update_is_dht);
            resp.setScrapeStartTime(result.getScrapeStartTime());
            // leave nextScrapeStartTime alone as we still want the existing
            // scraping mechanism to kick in and check the torrent's tracker
            resp.setStatus(result.getResponseType() == DownloadScrapeResult.RT_SUCCESS ? TRTrackerScraperResponse.ST_ONLINE : TRTrackerScraperResponse.ST_ERROR, result.getStatus() + " (" + (update_is_dht ? MessageText.getString("dht.backup.only") : (result_url == null ? "<null>" : result_url.getHost())) + ")");
            // call this last before dispatching listeners as it does another dispatch by itself
            resp.setSeedsPeers(result.getSeedCount(), result.getNonSeedCount());
            scraper.scrapeReceived(resp);
        }
    }
}
Also used : TRTrackerScraperResponseImpl(com.biglybt.core.tracker.client.impl.TRTrackerScraperResponseImpl) URL(java.net.URL)
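
The noteworthy detail in this example is the guard on the update: a DownloadScrapeResult arriving from outside the regular tracker path (typically a DHT backup) only overwrites the cached response when that response is currently in error, or when it was itself produced by a DHT backup. Below is a minimal, self-contained sketch of just that decision rule; the class and constants are illustrative stand-ins, not BiglyBT API.

// Sketch of the overwrite rule used by setScrape() above.
// ST_ERROR / ST_ONLINE mimic the corresponding TRTrackerScraperResponse states.
public class ScrapeMergeRule {

    static final int ST_ERROR = 0;
    static final int ST_ONLINE = 1;

    static boolean shouldOverwrite(int cachedStatus, boolean cachedIsDhtBackup, boolean updateIsDht) {
        // only replace data that is either known-bad or already sourced from the DHT fallback
        return cachedStatus == ST_ERROR || (cachedIsDhtBackup && updateIsDht);
    }

    public static void main(String[] args) {
        System.out.println(shouldOverwrite(ST_ERROR, false, true));  // true: an errored entry is replaced
        System.out.println(shouldOverwrite(ST_ONLINE, false, true)); // false: live tracker data is kept
        System.out.println(shouldOverwrite(ST_ONLINE, true, true));  // true: DHT-backed data is refreshed by DHT
    }
}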

Example 2 with TRTrackerScraperResponseImpl

use of com.biglybt.core.tracker.client.impl.TRTrackerScraperResponseImpl in project BiglyBT by BiglySoftware.

the class TrackerStatus method runScrapesSupport.

protected void runScrapesSupport(ArrayList<TRTrackerScraperResponseImpl> allResponses, boolean force) {
    try {
        if (Logger.isEnabled()) {
            Logger.log(new LogEvent(LOGID, "TrackerStatus: scraping '" + scrapeURL + "', for " + allResponses.size() + " of " + hashes.size() + " hashes" + ", single_hash_scrapes: " + (bSingleHashScrapes ? "Y" : "N")));
        }
        boolean original_bSingleHashScrapes = bSingleHashScrapes;
        boolean disable_all_scrapes = !COConfigurationManager.getBooleanParameter("Tracker Client Scrape Enable");
        byte[] scrape_reply = null;
        List<HashWrapper> hashesInQuery = new ArrayList<>(allResponses.size());
        List<TRTrackerScraperResponseImpl> responsesInQuery = new ArrayList<>(allResponses.size());
        List<HashWrapper> hashesForUDP = new ArrayList<>();
        List<TRTrackerScraperResponseImpl> responsesForUDP = new ArrayList<>();
        List<TRTrackerScraperResponseImpl> activeResponses = responsesInQuery;
        try {
            // if URL already includes a query component then just append our
            // params
            HashWrapper one_of_the_hashes = null;
            // TRTrackerScraperResponseImpl one_of_the_responses = null;
            char first_separator = scrapeURL.indexOf('?') == -1 ? '?' : '&';
            String info_hash = "";
            String flags = "";
            for (TRTrackerScraperResponseImpl response : allResponses) {
                HashWrapper hash = response.getHash();
                if (Logger.isEnabled())
                    Logger.log(new LogEvent(TorrentUtils.getDownloadManager(hash), LOGID, "TrackerStatus: " + scrapeURL + ": scraping, single_hash_scrapes = " + bSingleHashScrapes));
                if (!scraper.isNetworkEnabled(hash, tracker_url)) {
                    response.setNextScrapeStartTime(SystemTime.getCurrentTime() + FAULTY_SCRAPE_RETRY_INTERVAL);
                    response.setStatus(TRTrackerScraperResponse.ST_ERROR, MessageText.getString(SS + "networkdisabled"));
                    scraper.scrapeReceived(response);
                } else if ((!force) && (disable_all_scrapes || !scraper.isTorrentScrapable(hash))) {
                    response.setNextScrapeStartTime(SystemTime.getCurrentTime() + FAULTY_SCRAPE_RETRY_INTERVAL);
                    response.setStatus(TRTrackerScraperResponse.ST_ERROR, MessageText.getString(SS + "disabled"));
                    scraper.scrapeReceived(response);
                } else {
                    hashesInQuery.add(hash);
                    responsesInQuery.add(response);
                    response.setStatus(TRTrackerScraperResponse.ST_SCRAPING, MessageText.getString(SS + "scraping"));
                    // technically we haven't received a scrape yet, but we need
                    // to notify listeners (the ones that display status)
                    scraper.scrapeReceived(response);
                    // the client-id stuff RELIES on info_hash being the FIRST
                    // parameter added by us to the URL, so don't change it!
                    info_hash += ((one_of_the_hashes != null) ? '&' : first_separator) + "info_hash=";
                    info_hash += URLEncoder.encode(new String(hash.getBytes(), Constants.BYTE_ENCODING), Constants.BYTE_ENCODING).replaceAll("\\+", "%20");
                    Object[] extensions = scraper.getExtensions(hash);
                    if (extensions != null) {
                        if (extensions[0] != null) {
                            info_hash += (String) extensions[0];
                        }
                        flags += (Character) extensions[1];
                    } else {
                        flags += TRTrackerScraperClientResolver.FL_NONE;
                    }
                    one_of_the_hashes = hash;
                    if (hashesForUDP.size() < 70) {
                        hashesForUDP.add(hash);
                        responsesForUDP.add(response);
                    }
                }
            }
            if (one_of_the_hashes == null) {
                return;
            }
            String request = scrapeURL + info_hash;
            if (az_tracker) {
                String port_details = TRTrackerUtils.getPortsForURL();
                request += port_details;
                request += "&azsf=" + flags + "&azver=" + TRTrackerAnnouncer.AZ_TRACKER_VERSION_CURRENT;
            }
            URL reqUrl = new URL(request);
            if (Logger.isEnabled())
                Logger.log(new LogEvent(LOGID, "Accessing scrape interface using url : " + reqUrl));
            ByteArrayOutputStream message = new ByteArrayOutputStream();
            long scrapeStartTime = SystemTime.getCurrentTime();
            URL redirect_url = null;
            String protocol = reqUrl.getProtocol();
            URL udpScrapeURL = null;
            boolean auto_probe = false;
            if (protocol.equalsIgnoreCase("udp")) {
                if (udpScrapeEnabled) {
                    udpScrapeURL = reqUrl;
                } else {
                    throw (new IOException("UDP Tracker protocol disabled"));
                }
            } else if (protocol.equalsIgnoreCase("http") && !az_tracker && scrapeCount % autoUDPscrapeEvery == 0 && udpProbeEnabled && udpScrapeEnabled) {
                String tracker_network = AENetworkClassifier.categoriseAddress(reqUrl.getHost());
                if (tracker_network == AENetworkClassifier.AT_PUBLIC) {
                    udpScrapeURL = new URL(reqUrl.toString().replaceFirst("^http", "udp"));
                    auto_probe = true;
                }
            }
            if (udpScrapeURL == null) {
                if (!az_tracker && !tcpScrapeEnabled) {
                    String tracker_network = AENetworkClassifier.categoriseAddress(reqUrl.getHost());
                    if (tracker_network == AENetworkClassifier.AT_PUBLIC) {
                        throw (new IOException("HTTP Tracker protocol disabled"));
                    }
                }
            }
            try {
                // set context in case authentication dialog is required
                TorrentUtils.setTLSTorrentHash(one_of_the_hashes);
                if (udpScrapeURL != null) {
                    activeResponses = responsesForUDP;
                    boolean success = scrapeUDP(reqUrl, message, hashesForUDP, !auto_probe);
                    if ((!success || message.size() == 0) && !protocol.equalsIgnoreCase("udp")) {
                        // automatic UDP probe failed, use HTTP again
                        udpScrapeURL = null;
                        message.reset();
                        if (autoUDPscrapeEvery < 16)
                            autoUDPscrapeEvery <<= 1;
                        if (Logger.isEnabled())
                            Logger.log(new LogEvent(LOGID, LogEvent.LT_INFORMATION, "redirection of http scrape [" + scrapeURL + "] to udp failed, will retry in " + autoUDPscrapeEvery + " scrapes"));
                    } else if (success && !protocol.equalsIgnoreCase("udp")) {
                        if (Logger.isEnabled())
                            Logger.log(new LogEvent(LOGID, LogEvent.LT_INFORMATION, "redirection of http scrape [" + scrapeURL + "] to udp successful"));
                        autoUDPscrapeEvery = 1;
                        TRTrackerUtils.setUDPProbeResult(reqUrl, true);
                    }
                }
                scrapeCount++;
                if (udpScrapeURL == null) {
                    activeResponses = responsesInQuery;
                    redirect_url = scrapeHTTP(hashesInQuery, reqUrl, message);
                }
            } finally {
                TorrentUtils.setTLSTorrentHash(null);
            }
            scrape_reply = message.toByteArray();
            Map map = BDecoder.decode(scrape_reply);
            boolean this_is_az_tracker = map.get("aztracker") != null;
            if (az_tracker != this_is_az_tracker) {
                az_tracker = this_is_az_tracker;
                TRTrackerUtils.setAZTracker(tracker_url, az_tracker);
            }
            Map mapFiles = (Map) map.get("files");
            if (Logger.isEnabled())
                Logger.log(new LogEvent(LOGID, "Response from scrape interface " + scrapeURL + ": " + ((mapFiles == null) ? "null" : "" + mapFiles.size()) + " returned"));
            int iMinRequestInterval = 0;
            if (map != null) {
                /* "The spec":
					 * files
					 *   infohash
					 *   complete
					 *   incomplete
					 *   downloaded
					 *   name
					 *  flags
					 *    min_request_interval
					 *  failure reason
					 */
                /*
					 * files infohash complete incomplete downloaded name flags
					 * min_request_interval
					 */
                Map mapFlags = (Map) map.get("flags");
                if (mapFlags != null) {
                    Long longScrapeValue = (Long) mapFlags.get("min_request_interval");
                    if (longScrapeValue != null)
                        iMinRequestInterval = longScrapeValue.intValue();
                    // Tracker owners want this log entry
                    if (Logger.isEnabled())
                        Logger.log(new LogEvent(LOGID, "Received min_request_interval of " + iMinRequestInterval));
                }
            }
            if (mapFiles == null || mapFiles.size() == 0) {
                if (bSingleHashScrapes && map.containsKey("complete") && map.containsKey("incomplete")) {
                    int complete = MapUtils.getMapInt(map, "complete", 0);
                    int incomplete = MapUtils.getMapInt(map, "incomplete", 0);
                    TRTrackerScraperResponseImpl response = (TRTrackerScraperResponseImpl) activeResponses.get(0);
                    response.setPeers(incomplete);
                    response.setSeeds(complete);
                    int minRequestInterval = MapUtils.getMapInt(map, "interval", FAULTY_SCRAPE_RETRY_INTERVAL);
                    int scrapeInterval = TRTrackerScraperResponseImpl.calcScrapeIntervalSecs(minRequestInterval, complete);
                    long nextScrapeTime = SystemTime.getCurrentTime() + (scrapeInterval * 1000);
                    response.setNextScrapeStartTime(nextScrapeTime);
                    response.setStatus(TRTrackerScraperResponse.ST_ONLINE, "Tracker returned Announce from scrape call");
                    response.setScrapeStartTime(scrapeStartTime);
                    scraper.scrapeReceived(response);
                    return;
                }
                // custom extension here to handle "failure reason" returned for
                // scrapes
                byte[] failure_reason_bytes = map == null ? null : (byte[]) map.get("failure reason");
                if (failure_reason_bytes != null) {
                    long nextScrapeTime = SystemTime.getCurrentTime() + ((iMinRequestInterval == 0) ? FAULTY_SCRAPE_RETRY_INTERVAL : iMinRequestInterval * 1000);
                    for (TRTrackerScraperResponseImpl response : activeResponses) {
                        response.setNextScrapeStartTime(nextScrapeTime);
                        response.setStatus(TRTrackerScraperResponse.ST_ERROR, MessageText.getString(SS + "error") + new String(failure_reason_bytes, Constants.DEFAULT_ENCODING));
                        // notify listeners
                        scraper.scrapeReceived(response);
                    }
                } else {
                    if (activeResponses.size() > 1) {
                        // multi were requested, 0 returned. Therefore, multi not
                        // supported
                        bSingleHashScrapes = true;
                        if (Logger.isEnabled())
                            Logger.log(new LogEvent(LOGID, LogEvent.LT_WARNING, scrapeURL + " doesn't properly support " + "multi-hash scrapes"));
                        for (TRTrackerScraperResponseImpl response : activeResponses) {
                            response.setStatus(TRTrackerScraperResponse.ST_ERROR, MessageText.getString(SS + "error") + MessageText.getString(SSErr + "invalid"));
                            // notify listeners
                            scraper.scrapeReceived(response);
                        }
                    } else {
                        long nextScrapeTime = SystemTime.getCurrentTime() + ((iMinRequestInterval == 0) ? NOHASH_RETRY_INTERVAL : iMinRequestInterval * 1000);
                        // 1 was requested, 0 returned. Therefore, hash not found.
                        TRTrackerScraperResponseImpl response = (TRTrackerScraperResponseImpl) activeResponses.get(0);
                        response.setNextScrapeStartTime(nextScrapeTime);
                        response.setStatus(TRTrackerScraperResponse.ST_ERROR, MessageText.getString(SS + "error") + MessageText.getString(SSErr + "nohash"));
                        // notify listeners
                        scraper.scrapeReceived(response);
                    }
                }
                return;
            }
            /*
             * If we requested multiple hashes, but only one was returned, revert
             * to Single Hash Scrapes, but continue on to process the one hash that
             * was returned (it may be a random one from the list)
             */
            if (!bSingleHashScrapes && activeResponses.size() > 1 && mapFiles.size() == 1) {
                bSingleHashScrapes = true;
                if (Logger.isEnabled())
                    Logger.log(new LogEvent(LOGID, LogEvent.LT_WARNING, scrapeURL + " only returned " + mapFiles.size() + " hash scrape(s), but we asked for " + activeResponses.size()));
            }
            for (TRTrackerScraperResponseImpl response : activeResponses) {
                // LGLogger.log( "decoding response #" +i+ ": " +
                // ByteFormatter.nicePrint( response.getHash(), true ) );
                // retrieve the scrape data for the relevant infohash
                Map scrapeMap = (Map) mapFiles.get(new String(response.getHash().getBytes(), Constants.BYTE_ENCODING));
                if (scrapeMap == null) {
                    // some trackers that return only 1 hash return a random one!
                    if (activeResponses.size() == 1 || mapFiles.size() != 1) {
                        response.setNextScrapeStartTime(SystemTime.getCurrentTime() + NOHASH_RETRY_INTERVAL);
                        response.setStatus(TRTrackerScraperResponse.ST_ERROR, MessageText.getString(SS + "error") + MessageText.getString(SSErr + "nohash"));
                        // notify listeners
                        scraper.scrapeReceived(response);
                    } else if (scraper.isTorrentScrapable(response.getHash())) {
                        // This tracker doesn't support multiple hash requests.
                        // revert status to what it was
                        response.revertStatus();
                        if (response.getStatus() == TRTrackerScraperResponse.ST_SCRAPING) {
                            // System.out.println("Hash " +
                            // ByteFormatter.nicePrint(response.getHash(), true) + "
                            // mysteriously reverted to ST_SCRAPING!");
                            // response.setStatus(TRTrackerScraperResponse.ST_ONLINE, "");
                            response.setNextScrapeStartTime(SystemTime.getCurrentTime() + FAULTY_SCRAPE_RETRY_INTERVAL);
                            response.setStatus(TRTrackerScraperResponse.ST_ERROR, MessageText.getString(SS + "error") + MessageText.getString(SSErr + "invalid"));
                        } else {
                            // force single-hash scrapes here
                            bSingleHashScrapes = true;
                            if (original_bSingleHashScrapes) {
                                response.setNextScrapeStartTime(SystemTime.getCurrentTime() + FAULTY_SCRAPE_RETRY_INTERVAL);
                            }
                        }
                        // notify listeners
                        scraper.scrapeReceived(response);
                    // if this was the first scrape request in the list,
                    // TrackerChecker will attempt to scrape again because we
                    // didn't reset the nextScrapeStartTime. But the next time,
                    // bSingleHashScrapes will be true, and only 1 hash will be
                    // requested, so there will not be infinite looping
                    }
                // System.out.println("scrape: hash missing from reply");
                } else {
                    // retrieve values
                    Long l_seeds = (Long) scrapeMap.get("complete");
                    Long l_peers = (Long) scrapeMap.get("incomplete");
                    Long l_comp = (Long) scrapeMap.get("downloaded");
                    // expected but deal with missing as some trackers omit :(
                    int seeds = l_seeds == null ? 0 : l_seeds.intValue();
                    // expected but deal with missing
                    int peers = l_peers == null ? 0 : l_peers.intValue();
                    // optional
                    int completed = l_comp == null ? -1 : l_comp.intValue();
                    // make sure we don't use invalid replies
                    if (seeds < 0 || peers < 0 || completed < -1) {
                        if (Logger.isEnabled()) {
                            HashWrapper hash = response.getHash();
                            Logger.log(new LogEvent(TorrentUtils.getDownloadManager(hash), LOGID, "Invalid scrape response from '" + reqUrl + "': map = " + scrapeMap));
                        }
                        // the scrape manager will run scrapes for each individual hash.
                        if (activeResponses.size() > 1 && bSingleHashScrapes) {
                            response.setStatus(TRTrackerScraperResponse.ST_ERROR, MessageText.getString(SS + "error") + MessageText.getString(SSErr + "invalid"));
                            scraper.scrapeReceived(response);
                            continue;
                        }
                        response.setNextScrapeStartTime(SystemTime.getCurrentTime() + FAULTY_SCRAPE_RETRY_INTERVAL);
                        response.setStatus(TRTrackerScraperResponse.ST_ERROR, MessageText.getString(SS + "error") + MessageText.getString(SSErr + "invalid") + " " + (seeds < 0 ? MessageText.getString("MyTorrentsView.seeds") + " == " + seeds + ". " : "") + (peers < 0 ? MessageText.getString("MyTorrentsView.peers") + " == " + peers + ". " : "") + (completed < 0 ? MessageText.getString("MyTorrentsView.completed") + " == " + completed + ". " : ""));
                        scraper.scrapeReceived(response);
                        continue;
                    }
                    int scrapeInterval = TRTrackerScraperResponseImpl.calcScrapeIntervalSecs(iMinRequestInterval, seeds);
                    long nextScrapeTime = SystemTime.getCurrentTime() + (scrapeInterval * 1000);
                    response.setNextScrapeStartTime(nextScrapeTime);
                    // create the response
                    response.setScrapeStartTime(scrapeStartTime);
                    response.setSeeds(seeds);
                    response.setPeers(peers);
                    response.setCompleted(completed);
                    response.setStatus(TRTrackerScraperResponse.ST_ONLINE, MessageText.getString(SS + "ok"));
                    // notify listeners
                    scraper.scrapeReceived(response);
                    try {
                        if (activeResponses.size() == 1 && redirect_url != null) {
                            // we only deal with redirects for single urls - if the tracker wants to
                            // redirect one of a group it has to force single-hash scrapes anyway
                            String redirect_str = redirect_url.toString();
                            int s_pos = redirect_str.indexOf("/scrape");
                            if (s_pos != -1) {
                                URL new_url = new URL(redirect_str.substring(0, s_pos) + "/announce" + redirect_str.substring(s_pos + 7));
                                if (scraper.redirectTrackerUrl(response.getHash(), tracker_url, new_url)) {
                                    removeHash(response.getHash());
                                }
                            }
                        }
                    } catch (Throwable e) {
                        Debug.printStackTrace(e);
                    }
                }
            }
        // for responses
        } catch (NoClassDefFoundError ignoreSSL) {
            // javax/net/ssl/SSLSocket
            for (TRTrackerScraperResponseImpl response : activeResponses) {
                response.setNextScrapeStartTime(SystemTime.getCurrentTime() + FAULTY_SCRAPE_RETRY_INTERVAL);
                response.setStatus(TRTrackerScraperResponse.ST_ERROR, MessageText.getString(SS + "error") + ignoreSSL.getMessage());
                // notify listeners
                scraper.scrapeReceived(response);
            }
        } catch (FileNotFoundException e) {
            for (TRTrackerScraperResponseImpl response : activeResponses) {
                response.setNextScrapeStartTime(SystemTime.getCurrentTime() + FAULTY_SCRAPE_RETRY_INTERVAL);
                response.setStatus(TRTrackerScraperResponse.ST_ERROR, MessageText.getString(SS + "error") + MessageText.getString("DownloadManager.error.filenotfound"));
                // notify listeners
                scraper.scrapeReceived(response);
            }
        } catch (SocketException e) {
            setAllError(activeResponses, e);
        } catch (SocketTimeoutException e) {
            setAllError(activeResponses, e);
        } catch (UnknownHostException e) {
            setAllError(activeResponses, e);
        } catch (PRUDPPacketHandlerException e) {
            setAllError(activeResponses, e);
        } catch (BEncodingException e) {
            setAllError(activeResponses, e);
        } catch (Exception e) {
            // for apache we can get error 414 - URL too long. simplest solution
            // for this is to fall back to single scraping
            String error_message = e.getMessage();
            if (error_message != null) {
                if (error_message.contains(" 500 ") || error_message.contains(" 400 ") || error_message.contains(" 403 ") || error_message.contains(" 404 ") || error_message.contains(" 501 ")) {
                    // various errors that have a 99% chance of happening on
                    // any other scrape request
                    setAllError(activeResponses, e);
                    return;
                }
                if (error_message.contains("414") && !bSingleHashScrapes) {
                    bSingleHashScrapes = true;
                    // Skip setting up the response.  We want to scrape again
                    return;
                }
            }
            String msg = Debug.getNestedExceptionMessage(e);
            if (scrape_reply != null) {
                String trace_data;
                if (scrape_reply.length <= 150) {
                    trace_data = new String(scrape_reply);
                } else {
                    trace_data = new String(scrape_reply, 0, 150) + "...";
                }
                msg += " [" + trace_data + "]";
            }
            for (TRTrackerScraperResponseImpl response : activeResponses) {
                if (Logger.isEnabled()) {
                    HashWrapper hash = response.getHash();
                    Logger.log(new LogEvent(TorrentUtils.getDownloadManager(hash), LOGID, LogEvent.LT_ERROR, "Error from scrape interface " + scrapeURL + " : " + msg + " (" + e.getClass() + ")"));
                }
                response.setNextScrapeStartTime(SystemTime.getCurrentTime() + FAULTY_SCRAPE_RETRY_INTERVAL);
                response.setStatus(TRTrackerScraperResponse.ST_ERROR, MessageText.getString(SS + "error") + msg);
                // notify listeners
                scraper.scrapeReceived(response);
            }
        }
    } catch (Throwable t) {
        Debug.out("runScrapesSupport failed", t);
    } finally {
        numActiveScrapes.decrementAndGet();
    }
}
Also used : TRTrackerScraperResponseImpl(com.biglybt.core.tracker.client.impl.TRTrackerScraperResponseImpl) FileNotFoundException(java.io.FileNotFoundException) LogEvent(com.biglybt.core.logging.LogEvent) ByteArrayOutputStream(java.io.ByteArrayOutputStream) IOException(java.io.IOException) ClientIDException(com.biglybt.pif.clientid.ClientIDException) PRUDPPacketHandlerException(com.biglybt.net.udp.uc.PRUDPPacketHandlerException)
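
The heart of this multi-hash scrape is how the request URL is assembled: each 20-byte info-hash is appended as another info_hash= parameter, with the raw bytes percent-encoded through a byte-preserving single-byte charset and URLEncoder's form-style "+" rewritten to "%20". The sketch below shows that pattern in isolation; ISO-8859-1 is assumed here as a stand-in for Constants.BYTE_ENCODING, and buildScrapeUrl is a hypothetical helper rather than BiglyBT code.

import java.io.UnsupportedEncodingException;
import java.net.URLEncoder;
import java.util.List;

public class ScrapeUrlBuilder {

    static String buildScrapeUrl(String scrapeUrl, List<byte[]> hashes) throws UnsupportedEncodingException {
        // reuse an existing query component if the scrape URL already has one
        char firstSeparator = scrapeUrl.indexOf('?') == -1 ? '?' : '&';
        StringBuilder sb = new StringBuilder(scrapeUrl);
        boolean first = true;
        for (byte[] hash : hashes) {
            sb.append(first ? firstSeparator : '&').append("info_hash=");
            // map each raw byte one-to-one into a string, percent-encode it,
            // then undo URLEncoder's "+"-for-space convention
            sb.append(URLEncoder.encode(new String(hash, "ISO-8859-1"), "ISO-8859-1")
                                .replaceAll("\\+", "%20"));
            first = false;
        }
        return sb.toString();
    }

    public static void main(String[] args) throws Exception {
        byte[] fakeHash = new byte[20]; // 20 zero bytes, purely for illustration
        System.out.println(buildScrapeUrl("http://tracker.example.com/scrape", List.of(fakeHash)));
    }
}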

Example 3 with TRTrackerScraperResponseImpl

use of com.biglybt.core.tracker.client.impl.TRTrackerScraperResponseImpl in project BiglyBT by BiglySoftware.

the class TrackerChecker method getHashData.

/**
 * Retrieves the last cached Scraper Response for the supplied tracker URL
 *  and hash. If no cache exists for the hash, one is created.
 *
 * @return The cached scrape response.  Can be null.
 */
protected TRTrackerScraperResponseImpl getHashData(URL trackerUrl, final HashWrapper hash) {
    // can be null when first called and url not yet set up...
    if (trackerUrl == null) {
        return (null);
    }
    if (trackerUrl.getHost().endsWith(".dht")) {
        return (null);
    }
    TRTrackerScraperResponseImpl data = null;
    // DON'T USE URL as a key in the trackers map, use the string version. If you
    // use a URL then the "containsKey" method does a URL.equals test. This does not
    // simply check on string equivalence, it tries to resolve the host name. This can
    // result in significant hangs (several seconds...)
    String url_str = trackerUrl.toString();
    TrackerStatus ts = null;
    try {
        trackers_mon.enter();
        ts = (TrackerStatus) trackers.get(url_str);
        if (ts != null) {
            data = ts.getHashData(hash);
        } else {
            // System.out.println( "adding hash for " + trackerUrl + " : " + ByteFormatter.nicePrint(hashBytes, true));
            ts = new TrackerStatus(this, scraper.getScraper(), trackerUrl);
            trackers.put(url_str, ts);
            if (!ts.isTrackerScrapeUrlValid()) {
                if (Logger.isEnabled()) {
                    Logger.log(new LogEvent(TorrentUtils.getDownloadManager(hash), LOGID, LogEvent.LT_ERROR, "Can't scrape using url '" + trackerUrl + "' as it doesn't end in " + "'/announce', skipping."));
                }
            }
        }
    } finally {
        trackers_mon.exit();
    }
    if (data == null) {
        data = ts.addHash(hash);
    }
    return data;
}
Also used : LogEvent(com.biglybt.core.logging.LogEvent) TRTrackerScraperResponseImpl(com.biglybt.core.tracker.client.impl.TRTrackerScraperResponseImpl)
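
The comment in this example explains the keying choice, which is worth calling out: java.net.URL's equals() and hashCode() may resolve host names, so using URL objects as map keys can block on DNS during lookups. The snippet below is a standalone illustration of the safe pattern of keying on the URL's string form; the map name is hypothetical.

import java.net.URL;
import java.util.HashMap;
import java.util.Map;

public class TrackerKeying {
    public static void main(String[] args) throws Exception {
        // keyed on the String form, so containsKey()/get() never touch the network
        Map<String, Object> trackersByUrlString = new HashMap<>();

        URL trackerUrl = new URL("http://tracker.example.com:6969/announce");
        trackersByUrlString.put(trackerUrl.toString(), new Object());

        System.out.println(trackersByUrlString.containsKey(trackerUrl.toString())); // true, purely in-memory
    }
}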

Example 4 with TRTrackerScraperResponseImpl

use of com.biglybt.core.tracker.client.impl.TRTrackerScraperResponseImpl in project BiglyBT by BiglySoftware.

the class TrackerStatus method updateSingleHash.

protected void updateSingleHash(HashWrapper hash, boolean force, boolean async) {
    if (scrapeURL == null) {
        if (Logger.isEnabled()) {
            Logger.log(new LogEvent(TorrentUtils.getDownloadManager(hash), LOGID, "TrackerStatus: " + scrapeURL + ": scrape cancelled.. url null"));
        }
        return;
    }
    try {
        ArrayList<TRTrackerScraperResponseImpl> responsesToUpdate = new ArrayList<>();
        TRTrackerScraperResponseImpl response;
        try {
            hashes_mon.enter();
            response = (TRTrackerScraperResponseImpl) hashes.get(hash);
        } finally {
            hashes_mon.exit();
        }
        if (response == null) {
            response = addHash(hash);
        }
        long lMainNextScrapeStartTime = response.getNextScrapeStartTime();
        if (!force && lMainNextScrapeStartTime > SystemTime.getCurrentTime()) {
            if (Logger.isEnabled()) {
                Logger.log(new LogEvent(TorrentUtils.getDownloadManager(hash), LOGID, "TrackerStatus: " + scrapeURL + ": scrape cancelled.. not forced and still " + (lMainNextScrapeStartTime - SystemTime.getCurrentTime()) + "ms"));
            }
            return;
        }
        // Set status id to SCRAPING, but leave status string until we actually
        // do the scrape
        response.setStatus(TRTrackerScraperResponse.ST_SCRAPING, MessageText.getString(SS + "scraping.queued"));
        if (Logger.isEnabled()) {
            Logger.log(new LogEvent(TorrentUtils.getDownloadManager(hash), LOGID, "TrackerStatus: " + scrapeURL + ": setting to scraping"));
        }
        responsesToUpdate.add(response);
        if (!bSingleHashScrapes) {
            try {
                hashes_mon.enter();
                Iterator iterHashes = hashes.values().iterator();
                while (iterHashes.hasNext() && responsesToUpdate.size() < GROUP_SCRAPES_LIMIT) {
                    TRTrackerScraperResponseImpl r = (TRTrackerScraperResponseImpl) iterHashes.next();
                    if (!r.getHash().equals(hash)) {
                        long lTimeDiff = Math.abs(lMainNextScrapeStartTime - r.getNextScrapeStartTime());
                        if (lTimeDiff <= GROUP_SCRAPES_MS && r.getStatus() != TRTrackerScraperResponse.ST_SCRAPING) {
                            r.setStatus(TRTrackerScraperResponse.ST_SCRAPING, MessageText.getString(SS + "scraping.queued"));
                            if (Logger.isEnabled()) {
                                Logger.log(new LogEvent(TorrentUtils.getDownloadManager(r.getHash()), LOGID, "TrackerStatus:" + scrapeURL + ": setting to scraping via group scrape"));
                            }
                            responsesToUpdate.add(r);
                        }
                    }
                }
            } finally {
                hashes_mon.exit();
            }
        }
        runScrapes(responsesToUpdate, force, async);
    } catch (Throwable t) {
        Debug.out("updateSingleHash() exception", t);
    }
}
Also used : LogEvent(com.biglybt.core.logging.LogEvent) TRTrackerScraperResponseImpl(com.biglybt.core.tracker.client.impl.TRTrackerScraperResponseImpl)
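
The batching logic in the middle of this method is the interesting part: once one hash is due for a scrape, any other pending hash whose next-scrape time falls within a fixed window of it is folded into the same request, up to a batch limit. A minimal sketch of that rule follows; the window and limit values are assumed placeholders, not BiglyBT's GROUP_SCRAPES_MS / GROUP_SCRAPES_LIMIT constants.

import java.util.ArrayList;
import java.util.List;

public class GroupScrapeBatching {

    static final long GROUP_WINDOW_MS = 15 * 60 * 1000; // assumed window
    static final int GROUP_LIMIT = 20;                   // assumed batch cap

    // collect the triggering scrape time plus any pending times close enough to ride along
    static List<Long> batch(long triggerTime, List<Long> pendingTimes) {
        List<Long> batched = new ArrayList<>();
        batched.add(triggerTime);
        for (long t : pendingTimes) {
            if (batched.size() >= GROUP_LIMIT) {
                break;
            }
            if (Math.abs(triggerTime - t) <= GROUP_WINDOW_MS) {
                batched.add(t);
            }
        }
        return batched;
    }

    public static void main(String[] args) {
        long now = System.currentTimeMillis();
        // the scrape due in one minute joins the batch; the one two hours out does not
        System.out.println(batch(now, List.of(now + 60_000L, now + 2L * 60 * 60 * 1000)).size()); // prints 2
    }
}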

Example 5 with TRTrackerScraperResponseImpl

use of com.biglybt.core.tracker.client.impl.TRTrackerScraperResponseImpl in project BiglyBT by BiglySoftware.

the class TrackerStatus method setAllError.

/**
 * Marks all of the supplied responses as errored, deriving the status message
 * from the given exception.
 *
 * @param responses the scrape responses to update
 * @param e the exception that caused the scrape to fail
 */
private void setAllError(List<TRTrackerScraperResponseImpl> responses, Exception e) {
    String msg;
    if (e instanceof BEncodingException) {
        msg = e.getLocalizedMessage();
        if (msg.contains("html")) {
            msg = "Could not decode response, appears to be a website instead of tracker scrape: " + msg.replace('\n', ' ');
        } else {
            msg = "Bencoded response malformed: " + msg;
        }
    } else {
        msg = Debug.getNestedExceptionMessage(e);
    }
    for (TRTrackerScraperResponseImpl response : responses) {
        if (Logger.isEnabled()) {
            HashWrapper hash = response.getHash();
            Logger.log(new LogEvent(TorrentUtils.getDownloadManager(hash), LOGID, LogEvent.LT_WARNING, "Error from scrape interface " + scrapeURL + " : " + msg));
        // e.printStackTrace();
        }
        response.setNextScrapeStartTime(SystemTime.getCurrentTime() + FAULTY_SCRAPE_RETRY_INTERVAL);
        response.setStatus(TRTrackerScraperResponse.ST_ERROR, StringInterner.intern(MessageText.getString(SS + "error") + msg));
        // notify listeners
        scraper.scrapeReceived(response);
    }
}
Also used : LogEvent(com.biglybt.core.logging.LogEvent) TRTrackerScraperResponseImpl(com.biglybt.core.tracker.client.impl.TRTrackerScraperResponseImpl)
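
The message shaping here is simple but useful: a bencode decode failure whose text mentions HTML is reported as "you reached a website, not a tracker", any other decode failure as a malformed bencoded response, and everything else just contributes its nested message. A compact, self-contained sketch of that mapping is below; the generic exception types stand in for BiglyBT's BEncodingException.

public class ScrapeErrorMessage {

    static String describe(Exception e, boolean isDecodeError) {
        String msg = e.getLocalizedMessage() == null ? String.valueOf(e) : e.getLocalizedMessage();
        if (isDecodeError) {
            if (msg.contains("html")) {
                // the "tracker" answered with a web page rather than a bencoded scrape
                return "Could not decode response, appears to be a website instead of tracker scrape: "
                        + msg.replace('\n', ' ');
            }
            return "Bencoded response malformed: " + msg;
        }
        return msg;
    }

    public static void main(String[] args) {
        System.out.println(describe(new Exception("unexpected token '<html>'"), true));
        System.out.println(describe(new java.net.SocketTimeoutException("connect timed out"), false));
    }
}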

Aggregations

TRTrackerScraperResponseImpl (com.biglybt.core.tracker.client.impl.TRTrackerScraperResponseImpl) 6
LogEvent (com.biglybt.core.logging.LogEvent) 4
PRUDPPacketHandlerException (com.biglybt.net.udp.uc.PRUDPPacketHandlerException) 1
ClientIDException (com.biglybt.pif.clientid.ClientIDException) 1
ByteArrayOutputStream (java.io.ByteArrayOutputStream) 1
FileNotFoundException (java.io.FileNotFoundException) 1
IOException (java.io.IOException) 1
URL (java.net.URL) 1