
Example 16 with PEPeerTransport

use of com.biglybt.core.peer.impl.PEPeerTransport in project BiglyBT by BiglySoftware.

The class PEPeerTransportProtocol, method reconnect().

@Override
public PEPeerTransport reconnect(boolean tryUDP, boolean tryIPv6) {
    boolean use_tcp = isTCP() && !(tryUDP && getUDPListenPort() > 0);
    if ((use_tcp && getTCPListenPort() > 0) || (!use_tcp && getUDPListenPort() > 0)) {
        boolean use_crypto = getPeerItemIdentity().getHandshakeType() == PeerItemFactory.HANDSHAKE_TYPE_CRYPTO;
        PEPeerTransport new_conn = PEPeerTransportFactory.createTransport(manager, getPeerSource(), tryIPv6 && alternativeAddress != null ? alternativeAddress.getHostAddress() : getIp(), getTCPListenPort(), getUDPListenPort(), use_tcp, use_crypto, crypto_level, null);
        // log to both relations
        Logger.log(new LogEvent(new Object[] { this, new_conn }, LOGID, "attempting to reconnect, creating new connection"));
        if (new_conn instanceof PEPeerTransportProtocol) {
            PEPeerTransportProtocol pt = (PEPeerTransportProtocol) new_conn;
            pt.checkForReconnect(mySessionID);
            // carry over the alt address in case the reconnect fails and we try again with ipv6
            pt.alternativeAddress = alternativeAddress;
        }
        manager.addPeer(new_conn);
        return (new_conn);
    } else {
        return (null);
    }
}
Also used: PEPeerTransport(com.biglybt.core.peer.impl.PEPeerTransport)
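
For reference, a minimal standalone sketch of the transport-selection logic above. This is a hypothetical helper, not part of the BiglyBT API; the class name, constants and parameter names are invented for illustration. It decides whether a reconnect would go over TCP, fall back to UDP, or is impossible because the peer advertises no usable listen port, matching the conditions at the top of reconnect().

public final class ReconnectDecisionSketch {

    public static final int NONE = 0, TCP = 1, UDP = 2;

    // isTcpNow: the current connection is TCP
    // tryUDP:   the caller asked for a UDP fallback
    public static int chooseTransport(boolean isTcpNow, boolean tryUDP,
                                      int tcpListenPort, int udpListenPort) {
        // Stay on TCP unless a UDP fallback was requested and the peer
        // actually advertises a UDP listen port.
        boolean useTcp = isTcpNow && !(tryUDP && udpListenPort > 0);
        if (useTcp && tcpListenPort > 0) {
            return TCP;
        }
        if (!useTcp && udpListenPort > 0) {
            return UDP;
        }
        // No usable listen port for the chosen transport; reconnect() returns null in this case.
        return NONE;
    }

    public static void main(String[] args) {
        System.out.println(chooseTransport(true, false, 6881, 0));    // 1: plain TCP reconnect
        System.out.println(chooseTransport(true, true, 6881, 6881));  // 2: UDP fallback taken
        System.out.println(chooseTransport(true, true, 6881, 0));     // 1: no UDP port, stay on TCP
        System.out.println(chooseTransport(false, false, 0, 0));      // 0: reconnect not possible
    }
}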

Example 17 with PEPeerTransport

use of com.biglybt.core.peer.impl.PEPeerTransport in project BiglyBT by BiglySoftware.

The class PiecePickerImpl, method findRTAPieceToDownload().

protected final boolean findRTAPieceToDownload(PEPeerTransport pt, boolean best_uploader, long best_uploader_next_block_eta) {
    if (pt == null || pt.getPeerState() != PEPeer.TRANSFERING) {
        return (false);
    }
    final BitFlags peerHavePieces = pt.getAvailable();
    if (peerHavePieces == null || peerHavePieces.nbSet <= 0) {
        return (false);
    }
    String rta_log_str = LOG_RTA ? pt.getIp() : null;
    try {
        // how many KB/s the peer has been sending
        final int peerSpeed = (int) pt.getStats().getDataReceiveRate() / 1024;
        final int startI = peerHavePieces.start;
        final int endI = peerHavePieces.end;
        int piece_min_rta_index = -1;
        int piece_min_rta_block = 0;
        long piece_min_rta_time = Long.MAX_VALUE;
        long now = SystemTime.getCurrentTime();
        long my_next_block_eta = now + getNextBlockETAFromNow(pt);
        for (int i = startI; i <= endI; i++) {
            long piece_rta = provider_piece_rtas[i];
            if (peerHavePieces.flags[i] && startPriorities[i] == PRIORITY_REALTIME && piece_rta > 0) {
                final DiskManagerPiece dmPiece = dmPieces[i];
                if (!dmPiece.isDownloadable()) {
                    continue;
                }
                final PEPiece pePiece = pePieces[i];
                if (pePiece != null && pePiece.isDownloaded()) {
                    continue;
                }
                Object realtime_data = null;
                boolean try_allocate_even_though_late = my_next_block_eta > piece_rta && best_uploader_next_block_eta > piece_rta;
                if (piece_rta >= piece_min_rta_time) {
                // piece is less urgent than an already found one
                } else if (my_next_block_eta > piece_rta && !(best_uploader || best_uploader_next_block_eta > piece_rta)) {
                // only allocate if we have a chance of getting this block in time or we're
                // the best uploader we've got/even the best uploader can't get it
                // the second part is important for when we get to the point whereby no peers
                // can get a block in time. Here we need to allocate someone to get it as
                // otherwise we'll concentrate on getting lower priority pieces that we can
                // get in time and leave the stuck ones for just the best uploader to get
                } else if (pePiece == null || (realtime_data = pePiece.getRealTimeData()) == null) {
                    if (LOG_RTA)
                        rta_log_str += "{alloc_new=" + i + ",time=" + (piece_rta - now) + "}";
                    // no real-time block allocated yet
                    piece_min_rta_time = piece_rta;
                    piece_min_rta_index = i;
                    piece_min_rta_block = 0;
                } else {
                    RealTimeData rtd = (RealTimeData) realtime_data;
                    // check the blocks to see if any are now lagging behind their ETA given current peer speed
                    List[] peer_requests = rtd.getRequests();
                    for (int j = 0; j < peer_requests.length; j++) {
                        if (pePiece.isDownloaded(j) || pePiece.isWritten(j)) {
                            continue;
                        }
                        List block_peer_requests = peer_requests[j];
                        long best_eta = Long.MAX_VALUE;
                        boolean pt_already_present = false;
                        // tidy up existing request data
                        Iterator it = block_peer_requests.iterator();
                        while (it.hasNext()) {
                            RealTimePeerRequest pr = (RealTimePeerRequest) it.next();
                            PEPeerTransport this_pt = pr.getPeer();
                            if (this_pt.getPeerState() != PEPeer.TRANSFERING) {
                                if (LOG_RTA)
                                    rta_log_str += "{peer_dead=" + this_pt.getIp() + "}";
                                it.remove();
                                continue;
                            }
                            DiskManagerReadRequest this_request = pr.getRequest();
                            int request_index = this_pt.getRequestIndex(this_request);
                            if (request_index == -1) {
                                if (LOG_RTA)
                                    rta_log_str += "{request_lost=" + this_request.getPieceNumber() + "}";
                                it.remove();
                                continue;
                            }
                            if (this_pt == pt) {
                                pt_already_present = true;
                                break;
                            }
                            long this_up_bps = this_pt.getStats().getDataReceiveRate();
                            if (this_up_bps < 1) {
                                this_up_bps = 1;
                            }
                            int next_block_bytes = (request_index + 1) * DiskManager.BLOCK_SIZE;
                            long this_peer_eta = now + ((next_block_bytes * 1000) / this_up_bps);
                            best_eta = Math.min(best_eta, this_peer_eta);
                        }
                        if (!pt_already_present) {
                            if (block_peer_requests.size() == 0) {
                                if (LOG_RTA)
                                    rta_log_str += "{alloc as no req=" + i + ",block=" + j + ",time=" + (piece_rta - now) + "}";
                                piece_min_rta_time = piece_rta;
                                piece_min_rta_index = i;
                                piece_min_rta_block = j;
                                // earlier blocks always have priority
                                break;
                            } else if (best_eta > piece_rta && (best_uploader || !try_allocate_even_though_late)) {
                                if (LOG_RTA)
                                    rta_log_str += "{lagging=" + i + ",block=" + j + ",time=" + (best_eta - piece_rta) + "}";
                                if (my_next_block_eta < best_eta) {
                                    if (LOG_RTA)
                                        rta_log_str += "{taking over, time=" + (best_eta - my_next_block_eta) + "}";
                                    piece_min_rta_time = piece_rta;
                                    piece_min_rta_index = i;
                                    piece_min_rta_block = j;
                                    // earlier blocks always have priority
                                    break;
                                }
                            }
                        }
                    }
                }
            }
        }
        if (piece_min_rta_index != -1) {
            if (LOG_RTA)
                rta_log_str += ",{select_piece=" + piece_min_rta_index + ",block=" + piece_min_rta_block + ",time=" + (piece_min_rta_time - now) + "}";
            if (dispenser.dispense(1, DiskManager.BLOCK_SIZE) == 1 || (pt.isLANLocal() && !includeLanPeersInReqLimiting)) {
                PEPiece pePiece = pePieces[piece_min_rta_index];
                if (pePiece == null) {
                    // create piece manually
                    pePiece = new PEPieceImpl(this, dmPieces[piece_min_rta_index], peerSpeed >> 1);
                    // Assign the created piece to the pieces array.
                    peerControl.addPiece(pePiece, piece_min_rta_index, pt);
                    pePiece.setResumePriority(PRIORITY_REALTIME);
                    if (availability[piece_min_rta_index] <= globalMinOthers) {
                        nbRarestActive++;
                    }
                }
                RealTimeData rtd = (RealTimeData) pePiece.getRealTimeData();
                if (rtd == null) {
                    rtd = new RealTimeData(pePiece);
                    pePiece.setRealTimeData(rtd);
                }
                pePiece.getAndMarkBlock(pt, piece_min_rta_block);
                DiskManagerReadRequest request = pt.request(piece_min_rta_index, piece_min_rta_block * DiskManager.BLOCK_SIZE, pePiece.getBlockSize(piece_min_rta_block), true);
                if (request != null) {
                    peerControl.requestAdded(pePiece, pt, request);
                    List real_time_requests = rtd.getRequests()[piece_min_rta_block];
                    real_time_requests.add(new RealTimePeerRequest(pt, request));
                    pt.setLastPiece(piece_min_rta_index);
                    pePiece.setLastRequestedPeerSpeed(peerSpeed);
                    return (true);
                } else {
                    if (LOG_RTA)
                        rta_log_str += "{request failed}";
                    if (!pt.isLANLocal() || includeLanPeersInReqLimiting)
                        dispenser.returnUnusedChunks(1, DiskManager.BLOCK_SIZE);
                    return (false);
                }
            } else {
                if (LOG_RTA)
                    rta_log_str += "{dispenser denied}";
                return (false);
            }
        } else {
            if (LOG_RTA)
                rta_log_str += "{no piece found}";
            return (false);
        }
    } finally {
        if (LOG_RTA) {
            System.out.println(rta_log_str);
        }
    }
}
Also used: BitFlags(com.biglybt.core.peermanager.piecepicker.util.BitFlags), PEPieceImpl(com.biglybt.core.peer.impl.PEPieceImpl), PEPeerTransport(com.biglybt.core.peer.impl.PEPeerTransport), DMPieceList(com.biglybt.core.disk.impl.piecemapper.DMPieceList)
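
The inner loop above estimates when a competing peer would deliver a given block: a request sitting at position request_index in that peer's outstanding-request queue has roughly (request_index + 1) blocks of data queued ahead of and including it, so at the peer's current receive rate its expected completion time is now + queued_bytes / rate. A minimal standalone sketch of that estimate follows; it is a hypothetical helper, not BiglyBT code, and assumes the usual 16 KiB BitTorrent block size.

public final class BlockEtaSketch {

    // Assumed block size: the usual 16 KiB BitTorrent block
    // (DiskManager.BLOCK_SIZE in the snippet above).
    static final int BLOCK_SIZE = 16 * 1024;

    public static long estimateBlockEta(long nowMillis,
                                        int requestIndex,
                                        long receiveRateBytesPerSec) {
        // Guard against a stalled peer reporting a zero rate, as the loop above does.
        long rate = Math.max(1, receiveRateBytesPerSec);
        long queuedBytes = (long) (requestIndex + 1) * BLOCK_SIZE;
        return nowMillis + (queuedBytes * 1000) / rate;
    }

    public static void main(String[] args) {
        long now = System.currentTimeMillis();
        // Third outstanding request from a peer delivering ~64 KiB/s:
        // about 48 KiB queued ahead of it, so roughly 750 ms from now.
        System.out.println(estimateBlockEta(now, 2, 64 * 1024) - now); // 750
    }
}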

Example 18 with PEPeerTransport

use of com.biglybt.core.peer.impl.PEPeerTransport in project BiglyBT by BiglySoftware.

The class PiecePickerImpl, method recomputeAvailability().

private int[] recomputeAvailability() {
    if (availabilityDrift > 0 && availabilityDrift != nbPieces && Logger.isEnabled())
        Logger.log(new LogEvent(diskManager.getTorrent(), LOGID, LogEvent.LT_INFORMATION, "Recomputing availability. Drift=" + availabilityDrift + ":" + peerControl.getDisplayName()));
    final List peers = peerControl.getPeers();
    final int[] newAvailability = new int[nbPieces];
    int j;
    int i;
    // first our pieces
    for (j = 0; j < nbPieces; j++) newAvailability[j] = dmPieces[j].isDone() ? 1 : 0;
    // for all peers
    final int peersSize = peers.size();
    for (i = 0; i < peersSize; i++) {
        // get the peer connection
        final PEPeer peer = (PEPeerTransport) peers.get(i);
        if (peer != null && peer.getPeerState() == PEPeer.TRANSFERING) {
            // cycle through the pieces they actually have
            final BitFlags peerHavePieces = peer.getAvailable();
            if (peerHavePieces != null && peerHavePieces.nbSet > 0) {
                for (j = peerHavePieces.start; j <= peerHavePieces.end; j++) {
                    if (peerHavePieces.flags[j])
                        ++newAvailability[j];
                }
            }
        }
    }
    return newAvailability;
}
Also used: BitFlags(com.biglybt.core.peermanager.piecepicker.util.BitFlags), LogEvent(com.biglybt.core.logging.LogEvent), PEPeerTransport(com.biglybt.core.peer.impl.PEPeerTransport), DMPieceList(com.biglybt.core.disk.impl.piecemapper.DMPieceList)
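
recomputeAvailability() rebuilds the per-piece availability counts from scratch: one count for each piece already completed locally, plus one for every transferring peer whose bitfield has the piece set. A caller could then diff the result against the incrementally maintained array to confirm how much drift had accumulated. The sketch below is hypothetical (not BiglyBT code); the class and method names are invented for illustration.

public final class AvailabilityDriftCheck {

    // Returns how many piece indices disagree between the two availability arrays.
    public static int countMismatches(int[] maintained, int[] recomputed) {
        if (maintained.length != recomputed.length) {
            throw new IllegalArgumentException("piece counts differ");
        }
        int drift = 0;
        for (int i = 0; i < maintained.length; i++) {
            if (maintained[i] != recomputed[i]) {
                drift++;
            }
        }
        return drift;
    }

    public static void main(String[] args) {
        int[] maintained = { 2, 3, 1, 0, 4 };
        int[] recomputed = { 2, 3, 2, 0, 4 }; // e.g. a missed HAVE message for piece 2
        System.out.println(countMismatches(maintained, recomputed)); // 1
    }
}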

Aggregations

PEPeerTransport (com.biglybt.core.peer.impl.PEPeerTransport): 18
DMPieceList (com.biglybt.core.disk.impl.piecemapper.DMPieceList): 5
PEPeer (com.biglybt.core.peer.PEPeer): 5
BitFlags (com.biglybt.core.peermanager.piecepicker.util.BitFlags): 4
PeerItem (com.biglybt.core.peermanager.peerdb.PeerItem): 3
GC (org.eclipse.swt.graphics.GC): 3
DiskManager (com.biglybt.core.disk.DiskManager): 2
HashListener (com.biglybt.core.disk.DiskManagerCheckRequestListener.HashListener): 2
DownloadManager (com.biglybt.core.download.DownloadManager): 2
PiecePicker (com.biglybt.core.peermanager.piecepicker.PiecePicker): 2
TOTorrentFile (com.biglybt.core.torrent.TOTorrentFile): 2
TOTorrentFileHashTree (com.biglybt.core.torrent.TOTorrentFileHashTree): 2
AERunnable (com.biglybt.core.util.AERunnable): 2
DisplayFormatters (com.biglybt.core.util.DisplayFormatters): 2
SimpleTimer (com.biglybt.core.util.SimpleTimer): 2
SystemTime (com.biglybt.core.util.SystemTime): 2
Image (org.eclipse.swt.graphics.Image): 2
Rectangle (org.eclipse.swt.graphics.Rectangle): 2
DiskManagerReadRequest (com.biglybt.core.disk.DiskManagerReadRequest): 1
DiskManagerReadRequestListener (com.biglybt.core.disk.DiskManagerReadRequestListener): 1