use of com.biglybt.core.peermanager.piecepicker.util.BitFlags in project BiglyBT by BiglySoftware.
the class PiecePickerImpl method recomputeAvailability.
private int[] recomputeAvailability() {
if (availabilityDrift > 0 && availabilityDrift != nbPieces && Logger.isEnabled())
Logger.log(new LogEvent(diskManager.getTorrent(), LOGID, LogEvent.LT_INFORMATION, "Recomputing availability. Drift=" + availabilityDrift + ":" + peerControl.getDisplayName()));
final List peers = peerControl.getPeers();
final int[] newAvailability = new int[nbPieces];
int j;
int i;
// first our pieces
for (j = 0; j < nbPieces; j++) newAvailability[j] = dmPieces[j].isDone() ? 1 : 0;
// for all peers
final int peersSize = peers.size();
for (i = 0; i < peersSize; i++) {
// get the peer connection
final PEPeer peer = (PEPeerTransport) peers.get(i);
if (peer != null && peer.getPeerState() == PEPeer.TRANSFERING) {
// cycle through the pieces they actually have
final BitFlags peerHavePieces = peer.getAvailable();
if (peerHavePieces != null && peerHavePieces.nbSet > 0) {
for (j = peerHavePieces.start; j <= peerHavePieces.end; j++) {
if (peerHavePieces.flags[j])
++newAvailability[j];
}
}
}
}
return newAvailability;
}
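The snippets in this section read BitFlags' public state (flags, nbSet, start, end) directly rather than through accessors. Here is a minimal stand-in for just the members exercised below, as an illustration of the invariants the callers rely on; the real com.biglybt.core.peermanager.piecepicker.util.BitFlags carries more operations than shown:
class BitFlagsSketch {
    public final boolean[] flags; // one flag per piece
    public int nbSet;             // number of raised flags
    public int start;             // index of the first raised flag
    public int end;               // index of the last raised flag
    BitFlagsSketch(int count) {
        flags = new boolean[count];
    }
    void set(int i) { // raise one flag, maintaining the bookkeeping
        if (!flags[i]) {
            flags[i] = true;
            if (nbSet++ == 0 || i < start) start = i;
            if (i > end) end = i;
        }
    }
    void setOnly(int i) { // clear everything, then raise a single flag
        java.util.Arrays.fill(flags, false);
        flags[i] = true;
        nbSet = 1;
        start = end = i;
    }
    void setEnd(int i) { // raise a flag at or beyond the current end
        if (!flags[i]) {
            flags[i] = true;
            nbSet++;
            if (i > end) end = i;
        }
    }
}
With that shape in mind, recomputeAvailability above is just a per-piece sum of flags[j] across all transferring peers, restricted to each peer's [start, end] window.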
use of com.biglybt.core.peermanager.piecepicker.util.BitFlags in project BiglyBT by BiglySoftware.
the class PiecePickerImpl method getRequestCandidate.
/**
* This method is the downloading core. It decides, for a given peer,
* which block should be requested. Here is the overall algorithm:
* 0. If there is a FORCED_PIECE or reserved piece, that will be started/resumed if possible
* 1. Scan all the active pieces and find the rarest piece (and highest priority among equally rarest)
* that can possibly be continued by this peer, if any
* 2. While scanning the active pieces, develop a list of equally highest priority pieces
* (and equally rarest among those) as candidates for starting a new piece
* 3. If no piece can be found, this means all pieces are
* already downloaded or fully requested
* 4. Return the chosen piece number if a request to be made is found,
* or -1 if none could be found
* @param pt PEPeerTransport to work with
*
* @return the piece number to be requested, or -1 if no request could be found
*/
private int getRequestCandidate(final PEPeerTransport pt) {
if (pt == null || pt.getPeerState() != PEPeer.TRANSFERING)
return -1;
final BitFlags peerHavePieces = pt.getAvailable();
if (peerHavePieces == null || peerHavePieces.nbSet <= 0)
return -1;
// piece number and its block number that we'll try to DL
int[] reservedPieceNumbers = pt.getReservedPieceNumbers();
if (reservedPieceNumbers != null) {
for (int reservedPieceNumber : reservedPieceNumbers) {
PEPiece pePiece = pePieces[reservedPieceNumber];
if (pePiece != null) {
String peerReserved = pePiece.getReservedBy();
if (peerReserved != null && peerReserved.equals(pt.getIp())) {
if (peerHavePieces.flags[reservedPieceNumber] && pePiece.isRequestable()) {
return reservedPieceNumber;
} else {
pePiece.setReservedBy(null);
}
}
}
// reserved piece is no longer valid, dump it
pt.removeReservedPieceNumber(reservedPieceNumber);
}
// note, pieces reserved to peers that get disconnected are released in pepeercontrol
}
int reservedPieceNumber = -1;
// how many KB/s the peer has been sending
final int peerSpeed = (int) pt.getStats().getDataReceiveRate() / 1024;
final int lastPiece = pt.getLastPiece();
// final boolean rarestOverride = calcRarestAllowed() > 0;
final int nbSnubbed = peerControl.getNbPeersSnubbed();
long resumeMinAvail = Long.MAX_VALUE;
int resumeMaxPriority = Integer.MIN_VALUE;
// can the peer continue a piece with the lowest avail of all pieces we want
boolean resumeIsRarest = false;
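// fallback resume candidate: tracks an active piece (preferring higher availability), returned only when nothing better qualifies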
int secondChoiceResume = -1;
BitFlags startCandidates = null;
int startMaxPriority = Integer.MIN_VALUE;
int startMinAvail = Integer.MAX_VALUE;
boolean startIsRarest = false;
boolean forceStart = false;
// aggregate priority of piece under inspection (start priority or resume priority for pieces to be resumed)
int priority;
// the swarm-wide availability level of the piece under inspection
int avail = 0;
// how long since the PEPiece first started downloading (requesting, actually)
long pieceAge;
final boolean rarestAllowed = calcRarestAllowed() > 0;
final int startI = peerHavePieces.start;
final int endI = peerHavePieces.end;
int i;
final int[] peerPriorities = pt.getPriorityOffsets();
final long now = SystemTime.getCurrentTime();
int[] request_hint = pt.getRequestHint();
int request_hint_piece_number;
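// a per-peer request hint takes precedence; the global hint is only consulted when the peer has none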
if (request_hint != null) {
request_hint_piece_number = request_hint[0];
if (dmPieces[request_hint_piece_number].isDone()) {
pt.clearRequestHint();
request_hint_piece_number = -1;
}
} else {
request_hint_piece_number = -1;
}
if (request_hint_piece_number == -1) {
int[] g_hint = global_request_hint;
if (g_hint != null) {
request_hint_piece_number = g_hint[0];
if (dmPieces[request_hint_piece_number].isDone()) {
g_hint = null;
request_hint_piece_number = -1;
}
}
}
CopyOnWriteSet<Integer> forced = forced_pieces;
for (i = startI; i <= endI; i++) {
if (peerHavePieces.flags[i]) {
priority = startPriorities[i];
final DiskManagerPiece dmPiece = dmPieces[i];
if (priority >= 0 && dmPiece.isDownloadable()) {
if (peerPriorities != null) {
int peer_priority = peerPriorities[i];
if (peer_priority < 0) {
continue;
}
priority += peer_priority;
}
if (enable_request_hints && i == request_hint_piece_number) {
priority += PRIORITY_REQUEST_HINT;
PEPiece pePiece = pePieces[i];
if (pePiece == null) {
forceStart = true;
} else {
pePiece.setReservedBy(pt.getIp());
pt.addReservedPieceNumber(i);
}
}
final PEPiece pePiece = pePieces[i];
if (pePiece == null || pePiece.isRequestable()) {
// if this priority exceeds the priority-override threshold then we override rarity
boolean pieceRarestOverride = priority >= PRIORITY_OVERRIDES_RAREST || rarestAllowed;
// piece is: Needed, not fully: Requested, Downloaded, Written, hash-Checking or Done
avail = availability[i];
if (avail == 0) {
// maybe we didn't know we could get it before
// but the peer says s/he has it
availability[i] = 1;
avail = 1;
} else if (forced != null && forced.contains(i)) {
// temp override for avail for force
avail = globalMinOthers;
} else if (sequentialDownload != 0 && globalMinOthers > 1) {
// temp override for seq download
avail = globalMinOthers;
}
// is the piece active
if (pePiece != null) {
if (priority != startPriorities[i])
// maintained for display purposes only
pePiece.setResumePriority(priority);
boolean startedRarest = rarestStartedPieces.contains(pePiece);
boolean rarestPrio = avail <= globalMinOthers && (startedRarest || rarestAllowed);
// How many requests can still be made on this piece?
final int freeReqs = pePiece.getNbUnrequested();
if (freeReqs <= 0) {
pePiece.setRequested();
continue;
}
// Don't touch pieces reserved for others
final String peerReserved = pePiece.getReservedBy();
if (peerReserved != null) {
if (!peerReserved.equals(pt.getIp()))
// reserved to somebody else
continue;
// the peer forgot this is reserved to him; re-associate it
pt.addReservedPieceNumber(i);
return i;
}
int pieceSpeed = pePiece.getSpeed();
// ### Piece/Peer speed checks
boolean mayResume = true;
if (pt.isSnubbed()) {
// snubbed peers shouldn't stall fast pieces under ANY condition
// may lead to trouble when the snubbed peer is the only seed, needs further testing
mayResume &= pieceSpeed < 1;
mayResume &= freeReqs > 2 || avail <= nbSnubbed;
} else {
// slower peers are allowed as long as there is enough free room
// || rarestPrio;
mayResume &= freeReqs * peerSpeed >= pieceSpeed / 2;
// prevent non-snubbed peers from resuming on snubbed-peer-pieces but still allow them to resume stalled pieces
mayResume &= peerSpeed < 2 || pieceSpeed > 0 || pePiece.getNbRequests() == 0;
mayResume |= i == pt.getLastPiece();
}
// this will prevent unnecessary piece starting
if (secondChoiceResume == -1 || avail > availability[secondChoiceResume])
secondChoiceResume = i;
if (!mayResume)
continue;
if (avail > resumeMinAvail)
continue;
priority += pieceSpeed;
priority += (i == lastPiece) ? PRIORITY_W_SAME_PIECE : 0;
// Adjust priority for purpose of continuing pieces
// how long since last written to (if written to)
priority += pePiece.getTimeSinceLastActivity() / PRIORITY_DW_STALE;
// how long since piece was started
pieceAge = now - pePiece.getCreationTime();
if (pieceAge > 0)
priority += PRIORITY_W_AGE * pieceAge / (PRIORITY_DW_AGE * dmPiece.getNbBlocks());
// how much is already written to disk
priority += (PRIORITY_W_PIECE_DONE * dmPiece.getNbWritten()) / dmPiece.getNbBlocks();
// this is only for display
pePiece.setResumePriority(priority);
if (avail < resumeMinAvail || (avail == resumeMinAvail && priority > resumeMaxPriority)) {
// Verify it's still possible to get a block to request from this piece
if (pePiece.hasUnrequestedBlock()) {
// change the different variables to reflect interest in this block
reservedPieceNumber = i;
resumeMinAvail = avail;
resumeMaxPriority = priority;
resumeIsRarest = rarestPrio;
}
}
} else if (avail <= globalMinOthers && rarestAllowed) {
// rarest pieces only from now on
if (!startIsRarest) {
// 1st rarest piece
if (startCandidates == null)
startCandidates = new BitFlags(nbPieces);
startMaxPriority = priority;
startMinAvail = avail;
startIsRarest = avail <= globalMinOthers;
// clear the non-rarest bits in favor of only rarest
startCandidates.setOnly(i);
} else if (priority > startMaxPriority) {
// continuing rarest, higher priority level
if (startCandidates == null)
startCandidates = new BitFlags(nbPieces);
startMaxPriority = priority;
startCandidates.setOnly(i);
} else if (priority == startMaxPriority) {
// continuing rarest, same priority level
startCandidates.setEnd(i);
}
} else if (!startIsRarest || !rarestAllowed) {
// not doing rarest pieces
if (priority > startMaxPriority) {
// new priority level
if (startCandidates == null)
startCandidates = new BitFlags(nbPieces);
startMaxPriority = priority;
startMinAvail = avail;
startIsRarest = avail <= globalMinOthers;
startCandidates.setOnly(i);
} else if (priority == startMaxPriority) {
// continuing same priority level
if (startCandidates == null)
startCandidates = new BitFlags(nbPieces);
if (avail < startMinAvail) {
// same priority, new availability level
startMinAvail = avail;
startIsRarest = avail <= globalMinOthers;
startCandidates.setOnly(i);
} else if (avail == startMinAvail) {
// same priority level, same availability level
startCandidates.setEnd(i);
}
}
}
}
}
}
}
if (!forceStart || startCandidates == null || startCandidates.nbSet <= 0) {
// can & should or must resume a piece?
if (reservedPieceNumber >= 0 && (resumeIsRarest || !startIsRarest || !rarestAllowed || startCandidates == null || startCandidates.nbSet <= 0))
return reservedPieceNumber;
if (secondChoiceResume != -1 && (startCandidates == null || startCandidates.nbSet <= 0)) {
// System.out.println("second choice resume:"+secondChoiceResume);
return secondChoiceResume;
}
// resuming more non-rarest pieces lets them complete and be re-shared,
// which can make us interesting to more peers and generally improve the speed of the swarm;
// however, it can sometimes be hard to get the rarest pieces, such as when a holder unchokes very infrequently
// 20060312[MjrTom] this can lead to TOO many active pieces, so do the extra check with arbitrary # of active pieces
final boolean resumeIsBetter;
// check at an arbitrary figure of 32 active pieces
if (reservedPieceNumber >= 0 && globalMinOthers > 0 && peerControl.getNbActivePieces() > 32) {
resumeIsBetter = (resumeMaxPriority / resumeMinAvail) > (startMaxPriority / globalMinOthers);
if (Constants.isCVSVersion() && Logger.isEnabled())
Logger.log(new LogEvent(new Object[] { pt, peerControl }, LOGID, "Start/resume choice; piece #:" + reservedPieceNumber + " resumeIsBetter:" + resumeIsBetter + " globalMinOthers=" + globalMinOthers + " startMaxPriority=" + startMaxPriority + " startMinAvail=" + startMinAvail + " resumeMaxPriority=" + resumeMaxPriority + " resumeMinAvail=" + resumeMinAvail + " : " + pt));
if (resumeIsBetter)
return reservedPieceNumber;
}
}
// start a new piece; select piece from start candidates bitfield
return getPieceToStart(startCandidates);
}
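getPieceToStart(startCandidates) is not shown in this excerpt. Here is a plausible sketch of the job it has to do, assuming a uniform random choice among the candidate bits so that equally-good candidates spread across peers; the real picker may weight its choice differently:
// Hypothetical helper: choose one piece out of the start-candidate bitfield,
// or return -1 when there is nothing to start.
private int getPieceToStartSketch(BitFlags candidates) {
    if (candidates == null || candidates.nbSet <= 0)
        return -1;
    // walk to the n-th raised flag, with n chosen at random
    int target = java.util.concurrent.ThreadLocalRandom.current().nextInt(candidates.nbSet);
    for (int i = candidates.start; i <= candidates.end; i++) {
        if (candidates.flags[i] && target-- == 0)
            return i;
    }
    return -1;
}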
use of com.biglybt.core.peermanager.piecepicker.util.BitFlags in project BiglyBT by BiglySoftware.
the class PEPeerControlImpl method doConnectionChecks.
private void doConnectionChecks() {
// if mixed networks then we have potentially two connection limits
// 1) general peer one - e.g. 100
// 2) general+reserved slots for non-public net - e.g. 103
// so we get to schedule 3 'extra' non-public connections
// every 1 second
boolean has_ipv6 = false;
boolean has_ipv4 = false;
boolean can_ipv6 = network_admin.hasIPV6Potential(true);
if (mainloop_loop_count % MAINLOOP_ONE_SECOND_INTERVAL == 0) {
// need to sync the rates periodically as when upload is disabled (for example)
// we can end up with nothing requesting the rate, so changes are never noticed
upload_limited_rate_group.getRateLimitBytesPerSecond();
download_limited_rate_group.getRateLimitBytesPerSecond();
final List<PEPeerTransport> peer_transports = peer_transports_cow;
int num_waiting_establishments = 0;
int udp_connections = 0;
for (int i = 0; i < peer_transports.size(); i++) {
final PEPeerTransport transport = peer_transports.get(i);
// update waiting count
final int state = transport.getConnectionState();
if (state == PEPeerTransport.CONNECTION_PENDING || state == PEPeerTransport.CONNECTION_CONNECTING) {
num_waiting_establishments++;
} else {
if (can_ipv6 && transport.getNetwork() == AENetworkClassifier.AT_PUBLIC) {
boolean is_ipv6 = transport.getIp().contains(":");
if (is_ipv6) {
has_ipv6 = true;
} else {
has_ipv4 = true;
}
}
}
if (!transport.isTCP()) {
udp_connections++;
}
}
int[] allowed_seeds_info = getMaxSeedConnections();
int base_allowed_seeds = allowed_seeds_info[0];
if (base_allowed_seeds > 0) {
int extra_seeds = allowed_seeds_info[1];
int to_disconnect = _seeds - base_allowed_seeds;
if (to_disconnect > 0) {
// seed connections are limited so we can get a reasonable upload by connecting
// to leechers where possible. disconnect seeds from the end of the list to prevent
// cycling of seeds
Set<PEPeerTransport> to_retain = new HashSet<>();
if (extra_seeds > 0) {
for (PEPeerTransport transport : peer_transports) {
if (transport.isSeed() && transport.getNetwork() != AENetworkClassifier.AT_PUBLIC) {
to_retain.add(transport);
if (to_retain.size() == extra_seeds) {
break;
}
}
}
to_disconnect -= to_retain.size();
}
for (int i = peer_transports.size() - 1; i >= 0 && to_disconnect > 0; i--) {
final PEPeerTransport transport = peer_transports.get(i);
if (transport.isSeed()) {
if (!to_retain.contains(transport)) {
closeAndRemovePeer(transport, "Too many seeds", false);
to_disconnect--;
}
}
}
}
}
int[] allowed_info = getMaxNewConnectionsAllowed();
int allowed_base = allowed_info[0];
if (allowed_base < 0 || allowed_base > 1000) {
// enforce a hard upper limit so it doesn't get out of control when using PEX
allowed_base = 1000;
allowed_info[0] = allowed_base;
}
if (adapter.isNATHealthy()) {
// if unfirewalled, leave slots avail for remote connections
// leave 5%
int free = getMaxConnections()[0] / 20;
allowed_base = allowed_base - free;
allowed_info[0] = allowed_base;
}
for (int i = 0; i < allowed_info.length; i++) {
int allowed = allowed_info[i];
if (allowed > 0) {
// try and connect only as many as necessary
final int wanted = TCPConnectionManager.MAX_SIMULTANEOUS_CONNECT_ATTEMPTS - num_waiting_establishments;
if (wanted > allowed) {
num_waiting_establishments += wanted - allowed;
}
int remaining = allowed;
int tcp_remaining = TCPNetworkManager.getSingleton().getConnectDisconnectManager().getMaxOutboundPermitted();
int udp_remaining = UDPNetworkManager.getSingleton().getConnectionManager().getMaxOutboundPermitted();
while (num_waiting_establishments < TCPConnectionManager.MAX_SIMULTANEOUS_CONNECT_ATTEMPTS && (tcp_remaining > 0 || udp_remaining > 0)) {
if (!is_running)
break;
final PeerItem item = peer_database.getNextOptimisticConnectPeer(i == 1);
if (item == null || !is_running)
break;
final PeerItem self = peer_database.getSelfPeer();
if (self != null && self.equals(item)) {
continue;
}
if (!isAlreadyConnected(item)) {
final String source = PeerItem.convertSourceString(item.getSource());
final boolean use_crypto = item.getHandshakeType() == PeerItemFactory.HANDSHAKE_TYPE_CRYPTO;
int tcp_port = item.getTCPPort();
int udp_port = item.getUDPPort();
if (udp_port == 0 && udp_probe_enabled) {
// for probing we assume the udp port is the same as tcp
udp_port = tcp_port;
}
boolean prefer_udp_overall = prefer_udp || prefer_udp_default;
if (prefer_udp_overall && udp_port == 0) {
// see if we have a previous record of this address as udp connectable
byte[] address = item.getIP().getBytes();
BloomFilter bloom = prefer_udp_bloom;
if (bloom != null && bloom.contains(address)) {
udp_port = tcp_port;
}
}
boolean tcp_ok = TCPNetworkManager.TCP_OUTGOING_ENABLED && tcp_port > 0 && tcp_remaining > 0;
boolean udp_ok = UDPNetworkManager.UDP_OUTGOING_ENABLED && udp_port > 0 && udp_remaining > 0;
if (tcp_ok && !(prefer_udp_overall && udp_ok)) {
if (makeNewOutgoingConnection(source, item.getAddressString(), tcp_port, udp_port, true, use_crypto, item.getCryptoLevel(), null) == null) {
tcp_remaining--;
num_waiting_establishments++;
remaining--;
}
} else if (udp_ok) {
if (makeNewOutgoingConnection(source, item.getAddressString(), tcp_port, udp_port, false, use_crypto, item.getCryptoLevel(), null) == null) {
udp_remaining--;
num_waiting_establishments++;
remaining--;
}
}
}
}
if (i == 0) {
if (UDPNetworkManager.UDP_OUTGOING_ENABLED && remaining > 0 && udp_remaining > 0 && udp_connections < MAX_UDP_CONNECTIONS) {
doUDPConnectionChecks(remaining);
}
}
}
}
}
// every 5 seconds
if (mainloop_loop_count % MAINLOOP_FIVE_SECOND_INTERVAL == 0) {
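// dual-stack duplicate detection only pays off while seeding a mixed IPv4/IPv6 swarm, and runs at most every 10 seconds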
boolean do_dup_con_checks = dual_ipv4_ipv6_connection_action != 0 && (mainloop_loop_count % MAINLOOP_TEN_SECOND_INTERVAL == 0) && seeding_mode && has_ipv4 && has_ipv6 && !superSeedMode;
long piece_length = disk_mgr.getPieceLength();
final int DUP_CHECK_MIN_PIECES = 10;
final int min_done = Math.max(1, (int) ((piece_length * DUP_CHECK_MIN_PIECES * 1000) / disk_mgr.getTotalLength()));
final List<PEPeerTransport> peer_transports = peer_transports_cow;
List<PEPeerTransport> interesting_peers = new ArrayList<>(peer_transports.size());
for (int i = 0; i < peer_transports.size(); i++) {
final PEPeerTransport transport = peer_transports.get(i);
if (transport.doTimeoutChecks()) {
continue;
}
// keep-alive check
transport.doKeepAliveCheck();
// speed tuning check
transport.doPerformanceTuningCheck();
if (do_dup_con_checks) {
if (transport.getNetwork() == AENetworkClassifier.AT_PUBLIC) {
int done = transport.getPercentDoneInThousandNotation();
if (done < 1000 && done > min_done) {
interesting_peers.add(transport);
}
}
}
}
if (interesting_peers.size() > 1) {
Collections.sort(interesting_peers, new Comparator<PEPeerTransport>() {
public int compare(PEPeerTransport p1, PEPeerTransport p2) {
return (p1.getPercentDoneInThousandNotation() - p2.getPercentDoneInThousandNotation());
}
});
// look for duplicate connections from a peer over ipv4 + ipv6
int DUP_CHECK_TOLERANCE = Math.max(1, min_done / 2);
List<PEPeerTransport> to_ban = new ArrayList<>();
for (int i = 0; i < interesting_peers.size(); i++) {
PEPeerTransport peer1 = interesting_peers.get(i);
int p1_done = peer1.getPercentDoneInThousandNotation();
boolean p1_ipv6 = peer1.getIp().contains(":");
for (int j = i + 1; j < interesting_peers.size(); j++) {
PEPeerTransport peer2 = interesting_peers.get(j);
int p2_done = peer2.getPercentDoneInThousandNotation();
if (Math.abs(p2_done - p1_done) <= DUP_CHECK_TOLERANCE) {
BitFlags f1 = peer1.getAvailable();
BitFlags f2 = peer2.getAvailable();
if (f1 == null || f2 == null) {
continue;
}
boolean p2_ipv6 = peer2.getIp().contains(":");
if (p1_ipv6 == p2_ipv6) {
continue;
}
String cc_match = null;
PEPeerTransport[] peers = { peer1, peer2 };
for (PEPeerTransport peer : peers) {
String[] details = (String[]) peer.getUserData(DUP_PEER_CC_KEY);
if (details == null) {
try {
details = PeerUtils.getCountryDetails(peer);
} catch (Throwable e) {
}
if (details == null) {
details = new String[0];
}
peer.setUserData(DUP_PEER_CC_KEY, details);
}
if (details.length > 0) {
String cc = details[0];
if (cc_match == null) {
cc_match = cc;
} else if (!cc.equals(cc_match)) {
cc_match = null;
}
} else {
cc_match = null;
break;
}
}
if (cc_match == null) {
continue;
}
boolean[] b1 = f1.flags;
boolean[] b2 = f2.flags;
int same_pieces = 0;
for (int k = 0; k < b1.length; k++) {
if (b1[k] && b2[k]) {
same_pieces++;
}
}
int max_pieces = Math.max(f1.nbSet, f2.nbSet);
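// treat the pair as one dual-stack client only when at least ~95% of the larger bitfield overlaps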
if (same_pieces < DUP_CHECK_MIN_PIECES || max_pieces < same_pieces || (same_pieces * 100) / max_pieces < 95) {
continue;
}
String[] ass = new String[2];
int hits = 0;
for (PEPeerTransport peer : peers) {
String as = (String) peer.getUserData(DUP_PEER_AS_KEY);
if (as == null) {
// prevent other lookups regardless
peer.setUserData(DUP_PEER_AS_KEY, "");
try {
network_admin.lookupASN(HostNameToIPResolver.syncResolve(peer.getIp()), new NetworkAdminASNListener() {
@Override
public void success(NetworkAdminASN asn) {
peer.setUserData(DUP_PEER_AS_KEY, asn.getAS());
}
@Override
public void failed(NetworkAdminException error) {
}
});
} catch (Throwable e) {
}
} else if (!as.isEmpty()) {
ass[hits++] = as;
}
}
if (hits == 2 && ass[0].equals(ass[1])) {
PEPeerTransport peer_to_ban;
if (dual_ipv4_ipv6_connection_action == 1) {
if (p1_ipv6) {
peer_to_ban = peer2;
} else {
peer_to_ban = peer1;
}
} else {
if (p1_ipv6) {
peer_to_ban = peer1;
} else {
peer_to_ban = peer2;
}
}
to_ban.add(peer_to_ban);
}
} else {
break;
}
}
}
for (PEPeerTransport peer : to_ban) {
String msg = "Duplicate IPv4 and IPv6 connection detected";
ip_filter.ban(peer.getIp(), getDisplayName() + ": " + msg, false);
closeAndRemovePeer(peer, msg, true);
}
}
}
// every 10 seconds check for connected + banned peers
if (mainloop_loop_count % MAINLOOP_TEN_SECOND_INTERVAL == 0) {
final long last_update = ip_filter.getLastUpdateTime();
if (last_update != ip_filter_last_update_time) {
ip_filter_last_update_time = last_update;
checkForBannedConnections();
}
}
// every 30 seconds
if (mainloop_loop_count % MAINLOOP_THIRTY_SECOND_INTERVAL == 0) {
// if we're at our connection limit, time out the least-useful
// one so we can establish a possibly-better new connection
optimisticDisconnectCount = 0;
int[] allowed = getMaxNewConnectionsAllowed();
if (allowed[0] + allowed[1] == 0) {
// we've reached limit
doOptimisticDisconnect(false, false, "");
}
}
// sweep over all peers in a 60 second timespan
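// each one-second tick advances the sweep goal proportionally, so the whole list is covered once per minute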
float percentage = ((mainloop_loop_count % MAINLOOP_SIXTY_SECOND_INTERVAL) + 1F) / (1F * MAINLOOP_SIXTY_SECOND_INTERVAL);
int goal;
if (mainloop_loop_count % MAINLOOP_SIXTY_SECOND_INTERVAL == 0) {
goal = 0;
sweepList = peer_transports_cow;
} else {
goal = (int) Math.floor(percentage * sweepList.size());
}
for (int i = nextPEXSweepIndex; i < goal && i < sweepList.size(); i++) {
// System.out.println(mainloop_loop_count+" %:"+percentage+" start:"+nextPEXSweepIndex+" current:"+i+" <"+goal+"/"+sweepList.size());
final PEPeerTransport peer = sweepList.get(i);
peer.updatePeerExchange();
}
nextPEXSweepIndex = goal;
if (mainloop_loop_count % MAINLOOP_SIXTY_SECOND_INTERVAL == 0) {
List<PEPeerTransport> peer_transports = peer_transports_cow;
if (peer_transports.size() > 1) {
Map<String, List<PEPeerTransport>> peer_map = new HashMap<>();
for (PEPeerTransport peer : peer_transports) {
if (peer.isIncoming()) {
continue;
}
if (peer.getPeerState() == PEPeer.CONNECTING && peer.getConnectionState() == PEPeerTransport.CONNECTION_CONNECTING && peer.getLastMessageSentTime() != 0) {
String key = peer.getIp() + ":" + peer.getPort();
List<PEPeerTransport> list = peer_map.get(key);
if (list == null) {
list = new ArrayList<>(1);
peer_map.put(key, list);
}
list.add(peer);
}
}
for (List<PEPeerTransport> list : peer_map.values()) {
if (list.size() >= 2) {
long newest_time = Long.MIN_VALUE;
PEPeerTransport newest_peer = null;
for (PEPeerTransport peer : list) {
long last_sent = peer.getLastMessageSentTime();
if (last_sent > newest_time) {
newest_time = last_sent;
newest_peer = peer;
}
}
for (PEPeerTransport peer : list) {
if (peer != newest_peer) {
if (peer.getPeerState() == PEPeer.CONNECTING && peer.getConnectionState() == PEPeerTransport.CONNECTION_CONNECTING) {
closeAndRemovePeer(peer, "Removing old duplicate connection", false);
}
}
}
}
}
}
}
}
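The dual-stack duplicate test above reduces to a bitfield-overlap ratio between the two peers' availability. The same check in isolation, as a hedged sketch with illustrative names:
// true when the two availability bitfields agree on at least minPercent
// of the larger peer's pieces; the loop above uses minPieces = 10, minPercent = 95
// (minPieces >= 1 is assumed, which keeps the division safe)
static boolean bitfieldsMatch(BitFlags f1, BitFlags f2, int minPieces, int minPercent) {
    boolean[] b1 = f1.flags;
    boolean[] b2 = f2.flags;
    int same = 0;
    for (int i = 0; i < Math.min(b1.length, b2.length); i++) {
        if (b1[i] && b2[i])
            same++;
    }
    int max = Math.max(f1.nbSet, f2.nbSet);
    return same >= minPieces && same <= max && (same * 100) / max >= minPercent;
}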
use of com.biglybt.core.peermanager.piecepicker.util.BitFlags in project BiglyBT by BiglySoftware.
the class PEPeerControlHashHandlerImpl method hashRequestSupport.
private PeerHashRequest hashRequestSupport(int piece_number, HashListener listener) {
List<PEPeer> peers = peer_manager.getPeers();
PEPeer best_peer = null;
int best_req_count = Integer.MAX_VALUE;
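// first pass: an idle peer (no outstanding requests) that has the piece gets the request immediately; otherwise remember the least-loaded candidate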
for (PEPeer peer : peers) {
if (peer.getPeerState() != PEPeer.TRANSFERING) {
continue;
}
BitFlags avail = peer.getAvailable();
if (avail != null && avail.flags[piece_number]) {
int req_count = peer.getOutgoingRequestCount();
if (req_count == 0) {
PeerHashRequest res = request((PEPeerTransport) peer, piece_number, listener);
if (res != null) {
return (res);
}
} else {
if (req_count < best_req_count) {
best_peer = peer;
best_req_count = req_count;
}
}
}
}
if (best_peer != null) {
PeerHashRequest res = request((PEPeerTransport) best_peer, piece_number, listener);
return (res);
} else {
return (null);
}
}
use of com.biglybt.core.peermanager.piecepicker.util.BitFlags in project BiglyBT by BiglySoftware.
the class PEPeerTransportProtocol method spoofMDAvailability.
private void spoofMDAvailability(int mds) {
int md_pieces = (mds + 16 * 1024 - 1) / (16 * 1024);
manager.setTorrentInfoDictSize(mds);
BitFlags tempHavePieces = new BitFlags(nbPieces);
for (int i = 0; i < md_pieces; i++) {
tempHavePieces.set(i);
}
peerHavePieces = tempHavePieces;
addAvailability();
really_choked_by_other_peer = false;
calculatePiecePriorities();
}
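The md_pieces computation is ceiling division by the 16 KiB piece size that the BEP 9 metadata extension uses. A worked example, assuming a hypothetical 45,000-byte info dictionary:
int mds = 45_000; // metadata size in bytes (illustrative value)
int pieceSize = 16 * 1024; // metadata travels in 16 KiB pieces
int md_pieces = (mds + pieceSize - 1) / pieceSize;
// (45_000 + 16_383) / 16_384 = 3, so flags 0..2 are raised in tempHavePieces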