Use of com.biglybt.core.peer.impl.PEPeerTransport in project BiglyBT by BiglySoftware.
The class ConnectedTimeItem, method refresh:
@Override
public void refresh(TableCell cell) {
    PEPeerTransport peer = (PEPeerTransport) cell.getDataSource();
    long value = (peer == null) ? 0 : peer.getTimeSinceConnectionEstablished();
    if (!cell.setSortValue(value) && cell.isValid()) {
        return;
    }
    cell.setText(TimeFormatter.format(value / 1000));
}
Use of com.biglybt.core.peer.impl.PEPeerTransport in project BiglyBT by BiglySoftware.
The class StateItem, method refresh:
@Override
public void refresh(TableCell cell) {
    // TODO fix this "naughty" cast
    PEPeerTransport peer = (PEPeerTransport) cell.getDataSource();
    String state_text = "";
    if (peer != null) {
        int state = peer.getConnectionState();
        if (!cell.setSortValue(state) && cell.isValid()) {
            return;
        }
        switch (state) {
            case PEPeerTransport.CONNECTION_PENDING:
                state_text = MessageText.getString("PeersView.state.pending");
                break;
            case PEPeerTransport.CONNECTION_CONNECTING:
                state_text = MessageText.getString("PeersView.state.connecting");
                break;
            case PEPeerTransport.CONNECTION_WAITING_FOR_HANDSHAKE:
                state_text = MessageText.getString("PeersView.state.handshake");
                break;
            case PEPeerTransport.CONNECTION_FULLY_ESTABLISHED:
                state_text = MessageText.getString("PeersView.state.established");
                break;
        }
    }
    cell.setText(state_text);
}
Use of com.biglybt.core.peer.impl.PEPeerTransport in project BiglyBT by BiglySoftware.
The class PEPeerTransportProtocol, method decodeBTHandshake:
protected void decodeBTHandshake(BTHandshake handshake) {
    if (Logger.isEnabled())
        Logger.log(new LogEvent(this, LOGID, "Received handshake with reserved bytes: " + ByteFormatter.nicePrint(handshake.getReserved(), false)));
    PeerIdentityDataID my_peer_data_id = manager.getPeerIdentityDataID();
    if (getConnectionState() == CONNECTION_FULLY_ESTABLISHED) {
        handshake.destroy();
        closeConnectionInternally("peer sent another handshake after the initial connect");
    }
    if (!Arrays.equals(manager.getHash(), handshake.getDataHash())) {
        closeConnectionInternally("handshake has wrong infohash");
        handshake.destroy();
        return;
    }
    peer_id = handshake.getPeerId();
    // Decode a client identification string from the given peerID
    this.client_peer_id = this.client = StringInterner.intern(PeerClassifier.getClientDescription(peer_id, network));
    // make sure the client type is not banned
    if (!PeerClassifier.isClientTypeAllowed(client)) {
        closeConnectionInternally(client + " client type not allowed to connect, banned");
        handshake.destroy();
        return;
    }
    // make sure we are not connected to ourselves
    if (Arrays.equals(manager.getPeerId(), peer_id)) {
        // make sure we don't do it again
        manager.peerVerifiedAsSelf(this);
        closeConnectionInternally("given peer id matches myself");
        handshake.destroy();
        return;
    }
    // make sure we are not already connected to this peer
    boolean sameIdentity = PeerIdentityManager.containsIdentity(my_peer_data_id, peer_id, getPort());
    boolean sameIP = false;
    // allow loopback connects for co-located proxy-based connections and testing
    boolean same_allowed = COConfigurationManager.getBooleanParameter("Allow Same IP Peers") || ip.equals("127.0.0.1");
    if (!same_allowed) {
        if (PeerIdentityManager.containsIPAddress(my_peer_data_id, ip)) {
            sameIP = true;
        }
    }
    if (sameIdentity) {
        boolean close = true;
        if (connection.isLANLocal()) {
            // this new connection is lan-local
            PEPeerTransport existing = manager.getTransportFromIdentity(peer_id);
            if (existing != null) {
                String existing_ip = existing.getIp();
                if (!existing.isLANLocal() || (existing_ip.endsWith(".1") && !existing_ip.equals(ip))) {
                    // so drop the existing connection if it is an external (non lan-local) one
                    Debug.outNoStack("Dropping existing non-lanlocal peer connection [" + existing + "] in favour of [" + this + "]");
                    manager.removePeer(existing);
                    close = false;
                }
            }
        }
        if (close) {
            if (Constants.IS_CVS_VERSION) {
                try {
                    List<PEPeer> peers = manager.getPeers();
                    String dup_str = "?";
                    boolean dup_ip = false;
                    for (PEPeer p : peers) {
                        if (p == this) {
                            continue;
                        }
                        byte[] id = p.getId();
                        if (Arrays.equals(id, peer_id)) {
                            dup_ip = p.getIp().equals(getIp());
                            dup_str = p.getClient() + "/" + p.getClientNameFromExtensionHandshake() + "/" + p.getIp() + "/" + p.getPort();
                            break;
                        }
                    }
                    String my_str = getClient() + "/" + getIp() + "/" + getPort();
                    if (!dup_ip) {
                        Debug.outNoStack("Duplicate peer id detected: id=" + ByteFormatter.encodeString(peer_id) + ": this=" + my_str + ",other=" + dup_str);
                    }
                } catch (Throwable e) {
                }
            }
            closeConnectionInternally("peer matches already-connected peer id");
            handshake.destroy();
            return;
        }
    }
    if (sameIP) {
        closeConnectionInternally("peer matches already-connected IP address, duplicate connections not allowed");
        handshake.destroy();
        return;
    }
    // make sure we haven't reached our connection limit
    boolean max_reached = manager.getMaxNewConnectionsAllowed(network) == 0;
    if (max_reached && !manager.doOptimisticDisconnect(isLANLocal(), isPriorityConnection(), network)) {
        int[] _con_max = manager.getMaxConnections();
        int con_max = _con_max[0] + _con_max[1];
        final String msg = "too many existing peer connections [p" + PeerIdentityManager.getIdentityCount(my_peer_data_id) + "/g" + PeerIdentityManager.getTotalIdentityCount() + ", pmx" + PeerUtils.MAX_CONNECTIONS_PER_TORRENT + "/gmx" + PeerUtils.MAX_CONNECTIONS_TOTAL + "/dmx" + con_max + "]";
        // System.out.println( msg );
        closeConnectionInternally(msg);
        handshake.destroy();
        return;
    }
    try {
        closing_mon.enter();
        if (closing) {
            final String msg = "connection already closing";
            closeConnectionInternally(msg);
            handshake.destroy();
            return;
        }
        if (!PeerIdentityManager.addIdentity(my_peer_data_id, peer_id, getPort(), ip)) {
            closeConnectionInternally("peer matches already-connected peer id");
            handshake.destroy();
            return;
        }
        identityAdded = true;
    } finally {
        closing_mon.exit();
    }
    if (Logger.isEnabled())
        Logger.log(new LogEvent(this, LOGID, "In: has sent their handshake"));
    // Let's store the reserved bits somewhere so they can be examined later (externally).
    handshake_reserved_bytes = handshake.getReserved();
    /*
     * Waiting until we've received the initiating-end's full handshake, before sending back our own,
     * really should be the "proper" behavior. However, classic BT trackers running NAT checking will
     * only send the first 48 bytes (up to infohash) of the peer handshake, skipping peerid, which means
     * we'll never get their complete handshake, and thus never reply, which causes the NAT check to fail.
     * So, we need to send our handshake earlier, after we've verified the infohash.
     *
    if( incoming ) { //wait until we've received their handshake before sending ours
        sendBTHandshake();
    }
    */
    this.ml_dht_enabled = (handshake_reserved_bytes[7] & 0x01) == 1;
    // disable fast if we have a per-torrent upload limit, as it is non-trivial to enforce for choked fast-start
    // transfers: the peer is in the multi-peer upload group (as choked), and in this mode the limit isn't
    // enforced (see http://forum.vuze.com/thread.jspa?threadID=105262)
    fast_extension_enabled = BTHandshake.FAST_EXTENSION_ENABLED && manager.getUploadRateLimitBytesPerSecond() == 0 && (handshake_reserved_bytes[7] & 0x04) != 0;
    messaging_mode = decideExtensionProtocol(handshake);
    // extended protocol processing
    if (messaging_mode == MESSAGING_AZMP) {
        /**
         * We log when a non-Azureus client claims to support extended messaging...
         * Obviously other Azureus clients do, so there's no point logging about them!
         */
        if (Logger.isEnabled() && !client.contains("Azureus") && !client.contains(Constants.AZUREUS_NAME)) {
            Logger.log(new LogEvent(this, LOGID, "Handshake claims extended AZ " + "messaging support... enabling AZ mode."));
        }
        // Ignore the handshake setting - wait for the AZHandshake to indicate
        // support instead.
        this.ml_dht_enabled = false;
        Transport transport = connection.getTransport();
        int padding_mode;
        if (transport.isEncrypted()) {
            if (transport.isTCP()) {
                padding_mode = AZMessageEncoder.PADDING_MODE_NORMAL;
            } else {
                padding_mode = AZMessageEncoder.PADDING_MODE_MINIMAL;
            }
        } else {
            padding_mode = AZMessageEncoder.PADDING_MODE_NONE;
        }
        connection.getIncomingMessageQueue().setDecoder(new AZMessageDecoder());
        connection.getOutgoingMessageQueue().setEncoder(new AZMessageEncoder(padding_mode));
        // We will wait until we get the AZ handshake before considering the connection
        // initialised.
        this.sendAZHandshake();
        handshake.destroy();
    } else if (messaging_mode == MESSAGING_LTEP) {
        if (Logger.isEnabled()) {
            Logger.log(new LogEvent(this, LOGID, "Enabling LT extension protocol support..."));
        }
        connection.getIncomingMessageQueue().setDecoder(new LTMessageDecoder());
        connection.getOutgoingMessageQueue().setEncoder(new LTMessageEncoder(this));
        generateSessionId();
        if (!is_metadata_download) {
            this.initPostConnection(handshake);
        }
        this.sendLTHandshake();
    } else {
        this.client = ClientIdentifier.identifyBTOnly(this.client_peer_id, this.handshake_reserved_bytes);
        connection.getIncomingMessageQueue().getDecoder().resumeDecoding();
        this.initPostConnection(handshake);
    }
}
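decodeBTHandshake probes individual bits of the eight reserved handshake bytes: DHT support at reserved[7] & 0x01 and the fast extension at reserved[7] & 0x04, with decideExtensionProtocol then choosing between AZMP, LTEP, and plain BT messaging. As a compact reference, the helpers below (a standalone sketch, not BiglyBT's API) show where each capability probed here conventionally lives: the DHT and fast bits follow BEP 5 and BEP 6, the LTEP bit follows BEP 10, and the first-byte flag is the historical Azureus-messaging marker.

// Standalone helpers (not BiglyBT code) mapping the 8 reserved bytes of
// the BitTorrent handshake to the capabilities probed above.
final class HandshakeBits {
    static boolean dht(byte[] reserved)  { return (reserved[7] & 0x01) != 0; } // BEP 5 (mainline DHT)
    static boolean fast(byte[] reserved) { return (reserved[7] & 0x04) != 0; } // BEP 6 (fast extension)
    static boolean ltep(byte[] reserved) { return (reserved[5] & 0x10) != 0; } // BEP 10 (LT extension protocol)
    static boolean azmp(byte[] reserved) { return (reserved[0] & 0x80) != 0; } // historical Azureus messaging flag
}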
Use of com.biglybt.core.peer.impl.PEPeerTransport in project BiglyBT by BiglySoftware.
The class PiecePickerImpl, method allocateRequests:
/**
 * One reason requests don't stem from the individual peers is so the connections can be
 * sorted by best uploaders, providing some opportunity to download the most important
 * (i.e. rarest and/or highest priority) pieces faster and more reliably.
 */
@Override
public final void allocateRequests() {
    if (!hasNeededUndonePiece) {
        return;
    }
    allocate_request_loop_count++;
    final List peers = peerControl.getPeers();
    final int peersSize = peers.size();
    // final long[] upRates = new long[peersSize];
    final ArrayList<PEPeerTransport> bestUploaders = new ArrayList<>(peersSize);
    for (int i = 0; i < peersSize; i++) {
        final PEPeerTransport peer = (PEPeerTransport) peers.get(i);
        if (peer.isDownloadPossible()) {
            int no_req_count = peer.getConsecutiveNoRequestCount();
            if (no_req_count == 0 || allocate_request_loop_count % (no_req_count + 1) == 0) {
                bestUploaders.add(peer);
                // final long upRate = peer.getStats().getSmoothDataReceiveRate();
                // UnchokerUtil.updateLargestValueFirstSort(upRate, upRates, peer, bestUploaders, 0);
            }
        }
    }
    /* Sort all peers we're currently downloading from, with the most favourable one
     * for the next request as the first entry. Randomize the list first so we don't
     * keep picking the same candidates when the sort doesn't return conclusive results.
     */
    Collections.shuffle(bestUploaders);
    for (int i = 0; i < 3; i++) {
        try {
            Collections.sort(bestUploaders, new Comparator<PEPeerTransport>() {
                @Override
                public int compare(PEPeerTransport pt1, PEPeerTransport pt2) {
                    if (pt1 == pt2) {
                        return (0);
                    }
                    PEPeerStats stats2 = pt2.getStats();
                    PEPeerStats stats1 = pt1.getStats();
                    /* pt1 comes first if we want to request data from it more than from pt2,
                     * i.e. it is "smaller" and the return value is < 0
                     */
                    int toReturn = 0;
                    // lan peers to the front of the queue as they'll ignore request limiting
                    if (pt1.isLANLocal() && !pt2.isLANLocal())
                        toReturn = -1;
                    else if (!pt1.isLANLocal() && pt2.isLANLocal())
                        toReturn = 1;
                    // try to download from the currently fastest; this is important for request focusing
                    if (toReturn == 0)
                        toReturn = (int) (stats2.getSmoothDataReceiveRate() - stats1.getSmoothDataReceiveRate());
                    // also prefer peers that we're uploading to; that should stabilize tit-for-tat a bit
                    if (toReturn == 0 && (!pt2.isChokedByMe() || !pt1.isChokedByMe()))
                        toReturn = (int) (stats2.getDataSendRate() - stats1.getDataSendRate());
                    // avoid snubbed ones for the next step here
                    if (toReturn == 0 && pt2.isSnubbed() && !pt1.isSnubbed())
                        toReturn = -1;
                    if (toReturn == 0 && !pt2.isSnubbed() && pt1.isSnubbed())
                        toReturn = 1;
                    // try some peer we haven't downloaded from yet (this should allow us to taste all peers)
                    if (toReturn == 0 && stats2.getTotalDataBytesReceived() == 0 && stats1.getTotalDataBytesReceived() > 0)
                        toReturn = 1;
                    if (toReturn == 0 && stats1.getTotalDataBytesReceived() == 0 && stats2.getTotalDataBytesReceived() > 0)
                        toReturn = -1;
                    return toReturn;
                }
            });
            break;
        } catch (IllegalArgumentException e) {
            // JDK 1.7 introduced this exception (java.lang.IllegalArgumentException:
            // "Comparison method violates its general contract!") on contract violation.
            // We have an unstable comparator here, as it uses all sorts of data that can
            // change during the sort. To fix this properly we would need to cache that
            // data for the duration of the sort, which is expensive given that we don't
            // hugely care about the accuracy of this sort. So swallow the occasional error.
        }
    }
    final int uploadersSize = bestUploaders.size();
    if (uploadersSize == 0) {
        // no usable peers, bail out early
        return;
    }
    int REQUESTS_MIN;
    boolean done_priorities = false;
    if (priorityRTAexists) {
        REQUESTS_MIN = REQUESTS_MIN_MIN;
        final Map[] peer_randomiser = { null };
        // to keep the ordering consistent we need to use a fixed metric, unless
        // we remove + re-add a peer, at which point we need to take account of
        // the fact that it has a new request allocated
        final Map block_time_order_peers_metrics = new HashMap(uploadersSize);
        Set block_time_order_peers = new TreeSet(new Comparator() {
            @Override
            public int compare(Object arg1, Object arg2) {
                if (arg1 == arg2) {
                    return (0);
                }
                PEPeerTransport pt1 = (PEPeerTransport) arg1;
                PEPeerTransport pt2 = (PEPeerTransport) arg2;
                Integer m1 = (Integer) block_time_order_peers_metrics.get(pt1);
                if (m1 == null) {
                    m1 = new Integer(getNextBlockETAFromNow(pt1));
                    block_time_order_peers_metrics.put(pt1, m1);
                }
                Integer m2 = (Integer) block_time_order_peers_metrics.get(pt2);
                if (m2 == null) {
                    m2 = new Integer(getNextBlockETAFromNow(pt2));
                    block_time_order_peers_metrics.put(pt2, m2);
                }
                int result = m1.intValue() - m2.intValue();
                if (result == 0) {
                    Map pr = peer_randomiser[0];
                    if (pr == null) {
                        pr = peer_randomiser[0] = new LightHashMap(bestUploaders.size());
                    }
                    Integer r_1 = (Integer) pr.get(pt1);
                    if (r_1 == null) {
                        r_1 = new Integer(random.nextInt());
                        pr.put(pt1, r_1);
                    }
                    Integer r_2 = (Integer) pr.get(pt2);
                    if (r_2 == null) {
                        r_2 = new Integer(random.nextInt());
                        pr.put(pt2, r_2);
                    }
                    result = r_1.intValue() - r_2.intValue();
                    if (result == 0) {
                        result = pt1.hashCode() - pt2.hashCode();
                        if (result == 0) {
                            // very unlikely - inconsistent, but better than losing a peer
                            result = 1;
                        }
                    }
                }
                return (result);
            }
        });
        block_time_order_peers.addAll(bestUploaders);
        PEPeerTransport best_uploader = (PEPeerTransport) bestUploaders.get(0);
        long best_block_eta = SystemTime.getCurrentTime() + getNextBlockETAFromNow(best_uploader);
        // give priority pieces the first look-in;
        // we need to sort by how quickly the peer can get a block, not just its base speed
        boolean allocated_request = true;
        Set allocations_started = new HashSet();
        try {
            while (allocated_request && priorityRTAexists) {
                allocated_request = false;
                while (!block_time_order_peers.isEmpty()) {
                    Iterator it = block_time_order_peers.iterator();
                    PEPeerTransport pt = (PEPeerTransport) it.next();
                    it.remove();
                    if (!pt.isDownloadPossible() || pt.isSnubbed()) {
                        continue;
                    }
                    // ignore request-number advice from peers in RTA mode; we have to do what we can
                    int maxRequests = REQUESTS_MIN + (int) (pt.getStats().getDataReceiveRate() / SLOPE_REQUESTS) + 1;
                    if (maxRequests > REQUESTS_MAX || maxRequests < 0) {
                        maxRequests = REQUESTS_MAX;
                    }
                    int currentRequests = pt.getNbRequests();
                    int allowed_requests = maxRequests - currentRequests;
                    if (allowed_requests > 0) {
                        if (!done_priorities) {
                            done_priorities = true;
                            computeBasePriorities();
                            if (!priorityRTAexists) {
                                break;
                            }
                        }
                        if (!allocations_started.contains(pt)) {
                            pt.requestAllocationStarts(startPriorities);
                            allocations_started.add(pt);
                        }
                        if (findRTAPieceToDownload(pt, pt == best_uploader, best_block_eta)) {
                            if (allowed_requests > 1) {
                                block_time_order_peers_metrics.remove(pt);
                                block_time_order_peers.add(pt);
                            }
                        }
                    }
                }
            }
        } finally {
            Iterator it = allocations_started.iterator();
            while (it.hasNext()) {
                ((PEPeerTransport) it.next()).requestAllocationComplete();
            }
        }
    } else {
        int required_blocks = (int) (diskManager.getRemainingExcludingDND() / DiskManager.BLOCK_SIZE);
        int blocks_per_uploader = required_blocks / uploadersSize;
        // if we have plenty of blocks outstanding we can afford to be more generous in the
        // minimum number of requests we allocate
        REQUESTS_MIN = Math.max(REQUESTS_MIN_MIN, Math.min(REQUESTS_MIN_MAX, blocks_per_uploader / 2));
    }
    checkEndGameMode();
    for (int i = 0; i < uploadersSize; i++) {
        final PEPeerTransport pt = (PEPeerTransport) bestUploaders.get(i);
        // only request when there are still free tokens in the bucket, or when it's a lan peer (those get sorted to the front of the queue)
        if (dispenser.peek(DiskManager.BLOCK_SIZE) < 1 && (!pt.isLANLocal() || includeLanPeersInReqLimiting))
            break;
        // can we transfer something?
        if (pt.isDownloadPossible()) {
            int peer_request_num = pt.getMaxNbRequests();
            // if the request queue is too low, enqueue another request
            int maxRequests;
            if (peer_request_num != -1) {
                maxRequests = peer_request_num;
            } else {
                if (!pt.isSnubbed()) {
                    if (!endGameMode) {
                        int peer_requests_min;
                        if (pt.getUnchokedForMillis() < 10 * 1000) {
                            peer_requests_min = REQUESTS_MIN;
                        } else {
                            peer_requests_min = REQUESTS_MIN_MIN;
                        }
                        maxRequests = peer_requests_min + (int) (pt.getStats().getDataReceiveRate() / SLOPE_REQUESTS);
                        if (maxRequests > REQUESTS_MAX || maxRequests < 0)
                            maxRequests = REQUESTS_MAX;
                    } else {
                        maxRequests = 2;
                    }
                } else {
                    maxRequests = pt.getNetwork() == AENetworkClassifier.AT_PUBLIC ? 1 : 2;
                }
            }
            if (pt.getNbRequests() <= (maxRequests * 3) / 5) {
                if (!done_priorities) {
                    done_priorities = true;
                    computeBasePriorities();
                }
                int total_allocated = 0;
                try {
                    boolean peer_managing_requests = pt.requestAllocationStarts(startPriorities);
                    while (pt.isDownloadPossible() && pt.getNbRequests() < maxRequests) {
                        // is there anything else to download?
                        int allocated;
                        if (peer_managing_requests || !endGameMode) {
                            allocated = findPieceToDownload(pt, maxRequests);
                        } else {
                            allocated = findPieceInEndGameMode(pt, maxRequests);
                        }
                        if (allocated == 0) {
                            break;
                        } else {
                            total_allocated += allocated;
                        }
                    }
                } finally {
                    pt.requestAllocationComplete();
                }
                if (total_allocated == 0) {
                    // There are various reasons we might not allocate any requests to a peer,
                    // such as it not having any pieces we're interested in. Keep track of the
                    // number of consecutive "no requests" outcomes so we can reduce the
                    // scheduling frequency of such peers.
                    int no_req_count = pt.getConsecutiveNoRequestCount();
                    if (no_req_count < NO_REQUEST_BACKOFF_MAX_LOOPS) {
                        pt.setConsecutiveNoRequestCount(no_req_count + 1);
                    }
                    // System.out.println( pt.getIp() + ": nb=" + pt.getNbRequests() + ",max=" + maxRequests + ",nrc=" + no_req_count + ",loop=" + allocate_request_loop_count);
                } else {
                    pt.setConsecutiveNoRequestCount(0);
                }
            }
        }
    }
}
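The swallowed IllegalArgumentException comes from sorting on live statistics: values such as getSmoothDataReceiveRate() can change between two compare() calls, violating the comparator's contract. The catch comment names the proper fix, caching the data for the duration of the sort; below is a sketch of that approach inside allocateRequests, simplified to the primary sort key only (an illustration, not the project's actual code):

// Sketch of a contract-safe sort (not BiglyBT's fix): snapshot the volatile
// metric once per peer, then sort on the immutable snapshot so compare()
// returns consistent answers for the whole sort.
Map<PEPeerTransport, Long> rx = new HashMap<>();
for (PEPeerTransport pt : bestUploaders) {
    rx.put(pt, pt.getStats().getSmoothDataReceiveRate());
}
// fastest receivers first, mirroring the primary key of the comparator above
bestUploaders.sort((pt1, pt2) -> Long.compare(rx.get(pt2), rx.get(pt1)));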
Use of com.biglybt.core.peer.impl.PEPeerTransport in project BiglyBT by BiglySoftware.
The class UnchokerUtil, method getNextOptimisticPeers:
public static ArrayList<PEPeer> getNextOptimisticPeers(ArrayList<PEPeer> all_peers, boolean factor_reciprocated, boolean allow_snubbed, int num_needed) {
    // find all potential optimistic peers
    ArrayList<PEPeer> optimistics = new ArrayList<>();
    for (int i = 0; i < all_peers.size(); i++) {
        PEPeer peer = all_peers.get(i);
        if (isUnchokable(peer, false) && peer.isChokedByMe()) {
            optimistics.add(peer);
        }
    }
    if (optimistics.isEmpty() && allow_snubbed) {
        // try again, allowing snubbed peers as a last resort
        for (int i = 0; i < all_peers.size(); i++) {
            PEPeer peer = all_peers.get(i);
            if (isUnchokable(peer, true) && peer.isChokedByMe()) {
                optimistics.add(peer);
            }
        }
    }
    // no unchokable peers available
    if (optimistics.isEmpty())
        return null;
    // factor in the peer reciprocation ratio when picking optimistic peers
    ArrayList<PEPeer> result = new ArrayList<>(optimistics.size());
    if (factor_reciprocated) {
        ArrayList<PEPeerTransport> ratioed_peers = new ArrayList<>(optimistics.size());
        long[] ratios = new long[optimistics.size()];
        Arrays.fill(ratios, Long.MIN_VALUE);
        // order by upload ratio
        for (int i = 0; i < optimistics.size(); i++) {
            PEPeer peer = optimistics.get(i);
            // a score > 0 means we've uploaded more, < 0 means we've downloaded more
            long score = peer.getStats().getTotalDataBytesSent() - peer.getStats().getTotalDataBytesReceived();
            // higher value = worse score
            UnchokerUtil.updateLargestValueFirstSort(score, ratios, peer, ratioed_peers, 0);
        }
        for (int i = 0; i < num_needed && ratioed_peers.size() > 0; i++) {
            // map to the sorted list using a logistic curve
            double factor = 1F / (0.8 + 0.2 * Math.pow(RandomUtils.nextFloat(), -1));
            int pos = (int) (factor * ratioed_peers.size());
            result.add(ratioed_peers.remove(pos));
        }
    } else {
        for (int i = 0; i < num_needed && optimistics.size() > 0; i++) {
            int rand_pos = new Random().nextInt(optimistics.size());
            result.add(optimistics.remove(rand_pos));
        }
    }
    return (result);
    // TODO:
    // In downloading mode we would be better off optimistically unchoking just peers we are interested in
    // ourselves, as they could potentially reciprocate. However, new peers have no pieces to share, so they
    // are not interesting to us, would never be unchoked, and thus would never get any data.
    // We could use a deterministic method for new peers to get their very first piece from us.
}
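The reciprocation-biased pick deserves unpacking. With r = RandomUtils.nextFloat() uniform in [0, 1), factor = 1 / (0.8 + 0.2/r) = r / (0.8r + 0.2) lies in [0, 1) with CDF P(factor <= f) = 0.2f / (1 - 0.8f), so the median factor is 5/6: half of all picks land in the last sixth of the ratio-sorted list, which holds the peers that have reciprocated the most (the list is ordered worst score first). A standalone check of that claim (a sketch using java.util.Random in place of RandomUtils, not BiglyBT code):

import java.util.Random;

public class OptimisticBiasDemo {
    public static void main(String[] args) {
        Random rnd = new Random();
        int samples = 1_000_000, lastSixth = 0;
        for (int i = 0; i < samples; i++) {
            // same curve as getNextOptimisticPeers
            double factor = 1d / (0.8 + 0.2 * Math.pow(rnd.nextFloat(), -1));
            if (factor >= 5d / 6d) lastSixth++;
        }
        // prints roughly 0.5: half of all picks target the best-reciprocating sixth
        System.out.printf("share of picks in the last sixth: %.3f%n", (double) lastSixth / samples);
    }
}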