Use of com.biglybt.core.peer.impl.PEPeerTransport in project BiglyBT by BiglySoftware.
The class HTTPNetworkManager, method reRoute.
protected void reRoute(final HTTPNetworkConnection old_http_connection, final byte[] old_hash, final byte[] new_hash, final String header) {
final NetworkConnection old_connection = old_http_connection.getConnection();
PeerManagerRegistration reg_data = PeerManager.getSingleton().manualMatchHash(old_connection.getEndpoint().getNotionalAddress(), new_hash);
if (reg_data == null) {
old_http_connection.close("Re-routing failed - registration not found");
return;
}
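// detach the underlying transport so that closing the old HTTP connection does not close it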
final Transport transport = old_connection.detachTransport();
old_http_connection.close("Switching torrents");
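// re-bind the transport with fresh HTTP encoder/decoder state, seeding the decoder with the already-read header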
final NetworkConnection new_connection = NetworkManager.getSingleton().bindTransport(transport, new HTTPMessageEncoder(), new HTTPMessageDecoder(header));
PeerManager.getSingleton().manualRoute(reg_data, new_connection, new PeerManagerRoutingListener() {
@Override
public boolean routed(PEPeerTransport peer) {
HTTPNetworkConnection new_http_connection;
if (header.contains("/webseed")) {
new_http_connection = new HTTPNetworkConnectionWebSeed(HTTPNetworkManager.this, new_connection, peer);
} else if (header.contains("/files/")) {
new_http_connection = new HTTPNetworkConnectionFile(HTTPNetworkManager.this, new_connection, peer);
} else {
return (false);
}
// fake a wakeup so the pre-read header is processed
new_http_connection.readWakeup();
return (true);
}
});
}
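The routed callback above chooses the concrete connection subtype from the pre-read request line. Below is a minimal, self-contained sketch of that dispatch pattern; the Handler interface and select method are invented for the illustration and are not part of the BiglyBT API.
// Hypothetical sketch of the path-based dispatch in routed(): the pre-read
// request line decides which handler serves the peer; anything else is rejected.
interface Handler {
    void readWakeup();   // counterpart of HTTPNetworkConnection.readWakeup()
}

final class HandlerSelector {

    // Mirrors the header.contains(...) checks: /webseed and /files/ requests
    // get different handlers; null plays the role of routed() returning false.
    static Handler select(final String header) {
        if (header.contains("/webseed")) {
            return () -> System.out.println("serve webseed request");
        } else if (header.contains("/files/")) {
            return () -> System.out.println("serve /files/ request");
        }
        return null;
    }

    public static void main(String[] args) {
        Handler handler = select("GET /webseed?info_hash=... HTTP/1.1");
        if (handler != null) {
            handler.readWakeup();   // the fake wakeup, as in routed()
        }
    }
}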
Use of com.biglybt.core.peer.impl.PEPeerTransport in project BiglyBT by BiglySoftware.
The class PeersGraphicView, method refresh.
protected void refresh() {
synchronized (dm_data_lock) {
if (canvas == null || canvas.isDisposed()) {
return;
}
Rectangle bounds = canvas.getClientArea();
if (bounds.width <= 0 || bounds.height <= 0) {
return;
}
Point panelSize = canvas.getSize();
boolean clearImage = img == null || img.isDisposed() || img.getBounds().width != bounds.width || img.getBounds().height != bounds.height;
if (clearImage) {
if (img != null && !img.isDisposed()) {
img.dispose();
}
// System.out.println("clear " + img);
img = new Image(canvas.getDisplay(), bounds.width, bounds.height);
}
GC gc = new GC(img);
try {
int pw = panelSize.x;
int ph = panelSize.y;
int num_dms = dm_data.length;
if (num_dms == 0 || pw == 0 || ph == 0) {
gc.setBackground(Colors.white);
gc.fillRectangle(bounds);
return;
}
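// work out a grid of roughly square cells, one per download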
int h_cells;
int v_cells;
if (ph <= pw) {
v_cells = 1;
h_cells = pw / ph;
double f = Math.sqrt(((double) num_dms) / (v_cells * h_cells));
int factor = (int) Math.ceil(f);
h_cells *= factor;
v_cells = factor;
} else {
v_cells = ph / pw;
h_cells = 1;
double f = Math.sqrt(((double) num_dms) / (v_cells * h_cells));
int factor = (int) Math.ceil(f);
v_cells *= factor;
h_cells = factor;
}
ph = h_cells == 1 ? (ph / num_dms) : (ph / v_cells);
pw = v_cells == 1 ? (pw / num_dms) : (pw / h_cells);
// System.out.println( h_cells + "*" + v_cells + ": " + pw + "*" + ph );
Point mySize = new Point(pw, ph);
int num = 0;
Point lastOffset = null;
for (ManagerData data : dm_data) {
DownloadManager manager = data.manager;
PEPeer[] sortedPeers;
try {
data.peers_mon.enter();
List<PEPeerTransport> connectedPeers = new ArrayList<>();
for (PEPeer peer : data.peers) {
if (peer_filter.acceptPeer(peer)) {
if (peer instanceof PEPeerTransport) {
PEPeerTransport peerTransport = (PEPeerTransport) peer;
if (peerTransport.getConnectionState() == PEPeerTransport.CONNECTION_FULLY_ESTABLISHED)
connectedPeers.add(peerTransport);
}
}
}
sortedPeers = connectedPeers.toArray(new PEPeer[connectedPeers.size()]);
} finally {
data.peers_mon.exit();
}
if (sortedPeers == null)
return;
for (int i = 0; i < 3; i++) {
try {
Arrays.sort(sortedPeers, peerComparator);
break;
} catch (IllegalArgumentException e) {
// can happen as peer data can change during the sort, resulting in a 'comparison method violates its general contract' error
}
}
int h = num % h_cells;
int v = num / h_cells;
Point myOffset = new Point(h * pw, v * ph);
render(manager, data, gc, sortedPeers, mySize, myOffset);
num++;
lastOffset = myOffset;
}
int rem_x = panelSize.x - (lastOffset.x + mySize.x);
if (rem_x > 0) {
gc.setBackground(Colors.white);
gc.fillRectangle(lastOffset.x + mySize.x, lastOffset.y, rem_x, mySize.y);
}
int rem_y = panelSize.y - (lastOffset.y + mySize.y);
if (rem_y > 0) {
gc.setBackground(Colors.white);
gc.fillRectangle(0, lastOffset.y + mySize.y, panelSize.x, rem_y);
}
} finally {
gc.dispose();
canvas.redraw();
}
}
}
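The h_cells/v_cells arithmetic above packs one pane per download into the canvas while keeping the cells roughly square. The following standalone sketch repeats that computation with plain Java and invented names, assuming positive panel dimensions and a positive download count, as the guards in refresh() already ensure.
// Standalone sketch of the grid sizing used in refresh(): pick a grid of
// roughly square cells with at least one cell per item, then derive the
// per-cell width and height the same way pw and ph are derived above.
final class GridLayoutSketch {

    // Returns { h_cells, v_cells, cellWidth, cellHeight }.
    static int[] computeCells(int panelWidth, int panelHeight, int itemCount) {
        int hCells;
        int vCells;
        if (panelHeight <= panelWidth) {
            vCells = 1;
            hCells = panelWidth / panelHeight;   // squares that fit across
            int factor = (int) Math.ceil(Math.sqrt((double) itemCount / (vCells * hCells)));
            hCells *= factor;
            vCells = factor;
        } else {
            vCells = panelHeight / panelWidth;   // squares that fit down
            hCells = 1;
            int factor = (int) Math.ceil(Math.sqrt((double) itemCount / (vCells * hCells)));
            vCells *= factor;
            hCells = factor;
        }
        int cellHeight = hCells == 1 ? panelHeight / itemCount : panelHeight / vCells;
        int cellWidth = vCells == 1 ? panelWidth / itemCount : panelWidth / hCells;
        return new int[] { hCells, vCells, cellWidth, cellHeight };
    }

    public static void main(String[] args) {
        // e.g. an 800x600 canvas with 5 downloads -> a 3x3 grid of 266x200 cells
        System.out.println(java.util.Arrays.toString(computeCells(800, 600, 5)));
    }
}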
Use of com.biglybt.core.peer.impl.PEPeerTransport in project BiglyBT by BiglySoftware.
The class ClientStatsView, method peerRemoved.
@Override
public void peerRemoved(PEPeer peer) {
synchronized (mapData) {
ClientStatsDataSource stat = mapData.get(getID(peer));
if (stat != null) {
stat.current--;
String network = null;
if (peer instanceof PEPeerTransport) {
PeerItem identity = ((PEPeerTransport) peer).getPeerItemIdentity();
if (identity != null) {
network = identity.getNetwork();
}
}
stat.bytesReceived += peer.getStats().getTotalDataBytesReceived();
stat.bytesSent += peer.getStats().getTotalDataBytesSent();
stat.bytesDiscarded += peer.getStats().getTotalBytesDiscarded();
if (network != null) {
Map<String, Object> map = stat.perNetworkStats.get(network);
if (map == null) {
map = new HashMap<>();
stat.perNetworkStats.put(network, map);
}
long bytesReceived = MapUtils.getMapLong(map, "bytesReceived", 0);
map.put("bytesReceived", bytesReceived + peer.getStats().getTotalDataBytesReceived());
long bytesSent = MapUtils.getMapLong(map, "bytesSent", 0);
map.put("bytesSent", bytesSent + peer.getStats().getTotalDataBytesSent());
long bytesDiscarded = MapUtils.getMapLong(map, "bytesDiscarded", 0);
map.put("bytesDiscarded", bytesDiscarded + peer.getStats().getTotalBytesDiscarded());
}
if (tv != null) {
TableRowCore row = tv.getRow(stat);
if (row != null) {
row.invalidate();
}
}
}
}
}
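The per-network bookkeeping above reads each long out of a nested map and writes the incremented value back. A minimal sketch of the same accumulation using only java.util collections follows; the class and method names are invented for the illustration and MapUtils is not needed.
import java.util.HashMap;
import java.util.Map;

// Sketch of the nested per-network counters kept in peerRemoved():
// outer key = network name, inner key = counter name, value = running total.
final class PerNetworkStatsSketch {

    private final Map<String, Map<String, Long>> perNetworkStats = new HashMap<>();

    // Roll a departing peer's byte counts into its network's running totals.
    void accumulate(String network, long received, long sent, long discarded) {
        Map<String, Long> counters =
            perNetworkStats.computeIfAbsent(network, k -> new HashMap<>());
        counters.merge("bytesReceived", received, Long::sum);
        counters.merge("bytesSent", sent, Long::sum);
        counters.merge("bytesDiscarded", discarded, Long::sum);
    }

    public static void main(String[] args) {
        PerNetworkStatsSketch stats = new PerNetworkStatsSketch();
        stats.accumulate("Public", 1024, 512, 0);
        stats.accumulate("Public", 2048, 0, 128);
        // e.g. {Public={bytesReceived=3072, bytesSent=512, bytesDiscarded=128}} (key order may vary)
        System.out.println(stats.perNetworkStats);
    }
}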
Use of com.biglybt.core.peer.impl.PEPeerTransport in project BiglyBT by BiglySoftware.
The class PiecePickerImpl, method findRTAPieceToDownload.
protected final boolean findRTAPieceToDownload(PEPeerTransport pt, boolean best_uploader, long best_uploader_next_block_eta) {
if (pt == null || pt.getPeerState() != PEPeer.TRANSFERING) {
return (false);
}
final BitFlags peerHavePieces = pt.getAvailable();
if (peerHavePieces == null || peerHavePieces.nbSet <= 0) {
return (false);
}
String rta_log_str = LOG_RTA ? pt.getIp() : null;
try {
// how many KB/s the peer has been sending
final int peerSpeed = (int) pt.getStats().getDataReceiveRate() / 1024;
final int startI = peerHavePieces.start;
final int endI = peerHavePieces.end;
int piece_min_rta_index = -1;
int piece_min_rta_block = 0;
long piece_min_rta_time = Long.MAX_VALUE;
long now = SystemTime.getCurrentTime();
long my_next_block_eta = now + getNextBlockETAFromNow(pt);
for (int i = startI; i <= endI; i++) {
long piece_rta = provider_piece_rtas[i];
if (peerHavePieces.flags[i] && startPriorities[i] == PRIORITY_REALTIME && piece_rta > 0) {
final DiskManagerPiece dmPiece = dmPieces[i];
if (!dmPiece.isDownloadable()) {
continue;
}
final PEPiece pePiece = pePieces[i];
if (pePiece != null && pePiece.isDownloaded()) {
continue;
}
Object realtime_data = null;
boolean try_allocate_even_though_late = my_next_block_eta > piece_rta && best_uploader_next_block_eta > piece_rta;
if (piece_rta >= piece_min_rta_time) {
// piece is less urgent than an already found one
} else if (my_next_block_eta > piece_rta && !(best_uploader || best_uploader_next_block_eta > piece_rta)) {
// only allocate if we have a chance of getting this block in time, or we're
// the best uploader we've got, or even the best uploader can't get it in time.
// The last case matters once no peer can fetch the block in time: someone
// still has to be allocated to it, otherwise we'd concentrate on lower-priority
// pieces we can get in time and leave the stuck ones to the best uploader alone.
} else if (pePiece == null || (realtime_data = pePiece.getRealTimeData()) == null) {
if (LOG_RTA)
rta_log_str += "{alloc_new=" + i + ",time=" + (piece_rta - now) + "}";
// no real-time block allocated yet
piece_min_rta_time = piece_rta;
piece_min_rta_index = i;
piece_min_rta_block = 0;
} else {
RealTimeData rtd = (RealTimeData) realtime_data;
// check the blocks to see if any are now lagging behind their ETA given current peer speed
List[] peer_requests = rtd.getRequests();
for (int j = 0; j < peer_requests.length; j++) {
if (pePiece.isDownloaded(j) || pePiece.isWritten(j)) {
continue;
}
List block_peer_requests = peer_requests[j];
long best_eta = Long.MAX_VALUE;
boolean pt_already_present = false;
// tidy up existing request data
Iterator it = block_peer_requests.iterator();
while (it.hasNext()) {
RealTimePeerRequest pr = (RealTimePeerRequest) it.next();
PEPeerTransport this_pt = pr.getPeer();
if (this_pt.getPeerState() != PEPeer.TRANSFERING) {
if (LOG_RTA)
rta_log_str += "{peer_dead=" + this_pt.getIp() + "}";
it.remove();
continue;
}
DiskManagerReadRequest this_request = pr.getRequest();
int request_index = this_pt.getRequestIndex(this_request);
if (request_index == -1) {
if (LOG_RTA)
rta_log_str += "{request_lost=" + this_request.getPieceNumber() + "}";
it.remove();
continue;
}
if (this_pt == pt) {
pt_already_present = true;
break;
}
long this_up_bps = this_pt.getStats().getDataReceiveRate();
if (this_up_bps < 1) {
this_up_bps = 1;
}
int next_block_bytes = (request_index + 1) * DiskManager.BLOCK_SIZE;
long this_peer_eta = now + ((next_block_bytes * 1000) / this_up_bps);
best_eta = Math.min(best_eta, this_peer_eta);
}
if (!pt_already_present) {
if (block_peer_requests.size() == 0) {
if (LOG_RTA)
rta_log_str += "{alloc as no req=" + i + ",block=" + j + ",time=" + (piece_rta - now) + "}";
piece_min_rta_time = piece_rta;
piece_min_rta_index = i;
piece_min_rta_block = j;
// earlier blocks always have priority
break;
} else if (best_eta > piece_rta && (best_uploader || !try_allocate_even_though_late)) {
if (LOG_RTA)
rta_log_str += "{lagging=" + i + ",block=" + j + ",time=" + (best_eta - piece_rta) + "}";
if (my_next_block_eta < best_eta) {
if (LOG_RTA)
rta_log_str += "{taking over, time=" + (best_eta - my_next_block_eta) + "}";
piece_min_rta_time = piece_rta;
piece_min_rta_index = i;
piece_min_rta_block = j;
// earlier blocks always have priority
break;
}
}
}
}
}
}
}
if (piece_min_rta_index != -1) {
if (LOG_RTA)
rta_log_str += ",{select_piece=" + piece_min_rta_index + ",block=" + piece_min_rta_block + ",time=" + (piece_min_rta_time - now) + "}";
if (dispenser.dispense(1, DiskManager.BLOCK_SIZE) == 1 || (pt.isLANLocal() && !includeLanPeersInReqLimiting)) {
PEPiece pePiece = pePieces[piece_min_rta_index];
if (pePiece == null) {
// create piece manually
pePiece = new PEPieceImpl(this, dmPieces[piece_min_rta_index], peerSpeed >> 1);
// Assign the created piece to the pieces array.
peerControl.addPiece(pePiece, piece_min_rta_index, pt);
pePiece.setResumePriority(PRIORITY_REALTIME);
if (availability[piece_min_rta_index] <= globalMinOthers) {
nbRarestActive++;
}
}
RealTimeData rtd = (RealTimeData) pePiece.getRealTimeData();
if (rtd == null) {
rtd = new RealTimeData(pePiece);
pePiece.setRealTimeData(rtd);
}
pePiece.getAndMarkBlock(pt, piece_min_rta_block);
DiskManagerReadRequest request = pt.request(piece_min_rta_index, piece_min_rta_block * DiskManager.BLOCK_SIZE, pePiece.getBlockSize(piece_min_rta_block), true);
if (request != null) {
List real_time_requests = rtd.getRequests()[piece_min_rta_block];
real_time_requests.add(new RealTimePeerRequest(pt, request));
pt.setLastPiece(piece_min_rta_index);
pePiece.setLastRequestedPeerSpeed(peerSpeed);
return (true);
} else {
if (LOG_RTA)
rta_log_str += "{request failed}";
if (!pt.isLANLocal() || includeLanPeersInReqLimiting)
dispenser.returnUnusedChunks(1, DiskManager.BLOCK_SIZE);
return (false);
}
} else {
if (LOG_RTA)
rta_log_str += "{dispenser denied}";
return (false);
}
} else {
if (LOG_RTA)
rta_log_str += "{no piece found}";
return (false);
}
} finally {
if (LOG_RTA) {
System.out.println(rta_log_str);
}
}
}
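At the heart of the block loop is an ETA estimate: how long the peer will take to deliver an outstanding block at its current receive rate, compared against the piece's real-time deadline. A self-contained sketch of that arithmetic follows; BLOCK_SIZE stands in for DiskManager.BLOCK_SIZE and the class name is invented for the example.
// Sketch of the block-ETA estimate used when deciding whether an existing
// real-time request is lagging: the bytes queued up to and including the
// block, divided by the peer's observed receive rate.
final class BlockEtaSketch {

    static final int BLOCK_SIZE = 16 * 1024;   // stand-in for DiskManager.BLOCK_SIZE

    // requestIndex: 0-based position of the request in the peer's outstanding queue.
    // bytesPerSecond: the peer's observed data receive rate.
    static long estimateBlockEta(long nowMillis, int requestIndex, long bytesPerSecond) {
        if (bytesPerSecond < 1) {
            bytesPerSecond = 1;   // avoid divide-by-zero for idle peers, as the original does
        }
        long bytesAhead = (long) (requestIndex + 1) * BLOCK_SIZE;
        return nowMillis + (bytesAhead * 1000) / bytesPerSecond;
    }

    public static void main(String[] args) {
        long now = System.currentTimeMillis();
        // third request in the queue, peer receiving at 64 KiB/s -> roughly 750 ms away
        System.out.println(estimateBlockEta(now, 2, 64 * 1024) - now);
    }
}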
Use of com.biglybt.core.peer.impl.PEPeerTransport in project BiglyBT by BiglySoftware.
The class PiecePickerImpl, method recomputeAvailability.
private int[] recomputeAvailability() {
if (availabilityDrift > 0 && availabilityDrift != nbPieces && Logger.isEnabled())
Logger.log(new LogEvent(diskManager.getTorrent(), LOGID, LogEvent.LT_INFORMATION, "Recomputing availability. Drift=" + availabilityDrift + ":" + peerControl.getDisplayName()));
final List peers = peerControl.getPeers();
final int[] newAvailability = new int[nbPieces];
int j;
int i;
// first our pieces
for (j = 0; j < nbPieces; j++) newAvailability[j] = dmPieces[j].isDone() ? 1 : 0;
// for all peers
final int peersSize = peers.size();
for (i = 0; i < peersSize; i++) {
// get the peer connection
final PEPeer peer = (PEPeerTransport) peers.get(i);
if (peer != null && peer.getPeerState() == PEPeer.TRANSFERING) {
// cycle through the pieces they actually have
final BitFlags peerHavePieces = peer.getAvailable();
if (peerHavePieces != null && peerHavePieces.nbSet > 0) {
for (j = peerHavePieces.start; j <= peerHavePieces.end; j++) {
if (peerHavePieces.flags[j])
++newAvailability[j];
}
}
}
}
return newAvailability;
}
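recomputeAvailability() rebuilds the per-piece counts from scratch: one for each locally completed piece plus one for every transferring peer that advertises the piece. A minimal standalone sketch of the same tally, with boolean arrays standing in for DiskManagerPiece state and the peers' BitFlags:
// Standalone sketch of the availability tally in recomputeAvailability():
// start at 1 for each locally completed piece, then add 1 per peer that
// advertises the piece.
final class AvailabilitySketch {

    static int[] recompute(boolean[] localDone, boolean[][] peerHave) {
        int nbPieces = localDone.length;
        int[] availability = new int[nbPieces];
        // first our pieces
        for (int i = 0; i < nbPieces; i++) {
            availability[i] = localDone[i] ? 1 : 0;
        }
        // then one count per connected peer advertising the piece
        for (boolean[] have : peerHave) {
            for (int i = 0; i < nbPieces; i++) {
                if (have[i]) {
                    availability[i]++;
                }
            }
        }
        return availability;
    }

    public static void main(String[] args) {
        boolean[] done = { true, false, false };
        boolean[][] peers = { { true, true, false }, { false, true, false } };
        // prints [2, 2, 0]
        System.out.println(java.util.Arrays.toString(recompute(done, peers)));
    }
}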