Usage of com.biglybt.core.disk.DiskManagerReadRequest in the BiglyBT project (BiglySoftware): class DMReaderImpl, method readBlock (asynchronous form).
@Override
public void readBlock(final DiskManagerReadRequest request, final DiskManagerReadRequestListener _listener) {
// Mark the request as started so that the requestEnds() accounting below is balanced.
request.requestStarts();
// First wrapper: guarantees request.requestEnds(success) is recorded before the
// caller's listener is notified, on both the success and the failure path.
final DiskManagerReadRequestListener listener = new DiskManagerReadRequestListener() {
@Override
public void readCompleted(DiskManagerReadRequest request, DirectByteBuffer data) {
request.requestEnds(true);
_listener.readCompleted(request, data);
}
@Override
public void readFailed(DiskManagerReadRequest request, Throwable cause) {
request.requestEnds(false);
_listener.readFailed(request, cause);
}
@Override
public int getPriority() {
return (_listener.getPriority());
}
@Override
public void requestExecuted(long bytes) {
_listener.requestExecuted(bytes);
}
};
DirectByteBuffer buffer = null;
try {
int length = request.getLength();
// Borrow a pooled direct buffer large enough for the whole block.
buffer = DirectByteBufferPool.getBuffer(DirectByteBuffer.AL_DM_READ, length);
if (buffer == null) {
// Fix for bug #804874
Debug.out("DiskManager::readBlock:: ByteBufferPool returned null buffer");
listener.readFailed(request, new Exception("Out of memory"));
return;
}
int pieceNumber = request.getPieceNumber();
int offset = request.getOffset();
// Map the piece onto the file entries it spans.
DMPieceList pieceList = disk_manager.getPieceList(pieceNumber);
if (pieceList.size() == 0) {
// No backing entries: reported as a completed read (buffer contents undefined)
// rather than a failure — matches the "no chunk reads" case below.
Debug.out("no pieceList entries for " + pieceNumber);
listener.readCompleted(request, buffer);
return;
}
// Advance past file entries that lie wholly before the requested offset.
long previousFilesLength = 0;
int currentFile = 0;
long fileOffset = pieceList.get(0).getOffset();
while (currentFile < pieceList.size() && pieceList.getCumulativeLengthToPiece(currentFile) < offset) {
previousFilesLength = pieceList.getCumulativeLengthToPiece(currentFile);
currentFile++;
fileOffset = 0;
}
// update the offset (we're in the middle of a file)
fileOffset += offset - previousFilesLength;
// Build per-file read chunks: { cache file, offset within file, buffer limit }.
List chunks = new ArrayList();
int buffer_position = 0;
while (buffer_position < length && currentFile < pieceList.size()) {
DMPieceMapEntry map_entry = pieceList.get(currentFile);
int length_available = map_entry.getLength() - (int) (fileOffset - map_entry.getOffset());
// explicitly limit the read size to the proper length, rather than relying on the underlying file being correctly-sized
// see long DMWriterAndCheckerImpl::checkPiece note
int entry_read_limit = buffer_position + length_available;
// now bring down to the required read length if this is shorter than this
// chunk of data
entry_read_limit = Math.min(length, entry_read_limit);
// this chunk denotes a read up to buffer offset "entry_read_limit"
chunks.add(new Object[] { map_entry.getFile().getCacheFile(), new Long(fileOffset), new Integer(entry_read_limit) });
buffer_position = entry_read_limit;
currentFile++;
fileOffset = 0;
}
if (chunks.size() == 0) {
Debug.out("no chunk reads for " + pieceNumber);
listener.readCompleted(request, buffer);
return;
}
// this is where we go async and need to start counting requests for the sake
// of shutting down tidily
// have to wrap the request as we can validly have >1 for same piece/offset/length and
// the request type itself overrides object equiv based on this...
final Object[] request_wrapper = { request };
// Second wrapper: on completion/failure, first removes this request from the
// in-flight bookkeeping (complete()), then delegates to 'listener' above.
DiskManagerReadRequestListener l = new DiskManagerReadRequestListener() {
@Override
public void readCompleted(DiskManagerReadRequest request, DirectByteBuffer data) {
complete();
listener.readCompleted(request, data);
}
@Override
public void readFailed(DiskManagerReadRequest request, Throwable cause) {
complete();
listener.readFailed(request, cause);
}
@Override
public int getPriority() {
return (_listener.getPriority());
}
@Override
public void requestExecuted(long bytes) {
_listener.requestExecuted(bytes);
}
// Decrement the async-read counter and drop the wrapper from the in-flight set;
// if the reader is stopping, wake the thread draining outstanding reads.
protected void complete() {
try {
this_mon.enter();
async_reads--;
if (!read_requests.remove(request_wrapper)) {
Debug.out("request not found");
}
if (stopped) {
async_read_sem.release();
}
} finally {
this_mon.exit();
}
}
};
try {
this_mon.enter();
if (stopped) {
// Reader already shut down: return the pooled buffer and fail the request.
buffer.returnToPool();
listener.readFailed(request, new Exception("Disk reader has been stopped"));
return;
}
// Registration must happen under the monitor, paired with complete() above.
async_reads++;
read_requests.add(request_wrapper);
} finally {
this_mon.exit();
}
// Hand off to the async dispatcher; it owns the buffer from this point on.
new requestDispatcher(request, l, buffer, chunks);
} catch (Throwable e) {
// Synchronous failure: reclaim the buffer (async path reclaims its own),
// mark the disk manager failed, and notify the wrapped listener.
if (buffer != null) {
buffer.returnToPool();
}
disk_manager.setFailed("Disk read error - " + Debug.getNestedExceptionMessage(e));
Debug.printStackTrace(e);
listener.readFailed(request, e);
}
}
Usage of com.biglybt.core.disk.DiskManagerReadRequest in the BiglyBT project (BiglySoftware): class DMReaderImpl, method readBlock (synchronous form).
// returns null if the read can't be performed
@Override
public DirectByteBuffer readBlock(int pieceNumber, int offset, int length) {
    // Synchronous facade over the asynchronous readBlock: build a request,
    // then park on a semaphore until the listener reports an outcome.
    final DiskManagerReadRequest read_request = createReadRequest(pieceNumber, offset, length);
    final DirectByteBuffer[] result_holder = { null };
    final AESemaphore done_sem = new AESemaphore("DMReader:readBlock");
    readBlock(read_request, new DiskManagerReadRequestListener() {
        @Override
        public void readCompleted(DiskManagerReadRequest req, DirectByteBuffer data) {
            // Capture the buffer before releasing the waiting thread.
            result_holder[0] = data;
            done_sem.release();
        }
        @Override
        public void readFailed(DiskManagerReadRequest req, Throwable cause) {
            // Failure is signalled by leaving result_holder[0] as null.
            done_sem.release();
        }
        @Override
        public int getPriority() {
            return -1;
        }
        @Override
        public void requestExecuted(long bytes) {
            // No byte accounting needed for the synchronous path.
        }
    });
    done_sem.reserve();
    return result_holder[0];
}
Usage of com.biglybt.core.disk.DiskManagerReadRequest in the BiglyBT project (BiglySoftware): class PEPeerTransportProtocol, method getOutgoingRequestedPieceNumbers.
@Override
public int[] getOutgoingRequestedPieceNumbers() {
    try {
        requested_mon.enter();
        /**
         * Cheap hack to reduce (but not remove all) the # of duplicate entries:
         * only consecutive repeats of the same piece number are collapsed.
         */
        final int numRequests = requested.size();
        // Worst-case capacity; trimmed to the actual count before returning.
        final int[] scratch = new int[numRequests];
        int count = 0;
        int lastPieceNumber = -1;
        for (int i = 0; i < numRequests; i++) {
            DiskManagerReadRequest req = null;
            try {
                req = (DiskManagerReadRequest) requested.get(i);
            } catch (Exception e) {
                Debug.printStackTrace(e);
            }
            if (req != null && req.getPieceNumber() != lastPieceNumber) {
                lastPieceNumber = req.getPieceNumber();
                scratch[count++] = lastPieceNumber;
            }
        }
        final int[] result = new int[count];
        System.arraycopy(scratch, 0, result, 0, count);
        return result;
    } finally {
        requested_mon.exit();
    }
}
Usage of com.biglybt.core.disk.DiskManagerReadRequest in the BiglyBT project (BiglySoftware): class PEPeerTransportProtocol, method request.
@Override
public DiskManagerReadRequest request(final int pieceNumber, final int pieceOffset, final int pieceLength, final boolean return_duplicates) {
final DiskManagerReadRequest request = manager.createDiskManagerRequest(pieceNumber, pieceOffset, pieceLength);
if (current_peer_state != TRANSFERING) {
// Peer not in a transferring state: back the request out immediately.
manager.requestCanceled(request);
return null;
}
// Register the request unless an equivalent one is already outstanding.
boolean added = false;
try {
requested_mon.enter();
if (!requested.contains(request)) {
if (requested.size() == 0) {
// First outstanding request: use it to measure request/response latency.
request.setLatencyTest();
}
requested.add(request);
added = true;
}
} finally {
requested_mon.exit();
}
if (added) {
// Newly registered: send the matching wire message for this download type.
if (is_metadata_download) {
if (az_metadata_supported) {
connection.getOutgoingMessageQueue().addMessage(new AZMetaData(pieceNumber, other_peer_request_version), false);
} else {
connection.getOutgoingMessageQueue().addMessage(new UTMetaData(pieceNumber, other_peer_request_version), false);
}
} else {
connection.getOutgoingMessageQueue().addMessage(new BTRequest(pieceNumber, pieceOffset, pieceLength, other_peer_request_version), false);
}
_lastPiece = pieceNumber;
if (DEBUG_FAST) {
if (really_choked_by_other_peer) {
// A request while choked implies the allow-fast extension is in play.
System.out.println("Sending allow-fast request for " + pieceNumber + "/" + pieceOffset + "/" + pieceLength + " to " + getIp());
}
}
// Remember the request under its own monitor (map used as a set: value is null).
try {
recent_outgoing_requests_mon.enter();
recent_outgoing_requests.put(request, null);
} finally {
recent_outgoing_requests_mon.exit();
}
return (request);
} else {
// Duplicate of an outstanding request: caller chooses whether it wants it back.
if (return_duplicates) {
return (request);
} else {
return (null);
}
}
}
Usage of com.biglybt.core.disk.DiskManagerReadRequest in the BiglyBT project (BiglySoftware): class PEPeerTransportProtocol, method cancelRequests.
// Cancels all outstanding piece requests for this peer, purging any unsent
// request messages and (unless we are closing) snubbing an unresponsive peer.
private void cancelRequests() {
if (!closing) {
// cancel any unsent requests in the queue
// (the BTRequest instance is only a type token for removeMessagesOfType)
final Message[] type = { new BTRequest(-1, -1, -1, other_peer_request_version) };
connection.getOutgoingMessageQueue().removeMessagesOfType(type, false);
}
if (requested != null && requested.size() > 0) {
try {
requested_mon.enter();
if (!closing) {
// may have unchoked us, gotten a request, then choked without filling it - snub them
// if they actually have data coming in, they'll be unsnubbed as soon as it writes
final long timeSinceGoodData = getTimeSinceGoodDataReceived();
if (timeSinceGoodData == -1 || timeSinceGoodData > 60 * 1000)
setSnubbed(true);
}
// Remove from the tail so remaining indices stay valid during removal.
for (int i = requested.size() - 1; i >= 0; i--) {
final DiskManagerReadRequest request = (DiskManagerReadRequest) requested.remove(i);
manager.requestCanceled(request);
}
} finally {
requested_mon.exit();
}
}
}
Aggregations