Use of com.biglybt.core.disk.DiskManagerReadRequestListener in project BiglyBT by BiglySoftware.
Class DMReaderImpl, method readBlock (asynchronous, listener-based overload).
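The shape of the callback interface can be inferred from the @Override methods implemented in the snippet below. The following is a minimal sketch for orientation only; the authoritative declaration (modifiers, javadoc, any additional members) is com.biglybt.core.disk.DiskManagerReadRequestListener in the BiglyBT sources.

    // Sketch of the listener contract, reconstructed from the overrides used below;
    // not the actual BiglyBT declaration.
    public interface DiskManagerReadRequestListener {
        void readCompleted(DiskManagerReadRequest request, DirectByteBuffer data); // buffer delivered to the requester
        void readFailed(DiskManagerReadRequest request, Throwable cause);          // read could not be satisfied
        int getPriority();                                                          // relative priority (the blocking helper below returns -1)
        void requestExecuted(long bytes);                                           // accounting callback for bytes processed
    }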
@Override
public void readBlock(final DiskManagerReadRequest request, final DiskManagerReadRequestListener _listener) {
    request.requestStarts();

    // wrap the caller's listener so request start/end accounting is always performed
    final DiskManagerReadRequestListener listener = new DiskManagerReadRequestListener() {

        @Override
        public void readCompleted(DiskManagerReadRequest request, DirectByteBuffer data) {
            request.requestEnds(true);
            _listener.readCompleted(request, data);
        }

        @Override
        public void readFailed(DiskManagerReadRequest request, Throwable cause) {
            request.requestEnds(false);
            _listener.readFailed(request, cause);
        }

        @Override
        public int getPriority() {
            return (_listener.getPriority());
        }

        @Override
        public void requestExecuted(long bytes) {
            _listener.requestExecuted(bytes);
        }
    };

    DirectByteBuffer buffer = null;

    try {
        int length = request.getLength();

        buffer = DirectByteBufferPool.getBuffer(DirectByteBuffer.AL_DM_READ, length);

        if (buffer == null) {
            // Fix for bug #804874
            Debug.out("DiskManager::readBlock:: ByteBufferPool returned null buffer");
            listener.readFailed(request, new Exception("Out of memory"));
            return;
        }

        int pieceNumber = request.getPieceNumber();
        int offset = request.getOffset();

        DMPieceList pieceList = disk_manager.getPieceList(pieceNumber);

        if (pieceList.size() == 0) {
            Debug.out("no pieceList entries for " + pieceNumber);
            listener.readCompleted(request, buffer);
            return;
        }

        long previousFilesLength = 0;
        int currentFile = 0;
        long fileOffset = pieceList.get(0).getOffset();

        // advance to the file entry that contains the requested offset within the piece
        while (currentFile < pieceList.size() && pieceList.getCumulativeLengthToPiece(currentFile) < offset) {
            previousFilesLength = pieceList.getCumulativeLengthToPiece(currentFile);
            currentFile++;
            fileOffset = 0;
        }

        // update the offset (we're in the middle of a file)
        fileOffset += offset - previousFilesLength;

        List chunks = new ArrayList();
        int buffer_position = 0;

        while (buffer_position < length && currentFile < pieceList.size()) {
            DMPieceMapEntry map_entry = pieceList.get(currentFile);
            int length_available = map_entry.getLength() - (int) (fileOffset - map_entry.getOffset());

            // explicitly limit the read size to the proper length, rather than relying on the
            // underlying file being correctly sized (see the long note in DMWriterAndCheckerImpl::checkPiece)
            int entry_read_limit = buffer_position + length_available;

            // now bring it down to the required read length if that is shorter than this chunk of data
            entry_read_limit = Math.min(length, entry_read_limit);

            // this chunk denotes a read up to buffer offset "entry_read_limit"
            chunks.add(new Object[] { map_entry.getFile().getCacheFile(), new Long(fileOffset), new Integer(entry_read_limit) });

            buffer_position = entry_read_limit;
            currentFile++;
            fileOffset = 0;
        }

        if (chunks.size() == 0) {
            Debug.out("no chunk reads for " + pieceNumber);
            listener.readCompleted(request, buffer);
            return;
        }

        // this is where we go async and need to start counting requests for the sake
        // of shutting down tidily

        // we have to wrap the request because we can validly have more than one for the same
        // piece/offset/length, and the request type itself overrides object equivalence based on these...
        final Object[] request_wrapper = { request };

        DiskManagerReadRequestListener l = new DiskManagerReadRequestListener() {

            @Override
            public void readCompleted(DiskManagerReadRequest request, DirectByteBuffer data) {
                complete();
                listener.readCompleted(request, data);
            }

            @Override
            public void readFailed(DiskManagerReadRequest request, Throwable cause) {
                complete();
                listener.readFailed(request, cause);
            }

            @Override
            public int getPriority() {
                return (_listener.getPriority());
            }

            @Override
            public void requestExecuted(long bytes) {
                _listener.requestExecuted(bytes);
            }

            protected void complete() {
                try {
                    this_mon.enter();
                    async_reads--;
                    if (!read_requests.remove(request_wrapper)) {
                        Debug.out("request not found");
                    }
                    if (stopped) {
                        async_read_sem.release();
                    }
                } finally {
                    this_mon.exit();
                }
            }
        };

        try {
            this_mon.enter();
            if (stopped) {
                buffer.returnToPool();
                listener.readFailed(request, new Exception("Disk reader has been stopped"));
                return;
            }
            async_reads++;
            read_requests.add(request_wrapper);
        } finally {
            this_mon.exit();
        }

        new requestDispatcher(request, l, buffer, chunks);

    } catch (Throwable e) {
        if (buffer != null) {
            buffer.returnToPool();
        }
        disk_manager.setFailed("Disk read error - " + Debug.getNestedExceptionMessage(e));
        Debug.printStackTrace(e);
        listener.readFailed(request, e);
    }
}
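The middle of the method maps a piece-relative (offset, length) read onto per-file chunks, each recorded as a limit within the destination buffer. The stand-alone sketch below illustrates that arithmetic with invented names (ChunkSplitExample, split, entryLengths, none of which exist in BiglyBT); it simplifies the real code by ignoring the cache-file handles and the first entry's starting offset, so treat it as an illustration of the technique, not as BiglyBT code.

    import java.util.ArrayList;
    import java.util.List;

    public class ChunkSplitExample {

        // Given the per-file entry lengths within a piece and a piece-relative (offset, length)
        // read, emit { fileIndex, fileOffset, bufferLimit } triples, mirroring the chunk loop above.
        static List<long[]> split(long[] entryLengths, int offset, int length) {
            List<long[]> chunks = new ArrayList<>();
            long previousFilesLength = 0;
            int currentFile = 0;
            // skip entries that end at or before the requested offset
            while (currentFile < entryLengths.length
                    && previousFilesLength + entryLengths[currentFile] <= offset) {
                previousFilesLength += entryLengths[currentFile++];
            }
            long fileOffset = offset - previousFilesLength; // start inside the current entry
            int bufferPosition = 0;
            while (bufferPosition < length && currentFile < entryLengths.length) {
                int available = (int) (entryLengths[currentFile] - fileOffset);
                int limit = Math.min(length, bufferPosition + available); // cap at the requested length
                chunks.add(new long[] { currentFile, fileOffset, limit });
                bufferPosition = limit;
                currentFile++;
                fileOffset = 0; // subsequent entries are read from their start
            }
            return chunks;
        }

        public static void main(String[] args) {
            // a 16 KiB read starting 10 KiB into a piece whose first two file entries
            // contribute 12 KiB and 20 KiB of that piece
            for (long[] c : split(new long[] { 12288, 20480 }, 10240, 16384)) {
                System.out.println("file=" + c[0] + " fileOffset=" + c[1] + " bufferLimit=" + c[2]);
            }
            // prints: file=0 fileOffset=10240 bufferLimit=2048
            //         file=1 fileOffset=0 bufferLimit=16384
        }
    }

The other notable pattern in the method is the second wrapper listener: its complete() bookkeeping (the async_reads counter, the read_requests set, and the async_read_sem release when stopped) is what lets shutdown wait for in-flight reads to drain before the reader is torn down.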
Use of com.biglybt.core.disk.DiskManagerReadRequestListener in project BiglyBT by BiglySoftware.
Class DMReaderImpl, method readBlock (blocking overload that returns the buffer directly).
// returns null if the read can't be performed
@Override
public DirectByteBuffer readBlock(int pieceNumber, int offset, int length) {
    DiskManagerReadRequest request = createReadRequest(pieceNumber, offset, length);

    final AESemaphore sem = new AESemaphore("DMReader:readBlock");
    final DirectByteBuffer[] result = { null };

    readBlock(request, new DiskManagerReadRequestListener() {

        @Override
        public void readCompleted(DiskManagerReadRequest request, DirectByteBuffer data) {
            result[0] = data;
            sem.release();
        }

        @Override
        public void readFailed(DiskManagerReadRequest request, Throwable cause) {
            sem.release();
        }

        @Override
        public int getPriority() {
            return (-1);
        }

        @Override
        public void requestExecuted(long bytes) {
        }
    });

    sem.reserve();

    return (result[0]);
}
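A caller of this blocking overload only has to check for a null result and, when it does get a buffer, hand it back to the pool once finished. The fragment below is a hypothetical usage sketch: the reader variable and the surrounding error handling are assumptions, while returnToPool() is the same pool-return call used in the asynchronous method above.

    // Hypothetical caller; "reader" is assumed to be the object exposing readBlock(int, int, int).
    int pieceNumber = 0;
    int blockLength = 16 * 1024;
    DirectByteBuffer block = reader.readBlock(pieceNumber, 0, blockLength);
    if (block == null) {
        // the read could not be performed (reader stopped, allocation failure, I/O error)
    } else {
        try {
            // ... consume the buffer contents ...
        } finally {
            block.returnToPool(); // buffers obtained from DirectByteBufferPool must be handed back
        }
    }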