Usage of com.biglybt.core.disk.impl.piecemapper.DMPieceMapEntry in the BiglyBT project by BiglySoftware.
From class PieceInfoView, method createPeerInfoPanel:
/**
 * Builds the piece-info panel: a scrollable canvas that paints one coloured
 * cell per piece (backed by the cached {@code img}), a top status label, a
 * per-piece context menu (force / reset / sequential download), a colour
 * legend and a small font used for cell text.
 *
 * Must be called on the SWT UI thread (creates widgets).
 *
 * @param parent composite the panel is created under
 */
private void createPeerInfoPanel(Composite parent) {
    GridLayout layout;
    GridData gridData;
    // Peer Info section contains
    // - Peer's Block display
    // - Peer's Datarate
    pieceInfoComposite = new Composite(parent, SWT.NONE);
    layout = new GridLayout();
    layout.numColumns = 2;
    layout.horizontalSpacing = 0;
    layout.verticalSpacing = 0;
    layout.marginHeight = 0;
    layout.marginWidth = 0;
    pieceInfoComposite.setLayout(layout);
    gridData = new GridData(GridData.FILL, GridData.FILL, true, true);
    pieceInfoComposite.setLayoutData(gridData);
    imageLabel = new Label(pieceInfoComposite, SWT.NULL);
    gridData = new GridData();
    imageLabel.setLayoutData(gridData);
    topLabel = new Label(pieceInfoComposite, SWT.NULL);
    topLabel.setBackground(Colors.white);
    gridData = new GridData(SWT.FILL, SWT.DEFAULT, false, false);
    topLabel.setLayoutData(gridData);
    // Scrollable host for the block-display canvas; spans both grid columns.
    sc = new ScrolledComposite(pieceInfoComposite, SWT.V_SCROLL);
    sc.setExpandHorizontal(true);
    sc.setExpandVertical(true);
    layout = new GridLayout();
    layout.horizontalSpacing = 0;
    layout.verticalSpacing = 0;
    layout.marginHeight = 0;
    layout.marginWidth = 0;
    sc.setLayout(layout);
    gridData = new GridData(GridData.FILL, GridData.FILL, true, true, 2, 1);
    sc.setLayoutData(gridData);
    // Scroll by one cell row per scrollbar click.
    sc.getVerticalBar().setIncrement(BLOCK_SIZE);
    pieceInfoCanvas = new Canvas(sc, SWT.NO_REDRAW_RESIZE | SWT.NO_BACKGROUND);
    gridData = new GridData(GridData.FILL, SWT.DEFAULT, true, false);
    pieceInfoCanvas.setLayoutData(gridData);
    pieceInfoCanvas.addPaintListener(new PaintListener() {
        @Override
        public void paintControl(PaintEvent e) {
            if (e.width <= 0 || e.height <= 0)
                return;
            try {
                Rectangle bounds = (img == null) ? null : img.getBounds();
                if (bounds == null || dlm == null || dlm.getPeerManager() == null) {
                    // Nothing to display yet: just clear the damaged region.
                    e.gc.fillRectangle(e.x, e.y, e.width, e.height);
                } else {
                    // Clear any damaged area lying outside the cached image...
                    if (e.x + e.width > bounds.width)
                        e.gc.fillRectangle(bounds.width, e.y, e.x + e.width - bounds.width + 1, e.height);
                    if (e.y + e.height > bounds.height)
                        e.gc.fillRectangle(e.x, bounds.height, e.width, e.y + e.height - bounds.height + 1);
                    // ...then blit the overlapping portion of the cached image 1:1.
                    int width = Math.min(e.width, bounds.width - e.x);
                    int height = Math.min(e.height, bounds.height - e.y);
                    e.gc.drawImage(img, e.x, e.y, width, height, e.x, e.y, width, height);
                }
            } catch (Exception ex) {
                // ignored: transient paint failures (e.g. image disposed mid-paint)
            }
        }
    });
    pieceInfoCanvas.addListener(SWT.KeyDown, new DoNothingListener());
    pieceInfoCanvas.addListener(SWT.Resize, new Listener() {
        @Override
        public void handleEvent(Event e) {
            // Coalesce bursts of resize events: only one refresh in flight.
            synchronized (PieceInfoView.this) {
                if (alreadyFilling) {
                    return;
                }
                alreadyFilling = true;
            }
            // wrap in asyncexec because sc.setMinWidth (called later) doesn't work
            // too well inside a resize (the canvas won't size isn't always updated)
            Utils.execSWTThreadLater(0, new AERunnable() {
                @Override
                public void runSupport() {
                    if (img != null) {
                        // Only redraw when the resize changed the column count.
                        int iOldColCount = img.getBounds().width / BLOCK_SIZE;
                        int iNewColCount = pieceInfoCanvas.getClientArea().width / BLOCK_SIZE;
                        if (iOldColCount != iNewColCount)
                            refreshInfoCanvas();
                    }
                    synchronized (PieceInfoView.this) {
                        alreadyFilling = false;
                    }
                }
            });
        }
    });
    sc.setContent(pieceInfoCanvas);
    pieceInfoCanvas.addMouseMoveListener(new MouseMoveListener() {
        @Override
        public void mouseMove(MouseEvent event) {
            // Moving off the selected piece cancels any pending "show file" action.
            int piece_number = getPieceNumber(event.x, event.y);
            if (piece_number != selectedPiece) {
                selectedPieceShowFilePending = -1;
            }
        }
    });
    pieceInfoCanvas.addMouseTrackListener(new MouseTrackAdapter() {
        @Override
        public void mouseHover(MouseEvent event) {
            int piece_number = getPieceNumber(event.x, event.y);
            if (piece_number >= 0) {
                selectedPiece = piece_number;
                selectedPieceShowFilePending = piece_number;
                // After hovering for 1s on the same piece, switch to file-highlight mode.
                SimpleTimer.addEvent("ShowFile", SystemTime.getOffsetTime(1000), new TimerEventPerformer() {
                    @Override
                    public void perform(TimerEvent event) {
                        Utils.execSWTThread(new Runnable() {
                            @Override
                            public void run() {
                                if (selectedPieceShowFilePending == piece_number) {
                                    selectedPieceShowFile = true;
                                    refreshInfoCanvas();
                                }
                            }
                        });
                    }
                });
                refreshInfoCanvas();
                // NOTE(review): dlm / getDiskManager() / getPeerManager() are used
                // unchecked here; presumably getPieceNumber() only returns >= 0 when
                // a download is present - confirm, otherwise this can NPE.
                DiskManager disk_manager = dlm.getDiskManager();
                PEPeerManager pm = dlm.getPeerManager();
                DiskManagerPiece dm_piece = disk_manager.getPiece(piece_number);
                PEPiece pm_piece = pm.getPiece(piece_number);
                // Build the status text shown in the top label.
                String text = "Piece " + piece_number + ": " + dm_piece.getString();
                if (pm_piece != null) {
                    text += ", active: " + pm_piece.getString();
                } else {
                    if (dm_piece.isNeeded() && !dm_piece.isDone()) {
                        text += ", inactive: " + pm.getPiecePicker().getPieceString(piece_number);
                    }
                }
                text += " - ";
                // Append the name of every file this piece spans.
                DMPieceList l = disk_manager.getPieceList(piece_number);
                for (int i = 0; i < l.size(); i++) {
                    DMPieceMapEntry entry = l.get(i);
                    DiskManagerFileInfo info = entry.getFile();
                    text += (i == 0 ? "" : "; ") + info.getFile(true).getName();
                }
                topLabelRHS = text;
            } else {
                topLabelRHS = "";
            }
            updateTopLabel();
        }
        @Override
        public void mouseExit(MouseEvent e) {
            // Clear selection/highlight when the pointer leaves the canvas.
            selectedPiece = -1;
            selectedPieceShowFile = false;
            refreshInfoCanvas();
        }
    });
    final Menu menu = new Menu(pieceInfoCanvas.getShell(), SWT.POP_UP);
    pieceInfoCanvas.setMenu(menu);
    // Record which piece the context menu was opened over; read back in buildMenu.
    pieceInfoCanvas.addListener(SWT.MenuDetect, new Listener() {
        @Override
        public void handleEvent(Event event) {
            Point pt = pieceInfoCanvas.toControl(event.x, event.y);
            int piece_number = getPieceNumber(pt.x, pt.y);
            menu.setData("pieceNumber", piece_number);
        }
    });
    MenuBuildUtils.addMaintenanceListenerForMenu(menu, new MenuBuildUtils.MenuBuilder() {
        @Override
        public void buildMenu(Menu menu, MenuEvent event) {
            Integer pn = (Integer) menu.getData("pieceNumber");
            if (pn != null && pn != -1) {
                DownloadManager download_manager = dlm;
                if (download_manager == null) {
                    return;
                }
                DiskManager disk_manager = download_manager.getDiskManager();
                PEPeerManager peer_manager = download_manager.getPeerManager();
                if (disk_manager == null || peer_manager == null) {
                    return;
                }
                final PiecePicker picker = peer_manager.getPiecePicker();
                DiskManagerPiece[] dm_pieces = disk_manager.getPieces();
                PEPiece[] pe_pieces = peer_manager.getPieces();
                final int piece_number = pn;
                final DiskManagerPiece dm_piece = dm_pieces[piece_number];
                final PEPiece pm_piece = pe_pieces[piece_number];
                // "Force piece": only meaningful while the piece is incomplete.
                final MenuItem force_piece = new MenuItem(menu, SWT.CHECK);
                Messages.setLanguageText(force_piece, "label.force.piece");
                boolean done = dm_piece.isDone();
                force_piece.setEnabled(!done);
                if (!done) {
                    force_piece.setSelection(picker.isForcePiece(piece_number));
                    force_piece.addSelectionListener(new SelectionListenerForcePiece(picker, piece_number, force_piece));
                }
                // "Reset piece": available once any data has been written for it.
                final MenuItem reset_piece = new MenuItem(menu, SWT.PUSH);
                Messages.setLanguageText(reset_piece, "label.reset.piece");
                boolean can_reset = dm_piece.isDone() || dm_piece.getNbWritten() > 0;
                reset_piece.setEnabled(can_reset);
                reset_piece.addSelectionListener(new SelectionListenerResetPiece(dm_piece, pm_piece));
                new MenuItem(menu, SWT.SEPARATOR);
                // Sequential download controls (ascending / descending / clear).
                final MenuItem seq_asc = new MenuItem(menu, SWT.PUSH);
                Messages.setLanguageText(seq_asc, "label.seq.asc.from", new String[] { String.valueOf(piece_number) });
                seq_asc.addSelectionListener(new SelectionAdapter() {
                    @Override
                    public void widgetSelected(SelectionEvent e) {
                        download_manager.getDownloadState().setFlag(DownloadManagerState.FLAG_SEQUENTIAL_DOWNLOAD, false);
                        picker.setReverseBlockOrder(false);
                        picker.setSequentialAscendingFrom(piece_number);
                    }
                });
                final MenuItem seq_desc = new MenuItem(menu, SWT.PUSH);
                Messages.setLanguageText(seq_desc, "label.seq.desc.from", new String[] { String.valueOf(piece_number) });
                seq_desc.addSelectionListener(new SelectionAdapter() {
                    @Override
                    public void widgetSelected(SelectionEvent e) {
                        download_manager.getDownloadState().setFlag(DownloadManagerState.FLAG_SEQUENTIAL_DOWNLOAD, false);
                        picker.setReverseBlockOrder(true);
                        picker.setSequentialDescendingFrom(piece_number);
                    }
                });
                final MenuItem seq_clear = new MenuItem(menu, SWT.PUSH);
                Messages.setLanguageText(seq_clear, "label.seq.clear", new String[] { String.valueOf(piece_number) });
                seq_clear.addSelectionListener(new SelectionAdapter() {
                    @Override
                    public void widgetSelected(SelectionEvent e) {
                        download_manager.getDownloadState().setFlag(DownloadManagerState.FLAG_SEQUENTIAL_DOWNLOAD, false);
                        picker.setReverseBlockOrder(false);
                        picker.clearSequential();
                    }
                });
            }
        }
    });
    Legend.createLegendComposite(pieceInfoComposite, blockColors, new String[] { "PiecesView.BlockView.Have", "PiecesView.BlockView.NoHave", "PeersView.BlockView.Transfer", "PeersView.BlockView.NextRequest", "PeersView.BlockView.AvailCount", "PeersView.BlockView.ShowFile" }, new GridData(SWT.FILL, SWT.DEFAULT, true, false, 2, 1));
    // Derive a font whose height is ~10 pixels regardless of display DPI
    // (points = pixels * 72 / DPI).
    int iFontPixelsHeight = 10;
    int iFontPointHeight = (iFontPixelsHeight * 72) / Utils.getDPIRaw(pieceInfoCanvas.getDisplay()).y;
    Font f = pieceInfoCanvas.getFont();
    FontData[] fontData = f.getFontData();
    fontData[0].setHeight(iFontPointHeight);
    font = new Font(pieceInfoCanvas.getDisplay(), fontData);
}
Usage of com.biglybt.core.disk.impl.piecemapper.DMPieceMapEntry in the BiglyBT project by BiglySoftware.
From class RDResumeHandler, method checkAllPieces:
/**
 * Establishes the done/not-done state of every piece when a download starts.
 * If fast-resume data is available and enabled, piece states are restored from
 * it (with sanity checks against current file sizes) and only the pieces that
 * need it are hash-rechecked; otherwise every piece whose files are large
 * enough is hash-checked. Pieces that fail their check are, where possible,
 * relocated to the piece whose hash they actually match (handles files that
 * were shuffled on disk). Blocks until all queued checks complete or the
 * handler is stopped.
 *
 * @param newfiles true when the files were newly allocated; disables use of
 *                 any existing fast-resume data
 */
public void checkAllPieces(boolean newfiles) {
    // long start = System.currentTimeMillis();
    DiskManagerRecheckInstance recheck_inst = disk_manager.getRecheckScheduler().register(disk_manager, false);
    int overall_piece_size = disk_manager.getPieceLength();
    // Throttle concurrent checks: 1 outstanding for very large pieces, else 2.
    final AESemaphore run_sem = new AESemaphore("RDResumeHandler::checkAllPieces:runsem", overall_piece_size > 32 * 1024 * 1024 ? 1 : 2);
    final List<DiskManagerCheckRequest> failed_pieces = new ArrayList<>();
    try {
        boolean resume_data_complete = false;
        try {
            check_in_progress = true;
            boolean resumeEnabled = use_fast_resume;
            if (newfiles) {
                resumeEnabled = false;
            }
            // Released once per completed/cancelled/failed check request.
            final AESemaphore pending_checks_sem = new AESemaphore("RD:PendingChecks");
            int pending_check_num = 0;
            DiskManagerPiece[] pieces = disk_manager.getPieces();
            // calculate the current file sizes up front for performance reasons
            DiskManagerFileInfo[] files = disk_manager.getFiles();
            Map file_sizes = new HashMap();
            for (int i = 0; i < files.length; i++) {
                try {
                    Long len = new Long(((DiskManagerFileInfoImpl) files[i]).getCacheFile().getLength());
                    file_sizes.put(files[i], len);
                } catch (CacheFileManagerException e) {
                    Debug.printStackTrace(e);
                }
            }
            if (resumeEnabled) {
                // --- Fast-resume path: restore per-piece state from saved data ---
                boolean resumeValid = false;
                byte[] resume_pieces = null;
                Map partialPieces = null;
                Map resume_data = getResumeData();
                if (resume_data != null) {
                    try {
                        resume_pieces = (byte[]) resume_data.get("resume data");
                        if (resume_pieces != null) {
                            if (resume_pieces.length != pieces.length) {
                                Debug.out("Resume data array length mismatch: " + resume_pieces.length + "/" + pieces.length);
                                resume_pieces = null;
                            }
                        }
                        partialPieces = (Map) resume_data.get("blocks");
                        resumeValid = ((Long) resume_data.get("valid")).intValue() == 1;
                        if (isTorrentResumeDataComplete(disk_manager.getDownloadManager().getDownloadState(), resume_data)) {
                            resume_data_complete = true;
                        } else {
                            // set it so that if we crash the NOT_DONE pieces will be
                            // rechecked
                            resume_data.put("valid", new Long(0));
                            saveResumeData(resume_data);
                        }
                    } catch (Exception ignore) {
                        // ignore.printStackTrace();
                    }
                }
                if (resume_pieces == null) {
                    // No usable resume array: force a full recheck of everything.
                    check_is_full_check = true;
                    resumeValid = false;
                    resume_pieces = new byte[pieces.length];
                    Arrays.fill(resume_pieces, PIECE_RECHECK_REQUIRED);
                }
                check_resume_was_valid = resumeValid;
                boolean recheck_all = use_fast_resume_recheck_all;
                if (!recheck_all) {
                    // override if not much left undone
                    // (estimate uses the uniform piece length, so the final short
                    // piece is over-counted slightly - harmless for a threshold)
                    long total_not_done = 0;
                    int piece_size = disk_manager.getPieceLength();
                    for (int i = 0; i < pieces.length; i++) {
                        if (resume_pieces[i] != PIECE_DONE) {
                            total_not_done += piece_size;
                        }
                    }
                    if (total_not_done < 64 * 1024 * 1024) {
                        recheck_all = true;
                    }
                }
                if (Logger.isEnabled()) {
                    // Log a summary of the restored state distribution.
                    int total_not_done = 0;
                    int total_done = 0;
                    int total_started = 0;
                    int total_recheck = 0;
                    for (int i = 0; i < pieces.length; i++) {
                        byte piece_state = resume_pieces[i];
                        if (piece_state == PIECE_NOT_DONE) {
                            total_not_done++;
                        } else if (piece_state == PIECE_DONE) {
                            total_done++;
                        } else if (piece_state == PIECE_STARTED) {
                            total_started++;
                        } else {
                            total_recheck++;
                        }
                    }
                    String str = "valid=" + resumeValid + ",not done=" + total_not_done + ",done=" + total_done + ",started=" + total_started + ",recheck=" + total_recheck + ",rc all=" + recheck_all + ",full=" + check_is_full_check;
                    Logger.log(new LogEvent(disk_manager, LOGID, str));
                }
                for (int i = 0; i < pieces.length; i++) {
                    check_position = i;
                    DiskManagerPiece dm_piece = pieces[i];
                    disk_manager.setPercentDone(((i + 1) * 1000) / disk_manager.getNbPieces());
                    boolean pieceCannotExist = false;
                    byte piece_state = resume_pieces[i];
                    if (piece_state == PIECE_DONE || !resumeValid || recheck_all) {
                        // at least check that file sizes are OK for this piece to be valid
                        DMPieceList list = disk_manager.getPieceList(i);
                        for (int j = 0; j < list.size(); j++) {
                            DMPieceMapEntry entry = list.get(j);
                            Long file_size = (Long) file_sizes.get(entry.getFile());
                            if (file_size == null) {
                                piece_state = PIECE_NOT_DONE;
                                pieceCannotExist = true;
                                if (Logger.isEnabled())
                                    Logger.log(new LogEvent(disk_manager, LOGID, LogEvent.LT_WARNING, "Piece #" + i + ": file is missing, " + "fails re-check."));
                                break;
                            }
                            long expected_size = entry.getOffset() + entry.getLength();
                            if (file_size.longValue() < expected_size) {
                                piece_state = PIECE_NOT_DONE;
                                pieceCannotExist = true;
                                if (Logger.isEnabled())
                                    Logger.log(new LogEvent(disk_manager, LOGID, LogEvent.LT_WARNING, "Piece #" + i + ": file is too small, fails re-check. File size = " + file_size + ", piece needs " + expected_size));
                                break;
                            }
                        }
                    }
                    if (piece_state == PIECE_DONE) {
                        dm_piece.setDone(true);
                    } else if (piece_state == PIECE_NOT_DONE && !recheck_all) {
                        // if the piece isn't done and we haven't been asked to recheck all pieces
                        // on restart (only started pieces) then just set as not done
                    } else {
                        // if the resume data is invalid or explicit recheck needed
                        if (pieceCannotExist) {
                            dm_piece.setDone(false);
                        } else if (piece_state == PIECE_RECHECK_REQUIRED || !resumeValid) {
                            // Throttle, then wait for scheduler permission (or stop).
                            run_sem.reserve();
                            while (!stopped) {
                                if (recheck_inst.getPermission()) {
                                    break;
                                }
                            }
                            if (stopped) {
                                break;
                            } else {
                                try {
                                    DiskManagerCheckRequest request = disk_manager.createCheckRequest(i, null);
                                    request.setLowPriority(true);
                                    checker.enqueueCheckRequest(request, new DiskManagerCheckRequestListener() {
                                        @Override
                                        public void checkCompleted(DiskManagerCheckRequest request, boolean passed) {
                                            if (TEST_RECHECK_FAILURE_HANDLING && (int) (Math.random() * 10) == 0) {
                                                disk_manager.getPiece(request.getPieceNumber()).setDone(false);
                                                passed = false;
                                            }
                                            if (!passed) {
                                                // Collected for possible relocation below.
                                                synchronized (failed_pieces) {
                                                    failed_pieces.add(request);
                                                }
                                            }
                                            complete();
                                        }
                                        @Override
                                        public void checkCancelled(DiskManagerCheckRequest request) {
                                            complete();
                                        }
                                        @Override
                                        public void checkFailed(DiskManagerCheckRequest request, Throwable cause) {
                                            complete();
                                        }
                                        protected void complete() {
                                            run_sem.release();
                                            pending_checks_sem.release();
                                        }
                                    });
                                    pending_check_num++;
                                } catch (Throwable e) {
                                    Debug.printStackTrace(e);
                                }
                            }
                        }
                    }
                }
                // Wait for all queued checks to finish.
                while (pending_check_num > 0) {
                    pending_checks_sem.reserve();
                    pending_check_num--;
                }
                if (partialPieces != null) {
                    // Restore per-block written state for partially-downloaded pieces.
                    Iterator iter = partialPieces.entrySet().iterator();
                    while (iter.hasNext()) {
                        Map.Entry key = (Map.Entry) iter.next();
                        int pieceNumber = Integer.parseInt((String) key.getKey());
                        DiskManagerPiece dm_piece = pieces[pieceNumber];
                        if (!dm_piece.isDone()) {
                            List blocks = (List) partialPieces.get(key.getKey());
                            Iterator iterBlock = blocks.iterator();
                            while (iterBlock.hasNext()) {
                                dm_piece.setWritten(((Long) iterBlock.next()).intValue());
                            }
                        }
                    }
                }
            } else {
                // --- No resume data: hash-check every piece that can exist on disk ---
                for (int i = 0; i < pieces.length; i++) {
                    check_position = i;
                    disk_manager.setPercentDone(((i + 1) * 1000) / disk_manager.getNbPieces());
                    boolean pieceCannotExist = false;
                    // check if there is an underlying file for this piece, if not set it to not done
                    DMPieceList list = disk_manager.getPieceList(i);
                    for (int j = 0; j < list.size(); j++) {
                        DMPieceMapEntry entry = list.get(j);
                        Long file_size = (Long) file_sizes.get(entry.getFile());
                        if (file_size == null) {
                            pieceCannotExist = true;
                            break;
                        }
                        long expected_size = entry.getOffset() + entry.getLength();
                        if (file_size.longValue() < expected_size) {
                            pieceCannotExist = true;
                            break;
                        }
                    }
                    if (pieceCannotExist) {
                        disk_manager.getPiece(i).setDone(false);
                        continue;
                    }
                    run_sem.reserve();
                    while (!stopped) {
                        if (recheck_inst.getPermission()) {
                            break;
                        }
                    }
                    if (stopped) {
                        break;
                    }
                    try {
                        DiskManagerCheckRequest request = disk_manager.createCheckRequest(i, null);
                        request.setLowPriority(true);
                        checker.enqueueCheckRequest(request, new DiskManagerCheckRequestListener() {
                            @Override
                            public void checkCompleted(DiskManagerCheckRequest request, boolean passed) {
                                if (TEST_RECHECK_FAILURE_HANDLING && (int) (Math.random() * 10) == 0) {
                                    disk_manager.getPiece(request.getPieceNumber()).setDone(false);
                                    passed = false;
                                }
                                if (!passed) {
                                    synchronized (failed_pieces) {
                                        failed_pieces.add(request);
                                    }
                                }
                                complete();
                            }
                            @Override
                            public void checkCancelled(DiskManagerCheckRequest request) {
                                complete();
                            }
                            @Override
                            public void checkFailed(DiskManagerCheckRequest request, Throwable cause) {
                                complete();
                            }
                            protected void complete() {
                                run_sem.release();
                                pending_checks_sem.release();
                            }
                        });
                        pending_check_num++;
                    } catch (Throwable e) {
                        Debug.printStackTrace(e);
                    }
                }
                while (pending_check_num > 0) {
                    pending_checks_sem.reserve();
                    pending_check_num--;
                }
            }
            if (failed_pieces.size() > 0 && !TEST_RECHECK_FAILURE_HANDLING) {
                // --- Relocation pass: a failed piece whose data matches some OTHER
                // piece's hash is copied to that piece's location and re-checked ---
                byte[][] piece_hashes = disk_manager.getTorrent().getPieces();
                ByteArrayHashMap<Integer> hash_map = new ByteArrayHashMap<>();
                for (int i = 0; i < piece_hashes.length; i++) {
                    hash_map.put(piece_hashes[i], i);
                }
                for (DiskManagerCheckRequest request : failed_pieces) {
                    while (!stopped) {
                        if (recheck_inst.getPermission()) {
                            break;
                        }
                    }
                    if (stopped) {
                        break;
                    }
                    byte[] hash = request.getHash();
                    if (hash != null) {
                        final Integer target_index = hash_map.get(hash);
                        int current_index = request.getPieceNumber();
                        int piece_size = disk_manager.getPieceLength(current_index);
                        // Only relocate when the target is a different, same-sized,
                        // not-yet-done piece.
                        if (target_index != null && target_index != current_index && disk_manager.getPieceLength(target_index) == piece_size && !disk_manager.isDone(target_index)) {
                            // Read current -> write target -> check target; sem makes
                            // the whole pipeline synchronous.
                            final AESemaphore sem = new AESemaphore("PieceReorder");
                            disk_manager.enqueueReadRequest(disk_manager.createReadRequest(current_index, 0, piece_size), new DiskManagerReadRequestListener() {
                                @Override
                                public void readCompleted(DiskManagerReadRequest request, DirectByteBuffer data) {
                                    try {
                                        disk_manager.enqueueWriteRequest(disk_manager.createWriteRequest(target_index, 0, data, null), new DiskManagerWriteRequestListener() {
                                            @Override
                                            public void writeCompleted(DiskManagerWriteRequest request) {
                                                try {
                                                    DiskManagerCheckRequest check_request = disk_manager.createCheckRequest(target_index, null);
                                                    check_request.setLowPriority(true);
                                                    checker.enqueueCheckRequest(check_request, new DiskManagerCheckRequestListener() {
                                                        @Override
                                                        public void checkCompleted(DiskManagerCheckRequest request, boolean passed) {
                                                            sem.release();
                                                        }
                                                        @Override
                                                        public void checkCancelled(DiskManagerCheckRequest request) {
                                                            sem.release();
                                                        }
                                                        @Override
                                                        public void checkFailed(DiskManagerCheckRequest request, Throwable cause) {
                                                            sem.release();
                                                        }
                                                    });
                                                } catch (Throwable e) {
                                                    sem.release();
                                                }
                                            }
                                            @Override
                                            public void writeFailed(DiskManagerWriteRequest request, Throwable cause) {
                                                sem.release();
                                            }
                                        });
                                    } catch (Throwable e) {
                                        sem.release();
                                    }
                                }
                                @Override
                                public void readFailed(DiskManagerReadRequest request, Throwable cause) {
                                    sem.release();
                                }
                                @Override
                                public int getPriority() {
                                    return (-1);
                                }
                                @Override
                                public void requestExecuted(long bytes) {
                                }
                            });
                            sem.reserve();
                        }
                    }
                }
            }
        } finally {
            check_in_progress = false;
        }
        if (!(stopped || resume_data_complete)) {
            // Persist the state we just established.
            try {
                saveResumeData(true);
            } catch (Exception e) {
                Debug.out("Failed to dump initial resume data to disk");
                Debug.printStackTrace(e);
            }
        }
    } catch (Throwable e) {
        // if something went wrong then log and continue.
        Debug.printStackTrace(e);
    } finally {
        recheck_inst.unregister();
        // System.out.println( "Check of '" + disk_manager.getDownloadManager().getDisplayName() + "' completed in " + (System.currentTimeMillis() - start));
    }
}
Usage of com.biglybt.core.disk.impl.piecemapper.DMPieceMapEntry in the BiglyBT project by BiglySoftware.
From class DMReaderImpl, method readBlock:
/**
 * Reads a block of piece data asynchronously. Maps the (piece, offset, length)
 * request onto the underlying cache files it spans, builds a chunk list of
 * (file, file-offset, buffer-limit) triples and hands it to a
 * {@code requestDispatcher} for the actual I/O. The supplied listener is
 * wrapped so that request start/end accounting always fires, and the read is
 * tracked in {@code read_requests} for tidy shutdown.
 *
 * @param request   the read request (piece number, offset, length)
 * @param _listener caller's completion/failure callback
 */
@Override
public void readBlock(final DiskManagerReadRequest request, final DiskManagerReadRequestListener _listener) {
    request.requestStarts();
    // Wrap the caller's listener so requestEnds() is always recorded.
    final DiskManagerReadRequestListener listener = new DiskManagerReadRequestListener() {
        @Override
        public void readCompleted(DiskManagerReadRequest request, DirectByteBuffer data) {
            request.requestEnds(true);
            _listener.readCompleted(request, data);
        }
        @Override
        public void readFailed(DiskManagerReadRequest request, Throwable cause) {
            request.requestEnds(false);
            _listener.readFailed(request, cause);
        }
        @Override
        public int getPriority() {
            return (_listener.getPriority());
        }
        @Override
        public void requestExecuted(long bytes) {
            _listener.requestExecuted(bytes);
        }
    };
    DirectByteBuffer buffer = null;
    try {
        int length = request.getLength();
        buffer = DirectByteBufferPool.getBuffer(DirectByteBuffer.AL_DM_READ, length);
        if (buffer == null) {
            // Fix for bug #804874
            Debug.out("DiskManager::readBlock:: ByteBufferPool returned null buffer");
            listener.readFailed(request, new Exception("Out of memory"));
            return;
        }
        int pieceNumber = request.getPieceNumber();
        int offset = request.getOffset();
        DMPieceList pieceList = disk_manager.getPieceList(pieceNumber);
        if (pieceList.size() == 0) {
            Debug.out("no pieceList entries for " + pieceNumber);
            listener.readCompleted(request, buffer);
            return;
        }
        // Skip files that end before the requested offset within the piece.
        long previousFilesLength = 0;
        int currentFile = 0;
        long fileOffset = pieceList.get(0).getOffset();
        while (currentFile < pieceList.size() && pieceList.getCumulativeLengthToPiece(currentFile) < offset) {
            previousFilesLength = pieceList.getCumulativeLengthToPiece(currentFile);
            currentFile++;
            fileOffset = 0;
        }
        // update the offset (we're in the middle of a file)
        fileOffset += offset - previousFilesLength;
        // Build one chunk per file span; each chunk records the cache file, the
        // start offset within that file, and the buffer position to read up to.
        List chunks = new ArrayList();
        int buffer_position = 0;
        while (buffer_position < length && currentFile < pieceList.size()) {
            DMPieceMapEntry map_entry = pieceList.get(currentFile);
            int length_available = map_entry.getLength() - (int) (fileOffset - map_entry.getOffset());
            // explicitly limit the read size to the proper length, rather than relying on the underlying file being correctly-sized
            // see long DMWriterAndCheckerImpl::checkPiece note
            int entry_read_limit = buffer_position + length_available;
            // now bring down to the required read length if this is shorter than this
            // chunk of data
            entry_read_limit = Math.min(length, entry_read_limit);
            // this chunk denotes a read up to buffer offset "entry_read_limit"
            chunks.add(new Object[] { map_entry.getFile().getCacheFile(), new Long(fileOffset), new Integer(entry_read_limit) });
            buffer_position = entry_read_limit;
            currentFile++;
            fileOffset = 0;
        }
        if (chunks.size() == 0) {
            Debug.out("no chunk reads for " + pieceNumber);
            listener.readCompleted(request, buffer);
            return;
        }
        // this is where we go async and need to start counting requests for the sake
        // of shutting down tidily
        // have to wrap the request as we can validly have >1 for same piece/offset/length and
        // the request type itself overrides object equiv based on this...
        final Object[] request_wrapper = { request };
        DiskManagerReadRequestListener l = new DiskManagerReadRequestListener() {
            @Override
            public void readCompleted(DiskManagerReadRequest request, DirectByteBuffer data) {
                complete();
                listener.readCompleted(request, data);
            }
            @Override
            public void readFailed(DiskManagerReadRequest request, Throwable cause) {
                complete();
                listener.readFailed(request, cause);
            }
            @Override
            public int getPriority() {
                return (_listener.getPriority());
            }
            @Override
            public void requestExecuted(long bytes) {
                _listener.requestExecuted(bytes);
            }
            protected void complete() {
                // Deregister the request; wake a stopping thread if needed.
                try {
                    this_mon.enter();
                    async_reads--;
                    if (!read_requests.remove(request_wrapper)) {
                        Debug.out("request not found");
                    }
                    if (stopped) {
                        async_read_sem.release();
                    }
                } finally {
                    this_mon.exit();
                }
            }
        };
        // Register the request under the monitor before dispatching.
        try {
            this_mon.enter();
            if (stopped) {
                buffer.returnToPool();
                listener.readFailed(request, new Exception("Disk reader has been stopped"));
                return;
            }
            async_reads++;
            read_requests.add(request_wrapper);
        } finally {
            this_mon.exit();
        }
        new requestDispatcher(request, l, buffer, chunks);
    } catch (Throwable e) {
        if (buffer != null) {
            buffer.returnToPool();
        }
        disk_manager.setFailed("Disk read error - " + Debug.getNestedExceptionMessage(e));
        Debug.printStackTrace(e);
        listener.readFailed(request, e);
    }
}
Usage of com.biglybt.core.disk.impl.piecemapper.DMPieceMapEntry in the BiglyBT project by BiglySoftware.
From class DMCheckerImpl, method enqueueCheckRequestSupport:
/**
 * Performs an asynchronous hash check of one piece. Short-circuits to an
 * immediate failure when the backing files are too small to hold the piece or
 * when the piece lies entirely in compact (data-less) storage; otherwise reads
 * the whole piece, hashes it via {@code ConcurrentHasher} and compares against
 * the torrent's expected hash. On success, notifies each spanned cache file
 * that the piece is complete. Exactly one of the listener's
 * checkCompleted / checkCancelled / checkFailed callbacks fires.
 *
 * @param request    the piece-check request
 * @param listener   completion callback
 * @param read_flush whether the underlying piece read should flush the cache
 */
protected void enqueueCheckRequestSupport(final DiskManagerCheckRequest request, final DiskManagerCheckRequestListener listener, boolean read_flush) {
    if (!checking_enabled) {
        // Checking disabled: report success without doing any work.
        listener.checkCompleted(request, true);
        return;
    }
    final int pieceNumber = request.getPieceNumber();
    try {
        final byte[] required_hash = disk_manager.getPieceHash(pieceNumber);
        // quick check that the files that make up this piece are at least big enough
        // to warrant reading the data to check
        // also, if the piece is entirely compact then we can immediately
        // fail as we don't actually have any data for the piece (or can assume we don't)
        // we relax this a bit to catch pieces that are part of compact files with less than
        // three pieces as it is possible that these were once complete and have all their bits
        // living in retained compact areas
        final DMPieceList pieceList = disk_manager.getPieceList(pieceNumber);
        try {
            // there are other comments in the code about the existence of 0 length piece lists
            // just in case these still occur for who knows what reason ensure that a 0 length list
            // causes the code to carry on and do the check (i.e. it is no worse that before this
            // optimisation was added...)
            boolean all_compact = pieceList.size() > 0;
            for (int i = 0; i < pieceList.size(); i++) {
                DMPieceMapEntry piece_entry = pieceList.get(i);
                DiskManagerFileInfoImpl file_info = piece_entry.getFile();
                CacheFile cache_file = file_info.getCacheFile();
                if (cache_file.compareLength(piece_entry.getOffset()) < 0) {
                    // File too short to even reach this piece's data: fail fast.
                    listener.checkCompleted(request, false);
                    return;
                }
                if (all_compact) {
                    int st = cache_file.getStorageType();
                    if ((st != CacheFile.CT_COMPACT && st != CacheFile.CT_PIECE_REORDER_COMPACT) || file_info.getNbPieces() <= 2) {
                        all_compact = false;
                    }
                }
            }
            if (all_compact) {
                // System.out.println( "Piece " + pieceNumber + " is all compact, failing hash check" );
                listener.checkCompleted(request, false);
                return;
            }
        } catch (Throwable e) {
            // we can fail here if the disk manager has been stopped as the cache file length access may be being
            // performed on a "closed" (i.e. un-owned) file
            listener.checkCancelled(request);
            return;
        }
        int this_piece_length = disk_manager.getPieceLength(pieceNumber);
        DiskManagerReadRequest read_request = disk_manager.createReadRequest(pieceNumber, 0, this_piece_length);
        // Count the pending read under the monitor (for tidy shutdown).
        try {
            this_mon.enter();
            if (stopped) {
                listener.checkCancelled(request);
                return;
            }
            async_reads++;
        } finally {
            this_mon.exit();
        }
        read_request.setFlush(read_flush);
        read_request.setUseCache(!request.isAdHoc());
        disk_manager.enqueueReadRequest(read_request, new DiskManagerReadRequestListener() {
            @Override
            public void readCompleted(DiskManagerReadRequest read_request, DirectByteBuffer buffer) {
                // Read accounting done; now register the pending hash check.
                complete();
                try {
                    this_mon.enter();
                    if (stopped) {
                        buffer.returnToPool();
                        listener.checkCancelled(request);
                        return;
                    }
                    async_checks++;
                } finally {
                    this_mon.exit();
                }
                if (buffer.getFlag(DirectByteBuffer.FL_CONTAINS_TRANSIENT_DATA)) {
                    // Transient data can't validate a piece: fail the check.
                    try {
                        buffer.returnToPool();
                        listener.checkCompleted(request, false);
                    } finally {
                        try {
                            this_mon.enter();
                            async_checks--;
                            if (stopped) {
                                async_check_sem.release();
                            }
                        } finally {
                            this_mon.exit();
                        }
                    }
                } else {
                    try {
                        final DirectByteBuffer f_buffer = buffer;
                        ConcurrentHasher.getSingleton().addRequest(buffer.getBuffer(DirectByteBuffer.SS_DW), new ConcurrentHasherRequestListener() {
                            @Override
                            public void complete(ConcurrentHasherRequest hash_request) {
                                // async_result: 1 = success, 2 = hash mismatch, 3 = cancelled
                                // cancelled
                                int async_result = 3;
                                try {
                                    byte[] actual_hash = hash_request.getResult();
                                    if (actual_hash != null) {
                                        request.setHash(actual_hash);
                                        // success
                                        async_result = 1;
                                        for (int i = 0; i < actual_hash.length; i++) {
                                            if (actual_hash[i] != required_hash[i]) {
                                                // failed;
                                                async_result = 2;
                                                break;
                                            }
                                        }
                                    }
                                } finally {
                                    try {
                                        if (async_result == 1) {
                                            // Hash matched: mark the piece complete on each file.
                                            try {
                                                for (int i = 0; i < pieceList.size(); i++) {
                                                    DMPieceMapEntry piece_entry = pieceList.get(i);
                                                    DiskManagerFileInfoImpl file_info = piece_entry.getFile();
                                                    if (file_info.getLength() > 0 || !file_info.isSkipped()) {
                                                        CacheFile cache_file = file_info.getCacheFile();
                                                        if (!read_flush && file_info.getStorageType() == DiskManagerFileInfoImpl.ST_REORDER) {
                                                            // got to ensure written to disk before setting complete as the re-order
                                                            // logic requires this
                                                            cache_file.flushCache(piece_entry.getOffset(), piece_entry.getLength());
                                                        }
                                                        cache_file.setPieceComplete(pieceNumber, f_buffer);
                                                    }
                                                }
                                            } catch (Throwable e) {
                                                f_buffer.returnToPool();
                                                Debug.out(e);
                                                listener.checkFailed(request, e);
                                                return;
                                            }
                                        }
                                        f_buffer.returnToPool();
                                        if (async_result == 1) {
                                            listener.checkCompleted(request, true);
                                        } else if (async_result == 2) {
                                            listener.checkCompleted(request, false);
                                        } else {
                                            listener.checkCancelled(request);
                                        }
                                    } finally {
                                        // Always decrement the pending-check count.
                                        try {
                                            this_mon.enter();
                                            async_checks--;
                                            if (stopped) {
                                                async_check_sem.release();
                                            }
                                        } finally {
                                            this_mon.exit();
                                        }
                                    }
                                }
                            }
                        }, request.isLowPriority());
                    } catch (Throwable e) {
                        Debug.printStackTrace(e);
                        buffer.returnToPool();
                        listener.checkFailed(request, e);
                    }
                }
            }
            @Override
            public void readFailed(DiskManagerReadRequest read_request, Throwable cause) {
                complete();
                listener.checkFailed(request, cause);
            }
            @Override
            public int getPriority() {
                return (checking_read_priority ? 0 : -1);
            }
            @Override
            public void requestExecuted(long bytes) {
            }
            protected void complete() {
                // Deregister the pending read; wake a stopping thread if needed.
                try {
                    this_mon.enter();
                    async_reads--;
                    if (stopped) {
                        async_read_sem.release();
                    }
                } finally {
                    this_mon.exit();
                }
            }
        });
    } catch (Throwable e) {
        disk_manager.setFailed("Piece check error - " + Debug.getNestedExceptionMessage(e));
        Debug.printStackTrace(e);
        listener.checkFailed(request, e);
    }
}
Usage of com.biglybt.core.disk.impl.piecemapper.DMPieceMapEntry in the BiglyBT project by BiglySoftware.
From class DMWriterImpl, method writeBlock:
/**
 * Writes a block of piece data asynchronously. Skips the write (reporting
 * success) if the piece is already marked done; otherwise maps the buffer
 * onto the files the piece spans, builds (file, file-offset, buffer-limit)
 * chunks and dispatches them via {@code requestDispatcher}. The request is
 * tracked in {@code write_requests} for tidy shutdown, and a write failure
 * against an already-done piece is downgraded to success.
 *
 * @param request   the write request (piece number, offset, buffer)
 * @param _listener caller's completion/failure callback
 */
@Override
public void writeBlock(final DiskManagerWriteRequest request, final DiskManagerWriteRequestListener _listener) {
    request.requestStarts();
    // Wrap the caller's listener so requestEnds() is always recorded.
    final DiskManagerWriteRequestListener listener = new DiskManagerWriteRequestListener() {
        @Override
        public void writeCompleted(DiskManagerWriteRequest request) {
            request.requestEnds(true);
            _listener.writeCompleted(request);
        }
        @Override
        public void writeFailed(DiskManagerWriteRequest request, Throwable cause) {
            request.requestEnds(false);
            _listener.writeFailed(request, cause);
        }
    };
    try {
        int pieceNumber = request.getPieceNumber();
        DirectByteBuffer buffer = request.getBuffer();
        int offset = request.getOffset();
        // Do not allow to write in a piece marked as done. we can get here if
        final DiskManagerPiece dmPiece = disk_manager.getPieces()[pieceNumber];
        if (dmPiece.isDone()) {
            // Debug.out( "write: piece already done (" + request.getPieceNumber() + "/" + request.getOffset());
            buffer.returnToPool();
            // XXX: no writing was done; is this neccesary for complete()?
            listener.writeCompleted(request);
        } else {
            int buffer_position = buffer.position(DirectByteBuffer.SS_DW);
            int buffer_limit = buffer.limit(DirectByteBuffer.SS_DW);
            // final long write_length = buffer_limit - buffer_position;
            // Skip the piece-map entries that end before the write offset.
            int previousFilesLength = 0;
            int currentFile = 0;
            DMPieceList pieceList = disk_manager.getPieceList(pieceNumber);
            DMPieceMapEntry current_piece = pieceList.get(currentFile);
            long fileOffset = current_piece.getOffset();
            while ((previousFilesLength + current_piece.getLength()) < offset) {
                previousFilesLength += current_piece.getLength();
                currentFile++;
                fileOffset = 0;
                current_piece = pieceList.get(currentFile);
            }
            // Build one chunk per file span: (file, file position, buffer limit).
            List chunks = new ArrayList();
            while (buffer_position < buffer_limit) {
                current_piece = pieceList.get(currentFile);
                long file_limit = buffer_position + ((current_piece.getFile().getLength() - current_piece.getOffset()) - (offset - previousFilesLength));
                if (file_limit > buffer_limit) {
                    file_limit = buffer_limit;
                }
                if (file_limit > buffer_position) {
                    long file_pos = fileOffset + (offset - previousFilesLength);
                    chunks.add(new Object[] { current_piece.getFile(), new Long(file_pos), new Integer((int) file_limit) });
                    buffer_position = (int) file_limit;
                }
                currentFile++;
                fileOffset = 0;
                // Setting previousFilesLength to offset zeroes the
                // (offset - previousFilesLength) adjustment for all later files.
                previousFilesLength = offset;
            }
            DiskManagerWriteRequestListener l = new DiskManagerWriteRequestListener() {
                @Override
                public void writeCompleted(DiskManagerWriteRequest request) {
                    complete();
                    listener.writeCompleted(request);
                }
                @Override
                public void writeFailed(DiskManagerWriteRequest request, Throwable cause) {
                    complete();
                    if (dmPiece.isDone()) {
                        // Piece became done while the write was queued: treat as success.
                        if (Logger.isEnabled()) {
                            Logger.log(new LogEvent(disk_manager, LOGID, "Piece " + dmPiece.getPieceNumber() + " write failed but already marked as done"));
                        }
                        listener.writeCompleted(request);
                    } else {
                        disk_manager.setFailed("Disk write error - " + Debug.getNestedExceptionMessage(cause));
                        Debug.printStackTrace(cause);
                        listener.writeFailed(request, cause);
                    }
                }
                protected void complete() {
                    // Deregister the request; wake a stopping thread if needed.
                    try {
                        this_mon.enter();
                        async_writes--;
                        if (!write_requests.remove(request)) {
                            Debug.out("request not found");
                        }
                        if (stopped) {
                            async_write_sem.release();
                        }
                    } finally {
                        this_mon.exit();
                    }
                }
            };
            // Register the request under the monitor before dispatching.
            try {
                this_mon.enter();
                if (stopped) {
                    buffer.returnToPool();
                    listener.writeFailed(request, new Exception("Disk writer has been stopped"));
                    return;
                } else {
                    async_writes++;
                    write_requests.add(request);
                }
            } finally {
                this_mon.exit();
            }
            new requestDispatcher(request, l, buffer, chunks);
        }
    } catch (Throwable e) {
        request.getBuffer().returnToPool();
        disk_manager.setFailed("Disk write error - " + Debug.getNestedExceptionMessage(e));
        Debug.printStackTrace(e);
        listener.writeFailed(request, e);
    }
}
Aggregations: end of collected DMPieceMapEntry usage examples.