Use of com.biglybt.core.disk.impl.DiskManagerFileInfoImpl in project BiglyBT by BiglySoftware.
In class RDResumeHandler, the method checkAllPieces:
public void checkAllPieces(boolean newfiles) {
// long start = System.currentTimeMillis();
DiskManagerRecheckInstance recheck_inst = disk_manager.getRecheckScheduler().register(disk_manager, false);
int overall_piece_size = disk_manager.getPieceLength();
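// the run semaphore bounds concurrent checks: one at a time for very large
// (> 32 MiB) pieces, two otherwise (presumably to cap memory/IO per check)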
final AESemaphore run_sem = new AESemaphore("RDResumeHandler::checkAllPieces:runsem", overall_piece_size > 32 * 1024 * 1024 ? 1 : 2);
final List<DiskManagerCheckRequest> failed_pieces = new ArrayList<>();
try {
boolean resume_data_complete = false;
try {
check_in_progress = true;
boolean resumeEnabled = use_fast_resume;
if (newfiles) {
resumeEnabled = false;
}
final AESemaphore pending_checks_sem = new AESemaphore("RD:PendingChecks");
int pending_check_num = 0;
DiskManagerPiece[] pieces = disk_manager.getPieces();
// calculate the current file sizes up front for performance reasons
DiskManagerFileInfo[] files = disk_manager.getFiles();
Map file_sizes = new HashMap();
for (int i = 0; i < files.length; i++) {
try {
Long len = Long.valueOf(((DiskManagerFileInfoImpl) files[i]).getCacheFile().getLength());
file_sizes.put(files[i], len);
} catch (CacheFileManagerException e) {
Debug.printStackTrace(e);
}
}
if (resumeEnabled) {
boolean resumeValid = false;
byte[] resume_pieces = null;
Map partialPieces = null;
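// resume map layout (as read below): "resume data" holds one state byte per
// piece, "blocks" maps piece numbers to lists of partially written blocks,
// and "valid" is a 0/1 flag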
Map resume_data = getResumeData();
if (resume_data != null) {
try {
resume_pieces = (byte[]) resume_data.get("resume data");
if (resume_pieces != null) {
if (resume_pieces.length != pieces.length) {
Debug.out("Resume data array length mismatch: " + resume_pieces.length + "/" + pieces.length);
resume_pieces = null;
}
}
partialPieces = (Map) resume_data.get("blocks");
resumeValid = ((Long) resume_data.get("valid")).intValue() == 1;
if (isTorrentResumeDataComplete(disk_manager.getDownloadManager().getDownloadState(), resume_data)) {
resume_data_complete = true;
} else {
// mark the stored resume data invalid so that, if we crash mid-check,
// the NOT_DONE pieces will be re-checked on the next start
resume_data.put("valid", Long.valueOf(0));
saveResumeData(resume_data);
}
} catch (Exception ignore) {
// ignore.printStackTrace();
}
}
if (resume_pieces == null) {
check_is_full_check = true;
resumeValid = false;
resume_pieces = new byte[pieces.length];
Arrays.fill(resume_pieces, PIECE_RECHECK_REQUIRED);
}
check_resume_was_valid = resumeValid;
boolean recheck_all = use_fast_resume_recheck_all;
if (!recheck_all) {
// override if not much left undone
long total_not_done = 0;
int piece_size = disk_manager.getPieceLength();
for (int i = 0; i < pieces.length; i++) {
if (resume_pieces[i] != PIECE_DONE) {
total_not_done += piece_size;
}
}
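// with less than 64 MiB left unverified, a full recheck is cheap, so do it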
if (total_not_done < 64 * 1024 * 1024) {
recheck_all = true;
}
}
if (Logger.isEnabled()) {
int total_not_done = 0;
int total_done = 0;
int total_started = 0;
int total_recheck = 0;
for (int i = 0; i < pieces.length; i++) {
byte piece_state = resume_pieces[i];
if (piece_state == PIECE_NOT_DONE) {
total_not_done++;
} else if (piece_state == PIECE_DONE) {
total_done++;
} else if (piece_state == PIECE_STARTED) {
total_started++;
} else {
total_recheck++;
}
}
String str = "valid=" + resumeValid + ",not done=" + total_not_done + ",done=" + total_done + ",started=" + total_started + ",recheck=" + total_recheck + ",rc all=" + recheck_all + ",full=" + check_is_full_check;
Logger.log(new LogEvent(disk_manager, LOGID, str));
}
for (int i = 0; i < pieces.length; i++) {
check_position = i;
DiskManagerPiece dm_piece = pieces[i];
disk_manager.setPercentDone(((i + 1) * 1000) / disk_manager.getNbPieces());
boolean pieceCannotExist = false;
byte piece_state = resume_pieces[i];
if (piece_state == PIECE_DONE || !resumeValid || recheck_all) {
// at least check that file sizes are OK for this piece to be valid
DMPieceList list = disk_manager.getPieceList(i);
for (int j = 0; j < list.size(); j++) {
DMPieceMapEntry entry = list.get(j);
Long file_size = (Long) file_sizes.get(entry.getFile());
if (file_size == null) {
piece_state = PIECE_NOT_DONE;
pieceCannotExist = true;
if (Logger.isEnabled())
Logger.log(new LogEvent(disk_manager, LOGID, LogEvent.LT_WARNING, "Piece #" + i + ": file is missing, " + "fails re-check."));
break;
}
long expected_size = entry.getOffset() + entry.getLength();
if (file_size.longValue() < expected_size) {
piece_state = PIECE_NOT_DONE;
pieceCannotExist = true;
if (Logger.isEnabled())
Logger.log(new LogEvent(disk_manager, LOGID, LogEvent.LT_WARNING, "Piece #" + i + ": file is too small, fails re-check. File size = " + file_size + ", piece needs " + expected_size));
break;
}
}
}
if (piece_state == PIECE_DONE) {
dm_piece.setDone(true);
} else if (piece_state == PIECE_NOT_DONE && !recheck_all) {
// the piece isn't done and a full recheck wasn't requested (only
// started pieces get re-checked on restart), so leave it in its
// default not-done state
} else {
// if the resume data is invalid or explicit recheck needed
if (pieceCannotExist) {
dm_piece.setDone(false);
} else if (piece_state == PIECE_RECHECK_REQUIRED || !resumeValid) {
run_sem.reserve();
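// wait until the shared recheck scheduler grants a slot; getPermission()
// is assumed to pace callers internally rather than busy-spin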
while (!stopped) {
if (recheck_inst.getPermission()) {
break;
}
}
if (stopped) {
break;
} else {
try {
DiskManagerCheckRequest request = disk_manager.createCheckRequest(i, null);
request.setLowPriority(true);
checker.enqueueCheckRequest(request, new DiskManagerCheckRequestListener() {
@Override
public void checkCompleted(DiskManagerCheckRequest request, boolean passed) {
if (TEST_RECHECK_FAILURE_HANDLING && (int) (Math.random() * 10) == 0) {
disk_manager.getPiece(request.getPieceNumber()).setDone(false);
passed = false;
}
if (!passed) {
synchronized (failed_pieces) {
failed_pieces.add(request);
}
}
complete();
}
@Override
public void checkCancelled(DiskManagerCheckRequest request) {
complete();
}
@Override
public void checkFailed(DiskManagerCheckRequest request, Throwable cause) {
complete();
}
protected void complete() {
run_sem.release();
pending_checks_sem.release();
}
});
pending_check_num++;
} catch (Throwable e) {
Debug.printStackTrace(e);
}
}
}
}
}
while (pending_check_num > 0) {
pending_checks_sem.reserve();
pending_check_num--;
}
if (partialPieces != null) {
Iterator iter = partialPieces.entrySet().iterator();
while (iter.hasNext()) {
Map.Entry entry = (Map.Entry) iter.next();
int pieceNumber = Integer.parseInt((String) entry.getKey());
DiskManagerPiece dm_piece = pieces[pieceNumber];
if (!dm_piece.isDone()) {
List blocks = (List) entry.getValue();
Iterator iterBlock = blocks.iterator();
while (iterBlock.hasNext()) {
dm_piece.setWritten(((Long) iterBlock.next()).intValue());
}
}
}
}
} else {
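// no usable resume data: hash-check every piece whose underlying files
// are large enough for the piece to exist at all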
for (int i = 0; i < pieces.length; i++) {
check_position = i;
disk_manager.setPercentDone(((i + 1) * 1000) / disk_manager.getNbPieces());
boolean pieceCannotExist = false;
// check that the underlying files are large enough for this piece to
// possibly exist; if not, mark it as not done
DMPieceList list = disk_manager.getPieceList(i);
for (int j = 0; j < list.size(); j++) {
DMPieceMapEntry entry = list.get(j);
Long file_size = (Long) file_sizes.get(entry.getFile());
if (file_size == null) {
pieceCannotExist = true;
break;
}
long expected_size = entry.getOffset() + entry.getLength();
if (file_size.longValue() < expected_size) {
pieceCannotExist = true;
break;
}
}
if (pieceCannotExist) {
disk_manager.getPiece(i).setDone(false);
continue;
}
run_sem.reserve();
while (!stopped) {
if (recheck_inst.getPermission()) {
break;
}
}
if (stopped) {
break;
}
try {
DiskManagerCheckRequest request = disk_manager.createCheckRequest(i, null);
request.setLowPriority(true);
checker.enqueueCheckRequest(request, new DiskManagerCheckRequestListener() {
@Override
public void checkCompleted(DiskManagerCheckRequest request, boolean passed) {
if (TEST_RECHECK_FAILURE_HANDLING && (int) (Math.random() * 10) == 0) {
disk_manager.getPiece(request.getPieceNumber()).setDone(false);
passed = false;
}
if (!passed) {
synchronized (failed_pieces) {
failed_pieces.add(request);
}
}
complete();
}
@Override
public void checkCancelled(DiskManagerCheckRequest request) {
complete();
}
@Override
public void checkFailed(DiskManagerCheckRequest request, Throwable cause) {
complete();
}
protected void complete() {
run_sem.release();
pending_checks_sem.release();
}
});
pending_check_num++;
} catch (Throwable e) {
Debug.printStackTrace(e);
}
}
while (pending_check_num > 0) {
pending_checks_sem.reserve();
pending_check_num--;
}
}
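// recovery pass: if a failed piece's computed hash matches the expected
// hash of some other, not-yet-done piece of the same length, the data was
// likely written at the wrong piece position (e.g. by reorder storage), so
// copy it to the matching position and re-check it there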
if (failed_pieces.size() > 0 && !TEST_RECHECK_FAILURE_HANDLING) {
byte[][] piece_hashes = disk_manager.getTorrent().getPieces();
ByteArrayHashMap<Integer> hash_map = new ByteArrayHashMap<>();
for (int i = 0; i < piece_hashes.length; i++) {
hash_map.put(piece_hashes[i], i);
}
for (DiskManagerCheckRequest request : failed_pieces) {
while (!stopped) {
if (recheck_inst.getPermission()) {
break;
}
}
if (stopped) {
break;
}
byte[] hash = request.getHash();
if (hash != null) {
final Integer target_index = hash_map.get(hash);
int current_index = request.getPieceNumber();
int piece_size = disk_manager.getPieceLength(current_index);
if (target_index != null && target_index != current_index && disk_manager.getPieceLength(target_index) == piece_size && !disk_manager.isDone(target_index)) {
final AESemaphore sem = new AESemaphore("PieceReorder");
disk_manager.enqueueReadRequest(disk_manager.createReadRequest(current_index, 0, piece_size), new DiskManagerReadRequestListener() {
@Override
public void readCompleted(DiskManagerReadRequest request, DirectByteBuffer data) {
try {
disk_manager.enqueueWriteRequest(disk_manager.createWriteRequest(target_index, 0, data, null), new DiskManagerWriteRequestListener() {
@Override
public void writeCompleted(DiskManagerWriteRequest request) {
try {
DiskManagerCheckRequest check_request = disk_manager.createCheckRequest(target_index, null);
check_request.setLowPriority(true);
checker.enqueueCheckRequest(check_request, new DiskManagerCheckRequestListener() {
@Override
public void checkCompleted(DiskManagerCheckRequest request, boolean passed) {
sem.release();
}
@Override
public void checkCancelled(DiskManagerCheckRequest request) {
sem.release();
}
@Override
public void checkFailed(DiskManagerCheckRequest request, Throwable cause) {
sem.release();
}
});
} catch (Throwable e) {
sem.release();
}
}
@Override
public void writeFailed(DiskManagerWriteRequest request, Throwable cause) {
sem.release();
}
});
} catch (Throwable e) {
sem.release();
}
}
@Override
public void readFailed(DiskManagerReadRequest request, Throwable cause) {
sem.release();
}
@Override
public int getPriority() {
return (-1);
}
@Override
public void requestExecuted(long bytes) {
}
});
sem.reserve();
}
}
}
}
} finally {
check_in_progress = false;
}
if (!(stopped || resume_data_complete)) {
try {
saveResumeData(true);
} catch (Exception e) {
Debug.out("Failed to dump initial resume data to disk");
Debug.printStackTrace(e);
}
}
} catch (Throwable e) {
// if something went wrong then log and continue.
Debug.printStackTrace(e);
} finally {
recheck_inst.unregister();
// System.out.println( "Check of '" + disk_manager.getDownloadManager().getDisplayName() + "' completed in " + (System.currentTimeMillis() - start));
}
}
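The loop above coordinates two semaphores: run_sem caps how many hash checks are in flight (one for pieces over 32 MiB, otherwise two), while pending_checks_sem plus a counter lets the method block until every queued check has completed. Below is a minimal, self-contained sketch of that pattern using java.util.concurrent.Semaphore in place of BiglyBT's AESemaphore; the class and variable names are illustrative only, not part of the BiglyBT API.
import java.util.concurrent.Semaphore;

public class BoundedCheckSketch {
    public static void main(String[] args) throws InterruptedException {
        int pieceCount = 8;
        boolean largePieces = false; // pieces over 32 MiB would use a single permit
        Semaphore runSem = new Semaphore(largePieces ? 1 : 2);
        Semaphore pendingSem = new Semaphore(0);
        int pending = 0;
        for (int i = 0; i < pieceCount; i++) {
            runSem.acquire(); // blocks when the concurrency limit is reached
            final int piece = i;
            new Thread(() -> {
                try {
                    // stand-in for the asynchronous hash check of one piece
                    System.out.println("checked piece " + piece);
                } finally {
                    runSem.release();     // free a slot for the next check
                    pendingSem.release(); // signal one completion to the caller
                }
            }).start();
            pending++;
        }
        // drain one completion signal per queued check, as checkAllPieces does
        while (pending-- > 0) {
            pendingSem.acquire();
        }
        System.out.println("all checks complete");
    }
}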
Use of com.biglybt.core.disk.impl.DiskManagerFileInfoImpl in project BiglyBT by BiglySoftware.
In class DMCheckerImpl, the method enqueueCheckRequestSupport:
protected void enqueueCheckRequestSupport(final DiskManagerCheckRequest request, final DiskManagerCheckRequestListener listener, boolean read_flush, boolean hash_requested) {
if (!checking_enabled) {
listener.checkCompleted(request, true);
return;
}
final int pieceNumber = request.getPieceNumber();
try {
final byte[] required_hash = disk_manager.getPieceHash(pieceNumber);
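// no piece hash yet: for v2 torrents the per-piece hash may need to be
// fetched asynchronously, so ask the listener for it once before failing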
if (required_hash == null) {
if (!hash_requested) {
if (listener.hashRequest(pieceNumber, new DiskManagerCheckRequestListener.HashListener() {
@Override
public int getPieceNumber() {
return (pieceNumber);
}
@Override
public void complete(boolean success) {
if (!success) {
Debug.out("Failed to get hash for piece " + request.getPieceNumber());
}
enqueueCheckRequestSupport(request, listener, read_flush, true);
}
})) {
return;
}
}
listener.checkFailed(request, new Exception("V2 hash for piece " + pieceNumber + " not available"));
return;
}
// quick check that the files making up this piece are at least big enough
// to make reading the data worthwhile
// also, if the piece is entirely compact we can fail immediately, as we
// don't actually have any data for it (or can assume we don't)
// this is relaxed a little for pieces belonging to compact files with fewer
// than three pieces, as these may once have been complete and still have all
// their bits living in retained compact areas
final DMPieceList pieceList = disk_manager.getPieceList(pieceNumber);
try {
// there are other comments in the code about the existence of 0 length piece lists
// just in case these still occur, for whatever reason, ensure that a 0 length list
// causes the code to carry on and do the check (i.e. it is no worse than before
// this optimisation was added...)
boolean all_compact = pieceList.size() > 0;
for (int i = 0; i < pieceList.size(); i++) {
DMPieceMapEntry piece_entry = pieceList.get(i);
DiskManagerFileInfoImpl file_info = (DiskManagerFileInfoImpl) piece_entry.getFile();
CacheFile cache_file = file_info.getCacheFile();
if (cache_file.compareLength(piece_entry.getOffset()) < 0) {
listener.checkCompleted(request, false);
return;
}
if (all_compact) {
int st = cache_file.getStorageType();
if ((st != CacheFile.CT_COMPACT && st != CacheFile.CT_PIECE_REORDER_COMPACT) || file_info.getNbPieces() <= 2) {
all_compact = false;
}
}
}
if (all_compact) {
// System.out.println( "Piece " + pieceNumber + " is all compact, failing hash check" );
listener.checkCompleted(request, false);
return;
}
} catch (Throwable e) {
// we can fail here if the disk manager has been stopped, as the cache file
// length may be queried on a "closed" (i.e. un-owned) file
listener.checkCancelled(request);
return;
}
int this_piece_length = disk_manager.getPieceLength(pieceNumber);
DiskManagerReadRequest read_request = disk_manager.createReadRequest(pieceNumber, 0, this_piece_length);
try {
this_mon.enter();
if (stopped) {
listener.checkCancelled(request);
return;
}
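// track in-flight reads; once stopped, each completion releases
// async_read_sem so the stop path can wait for them to drain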
async_reads++;
} finally {
this_mon.exit();
}
read_request.setFlush(read_flush);
read_request.setUseCache(!request.isAdHoc());
read_request.setErrorIsFatal(request.getErrorIsFatal());
disk_manager.enqueueReadRequest(read_request, new DiskManagerReadRequestListener() {
@Override
public void readCompleted(DiskManagerReadRequest read_request, DirectByteBuffer buffer) {
complete();
try {
this_mon.enter();
if (stopped) {
buffer.returnToPool();
listener.checkCancelled(request);
return;
}
async_checks++;
} finally {
this_mon.exit();
}
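// data flagged as transient can't be trusted for hashing, so treat the
// check as failed without running the hasher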
if (buffer.getFlag(DirectByteBuffer.FL_CONTAINS_TRANSIENT_DATA)) {
try {
buffer.returnToPool();
listener.checkCompleted(request, false);
} finally {
try {
this_mon.enter();
async_checks--;
if (stopped) {
async_check_sem.release();
}
} finally {
this_mon.exit();
}
}
} else {
try {
final DirectByteBuffer f_buffer = buffer;
int piece_length = disk_manager.getPieceLength();
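// a 20-byte hash is SHA-1 (BitTorrent v1); longer means SHA-256 (v2)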
int hash_version = required_hash.length == 20 ? 1 : 2;
long v2_file_length;
ByteBuffer byte_buffer = buffer.getBuffer(DirectByteBuffer.SS_DW);
if (hash_version == 2) {
DMPieceMapEntry piece_entry = pieceList.get(0);
v2_file_length = piece_entry.getFile().getLength();
if (pieceList.size() == 2) {
int v2_piece_length = piece_entry.getLength();
if (v2_piece_length < piece_length) {
// hasher will pad appropriately
byte_buffer.limit(byte_buffer.position() + v2_piece_length);
}
}
} else {
v2_file_length = -1;
}
ConcurrentHasher.getSingleton().addRequest(byte_buffer, hash_version, piece_length, v2_file_length, new ConcurrentHasherRequestListener() {
@Override
public void complete(ConcurrentHasherRequest hash_request) {
// async_result codes: 1 = pass, 2 = hash mismatch, 3 = cancelled (default)
int async_result = 3;
try {
byte[] actual_hash = hash_request.getResult();
if (actual_hash != null) {
request.setHash(actual_hash);
// success
async_result = 1;
for (int i = 0; i < actual_hash.length; i++) {
if (actual_hash[i] != required_hash[i]) {
// failed;
async_result = 2;
break;
}
}
}
} finally {
try {
if (async_result == 1) {
try {
for (int i = 0; i < pieceList.size(); i++) {
DMPieceMapEntry piece_entry = pieceList.get(i);
DiskManagerFileInfoImpl file_info = (DiskManagerFileInfoImpl) piece_entry.getFile();
if (file_info.getLength() > 0 || !file_info.isSkipped()) {
CacheFile cache_file = file_info.getCacheFile();
if (!read_flush && file_info.getStorageType() == DiskManagerFileInfoImpl.ST_REORDER) {
// the data must be flushed to disk before the piece is marked complete,
// as the re-order logic relies on the on-disk state
cache_file.flushCache(piece_entry.getOffset(), piece_entry.getLength());
}
cache_file.setPieceComplete(pieceNumber, f_buffer);
}
}
} catch (Throwable e) {
f_buffer.returnToPool();
Debug.out(e);
listener.checkFailed(request, e);
return;
}
}
f_buffer.returnToPool();
if (async_result == 1) {
listener.checkCompleted(request, true);
} else if (async_result == 2) {
listener.checkCompleted(request, false);
} else {
listener.checkCancelled(request);
}
} finally {
try {
this_mon.enter();
async_checks--;
if (stopped) {
async_check_sem.release();
}
} finally {
this_mon.exit();
}
}
}
}
}, request.isLowPriority());
} catch (Throwable e) {
Debug.printStackTrace(e);
buffer.returnToPool();
listener.checkFailed(request, e);
}
}
}
@Override
public void readFailed(DiskManagerReadRequest read_request, Throwable cause) {
complete();
listener.checkFailed(request, cause);
}
@Override
public int getPriority() {
return (checking_read_priority ? 0 : -1);
}
@Override
public void requestExecuted(long bytes) {
}
protected void complete() {
try {
this_mon.enter();
async_reads--;
if (stopped) {
async_read_sem.release();
}
} finally {
this_mon.exit();
}
}
});
} catch (Throwable e) {
disk_manager.setFailed(DiskManager.ET_OTHER, "Piece check error", e);
Debug.printStackTrace(e);
listener.checkFailed(request, e);
}
}
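The hash comparison above dispatches on the length of the required hash: 20 bytes means a v1 (SHA-1) piece hash, anything longer is treated as v2 (SHA-256), and the computed digest is compared byte-for-byte against the expected value. Below is an illustrative, self-contained sketch of that decision using java.security.MessageDigest in place of BiglyBT's ConcurrentHasher; it ignores v2's padding of short final pieces, and all names are hypothetical.
import java.security.MessageDigest;
import java.security.NoSuchAlgorithmException;
import java.util.Arrays;

public class PieceHashSketch {
    static boolean checkPiece(byte[] pieceData, byte[] requiredHash)
            throws NoSuchAlgorithmException {
        // 20-byte required hash implies v1 (SHA-1), otherwise v2 (SHA-256)
        int hashVersion = requiredHash.length == 20 ? 1 : 2;
        MessageDigest digest =
            MessageDigest.getInstance(hashVersion == 1 ? "SHA-1" : "SHA-256");
        byte[] actualHash = digest.digest(pieceData);
        // same pass/fail decision as the byte-by-byte loop in the listener
        return Arrays.equals(actualHash, requiredHash);
    }

    public static void main(String[] args) throws NoSuchAlgorithmException {
        byte[] piece = "example piece data".getBytes();
        byte[] v1Hash = MessageDigest.getInstance("SHA-1").digest(piece);
        System.out.println("v1 check passes: " + checkPiece(piece, v1Hash));
    }
}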