Use of com.biglybt.core.util.DirectByteBuffer in the BiglyBT project by BiglySoftware.
Class OutgoingMessageQueueImpl, method notifyOfExternallySentMessage.
/**
 * Notify the queue (and its listeners) of a message that was sent externally
 * on the queue's behalf.
 * @param message sent externally
 */
@Override
public void notifyOfExternallySentMessage(Message message) {
    // Snapshot the listener list so concurrent registration changes don't affect us.
    ArrayList listeners_ref = listeners;
    // Total the bytes across all of the message's payload buffers.
    DirectByteBuffer[] buffers = message.getData();
    int total_bytes = 0;
    for (DirectByteBuffer buffer : buffers) {
        total_bytes += buffer.remaining(DirectByteBuffer.SS_NET);
    }
    // Tell every listener the message was sent, then account the bytes as
    // data or protocol traffic depending on the message type.
    for (Object o : listeners_ref) {
        MessageQueueListener listener = (MessageQueueListener) o;
        listener.messageSent(message);
        if (message.getType() == Message.TYPE_DATA_PAYLOAD) {
            listener.dataBytesSent(total_bytes);
        } else {
            listener.protocolBytesSent(total_bytes);
        }
    }
    // System.out.println( "notifiedOfExternallySentMessage:: [" +message.getID()+ "] size=" +total_bytes );
}
Use of com.biglybt.core.util.DirectByteBuffer in the BiglyBT project by BiglySoftware.
Class OutgoingMessageQueueImpl, method addMessage.
/**
 * Add a message to the message queue.
 * NOTE: Allows for manual listener notification at some later time,
 * using doListenerNotifications(), instead of notifying immediately
 * from within this method. This is useful if you want to invoke
 * listeners outside of some greater synchronised block to avoid
 * deadlock.
 * @param message message to add
 * @param manual_listener_notify true for manual notification, false for automatic
 */
@Override
public void addMessage(Message message, boolean manual_listener_notify) {
// do message add notifications
// Every listener gets a veto: if any messageAdded() returns false the message
// is dropped before being encoded or queued.
boolean allowed = true;
ArrayList list_ref = listeners;
for (int i = 0; i < list_ref.size(); i++) {
MessageQueueListener listener = (MessageQueueListener) list_ref.get(i);
allowed = allowed && listener.messageAdded(message);
}
if (!allowed) {
// message.destroy(); //TODO destroy????
return;
}
// Encode the logical message into one or more wire-format raw messages.
RawMessage[] rmesgs = stream_encoder.encodeMessage(message);
if (destroyed) {
// queue is shutdown, drop any added messages
for (int i = 0; i < rmesgs.length; i++) {
rmesgs[i].destroy();
}
return;
}
for (int i = 0; i < rmesgs.length; i++) {
RawMessage rmesg = rmesgs[i];
// A raw message may supersede older queued messages (e.g. cancels); remove them first.
removeMessagesOfType(rmesg.messagesToRemove(), manual_listener_notify);
try {
queue_mon.enter();
// Find the insertion point: skip ahead of lower-priority messages, but a
// message whose first buffer position > 0 has already been partially sent
// and must not be displaced.
int pos = 0;
for (Iterator<RawMessage> it = queue.iterator(); it.hasNext(); ) {
RawMessage msg = it.next();
if (rmesg.getPriority() > msg.getPriority() && msg.getRawData()[0].position(DirectByteBuffer.SS_NET) == 0) {
// but don't insert in front of a half-sent message
break;
}
pos++;
}
// No-delay messages are remembered as urgent so the transport flushes promptly.
if (rmesg.isNoDelay()) {
urgent_message = rmesg;
}
queue.add(pos, rmesg);
// Account the queued byte totals (overall, and data-payload-only) under the monitor.
DirectByteBuffer[] payload = rmesg.getRawData();
int remaining = 0;
for (int j = 0; j < payload.length; j++) {
remaining += payload[j].remaining(DirectByteBuffer.SS_NET);
}
total_size += remaining;
if (rmesg.getType() == Message.TYPE_DATA_PAYLOAD) {
total_data_size += remaining;
}
} finally {
queue_mon.exit();
}
if (manual_listener_notify) {
// register listener event for later, manual notification
NotificationItem item = new NotificationItem(NotificationItem.MESSAGE_ADDED);
item.message = rmesg;
try {
delayed_notifications_mon.enter();
delayed_notifications.add(item);
} finally {
delayed_notifications_mon.exit();
}
} else {
// do listener notification now
ArrayList listeners_ref = listeners;
for (int j = 0; j < listeners_ref.size(); j++) {
MessageQueueListener listener = (MessageQueueListener) listeners_ref.get(j);
listener.messageQueued(rmesg.getBaseMessage());
}
}
}
}
Use of com.biglybt.core.util.DirectByteBuffer in the BiglyBT project by BiglySoftware.
Class RawMessageAdapter, method getRawData.
// Core raw message implementation: exposes the payload as DirectByteBuffers.
@Override
public DirectByteBuffer[] getRawData() {
    if (plug_msg != null) {
        // Plugin-supplied message: wrap each raw ByteBuffer on every call.
        // TODO cache it???
        ByteBuffer[] payload = plug_msg.getRawPayload();
        DirectByteBuffer[] wrapped = new DirectByteBuffer[payload.length];
        for (int i = 0; i < wrapped.length; i++) {
            wrapped[i] = new DirectByteBuffer(payload[i]);
        }
        return wrapped;
    }
    // Core message: delegate directly.
    return core_msg.getRawData();
}
Use of com.biglybt.core.util.DirectByteBuffer in the BiglyBT project by BiglySoftware.
Class FMFileTestImpl, method writeSupport.
@Override
protected void writeSupport(DirectByteBuffer[] buffers, long offset) throws FMFileManagerException {
    // Test-only "write": nothing is persisted. When dummy-data checking is on,
    // each byte is verified against the expected pattern (the low byte of its
    // absolute offset within the torrent).
    offset += file_offset_in_torrent;
    for (DirectByteBuffer buf : buffers) {
        if (AEDiagnostics.CHECK_DUMMY_FILE_DATA) {
            while (buf.hasRemaining(DirectByteBuffer.SS_FILE)) {
                byte actual = buf.get(DirectByteBuffer.SS_FILE);
                if ((byte) offset != actual) {
                    System.out.println("FMFileTest: write is bad at " + offset + ": expected = " + (byte) offset + ", actual = " + actual);
                    // Advance past the bad byte and the rest of this buffer so
                    // the next buffer's expected offsets stay aligned.
                    offset += buf.remaining(DirectByteBuffer.SS_FILE) + 1;
                    break;
                }
                offset++;
            }
        }
        // Consume the buffer exactly as a real write would.
        buf.position(DirectByteBuffer.SS_FILE, buf.limit(DirectByteBuffer.SS_FILE));
    }
}
Use of com.biglybt.core.util.DirectByteBuffer in the BiglyBT project by BiglySoftware.
Class RDResumeHandler, method checkAllPieces.
// Recheck every piece of the download, either guided by fast-resume data (skip
// pieces recorded as done, recheck only what is needed) or by hash-checking
// everything when no resume data exists. Checks run asynchronously via the
// checker, throttled by run_sem and by permission from the recheck scheduler.
// Pieces that fail but whose data hashes to a different, same-sized piece are
// relocated there (handles torrents whose file layout changed).
// @param newfiles true if the files are newly allocated, which disables fast-resume
public void checkAllPieces(boolean newfiles) {
// long start = System.currentTimeMillis();
DiskManagerRecheckInstance recheck_inst = disk_manager.getRecheckScheduler().register(disk_manager, false);
int overall_piece_size = disk_manager.getPieceLength();
// Very large pieces (>32MB) are checked one at a time to bound memory use;
// otherwise two checks may be in flight concurrently.
final AESemaphore run_sem = new AESemaphore("RDResumeHandler::checkAllPieces:runsem", overall_piece_size > 32 * 1024 * 1024 ? 1 : 2);
final List<DiskManagerCheckRequest> failed_pieces = new ArrayList<>();
try {
boolean resume_data_complete = false;
try {
check_in_progress = true;
boolean resumeEnabled = use_fast_resume;
if (newfiles) {
resumeEnabled = false;
}
final AESemaphore pending_checks_sem = new AESemaphore("RD:PendingChecks");
int pending_check_num = 0;
DiskManagerPiece[] pieces = disk_manager.getPieces();
// calculate the current file sizes up front for performance reasons
DiskManagerFileInfo[] files = disk_manager.getFiles();
Map file_sizes = new HashMap();
for (int i = 0; i < files.length; i++) {
try {
Long len = new Long(((DiskManagerFileInfoImpl) files[i]).getCacheFile().getLength());
file_sizes.put(files[i], len);
} catch (CacheFileManagerException e) {
Debug.printStackTrace(e);
}
}
// --- fast-resume path: use stored per-piece state to decide what to recheck ---
if (resumeEnabled) {
boolean resumeValid = false;
byte[] resume_pieces = null;
Map partialPieces = null;
Map resume_data = getResumeData();
if (resume_data != null) {
try {
resume_pieces = (byte[]) resume_data.get("resume data");
if (resume_pieces != null) {
if (resume_pieces.length != pieces.length) {
Debug.out("Resume data array length mismatch: " + resume_pieces.length + "/" + pieces.length);
resume_pieces = null;
}
}
partialPieces = (Map) resume_data.get("blocks");
resumeValid = ((Long) resume_data.get("valid")).intValue() == 1;
if (isTorrentResumeDataComplete(disk_manager.getDownloadManager().getDownloadState(), resume_data)) {
resume_data_complete = true;
} else {
// set it so that if we crash the NOT_DONE pieces will be
// rechecked
resume_data.put("valid", new Long(0));
saveResumeData(resume_data);
}
} catch (Exception ignore) {
// ignore.printStackTrace();
}
}
// No usable resume array: treat every piece as needing an explicit recheck.
if (resume_pieces == null) {
check_is_full_check = true;
resumeValid = false;
resume_pieces = new byte[pieces.length];
Arrays.fill(resume_pieces, PIECE_RECHECK_REQUIRED);
}
check_resume_was_valid = resumeValid;
boolean recheck_all = use_fast_resume_recheck_all;
if (!recheck_all) {
// override if not much left undone
long total_not_done = 0;
int piece_size = disk_manager.getPieceLength();
for (int i = 0; i < pieces.length; i++) {
if (resume_pieces[i] != PIECE_DONE) {
total_not_done += piece_size;
}
}
// Less than 64MB outstanding: cheap enough to just recheck everything.
if (total_not_done < 64 * 1024 * 1024) {
recheck_all = true;
}
}
if (Logger.isEnabled()) {
int total_not_done = 0;
int total_done = 0;
int total_started = 0;
int total_recheck = 0;
for (int i = 0; i < pieces.length; i++) {
byte piece_state = resume_pieces[i];
if (piece_state == PIECE_NOT_DONE) {
total_not_done++;
} else if (piece_state == PIECE_DONE) {
total_done++;
} else if (piece_state == PIECE_STARTED) {
total_started++;
} else {
total_recheck++;
}
}
String str = "valid=" + resumeValid + ",not done=" + total_not_done + ",done=" + total_done + ",started=" + total_started + ",recheck=" + total_recheck + ",rc all=" + recheck_all + ",full=" + check_is_full_check;
Logger.log(new LogEvent(disk_manager, LOGID, str));
}
// Walk every piece: mark it done, skip it, or queue an asynchronous hash check.
for (int i = 0; i < pieces.length; i++) {
check_position = i;
DiskManagerPiece dm_piece = pieces[i];
disk_manager.setPercentDone(((i + 1) * 1000) / disk_manager.getNbPieces());
boolean pieceCannotExist = false;
byte piece_state = resume_pieces[i];
if (piece_state == PIECE_DONE || !resumeValid || recheck_all) {
// at least check that file sizes are OK for this piece to be valid
DMPieceList list = disk_manager.getPieceList(i);
for (int j = 0; j < list.size(); j++) {
DMPieceMapEntry entry = list.get(j);
Long file_size = (Long) file_sizes.get(entry.getFile());
if (file_size == null) {
piece_state = PIECE_NOT_DONE;
pieceCannotExist = true;
if (Logger.isEnabled())
Logger.log(new LogEvent(disk_manager, LOGID, LogEvent.LT_WARNING, "Piece #" + i + ": file is missing, " + "fails re-check."));
break;
}
long expected_size = entry.getOffset() + entry.getLength();
if (file_size.longValue() < expected_size) {
piece_state = PIECE_NOT_DONE;
pieceCannotExist = true;
if (Logger.isEnabled())
Logger.log(new LogEvent(disk_manager, LOGID, LogEvent.LT_WARNING, "Piece #" + i + ": file is too small, fails re-check. File size = " + file_size + ", piece needs " + expected_size));
break;
}
}
}
if (piece_state == PIECE_DONE) {
dm_piece.setDone(true);
} else if (piece_state == PIECE_NOT_DONE && !recheck_all) {
// if the piece isn't done and we haven't been asked to recheck all pieces
// on restart (only started pieces) then just set as not done
} else {
// if the resume data is invalid or explicit recheck needed
if (pieceCannotExist) {
dm_piece.setDone(false);
} else if (piece_state == PIECE_RECHECK_REQUIRED || !resumeValid) {
// Throttle: wait for a check slot, then for scheduler permission
// (polling until granted or the handler is stopped).
run_sem.reserve();
while (!stopped) {
if (recheck_inst.getPermission()) {
break;
}
}
if (stopped) {
break;
} else {
try {
DiskManagerCheckRequest request = disk_manager.createCheckRequest(i, null);
request.setLowPriority(true);
// Async check; failures are collected for the reorder pass below.
checker.enqueueCheckRequest(request, new DiskManagerCheckRequestListener() {
@Override
public void checkCompleted(DiskManagerCheckRequest request, boolean passed) {
if (TEST_RECHECK_FAILURE_HANDLING && (int) (Math.random() * 10) == 0) {
disk_manager.getPiece(request.getPieceNumber()).setDone(false);
passed = false;
}
if (!passed) {
synchronized (failed_pieces) {
failed_pieces.add(request);
}
}
complete();
}
@Override
public void checkCancelled(DiskManagerCheckRequest request) {
complete();
}
@Override
public void checkFailed(DiskManagerCheckRequest request, Throwable cause) {
complete();
}
protected void complete() {
run_sem.release();
pending_checks_sem.release();
}
});
pending_check_num++;
} catch (Throwable e) {
Debug.printStackTrace(e);
}
}
}
}
}
// Wait for all outstanding asynchronous checks to complete.
while (pending_check_num > 0) {
pending_checks_sem.reserve();
pending_check_num--;
}
// Restore per-block written state for partially-downloaded pieces.
// NOTE(review): resume "blocks" map appears keyed by piece number as a
// String, values being lists of written block indices — confirm format.
if (partialPieces != null) {
Iterator iter = partialPieces.entrySet().iterator();
while (iter.hasNext()) {
Map.Entry key = (Map.Entry) iter.next();
int pieceNumber = Integer.parseInt((String) key.getKey());
DiskManagerPiece dm_piece = pieces[pieceNumber];
if (!dm_piece.isDone()) {
List blocks = (List) partialPieces.get(key.getKey());
Iterator iterBlock = blocks.iterator();
while (iterBlock.hasNext()) {
dm_piece.setWritten(((Long) iterBlock.next()).intValue());
}
}
}
}
} else {
// --- no fast-resume: hash-check every piece whose underlying files exist ---
for (int i = 0; i < pieces.length; i++) {
check_position = i;
disk_manager.setPercentDone(((i + 1) * 1000) / disk_manager.getNbPieces());
boolean pieceCannotExist = false;
// check if there is an underlying file for this piece, if not set it to not done
DMPieceList list = disk_manager.getPieceList(i);
for (int j = 0; j < list.size(); j++) {
DMPieceMapEntry entry = list.get(j);
Long file_size = (Long) file_sizes.get(entry.getFile());
if (file_size == null) {
pieceCannotExist = true;
break;
}
long expected_size = entry.getOffset() + entry.getLength();
if (file_size.longValue() < expected_size) {
pieceCannotExist = true;
break;
}
}
if (pieceCannotExist) {
disk_manager.getPiece(i).setDone(false);
continue;
}
// Same throttling as the resume path: slot + scheduler permission.
run_sem.reserve();
while (!stopped) {
if (recheck_inst.getPermission()) {
break;
}
}
if (stopped) {
break;
}
try {
DiskManagerCheckRequest request = disk_manager.createCheckRequest(i, null);
request.setLowPriority(true);
checker.enqueueCheckRequest(request, new DiskManagerCheckRequestListener() {
@Override
public void checkCompleted(DiskManagerCheckRequest request, boolean passed) {
if (TEST_RECHECK_FAILURE_HANDLING && (int) (Math.random() * 10) == 0) {
disk_manager.getPiece(request.getPieceNumber()).setDone(false);
passed = false;
}
if (!passed) {
synchronized (failed_pieces) {
failed_pieces.add(request);
}
}
complete();
}
@Override
public void checkCancelled(DiskManagerCheckRequest request) {
complete();
}
@Override
public void checkFailed(DiskManagerCheckRequest request, Throwable cause) {
complete();
}
protected void complete() {
run_sem.release();
pending_checks_sem.release();
}
});
pending_check_num++;
} catch (Throwable e) {
Debug.printStackTrace(e);
}
}
while (pending_check_num > 0) {
pending_checks_sem.reserve();
pending_check_num--;
}
}
// Piece-reorder recovery: a failed piece whose data hashes to a different,
// same-length, not-yet-done piece index gets copied there and rechecked.
// NOTE(review): assumes request.getHash() is the hash computed during the
// failed check — confirm against DiskManagerCheckRequest.
if (failed_pieces.size() > 0 && !TEST_RECHECK_FAILURE_HANDLING) {
byte[][] piece_hashes = disk_manager.getTorrent().getPieces();
ByteArrayHashMap<Integer> hash_map = new ByteArrayHashMap<>();
for (int i = 0; i < piece_hashes.length; i++) {
hash_map.put(piece_hashes[i], i);
}
for (DiskManagerCheckRequest request : failed_pieces) {
while (!stopped) {
if (recheck_inst.getPermission()) {
break;
}
}
if (stopped) {
break;
}
byte[] hash = request.getHash();
if (hash != null) {
final Integer target_index = hash_map.get(hash);
int current_index = request.getPieceNumber();
int piece_size = disk_manager.getPieceLength(current_index);
if (target_index != null && target_index != current_index && disk_manager.getPieceLength(target_index) == piece_size && !disk_manager.isDone(target_index)) {
// Read current piece -> write to target -> recheck target, all
// serialised via sem so failed pieces are processed one at a time.
final AESemaphore sem = new AESemaphore("PieceReorder");
disk_manager.enqueueReadRequest(disk_manager.createReadRequest(current_index, 0, piece_size), new DiskManagerReadRequestListener() {
@Override
public void readCompleted(DiskManagerReadRequest request, DirectByteBuffer data) {
try {
disk_manager.enqueueWriteRequest(disk_manager.createWriteRequest(target_index, 0, data, null), new DiskManagerWriteRequestListener() {
@Override
public void writeCompleted(DiskManagerWriteRequest request) {
try {
DiskManagerCheckRequest check_request = disk_manager.createCheckRequest(target_index, null);
check_request.setLowPriority(true);
checker.enqueueCheckRequest(check_request, new DiskManagerCheckRequestListener() {
@Override
public void checkCompleted(DiskManagerCheckRequest request, boolean passed) {
sem.release();
}
@Override
public void checkCancelled(DiskManagerCheckRequest request) {
sem.release();
}
@Override
public void checkFailed(DiskManagerCheckRequest request, Throwable cause) {
sem.release();
}
});
} catch (Throwable e) {
sem.release();
}
}
@Override
public void writeFailed(DiskManagerWriteRequest request, Throwable cause) {
sem.release();
}
});
} catch (Throwable e) {
sem.release();
}
}
@Override
public void readFailed(DiskManagerReadRequest request, Throwable cause) {
sem.release();
}
@Override
public int getPriority() {
return (-1);
}
@Override
public void requestExecuted(long bytes) {
}
});
sem.reserve();
}
}
}
}
} finally {
check_in_progress = false;
}
// Persist resume state unless we were stopped mid-check or it was already complete.
if (!(stopped || resume_data_complete)) {
try {
saveResumeData(true);
} catch (Exception e) {
Debug.out("Failed to dump initial resume data to disk");
Debug.printStackTrace(e);
}
}
} catch (Throwable e) {
// if something went wrong then log and continue.
Debug.printStackTrace(e);
} finally {
recheck_inst.unregister();
// System.out.println( "Check of '" + disk_manager.getDownloadManager().getDisplayName() + "' completed in " + (System.currentTimeMillis() - start));
}
}
Aggregations