Use of com.biglybt.core.diskmanager.file.FMFileManagerException in project BiglyBT by BiglySoftware.
The class FMFileAccessPieceReorderer, method recoverConfig.
protected static void recoverConfig(TOTorrentFile torrent_file, File data_file, File config_file, int storage_type) throws FMFileManagerException {
    // Most likely add-for-seeding, which means a recheck will occur. Just map all
    // existing pieces to their correct positions and let the recheck sort things out.
    int first_piece_number = torrent_file.getFirstPieceNumber();
    int num_pieces = torrent_file.getLastPieceNumber() - first_piece_number + 1;
    int piece_size = (int) torrent_file.getTorrent().getPieceLength();
    int[] piece_map = new int[num_pieces];
    Arrays.fill(piece_map, -1);
    piece_map[0] = 0;
    long current_length = data_file.length();
    int piece_count = (int) ((current_length + piece_size - 1) / piece_size) + 1;
    if (piece_count > num_pieces) {
        piece_count = num_pieces;
    }
    for (int i = 1; i < piece_count; i++) {
        piece_map[i] = i;
    }
    int next_piece_index = piece_count;
    Map map = encodeConfig(storage_type, current_length, next_piece_index, piece_map);
    File control_dir = config_file.getParentFile();
    if (!control_dir.exists()) {
        control_dir.mkdirs();
    }
    if (!FileUtil.writeResilientFileWithResult(control_dir, config_file.getName(), map)) {
        throw (new FMFileManagerException("Failed to write control file " + config_file.getAbsolutePath()));
    }
}
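To see what this recovery produces, here is a minimal standalone sketch of the same mapping calculation (the class and method names are illustrative, not part of BiglyBT): with 16 KiB pieces, 40 KiB already on disk and 6 pieces in total, slots 0-3 are identity-mapped and the remaining slots stay unallocated at -1.

import java.util.Arrays;

public class PieceMapRecoverySketch {

    static int[] recoverPieceMap(long currentLength, int pieceSize, int numPieces) {
        int[] pieceMap = new int[numPieces];
        Arrays.fill(pieceMap, -1);       // -1 marks "not yet allocated"
        pieceMap[0] = 0;                 // the first piece always maps to slot 0
        // one slot per complete-or-partial piece on disk, plus one spare,
        // capped at the total piece count (mirrors the method above)
        int pieceCount = (int) ((currentLength + pieceSize - 1) / pieceSize) + 1;
        if (pieceCount > numPieces) {
            pieceCount = numPieces;
        }
        for (int i = 1; i < pieceCount; i++) {
            pieceMap[i] = i;             // identity mapping; the recheck validates the data
        }
        return pieceMap;
    }

    public static void main(String[] args) {
        int[] map = recoverPieceMap(40 * 1024, 16 * 1024, 6);
        System.out.println(Arrays.toString(map)); // [0, 1, 2, 3, -1, -1]
    }
}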
Use of com.biglybt.core.diskmanager.file.FMFileManagerException in project BiglyBT by BiglySoftware.
The class FMFileAccessPieceReorderer, method writeConfig.
private void writeConfig() throws FMFileManagerException {
    if (piece_map == null) {
        readConfig();
    }
    Map map = encodeConfig(storage_type, current_length, next_piece_index, piece_map);
    if (!control_dir.exists()) {
        control_dir.mkdirs();
    }
    if (!FileUtil.writeResilientFileWithResult(control_dir, control_file, map)) {
        throw (new FMFileManagerException("Failed to write control file " + FileUtil.newFile(control_dir, control_file).getAbsolutePath()));
    }
    if (TRACE) {
        System.out.println("WriteConfig: length=" + current_length + ", next=" + next_piece_index);
    }
    dirt_state = DIRT_CLEAN;
    dirt_time = -1;
}
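FileUtil.writeResilientFileWithResult is BiglyBT's own helper, so its internals are not shown here. As a rough standalone analogue of the pattern such a helper relies on (stage the new contents in a temporary file, then replace the control file, so a crash never leaves a half-written config), one might sketch, with purely illustrative names:

import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.StandardCopyOption;

public class ResilientWriteSketch {

    static boolean writeResilient(Path controlDir, String name, byte[] encodedConfig) {
        try {
            Files.createDirectories(controlDir);      // mirrors the mkdirs() above
            Path tmp = controlDir.resolve(name + ".tmp");
            Files.write(tmp, encodedConfig);          // stage the new contents
            // rename over the real file; atomic on typical local filesystems
            Files.move(tmp, controlDir.resolve(name), StandardCopyOption.REPLACE_EXISTING);
            return true;
        } catch (IOException e) {
            return false; // the caller turns this into an FMFileManagerException
        }
    }
}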
Use of com.biglybt.core.diskmanager.file.FMFileManagerException in project BiglyBT by BiglySoftware.
The class FMFileAccessPieceReorderer, method setPieceComplete.
@Override
public void setPieceComplete(FileAccessor fa, int piece_number, DirectByteBuffer piece_data) throws FMFileManagerException {
    if (num_pieces >= MIN_PIECES_REORDERABLE) {
        // note that it is possible to reduce the number of piece moves at the expense
        // of complicating the allocation process. We have the advantage here of having
        // the piece data already in memory. We also don't want to defer a mass of IO
        // until the download completes, hence interfering with other stuff such as
        // streaming. So I'm going to stick with this approach.
        piece_number = piece_number - first_piece_number;
        if (TRACE) {
            System.out.println("pieceComplete: " + piece_number);
        }
        if (piece_number >= next_piece_index) {
            return;
        }
        int store_index = getPieceIndex(fa, piece_number, false);
        if (store_index == -1) {
            throw (new FMFileManagerException("piece marked as complete but not yet allocated"));
        }
        if (piece_number == store_index) {
            if (TRACE) {
                System.out.println(" already in right place");
            }
            return;
        }
        // find out what's currently stored in the place this piece should be
        int swap_piece_number = piece_reverse_map[piece_number];
        if (swap_piece_number < 1) {
            throw (new FMFileManagerException("Inconsistent: failed to find piece to swap"));
        }
        if (TRACE) {
            System.out.println(" swapping " + piece_number + " and " + swap_piece_number + ": " + piece_number + " <-> " + store_index);
        }
        DirectByteBuffer temp_buffer = DirectByteBufferPool.getBuffer(SS_FILE, piece_size);
        DirectByteBuffer[] temp_buffers = new DirectByteBuffer[] { temp_buffer };
        try {
            long store_offset = first_piece_length + ((store_index - 1) * (long) piece_size);
            long swap_offset = first_piece_length + ((piece_number - 1) * (long) piece_size);
            delegate.read(fa, temp_buffers, swap_offset);
            piece_data.position(SS_FILE, 0);
            delegate.write(fa, new DirectByteBuffer[] { piece_data }, swap_offset);
            temp_buffer.position(SS_FILE, 0);
            delegate.write(fa, temp_buffers, store_offset);
            piece_map[piece_number] = piece_number;
            piece_reverse_map[piece_number] = piece_number;
            piece_map[swap_piece_number] = store_index;
            piece_reverse_map[store_index] = swap_piece_number;
            setDirty();
            if (piece_number == num_pieces - 1) {
                long file_length = swap_offset + last_piece_length;
                if (delegate.getLength(fa) > file_length) {
                    if (TRACE) {
                        System.out.println(" truncating file to correct length of " + file_length);
                    }
                    delegate.setLength(fa, file_length);
                }
            }
        } finally {
            temp_buffer.returnToPool();
        }
    } else {
        delegate.setPieceComplete(fa, piece_number, piece_data);
    }
}
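Stripped of the delegate and DirectByteBuffer machinery, the swap amounts to exchanging two fixed-size regions of the file. Below is a minimal sketch with a plain RandomAccessFile, using hypothetical names; it assumes pieceNumber >= 1, since piece 0 is never relocated (hence the swap_piece_number < 1 consistency check above), and that the first piece may be shorter than the rest, which is why slot offsets are based on firstPieceLength.

import java.io.IOException;
import java.io.RandomAccessFile;

public class PieceSwapSketch {

    static void swapIntoPlace(RandomAccessFile raf, byte[] pieceData,
                              int pieceNumber, int storeIndex,
                              long firstPieceLength, int pieceSize) throws IOException {
        // slot n (n >= 1) starts at firstPieceLength + (n - 1) * pieceSize
        long storeOffset = firstPieceLength + (storeIndex - 1) * (long) pieceSize;
        long swapOffset = firstPieceLength + (pieceNumber - 1) * (long) pieceSize;
        // read whatever currently occupies the piece's natural slot
        byte[] displaced = new byte[pieceSize];
        raf.seek(swapOffset);
        raf.readFully(displaced);
        // write the completed piece into its natural slot...
        raf.seek(swapOffset);
        raf.write(pieceData);
        // ...and move the displaced piece to where the completed one had been stored
        raf.seek(storeOffset);
        raf.write(displaced);
    }
}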
Use of com.biglybt.core.diskmanager.file.FMFileManagerException in project BiglyBT by BiglySoftware.
The class CacheFileWithCache, method multiBlockFlush.
protected void multiBlockFlush(List multi_block_entries, long multi_block_start, long multi_block_next, boolean release_entries) throws CacheFileManagerException {
    boolean write_ok = false;
    try {
        if (TRACE) {
            Logger.log(new LogEvent(torrent, LOGID, "multiBlockFlush: writing " + multi_block_entries.size() + " entries, [" + multi_block_start + "," + multi_block_next + "," + release_entries + "]"));
        }
        DirectByteBuffer[] buffers = new DirectByteBuffer[multi_block_entries.size()];
        long expected_per_entry_write = 0;
        for (int i = 0; i < buffers.length; i++) {
            CacheEntry entry = (CacheEntry) multi_block_entries.get(i);
            // sanity check - we should always be flushing entire entries
            DirectByteBuffer buffer = entry.getBuffer();
            if (buffer.limit(SS_CACHE) - buffer.position(SS_CACHE) != entry.getLength()) {
                throw (new CacheFileManagerException(this, "flush: inconsistent entry length, position wrong"));
            }
            expected_per_entry_write += entry.getLength();
            buffers[i] = buffer;
        }
        long expected_overall_write = multi_block_next - multi_block_start;
        if (expected_per_entry_write != expected_overall_write) {
            throw (new CacheFileManagerException(this, "flush: inconsistent write length, entries = " + expected_per_entry_write + " overall = " + expected_overall_write));
        }
        getFMFile().write(buffers, multi_block_start);
        manager.fileBytesWritten(expected_overall_write);
        // bytes_written += expected_overall_write;
        write_ok = true;
    } catch (FMFileManagerException e) {
        throw (new CacheFileManagerException(this, "flush fails", e));
    } finally {
        for (int i = 0; i < multi_block_entries.size(); i++) {
            CacheEntry entry = (CacheEntry) multi_block_entries.get(i);
            if (release_entries) {
                manager.releaseCacheSpace(entry);
            } else {
                entry.resetBufferPosition();
                if (write_ok) {
                    entry.setClean();
                }
            }
        }
    }
}
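The length check is what makes the vectored write safe: if the bytes remaining across the entries did not sum to the extent of the flushed region, the gathered write would land data at the wrong offsets. A standalone sketch of the same invariant with plain NIO types (names are illustrative, and FileChannel stands in for BiglyBT's FMFile):

import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.channels.FileChannel;

public class MultiBlockFlushSketch {

    static void flush(FileChannel channel, ByteBuffer[] buffers,
                      long start, long next) throws IOException {
        long perEntry = 0;
        for (ByteBuffer b : buffers) {
            perEntry += b.remaining();   // each entry must be flushed in full
        }
        long overall = next - start;
        if (perEntry != overall) {
            throw new IllegalStateException("flush: inconsistent write length, entries = "
                    + perEntry + " overall = " + overall);
        }
        channel.position(start);
        long written = 0;
        while (written < overall) {
            written += channel.write(buffers); // gathering write across all entries
        }
    }
}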
Use of com.biglybt.core.diskmanager.file.FMFileManagerException in project BiglyBT by BiglySoftware.
The class CacheFileWithCache, method writeCache.
protected void writeCache(DirectByteBuffer file_buffer, long file_position, boolean buffer_handed_over) throws CacheFileManagerException {
    checkPendingException();
    boolean buffer_cached = false;
    boolean failed = false;
    try {
        int file_buffer_position = file_buffer.position(SS_CACHE);
        int file_buffer_limit = file_buffer.limit(SS_CACHE);
        int write_length = file_buffer_limit - file_buffer_position;
        if (write_length == 0) {
            // nothing to do
            return;
        }
        if (AEDiagnostics.CHECK_DUMMY_FILE_DATA) {
            long temp_position = file_position + file_offset_in_torrent;
            while (file_buffer.hasRemaining(SS_CACHE)) {
                byte v = file_buffer.get(SS_CACHE);
                if ((byte) temp_position != v) {
                    System.out.println("writeCache: write is bad at " + temp_position + ": expected = " + (byte) temp_position + ", actual = " + v);
                    break;
                }
                temp_position++;
            }
            file_buffer.position(SS_CACHE, file_buffer_position);
        }
        if (manager.isWriteCacheEnabled()) {
            if (TRACE) {
                Logger.log(new LogEvent(torrent, LOGID, "writeCache: " + getName() + ", " + file_position + " - " + (file_position + write_length - 1) + ":" + file_buffer_position + "/" + file_buffer_limit));
            }
            if ((!buffer_handed_over) && write_length < piece_size) {
                if (TRACE) {
                    Logger.log(new LogEvent(torrent, LOGID, " making copy of non-handed-over buffer"));
                }
                DirectByteBuffer cache_buffer = DirectByteBufferPool.getBuffer(DirectByteBuffer.AL_CACHE_WRITE, write_length);
                cache_buffer.put(SS_CACHE, file_buffer);
                cache_buffer.position(SS_CACHE, 0);
                // make it look like this buffer has been handed over
                file_buffer = cache_buffer;
                file_buffer_position = 0;
                file_buffer_limit = write_length;
                buffer_handed_over = true;
            }
            if (buffer_handed_over) {
                // cache this write, allocate outside sync block (see manager for details)
                CacheEntry entry = manager.allocateCacheSpace(CacheEntry.CT_DATA_WRITE, this, file_buffer, file_position, write_length);
                try {
                    this_mon.enter();
                    if (access_mode != CF_WRITE) {
                        throw (new CacheFileManagerException(this, "Write failed - cache file is read only"));
                    }
                    // if we are overwriting stuff already in the cache then force-write overlapped
                    // data (easiest solution as this should only occur on hash-fails).
                    // do the flush and add synchronized to avoid the possibility of another
                    // thread getting in between and adding the same block, thus causing multiple
                    // entries for the same space
                    flushCache(file_position, write_length, true, -1, 0, -1);
                    cache.add(entry);
                    manager.addCacheSpace(entry);
                } finally {
                    this_mon.exit();
                }
                manager.cacheBytesWritten(write_length);
                bytes_written += write_length;
                buffer_cached = true;
            } else {
                try {
                    this_mon.enter();
                    flushCache(file_position, write_length, true, -1, 0, -1);
                    getFMFile().write(file_buffer, file_position);
                } finally {
                    this_mon.exit();
                }
                manager.fileBytesWritten(write_length);
                bytes_written += write_length;
            }
        } else {
            getFMFile().write(file_buffer, file_position);
            manager.fileBytesWritten(write_length);
            bytes_written += write_length;
        }
    } catch (CacheFileManagerException e) {
        failed = true;
        throw (e);
    } catch (FMFileManagerException e) {
        failed = true;
        manager.rethrow(this, e);
    } finally {
        if (buffer_handed_over) {
            if (!(failed || buffer_cached)) {
                file_buffer.returnToPool();
            }
        }
    }
}
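Condensed, the method takes one of three paths: small buffers that were not handed over are copied so the cache can own them, handed-over buffers are cached for a deferred write, and everything else (or any write while the cache is disabled) goes straight through to the file. A simplified sketch of that decision tree, with a hypothetical Sink interface standing in for the cache manager and the underlying file:

import java.nio.ByteBuffer;

public class WriteCachePolicySketch {

    interface Sink {
        void writeThrough(ByteBuffer buffer, long position); // immediate file write
        void cache(ByteBuffer buffer, long position);        // deferred, dirty entry
    }

    static void writeCache(Sink sink, ByteBuffer buffer, long position,
                           boolean handedOver, boolean cacheEnabled, int pieceSize) {
        int length = buffer.remaining();
        if (length == 0) {
            return; // nothing to do
        }
        if (!cacheEnabled) {
            sink.writeThrough(buffer, position);
            return;
        }
        if (!handedOver && length < pieceSize) {
            // copy a small borrowed buffer so the cache can keep it
            ByteBuffer copy = ByteBuffer.allocateDirect(length);
            copy.put(buffer);
            copy.flip();
            buffer = copy;
            handedOver = true;
        }
        if (handedOver) {
            sink.cache(buffer, position);        // cached; flushed later
        } else {
            sink.writeThrough(buffer, position); // large borrowed buffer: write now
        }
    }
}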