Use of com.biglybt.core.diskmanager.file.FMFileManagerException in the BiglyBT project by BiglySoftware.
Example: the readCache method of the CacheFileWithCache class.
/*
 * Satisfies a read either from the in-memory block cache or, on a cache
 * miss, from the underlying file - optionally priming a read-ahead cache
 * entry first so that subsequent sequential reads hit the cache.
 *
 * file_buffer        - destination; its SS_CACHE position/limit bound the region to fill
 * file_position      - file offset to read from
 * recursive          - true when re-entered after a read-ahead, to stop further recursion
 * disable_read_cache - when true, read-ahead priming is skipped for this request
 */
protected void readCache(final DirectByteBuffer file_buffer, final long file_position, final boolean recursive, final boolean disable_read_cache) throws CacheFileManagerException {
checkPendingException();
// snapshot the buffer window: read_length = number of bytes requested
final int file_buffer_position = file_buffer.position(SS_CACHE);
final int file_buffer_limit = file_buffer.limit(SS_CACHE);
final int read_length = file_buffer_limit - file_buffer_position;
try {
if (manager.isCacheEnabled()) {
if (TRACE)
Logger.log(new LogEvent(torrent, LOGID, "readCache: " + getName() + ", " + file_position + " - " + (file_position + read_length - 1) + ":" + file_buffer_position + "/" + file_buffer_limit));
if (read_length == 0) {
// nothing to do
return;
}
// first attempt: satisfy the read entirely from cached entries
long writing_file_position = file_position;
int writing_left = read_length;
boolean ok = true;
int used_entries = 0;
long used_read_ahead = 0;
try {
this_mon.enter();
// lazily create the history of recent read end-points, consulted
// later to decide whether this read looks sequential enough to
// justify a read-ahead
if (read_history == null) {
read_history = new long[READAHEAD_HISTORY];
Arrays.fill(read_history, -1);
}
// record the position of the byte *following* the end of this read
read_history[read_history_next++] = file_position + read_length;
if (read_history_next == READAHEAD_HISTORY) {
read_history_next = 0;
}
// NOTE(review): the scan below relies on cache iteration returning
// entries in ascending file-position order - confirm against the
// cache collection's implementation
Iterator it = cache.iterator();
while (ok && writing_left > 0 && it.hasNext()) {
CacheEntry entry = (CacheEntry) it.next();
long entry_file_position = entry.getFilePosition();
int entry_length = entry.getLength();
if (entry_file_position > writing_file_position) {
// data missing at the start of the read section
ok = false;
break;
} else if (entry_file_position + entry_length <= writing_file_position) {
// not got there yet
} else {
// copy required amount into read buffer
int skip = (int) (writing_file_position - entry_file_position);
int available = entry_length - skip;
if (available > writing_left) {
available = writing_left;
}
DirectByteBuffer entry_buffer = entry.getBuffer();
int entry_buffer_position = entry_buffer.position(SS_CACHE);
int entry_buffer_limit = entry_buffer.limit(SS_CACHE);
try {
// temporarily narrow the entry's window to the slice we need
entry_buffer.limit(SS_CACHE, entry_buffer_position + skip + available);
entry_buffer.position(SS_CACHE, entry_buffer_position + skip);
if (TRACE)
Logger.log(new LogEvent(torrent, LOGID, "cacheRead: using " + entry.getString() + "[" + entry_buffer.position(SS_CACHE) + "/" + entry_buffer.limit(SS_CACHE) + "]" + "to write to [" + file_buffer.position(SS_CACHE) + "/" + file_buffer.limit(SS_CACHE) + "]"));
used_entries++;
file_buffer.put(SS_CACHE, entry_buffer);
manager.cacheEntryUsed(entry);
} finally {
// restore the entry's window regardless of copy outcome
entry_buffer.limit(SS_CACHE, entry_buffer_limit);
entry_buffer.position(SS_CACHE, entry_buffer_position);
}
writing_file_position += available;
writing_left -= available;
// track how much of this read was served by read-ahead data
if (entry.getType() == CacheEntry.CT_READ_AHEAD) {
used_read_ahead += available;
}
}
}
} finally {
if (ok) {
read_ahead_bytes_used += used_read_ahead;
}
this_mon.exit();
}
if (ok && writing_left == 0) {
// fully satisfied from cache
if (!recursive) {
manager.cacheBytesRead(read_length);
bytes_read += read_length;
}
if (TRACE)
Logger.log(new LogEvent(torrent, LOGID, "cacheRead: cache use ok [entries = " + used_entries + "]"));
} else {
// cache miss (or only partial hit): fall back to reading the file
if (TRACE)
Logger.log(new LogEvent(torrent, LOGID, "cacheRead: cache use fails, reverting to plain read"));
// reset in case we've done some partial reads
file_buffer.position(SS_CACHE, file_buffer_position);
// two passes: the first may attempt a read-ahead; after a failure the
// second performs a plain read before giving up
for (int i = 0; i < 2; i++) {
try {
boolean do_read_ahead = // first time round
i == 0 && !recursive && !disable_read_cache && read_history != null && manager.isReadCacheEnabled() && read_length < current_read_ahead_size && file_position + current_read_ahead_size <= file.getLength();
if (do_read_ahead) {
// only read ahead if this is a continuation of a prior read within history
do_read_ahead = false;
for (int j = 0; j < READAHEAD_HISTORY; j++) {
if (read_history[j] == file_position) {
do_read_ahead = true;
break;
}
}
}
int actual_read_ahead = current_read_ahead_size;
if (do_read_ahead) {
// don't read ahead over the end of a piece
int request_piece_offset = (int) ((file_position - piece_offset) % piece_size);
if (request_piece_offset < 0) {
request_piece_offset += piece_size;
}
// System.out.println( "request offset = " + request_piece_offset );
int data_left = piece_size - request_piece_offset;
if (data_left < actual_read_ahead) {
actual_read_ahead = data_left;
// a read-ahead no larger than the request itself gains nothing
if (actual_read_ahead <= read_length) {
do_read_ahead = false;
}
// System.out.println( " trimmed to " + data_left );
}
}
if (do_read_ahead) {
if (TRACE)
Logger.log(new LogEvent(torrent, LOGID, "\tperforming read-ahead"));
DirectByteBuffer cache_buffer = DirectByteBufferPool.getBuffer(DirectByteBuffer.AL_CACHE_READ, actual_read_ahead);
boolean buffer_cached = false;
try {
// must allocate space OUTSIDE sync block (see manager for details)
CacheEntry entry = manager.allocateCacheSpace(CacheEntry.CT_READ_AHEAD, this, cache_buffer, file_position, actual_read_ahead);
entry.setClean();
try {
this_mon.enter();
// flush before read so that any bits in cache get re-read correctly on read
flushCache(file_position, actual_read_ahead, true, -1, 0, -1);
getFMFile().read(cache_buffer, file_position);
read_ahead_bytes_made += actual_read_ahead;
manager.fileBytesRead(actual_read_ahead);
bytes_read += actual_read_ahead;
cache_buffer.position(SS_CACHE, 0);
cache.add(entry);
manager.addCacheSpace(entry);
} finally {
this_mon.exit();
}
buffer_cached = true;
} finally {
if (!buffer_cached) {
// if the read operation failed, and hence the buffer
// wasn't added to the cache, then release it here
cache_buffer.returnToPool();
}
}
// recursively read from the cache, should hit the data we just read although
// there is the possibility that it could be flushed before then - hence the
// recursion flag that will avoid this happening next time around
readCache(file_buffer, file_position, true, disable_read_cache);
} else {
if (TRACE)
Logger.log(new LogEvent(torrent, LOGID, "\tnot performing read-ahead"));
// plain read straight into the caller's buffer
try {
this_mon.enter();
flushCache(file_position, read_length, true, -1, 0, -1);
getFMFile().read(file_buffer, file_position);
} finally {
this_mon.exit();
}
manager.fileBytesRead(read_length);
bytes_read += read_length;
}
break;
} catch (CacheFileManagerException e) {
// first-pass failure falls through to the plain-read pass
if (i == 1) {
throw (e);
}
} catch (FMFileManagerException e) {
if (i == 1) {
manager.rethrow(this, e);
}
}
}
}
} else {
// caching disabled entirely: read directly from the file
try {
getFMFile().read(file_buffer, file_position);
manager.fileBytesRead(read_length);
bytes_read += read_length;
} catch (FMFileManagerException e) {
manager.rethrow(this, e);
}
}
} finally {
// diagnostic mode: verify dummy-file contents (each byte equals the low
// 8 bits of its absolute offset within the torrent)
if (AEDiagnostics.CHECK_DUMMY_FILE_DATA) {
long temp_position = file_position + file_offset_in_torrent;
file_buffer.position(SS_CACHE, file_buffer_position);
while (file_buffer.hasRemaining(SS_CACHE)) {
byte v = file_buffer.get(SS_CACHE);
if ((byte) temp_position != v) {
System.out.println("readCache: read is bad at " + temp_position + ": expected = " + (byte) temp_position + ", actual = " + v);
file_buffer.position(SS_CACHE, file_buffer_limit);
break;
}
temp_position++;
}
}
}
}
Use of com.biglybt.core.diskmanager.file.FMFileManagerException in the BiglyBT project by BiglySoftware.
Example: the renameFile method of the CacheFileWithoutCacheMT class.
/*
 * Renames the underlying file. Any clone handles must be idle before the
 * rename can proceed, so this flags a move in progress (routing new work to
 * the base file), polls until all clones have drained, closes them, and
 * only then renames the base file.
 */
@Override
public void renameFile(String new_file) throws CacheFileManagerException {
try {
// flag the move so getFile() stops handing out clones and funnels
// all new activity through the base file (slot 0)
synchronized (this) {
moving = true;
}
// wait for outstanding clone usage to drain before renaming
while (true) {
synchronized (this) {
boolean surviving = false;
// slot 0 is the base file; only clone slots (1..n) matter here
for (int i = 1; i < files_use_count.length; i++) {
if (files_use_count[i] > 0) {
surviving = true;
break;
}
}
if (!surviving) {
// no clone in use: close them all and shrink back to just
// the base file before performing the rename
for (int i = 1; i < files_use_count.length; i++) {
FMFile file = files[i];
if (file.isClone()) {
// clone count is tracked globally, under the class lock
synchronized (CacheFileWithoutCacheMT.class) {
num_clones--;
}
}
file.close();
}
files = new FMFile[] { base_file };
files_use_count = new int[] { files_use_count[0] };
base_file.renameFile(new_file);
break;
}
}
// clones still busy - pause briefly and re-check
try {
System.out.println("CacheFileWithoutCacheMT: waiting for clones to die");
Thread.sleep(250);
} catch (Throwable e) {
// deliberately ignored - we simply loop round and retry
}
}
} catch (FMFileManagerException e) {
manager.rethrow(this, e);
} finally {
// always clear the flag so normal clone-based access resumes
synchronized (this) {
moving = false;
}
}
}
Use of com.biglybt.core.diskmanager.file.FMFileManagerException in the BiglyBT project by BiglySoftware.
Example: the getFile method of the CacheFileWithoutCacheMT class.
/*
 * Returns a file handle for the caller to use, preferring the least-busy
 * existing handle and cloning the base file when all handles are in use
 * (up to MAX_CLONES). The returned handle's use count is incremented; the
 * caller must release it when done.
 */
protected FMFile getFile() throws CacheFileManagerException {
	synchronized (this) {
		// during a rename all activity funnels through the base file so
		// that outstanding clones can drain and be closed
		if (moving) {
			files_use_count[0]++;
			return (files[0]);
		}
		// locate the handle with the fewest concurrent users
		int best_index = -1;
		int best_count = Integer.MAX_VALUE;
		for (int i = 0; i < files_use_count.length; i++) {
			if (files_use_count[i] < best_count) {
				best_count = files_use_count[i];
				best_index = i;
			}
		}
		// reuse it if it is idle, or if no more clones are permitted
		if (best_count == 0 || files_use_count.length == MAX_CLONES) {
			files_use_count[best_index]++;
			return (files[best_index]);
		}
		// every handle is busy and there is still room: add a new clone
		try {
			FMFile clone = base_file.createClone();
			int grown = files.length + 1;
			synchronized (CacheFileWithoutCacheMT.class) {
				// clone statistics are shared across all instances
				num_clones++;
				if (grown == MAX_CLONES || grown > max_clone_depth) {
					max_clone_depth = grown;
				}
			}
			// grow both parallel arrays by one slot for the new clone
			FMFile[] grown_files = new FMFile[grown];
			int[] grown_counts = new int[grown];
			System.arraycopy(files, 0, grown_files, 0, files.length);
			System.arraycopy(files_use_count, 0, grown_counts, 0, files_use_count.length);
			grown_files[grown - 1] = clone;
			grown_counts[grown - 1] = 1;
			files = grown_files;
			files_use_count = grown_counts;
			return (clone);
		} catch (FMFileManagerException e) {
			manager.rethrow(this, e);
			return (null);
		}
	}
}
Use of com.biglybt.core.diskmanager.file.FMFileManagerException in the BiglyBT project by BiglySoftware.
Example: the read method of the CacheFileWithoutCacheMT class.
/*
 * Reads from the file into the supplied buffer at the given position,
 * using (and afterwards releasing) a per-call file handle. Byte counters
 * are updated on success; file-level failures are rethrown via the manager.
 */
@Override
public void read(DirectByteBuffer buffer, long position, short policy) throws CacheFileManagerException {
	// capture the request size up-front: the buffer's position advances as it fills
	final int len = buffer.remaining(DirectByteBuffer.SS_CACHE);
	FMFile handle = null;
	try {
		handle = getFile();
		handle.read(buffer, position);
		manager.fileBytesRead(len);
		bytes_read += len;
	} catch (FMFileManagerException e) {
		manager.rethrow(this, e);
	} finally {
		// release even when the read failed (handle may still be null here)
		releaseFile(handle);
	}
}
Use of com.biglybt.core.diskmanager.file.FMFileManagerException in the BiglyBT project by BiglySoftware.
Example: the close method of the CacheFileWithoutCacheMT class.
/*
 * Closes every file handle (base file and all clones), decrementing the
 * global clone counter for each clone. File-level failures are rethrown
 * via the manager.
 */
@Override
public void close() throws CacheFileManagerException {
	try {
		synchronized (this) {
			for (FMFile handle : files) {
				if (handle.isClone()) {
					// clone statistics are shared across instances, guarded by the class lock
					synchronized (CacheFileWithoutCacheMT.class) {
						num_clones--;
					}
				}
				handle.close();
			}
		}
	} catch (FMFileManagerException e) {
		manager.rethrow(this, e);
	}
}
Aggregations