Example usage of com.biglybt.core.diskmanager.cache.CacheFile in the BiglyBT project (BiglySoftware): class LWSDiskManager, method getFileInfo.
/**
 * Builds the per-file {@code DiskManagerFileInfoImpl} array for this (lightweight)
 * download, verifying that each backing file exists and has exactly the length
 * declared by the torrent.
 *
 * @param pm_files      piece-mapper descriptors, one per torrent file
 * @param save_location the download's save location; for simple torrents the
 *                      parent directory is used as the file root
 * @return the populated file-info array on success, or {@code null} after
 *         calling {@code setFailed(...)} on any error
 */
protected DiskManagerFileInfoImpl[] getFileInfo(DMPieceMapperFile[] pm_files, File save_location) {
    boolean ok = false;
    DiskManagerFileInfoImpl[] local_files = new DiskManagerFileInfoImpl[pm_files.length];
    try {
        TOTorrent torrent = lws.getTOTorrent(false);
        if (torrent.isSimpleTorrent()) {
            // simple torrent: the save location IS the data file, so files are
            // resolved relative to its parent directory
            save_location = save_location.getParentFile();
        }
        for (int i = 0; i < pm_files.length; i++) {
            DMPieceMapperFile pm_info = pm_files[i];
            File relative_file = pm_info.getDataFile();
            long target_length = pm_info.getLength();
            DiskManagerFileInfoImpl file_info = new DiskManagerFileInfoImpl(this, save_location.toString(), relative_file, i, pm_info.getTorrentFile(), DiskManagerFileInfo.ST_LINEAR);
            local_files[i] = file_info;
            CacheFile cache_file = file_info.getCacheFile();
            File data_file = file_info.getFile(true);
            if (!cache_file.exists()) {
                throw (new Exception("File '" + data_file + "' doesn't exist"));
            }
            long existing_length = cache_file.getLength();
            if (existing_length != target_length) {
                // bug fix: this branch previously reported "doesn't exist", hiding
                // the real cause (a length mismatch) from the user
                throw (new Exception("File '" + data_file + "' has incorrect length: expected " + target_length + ", actual " + existing_length));
            }
            pm_info.setFileInfo(file_info);
        }
        ok = true;
        return (local_files);
    } catch (Throwable e) {
        setFailed("getFiles failed - " + Debug.getNestedExceptionMessage(e));
        return (null);
    } finally {
        // on failure, close any cache files we already opened so nothing leaks
        if (!ok) {
            for (int i = 0; i < local_files.length; i++) {
                if (local_files[i] != null) {
                    local_files[i].close();
                }
            }
        }
    }
}
Example usage of com.biglybt.core.diskmanager.cache.CacheFile in the BiglyBT project (BiglySoftware): class DiskManagerUtil, method getFileInfoSkeleton.
/**
 * Builds a lightweight "skeleton" DiskManagerFileInfoSet for a download whose
 * real DiskManager is not running. The skeleton supports priority/skip/storage
 * type changes and read access via a lazily-created read-only CacheFile, and
 * persists changes through the DownloadManagerState.
 *
 * @param download_manager the download the skeleton describes
 * @param listener         notified of file priority changes
 * @return the skeleton set; an empty set is returned if the torrent is null or
 *         on any unexpected error
 */
public static DiskManagerFileInfoSet getFileInfoSkeleton(final DownloadManager download_manager, final DiskManagerListener listener) {
    final TOTorrent torrent = download_manager.getTorrent();
    if (torrent == null) {
        return (new DiskManagerFileInfoSetImpl(new DiskManagerFileInfoImpl[0], null));
    }
    String tempRootDir = download_manager.getAbsoluteSaveLocation().getParent();
    // in case we already are at the root
    if (tempRootDir == null)
        tempRootDir = download_manager.getAbsoluteSaveLocation().getPath();
    if (!torrent.isSimpleTorrent()) {
        // multi-file torrent: files live under a directory named after the download
        tempRootDir += File.separator + download_manager.getAbsoluteSaveLocation().getName();
    }
    tempRootDir += File.separator;
    // prevent attempted state saves and associated nastyness during population of
    // the file skeleton entries
    final boolean[] loading = { true };
    try {
        final String root_dir = StringInterner.intern(tempRootDir);
        try {
            final LocaleUtilDecoder locale_decoder = LocaleTorrentUtil.getTorrentEncoding(torrent);
            TOTorrentFile[] torrent_files = torrent.getFiles();
            final FileSkeleton[] res = new FileSkeleton[torrent_files.length];
            final String incomplete_suffix = download_manager.getDownloadState().getAttribute(DownloadManagerState.AT_INCOMP_FILE_SUFFIX);
            final DiskManagerFileInfoSet fileSetSkeleton = new DiskManagerFileInfoSet() {
                @Override
                public DiskManagerFileInfo[] getFiles() {
                    return res;
                }
                @Override
                public int nbFiles() {
                    return res.length;
                }
                @Override
                public void setPriority(int[] toChange) {
                    if (toChange.length != res.length)
                        throw new IllegalArgumentException("array length mismatches the number of files");
                    for (int i = 0; i < res.length; i++) res[i].priority = toChange[i];
                    // skip persistence while the skeleton is still being populated
                    if (!loading[0]) {
                        DiskManagerImpl.storeFilePriorities(download_manager, res);
                    }
                    for (int i = 0; i < res.length; i++) if (toChange[i] != 0)
                        listener.filePriorityChanged(res[i]);
                }
                @Override
                public void setSkipped(boolean[] toChange, boolean setSkipped) {
                    if (toChange.length != res.length)
                        throw new IllegalArgumentException("array length mismatches the number of files");
                    if (!setSkipped) {
                        // un-skipping compact files requires converting their storage
                        // type back to a non-compact form first
                        String[] types = DiskManagerImpl.getStorageTypes(download_manager);
                        boolean[] toLinear = new boolean[toChange.length];
                        boolean[] toReorder = new boolean[toChange.length];
                        int num_linear = 0;
                        int num_reorder = 0;
                        for (int i = 0; i < toChange.length; i++) {
                            if (toChange[i]) {
                                int old_type = DiskManagerUtil.convertDMStorageTypeFromString(types[i]);
                                if (old_type == DiskManagerFileInfo.ST_COMPACT) {
                                    toLinear[i] = true;
                                    num_linear++;
                                } else if (old_type == DiskManagerFileInfo.ST_REORDER_COMPACT) {
                                    toReorder[i] = true;
                                    num_reorder++;
                                }
                            }
                        }
                        // abort if any requested conversion failed
                        if (num_linear > 0) {
                            if (!Arrays.equals(toLinear, setStorageTypes(toLinear, DiskManagerFileInfo.ST_LINEAR))) {
                                return;
                            }
                        }
                        if (num_reorder > 0) {
                            if (!Arrays.equals(toReorder, setStorageTypes(toReorder, DiskManagerFileInfo.ST_REORDER))) {
                                return;
                            }
                        }
                    }
                    // to_link[i] is non-null when the skip change moved the file and
                    // a file link must be recorded
                    File[] to_link = new File[res.length];
                    for (int i = 0; i < res.length; i++) {
                        if (toChange[i]) {
                            to_link[i] = res[i].setSkippedInternal(setSkipped);
                        }
                    }
                    if (!loading[0]) {
                        DiskManagerImpl.storeFilePriorities(download_manager, res);
                    }
                    List<Integer> from_indexes = new ArrayList<>();
                    List<File> from_links = new ArrayList<>();
                    List<File> to_links = new ArrayList<>();
                    for (int i = 0; i < res.length; i++) {
                        if (to_link[i] != null) {
                            from_indexes.add(i);
                            from_links.add(res[i].getFile(false));
                            to_links.add(to_link[i]);
                        }
                    }
                    // record all link changes in one batch
                    if (from_links.size() > 0) {
                        download_manager.getDownloadState().setFileLinks(from_indexes, from_links, to_links);
                    }
                    if (!setSkipped) {
                        doFileExistenceChecks(this, toChange, download_manager, true);
                    }
                    for (int i = 0; i < res.length; i++) {
                        if (toChange[i]) {
                            listener.filePriorityChanged(res[i]);
                        }
                    }
                }
                @Override
                public boolean[] setStorageTypes(boolean[] toChange, int newStorageType) {
                    if (toChange.length != res.length)
                        throw new IllegalArgumentException("array length mismatches the number of files");
                    String[] types = DiskManagerImpl.getStorageTypes(download_manager);
                    boolean[] modified = new boolean[res.length];
                    boolean[] toSkip = new boolean[res.length];
                    int toSkipCount = 0;
                    DownloadManagerState dmState = download_manager.getDownloadState();
                    try {
                        // batch all attribute changes into a single state save (see finally)
                        dmState.suppressStateSave(true);
                        for (int i = 0; i < res.length; i++) {
                            if (!toChange[i])
                                continue;
                            final int idx = i;
                            int old_type = DiskManagerUtil.convertDMStorageTypeFromString(types[i]);
                            if (newStorageType == old_type) {
                                // already the requested type - report success, nothing to do
                                modified[i] = true;
                                continue;
                            }
                            try {
                                File target_file = res[i].getFile(true);
                                if (target_file.exists()) {
                                    // open/close the file under the new storage type so the
                                    // cache layer performs any on-disk conversion
                                    CacheFile cache_file = CacheFileManagerFactory.getSingleton().createFile(new CacheFileOwner() {
                                        @Override
                                        public String getCacheFileOwnerName() {
                                            return (download_manager.getInternalName());
                                        }
                                        @Override
                                        public TOTorrentFile getCacheFileTorrentFile() {
                                            return (res[idx].getTorrentFile());
                                        }
                                        @Override
                                        public File getCacheFileControlFileDir() {
                                            return (download_manager.getDownloadState().getStateFile());
                                        }
                                        @Override
                                        public int getCacheMode() {
                                            return (CacheFileOwner.CACHE_MODE_NORMAL);
                                        }
                                    }, target_file, DiskManagerUtil.convertDMStorageTypeToCache(newStorageType));
                                    // need this to trigger recovery for re-order files :(
                                    cache_file.getLength();
                                    cache_file.close();
                                }
                                // switching a non-skipped file to a compact type implies skipping it
                                toSkip[i] = (newStorageType == FileSkeleton.ST_COMPACT || newStorageType == FileSkeleton.ST_REORDER_COMPACT) && !res[i].isSkipped();
                                if (toSkip[i]) {
                                    toSkipCount++;
                                }
                                modified[i] = true;
                            } catch (Throwable e) {
                                Debug.printStackTrace(e);
                                Logger.log(new LogAlert(download_manager, LogAlert.REPEATABLE, LogAlert.AT_ERROR, "Failed to change storage type for '" + res[i].getFile(true) + "': " + Debug.getNestedExceptionMessage(e)));
                                // download's not running - tag for recheck
                                RDResumeHandler.recheckFile(download_manager, res[i]);
                            }
                            types[i] = DiskManagerUtil.convertDMStorageTypeToString(newStorageType);
                        }
                        /*
                         * set storage type and skipped before we do piece clearing and file
                         * clearing checks as those checks work better when skipped/stype is set
                         * properly
                         */
                        dmState.setListAttribute(DownloadManagerState.AT_FILE_STORE_TYPES, types);
                        if (toSkipCount > 0) {
                            setSkipped(toSkip, true);
                        }
                        for (int i = 0; i < res.length; i++) {
                            if (!toChange[i])
                                continue;
                            // download's not running, update resume data as necessary
                            int cleared = RDResumeHandler.storageTypeChanged(download_manager, res[i]);
                            if (cleared > 0) {
                                // reduce the downloaded byte count by the cleared pieces,
                                // clamping at zero
                                res[i].downloaded = res[i].downloaded - cleared * res[i].getTorrentFile().getTorrent().getPieceLength();
                                if (res[i].downloaded < 0)
                                    res[i].downloaded = 0;
                            }
                        }
                        DiskManagerImpl.storeFileDownloaded(download_manager, res, true);
                        doFileExistenceChecks(this, toChange, download_manager, newStorageType == FileSkeleton.ST_LINEAR || newStorageType == FileSkeleton.ST_REORDER);
                    } finally {
                        dmState.suppressStateSave(false);
                        dmState.save();
                    }
                    return modified;
                }
            };
            for (int i = 0; i < res.length; i++) {
                final TOTorrentFile torrent_file = torrent_files[i];
                final int file_index = i;
                FileSkeleton info = new FileSkeleton() {
                    // lazily-created read-only cache file, guarded by cache_read_mon
                    private volatile CacheFile read_cache_file;
                    // do not access this field directly, use lazyGetFile() instead
                    private WeakReference dataFile = new WeakReference(null);
                    @Override
                    public void setPriority(int b) {
                        priority = b;
                        DiskManagerImpl.storeFilePriorities(download_manager, res);
                        listener.filePriorityChanged(this);
                    }
                    @Override
                    public void setSkipped(boolean _skipped) {
                        // un-skipping a compact file requires a storage-type conversion first;
                        // bail out if that conversion fails
                        if (!_skipped && getStorageType() == ST_COMPACT) {
                            if (!setStorageType(ST_LINEAR)) {
                                return;
                            }
                        }
                        if (!_skipped && getStorageType() == ST_REORDER_COMPACT) {
                            if (!setStorageType(ST_REORDER)) {
                                return;
                            }
                        }
                        File to_link = setSkippedInternal(_skipped);
                        DiskManagerImpl.storeFilePriorities(download_manager, res);
                        if (to_link != null) {
                            download_manager.getDownloadState().setFileLink(file_index, getFile(false), to_link);
                        }
                        if (!_skipped) {
                            boolean[] toCheck = new boolean[fileSetSkeleton.nbFiles()];
                            toCheck[file_index] = true;
                            doFileExistenceChecks(fileSetSkeleton, toCheck, download_manager, true);
                        }
                        listener.filePriorityChanged(this);
                    }
                    @Override
                    public int getAccessMode() {
                        // skeleton files are read-only
                        return (READ);
                    }
                    @Override
                    public long getDownloaded() {
                        return (downloaded);
                    }
                    @Override
                    public long getLastModified() {
                        return (getFile(true).lastModified());
                    }
                    @Override
                    public void setDownloaded(long l) {
                        downloaded = l;
                    }
                    @Override
                    public String getExtension() {
                        // strip any incomplete-file suffix before extracting the extension
                        String ext = lazyGetFile().getName();
                        if (incomplete_suffix != null && ext.endsWith(incomplete_suffix)) {
                            ext = ext.substring(0, ext.length() - incomplete_suffix.length());
                        }
                        int separator = ext.lastIndexOf(".");
                        if (separator == -1)
                            separator = 0;
                        return ext.substring(separator);
                    }
                    @Override
                    public int getFirstPieceNumber() {
                        return (torrent_file.getFirstPieceNumber());
                    }
                    @Override
                    public int getLastPieceNumber() {
                        return (torrent_file.getLastPieceNumber());
                    }
                    @Override
                    public long getLength() {
                        return (torrent_file.getLength());
                    }
                    @Override
                    public int getIndex() {
                        return (file_index);
                    }
                    @Override
                    public int getNbPieces() {
                        return (torrent_file.getNumberOfPieces());
                    }
                    @Override
                    public int getPriority() {
                        return (priority);
                    }
                    @Override
                    protected File setSkippedInternal(boolean _skipped) {
                        // returns the file to link to if linkage is required
                        skipped_internal = _skipped;
                        if (!download_manager.isDestroyed()) {
                            DownloadManagerState dm_state = download_manager.getDownloadState();
                            // dnd_sf: optional subfolder that do-not-download files are moved into
                            String dnd_sf = dm_state.getAttribute(DownloadManagerState.AT_DND_SUBFOLDER);
                            if (dnd_sf != null) {
                                File link = getLink();
                                File file = getFile(false);
                                if (_skipped) {
                                    // skipping: move the (unlinked) file into the DND subfolder,
                                    // optionally applying the DND prefix to its name
                                    if (link == null || link.equals(file)) {
                                        File parent = file.getParentFile();
                                        if (parent != null) {
                                            String prefix = dm_state.getAttribute(DownloadManagerState.AT_DND_PREFIX);
                                            String file_name = file.getName();
                                            if (prefix != null && !file_name.startsWith(prefix)) {
                                                file_name = prefix + file_name;
                                            }
                                            File new_parent = new File(parent, dnd_sf);
                                            File new_file = new File(new_parent, file_name);
                                            if (!new_file.exists()) {
                                                if (!new_parent.exists()) {
                                                    new_parent.mkdirs();
                                                }
                                                if (new_parent.canWrite()) {
                                                    boolean ok;
                                                    if (file.exists()) {
                                                        ok = FileUtil.renameFile(file, new_file);
                                                    } else {
                                                        // nothing on disk yet - just record the link
                                                        ok = true;
                                                    }
                                                    if (ok) {
                                                        return (new_file);
                                                    }
                                                }
                                            }
                                        }
                                    }
                                } else {
                                    // un-skipping: move the file back out of the DND subfolder,
                                    // undoing prefix/incomplete-suffix adjustments as needed
                                    if (link != null && !file.exists()) {
                                        File parent = file.getParentFile();
                                        if (parent != null && parent.canWrite()) {
                                            File new_parent = parent.getName().equals(dnd_sf) ? parent : new File(parent, dnd_sf);
                                            // use link name to handle incomplete file suffix if set
                                            File new_file = new File(new_parent, link.getName());
                                            if (new_file.equals(link)) {
                                                boolean ok;
                                                String incomp_ext = dm_state.getAttribute(DownloadManagerState.AT_INCOMP_FILE_SUFFIX);
                                                String file_name = file.getName();
                                                String prefix = dm_state.getAttribute(DownloadManagerState.AT_DND_PREFIX);
                                                boolean prefix_removed = false;
                                                if (prefix != null && file_name.startsWith(prefix)) {
                                                    file_name = file_name.substring(prefix.length());
                                                    prefix_removed = true;
                                                }
                                                if (incomp_ext != null && incomp_ext.length() > 0 && getDownloaded() != getLength()) {
                                                    if (prefix == null) {
                                                        prefix = "";
                                                    }
                                                    file = new File(file.getParentFile(), prefix + file_name + incomp_ext);
                                                } else if (prefix_removed) {
                                                    file = new File(file.getParentFile(), file_name);
                                                }
                                                if (new_file.exists()) {
                                                    ok = FileUtil.renameFile(new_file, file);
                                                } else {
                                                    ok = true;
                                                }
                                                if (ok) {
                                                    // remove the DND subfolder if we emptied it
                                                    File[] files = new_parent.listFiles();
                                                    if (files != null && files.length == 0) {
                                                        new_parent.delete();
                                                    }
                                                    return (file);
                                                }
                                            }
                                        }
                                    }
                                }
                            }
                        }
                        return (null);
                    }
                    @Override
                    public boolean isSkipped() {
                        return (skipped_internal);
                    }
                    @Override
                    public DiskManager getDiskManager() {
                        // skeleton - there is no live disk manager
                        return (null);
                    }
                    @Override
                    public DownloadManager getDownloadManager() {
                        return (download_manager);
                    }
                    @Override
                    public File getFile(boolean follow_link) {
                        if (follow_link) {
                            File link = getLink();
                            if (link != null) {
                                return (link);
                            }
                        }
                        return lazyGetFile();
                    }
                    private File lazyGetFile() {
                        // weakly cache the computed File so repeated lookups are cheap
                        // without pinning memory
                        File toReturn = (File) dataFile.get();
                        if (toReturn != null)
                            return toReturn;
                        TOTorrent tor = download_manager.getTorrent();
                        String path_str = root_dir;
                        File simpleFile = null;
                        if (tor == null || tor.isSimpleTorrent()) {
                            // rumour has it tor can sometimes be null
                            simpleFile = download_manager.getAbsoluteSaveLocation();
                        } else {
                            // decode each path component with the torrent's locale decoder,
                            // falling back to a placeholder name if undecodable
                            byte[][] path_comps = torrent_file.getPathComponents();
                            for (int j = 0; j < path_comps.length; j++) {
                                String comp;
                                try {
                                    comp = locale_decoder.decodeString(path_comps[j]);
                                } catch (UnsupportedEncodingException e) {
                                    Debug.printStackTrace(e);
                                    comp = "undecodableFileName" + file_index;
                                }
                                comp = FileUtil.convertOSSpecificChars(comp, j != path_comps.length - 1);
                                path_str += (j == 0 ? "" : File.separator) + comp;
                            }
                        }
                        dataFile = new WeakReference(toReturn = simpleFile != null ? simpleFile : new File(path_str));
                        // System.out.println("new file:"+toReturn);
                        return toReturn;
                    }
                    @Override
                    public TOTorrentFile getTorrentFile() {
                        return (torrent_file);
                    }
                    @Override
                    public boolean setLink(File link_destination) {
                        /**
                         * If we a simple torrent, then we'll redirect the call to the download and move the
                         * data files that way - that'll keep everything in sync.
                         */
                        if (download_manager.getTorrent().isSimpleTorrent()) {
                            try {
                                download_manager.moveDataFiles(link_destination.getParentFile(), link_destination.getName());
                                return true;
                            } catch (DownloadManagerException e) {
                                // What should we do with the error?
                                return false;
                            }
                        }
                        return setLinkAtomic(link_destination);
                    }
                    @Override
                    public boolean setLinkAtomic(File link_destination) {
                        return (setFileLink(download_manager, res, this, lazyGetFile(), link_destination, null));
                    }
                    @Override
                    public boolean setLinkAtomic(File link_destination, FileUtil.ProgressListener pl) {
                        return (setFileLink(download_manager, res, this, lazyGetFile(), link_destination, pl));
                    }
                    @Override
                    public File getLink() {
                        return (download_manager.getDownloadState().getFileLink(file_index, lazyGetFile()));
                    }
                    @Override
                    public boolean setStorageType(int type) {
                        // delegate to the set-level operation for a single file
                        boolean[] change = new boolean[res.length];
                        change[file_index] = true;
                        return fileSetSkeleton.setStorageTypes(change, type)[file_index];
                    }
                    @Override
                    public int getStorageType() {
                        return (DiskManagerUtil.convertDMStorageTypeFromString(DiskManagerImpl.getStorageType(download_manager, file_index)));
                    }
                    @Override
                    public void flushCache() {
                    }
                    @Override
                    public DirectByteBuffer read(long offset, int length) throws IOException {
                        // lazily open the read-only cache file, guarded by cache_read_mon
                        CacheFile temp;
                        try {
                            cache_read_mon.enter();
                            if (read_cache_file == null) {
                                try {
                                    int type = convertDMStorageTypeFromString(DiskManagerImpl.getStorageType(download_manager, file_index));
                                    read_cache_file = CacheFileManagerFactory.getSingleton().createFile(new CacheFileOwner() {
                                        @Override
                                        public String getCacheFileOwnerName() {
                                            return (download_manager.getInternalName());
                                        }
                                        @Override
                                        public TOTorrentFile getCacheFileTorrentFile() {
                                            return (torrent_file);
                                        }
                                        @Override
                                        public File getCacheFileControlFileDir() {
                                            return (download_manager.getDownloadState().getStateFile());
                                        }
                                        @Override
                                        public int getCacheMode() {
                                            return (CacheFileOwner.CACHE_MODE_NORMAL);
                                        }
                                    }, getFile(true), convertDMStorageTypeToCache(type));
                                } catch (Throwable e) {
                                    Debug.printStackTrace(e);
                                    throw (new IOException(e.getMessage()));
                                }
                            }
                            temp = read_cache_file;
                        } finally {
                            cache_read_mon.exit();
                        }
                        DirectByteBuffer buffer = DirectByteBufferPool.getBuffer(DirectByteBuffer.AL_DM_READ, length);
                        try {
                            temp.read(buffer, offset, CacheFile.CP_READ_CACHE);
                        } catch (Throwable e) {
                            // return the buffer to the pool on failure so it isn't leaked
                            buffer.returnToPool();
                            Debug.printStackTrace(e);
                            throw (new IOException(e.getMessage()));
                        }
                        return (buffer);
                    }
                    @Override
                    public int getReadBytesPerSecond() {
                        // NOTE(review): both branches return 0, so the temp/null check is
                        // dead code - rate reporting appears unimplemented for skeletons
                        CacheFile temp = read_cache_file;
                        if (temp == null) {
                            return (0);
                        }
                        return (0);
                    }
                    @Override
                    public int getWriteBytesPerSecond() {
                        return (0);
                    }
                    @Override
                    public long getETA() {
                        return (-1);
                    }
                    @Override
                    public void close() {
                        // detach the cache file under the monitor, close it outside
                        CacheFile temp;
                        try {
                            cache_read_mon.enter();
                            temp = read_cache_file;
                            read_cache_file = null;
                        } finally {
                            cache_read_mon.exit();
                        }
                        if (temp != null) {
                            try {
                                temp.close();
                            } catch (Throwable e) {
                                Debug.printStackTrace(e);
                            }
                        }
                    }
                    @Override
                    public void addListener(DiskManagerFileInfoListener listener) {
                        // if already complete, immediately report the whole file as
                        // written and checked
                        if (getDownloaded() == getLength()) {
                            try {
                                listener.dataWritten(0, getLength());
                                listener.dataChecked(0, getLength());
                            } catch (Throwable e) {
                                Debug.printStackTrace(e);
                            }
                        }
                    }
                    @Override
                    public void removeListener(DiskManagerFileInfoListener listener) {
                    }
                };
                res[i] = info;
            }
            loadFilePriorities(download_manager, fileSetSkeleton);
            loadFileDownloaded(download_manager, res);
            return (fileSetSkeleton);
        } finally {
            // re-enable persistence now that the skeleton is fully populated
            loading[0] = false;
        }
    } catch (Throwable e) {
        Debug.printStackTrace(e);
        return (new DiskManagerFileInfoSetImpl(new DiskManagerFileInfoImpl[0], null));
    }
}
Example usage of com.biglybt.core.diskmanager.cache.CacheFile in the BiglyBT project (BiglySoftware): class DMCheckerImpl, method enqueueCheckRequestSupport.
/**
 * Asynchronously hash-checks one piece: short-circuits when checking is
 * disabled or the piece's data cannot be present (files too short / all
 * compact), otherwise reads the piece and hashes it concurrently, reporting
 * the outcome to the listener. Maintains async_reads/async_checks counters
 * (guarded by this_mon) so a stop can wait for in-flight work.
 *
 * @param request    the piece-check request
 * @param listener   receives checkCompleted/checkCancelled/checkFailed
 * @param read_flush whether the underlying read should flush the cache
 */
protected void enqueueCheckRequestSupport(final DiskManagerCheckRequest request, final DiskManagerCheckRequestListener listener, boolean read_flush) {
    if (!checking_enabled) {
        // checking disabled: optimistically report success
        listener.checkCompleted(request, true);
        return;
    }
    final int pieceNumber = request.getPieceNumber();
    try {
        final byte[] required_hash = disk_manager.getPieceHash(pieceNumber);
        // quick check that the files that make up this piece are at least big enough
        // to warrant reading the data to check
        // also, if the piece is entirely compact then we can immediately
        // fail as we don't actually have any data for the piece (or can assume we don't)
        // we relax this a bit to catch pieces that are part of compact files with less than
        // three pieces as it is possible that these were once complete and have all their bits
        // living in retained compact areas
        final DMPieceList pieceList = disk_manager.getPieceList(pieceNumber);
        try {
            // there are other comments in the code about the existence of 0 length piece lists
            // just in case these still occur for who knows what reason ensure that a 0 length list
            // causes the code to carry on and do the check (i.e. it is no worse that before this
            // optimisation was added...)
            boolean all_compact = pieceList.size() > 0;
            for (int i = 0; i < pieceList.size(); i++) {
                DMPieceMapEntry piece_entry = pieceList.get(i);
                DiskManagerFileInfoImpl file_info = piece_entry.getFile();
                CacheFile cache_file = file_info.getCacheFile();
                if (cache_file.compareLength(piece_entry.getOffset()) < 0) {
                    // file shorter than this piece's offset - data can't be there
                    listener.checkCompleted(request, false);
                    return;
                }
                if (all_compact) {
                    int st = cache_file.getStorageType();
                    if ((st != CacheFile.CT_COMPACT && st != CacheFile.CT_PIECE_REORDER_COMPACT) || file_info.getNbPieces() <= 2) {
                        all_compact = false;
                    }
                }
            }
            if (all_compact) {
                // System.out.println( "Piece " + pieceNumber + " is all compact, failing hash check" );
                listener.checkCompleted(request, false);
                return;
            }
        } catch (Throwable e) {
            // we can fail here if the disk manager has been stopped as the cache file length access may be being
            // performed on a "closed" (i.e. un-owned) file
            listener.checkCancelled(request);
            return;
        }
        int this_piece_length = disk_manager.getPieceLength(pieceNumber);
        DiskManagerReadRequest read_request = disk_manager.createReadRequest(pieceNumber, 0, this_piece_length);
        try {
            this_mon.enter();
            if (stopped) {
                listener.checkCancelled(request);
                return;
            }
            // count the read so stop can wait for it
            async_reads++;
        } finally {
            this_mon.exit();
        }
        read_request.setFlush(read_flush);
        read_request.setUseCache(!request.isAdHoc());
        disk_manager.enqueueReadRequest(read_request, new DiskManagerReadRequestListener() {
            @Override
            public void readCompleted(DiskManagerReadRequest read_request, DirectByteBuffer buffer) {
                // the read is done - decrement async_reads before starting the check
                complete();
                try {
                    this_mon.enter();
                    if (stopped) {
                        buffer.returnToPool();
                        listener.checkCancelled(request);
                        return;
                    }
                    async_checks++;
                } finally {
                    this_mon.exit();
                }
                if (buffer.getFlag(DirectByteBuffer.FL_CONTAINS_TRANSIENT_DATA)) {
                    // transient data can't be trusted for a hash check - fail it
                    try {
                        buffer.returnToPool();
                        listener.checkCompleted(request, false);
                    } finally {
                        try {
                            this_mon.enter();
                            async_checks--;
                            if (stopped) {
                                async_check_sem.release();
                            }
                        } finally {
                            this_mon.exit();
                        }
                    }
                } else {
                    try {
                        final DirectByteBuffer f_buffer = buffer;
                        ConcurrentHasher.getSingleton().addRequest(buffer.getBuffer(DirectByteBuffer.SS_DW), new ConcurrentHasherRequestListener() {
                            @Override
                            public void complete(ConcurrentHasherRequest hash_request) {
                                // async_result: 1 = success, 2 = hash mismatch, 3 = cancelled
                                // cancelled
                                int async_result = 3;
                                try {
                                    byte[] actual_hash = hash_request.getResult();
                                    if (actual_hash != null) {
                                        request.setHash(actual_hash);
                                        // success
                                        async_result = 1;
                                        for (int i = 0; i < actual_hash.length; i++) {
                                            if (actual_hash[i] != required_hash[i]) {
                                                // failed;
                                                async_result = 2;
                                                break;
                                            }
                                        }
                                    }
                                } finally {
                                    try {
                                        if (async_result == 1) {
                                            // on success, mark the piece complete in each file it spans
                                            try {
                                                for (int i = 0; i < pieceList.size(); i++) {
                                                    DMPieceMapEntry piece_entry = pieceList.get(i);
                                                    DiskManagerFileInfoImpl file_info = piece_entry.getFile();
                                                    if (file_info.getLength() > 0 || !file_info.isSkipped()) {
                                                        CacheFile cache_file = file_info.getCacheFile();
                                                        if (!read_flush && file_info.getStorageType() == DiskManagerFileInfoImpl.ST_REORDER) {
                                                            // got to ensure written to disk before setting complete as the re-order
                                                            // logic requires this
                                                            cache_file.flushCache(piece_entry.getOffset(), piece_entry.getLength());
                                                        }
                                                        cache_file.setPieceComplete(pieceNumber, f_buffer);
                                                    }
                                                }
                                            } catch (Throwable e) {
                                                f_buffer.returnToPool();
                                                Debug.out(e);
                                                listener.checkFailed(request, e);
                                                return;
                                            }
                                        }
                                        f_buffer.returnToPool();
                                        if (async_result == 1) {
                                            listener.checkCompleted(request, true);
                                        } else if (async_result == 2) {
                                            listener.checkCompleted(request, false);
                                        } else {
                                            listener.checkCancelled(request);
                                        }
                                    } finally {
                                        // always balance async_checks, even on failure paths
                                        try {
                                            this_mon.enter();
                                            async_checks--;
                                            if (stopped) {
                                                async_check_sem.release();
                                            }
                                        } finally {
                                            this_mon.exit();
                                        }
                                    }
                                }
                            }
                        }, request.isLowPriority());
                    } catch (Throwable e) {
                        Debug.printStackTrace(e);
                        buffer.returnToPool();
                        listener.checkFailed(request, e);
                    }
                }
            }
            @Override
            public void readFailed(DiskManagerReadRequest read_request, Throwable cause) {
                complete();
                listener.checkFailed(request, cause);
            }
            @Override
            public int getPriority() {
                return (checking_read_priority ? 0 : -1);
            }
            @Override
            public void requestExecuted(long bytes) {
            }
            protected void complete() {
                // decrement async_reads and wake a stopper waiting on the semaphore
                try {
                    this_mon.enter();
                    async_reads--;
                    if (stopped) {
                        async_read_sem.release();
                    }
                } finally {
                    this_mon.exit();
                }
            }
        });
    } catch (Throwable e) {
        disk_manager.setFailed("Piece check error - " + Debug.getNestedExceptionMessage(e));
        Debug.printStackTrace(e);
        listener.checkFailed(request, e);
    }
}
Example usage of com.biglybt.core.diskmanager.cache.CacheFile in the BiglyBT project (BiglySoftware): class DMWriterImpl, method zeroFile.
/**
 * Fills a file's cache file with zero bytes up to the given length (or just
 * sets a zero length), updating the disk manager's allocation progress as it
 * goes. Writes are issued asynchronously through disk_access and awaited with
 * a semaphore, one block at a time.
 *
 * @param file   the file to zero
 * @param length number of bytes to zero; 0 creates an empty file
 * @return true when fully zeroed; false if the writer was stopped part-way
 * @throws DiskManagerException wrapping any underlying write failure
 */
@Override
public boolean zeroFile(DiskManagerFileInfoImpl file, long length) throws DiskManagerException {
    CacheFile cache_file = file.getCacheFile();
    try {
        if (length == 0) {
            // create a zero-length file if it is listed in the torrent
            cache_file.setLength(0);
        } else {
            // zero in blocks of at least MIN_ZERO_BLOCK, rounded up to a 1K multiple
            int buffer_size = pieceLength < MIN_ZERO_BLOCK ? MIN_ZERO_BLOCK : pieceLength;
            buffer_size = ((buffer_size + 1023) / 1024) * 1024;
            DirectByteBuffer buffer = DirectByteBufferPool.getBuffer(DirectByteBuffer.AL_DM_ZERO, buffer_size);
            long remainder = length;
            long written = 0;
            try {
                // fill the buffer with zeros, 1K at a time
                final byte[] blanks = new byte[1024];
                for (int i = 0; i < buffer_size / 1024; i++) {
                    buffer.put(DirectByteBuffer.SS_DW, blanks);
                }
                buffer.position(DirectByteBuffer.SS_DW, 0);
                while (remainder > 0 && !stopped) {
                    int write_size = buffer_size;
                    if (remainder < write_size) {
                        // final partial block: shrink the buffer's limit to match
                        write_size = (int) remainder;
                        buffer.limit(DirectByteBuffer.SS_DW, write_size);
                    }
                    // block until the async write completes (or fails/cancels)
                    final AESemaphore sem = new AESemaphore("DMW&C:zeroFile");
                    final Throwable[] op_failed = { null };
                    disk_access.queueWriteRequest(cache_file, written, buffer, false, new DiskAccessRequestListener() {
                        @Override
                        public void requestComplete(DiskAccessRequest request) {
                            sem.release();
                        }
                        @Override
                        public void requestCancelled(DiskAccessRequest request) {
                            op_failed[0] = new Throwable("Request cancelled");
                            sem.release();
                        }
                        @Override
                        public void requestFailed(DiskAccessRequest request, Throwable cause) {
                            op_failed[0] = cause;
                            sem.release();
                        }
                        @Override
                        public int getPriority() {
                            return (-1);
                        }
                        @Override
                        public void requestExecuted(long bytes) {
                        }
                    });
                    sem.reserve();
                    if (op_failed[0] != null) {
                        throw (op_failed[0]);
                    }
                    // rewind for the next block and update progress counters
                    buffer.position(DirectByteBuffer.SS_DW, 0);
                    written += write_size;
                    remainder -= write_size;
                    disk_manager.setAllocated(disk_manager.getAllocated() + write_size);
                    disk_manager.setPercentDone((int) ((disk_manager.getAllocated() * 1000) / totalLength));
                }
            } finally {
                buffer.returnToPool();
            }
            cache_file.flushCache();
        }
        if (stopped) {
            // interrupted before completion
            return false;
        }
    } catch (Throwable e) {
        Debug.printStackTrace(e);
        throw new DiskManagerException(e);
    }
    return true;
}
Example usage of com.biglybt.core.diskmanager.cache.CacheFile in the BiglyBT project (BiglySoftware): class DiskManagerImpl, method filesExist.
/**
 * Checks that every data file of this download exists under root_dir and is
 * not larger than its torrent-declared length (optionally truncating
 * over-length files when the "File.truncate.if.too.large" option is set).
 * On failure, sets the errorMessage field with a human-readable reason.
 *
 * @param root_dir base directory for the download's files
 * @return true if all files exist with acceptable lengths
 */
protected boolean filesExist(String root_dir) {
    if (!torrent.isSimpleTorrent()) {
        // multi-file torrent: files live under a directory named after the download
        root_dir += File.separator + download_manager.getAbsoluteSaveLocation().getName();
    }
    if (!root_dir.endsWith(File.separator)) {
        root_dir += File.separator;
    }
    // System.out.println( "root dir = " + root_dir_file );
    DMPieceMapperFile[] pm_files = piece_mapper.getFiles();
    String[] storage_types = getStorageTypes();
    DownloadManagerState state = download_manager.getDownloadState();
    for (int i = 0; i < pm_files.length; i++) {
        DMPieceMapperFile pm_info = pm_files[i];
        File relative_file = pm_info.getDataFile();
        long target_length = pm_info.getLength();
        // use the cache file to ascertain length in case the caching/writing algorithm
        // fiddles with the real length
        // Unfortunately we may be called here BEFORE the disk manager has been
        // started and hence BEFORE the file info has been setup...
        // Maybe one day we could allocate the file info earlier. However, if we do
        // this then we'll need to handle the "already moved" stuff too...
        DiskManagerFileInfoImpl file_info = pm_info.getFileInfo();
        // close_it: true when we created a temporary file_info here and must
        // close its cache file before returning
        boolean close_it = false;
        try {
            if (file_info == null) {
                int storage_type = DiskManagerUtil.convertDMStorageTypeFromString(storage_types[i]);
                file_info = createFileInfo(state, pm_info, i, root_dir, relative_file, storage_type);
                close_it = true;
            }
            try {
                CacheFile cache_file = file_info.getCacheFile();
                File data_file = file_info.getFile(true);
                if (!cache_file.exists()) {
                    // look for something sensible to report
                    // walk up the path to find the deepest missing ancestor
                    File current = data_file;
                    while (!current.exists()) {
                        File parent = current.getParentFile();
                        if (parent == null) {
                            break;
                        } else if (!parent.exists()) {
                            current = parent;
                        } else {
                            if (parent.isDirectory()) {
                                errorMessage = current.toString() + " not found.";
                            } else {
                                errorMessage = parent.toString() + " is not a directory.";
                            }
                            return (false);
                        }
                    }
                    errorMessage = data_file.toString() + " not found.";
                    return false;
                }
                // only test for too big as if incremental creation selected
                // then too small is OK
                long existing_length = file_info.getCacheFile().getLength();
                if (existing_length > target_length) {
                    if (COConfigurationManager.getBooleanParameter("File.truncate.if.too.large")) {
                        file_info.setAccessMode(DiskManagerFileInfo.WRITE);
                        file_info.getCacheFile().setLength(target_length);
                        Debug.out("Existing data file length too large [" + existing_length + ">" + target_length + "]: " + data_file.getAbsolutePath() + ", truncating");
                    } else {
                        errorMessage = "Existing data file length too large [" + existing_length + ">" + target_length + "]: " + data_file.getAbsolutePath();
                        return false;
                    }
                }
            } finally {
                if (close_it) {
                    // release the temporary cache file we opened above
                    file_info.getCacheFile().close();
                }
            }
        } catch (Throwable e) {
            errorMessage = Debug.getNestedExceptionMessage(e) + " (filesExist:" + relative_file.toString() + ")";
            return (false);
        }
    }
    return true;
}
Aggregations