Usage of com.biglybt.core.diskmanager.cache.CacheFile in the BiglyBT project (BiglySoftware):
class DiskManagerImpl, method allocateFiles().
/**
 * Allocates (or verifies) the on-disk data files for this download.
 * <p>
 * Registers with the allocation scheduler, creates a {@code DiskManagerFileInfoImpl}
 * per mapped file, detects duplicate target paths, applies the configured
 * priority-extension rules, and either truncates over-length files, extends
 * under-length ones, or allocates missing ones as required by the storage type.
 *
 * @return {@code { numNewFiles, notRequiredFiles }} on success, or
 *         {@code { -1, -1 }} on failure (state is set to FAULTY and
 *         {@code errorMessage} populated before returning).
 */
private int[] allocateFiles() {
    int[] fail_result = { -1, -1 };

    // Absolute paths seen so far, used to detect the same target file occurring
    // twice in one download (compared case-insensitively on Windows).
    // Typed Set<String> rather than the raw Set used previously.
    Set<String> file_set = new HashSet<>();

    DMPieceMapperFile[] pm_files = piece_mapper.getFiles();
    DiskManagerFileInfoImpl[] allocated_files = new DiskManagerFileInfoImpl[pm_files.length];
    DownloadManagerState state = download_manager.getDownloadState();
    try {
        allocation_scheduler.register(this);
        setState(ALLOCATING);
        allocated = 0;
        int numNewFiles = 0;
        int notRequiredFiles = 0;

        // Base directory: for multi-file torrents the save location's name forms a sub-directory.
        String root_dir = download_manager.getAbsoluteSaveLocation().getParent();
        if (!torrent.isSimpleTorrent()) {
            root_dir += File.separator + download_manager.getAbsoluteSaveLocation().getName();
        }
        root_dir += File.separator;

        String[] storage_types = getStorageTypes();
        String incomplete_suffix = state.getAttribute(DownloadManagerState.AT_INCOMP_FILE_SUFFIX);

        for (int i = 0; i < pm_files.length; i++) {
            // Bail out promptly if the download is being stopped mid-allocation.
            if (stopping) {
                this.errorMessage = "File allocation interrupted - download is stopping";
                setState(FAULTY);
                return (fail_result);
            }
            final DMPieceMapperFile pm_info = pm_files[i];
            final long target_length = pm_info.getLength();
            File relative_data_file = pm_info.getDataFile();
            DiskManagerFileInfoImpl fileInfo;
            try {
                int storage_type = DiskManagerUtil.convertDMStorageTypeFromString(storage_types[i]);
                fileInfo = createFileInfo(state, pm_info, i, root_dir, relative_data_file, storage_type);
                allocated_files[i] = fileInfo;
                pm_info.setFileInfo(fileInfo);
            } catch (Exception e) {
                this.errorMessage = Debug.getNestedExceptionMessage(e) + " (allocateFiles:" + relative_data_file.toString() + ")";
                setState(FAULTY);
                return (fail_result);
            }
            CacheFile cache_file = fileInfo.getCacheFile();
            File data_file = fileInfo.getFile(true);

            // Duplicate-target detection: two torrent entries must not map to the same path.
            String file_key = data_file.getAbsolutePath();
            if (Constants.isWindows) {
                file_key = file_key.toLowerCase();
            }
            if (file_set.contains(file_key)) {
                this.errorMessage = "File occurs more than once in download: " + data_file.toString() + ".\nRename one of the files in Files view via the right-click menu.";
                setState(FAULTY);
                return (fail_result);
            }
            file_set.add(file_key);

            // Derive the file extension, stripping any configured incomplete-download suffix first.
            String ext = data_file.getName();
            if (incomplete_suffix != null && ext.endsWith(incomplete_suffix)) {
                ext = ext.substring(0, ext.length() - incomplete_suffix.length());
            }
            int separator = ext.lastIndexOf(".");
            if (separator == -1) {
                separator = 0;
            }
            fileInfo.setExtension(ext.substring(separator));

            // Added for Feature Request
            // [ 807483 ] Prioritize .nfo files in new torrents
            // Implemented a more general way of dealing with it.
            String extensions = COConfigurationManager.getStringParameter("priorityExtensions", "");
            if (!extensions.equals("")) {
                boolean bIgnoreCase = COConfigurationManager.getBooleanParameter("priorityExtensionsIgnoreCase");
                StringTokenizer st = new StringTokenizer(extensions, ";");
                while (st.hasMoreTokens()) {
                    String extension = st.nextToken();
                    extension = extension.trim();
                    if (!extension.startsWith("."))
                        extension = "." + extension;
                    boolean bHighPriority = (bIgnoreCase) ? fileInfo.getExtension().equalsIgnoreCase(extension) : fileInfo.getExtension().equals(extension);
                    if (bHighPriority)
                        fileInfo.setPriority(1);
                }
            }
            fileInfo.setDownloaded(0);

            int st = cache_file.getStorageType();
            boolean compact = st == CacheFile.CT_COMPACT || st == CacheFile.CT_PIECE_REORDER_COMPACT;
            // Compact storage types only need the file on disk if resume data says it must exist.
            boolean mustExistOrAllocate = (!compact) || RDResumeHandler.fileMustExist(download_manager, fileInfo);

            // Remove a left-over file that this (compact) entry no longer requires.
            if (!mustExistOrAllocate && cache_file.exists()) {
                data_file.delete();
            }
            if (cache_file.exists()) {
                try {
                    // make sure the existing file length isn't too large
                    long existing_length = fileInfo.getCacheFile().getLength();
                    if (existing_length > target_length) {
                        if (COConfigurationManager.getBooleanParameter("File.truncate.if.too.large")) {
                            fileInfo.setAccessMode(DiskManagerFileInfo.WRITE);
                            cache_file.setLength(target_length);
                            fileInfo.setAccessMode(DiskManagerFileInfo.READ);
                            Debug.out("Existing data file length too large [" + existing_length + ">" + target_length + "]: " + data_file.getAbsolutePath() + ", truncating");
                        } else {
                            this.errorMessage = "Existing data file length too large [" + existing_length + ">" + target_length + "]: " + data_file.getAbsolutePath();
                            setState(FAULTY);
                            return (fail_result);
                        }
                    } else if (existing_length < target_length) {
                        // Existing file is too short: extend it (non-compact storage only).
                        if (!compact) {
                            // allocateFile reports its own error / sets FAULTY on failure
                            if (!allocateFile(fileInfo, data_file, existing_length, target_length)) {
                                return (fail_result);
                            }
                        }
                    }
                } catch (Throwable e) {
                    fileAllocFailed(data_file, target_length, false, e);
                    setState(FAULTY);
                    return (fail_result);
                }
                allocated += target_length;
            } else if (mustExistOrAllocate) {
                // File is missing; if allocation supposedly already happened this is an error.
                if (download_manager.isDataAlreadyAllocated()) {
                    this.errorMessage = "Data file missing: " + data_file.getAbsolutePath();
                    setState(FAULTY);
                    return (fail_result);
                }
                try {
                    if (!allocateFile(fileInfo, data_file, -1, target_length)) {
                        return (fail_result);
                    }
                } catch (Throwable e) {
                    fileAllocFailed(data_file, target_length, true, e);
                    setState(FAULTY);
                    return (fail_result);
                }
                numNewFiles++;
            } else {
                notRequiredFiles++;
            }
        }
        // make sure that "files" doesn't become visible to the rest of the world until all
        // entries have been populated
        files = allocated_files;
        fileset = new DiskManagerFileInfoSetImpl(files, this);
        loadFilePriorities();
        download_manager.setDataAlreadyAllocated(true);
        return (new int[] { numNewFiles, notRequiredFiles });
    } finally {
        allocation_scheduler.unregister(this);
        // On failure 'files' is still null: close any cache files opened above
        // so we don't leak file handles.
        if (files == null) {
            for (int i = 0; i < allocated_files.length; i++) {
                if (allocated_files[i] != null) {
                    try {
                        allocated_files[i].getCacheFile().close();
                    } catch (Throwable e) {
                        // best-effort cleanup; a close failure here is not actionable
                    }
                }
            }
        }
    }
}
Usage of com.biglybt.core.diskmanager.cache.CacheFile in the BiglyBT project (BiglySoftware):
class DiskAccessRequestImpl, method runAggregated().
/**
 * Executes a batch of contiguous requests against the same file as one
 * aggregated disk operation, then notifies each request's listener.
 * <p>
 * The operation, file, starting offset and cache policy are taken from
 * {@code base_request}; the aggregate byte count is reported once against it
 * and the remaining requests report zero bytes executed.
 *
 * @param base_request the request supplying op/file/offset/cache-policy
 * @param requests     all requests in the batch (including the base request)
 */
protected static void runAggregated(DiskAccessRequestImpl base_request, DiskAccessRequestImpl[] requests) {
    // assumption - they are all for the same file, sequential offsets and aggregatable, not cancelled
    int op = base_request.getOperation();
    CacheFile file = base_request.getFile();
    long offset = base_request.getOffset();
    short cache_policy = base_request.getCachePolicy();

    // Gather the buffers while sanity-checking that offsets are contiguous.
    DirectByteBuffer[] buffers = new DirectByteBuffer[requests.length];
    long expected_offset = offset;
    long total_size = 0;
    int pos = 0;
    for (DiskAccessRequestImpl req : requests) {
        if (expected_offset != req.getOffset()) {
            Debug.out("assert failed: requests not contiguous");
        }
        int req_size = req.getSize();
        expected_offset += req_size;
        total_size += req_size;
        buffers[pos++] = req.getBuffer();
    }

    try {
        if (op == OP_READ) {
            file.read(buffers, offset, cache_policy);
        } else if (op == OP_WRITE) {
            file.write(buffers, offset);
        } else {
            file.writeAndHandoverBuffers(buffers, offset);
        }
        // Report the aggregate byte count once, against the base request only.
        base_request.getListener().requestExecuted(total_size);
        for (DiskAccessRequestImpl req : requests) {
            req.getListener().requestComplete(req);
            if (req != base_request) {
                req.getListener().requestExecuted(0);
            }
        }
    } catch (CacheFileManagerException e) {
        // Requests before the failing index completed; the rest failed.
        int fail_index = e.getFailIndex();
        for (int i = 0; i < requests.length; i++) {
            DiskAccessRequestImpl req = requests[i];
            if (i < fail_index) {
                req.getListener().requestComplete(req);
            } else {
                req.getListener().requestFailed(req, e);
            }
        }
    } catch (Throwable e) {
        // Unknown failure: fail every request in the batch.
        for (DiskAccessRequestImpl req : requests) {
            req.getListener().requestFailed(req, e);
        }
    }
}
Aggregations