Example 1 with CacheFileManagerException

Use of com.biglybt.core.diskmanager.cache.CacheFileManagerException in project BiglyBT by BiglySoftware.

From the class RDResumeHandler, method checkAllPieces:

public void checkAllPieces(boolean newfiles) {
    // long	start = System.currentTimeMillis();
    DiskManagerRecheckInstance recheck_inst = disk_manager.getRecheckScheduler().register(disk_manager, false);
    int overall_piece_size = disk_manager.getPieceLength();
    final AESemaphore run_sem = new AESemaphore("RDResumeHandler::checkAllPieces:runsem", overall_piece_size > 32 * 1024 * 1024 ? 1 : 2);
    final List<DiskManagerCheckRequest> failed_pieces = new ArrayList<>();
    try {
        boolean resume_data_complete = false;
        try {
            check_in_progress = true;
            boolean resumeEnabled = use_fast_resume;
            if (newfiles) {
                resumeEnabled = false;
            }
            final AESemaphore pending_checks_sem = new AESemaphore("RD:PendingChecks");
            int pending_check_num = 0;
            DiskManagerPiece[] pieces = disk_manager.getPieces();
            // calculate the current file sizes up front for performance reasons
            DiskManagerFileInfo[] files = disk_manager.getFiles();
            Map file_sizes = new HashMap();
            for (int i = 0; i < files.length; i++) {
                try {
                    Long len = new Long(((DiskManagerFileInfoImpl) files[i]).getCacheFile().getLength());
                    file_sizes.put(files[i], len);
                } catch (CacheFileManagerException e) {
                    Debug.printStackTrace(e);
                }
            }
            if (resumeEnabled) {
                boolean resumeValid = false;
                byte[] resume_pieces = null;
                Map partialPieces = null;
                Map resume_data = getResumeData();
                if (resume_data != null) {
                    try {
                        resume_pieces = (byte[]) resume_data.get("resume data");
                        if (resume_pieces != null) {
                            if (resume_pieces.length != pieces.length) {
                                Debug.out("Resume data array length mismatch: " + resume_pieces.length + "/" + pieces.length);
                                resume_pieces = null;
                            }
                        }
                        partialPieces = (Map) resume_data.get("blocks");
                        resumeValid = ((Long) resume_data.get("valid")).intValue() == 1;
                        if (isTorrentResumeDataComplete(disk_manager.getDownloadManager().getDownloadState(), resume_data)) {
                            resume_data_complete = true;
                        } else {
                            // set it so that if we crash the NOT_DONE pieces will be
                            // rechecked
                            resume_data.put("valid", new Long(0));
                            saveResumeData(resume_data);
                        }
                    } catch (Exception ignore) {
                    // ignore.printStackTrace();
                    }
                }
                if (resume_pieces == null) {
                    check_is_full_check = true;
                    resumeValid = false;
                    resume_pieces = new byte[pieces.length];
                    Arrays.fill(resume_pieces, PIECE_RECHECK_REQUIRED);
                }
                check_resume_was_valid = resumeValid;
                boolean recheck_all = use_fast_resume_recheck_all;
                if (!recheck_all) {
                    // override if not much left undone
                    long total_not_done = 0;
                    int piece_size = disk_manager.getPieceLength();
                    for (int i = 0; i < pieces.length; i++) {
                        if (resume_pieces[i] != PIECE_DONE) {
                            total_not_done += piece_size;
                        }
                    }
                    if (total_not_done < 64 * 1024 * 1024) {
                        recheck_all = true;
                    }
                }
                if (Logger.isEnabled()) {
                    int total_not_done = 0;
                    int total_done = 0;
                    int total_started = 0;
                    int total_recheck = 0;
                    for (int i = 0; i < pieces.length; i++) {
                        byte piece_state = resume_pieces[i];
                        if (piece_state == PIECE_NOT_DONE) {
                            total_not_done++;
                        } else if (piece_state == PIECE_DONE) {
                            total_done++;
                        } else if (piece_state == PIECE_STARTED) {
                            total_started++;
                        } else {
                            total_recheck++;
                        }
                    }
                    String str = "valid=" + resumeValid + ",not done=" + total_not_done + ",done=" + total_done + ",started=" + total_started + ",recheck=" + total_recheck + ",rc all=" + recheck_all + ",full=" + check_is_full_check;
                    Logger.log(new LogEvent(disk_manager, LOGID, str));
                }
                for (int i = 0; i < pieces.length; i++) {
                    check_position = i;
                    DiskManagerPiece dm_piece = pieces[i];
                    disk_manager.setPercentDone(((i + 1) * 1000) / disk_manager.getNbPieces());
                    boolean pieceCannotExist = false;
                    byte piece_state = resume_pieces[i];
                    if (piece_state == PIECE_DONE || !resumeValid || recheck_all) {
                        // at least check that file sizes are OK for this piece to be valid
                        DMPieceList list = disk_manager.getPieceList(i);
                        for (int j = 0; j < list.size(); j++) {
                            DMPieceMapEntry entry = list.get(j);
                            Long file_size = (Long) file_sizes.get(entry.getFile());
                            if (file_size == null) {
                                piece_state = PIECE_NOT_DONE;
                                pieceCannotExist = true;
                                if (Logger.isEnabled())
                                    Logger.log(new LogEvent(disk_manager, LOGID, LogEvent.LT_WARNING, "Piece #" + i + ": file is missing, " + "fails re-check."));
                                break;
                            }
                            long expected_size = entry.getOffset() + entry.getLength();
                            if (file_size.longValue() < expected_size) {
                                piece_state = PIECE_NOT_DONE;
                                pieceCannotExist = true;
                                if (Logger.isEnabled())
                                    Logger.log(new LogEvent(disk_manager, LOGID, LogEvent.LT_WARNING, "Piece #" + i + ": file is too small, fails re-check. File size = " + file_size + ", piece needs " + expected_size));
                                break;
                            }
                        }
                    }
                    if (piece_state == PIECE_DONE) {
                        dm_piece.setDone(true);
                    } else if (piece_state == PIECE_NOT_DONE && !recheck_all) {
                    // if the piece isn't done and we haven't been asked to recheck all pieces
                    // on restart (only started pieces) then just set as not done
                    } else {
                        // if the resume data is invalid or explicit recheck needed
                        if (pieceCannotExist) {
                            dm_piece.setDone(false);
                        } else if (piece_state == PIECE_RECHECK_REQUIRED || !resumeValid) {
                            run_sem.reserve();
                            while (!stopped) {
                                if (recheck_inst.getPermission()) {
                                    break;
                                }
                            }
                            if (stopped) {
                                break;
                            } else {
                                try {
                                    DiskManagerCheckRequest request = disk_manager.createCheckRequest(i, null);
                                    request.setLowPriority(true);
                                    checker.enqueueCheckRequest(request, new DiskManagerCheckRequestListener() {

                                        @Override
                                        public void checkCompleted(DiskManagerCheckRequest request, boolean passed) {
                                            if (TEST_RECHECK_FAILURE_HANDLING && (int) (Math.random() * 10) == 0) {
                                                disk_manager.getPiece(request.getPieceNumber()).setDone(false);
                                                passed = false;
                                            }
                                            if (!passed) {
                                                synchronized (failed_pieces) {
                                                    failed_pieces.add(request);
                                                }
                                            }
                                            complete();
                                        }

                                        @Override
                                        public void checkCancelled(DiskManagerCheckRequest request) {
                                            complete();
                                        }

                                        @Override
                                        public void checkFailed(DiskManagerCheckRequest request, Throwable cause) {
                                            complete();
                                        }

                                        protected void complete() {
                                            run_sem.release();
                                            pending_checks_sem.release();
                                        }
                                    });
                                    pending_check_num++;
                                } catch (Throwable e) {
                                    Debug.printStackTrace(e);
                                }
                            }
                        }
                    }
                }
                while (pending_check_num > 0) {
                    pending_checks_sem.reserve();
                    pending_check_num--;
                }
                if (partialPieces != null) {
                    Iterator iter = partialPieces.entrySet().iterator();
                    while (iter.hasNext()) {
                        Map.Entry key = (Map.Entry) iter.next();
                        int pieceNumber = Integer.parseInt((String) key.getKey());
                        DiskManagerPiece dm_piece = pieces[pieceNumber];
                        if (!dm_piece.isDone()) {
                            List blocks = (List) partialPieces.get(key.getKey());
                            Iterator iterBlock = blocks.iterator();
                            while (iterBlock.hasNext()) {
                                dm_piece.setWritten(((Long) iterBlock.next()).intValue());
                            }
                        }
                    }
                }
            } else {
                for (int i = 0; i < pieces.length; i++) {
                    check_position = i;
                    disk_manager.setPercentDone(((i + 1) * 1000) / disk_manager.getNbPieces());
                    boolean pieceCannotExist = false;
                    // check if there is an underlying file for this piece, if not set it to not done
                    DMPieceList list = disk_manager.getPieceList(i);
                    for (int j = 0; j < list.size(); j++) {
                        DMPieceMapEntry entry = list.get(j);
                        Long file_size = (Long) file_sizes.get(entry.getFile());
                        if (file_size == null) {
                            pieceCannotExist = true;
                            break;
                        }
                        long expected_size = entry.getOffset() + entry.getLength();
                        if (file_size.longValue() < expected_size) {
                            pieceCannotExist = true;
                            break;
                        }
                    }
                    if (pieceCannotExist) {
                        disk_manager.getPiece(i).setDone(false);
                        continue;
                    }
                    run_sem.reserve();
                    while (!stopped) {
                        if (recheck_inst.getPermission()) {
                            break;
                        }
                    }
                    if (stopped) {
                        break;
                    }
                    try {
                        DiskManagerCheckRequest request = disk_manager.createCheckRequest(i, null);
                        request.setLowPriority(true);
                        checker.enqueueCheckRequest(request, new DiskManagerCheckRequestListener() {

                            @Override
                            public void checkCompleted(DiskManagerCheckRequest request, boolean passed) {
                                if (TEST_RECHECK_FAILURE_HANDLING && (int) (Math.random() * 10) == 0) {
                                    disk_manager.getPiece(request.getPieceNumber()).setDone(false);
                                    passed = false;
                                }
                                if (!passed) {
                                    synchronized (failed_pieces) {
                                        failed_pieces.add(request);
                                    }
                                }
                                complete();
                            }

                            @Override
                            public void checkCancelled(DiskManagerCheckRequest request) {
                                complete();
                            }

                            @Override
                            public void checkFailed(DiskManagerCheckRequest request, Throwable cause) {
                                complete();
                            }

                            protected void complete() {
                                run_sem.release();
                                pending_checks_sem.release();
                            }
                        });
                        pending_check_num++;
                    } catch (Throwable e) {
                        Debug.printStackTrace(e);
                    }
                }
                while (pending_check_num > 0) {
                    pending_checks_sem.reserve();
                    pending_check_num--;
                }
            }
            if (failed_pieces.size() > 0 && !TEST_RECHECK_FAILURE_HANDLING) {
                byte[][] piece_hashes = disk_manager.getTorrent().getPieces();
                ByteArrayHashMap<Integer> hash_map = new ByteArrayHashMap<>();
                for (int i = 0; i < piece_hashes.length; i++) {
                    hash_map.put(piece_hashes[i], i);
                }
                for (DiskManagerCheckRequest request : failed_pieces) {
                    while (!stopped) {
                        if (recheck_inst.getPermission()) {
                            break;
                        }
                    }
                    if (stopped) {
                        break;
                    }
                    byte[] hash = request.getHash();
                    if (hash != null) {
                        final Integer target_index = hash_map.get(hash);
                        int current_index = request.getPieceNumber();
                        int piece_size = disk_manager.getPieceLength(current_index);
                        if (target_index != null && target_index != current_index && disk_manager.getPieceLength(target_index) == piece_size && !disk_manager.isDone(target_index)) {
                            final AESemaphore sem = new AESemaphore("PieceReorder");
                            disk_manager.enqueueReadRequest(disk_manager.createReadRequest(current_index, 0, piece_size), new DiskManagerReadRequestListener() {

                                @Override
                                public void readCompleted(DiskManagerReadRequest request, DirectByteBuffer data) {
                                    try {
                                        disk_manager.enqueueWriteRequest(disk_manager.createWriteRequest(target_index, 0, data, null), new DiskManagerWriteRequestListener() {

                                            @Override
                                            public void writeCompleted(DiskManagerWriteRequest request) {
                                                try {
                                                    DiskManagerCheckRequest check_request = disk_manager.createCheckRequest(target_index, null);
                                                    check_request.setLowPriority(true);
                                                    checker.enqueueCheckRequest(check_request, new DiskManagerCheckRequestListener() {

                                                        @Override
                                                        public void checkCompleted(DiskManagerCheckRequest request, boolean passed) {
                                                            sem.release();
                                                        }

                                                        @Override
                                                        public void checkCancelled(DiskManagerCheckRequest request) {
                                                            sem.release();
                                                        }

                                                        @Override
                                                        public void checkFailed(DiskManagerCheckRequest request, Throwable cause) {
                                                            sem.release();
                                                        }
                                                    });
                                                } catch (Throwable e) {
                                                    sem.release();
                                                }
                                            }

                                            @Override
                                            public void writeFailed(DiskManagerWriteRequest request, Throwable cause) {
                                                sem.release();
                                            }
                                        });
                                    } catch (Throwable e) {
                                        sem.release();
                                    }
                                }

                                @Override
                                public void readFailed(DiskManagerReadRequest request, Throwable cause) {
                                    sem.release();
                                }

                                @Override
                                public int getPriority() {
                                    return (-1);
                                }

                                @Override
                                public void requestExecuted(long bytes) {
                                }
                            });
                            sem.reserve();
                        }
                    }
                }
            }
        } finally {
            check_in_progress = false;
        }
        if (!(stopped || resume_data_complete)) {
            try {
                saveResumeData(true);
            } catch (Exception e) {
                Debug.out("Failed to dump initial resume data to disk");
                Debug.printStackTrace(e);
            }
        }
    } catch (Throwable e) {
        // if something went wrong then log and continue.
        Debug.printStackTrace(e);
    } finally {
        recheck_inst.unregister();
    // System.out.println( "Check of '" + disk_manager.getDownloadManager().getDisplayName() + "' completed in " + (System.currentTimeMillis() - start));
    }
}
Also used: AESemaphore (com.biglybt.core.util.AESemaphore), ByteArrayHashMap (com.biglybt.core.util.ByteArrayHashMap), CacheFileManagerException (com.biglybt.core.diskmanager.cache.CacheFileManagerException), DMPieceList (com.biglybt.core.disk.impl.piecemapper.DMPieceList), DMPieceMapEntry (com.biglybt.core.disk.impl.piecemapper.DMPieceMapEntry), DirectByteBuffer (com.biglybt.core.util.DirectByteBuffer), DiskManagerFileInfoImpl (com.biglybt.core.disk.impl.DiskManagerFileInfoImpl), DiskManagerRecheckInstance (com.biglybt.core.disk.impl.DiskManagerRecheckInstance), LogEvent (com.biglybt.core.logging.LogEvent)
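
The most reusable part of this example is the two-semaphore throttle: run_sem caps the number of hash checks in flight (one permit for pieces over 32 MiB, two otherwise), while pending_checks_sem is released once per completed check so the loop can wait for all outstanding work. Below is a minimal sketch of the same pairing using plain java.util.concurrent.Semaphore in place of BiglyBT's AESemaphore; ThrottledChecks, checkPiece, runSem, and pendingSem are illustrative names, not BiglyBT APIs.

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Semaphore;

public class ThrottledChecks {
    public static void main(String[] args) throws InterruptedException {
        int pieceCount = 20;
        // run_sem analogue: at most N hash checks in flight at once
        Semaphore runSem = new Semaphore(2);
        // pending_checks_sem analogue: one release per completed check
        Semaphore pendingSem = new Semaphore(0);
        ExecutorService checker = Executors.newFixedThreadPool(4);
        int pending = 0;
        for (int i = 0; i < pieceCount; i++) {
            // block until a check slot is free, as run_sem.reserve() does above
            runSem.acquire();
            final int piece = i;
            checker.execute(() -> {
                try {
                    checkPiece(piece); // stand-in for the async hash check
                } finally {
                    runSem.release();     // free the slot...
                    pendingSem.release(); // ...and count the completion
                }
            });
            pending++;
        }
        // wait for every outstanding check, as the pending_check_num loop does
        pendingSem.acquire(pending);
        checker.shutdown();
    }

    private static void checkPiece(int piece) {
        // hash the piece here; omitted in this sketch
    }
}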

Example 2 with CacheFileManagerException

Use of com.biglybt.core.diskmanager.cache.CacheFileManagerException in project BiglyBT by BiglySoftware.

From the class CacheFileWithCache, method writeCache:

protected void writeCache(DirectByteBuffer file_buffer, long file_position, boolean buffer_handed_over) throws CacheFileManagerException {
    checkPendingException();
    boolean buffer_cached = false;
    boolean failed = false;
    try {
        int file_buffer_position = file_buffer.position(SS_CACHE);
        int file_buffer_limit = file_buffer.limit(SS_CACHE);
        int write_length = file_buffer_limit - file_buffer_position;
        if (write_length == 0) {
            // nothing to do
            return;
        }
        if (AEDiagnostics.CHECK_DUMMY_FILE_DATA) {
            long temp_position = file_position + file_offset_in_torrent;
            while (file_buffer.hasRemaining(SS_CACHE)) {
                byte v = file_buffer.get(SS_CACHE);
                if ((byte) temp_position != v) {
                    System.out.println("writeCache: write is bad at " + temp_position + ": expected = " + (byte) temp_position + ", actual = " + v);
                    break;
                }
                temp_position++;
            }
            file_buffer.position(SS_CACHE, file_buffer_position);
        }
        if (manager.isWriteCacheEnabled()) {
            if (TRACE)
                Logger.log(new LogEvent(torrent, LOGID, "writeCache: " + getName() + ", " + file_position + " - " + (file_position + write_length - 1) + ":" + file_buffer_position + "/" + file_buffer_limit));
            if ((!buffer_handed_over) && write_length < piece_size) {
                if (TRACE)
                    Logger.log(new LogEvent(torrent, LOGID, "    making copy of non-handedover buffer"));
                DirectByteBuffer cache_buffer = DirectByteBufferPool.getBuffer(DirectByteBuffer.AL_CACHE_WRITE, write_length);
                cache_buffer.put(SS_CACHE, file_buffer);
                cache_buffer.position(SS_CACHE, 0);
                // make it look like this buffer has been handed over
                file_buffer = cache_buffer;
                file_buffer_position = 0;
                file_buffer_limit = write_length;
                buffer_handed_over = true;
            }
            if (buffer_handed_over) {
                // cache this write, allocate outside sync block (see manager for details)
                CacheEntry entry = manager.allocateCacheSpace(CacheEntry.CT_DATA_WRITE, this, file_buffer, file_position, write_length);
                try {
                    this_mon.enter();
                    if (access_mode != CF_WRITE) {
                        throw (new CacheFileManagerException(this, "Write failed - cache file is read only"));
                    }
                    // if we are overwriting stuff already in the cache then force-write overlapped
                    // data (easiest solution as this should only occur on hash-fails)
                    // do the flush and add synchronized to avoid the possibility of another
                    // thread getting in-between and adding the same block, thus causing multiple entries
                    // for same space
                    flushCache(file_position, write_length, true, -1, 0, -1);
                    cache.add(entry);
                    manager.addCacheSpace(entry);
                } finally {
                    this_mon.exit();
                }
                manager.cacheBytesWritten(write_length);
                bytes_written += write_length;
                buffer_cached = true;
            } else {
                try {
                    this_mon.enter();
                    flushCache(file_position, write_length, true, -1, 0, -1);
                    getFMFile().write(file_buffer, file_position);
                } finally {
                    this_mon.exit();
                }
                manager.fileBytesWritten(write_length);
                bytes_written += write_length;
            }
        } else {
            getFMFile().write(file_buffer, file_position);
            manager.fileBytesWritten(write_length);
            bytes_written += write_length;
        }
    } catch (CacheFileManagerException e) {
        failed = true;
        throw (e);
    } catch (FMFileManagerException e) {
        failed = true;
        manager.rethrow(this, e);
    } finally {
        if (buffer_handed_over) {
            if (!(failed || buffer_cached)) {
                file_buffer.returnToPool();
            }
        }
    }
}
Also used: FMFileManagerException (com.biglybt.core.diskmanager.file.FMFileManagerException), LogEvent (com.biglybt.core.logging.LogEvent), CacheFileManagerException (com.biglybt.core.diskmanager.cache.CacheFileManagerException)
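
The method also illustrates a buffer-ownership rule: a buffer that is "handed over" belongs to the cache layer, and a borrowed buffer covering less than a full piece is copied before being cached so the caller can reuse the original. A rough sketch of that rule, assuming toy Pool and storeInCache stand-ins for DirectByteBufferPool and the real cache insert:

import java.nio.ByteBuffer;

class BufferOwnershipSketch {
    interface Pool { ByteBuffer take(int size); void give(ByteBuffer b); }

    static void write(Pool pool, ByteBuffer buf, boolean handedOver, int pieceSize) {
        boolean cached = false, failed = false;
        try {
            if (!handedOver && buf.remaining() < pieceSize) {
                // borrowed partial-piece buffer: copy it so the cache can
                // keep the data after the caller reuses the original
                ByteBuffer copy = pool.take(buf.remaining());
                copy.put(buf);
                copy.flip();
                buf = copy;
                handedOver = true; // we now own the copy
            }
            cached = storeInCache(buf); // hypothetical cache insert
        } catch (RuntimeException e) {
            failed = true; // on failure the buffer is deliberately not recycled here
            throw e;
        } finally {
            if (handedOver && !(failed || cached)) {
                pool.give(buf); // owned but unused: return to pool
            }
        }
    }

    static boolean storeInCache(ByteBuffer b) { return true; }
}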

Example 3 with CacheFileManagerException

Use of com.biglybt.core.diskmanager.cache.CacheFileManagerException in project BiglyBT by BiglySoftware.

From the class CacheFileWithCache, method multiBlockFlush:

protected void multiBlockFlush(List multi_block_entries, long multi_block_start, long multi_block_next, boolean release_entries) throws CacheFileManagerException {
    boolean write_ok = false;
    try {
        if (TRACE)
            Logger.log(new LogEvent(torrent, LOGID, "multiBlockFlush: writing " + multi_block_entries.size() + " entries, [" + multi_block_start + "," + multi_block_next + "," + release_entries + "]"));
        DirectByteBuffer[] buffers = new DirectByteBuffer[multi_block_entries.size()];
        long expected_per_entry_write = 0;
        for (int i = 0; i < buffers.length; i++) {
            CacheEntry entry = (CacheEntry) multi_block_entries.get(i);
            // sanity check - we should always be flushing entire entries
            DirectByteBuffer buffer = entry.getBuffer();
            if (buffer.limit(SS_CACHE) - buffer.position(SS_CACHE) != entry.getLength()) {
                throw (new CacheFileManagerException(this, "flush: inconsistent entry length, position wrong"));
            }
            expected_per_entry_write += entry.getLength();
            buffers[i] = buffer;
        }
        long expected_overall_write = multi_block_next - multi_block_start;
        if (expected_per_entry_write != expected_overall_write) {
            throw (new CacheFileManagerException(this, "flush: inconsistent write length, entrys = " + expected_per_entry_write + " overall = " + expected_overall_write));
        }
        getFMFile().write(buffers, multi_block_start);
        manager.fileBytesWritten(expected_overall_write);
        // bytes_written += expected_overall_write;
        write_ok = true;
    } catch (FMFileManagerException e) {
        throw (new CacheFileManagerException(this, "flush fails", e));
    } finally {
        for (int i = 0; i < multi_block_entries.size(); i++) {
            CacheEntry entry = (CacheEntry) multi_block_entries.get(i);
            if (release_entries) {
                manager.releaseCacheSpace(entry);
            } else {
                entry.resetBufferPosition();
                if (write_ok) {
                    entry.setClean();
                }
            }
        }
    }
}
Also used: FMFileManagerException (com.biglybt.core.diskmanager.file.FMFileManagerException), LogEvent (com.biglybt.core.logging.LogEvent), CacheFileManagerException (com.biglybt.core.diskmanager.cache.CacheFileManagerException)
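
The underlying technique is a vectored (gather) write: contiguous dirty entries are flushed with a single call instead of one write per block, after verifying that the per-entry lengths add up to the overall range. A minimal sketch of the same idea with plain NIO, using FileChannel as a stand-in for BiglyBT's FMFile:

import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.channels.FileChannel;
import java.nio.file.Path;
import java.nio.file.StandardOpenOption;

class GatherFlushSketch {
    static void flushRun(Path file, long runStart, ByteBuffer[] blocks) throws IOException {
        long expected = 0;
        for (ByteBuffer b : blocks) {
            expected += b.remaining(); // length sanity check, as in multiBlockFlush
        }
        try (FileChannel ch = FileChannel.open(file, StandardOpenOption.WRITE)) {
            ch.position(runStart);
            long written = 0;
            while (written < expected) { // gathering writes may be partial
                written += ch.write(blocks);
            }
        }
    }
}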

Example 4 with CacheFileManagerException

Use of com.biglybt.core.diskmanager.cache.CacheFileManagerException in project BiglyBT by BiglySoftware.

From the class CacheFileWithCache, method flushCacheSupport:

protected void flushCacheSupport(long file_position, // -1 -> do all from position onwards
long length, boolean release_entries, // -1 -> all
long minimum_to_release, // dirty entries newer than this won't be flushed
long oldest_dirty_time, // minimum contiguous size for flushing, -1 -> no limit
long min_chunk_size) throws CacheFileManagerException {
    try {
        this_mon.enter();
        if (cache.size() == 0) {
            return;
        }
        Iterator it = cache.iterator();
        Throwable last_failure = null;
        long entry_total_released = 0;
        List multi_block_entries = new ArrayList();
        long multi_block_start = -1;
        long multi_block_next = -1;
        while (it.hasNext()) {
            CacheEntry entry = (CacheEntry) it.next();
            long entry_file_position = entry.getFilePosition();
            int entry_length = entry.getLength();
            if (entry_file_position + entry_length <= file_position) {
                continue;
            } else if (length != -1 && file_position + length <= entry_file_position) {
                break;
            }
            // overlap!!!!
            // we're going to deal with this entry one way or another. In particular if
            // we are releasing entries then this is guaranteed to be released, either directly
            // or via a flush if dirty
            boolean dirty = entry.isDirty();
            try {
                if (dirty && (oldest_dirty_time == 0 || entry.getLastUsed() < oldest_dirty_time)) {
                    if (multi_block_start == -1) {
                        // start of day
                        multi_block_start = entry_file_position;
                        multi_block_next = entry_file_position + entry_length;
                        multi_block_entries.add(entry);
                    } else if (multi_block_next == entry_file_position) {
                        // continuation, add in
                        multi_block_next = entry_file_position + entry_length;
                        multi_block_entries.add(entry);
                    } else {
                        // we've got a gap - flush current and start another series
                        // set up ready for next block in case the flush fails - we try
                        // and flush as much as possible in the face of failure
                        boolean skip_chunk = false;
                        if (min_chunk_size != -1) {
                            if (release_entries) {
                                Debug.out("CacheFile: can't use min chunk with release option");
                            } else {
                                skip_chunk = multi_block_next - multi_block_start < min_chunk_size;
                            }
                        }
                        List f_multi_block_entries = multi_block_entries;
                        long f_multi_block_start = multi_block_start;
                        long f_multi_block_next = multi_block_next;
                        multi_block_start = entry_file_position;
                        multi_block_next = entry_file_position + entry_length;
                        multi_block_entries = new ArrayList();
                        multi_block_entries.add(entry);
                        if (skip_chunk) {
                            if (TRACE)
                                Logger.log(new LogEvent(torrent, LOGID, "flushCache: skipping " + multi_block_entries.size() + " entries, [" + multi_block_start + "," + multi_block_next + "] as too small"));
                        } else {
                            multiBlockFlush(f_multi_block_entries, f_multi_block_start, f_multi_block_next, release_entries);
                        }
                    }
                }
            } catch (Throwable e) {
                last_failure = e;
            } finally {
                if (release_entries) {
                    it.remove();
                    if (!dirty) {
                        manager.releaseCacheSpace(entry);
                    }
                    entry_total_released += entry.getLength();
                    if (minimum_to_release != -1 && entry_total_released > minimum_to_release) {
                        break;
                    }
                }
            }
        }
        if (multi_block_start != -1) {
            boolean skip_chunk = false;
            if (min_chunk_size != -1) {
                if (release_entries) {
                    Debug.out("CacheFile: can't use min chunk with release option");
                } else {
                    skip_chunk = multi_block_next - multi_block_start < min_chunk_size;
                }
            }
            if (skip_chunk) {
                if (TRACE)
                    Logger.log(new LogEvent(torrent, LOGID, "flushCache: skipping " + multi_block_entries.size() + " entries, [" + multi_block_start + "," + multi_block_next + "] as too small"));
            } else {
                multiBlockFlush(multi_block_entries, multi_block_start, multi_block_next, release_entries);
            }
        }
        if (last_failure != null) {
            if (last_failure instanceof CacheFileManagerException) {
                throw ((CacheFileManagerException) last_failure);
            }
            throw (new CacheFileManagerException(this, "cache flush failed", last_failure));
        }
    } finally {
        this_mon.exit();
    }
}
Also used: LogEvent (com.biglybt.core.logging.LogEvent), CacheFileManagerException (com.biglybt.core.diskmanager.cache.CacheFileManagerException)
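
Stripped of the release and accounting logic, the loop is a run-coalescing scan: dirty entries that are contiguous by file position are accumulated into one run, and the run is flushed whenever a gap appears or the scan ends. A sketch of just that scan, assuming a toy Entry record and Flusher callback in place of CacheEntry and multiBlockFlush:

import java.util.ArrayList;
import java.util.List;

class CoalesceSketch {
    record Entry(long pos, int len, boolean dirty) {}

    interface Flusher { void flush(List<Entry> run, long start, long next); }

    // entries are assumed sorted by file position, as the cache set is above
    static void flushDirtyRuns(List<Entry> sorted, Flusher flusher) {
        List<Entry> run = new ArrayList<>();
        long runStart = -1, runNext = -1;
        for (Entry e : sorted) {
            // clean entries are skipped; the positional gap they leave
            // splits the run at the next dirty entry
            if (!e.dirty()) continue;
            if (runStart == -1) { // start of day
                runStart = e.pos();
                runNext = e.pos() + e.len();
                run.add(e);
            } else if (runNext == e.pos()) { // contiguous: extend the run
                runNext = e.pos() + e.len();
                run.add(e);
            } else { // gap: flush current and start another series
                flusher.flush(run, runStart, runNext);
                run = new ArrayList<>();
                run.add(e);
                runStart = e.pos();
                runNext = e.pos() + e.len();
            }
        }
        if (runStart != -1) { // trailing run
            flusher.flush(run, runStart, runNext);
        }
    }
}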

Example 5 with CacheFileManagerException

Use of com.biglybt.core.diskmanager.cache.CacheFileManagerException in project BiglyBT by BiglySoftware.

From the class CacheFileWithCache, method readCache:

protected void readCache(final DirectByteBuffer file_buffer, final long file_position, final boolean recursive, final boolean disable_read_cache) throws CacheFileManagerException {
    checkPendingException();
    final int file_buffer_position = file_buffer.position(SS_CACHE);
    final int file_buffer_limit = file_buffer.limit(SS_CACHE);
    final int read_length = file_buffer_limit - file_buffer_position;
    try {
        if (manager.isCacheEnabled()) {
            if (TRACE)
                Logger.log(new LogEvent(torrent, LOGID, "readCache: " + getName() + ", " + file_position + " - " + (file_position + read_length - 1) + ":" + file_buffer_position + "/" + file_buffer_limit));
            if (read_length == 0) {
                // nothing to do
                return;
            }
            long writing_file_position = file_position;
            int writing_left = read_length;
            boolean ok = true;
            int used_entries = 0;
            long used_read_ahead = 0;
            try {
                this_mon.enter();
                if (read_history == null) {
                    read_history = new long[READAHEAD_HISTORY];
                    Arrays.fill(read_history, -1);
                }
                // record the position of the byte *following* the end of this read
                read_history[read_history_next++] = file_position + read_length;
                if (read_history_next == READAHEAD_HISTORY) {
                    read_history_next = 0;
                }
                Iterator it = cache.iterator();
                while (ok && writing_left > 0 && it.hasNext()) {
                    CacheEntry entry = (CacheEntry) it.next();
                    long entry_file_position = entry.getFilePosition();
                    int entry_length = entry.getLength();
                    if (entry_file_position > writing_file_position) {
                        // data missing at the start of the read section
                        ok = false;
                        break;
                    } else if (entry_file_position + entry_length <= writing_file_position) {
                    // not got there yet
                    } else {
                        // copy required amount into read buffer
                        int skip = (int) (writing_file_position - entry_file_position);
                        int available = entry_length - skip;
                        if (available > writing_left) {
                            available = writing_left;
                        }
                        DirectByteBuffer entry_buffer = entry.getBuffer();
                        int entry_buffer_position = entry_buffer.position(SS_CACHE);
                        int entry_buffer_limit = entry_buffer.limit(SS_CACHE);
                        try {
                            entry_buffer.limit(SS_CACHE, entry_buffer_position + skip + available);
                            entry_buffer.position(SS_CACHE, entry_buffer_position + skip);
                            if (TRACE)
                                Logger.log(new LogEvent(torrent, LOGID, "cacheRead: using " + entry.getString() + "[" + entry_buffer.position(SS_CACHE) + "/" + entry_buffer.limit(SS_CACHE) + "]" + "to write to [" + file_buffer.position(SS_CACHE) + "/" + file_buffer.limit(SS_CACHE) + "]"));
                            used_entries++;
                            file_buffer.put(SS_CACHE, entry_buffer);
                            manager.cacheEntryUsed(entry);
                        } finally {
                            entry_buffer.limit(SS_CACHE, entry_buffer_limit);
                            entry_buffer.position(SS_CACHE, entry_buffer_position);
                        }
                        writing_file_position += available;
                        writing_left -= available;
                        if (entry.getType() == CacheEntry.CT_READ_AHEAD) {
                            used_read_ahead += available;
                        }
                    }
                }
            } finally {
                if (ok) {
                    read_ahead_bytes_used += used_read_ahead;
                }
                this_mon.exit();
            }
            if (ok && writing_left == 0) {
                if (!recursive) {
                    manager.cacheBytesRead(read_length);
                    bytes_read += read_length;
                }
                if (TRACE)
                    Logger.log(new LogEvent(torrent, LOGID, "cacheRead: cache use ok [entries = " + used_entries + "]"));
            } else {
                if (TRACE)
                    Logger.log(new LogEvent(torrent, LOGID, "cacheRead: cache use fails, reverting to plain read"));
                // reset in case we've done some partial reads
                file_buffer.position(SS_CACHE, file_buffer_position);
                for (int i = 0; i < 2; i++) {
                    try {
                        // first time round
                        boolean do_read_ahead = i == 0 && !recursive && !disable_read_cache && read_history != null && manager.isReadCacheEnabled() && read_length < current_read_ahead_size && file_position + current_read_ahead_size <= file.getLength();
                        if (do_read_ahead) {
                            // only read ahead if this is a continuation of a prior read within history
                            do_read_ahead = false;
                            for (int j = 0; j < READAHEAD_HISTORY; j++) {
                                if (read_history[j] == file_position) {
                                    do_read_ahead = true;
                                    break;
                                }
                            }
                        }
                        int actual_read_ahead = current_read_ahead_size;
                        if (do_read_ahead) {
                            // don't read ahead over the end of a piece
                            int request_piece_offset = (int) ((file_position - piece_offset) % piece_size);
                            if (request_piece_offset < 0) {
                                request_piece_offset += piece_size;
                            }
                            // System.out.println( "request offset = " + request_piece_offset );
                            int data_left = piece_size - request_piece_offset;
                            if (data_left < actual_read_ahead) {
                                actual_read_ahead = data_left;
                                if (actual_read_ahead <= read_length) {
                                    do_read_ahead = false;
                                }
                            // System.out.println( "    trimmed to " + data_left );
                            }
                        }
                        if (do_read_ahead) {
                            if (TRACE)
                                Logger.log(new LogEvent(torrent, LOGID, "\tperforming read-ahead"));
                            DirectByteBuffer cache_buffer = DirectByteBufferPool.getBuffer(DirectByteBuffer.AL_CACHE_READ, actual_read_ahead);
                            boolean buffer_cached = false;
                            try {
                                // must allocate space OUTSIDE sync block (see manager for details)
                                CacheEntry entry = manager.allocateCacheSpace(CacheEntry.CT_READ_AHEAD, this, cache_buffer, file_position, actual_read_ahead);
                                entry.setClean();
                                try {
                                    this_mon.enter();
                                    // flush before read so that any bits in cache get re-read correctly on read
                                    flushCache(file_position, actual_read_ahead, true, -1, 0, -1);
                                    getFMFile().read(cache_buffer, file_position);
                                    read_ahead_bytes_made += actual_read_ahead;
                                    manager.fileBytesRead(actual_read_ahead);
                                    bytes_read += actual_read_ahead;
                                    cache_buffer.position(SS_CACHE, 0);
                                    cache.add(entry);
                                    manager.addCacheSpace(entry);
                                } finally {
                                    this_mon.exit();
                                }
                                buffer_cached = true;
                            } finally {
                                if (!buffer_cached) {
                                    // if the read operation failed, and hence the buffer
                                    // wasn't added to the cache, then release it here
                                    cache_buffer.returnToPool();
                                }
                            }
                            // recursively read from the cache, should hit the data we just read although
                            // there is the possibility that it could be flushed before then - hence the
                            // recursion flag that will avoid this happening next time around
                            readCache(file_buffer, file_position, true, disable_read_cache);
                        } else {
                            if (TRACE)
                                Logger.log(new LogEvent(torrent, LOGID, "\tnot performing read-ahead"));
                            try {
                                this_mon.enter();
                                flushCache(file_position, read_length, true, -1, 0, -1);
                                getFMFile().read(file_buffer, file_position);
                            } finally {
                                this_mon.exit();
                            }
                            manager.fileBytesRead(read_length);
                            bytes_read += read_length;
                        }
                        break;
                    } catch (CacheFileManagerException e) {
                        if (i == 1) {
                            throw (e);
                        }
                    } catch (FMFileManagerException e) {
                        if (i == 1) {
                            manager.rethrow(this, e);
                        }
                    }
                }
            }
        } else {
            try {
                getFMFile().read(file_buffer, file_position);
                manager.fileBytesRead(read_length);
                bytes_read += read_length;
            } catch (FMFileManagerException e) {
                manager.rethrow(this, e);
            }
        }
    } finally {
        if (AEDiagnostics.CHECK_DUMMY_FILE_DATA) {
            long temp_position = file_position + file_offset_in_torrent;
            file_buffer.position(SS_CACHE, file_buffer_position);
            while (file_buffer.hasRemaining(SS_CACHE)) {
                byte v = file_buffer.get(SS_CACHE);
                if ((byte) temp_position != v) {
                    System.out.println("readCache: read is bad at " + temp_position + ": expected = " + (byte) temp_position + ", actual = " + v);
                    file_buffer.position(SS_CACHE, file_buffer_limit);
                    break;
                }
                temp_position++;
            }
        }
    }
}
Also used: FMFileManagerException (com.biglybt.core.diskmanager.file.FMFileManagerException), LogEvent (com.biglybt.core.logging.LogEvent), CacheFileManagerException (com.biglybt.core.diskmanager.cache.CacheFileManagerException)
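
The read-ahead heuristic is worth isolating: a read-ahead is attempted only when the new read starts exactly where a recent read ended (tracked in the read_history ring buffer), and its size is trimmed so it never crosses the current piece boundary. A sketch of that decision as a pure function, with illustrative names and parameters mirroring the fields used above:

class ReadAheadSketch {
    // history holds the end offset of recent reads (-1 for empty slots);
    // returns the read-ahead size to use, or 0 for a plain read
    static int readAheadSize(long[] history, long filePosition, int readLength,
                             int readAheadSize, long pieceOffset, int pieceSize,
                             long fileLength) {
        if (readLength >= readAheadSize
                || filePosition + readAheadSize > fileLength) {
            return 0; // read too large, or read-ahead would pass end of file
        }
        boolean continuation = false;
        for (long end : history) {
            if (end == filePosition) { // continues a recent read
                continuation = true;
                break;
            }
        }
        if (!continuation) {
            return 0;
        }
        int pieceRel = (int) ((filePosition - pieceOffset) % pieceSize);
        if (pieceRel < 0) {
            pieceRel += pieceSize;
        }
        int dataLeft = pieceSize - pieceRel; // don't read ahead over the piece end
        int actual = Math.min(readAheadSize, dataLeft);
        return actual > readLength ? actual : 0; // not worth it if <= the read itself
    }
}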

Aggregations

CacheFileManagerException (com.biglybt.core.diskmanager.cache.CacheFileManagerException): 7 usages
LogEvent (com.biglybt.core.logging.LogEvent): 5 usages
FMFileManagerException (com.biglybt.core.diskmanager.file.FMFileManagerException): 3 usages
CacheFile (com.biglybt.core.diskmanager.cache.CacheFile): 2 usages
DirectByteBuffer (com.biglybt.core.util.DirectByteBuffer): 2 usages
DiskManagerFileInfoImpl (com.biglybt.core.disk.impl.DiskManagerFileInfoImpl): 1 usage
DiskManagerRecheckInstance (com.biglybt.core.disk.impl.DiskManagerRecheckInstance): 1 usage
DMPieceList (com.biglybt.core.disk.impl.piecemapper.DMPieceList): 1 usage
DMPieceMapEntry (com.biglybt.core.disk.impl.piecemapper.DMPieceMapEntry): 1 usage
TOTorrentFile (com.biglybt.core.torrent.TOTorrentFile): 1 usage
AESemaphore (com.biglybt.core.util.AESemaphore): 1 usage
ByteArrayHashMap (com.biglybt.core.util.ByteArrayHashMap): 1 usage
File (java.io.File): 1 usage
IOException (java.io.IOException): 1 usage