Example 1 with DiskErrorCacheException

Use of diskCacheV111.util.DiskErrorCacheException in project dcache by dCache.

From the class ConsistentReplicaStore, method get.

/**
 * Retrieves a CacheRepositoryEntry from the wrapped meta data store. If the entry is missing or
 * fails consistency checks, the entry is reconstructed with information from PNFS.
 */
@Override
public ReplicaRecord get(PnfsId id) throws IllegalArgumentException, CacheException {
    ReplicaRecord entry = _replicaStore.get(id);
    if (entry != null && isBroken(entry)) {
        LOGGER.warn("Recovering {}...", id);
        try {
            /* It is safe to remove FROM_STORE/FROM_POOL replicas: We have
             * another copy anyway. Files in REMOVED or DESTROYED
             * were about to be deleted, so we can finish the job.
             */
            switch (entry.getState()) {
                case FROM_POOL:
                case FROM_STORE:
                case REMOVED:
                case DESTROYED:
                    _replicaStore.remove(id);
                    _pnfsHandler.clearCacheLocation(id);
                    LOGGER.info("Recovering: Removed {} because it was not fully staged.", id);
                    return null;
            }
            entry = rebuildEntry(entry);
        } catch (IOException e) {
            throw new DiskErrorCacheException("I/O error in healer: " + messageOrClassName(e), e);
        } catch (FileNotFoundCacheException e) {
            _replicaStore.remove(id);
            LOGGER.warn("Recovering: Removed {} because name space entry was deleted.", id);
            return null;
        } catch (FileIsNewCacheException e) {
            _replicaStore.remove(id);
            LOGGER.warn("Recovering: Removed {}: {}", id, e.getMessage());
            return null;
        } catch (TimeoutCacheException e) {
            throw e;
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
            throw new CacheException("Pool is shutting down", e);
        } catch (CacheException | NoSuchAlgorithmException e) {
            entry.update("Failed to recover replica: " + e.getMessage(), r -> r.setState(ReplicaState.BROKEN));
            LOGGER.error(AlarmMarkerFactory.getMarker(PredefinedAlarm.BROKEN_FILE, id.toString(), _poolName), "Marked {} bad: {}.", id, e.getMessage());
        }
    }
    return entry;
}
Also used : AlarmMarkerFactory(org.dcache.alarms.AlarmMarkerFactory) ReplicaStatePolicy(org.dcache.pool.classic.ReplicaStatePolicy) FileIsNewCacheException(diskCacheV111.util.FileIsNewCacheException) AccessLatency(diskCacheV111.util.AccessLatency) PredefinedAlarm(org.dcache.alarms.PredefinedAlarm) LoggerFactory(org.slf4j.LoggerFactory) Exceptions.messageOrClassName(org.dcache.util.Exceptions.messageOrClassName) DiskErrorCacheException(diskCacheV111.util.DiskErrorCacheException) RETENTION_POLICY(org.dcache.namespace.FileAttribute.RETENTION_POLICY) PnfsHandler(diskCacheV111.util.PnfsHandler) CacheException(diskCacheV111.util.CacheException) Iterables.concat(com.google.common.collect.Iterables.concat) SIZE(org.dcache.namespace.FileAttribute.SIZE) FileNotFoundCacheException(diskCacheV111.util.FileNotFoundCacheException) EnumSet(java.util.EnumSet) ChecksumModule(org.dcache.pool.classic.ChecksumModule) FileAttributes(org.dcache.vehicles.FileAttributes) STORAGEINFO(org.dcache.namespace.FileAttribute.STORAGEINFO) PnfsId(diskCacheV111.util.PnfsId) Logger(org.slf4j.Logger) OpenOption(java.nio.file.OpenOption) LOCATIONS(org.dcache.namespace.FileAttribute.LOCATIONS) TimeoutCacheException(diskCacheV111.util.TimeoutCacheException) Set(java.util.Set) IOException(java.io.IOException) Sets(com.google.common.collect.Sets) Checksum(org.dcache.util.Checksum) List(java.util.List) NoSuchAlgorithmException(java.security.NoSuchAlgorithmException) FileAttribute(org.dcache.namespace.FileAttribute) Collections(java.util.Collections) ACCESS_LATENCY(org.dcache.namespace.FileAttribute.ACCESS_LATENCY) CHECKSUM(org.dcache.namespace.FileAttribute.CHECKSUM) RetentionPolicy(diskCacheV111.util.RetentionPolicy)
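
The pattern above reserves DiskErrorCacheException for low-level I/O failures, so callers can tell disk faults apart from ordinary cache errors such as a missing name space entry. The following is a minimal, self-contained sketch of that pattern, not actual dcache code; recover and rebuild are placeholder names.

import java.io.IOException;

import diskCacheV111.util.CacheException;
import diskCacheV111.util.DiskErrorCacheException;
import diskCacheV111.util.FileNotFoundCacheException;

public class RecoverySketch {

    // Mirrors the error handling of get() above in simplified form.
    static void recover(String id) throws CacheException {
        try {
            rebuild(id);
        } catch (FileNotFoundCacheException e) {
            // The name space entry is gone, so there is nothing left to recover.
        } catch (IOException e) {
            // A disk-level problem is escalated as a DiskErrorCacheException.
            throw new DiskErrorCacheException("I/O error while recovering " + id + ": " + e.getMessage(), e);
        }
    }

    // Placeholder standing in for the real rebuildEntry/PNFS interaction.
    static void rebuild(String id) throws IOException, CacheException {
    }
}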

Example 2 with DiskErrorCacheException

Use of diskCacheV111.util.DiskErrorCacheException in project dcache by dCache.

From the class MoverRequestScheduler, method sendToExecution.

private void sendToExecution(final PrioritizedRequest request) {
    try (CDC ignore = request.getCdc().restore()) {
        request.transfer(new CompletionHandler<Void, Void>() {

            @Override
            public void completed(Void result, Void attachment) {
                postprocess();
            }

            @Override
            public void failed(Throwable exc, Void attachment) {
                if (exc instanceof InterruptedException || exc instanceof InterruptedIOException) {
                    request.getMover().setTransferStatus(CacheException.DEFAULT_ERROR_CODE, "Transfer was killed");
                } else if (exc instanceof DiskErrorCacheException) {
                    FaultEvent faultEvent = new FaultEvent("transfer", FaultAction.DISABLED, exc.getMessage(), exc);
                    _faultListeners.forEach(l -> l.faultOccurred(faultEvent));
                }
                postprocess();
            }

            private void postprocess() {
                try (CDC ignore = request.getCdc().restore()) {
                    request.getMover().close(new CompletionHandler<Void, Void>() {

                        @Override
                        public void completed(Void result, Void attachment) {
                            release();
                        }

                        @Override
                        public void failed(Throwable exc, Void attachment) {
                            if (exc instanceof DiskErrorCacheException) {
                                FaultEvent faultEvent = new FaultEvent("post-processing", FaultAction.DISABLED, exc.getMessage(), exc);
                                _faultListeners.forEach(l -> l.faultOccurred(faultEvent));
                            }
                            release();
                        }

                        private void release() {
                            request.done();
                            _jobs.remove(request.getId());
                            _moverByRequests.remove(request.getDoorUniqueId());
                            PrioritizedRequest nextRequest = nextOrRelease();
                            if (nextRequest != null) {
                                sendToExecution(nextRequest);
                            }
                        }
                    });
                }
            }
        });
    }
}
Also used : InterruptedIOException(java.io.InterruptedIOException) CDC(dmg.cells.nucleus.CDC) CompletionHandler(java.nio.channels.CompletionHandler) FaultEvent(org.dcache.pool.FaultEvent) DiskErrorCacheException(diskCacheV111.util.DiskErrorCacheException)
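
The two failure handlers above translate a DiskErrorCacheException into a FaultEvent and pass it to every registered fault listener. Below is a minimal sketch of such a listener, assuming the FaultListener callback interface used by the forEach calls lives in org.dcache.pool alongside FaultEvent; a production listener (typically the pool itself) would disable the pool rather than only log.

import org.dcache.pool.FaultEvent;
import org.dcache.pool.FaultListener;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class LoggingFaultListener implements FaultListener {

    private static final Logger LOGGER = LoggerFactory.getLogger(LoggingFaultListener.class);

    @Override
    public void faultOccurred(FaultEvent event) {
        // This sketch only records the fault; real listeners may take the pool out of service here.
        LOGGER.error("Fault reported by the mover subsystem: {}", event);
    }
}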

Example 3 with DiskErrorCacheException

Use of diskCacheV111.util.DiskErrorCacheException in project dcache by dCache.

From the class RepositorySubsystemTest, method testCreateEntryFromStore.

@Test
public void testCreateEntryFromStore() throws Throwable {
    repository.init();
    repository.load();
    stateChangeEvents.clear();
    new CellStubHelper(cell) {

        @Message(required = true, step = 1, cell = "pnfs")
        public Object message(PnfsSetFileAttributes msg) {
            if (msg.getFileAttributes().isDefined(FileAttribute.SIZE)) {
                return new CacheException("");
            }
            msg.setSucceeded();
            return msg;
        }

        @Override
        protected void run() throws CacheException, InterruptedException {
            List<StickyRecord> stickyRecords = Collections.emptyList();
            ReplicaDescriptor handle = repository.createEntry(attributes5, FROM_STORE, CACHED, stickyRecords, EnumSet.noneOf(OpenFlags.class), OptionalLong.empty());
            try {
                createFile(handle, attributes5.getSize());
                handle.commit();
            } catch (IOException e) {
                throw new DiskErrorCacheException(e.getMessage());
            } finally {
                handle.close();
            }
        }
    };
}
Also used : StickyRecord(org.dcache.pool.repository.StickyRecord) ReplicaDescriptor(org.dcache.pool.repository.ReplicaDescriptor) OpenFlags(org.dcache.pool.repository.Repository.OpenFlags) DiskErrorCacheException(diskCacheV111.util.DiskErrorCacheException) LockedCacheException(diskCacheV111.util.LockedCacheException) FileInCacheException(diskCacheV111.util.FileInCacheException) CacheException(diskCacheV111.util.CacheException) FileNotInCacheException(diskCacheV111.util.FileNotInCacheException) PnfsSetFileAttributes(org.dcache.vehicles.PnfsSetFileAttributes) CellStubHelper(org.dcache.tests.cells.CellStubHelper) IOException(java.io.IOException) Test(org.junit.Test)
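
Distilled from the test, the fragment below shows the write/commit/close life cycle of a ReplicaDescriptor, with write failures surfaced as DiskErrorCacheException. It is a sketch in the style of the snippets above: the repository field and the writeReplica helper are assumed to exist in the surrounding class, and the ReplicaState constants correspond to the test's static imports FROM_STORE and CACHED.

private void importFromStore(FileAttributes attributes) throws CacheException, InterruptedException {
    ReplicaDescriptor handle = repository.createEntry(attributes, ReplicaState.FROM_STORE,
            ReplicaState.CACHED, Collections.emptyList(),
            EnumSet.noneOf(OpenFlags.class), OptionalLong.empty());
    try {
        // writeReplica stands in for the test's createFile helper.
        writeReplica(handle, attributes.getSize());
        handle.commit();
    } catch (IOException e) {
        throw new DiskErrorCacheException("Write failed: " + e.getMessage(), e);
    } finally {
        handle.close();
    }
}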

Example 4 with DiskErrorCacheException

Use of diskCacheV111.util.DiskErrorCacheException in project dcache by dCache.

From the class CacheRepositoryEntryImpl, method setFileAttributes.

private Void setFileAttributes(FileAttributes attributes) throws CacheException {
    try {
        String id = _pnfsId.toString();
        // invalidate cached value
        _storageInfoCache.clear();
        // TODO to check the case when STORAGEINFO size=0
        if (attributes.isDefined(FileAttribute.STORAGEINFO)) {
            _repository.getStorageInfoMap().put(id, StorageInfos.extractFrom(attributes));
        } else {
            _repository.getStorageInfoMap().remove(id);
        }
        // TODO check should there be separate methods
        if (attributes.isDefined(FileAttribute.ACCESS_TIME) && attributes.isDefined(FileAttribute.CREATION_TIME)) {
            AccessTimeInfo accessTimeInfo = new AccessTimeInfo();
            accessTimeInfo.setLastAccessTime(attributes.getAccessTime());
            accessTimeInfo.setCreationTime(attributes.getCreationTime());
            _repository.getAccessTimeInfo().put(id, accessTimeInfo);
        } else {
            _repository.getAccessTimeInfo().remove(id);
        }
    } catch (EnvironmentFailureException e) {
        if (!_repository.isValid()) {
            throw new DiskErrorCacheException("Meta data update failed and a pool restart is required: " + e.getMessage(), e);
        }
        throw new CacheException("Meta data update failed: " + e.getMessage(), e);
    } catch (OperationFailureException e) {
        throw new CacheException("Meta data update failed: " + e.getMessage(), e);
    }
    return null;
}
Also used : DiskErrorCacheException(diskCacheV111.util.DiskErrorCacheException) CacheException(diskCacheV111.util.CacheException) EnvironmentFailureException(com.sleepycat.je.EnvironmentFailureException) OperationFailureException(com.sleepycat.je.OperationFailureException)
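
Here DiskErrorCacheException is reserved for the case where the Berkeley DB environment itself has become unusable, while an OperationFailureException turns into a plain CacheException. The sketch below shows, under that reading, how a caller might react differently to the two outcomes; apart from the two exception types, every name in it is hypothetical.

import diskCacheV111.util.CacheException;
import diskCacheV111.util.DiskErrorCacheException;

class MetaDataUpdateSketch {

    interface MetaDataUpdate {
        void run() throws CacheException;
    }

    void updateSafely(MetaDataUpdate update, Runnable poolDisabler) {
        try {
            update.run();                         // e.g. the setFileAttributes call shown above
        } catch (DiskErrorCacheException e) {
            // The back end is broken: take the pool out of service until it is restarted.
            poolDisabler.run();
        } catch (CacheException e) {
            // Per-entry failure: report it and keep serving other requests.
            System.err.println("Meta data update failed: " + e.getMessage());
        }
    }
}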

Example 5 with DiskErrorCacheException

Use of diskCacheV111.util.DiskErrorCacheException in project dcache by dCache.

From the class FileMetaDataRepository, method create.

@Override
public ReplicaRecord create(PnfsId id, Set<? extends OpenOption> flags) throws DuplicateEntryException, CacheException {
    try {
        Path controlFile = _metadir.resolve(id.toString());
        Path siFile = _metadir.resolve("SI-" + id.toString());
        if (_fileStore.contains(id)) {
            throw new DuplicateEntryException(id);
        }
        /* In case of left over or corrupted files, we delete them
         * before creating a new entry.
         */
        Files.deleteIfExists(controlFile);
        Files.deleteIfExists(siFile);
        if (flags.contains(StandardOpenOption.CREATE)) {
            _fileStore.create(id);
        }
        return new CacheRepositoryEntryImpl(id, controlFile, _fileStore, siFile);
    } catch (IOException e) {
        throw new DiskErrorCacheException("Failed to create new entry " + id + ": " + messageOrClassName(e), e);
    }
}
Also used : Path(java.nio.file.Path) DuplicateEntryException(org.dcache.pool.repository.DuplicateEntryException) IOException(java.io.IOException) DiskErrorCacheException(diskCacheV111.util.DiskErrorCacheException)
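
A caller of this create method sees two distinct failure modes: DuplicateEntryException when the id is already registered, and DiskErrorCacheException when the meta data directory itself cannot be written. The fragment below sketches one way to handle them; newRecord is a hypothetical helper, and wrapping the duplicate case in FileInCacheException is an assumption for illustration, not part of the snippet above.

ReplicaRecord newRecord(FileMetaDataRepository store, PnfsId id) throws CacheException {
    try {
        return store.create(id, EnumSet.of(StandardOpenOption.CREATE));
    } catch (DuplicateEntryException e) {
        // Another replica with this id is already registered on the pool.
        throw new FileInCacheException("Entry already exists: " + id);
    }
    // A DiskErrorCacheException from create() propagates unchanged; callers further up
    // (compare the mover example above) translate it into a pool fault.
}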

Aggregations

DiskErrorCacheException (diskCacheV111.util.DiskErrorCacheException): 17
IOException (java.io.IOException): 14
CacheException (diskCacheV111.util.CacheException): 10
EnvironmentFailureException (com.sleepycat.je.EnvironmentFailureException): 5
OperationFailureException (com.sleepycat.je.OperationFailureException): 5
PnfsId (diskCacheV111.util.PnfsId): 4
Path (java.nio.file.Path): 4
DuplicateEntryException (org.dcache.pool.repository.DuplicateEntryException): 4
Stopwatch (com.google.common.base.Stopwatch): 3
OpenOption (java.nio.file.OpenOption): 3
List (java.util.List): 3
Set (java.util.Set): 3
MongoException (com.mongodb.MongoException): 2
StorageInfo (diskCacheV111.vehicles.StorageInfo): 2
StandardOpenOption (java.nio.file.StandardOpenOption): 2
Exceptions.messageOrClassName (org.dcache.util.Exceptions.messageOrClassName): 2
Logger (org.slf4j.Logger): 2
LoggerFactory (org.slf4j.LoggerFactory): 2
Iterables.concat (com.google.common.collect.Iterables.concat): 1
Sets (com.google.common.collect.Sets): 1