Use of diskCacheV111.util.DiskErrorCacheException in the dCache project (by dCache).
From the class ConsistentReplicaStore, method get:
/**
* Retrieves a CacheRepositoryEntry from the wrapped meta data store. If the entry is missing or
* fails consistency checks, the entry is reconstructed with information from PNFS.
*/
@Override
public ReplicaRecord get(PnfsId id) throws IllegalArgumentException, CacheException {
ReplicaRecord entry = _replicaStore.get(id);
if (entry != null && isBroken(entry)) {
LOGGER.warn("Recovering {}...", id);
try {
/* It is safe to remove FROM_STORE/FROM_POOL replicas: We have
* another copy anyway. Files in REMOVED or DESTROYED
* were about to be deleted, so we can finish the job.
*/
switch(entry.getState()) {
case FROM_POOL:
case FROM_STORE:
case REMOVED:
case DESTROYED:
_replicaStore.remove(id);
_pnfsHandler.clearCacheLocation(id);
LOGGER.info("Recovering: Removed {} because it was not fully staged.", id);
return null;
}
entry = rebuildEntry(entry);
} catch (IOException e) {
throw new DiskErrorCacheException("I/O error in healer: " + messageOrClassName(e), e);
} catch (FileNotFoundCacheException e) {
_replicaStore.remove(id);
LOGGER.warn("Recovering: Removed {} because name space entry was deleted.", id);
return null;
} catch (FileIsNewCacheException e) {
_replicaStore.remove(id);
LOGGER.warn("Recovering: Removed {}: {}", id, e.getMessage());
return null;
} catch (TimeoutCacheException e) {
throw e;
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
throw new CacheException("Pool is shutting down", e);
} catch (CacheException | NoSuchAlgorithmException e) {
entry.update("Failed to recover replica: " + e.getMessage(), r -> r.setState(ReplicaState.BROKEN));
LOGGER.error(AlarmMarkerFactory.getMarker(PredefinedAlarm.BROKEN_FILE, id.toString(), _poolName), "Marked {} bad: {}.", id, e.getMessage());
}
}
return entry;
}
Use of diskCacheV111.util.DiskErrorCacheException in the dCache project (by dCache).
From the class MoverRequestScheduler, method sendToExecution:
/**
 * Hands {@code request} over to its mover for execution within the request's
 * diagnostic context. On completion (success or failure) the mover is closed;
 * once closed, the request is retired and the next queued request, if any,
 * is submitted.
 */
private void sendToExecution(final PrioritizedRequest request) {
    try (CDC ignore = request.getCdc().restore()) {
        request.transfer(new CompletionHandler<Void, Void>() {
            @Override
            public void completed(Void result, Void attachment) {
                closeMover();
            }

            @Override
            public void failed(Throwable exc, Void attachment) {
                if (exc instanceof InterruptedException || exc instanceof InterruptedIOException) {
                    request.getMover().setTransferStatus(CacheException.DEFAULT_ERROR_CODE, "Transfer was killed");
                } else {
                    notifyOnDiskError("transfer", exc);
                }
                closeMover();
            }

            /* Closes the mover inside the request's diagnostic context and
             * retires the request afterwards, regardless of the outcome. */
            private void closeMover() {
                try (CDC ignore = request.getCdc().restore()) {
                    request.getMover().close(new CompletionHandler<Void, Void>() {
                        @Override
                        public void completed(Void result, Void attachment) {
                            retire();
                        }

                        @Override
                        public void failed(Throwable exc, Void attachment) {
                            notifyOnDiskError("post-processing", exc);
                            retire();
                        }

                        /* Drops all bookkeeping for the request and submits
                         * the next runnable request, if one became available. */
                        private void retire() {
                            request.done();
                            _jobs.remove(request.getId());
                            _moverByRequests.remove(request.getDoorUniqueId());
                            PrioritizedRequest next = nextOrRelease();
                            if (next != null) {
                                sendToExecution(next);
                            }
                        }
                    });
                }
            }
        });
    }
}

/** Broadcasts a DISABLED fault to all listeners when {@code exc} is a disk error. */
private void notifyOnDiskError(String eventName, Throwable exc) {
    if (exc instanceof DiskErrorCacheException) {
        FaultEvent event = new FaultEvent(eventName, FaultAction.DISABLED, exc.getMessage(), exc);
        _faultListeners.forEach(listener -> listener.faultOccurred(event));
    }
}
Use of diskCacheV111.util.DiskErrorCacheException in the dCache project (by dCache).
From the class RepositorySubsystemTest, method testCreateEntryFromStore:
/**
 * Exercises creating an entry in FROM_STORE state and committing it as
 * CACHED while the pnfs cell rejects the first attribute update: the stub
 * answers any PnfsSetFileAttributes message that carries SIZE with a
 * CacheException and accepts the message otherwise.
 */
@Test
public void testCreateEntryFromStore() throws Throwable {
    repository.init();
    repository.load();
    stateChangeEvents.clear();
    new CellStubHelper(cell) {
        /* Reject the size update to drive the error-handling path of commit. */
        @Message(required = true, step = 1, cell = "pnfs")
        public Object message(PnfsSetFileAttributes msg) {
            if (msg.getFileAttributes().isDefined(FileAttribute.SIZE)) {
                return new CacheException("");
            }
            msg.setSucceeded();
            return msg;
        }

        @Override
        protected void run() throws CacheException, InterruptedException {
            List<StickyRecord> stickyRecords = Collections.emptyList();
            ReplicaDescriptor handle = repository.createEntry(attributes5, FROM_STORE, CACHED, stickyRecords, EnumSet.noneOf(OpenFlags.class), OptionalLong.empty());
            try {
                createFile(handle, attributes5.getSize());
                handle.commit();
            } catch (IOException e) {
                // Chain the original exception instead of discarding it:
                // e.getMessage() alone may be null and loses the stack trace.
                throw new DiskErrorCacheException("I/O failure: " + e, e);
            } finally {
                handle.close();
            }
        }
    };
}
Use of diskCacheV111.util.DiskErrorCacheException in the dCache project (by dCache).
From the class CacheRepositoryEntryImpl, method setFileAttributes:
/**
 * Writes the given attributes into the backing repository maps: storage info
 * and access/creation times are stored when defined and removed otherwise.
 * The cached storage info is invalidated before the database is touched.
 *
 * @throws CacheException if the database update fails; a
 *         DiskErrorCacheException is raised when the environment has become
 *         invalid and a pool restart is required.
 */
private Void setFileAttributes(FileAttributes attributes) throws CacheException {
    try {
        String id = _pnfsId.toString();
        // Drop the cached value before updating the persistent maps.
        _storageInfoCache.clear();
        // TODO to check the case when STORAGEINFO size=0
        if (attributes.isDefined(FileAttribute.STORAGEINFO)) {
            _repository.getStorageInfoMap().put(id, StorageInfos.extractFrom(attributes));
        } else {
            _repository.getStorageInfoMap().remove(id);
        }
        // TODO check should there be separate methods
        if (attributes.isDefined(FileAttribute.ACCESS_TIME) && attributes.isDefined(FileAttribute.CREATION_TIME)) {
            AccessTimeInfo times = new AccessTimeInfo();
            times.setLastAccessTime(attributes.getAccessTime());
            times.setCreationTime(attributes.getCreationTime());
            _repository.getAccessTimeInfo().put(id, times);
        } else {
            _repository.getAccessTimeInfo().remove(id);
        }
        return null;
    } catch (EnvironmentFailureException | OperationFailureException e) {
        // Only an environment failure with an invalid environment calls for
        // a restart; everything else is reported as a plain update failure.
        if (e instanceof EnvironmentFailureException && !_repository.isValid()) {
            throw new DiskErrorCacheException("Meta data update failed and a pool restart is required: " + e.getMessage(), e);
        }
        throw new CacheException("Meta data update failed: " + e.getMessage(), e);
    }
}
Use of diskCacheV111.util.DiskErrorCacheException in the dCache project (by dCache).
From the class FileMetaDataRepository, method create:
/**
 * Creates a fresh repository entry for {@code id}. Stale control and "SI-"
 * meta data files left over from a previous incarnation are deleted first;
 * the data file itself is only created when the CREATE flag is present.
 *
 * @throws DuplicateEntryException if the file store already contains the id
 * @throws CacheException if an I/O error prevents creating the entry
 */
@Override
public ReplicaRecord create(PnfsId id, Set<? extends OpenOption> flags) throws DuplicateEntryException, CacheException {
    if (_fileStore.contains(id)) {
        throw new DuplicateEntryException(id);
    }
    String name = id.toString();
    Path controlFile = _metadir.resolve(name);
    Path siFile = _metadir.resolve("SI-" + name);
    try {
        /* In case of left over or corrupted files, we delete them
         * before creating a new entry.
         */
        Files.deleteIfExists(controlFile);
        Files.deleteIfExists(siFile);
        if (flags.contains(StandardOpenOption.CREATE)) {
            _fileStore.create(id);
        }
        return new CacheRepositoryEntryImpl(id, controlFile, _fileStore, siFile);
    } catch (IOException e) {
        throw new DiskErrorCacheException("Failed to create new entry " + id + ": " + messageOrClassName(e), e);
    }
}
Aggregations