Use of org.dcache.pool.FaultEvent in project dcache by dCache: class MoverRequestScheduler, method sendToExecution.
private void sendToExecution(final PrioritizedRequest request) {
    try (CDC ignore = request.getCdc().restore()) {
        request.transfer(new CompletionHandler<Void, Void>() {
            @Override
            public void completed(Void result, Void attachment) {
                postprocess();
            }

            @Override
            public void failed(Throwable exc, Void attachment) {
                if (exc instanceof InterruptedException || exc instanceof InterruptedIOException) {
                    request.getMover().setTransferStatus(CacheException.DEFAULT_ERROR_CODE,
                          "Transfer was killed");
                } else if (exc instanceof DiskErrorCacheException) {
                    // A disk fault is reported to the pool's fault listeners so the
                    // pool can take the requested action (here: disable itself).
                    FaultEvent faultEvent =
                          new FaultEvent("transfer", FaultAction.DISABLED, exc.getMessage(), exc);
                    _faultListeners.forEach(l -> l.faultOccurred(faultEvent));
                }
                postprocess();
            }

            private void postprocess() {
                // Whether the transfer succeeded or failed, close the mover and
                // then release the execution slot.
                try (CDC ignore = request.getCdc().restore()) {
                    request.getMover().close(new CompletionHandler<Void, Void>() {
                        @Override
                        public void completed(Void result, Void attachment) {
                            release();
                        }

                        @Override
                        public void failed(Throwable exc, Void attachment) {
                            if (exc instanceof DiskErrorCacheException) {
                                FaultEvent faultEvent = new FaultEvent("post-processing",
                                      FaultAction.DISABLED, exc.getMessage(), exc);
                                _faultListeners.forEach(l -> l.faultOccurred(faultEvent));
                            }
                            release();
                        }

                        private void release() {
                            // Free the slot and, if another request is queued,
                            // start it on the same execution slot.
                            request.done();
                            _jobs.remove(request.getId());
                            _moverByRequests.remove(request.getDoorUniqueId());
                            PrioritizedRequest nextRequest = nextOrRelease();
                            if (nextRequest != null) {
                                sendToExecution(nextRequest);
                            }
                        }
                    });
                }
            }
        });
    }
}
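Both failure paths above build a FaultEvent and fan it out to the pool's registered fault listeners. A minimal listener sketch follows; it assumes that FaultListener and FaultAction live alongside FaultEvent in org.dcache.pool, and that FaultEvent exposes accessors (getSource, getMessage, getAction) matching the constructor arguments seen in this excerpt; neither assumption is confirmed by the code above.

import org.dcache.pool.FaultAction;   // assumed package, alongside FaultEvent
import org.dcache.pool.FaultEvent;
import org.dcache.pool.FaultListener; // assumed package, alongside FaultEvent

/* A minimal logging FaultListener sketch; accessor names are assumptions
 * inferred from the FaultEvent constructor used above. */
public class LoggingFaultListener implements FaultListener {

    @Override
    public void faultOccurred(FaultEvent event) {
        System.err.printf("fault in %s: %s (requested action: %s)%n",
              event.getSource(), event.getMessage(), event.getAction());
        if (event.getAction() == FaultAction.DISABLED) {
            // A real listener would disable the pool here; this sketch only logs.
        }
    }
}

Note how sendToExecution keeps interrupts out of the fault path: only DiskErrorCacheException reaches the listeners, while an interrupted transfer is recorded on the mover as an ordinary killed transfer.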
Use of org.dcache.pool.FaultEvent in project dcache by dCache: class ReplicaRepository, method setReplicaStore.
public void setReplicaStore(ReplicaStore store) {
    _stateLock.readLock().lock();
    try {
        checkUninitialized();
        _store = new ReplicaStoreCache(store, new StateChangeListener() {
            @Override
            public void stateChanged(StateChangeEvent event) {
                PnfsId id = event.getPnfsId();
                if (event.getOldState() != NEW || event.getNewState() != REMOVED) {
                    if (event.getOldState() == NEW) {
                        long size = event.getNewEntry().getReplicaSize();
                        /* Usually space has to be allocated before writing the
                         * data to disk, however during pool startup we are notified
                         * about "new" files that already consume space, so we
                         * adjust the allocation here.
                         */
                        if (size > 0) {
                            _account.growTotalAndUsed(id, size);
                        }
                        scheduleExpirationTask(event.getNewEntry());
                    }

                    updateRemovable(event.getNewEntry());

                    if (event.getOldState() != PRECIOUS && event.getNewState() == PRECIOUS) {
                        _account.adjustPrecious(id, event.getNewEntry().getReplicaSize());
                    } else if (event.getOldState() == PRECIOUS && event.getNewState() != PRECIOUS) {
                        _account.adjustPrecious(id, -event.getOldEntry().getReplicaSize());
                    }

                    _stateChangeListeners.stateChanged(event);
                }

                switch (event.getNewState()) {
                    case REMOVED:
                        if (event.getOldState() != NEW) {
                            LOGGER.info("remove entry {}: {}", id, event.getWhy());
                        }
                        _pnfs.clearCacheLocation(id, _volatile);
                        ScheduledFuture<?> oldTask = _tasks.remove(id);
                        if (oldTask != null) {
                            oldTask.cancel(false);
                        }
                        break;
                    case DESTROYED:
                        /* It is essential to free after we removed the file: This is the opposite
                         * of what happens during allocation, in which we allocate before writing
                         * to disk. We rely on never having anything on disk that we haven't accounted
                         * for in the Account object.
                         */
                        long size = event.getOldEntry().getReplicaSize();
                        if (size > 0L) {
                            _account.free(id, size);
                        }
                        break;
                }
            }

            @Override
            public void accessTimeChanged(EntryChangeEvent event) {
                updateRemovable(event.getNewEntry());
                _stateChangeListeners.accessTimeChanged(event);
            }

            @Override
            public void stickyChanged(StickyChangeEvent event) {
                updateRemovable(event.getNewEntry());
                _stateChangeListeners.stickyChanged(event);
                scheduleExpirationTask(event.getNewEntry());
            }
        }, new FaultListener() {
            @Override
            public void faultOccurred(FaultEvent event) {
                for (FaultListener listener : _faultListeners) {
                    listener.faultOccurred(event);
                }
            }
        });
    } finally {
        _stateLock.readLock().unlock();
    }
}
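The two block comments in stateChanged describe the accounting invariant the repository maintains: space is allocated in the Account before bytes reach disk, and freed only after the replica has been removed, so the Account never claims less space than is actually on disk. A toy sketch of that ordering (ToyAccount and ToyStore are hypothetical illustrations, not dCache classes):

/* Toy sketch of the allocate-before-write / free-after-remove invariant
 * described in the comments above. Not dCache's Account or store API. */
class ToyAccount {
    private long used;

    synchronized void allocate(long size) { used += size; }
    synchronized void free(long size)     { used -= size; }
    synchronized long getUsed()           { return used; }
}

class ToyStore {
    private final ToyAccount account = new ToyAccount();

    void write(byte[] data) {
        account.allocate(data.length); // 1. account for the space first...
        // 2. ...then write the bytes; if the write fails, call free() so the
        //    accounting never falls below what is actually on disk.
    }

    void remove(long replicaSize) {
        // 1. delete the file from disk first...
        account.free(replicaSize);     // 2. ...then release the accounting,
                                       //    mirroring the DESTROYED case above.
    }
}

Pool startup is the one exception: entries reported as NEW may already consume space, which is why stateChanged calls growTotalAndUsed to bring the Account back in line before scheduling the expiration task.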