Use of diskCacheV111.util.PnfsId in the dCache project.
Class FileOperationHandler, method handlePromoteToSticky:
/**
 * <p>Delegates to {@code promoteToSticky} for the operation's selected
 * target pool and reports success or failure to the completion handler.</p>
 *
 * @param attributes file attributes carrying the pnfsid of the replica.
 */
public void handlePromoteToSticky(FileAttributes attributes) {
    PnfsId id = attributes.getPnfsId();
    FileOperation op = fileOpMap.getOperation(id);
    String targetPool = poolInfoMap.getPool(op.getTarget());
    try {
        promoteToSticky(id, targetPool);
        completionHandler.taskCompleted(id);
    } catch (CacheException error) {
        completionHandler.taskFailed(id, error);
    }
}
Use of diskCacheV111.util.PnfsId in the dCache project.
Class FileOperationHandler, method determineTypeFromConstraints:
/**
 * <p>Checks the readable locations against the storage-unit requirements.
 * If previous operations on this pnfsId have already satisfied them, the
 * operation should be voided.</p>
 *
 * @param operation -- the current operation on the given pnfsid
 * @param excluded -- number of member pools manually excluded by admins
 * @param occupied -- group member pools with a replica in any state
 * @param sticky -- group member replicas that are sticky
 * @param nonSticky -- group member replicas that are not sticky
 * @param verified -- the messages returned by the pools
 *                    (presumably ReplicaStatusMessage replies — raw Collection, TODO confirm)
 * @return the type of operation which should take place, if any.
 */
private Type determineTypeFromConstraints(FileOperation operation, int excluded, Set<String> occupied, Set<String> sticky, Set<String> nonSticky, Collection verified) {
PnfsId pnfsId = operation.getPnfsId();
Integer gindex = operation.getPoolGroup();
Integer sindex = operation.getStorageUnit();
LOGGER.trace("determineTypeFromConstraints {}, group {}, unit {}.", pnfsId, gindex, sindex);
StorageUnitConstraints constraints = poolInfoMap.getStorageUnitConstraints(sindex);
int required = constraints.getRequired();
// Deficit counting only the currently sticky replicas; negative means excess.
int missing = required - sticky.size();
/*
 * First compute the missing files on the basis of just the readable
 * files. If this is positive, recompute by adding in all the
 * excluded locations. If these satisfy the requirement, void
 * the operation. Do not allow removes in this case, since this
 * would imply decreasing already deficient locations.
 */
if (missing > 0) {
missing -= excluded;
if (missing < 0) {
missing = 0;
}
}
Collection<String> tags = constraints.getOneCopyPer();
LOGGER.trace("{}, required {}, excluded {}, missing {}.", pnfsId, required, excluded, missing);
Type type;
String source = null;
String target = null;
try {
/*
 * Note that if the operation source or target is preset,
 * and the location is valid, the selection is skipped.
 */
if (missing < 0) {
// Excess replicas: remove one. Reuse a preset target only if it is
// still viable and its replica is actually removable.
Integer index = operation.getTarget();
if (index == null || !poolInfoMap.isPoolViable(index, true) || !verifier.isRemovable(poolInfoMap.getPool(index), verified)) {
Set<String> removable = verifier.areRemovable(sticky, verified);
target = locationSelector.selectRemoveTarget(operation, sticky, removable, tags);
}
LOGGER.trace("target to remove: {}", target);
type = Type.REMOVE;
} else if (missing > 0) {
// Deficient replicas: promote an existing non-sticky replica if
// possible, otherwise copy to a new target.
Integer viableSource = operation.getSource();
if (viableSource != null && !poolInfoMap.isPoolViable(viableSource, false)) {
viableSource = null;
}
Integer targetIndex = operation.getTarget();
if (targetIndex == null) {
/*
 * See if we can avoid a copy by promoting an existing
 * non-sticky replica to sticky.
 *
 * If the source pool is actually a non-sticky replica,
 * choose that first.
 */
if (viableSource != null) {
source = poolInfoMap.getPool(viableSource);
if (nonSticky.contains(source)) {
fileOpMap.updateOperation(pnfsId, null, source);
LOGGER.trace("promoting source to sticky: {}", source);
return Type.SET_STICKY;
}
}
target = locationSelector.selectPromotionTarget(operation, sticky, nonSticky, tags);
if (target != null) {
fileOpMap.updateOperation(pnfsId, null, target);
LOGGER.trace("target to promote to sticky: {}", target);
return Type.SET_STICKY;
}
// No promotable replica: fall through to selecting a copy target.
target = locationSelector.selectCopyTarget(operation, gindex, occupied, tags);
} else if (!poolInfoMap.isPoolViable(targetIndex, true)) {
// Preset target is no longer viable; reselect.
target = locationSelector.selectCopyTarget(operation, gindex, occupied, tags);
}
LOGGER.trace("target to copy: {}", target);
/*
 * 'sticky' may contain both readable and waiting
 * ('from') replicas. To avoid failure/retry,
 * choose only the readable. If there is only
 * an incomplete source, then use it tentatively.
 */
Set<String> strictlyReadable = verifier.areReadable(sticky, verified);
if (viableSource == null) {
source = locationSelector.selectCopySource(operation, strictlyReadable.isEmpty() ? sticky : strictlyReadable);
}
LOGGER.trace("source: {}", source);
type = Type.COPY;
} else {
// Requirement already satisfied: nothing to do.
LOGGER.trace("Nothing to do, VOID operation for {}", pnfsId);
fileOpMap.voidOperation(pnfsId);
return Type.VOID;
}
} catch (LocationSelectionException e) {
// Selection failed entirely; report failure and void the operation.
CacheException exception = CacheExceptionUtils.getCacheException(CacheException.DEFAULT_ERROR_CODE, FileTaskCompletionHandler.VERIFY_FAILURE_MESSAGE, pnfsId, Type.VOID, null, e);
completionHandler.taskFailed(pnfsId, exception);
return Type.VOID;
}
// Record the chosen endpoints (either may be null if preset and viable).
fileOpMap.updateOperation(pnfsId, source, target);
return type;
}
Use of diskCacheV111.util.PnfsId in the dCache project.
Class FileOperationHandler, method handleMakeOneCopy:
/**
 * <p>Wraps the creation of a migration {@link Task}. The task is given
 * a static single pool list and a degenerate selection strategy, since the target has already
 * been selected by this handler.</p>
 *
 * @param attributes file attributes carrying the pnfsid of the file to copy.
 * @return the configured migration task, or {@code null} if PoolManager
 *         info for the target could not be obtained (the failure is
 *         reported to the completion handler in that case).
 */
public Task handleMakeOneCopy(FileAttributes attributes) {
    PnfsId pnfsId = attributes.getPnfsId();
    FileOperation operation = fileOpMap.getOperation(pnfsId);
    LOGGER.trace("Configuring migration task for {}.", pnfsId);
    StaticSinglePoolList list;
    try {
        list = new StaticSinglePoolList(poolInfoMap.getPoolManagerInfo(operation.getTarget()));
    } catch (NoSuchElementException e) {
        CacheException exception = CacheExceptionUtils.getCacheException(
              CacheException.NO_POOL_CONFIGURED,
              "Copy %s, could not get PoolManager info for %s: %s.",
              pnfsId, Type.COPY, poolInfoMap.getPool(operation.getTarget()), e);
        completionHandler.taskFailed(pnfsId, exception);
        return null;
    }
    String source = poolInfoMap.getPool(operation.getSource());
    // One argument per line so each flag's comment unambiguously names it.
    TaskParameters taskParameters = new TaskParameters(
          pools,
          null,                  // PnfsManager cell stub not used
          pinManager,
          migrationTaskService,
          taskSelectionStrategy,
          list,
          false,                 // eager; update should not happen
          false,                 // isMetaOnly; just move the metadata
          false,                 // compute checksum on update; should not happen
          false,                 // force copy even if pool not readable
          true,                  // maintain atime
          1);
    // Collections.emptyList() instead of the raw-typed EMPTY_LIST constant.
    Task task = new Task(taskParameters, completionHandler, source, pnfsId,
          ReplicaState.CACHED, ONLINE_STICKY_RECORD, Collections.emptyList(),
          attributes, attributes.getAccessTime());
    if (ACTIVITY_LOGGER.isInfoEnabled()) {
        List<String> allPools = list.getPools().stream()
              .map(PoolManagerPoolInformation::getName)
              .collect(Collectors.toList());
        ACTIVITY_LOGGER.info("Initiating replication of {} from {} to"
              + " pools: {}, offline: {}", pnfsId, source, allPools, list.getOfflinePools());
    }
    LOGGER.trace("Created migration task for {}: source {}, list {}.", pnfsId, source, list);
    return task;
}
Use of diskCacheV111.util.PnfsId in the dCache project.
Class FileOperationHandler, method handleRemoveOneCopy:
/**
 * <p>Calls {@link #removeTarget(PnfsId, String)} and then reports
 * success or failure to the completion handler.</p>
 *
 * @param attributes file attributes carrying the pnfsid of the replica to remove.
 */
public void handleRemoveOneCopy(FileAttributes attributes) {
    PnfsId pnfsId = attributes.getPnfsId();
    FileOperation operation = fileOpMap.getOperation(pnfsId);
    try {
        String target = poolInfoMap.getPool(operation.getTarget());
        LOGGER.trace("handleRemoveOneCopy {}, removing {}.", pnfsId, target);
        removeTarget(pnfsId, target);
        /*
         * Report completion only when the removal succeeded. Previously
         * this call sat after the catch block, so a failed task was
         * reported as both failed AND completed. This now matches the
         * success/failure reporting in handlePromoteToSticky.
         */
        completionHandler.taskCompleted(pnfsId);
    } catch (CacheException e) {
        completionHandler.taskFailed(pnfsId, e);
    }
}
Use of diskCacheV111.util.PnfsId in the dCache project.
Class ResilienceMessageHandler, method messageArrived:
/**
 * <p>Returns whether the replica exists, the status of its system sticky
 * flag, and whether its state allows for reading and removal.</p>
 *
 * @param message query for a single replica's status on this pool.
 * @return an asynchronous reply; the lookup runs on the handler's executor.
 */
public Reply messageArrived(ReplicaStatusMessage message) {
    MessageReply<Message> reply = new MessageReply<>();
    executor.execute(() -> {
        PnfsId pnfsId = message.getPnfsId();
        if (pnfsId == null) {
            reply.fail(message, new IllegalArgumentException("no pnfsid"));
            return;
        }
        try {
            CacheEntry cacheEntry = repository.getEntry(pnfsId);
            message.setExists(true);
            // Map the repository state onto the message's capability flags.
            switch (cacheEntry.getState()) {
                case FROM_CLIENT:
                case FROM_POOL:
                case FROM_STORE:
                    message.setWaiting(true);
                    break;
                case CACHED:
                    message.setReadable(true);
                    message.setRemovable(true);
                    break;
                case BROKEN:
                    message.setBroken(true);
                    message.setRemovable(true);
                    break;
                case PRECIOUS:
                    message.setReadable(true);
                    break;
                default:
                    break;
            }
            // System-sticky means a non-expiring sticky record owned by 'system'.
            boolean systemSticky = cacheEntry.getStickyRecords().stream()
                  .anyMatch(record -> record.owner().equals(SYSTEM_OWNER)
                        && record.isNonExpiring());
            if (systemSticky) {
                message.setSystemSticky(true);
            }
            reply.reply(message);
        } catch (FileNotInCacheException e) {
            // Not an error: reply with exists == false.
            reply.reply(message);
        } catch (Exception e) {
            reply.fail(message, e);
        }
    });
    return reply;
}
Aggregations