Usage of org.dcache.resilience.data.FileOperation in the dCache project:
class FileOperationHandler, method handleStaging.
/**
 * <p>Called when there are no available replicas, but the file
 * can be retrieved from an HSM.</p>
 *
 * <p>Issues a fire-and-forget request; the task is considered complete
 * as soon as the request has been sent.</p>
 *
 * <p>When staging actually completes on the PoolManager end, the new
 * cache location message should be processed by Resilience as a new
 * FileOperation.</p>
 *
 * <p>Should staging not complete before the pool is once again scanned,
 * PoolManager should collapse the repeated staging request.</p>
 */
public void handleStaging(PnfsId pnfsId, ResilientFileTask task) {
    try {
        FileOperation operation = fileOpMap.getOperation(pnfsId);
        FileAttributes attributes = namespace.getRequiredAttributesForStaging(pnfsId);
        String poolGroup = poolInfoMap.getGroup(operation.getPoolGroup());
        LOGGER.trace("handleStaging {}, pool group {}.", pnfsId, poolGroup);
        // Schedule immediately; the lambda only builds and sends the
        // select-read-pool request, then marks the task complete.
        migrationTaskService.schedule(() -> {
            try {
                PoolMgrSelectReadPoolMsg request =
                      new PoolMgrSelectReadPoolMsg(attributes, getProtocolInfo(), null);
                request.setSubject(Subjects.ROOT);
                request.setPoolGroup(poolGroup);
                CellMessage envelope =
                      new CellMessage(new CellPath(poolManagerAddress), request);
                ACTIVITY_LOGGER.info("Staging {}", pnfsId);
                endpoint.sendMessage(envelope);
                LOGGER.trace("handleStaging, sent select read pool message "
                      + "for {} to poolManager.", pnfsId);
                completionHandler.taskCompleted(pnfsId);
            } catch (URISyntaxException e) {
                // getProtocolInfo() can fail to build the HTTP protocol URI.
                completionHandler.taskFailed(pnfsId,
                      CacheExceptionUtils.getCacheException(CacheException.INVALID_ARGS,
                            "could not construct HTTP protocol: %s.", pnfsId,
                            Type.WAIT_FOR_STAGE, e.getMessage(), null));
            }
        }, 0, TimeUnit.MILLISECONDS);
    } catch (CacheException ce) {
        completionHandler.taskFailed(pnfsId, ce);
    }
}
Usage of org.dcache.resilience.data.FileOperation in the dCache project:
class FileOperationHandler, method handlePromoteToSticky.
/**
 * <p>Delegates to {@code promoteToSticky} for the operation's target pool
 * and reports success or failure to the completion handler.</p>
 */
public void handlePromoteToSticky(FileAttributes attributes) {
    PnfsId pnfsId = attributes.getPnfsId();
    // Resolve the target pool name from the current operation's target index.
    String target = poolInfoMap.getPool(fileOpMap.getOperation(pnfsId).getTarget());
    try {
        promoteToSticky(pnfsId, target);
    } catch (CacheException e) {
        completionHandler.taskFailed(pnfsId, e);
        return;
    }
    completionHandler.taskCompleted(pnfsId);
}
Usage of org.dcache.resilience.data.FileOperation in the dCache project:
class FileOperationHandler, method handleMakeOneCopy.
/**
 * <p>Wraps the creation of a migration {@link Task}. The task is given
 * a static single pool list and a degenerate selection strategy, since the target has already
 * been selected by this handler.</p>
 *
 * @param attributes of the file to replicate; must carry the PnfsId.
 * @return the configured migration task, or {@code null} if PoolManager
 *         info for the target pool could not be obtained (failure is
 *         reported to the completion handler in that case).
 */
public Task handleMakeOneCopy(FileAttributes attributes) {
    PnfsId pnfsId = attributes.getPnfsId();
    FileOperation operation = fileOpMap.getOperation(pnfsId);
    LOGGER.trace("Configuring migration task for {}.", pnfsId);
    StaticSinglePoolList list;
    try {
        list = new StaticSinglePoolList(poolInfoMap.getPoolManagerInfo(operation.getTarget()));
    } catch (NoSuchElementException e) {
        CacheException exception = CacheExceptionUtils.getCacheException(
              CacheException.NO_POOL_CONFIGURED,
              "Copy %s, could not get PoolManager info for %s: %s.",
              pnfsId, Type.COPY, poolInfoMap.getPool(operation.getTarget()), e);
        completionHandler.taskFailed(pnfsId, exception);
        return null;
    }
    String source = poolInfoMap.getPool(operation.getSource());
    TaskParameters taskParameters = new TaskParameters(pools,
          null,   // PnfsManager cell stub not used
          pinManager,
          migrationTaskService,
          taskSelectionStrategy,
          list,
          false,  // eager; update should not happen
          false,  // isMetaOnly; just move the metadata
          false,  // compute checksum on update; should not happen
          false,  // force copy even if pool not readable
          false,  // maintain atime
          true,
          1);
    // Collections.emptyList() replaces the raw-typed Collections.EMPTY_LIST
    // (avoids an unchecked conversion while producing the same empty list).
    Task task = new Task(taskParameters, completionHandler, source, pnfsId,
          ReplicaState.CACHED, ONLINE_STICKY_RECORD, Collections.emptyList(),
          attributes, attributes.getAccessTime());
    if (ACTIVITY_LOGGER.isInfoEnabled()) {
        List<String> allPools = list.getPools().stream()
              .map(PoolManagerPoolInformation::getName)
              .collect(Collectors.toList());
        ACTIVITY_LOGGER.info("Initiating replication of {} from {} to"
              + " pools: {}, offline: {}", pnfsId, source, allPools,
              list.getOfflinePools());
    }
    LOGGER.trace("Created migration task for {}: source {}, list {}.", pnfsId, source, list);
    return task;
}
Usage of org.dcache.resilience.data.FileOperation in the dCache project:
class FileOperationHandler, method handleRemoveOneCopy.
/**
 * <p>Calls {@link #removeTarget(PnfsId, String)} and then reports
 * success or failure to the completion handler.</p>
 *
 * @param attributes of the file whose replica is to be removed; must
 *                   carry the PnfsId.
 */
public void handleRemoveOneCopy(FileAttributes attributes) {
    PnfsId pnfsId = attributes.getPnfsId();
    FileOperation operation = fileOpMap.getOperation(pnfsId);
    try {
        String target = poolInfoMap.getPool(operation.getTarget());
        LOGGER.trace("handleRemoveOneCopy {}, removing {}.", pnfsId, target);
        removeTarget(pnfsId, target);
    } catch (CacheException e) {
        // Report the failure and stop: previously taskCompleted was also
        // invoked after taskFailed, signalling both outcomes for one task.
        completionHandler.taskFailed(pnfsId, e);
        return;
    }
    completionHandler.taskCompleted(pnfsId);
}
Usage of org.dcache.resilience.data.FileOperation in the dCache project:
class CheckpointUtils, method save.
/**
 * <p>Since we use checkpointing as an approximation,
 * the fact that the ConcurrentMap (internal to the deque class) may be dirty and that it is not
 * locked should not matter greatly.</p>
 *
 * @param checkpointFilePath where to write.
 * @param poolInfoMap for translation of indices to names.
 * @param iterator from a ConcurrentHashMap implementation of the index.
 * @return number of records written
 */
public static long save(String checkpointFilePath, PoolInfoMap poolInfoMap, Iterator<FileOperation> iterator) {
    File current = new File(checkpointFilePath);
    File old = new File(checkpointFilePath + "-old");
    // Keep the previous checkpoint as a "-old" backup; renameTo reports
    // failure via its return value, which was previously ignored.
    if (current.exists() && !current.renameTo(old)) {
        LOGGER.warn("Unable to back up current checkpoint file to {}.", old);
    }
    // Local, single-threaded counter; no need for AtomicLong here.
    long count = 0;
    StringBuilder builder = new StringBuilder();
    // NOTE(review): FileWriter uses the platform default charset — confirm
    // whether the checkpoint format requires an explicit encoding.
    try (PrintWriter fw = new PrintWriter(new FileWriter(checkpointFilePath, false))) {
        while (iterator.hasNext()) {
            FileOperation operation = iterator.next();
            // toString(...) returns false for operations that should not
            // be checkpointed; only successful serializations are counted.
            if (toString(operation, builder, poolInfoMap)) {
                fw.println(builder.toString());
                ++count;
                builder.setLength(0);
            }
        }
    } catch (FileNotFoundException e) {
        LOGGER.error("Unable to save checkpoint file: {}", e.getMessage());
    } catch (IOException e) {
        LOGGER.error("Unrecoverable error during save of checkpoint file: {}", e.getMessage());
    }
    return count;
}
End of aggregated usage examples.