Use of diskCacheV111.util.PnfsId in the dCache project.
The class Job, method schedule().
/**
 * Schedules jobs, depending on the current state and available resources.
 * <p>
 * Closely coupled to the <code>setState</code> method.
 * <p>
 * Caller must hold {@code _lock} (see {@code @GuardedBy}). The method both
 * drives terminal state transitions (CANCELLED/FINISHED/SLEEPING) and, while
 * RUNNING, starts queued tasks up to the concurrency limit.
 *
 * @see setState
 */
@GuardedBy("_lock")
private void schedule() {
// A cancelling job becomes CANCELLED once the last running task has drained.
if (_state == State.CANCELLING && _running.isEmpty()) {
setState(State.CANCELLED);
// A non-permanent job with nothing queued or running (and past startup) is done.
} else if (_state != State.INITIALIZING && _state != State.NEW && !_definition.isPermanent && _queued.isEmpty() && _running.isEmpty()) {
setState(State.FINISHED);
// A stopping job finishes once its running tasks have drained.
} else if (_state == State.STOPPING && _running.isEmpty()) {
setState(State.FINISHED);
// Cannot make progress while either pool list is stale; sleep until refreshed.
} else if (_state == State.RUNNING && (!_definition.sourceList.isValid() || !_definition.poolList.isValid())) {
setState(State.SLEEPING);
} else if (_state == State.RUNNING) {
// Start queued tasks until the concurrency limit is reached or the queue is empty.
Iterator<PnfsId> i = _queued.iterator();
while ((_running.size() < _concurrency) && i.hasNext()) {
// Lifetime predicates are re-evaluated before each task start; either one
// firing aborts the scheduling loop.
Expression stopWhen = _definition.stopWhen;
if (stopWhen != null && evaluateLifetimePredicate(stopWhen)) {
stop();
break;
}
Expression pauseWhen = _definition.pauseWhen;
if (pauseWhen != null && evaluateLifetimePredicate(pauseWhen)) {
pause();
break;
}
PnfsId pnfsId = i.next();
// Skip files already locked elsewhere; note the file stays in _queued
// (only i.remove() below dequeues). The 0 appears to stand in for "no
// task id assigned yet" -- TODO confirm against Error's contract.
if (!_context.lock(pnfsId)) {
addError(new Error(0, pnfsId, "File is locked"));
continue;
}
try {
i.remove();
Repository repository = _context.getRepository();
CacheEntry entry = repository.getEntry(pnfsId);
Task task = new Task(_taskParameters, this, _context.getPoolName(), entry.getPnfsId(), getTargetState(entry), getTargetStickyRecords(entry), getPins(entry), entry.getFileAttributes(), entry.getLastAccessTime());
_running.put(pnfsId, task);
_statistics.addAttempt();
task.run();
} catch (FileNotInCacheException e) {
// Replica vanished between queueing and scheduling; drop its recorded size.
_sizes.remove(pnfsId);
} catch (CacheException e) {
LOGGER.error("Migration job failed to read entry: {}", e.getMessage());
setState(State.FAILED);
break;
} catch (InterruptedException e) {
// NOTE(review): the interrupt status is not restored here
// (Thread.currentThread().interrupt()) -- confirm this is intentional.
LOGGER.error("Migration job was interrupted: {}", e.getMessage());
setState(State.FAILED);
break;
} finally {
// Release the file lock unless the task was registered in _running,
// in which case the running task now owns the lock.
if (!_running.containsKey(pnfsId)) {
_context.unlock(pnfsId);
}
}
}
// Nothing running after the loop: either the job is complete or it waits
// for new work / valid pool lists.
if (_running.isEmpty()) {
if (!_definition.isPermanent && _queued.isEmpty()) {
setState(State.FINISHED);
} else {
setState(State.SLEEPING);
}
}
}
}
Use of diskCacheV111.util.PnfsId in the dCache project.
The class Job, method add().
/**
 * Queues a new task for the given cache entry.
 * <p>
 * A no-op when a task for the same file is already queued or running;
 * otherwise the file is queued, its replica size recorded, and the
 * scheduler is kicked. Caller must hold {@code _lock}.
 */
@GuardedBy("_lock")
private void add(CacheEntry entry) {
    PnfsId id = entry.getPnfsId();
    if (_queued.contains(id) || _running.containsKey(id)) {
        return; // this file is already being tracked by the job
    }
    long replicaSize = entry.getReplicaSize();
    _queued.add(id);
    _sizes.put(id, replicaSize);
    _statistics.addToTotal(replicaSize);
    schedule();
}
Use of diskCacheV111.util.PnfsId in the dCache project.
The class Job, method entryChanged().
/**
 * Reacts to a repository entry change event.
 * <p>
 * If the entry no longer matches the job's selection criteria it is dropped
 * from the job, unless a transfer for it is currently in flight. For
 * permanent jobs, an entry that newly started matching is added instead.
 */
private void entryChanged(EntryChangeEvent event) {
    PnfsId id = event.getPnfsId();
    CacheEntry newEntry = event.getNewEntry();
    if (accept(newEntry)) {
        // Only permanent jobs pick up entries that newly match the criteria.
        if (_definition.isPermanent && !accept(event.getOldEntry())) {
            _lock.lock();
            try {
                add(newEntry);
            } finally {
                _lock.unlock();
            }
        }
    } else {
        _lock.lock();
        try {
            // A running transfer is left alone; only idle files are dropped.
            if (!_running.containsKey(id)) {
                String type = (event instanceof StickyChangeEvent) ? "sticky" : "atime";
                remove(id, type + " changed, so file no longer matches criteria");
            }
        } finally {
            _lock.unlock();
        }
    }
}
Use of diskCacheV111.util.PnfsId in the dCache project.
The class Job, method taskFailed().
/**
 * Callback from task: Task failed, reschedule it.
 * <p>
 * The file is returned to the queue and its lock released, but only when
 * the failing task is still the one registered for that file. A RUNNING
 * job backs off to SLEEPING; otherwise the scheduler is re-run. The error
 * is recorded in either case. Note {@code rc} is not used here.
 */
@Override
public void taskFailed(Task task, int rc, String msg) {
    _lock.lock();
    try {
        PnfsId id = task.getPnfsId();
        // Unconditionally drop whatever task is registered for this file;
        // requeue and unlock only if it was this task.
        Task registered = _running.remove(id);
        if (registered == task) {
            _queued.add(id);
            _context.unlock(id);
        }
        if (_state != State.RUNNING) {
            schedule();
        } else {
            setState(State.SLEEPING);
        }
        addError(new Error(task.getId(), id, msg));
    } finally {
        _lock.unlock();
    }
}
Use of diskCacheV111.util.PnfsId in the dCache project.
The class NearlineStorageHandler, method stateChanged().
/**
 * Repository state listener: once a replica has been removed, any pending
 * stage and flush requests for it are cancelled.
 */
@Override
public void stateChanged(StateChangeEvent event) {
    if (event.getNewState() != ReplicaState.REMOVED) {
        return; // only removal is of interest here
    }
    PnfsId id = event.getPnfsId();
    stageRequests.cancel(id);
    flushRequests.cancel(id);
}
Aggregations