Use of org.dcache.pool.repository.Repository in project dcache by dCache: the class Job, method schedule.
/**
 * Schedules jobs, depending on the current state and available resources.
 * <p>
 * Closely coupled to the <code>setState</code> method.
 *
 * @see #setState
 */
@GuardedBy("_lock")
private void schedule() {
    if (_state == State.CANCELLING && _running.isEmpty()) {
        setState(State.CANCELLED);
    } else if (_state != State.INITIALIZING && _state != State.NEW
            && !_definition.isPermanent && _queued.isEmpty() && _running.isEmpty()) {
        setState(State.FINISHED);
    } else if (_state == State.STOPPING && _running.isEmpty()) {
        setState(State.FINISHED);
    } else if (_state == State.RUNNING
            && (!_definition.sourceList.isValid() || !_definition.poolList.isValid())) {
        setState(State.SLEEPING);
    } else if (_state == State.RUNNING) {
        Iterator<PnfsId> i = _queued.iterator();
        while ((_running.size() < _concurrency) && i.hasNext()) {
            Expression stopWhen = _definition.stopWhen;
            if (stopWhen != null && evaluateLifetimePredicate(stopWhen)) {
                stop();
                break;
            }
            Expression pauseWhen = _definition.pauseWhen;
            if (pauseWhen != null && evaluateLifetimePredicate(pauseWhen)) {
                pause();
                break;
            }
            PnfsId pnfsId = i.next();
            if (!_context.lock(pnfsId)) {
                addError(new Error(0, pnfsId, "File is locked"));
                continue;
            }
            try {
                i.remove();
                Repository repository = _context.getRepository();
                CacheEntry entry = repository.getEntry(pnfsId);
                Task task = new Task(_taskParameters, this, _context.getPoolName(),
                        entry.getPnfsId(), getTargetState(entry),
                        getTargetStickyRecords(entry), getPins(entry),
                        entry.getFileAttributes(), entry.getLastAccessTime());
                _running.put(pnfsId, task);
                _statistics.addAttempt();
                task.run();
            } catch (FileNotInCacheException e) {
                _sizes.remove(pnfsId);
            } catch (CacheException e) {
                LOGGER.error("Migration job failed to read entry: {}", e.getMessage());
                setState(State.FAILED);
                break;
            } catch (InterruptedException e) {
                LOGGER.error("Migration job was interrupted: {}", e.getMessage());
                setState(State.FAILED);
                break;
            } finally {
                if (!_running.containsKey(pnfsId)) {
                    _context.unlock(pnfsId);
                }
            }
        }
        if (_running.isEmpty()) {
            if (!_definition.isPermanent && _queued.isEmpty()) {
                setState(State.FINISHED);
            } else {
                setState(State.SLEEPING);
            }
        }
    }
}
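The per-file lock discipline here is subtle: the lock taken by _context.lock is released in the finally block only if the task never made it into _running; a registered task keeps the lock until it completes. Below is a minimal self-contained sketch of that pattern, using String ids, Runnable tasks, and a task factory as hypothetical stand-ins for PnfsId, Task, and repository.getEntry (not the dCache API).

import java.util.Map;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.function.Supplier;

// Sketch only: String ids and Runnable tasks stand in for PnfsId and Task.
class LockDiscipline {
    private final Set<String> locked = ConcurrentHashMap.newKeySet();
    private final Map<String, Runnable> running = new ConcurrentHashMap<>();

    boolean lock(String id) {
        return locked.add(id); // false if already claimed, like _context.lock
    }

    void unlock(String id) {
        locked.remove(id);
    }

    void submit(String id, Supplier<Runnable> taskFactory) {
        if (!lock(id)) {
            return; // schedule() records an error and moves on instead
        }
        try {
            Runnable task = taskFactory.get(); // may throw, like repository.getEntry
            running.put(id, task);
            task.run();
        } finally {
            // Release the lock only if the task never reached the running
            // map; a registered task holds the lock until it finishes.
            if (!running.containsKey(id)) {
                unlock(id);
            }
        }
    }
}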
Use of org.dcache.pool.repository.Repository in project dcache by dCache: the class Job, method populate.
/**
 * Scans the repository for files and adds corresponding tasks to the job.
 */
private void populate() throws InterruptedException {
    try {
        Repository repository = _context.getRepository();
        Iterable<PnfsId> files = repository;
        if (_definition.comparator != null) {
            List<PnfsId> all = new ArrayList<>();
            for (PnfsId pnfsId : files) {
                all.add(pnfsId);
            }
            Comparator<PnfsId> order = new CacheEntryOrder(repository, _definition.comparator);
            Collections.sort(all, order);
            files = all;
        }
        for (PnfsId pnfsId : files) {
            try {
                _lock.lock();
                try {
                    if (_state != State.INITIALIZING) {
                        break;
                    }
                    CacheEntry entry = repository.getEntry(pnfsId);
                    if (accept(entry)) {
                        add(entry);
                    }
                } finally {
                    _lock.unlock();
                }
            } catch (FileNotInCacheException e) {
                // File was removed before we got to it - not a problem.
            } catch (CacheException e) {
                LOGGER.error("Failed to load entry: {}", e.getMessage());
            }
        }
    } catch (IllegalStateException e) {
        // This means the repository was not initialized yet. Not a big
        // problem, since we will be notified about each entry during
        // initialization.
    }
}
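Note the locking granularity: _lock is acquired and released once per file rather than held across the whole scan, so a concurrent state change (for example a cancel) can stop the scan between entries. A self-contained sketch of that pattern, with illustrative names in place of the dCache types:

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.locks.ReentrantLock;

// Sketch only: String file names stand in for PnfsId, and a boolean
// flag stands in for the job state machine.
class ScanSketch {
    private final ReentrantLock lock = new ReentrantLock();
    private boolean initializing = true;
    private final List<String> accepted = new ArrayList<>();

    void scan(Iterable<String> files) {
        for (String file : files) {
            lock.lock();
            try {
                if (!initializing) {
                    break; // state changed concurrently; abandon the scan
                }
                accepted.add(file);
            } finally {
                lock.unlock();
            }
        }
    }

    void cancel() {
        lock.lock();
        try {
            initializing = false;
        } finally {
            lock.unlock();
        }
    }
}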
Use of org.dcache.pool.repository.Repository in project dcache by dCache: the class Job, method applySourceMode.
/**
 * Applies the source mode update to the replica.
 */
private void applySourceMode(PnfsId pnfsId) {
    try {
        CacheEntryMode mode = _definition.sourceMode;
        Repository repository = _context.getRepository();
        CacheEntry entry = repository.getEntry(pnfsId);
        switch (mode.state) {
            case SAME:
                applySticky(pnfsId, mode.stickyRecords);
                break;
            case DELETE:
                if (!isPinned(entry)) {
                    repository.setState(pnfsId, ReplicaState.REMOVED,
                            "migration job deleting source");
                    break;
                }
                // Fall through
            case REMOVABLE:
                List<StickyRecord> list = mode.stickyRecords;
                applySticky(pnfsId, list);
                for (StickyRecord record : entry.getStickyRecords()) {
                    String owner = record.owner();
                    if (!isPin(record) && !containsOwner(list, owner)) {
                        repository.setSticky(pnfsId, owner, 0, true);
                    }
                }
                repository.setState(pnfsId, ReplicaState.CACHED,
                        "migration job making source removable");
                break;
            case CACHED:
                applySticky(pnfsId, mode.stickyRecords);
                repository.setState(pnfsId, ReplicaState.CACHED,
                        "migration job making source cached");
                break;
            case PRECIOUS:
                repository.setState(pnfsId, ReplicaState.PRECIOUS,
                        "migration job making source precious");
                applySticky(pnfsId, mode.stickyRecords);
                break;
        }
    } catch (FileNotInCacheException e) {
        // File was removed before we could update it. TODO: log it
    } catch (IllegalTransitionException e) {
        // File is likely about to be removed. TODO: log it
    } catch (CacheException e) {
        LOGGER.error("Migration job failed to update source mode: {}", e.getMessage());
        setState(State.FAILED);
    } catch (InterruptedException e) {
        LOGGER.error("Migration job was interrupted");
        setState(State.FAILED);
    }
}
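In the REMOVABLE branch, every sticky flag that is neither a pin nor present in the new sticky list is cleared by rewriting it with an expiry of 0. A sketch of that selection logic (Java 16+), assuming a simplified stand-in for StickyRecord and a hypothetical pin-owner convention; the real isPin and containsOwner helpers live elsewhere in Job and may differ:

import java.util.List;

// Sketch only: a simplified stand-in for org.dcache.pool.repository.StickyRecord.
record Sticky(String owner, long expiry) {}

class StickyCleanup {

    // Assumption for this sketch: pins are sticky records whose owner
    // carries a pin-manager prefix; the real isPin check may differ.
    static boolean isPin(Sticky r) {
        return r.owner().startsWith("pin:");
    }

    static boolean containsOwner(List<Sticky> list, String owner) {
        return list.stream().anyMatch(r -> r.owner().equals(owner));
    }

    // Owners whose sticky flags should be cleared (expiry set to 0).
    static List<String> ownersToClear(List<Sticky> current, List<Sticky> keep) {
        return current.stream()
                .filter(r -> !isPin(r) && !containsOwner(keep, r.owner()))
                .map(Sticky::owner)
                .toList();
    }
}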