Use of diskCacheV111.util.CacheException in project dcache by dCache.
The class StrategyIdMapper, method principalToGid.
@Override
public int principalToGid(String name) {
    try {
        String principal = stripDomain(name);
        Principal gidPrincipal = _remoteLoginStrategy.map(new GroupNamePrincipal(principal));
        if (gidPrincipal instanceof GidPrincipal) {
            return (int) ((GidPrincipal) gidPrincipal).getGid();
        }
    } catch (CacheException e) {
        LOGGER.debug("Failed to map principal {} : {}", name, e);
    }
    return tryNumericIfAllowed(name);
}
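The helper stripDomain is not part of this excerpt. As a rough, hypothetical sketch (not the actual dCache implementation), a domain-stripping helper for NFSv4-style "name@domain" principals could look like this:

static String stripDomain(String principal) {
    // keep only the local part of an NFSv4-style "name@domain" principal
    int at = principal.indexOf('@');
    return at == -1 ? principal : principal.substring(0, at);
}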
Use of diskCacheV111.util.CacheException in project dcache by dCache.
The class StrategyIdMapper, method principalToUid.
@Override
public int principalToUid(String name) {
    try {
        String principal = stripDomain(name);
        Principal uidPrincipal = _remoteLoginStrategy.map(new UserNamePrincipal(principal));
        if (uidPrincipal instanceof UidPrincipal) {
            return (int) ((UidPrincipal) uidPrincipal).getUid();
        }
    } catch (CacheException e) {
        LOGGER.debug("Failed to map principal {} : {}", name, e);
    }
    return tryNumericIfAllowed(name);
}
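Both mappers fall back to tryNumericIfAllowed when the login strategy cannot resolve the name. A minimal sketch of such a fallback, with a configuration flag and a "nobody" id passed as hypothetical parameters (the real dCache helper uses its own fields):

static int tryNumericIfAllowed(String name, boolean allowNumeric, int nobodyId) {
    if (allowNumeric) {
        try {
            // interpret the principal name itself as a numeric uid/gid
            return Integer.parseInt(name);
        } catch (NumberFormatException e) {
            // not a number; fall through to the default id
        }
    }
    return nobodyId;
}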
Use of diskCacheV111.util.CacheException in project dcache by dCache.
The class StrategyIdMapper, method login.
@Override
public Subject login(RpcTransport xt, GSSContext gssc) {
    try {
        KerberosPrincipal principal = new KerberosPrincipal(gssc.getSrcName().toString());
        Subject in = new Subject();
        in.getPrincipals().add(principal);
        in.getPrincipals().add(new Origin(xt.getRemoteSocketAddress().getAddress()));
        in.setReadOnly();
        return populateWithUnixPrincipals(_remoteLoginStrategy.login(in).getSubject());
    } catch (GSSException | CacheException e) {
        LOGGER.debug("Failed to login for : {} : {}", gssc, e.toString());
    }
    return Subjects.NOBODY;
}
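The mapped Subject returned here is expected to carry dCache principals such as org.dcache.auth.UidPrincipal, the same class used in principalToUid above. A hedged sketch of how a caller could read the numeric uid back out of it (hypothetical helper, not part of StrategyIdMapper):

import java.util.OptionalLong;
import javax.security.auth.Subject;
import org.dcache.auth.UidPrincipal;

static OptionalLong uidOf(Subject subject) {
    // scan the subject's principals for a UidPrincipal and return its uid, if any
    return subject.getPrincipals().stream()
          .filter(UidPrincipal.class::isInstance)
          .mapToLong(p -> ((UidPrincipal) p).getUid())
          .findFirst();
}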
Use of diskCacheV111.util.CacheException in project dcache by dCache.
The class RemoteHttpDataTransferProtocol, method runIO.
@Override
public void runIO(FileAttributes attributes, RepositoryChannel channel, ProtocolInfo genericInfo,
      Set<? extends OpenOption> access) throws CacheException, IOException, InterruptedException {
    LOGGER.debug("info={}, attributes={}, access={}", genericInfo, attributes, access);
    RemoteHttpDataTransferProtocolInfo info = (RemoteHttpDataTransferProtocolInfo) genericInfo;
    _channel = new MoverChannel<>(access, attributes, info, channel);
    channel.optionallyAs(ChecksumChannel.class).ifPresent(c -> {
        info.getDesiredChecksum().ifPresent(t -> {
            try {
                c.addType(t);
            } catch (IOException e) {
                LOGGER.warn("Unable to calculate checksum {}: {}", t, messageOrClassName(e));
            }
        });
    });
    try {
        if (access.contains(StandardOpenOption.WRITE)) {
            receiveFile(info);
        } else {
            checkThat(!info.isVerificationRequired() || attributes.isDefined(CHECKSUM),
                  "checksum verification failed: file has no checksum");
            sendAndCheckFile(info);
        }
    } finally {
        afterTransfer();
    }
}
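The checkThat precondition helper is not shown in this excerpt. A plausible sketch, assuming it simply raises a CacheException carrying the supplied message when the condition does not hold (the actual dCache helper may differ):

import diskCacheV111.util.CacheException;

static void checkThat(boolean condition, String message) throws CacheException {
    // turn a failed precondition into a CacheException for the mover framework
    if (!condition) {
        throw new CacheException(message);
    }
}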
Use of diskCacheV111.util.CacheException in project dcache by dCache.
The class Job, method schedule.
/**
 * Schedules jobs, depending on the current state and available resources.
 * <p>
 * Closely coupled to the <code>setState</code> method.
 *
 * @see #setState(State)
 */
@GuardedBy("_lock")
private void schedule() {
    if (_state == State.CANCELLING && _running.isEmpty()) {
        setState(State.CANCELLED);
    } else if (_state != State.INITIALIZING && _state != State.NEW && !_definition.isPermanent
          && _queued.isEmpty() && _running.isEmpty()) {
        setState(State.FINISHED);
    } else if (_state == State.STOPPING && _running.isEmpty()) {
        setState(State.FINISHED);
    } else if (_state == State.RUNNING
          && (!_definition.sourceList.isValid() || !_definition.poolList.isValid())) {
        setState(State.SLEEPING);
    } else if (_state == State.RUNNING) {
        Iterator<PnfsId> i = _queued.iterator();
        while ((_running.size() < _concurrency) && i.hasNext()) {
            Expression stopWhen = _definition.stopWhen;
            if (stopWhen != null && evaluateLifetimePredicate(stopWhen)) {
                stop();
                break;
            }
            Expression pauseWhen = _definition.pauseWhen;
            if (pauseWhen != null && evaluateLifetimePredicate(pauseWhen)) {
                pause();
                break;
            }
            PnfsId pnfsId = i.next();
            if (!_context.lock(pnfsId)) {
                addError(new Error(0, pnfsId, "File is locked"));
                continue;
            }
            try {
                i.remove();
                Repository repository = _context.getRepository();
                CacheEntry entry = repository.getEntry(pnfsId);
                Task task = new Task(_taskParameters, this, _context.getPoolName(),
                      entry.getPnfsId(), getTargetState(entry), getTargetStickyRecords(entry),
                      getPins(entry), entry.getFileAttributes(), entry.getLastAccessTime());
                _running.put(pnfsId, task);
                _statistics.addAttempt();
                task.run();
            } catch (FileNotInCacheException e) {
                _sizes.remove(pnfsId);
            } catch (CacheException e) {
                LOGGER.error("Migration job failed to read entry: {}", e.getMessage());
                setState(State.FAILED);
                break;
            } catch (InterruptedException e) {
                LOGGER.error("Migration job was interrupted: {}", e.getMessage());
                setState(State.FAILED);
                break;
            } finally {
                if (!_running.containsKey(pnfsId)) {
                    _context.unlock(pnfsId);
                }
            }
        }
        if (_running.isEmpty()) {
            if (!_definition.isPermanent && _queued.isEmpty()) {
                setState(State.FINISHED);
            } else {
                setState(State.SLEEPING);
            }
        }
    }
}
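Note the catch order in the loop above: FileNotInCacheException is a subclass of CacheException, so the specific case (a replica that has already disappeared from the pool) is handled first and silently dropped, while any other CacheException fails the whole migration job. A simplified, hedged illustration of that distinction (hypothetical helper, not part of the Job class):

import diskCacheV111.util.CacheException;
import diskCacheV111.util.FileNotInCacheException;

static boolean isFatalReadFailure(CacheException e) {
    // a missing replica just means there is nothing left to migrate on this pool;
    // every other repository error is treated as fatal for the migration job
    return !(e instanceof FileNotInCacheException);
}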