Use of diskCacheV111.util.CacheException in the dCache project.
Class XrootdRedirectHandler, method conditionallyHandleThirdPartyRequest.
/**
 * <p>Special handling of third-party requests. Distinguishes among
 * several different cases for the open and either returns a response directly to the caller or
 * proceeds with the usual mover open and redirect to the pool by returning <code>null</code>.
 * Also verifies the rendezvous information in the case of the destination server contacting
 * dCache as source.</p>
 *
 * <p>With the modified TPC lite (delegation) protocol, there is no
 * need to wait for the rendezvous destination check by comparing the open from the source.</p>
 *
 * <p>There is also the case where no delegated proxy exists but
 * a different authentication protocol (like ZTN/scitokens) is being used. It seems that even
 * with delegation in this case the initiating client does not call open. A check for authz in
 * the opaque data has been added (03/21/2021).</p>
 *
 * @param req the xrootd open request being examined
 * @param loginSessionInfo source of the subject and restriction used for authorization
 * @param opaque the parsed opaque (CGI) data that accompanied the open request
 * @param fsPath the resolved dCache path of the file to open
 * @param remoteHost the host from which this open request arrived
 * @return a response to send directly to the caller, or {@code null} to signal that the
 *         normal mover open + pool redirect should proceed
 * @throws CacheException if reading is not permitted here, or the request metadata
 *         matches none of the recognized TPC cases
 * @throws XrootdException on xrootd-protocol-level errors raised by downstream calls
 * @throws ParseException on malformed request data raised by downstream calls
 */
private XrootdResponse<OpenRequest> conditionallyHandleThirdPartyRequest(OpenRequest req, LoginSessionInfo loginSessionInfo, Map<String, String> opaque, FsPath fsPath, String remoteHost) throws CacheException, XrootdException, ParseException {
// Reads must be allowed at this door at all; otherwise no TPC variant can work.
if (!_door.isReadAllowed(fsPath)) {
throw new PermissionDeniedCacheException("Read permission denied");
}
Subject subject = loginSessionInfo.getSubject();
Restriction restriction = loginSessionInfo.getRestriction();
/*
 * "tpc.stage=placement": a probe from the destination. Answer with the file
 * status and a placeholder file handle instead of starting a mover.
 */
if ("placement".equals(opaque.get("tpc.stage"))) {
FileStatus status = _door.getFileStatus(fsPath, subject, restriction, remoteHost);
int fd = _door.nextTpcPlaceholder();
_log.debug("placement response to {} sent to {} with fhandle {}.", req, remoteHost, fd);
return new OpenResponse(req, fd, null, null, status);
}
String tpcKey = opaque.get("tpc.key");
// No rendezvous key means this is an ordinary (non-TPC) open.
if (tpcKey == null) {
_log.debug("{} –– not a third-party request.", req);
// proceed as usual with mover + redirect
return null;
}
// An authz token in the opaque data (e.g. ZTN/scitokens) makes the
// rendezvous check unnecessary; see class comment dated 03/21/2021.
if (opaque.containsKey(Cgi.AUTHZ.key())) {
_log.debug("{} –– request contains authorization token.", req);
// proceed as usual with mover + redirect
return null;
}
enforceClientTlsIfDestinationRequiresItForTpc(opaque);
/*
 * Check the session for the delegated credential to avoid hanging
 * in the case that tpc cgi have been passed by the destination
 * server even with TPC with delegation.
 */
if (req.getSession().getDelegatedCredential() != null) {
_log.debug("{} –– third-party request with delegation.", req);
// proceed as usual with mover + redirect
return null;
}
String slfn = req.getPath();
XrootdTpcInfo info = _door.createOrGetRendezvousInfo(tpcKey);
/*
 * The request originated from the TPC destination server.
 * If the client has not yet opened the file here,
 * tells the destination to wait. If the verification, including
 * time to live, fails, the request is cancelled. Otherwise,
 * the destination is allowed to open the mover and get the
 * normal redirect response.
 *
 * Note that the tpc info is created by either the client or the
 * server, whichever gets here first. Verification of the key
 * itself is implicit (it has been found in the map); correctness is
 * further satisfied by matching org, host and file name.
 */
if (opaque.containsKey("tpc.org")) {
info.addInfoFromOpaque(slfn, opaque);
switch(info.verify(remoteHost, slfn, opaque.get("tpc.org"))) {
case READY:
_log.debug("Open request {} from destination server, info {}: " + "OK to proceed.", req, info);
/*
 * This means that the destination server open arrived
 * second, the client server open succeeded with
 * the correct permissions; proceed as usual
 * with mover + redirect.
 */
return null;
case PENDING:
_log.debug("Open request {} from destination server, info {}: " + "PENDING client open.", req, info);
/*
 * This means that the destination server open arrived
 * first; return a wait-retry reply.
 */
return new AwaitAsyncResponse<>(req, 3);
case CANCELLED:
String error = info.isExpired() ? "ttl expired" : "dst, path or org" + " did not match";
_log.warn("Open request {} from destination server, info {}: " + "CANCELLED: {}.", req, info, error);
// Release the placeholder file handle reserved for this rendezvous.
_door.removeTpcPlaceholder(info.getFd());
return withError(req, kXR_InvalidRequest, "tpc rendezvous for " + tpcKey + ": " + error);
case ERROR:
/*
 * This means that the destination server requested open
 * before the client did, and the client did not have
 * read permissions on this file.
 */
error = "invalid open request (file permissions).";
_log.warn("Open request {} from destination server, info {}: " + "ERROR: {}.", req, info, error);
_door.removeTpcPlaceholder(info.getFd());
return withError(req, kXR_InvalidRequest, "tpc rendezvous for " + tpcKey + ": " + error);
}
}
/*
 * The request originated from the TPC client, indicating door
 * is the source.
 */
if (opaque.containsKey("tpc.dst")) {
_log.debug("Open request {} from client to door as source, " + "info {}: OK.", req, info);
FileStatus status = _door.getFileStatus(fsPath, subject, restriction, remoteHost);
int flags = status.getFlags();
if ((flags & kXR_readable) != kXR_readable) {
/*
 * Update the info with ERROR, so when the destination checks
 * it, an error can be returned.
 */
info.setStatus(Status.ERROR);
return withError(req, kXR_InvalidRequest, "not allowed to read file.");
}
info.addInfoFromOpaque(slfn, opaque);
// Hand the placeholder fhandle to the client; the destination's
// matching open (tpc.org branch above) drives the actual transfer.
return new OpenResponse(req, info.getFd(), null, null, status);
}
/*
 * The request originated from the TPC client, indicating door
 * is the destination.
 *
 * First check for TLS capability if this is required.
 *
 * Remove the rendezvous info (not needed),
 * allow mover to start and redirect the client to the pool.
 *
 * It is not necessary to delegate the tpc information through the
 * protocol, particularly the rendezvous key, because it is part of
 * the opaque data, and if any of the opaque tpc info is missing
 * from redirected call to the pool, the transfer will fail.
 *
 * However, the calling method will need to fetch a delegated
 * proxy credential and add that to the protocol.
 */
if (opaque.containsKey("tpc.src")) {
_log.debug("Open request {} from client to door as destination: OK;" + "removing info {}.", req, info);
_door.removeTpcPlaceholder(info.getFd());
// proceed as usual with mover + redirect
return null;
}
/*
 * Something went wrong: a tpc.key was present but none of tpc.org,
 * tpc.dst or tpc.src accompanied it.
 */
String error = String.format("Request metadata is invalid: %s: %s, %s.", req, fsPath, remoteHost);
throw new CacheException(CacheException.THIRD_PARTY_TRANSFER_FAILED, error);
}
Use of diskCacheV111.util.CacheException in the dCache project.
Class XrootdRedirectHandler, method doOnStatx.
/**
 * Handles the xrootd kXR_statx request: returns status information for one or
 * more paths in a single response.
 *
 * @param ctx the netty channel context (unused here; part of the handler contract)
 * @param req the statx request carrying the paths to query
 * @return the statx response with the statuses of all requested paths
 * @throws XrootdException if no path was given, on timeout, on permission
 *         denial, or on any other name-space failure
 */
@Override
protected XrootdResponse<StatxRequest> doOnStatx(ChannelHandlerContext ctx, StatxRequest req) throws XrootdException {
// kXR_statx requires at least one path argument.
if (req.getPaths().length == 0) {
throw new XrootdException(kXR_ArgMissing, "no paths specified");
}
try {
// Resolve each requested path against the door's root before querying.
String[] requested = req.getPaths();
FsPath[] paths = new FsPath[requested.length];
for (int i = 0; i < paths.length; i++) {
paths[i] = createFullPath(requested[i]);
}
LoginSessionInfo loginSessionInfo = sessionInfo();
Subject subject = loginSessionInfo.getSubject();
Restriction restriction = loginSessionInfo.getRestriction();
return new StatxResponse(req, _door.getMultipleFileStatuses(paths, subject, restriction));
} catch (TimeoutCacheException e) {
throw xrootdException(e.getRc(), "Internal timeout");
} catch (PermissionDeniedCacheException e) {
throw xrootdException(e);
} catch (CacheException e) {
// Fixed: previously said "Failed to open file", copied from the open
// handler; this is a stat operation, so report it as such.
throw xrootdException(e.getRc(), String.format("Failed to get status of file (%s [%d])", e.getMessage(), e.getRc()));
}
}
Use of diskCacheV111.util.CacheException in the dCache project.
Class CopyFilter, method fetchCredential.
/**
 * Obtains the credential to use when authenticating the third-party copy,
 * according to the requested delegation mechanism.
 *
 * @param source which delegation mechanism to use
 * @return the delegated credential, or {@code null} when source is NONE
 * @throws InterruptedException if interrupted while contacting the credential service
 * @throws ErrorResponseException if the required credential is unavailable
 */
private Object fetchCredential(CredentialSource source) throws InterruptedException, ErrorResponseException {
Subject subject = Subject.getSubject(AccessController.getContext());
switch (source) {
case GRIDSITE:
return fetchGridsiteCredential();
case OIDC:
return fetchOidcCredential(subject);
case NONE:
return null;
default:
throw new RuntimeException("Unsupported source " + source);
}
}

/**
 * GridSite delegation: looks up a delegated X.509 credential keyed by the
 * identity the client presented over TLS.
 */
private Object fetchGridsiteCredential() throws InterruptedException, ErrorResponseException {
try {
HttpServletRequest request = ServletRequest.getRequest();
// Use the X.509 identity from TLS, even if that wasn't used to
// establish the user's identity. This allows the local activity of
// the COPY (i.e., the ability to read a file, or create a new file)
// to be authorized based on some non-X.509 identity, while using a
// delegated X.509 credential when authenticating for the
// third-party copy, based on the client credential presented when
// establishing the TLS connection.
Subject x509Subject = AuthenticationHandler.getX509Identity(request);
String dn = x509Subject == null ? null : Subjects.getDn(x509Subject);
if (dn == null) {
throw new ErrorResponseException(Response.Status.SC_UNAUTHORIZED, "user must present valid X.509 certificate");
}
String fqan = Objects.toString(Subjects.getPrimaryFqan(x509Subject), null);
/* If delegation has been requested and declined then potentially
 * use the existing delegated credential. We don't want to
 * artificially fail requests that might otherwise succeed.
 */
int minLifetimeInMinutes = hasClientAlreadyBeenRedirected(request) ? 2 : 20;
return _credentialService.getDelegatedCredential(dn, fqan, minLifetimeInMinutes, MINUTES);
} catch (PermissionDeniedCacheException e) {
throw new ErrorResponseException(Status.SC_UNAUTHORIZED, "Presented X.509 certificate not valid");
} catch (CacheException e) {
throw new ErrorResponseException(Status.SC_INTERNAL_SERVER_ERROR, "Internal problem: " + e.getMessage());
}
}

/**
 * OIDC delegation: exchanges the caller's bearer token for a delegated
 * credential via the credential service.
 */
private Object fetchOidcCredential(Subject subject) throws InterruptedException, ErrorResponseException {
BearerTokenCredential bearer = subject.getPrivateCredentials().stream()
.filter(BearerTokenCredential.class::isInstance)
.map(BearerTokenCredential.class::cast)
.findFirst()
.orElseThrow(() -> new ErrorResponseException(Status.SC_UNAUTHORIZED, "User must authenticate with OpenID for " + "OpenID delegation"));
return _credentialService.getDelegatedCredential(bearer.getToken(), _oidcClientCredentials);
}
Use of diskCacheV111.util.CacheException in the dCache project.
Class DcacheResource, method setProperty.
/**
 * Sets, replaces or removes a WebDAV property. Only properties in the
 * extended-attribute namespace are modifiable; they are stored as
 * extended attributes of the underlying file.
 *
 * @param qname the fully-qualified property name
 * @param o the new value, or {@code null} to remove the property
 * @throws NotAuthorizedException if the namespace is not modifiable or
 *         permission is denied
 * @throws PropertySource.PropertySetException if the file is missing or the
 *         name-space operation fails
 */
@Override
public void setProperty(QName qname, Object o) throws PropertySource.PropertySetException, NotAuthorizedException {
if (!qname.getNamespaceURI().equals(XATTR_NAMESPACE_URI)) {
throw new NotAuthorizedException("Property not modifiable", this);
}
String attributeName = qname.getLocalPart();
try {
if (o == null) {
try {
_factory.removeExtendedAttribute(_path, attributeName);
} catch (NoAttributeCacheException ignored) {
// RFC 4918 14.23: "Specifying the removal of a property
// that does not exist is not an error."
}
return;
}
byte[] value;
if (o instanceof String) {
value = ((String) o).getBytes(StandardCharsets.UTF_8);
} else {
// Unexpected value type: fall back to its string representation.
LOGGER.warn("set property called with unexpected value" + " type {}", o.getClass().getCanonicalName());
value = String.valueOf(o).getBytes(StandardCharsets.UTF_8);
}
_factory.writeExtendedAttribute(_path, attributeName, value);
} catch (PermissionDeniedCacheException e) {
throw new NotAuthorizedException("Permission denied", this);
} catch (FileNotFoundCacheException e) {
throw new PropertySource.PropertySetException(Response.Status.SC_NOT_FOUND, "File does not exist");
} catch (CacheException e) {
LOGGER.error("setProperty on {} to {} failed: {}", qname, o, e.getMessage());
throw new PropertySource.PropertySetException(Response.Status.SC_INTERNAL_SERVER_ERROR, e.getMessage());
}
}
Use of diskCacheV111.util.CacheException in the dCache project.
Class DcacheResourceFactory, method deleteFile.
/**
 * Deletes a regular file or symbolic link from the name space and then
 * forwards the removal information to billing.
 *
 * @param attributes attributes of the entry to delete (must carry its PnfsId)
 * @param path the dCache path of the entry
 * @throws CacheException if the name-space deletion fails
 */
public void deleteFile(FileAttributes attributes, FsPath path) throws CacheException {
// Delete by both id and path, restricted to regular files and links.
roleAwarePnfsHandler().deletePnfsEntry(attributes.getPnfsId(), path.toString(),
EnumSet.of(REGULAR, LINK), EnumSet.noneOf(FileAttribute.class));
sendRemoveInfoToBilling(attributes, path);
}
Aggregations