Use of diskCacheV111.util.FileExistsCacheException in project dcache by dCache.
The class XrootdDoor, method write.
public XrootdTransfer write(InetSocketAddress client, FsPath path, Set<String> tried,
      String ioQueue, UUID uuid, boolean createDir, boolean overwrite, Long size,
      OptionalLong maxUploadSize, InetSocketAddress local, Subject subject,
      Restriction restriction, boolean persistOnSuccessfulClose, FsPath rootPath,
      Serializable delegatedProxy, Map<String, String> opaque)
      throws CacheException, InterruptedException, ParseException {
    if (!isWriteAllowed(path)) {
        throw new PermissionDeniedCacheException("Write permission denied");
    }
    FsPath transferPath = persistOnSuccessfulClose
          ? getUploadPath(subject, restriction, createDir, overwrite, size, path, rootPath)
          : path;
    XrootdTransfer transfer = createTransfer(client, transferPath, tried, ioQueue, uuid,
          local, subject, restriction, opaque);
    transfer.setOverwriteAllowed(overwrite);
    /*
     * If this is a destination door/server and the session
     * does not contain a proxy, eventually fail downstream.
     */
    transfer.setDelegatedCredential(delegatedProxy);
    int handle = transfer.getFileHandle();
    InetSocketAddress address = null;
    _transfers.put(handle, transfer);
    String explanation = "problem within door";
    try {
        try {
            if (createDir) {
                transfer.createNameSpaceEntryWithParents();
            } else {
                transfer.createNameSpaceEntry();
            }
        } catch (FileExistsCacheException e) {
            transfer.readNameSpaceEntry(true);
            if (transfer.getFileAttributes().getStorageInfo().isCreatedOnly()) {
                transfer.setOverwriteAllowed(true);
                transfer.createNameSpaceEntry();
            } else {
                throw e;
            }
        }
        maxUploadSize.ifPresent(transfer::setMaximumLength);
        if (size != null) {
            checkResourceNotMissing(!maxUploadSize.isPresent() || size <= maxUploadSize.getAsLong(),
                  "File exceeds maximum upload size");
            transfer.setLength(size);
        }
        try {
            transfer.selectPoolAndStartMover(RETRY_POLICY);
            address = transfer.waitForRedirect(_moverTimeout, _moverTimeoutUnit);
            if (address == null) {
                throw new CacheException(transfer.getPool() + " failed to open TCP socket");
            }
            transfer.setStatus("Mover " + transfer.getPool() + "/" + transfer.getMoverId()
                  + ": Receiving");
        } finally {
            if (address == null) {
                transfer.deleteNameSpaceEntry();
            }
        }
    } catch (CacheException e) {
        explanation = e.getMessage();
        transfer.notifyBilling(e.getRc(), e.getMessage());
        throw e;
    } catch (InterruptedException e) {
        explanation = "transfer interrupted";
        transfer.notifyBilling(CacheException.UNEXPECTED_SYSTEM_EXCEPTION, "Transfer interrupted");
        throw e;
    } catch (RuntimeException e) {
        explanation = "bug found: " + e.toString();
        transfer.notifyBilling(CacheException.UNEXPECTED_SYSTEM_EXCEPTION, e.toString());
        throw e;
    } finally {
        if (address == null) {
            transfer.killMover(0, "killed by door: " + explanation);
            _transfers.remove(handle);
        }
    }
    return transfer;
}
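The catch block above only tolerates a pre-existing namespace entry when the earlier upload never completed (isCreatedOnly returns true) and then forces an overwrite; otherwise the FileExistsCacheException propagates to the client. A minimal sketch of that create-or-reclaim pattern, with hypothetical createEntry and isIncompleteUpload helpers standing in for the Transfer calls used in XrootdDoor.write:

// Illustrative only: createEntry and isIncompleteUpload are hypothetical helpers,
// not part of the dCache Transfer API.
void createOrReclaim(FsPath path) throws CacheException {
    try {
        createEntry(path, false);            // fail if the entry already exists
    } catch (FileExistsCacheException e) {
        if (isIncompleteUpload(path)) {
            createEntry(path, true);         // reclaim a stale, never-written entry
        } else {
            throw e;                         // genuine conflict: propagate to the client
        }
    }
}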
Use of diskCacheV111.util.FileExistsCacheException in project dcache by dCache.
The class NearlineStorageHandlerTest, method testStageQueueStateOnError.
@Test
public void testStageQueueStateOnError() throws CacheException {
    var attr = given(aFile().withStorageClass("a:b", "foo").withSize(34567));
    when(repository.createEntry(any(), any(), any(), any(), any(), any()))
          .thenThrow(new FileExistsCacheException("injected"));
    nsh.stage("foo", attr, hsmMigrationRequestCallack);
    assertThat(nsh.getActiveFetchJobs(), is(0));
    assertThat(nsh.getFetchQueueSize(), is(0));
    verify(hsmMigrationRequestCallack).failed(any(), any());
}
Use of diskCacheV111.util.FileExistsCacheException in project dcache by dCache.
The class ChimeraNameSpaceProvider, method commitUpload.
@Override
public FileAttributes commitUpload(Subject subject, FsPath temporaryPath, FsPath finalPath,
      Set<CreateOption> options, Set<FileAttribute> attributesToFetch) throws CacheException {
    try {
        FsPath temporaryDir = getParentOfFile(temporaryPath);
        FsPath finalDir = getParentOfFile(finalPath);
        checkIsTemporaryDirectory(temporaryPath, temporaryDir);
        /* File must have been created...
         */
        ExtendedInode uploadDirInode;
        ExtendedInode temporaryDirInode;
        ExtendedInode inodeOfFile;
        try {
            uploadDirInode = new ExtendedInode(_fs, _fs.path2inode(temporaryDir.parent().toString()));
            temporaryDirInode = uploadDirInode.inodeOf(temporaryDir.name(), STAT);
            inodeOfFile = temporaryDirInode.inodeOf(temporaryPath.name(), STAT);
        } catch (FileNotFoundChimeraFsException e) {
            throw new FileNotFoundCacheException("No such file or directory: " + temporaryPath, e);
        }
        /* ...and upload must have completed...
         */
        ImmutableList<StorageLocatable> locations = inodeOfFile.getLocations();
        if (locations.isEmpty()) {
            throw new FileIsNewCacheException("Upload has not completed.");
        }
        /* ...and it must have the correct size.
         */
        ImmutableList<String> size = inodeOfFile.getTag(TAG_EXPECTED_SIZE);
        if (!size.isEmpty()) {
            long expectedSize = Long.parseLong(size.get(0));
            long actualSize = inodeOfFile.statCache().getSize();
            if (expectedSize != actualSize) {
                throw new FileCorruptedCacheException(expectedSize, actualSize);
            }
        }
        /* Target directory must exist.
         */
        ExtendedInode finalDirInode;
        try {
            finalDirInode = new ExtendedInode(_fs, _fs.path2inode(finalDir.toString()));
        } catch (FileNotFoundChimeraFsException e) {
            throw new FileNotFoundCacheException("No such file or directory: " + finalDir, e);
        }
        /* File must not exist unless overwrite is enabled.
         */
        try {
            ExtendedInode inodeOfExistingFile = finalDirInode.inodeOf(finalPath.name(), STAT);
            if (!options.contains(CreateOption.OVERWRITE_EXISTING)) {
                throw new FileExistsCacheException("File exists: " + finalPath);
            }
            /* User must be authorized to delete existing file.
             */
            if (!Subjects.isExemptFromNamespaceChecks(subject)) {
                FileAttributes attributesOfParent = getFileAttributesForPermissionHandler(finalDirInode);
                FileAttributes attributesOfFile = getFileAttributesForPermissionHandler(inodeOfExistingFile);
                if (_permissionHandler.canDeleteFile(subject, attributesOfParent, attributesOfFile) != ACCESS_ALLOWED) {
                    throw new PermissionDeniedCacheException("Overwrite denied: " + finalPath);
                }
            }
        } catch (FileNotFoundChimeraFsException ignored) {
        }
        /* Read file attributes before moving the file. Otherwise the cached parent will
         * be gone.
         */
        FileAttributes attributes = getFileAttributes(inodeOfFile, attributesToFetch);
        /* File is moved to correct directory.
         */
        _fs.rename(inodeOfFile, temporaryDirInode, temporaryPath.name(), finalDirInode, finalPath.name());
        /* Delete temporary upload directory and any files in it.
         */
        removeRecursively(uploadDirInode, temporaryDir.name(), temporaryDirInode, i -> {
        });
        return attributes;
    } catch (ChimeraFsException e) {
        throw new CacheException(CacheException.UNEXPECTED_SYSTEM_EXCEPTION, e.getMessage());
    } catch (NumberFormatException e) {
        throw new FileCorruptedCacheException("Failed to commit file: " + e.getMessage());
    }
}
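Whether commitUpload replaces an existing target is decided by the caller through CreateOption.OVERWRITE_EXISTING; without it, the FileExistsCacheException above is thrown. A minimal call-site sketch, assuming a provider instance and an authenticated subject are available; the paths and the overwrite flag are illustrative:

// Sketch only: 'provider', 'subject' and 'overwrite' are assumed to exist; the paths are made up.
Set<CreateOption> options = overwrite
      ? EnumSet.of(CreateOption.OVERWRITE_EXISTING)
      : EnumSet.noneOf(CreateOption.class);
try {
    FileAttributes attrs = provider.commitUpload(subject,
          FsPath.create("/upload/0/1234/data.root"),   // temporary upload location
          FsPath.create("/data/exp/data.root"),        // final location
          options,
          EnumSet.of(FileAttribute.PNFSID, FileAttribute.SIZE));
} catch (FileExistsCacheException e) {
    // Target exists and overwrite was not requested; report a conflict to the client.
}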
Use of diskCacheV111.util.FileExistsCacheException in project dcache by dCache.
The class ChimeraNameSpaceProvider, method createUploadPath.
@Override
public FsPath createUploadPath(Subject subject, FsPath path, FsPath rootPath, Long size,
      AccessLatency al, RetentionPolicy rp, String spaceToken, Set<CreateOption> options)
      throws CacheException {
    checkState(_uploadDirectory != null, "Upload directory is not configured.");
    try {
        /* Parent directory must exist.
         */
        ExtendedInode parentOfPath = options.contains(CreateOption.CREATE_PARENTS)
              ? installDirectory(subject, path.parent(), INHERIT_MODE)
              : lookupDirectory(subject, path.parent());
        FileAttributes attributesOfParent = !Subjects.isExemptFromNamespaceChecks(subject)
              ? getFileAttributesForPermissionHandler(parentOfPath)
              : null;
        /* File must not exist unless overwrite is enabled.
         */
        try {
            ExtendedInode inodeOfPath = parentOfPath.inodeOf(path.name(), STAT);
            if (!options.contains(CreateOption.OVERWRITE_EXISTING)
                  || (inodeOfPath.statCache().getMode() & UnixPermission.S_TYPE) != UnixPermission.S_IFREG) {
                throw new FileExistsCacheException("File exists: " + path);
            }
            /* User must be authorized to delete existing file.
             */
            if (!Subjects.isExemptFromNamespaceChecks(subject)) {
                FileAttributes attributesOfPath = getFileAttributesForPermissionHandler(inodeOfPath);
                if (_permissionHandler.canDeleteFile(subject, attributesOfParent, attributesOfPath) != ACCESS_ALLOWED) {
                    throw new PermissionDeniedCacheException("Access denied: " + path);
                }
            }
        } catch (FileNotFoundChimeraFsException ignored) {
        }
        /* User must be authorized to create file.
         */
        if (!Subjects.isExemptFromNamespaceChecks(subject)) {
            if (_permissionHandler.canCreateFile(subject, attributesOfParent) != ACCESS_ALLOWED) {
                throw new PermissionDeniedCacheException("Access denied: " + path);
            }
        }
        /* Attributes are inherited from real parent directory.
         */
        int mode = parentOfPath.statCache().getMode() & UnixPermission.S_PERMS;
        int gid;
        if ((mode & UnixPermission.S_ISGID) != 0) {
            gid = parentOfPath.statCache().getGid();
        } else if (Subjects.isNobody(subject) || _inheritFileOwnership) {
            gid = parentOfPath.statCache().getGid();
        } else {
            gid = Ints.checkedCast(Subjects.getPrimaryGid(subject));
        }
        int uid;
        if (Subjects.isNobody(subject) || _inheritFileOwnership) {
            uid = parentOfPath.statCache().getUid();
        } else {
            uid = Ints.checkedCast(Subjects.getUid(subject));
        }
        /* ACLs are copied from real parent to the temporary upload directory
         * such that the upload is allowed (in case write permissions rely
         * on ACLs) and such that the file will inherit the correct ACLs.
         */
        List<ACE> acl = _fs.getACL(parentOfPath);
        /* The temporary upload directory has the same tags as the real parent,
         * except target file specific properties are stored as tags local to
         * the upload directory.
         */
        Map<String, byte[]> tags = Maps.newHashMap(parentOfPath.getTags());
        if (spaceToken != null) {
            tags.put(TAG_WRITE_TOKEN, spaceToken.getBytes(UTF_8));
            /* If client provides space token to upload to, the access latency and
             * retention policy tags of the upload directory must be disregarded.
             */
            tags.remove(TAG_ACCESS_LATENCY);
            tags.remove(TAG_RETENTION_POLICY);
        }
        if (al != null) {
            tags.put(TAG_ACCESS_LATENCY, al.toString().getBytes(UTF_8));
        }
        if (rp != null) {
            tags.put(TAG_RETENTION_POLICY, rp.toString().getBytes(UTF_8));
        }
        if (size != null) {
            tags.put(TAG_EXPECTED_SIZE, size.toString().getBytes(UTF_8));
        }
        tags.put(TAG_PATH, path.toString().getBytes(UTF_8));
        /* Upload directory may optionally be relative to the user's root path. Whether
         * that's the case depends on if the configured upload directory is an absolute
         * or relative path.
         */
        FsPath uploadDirectory = rootPath.resolve(_uploadDirectory);
        if (_uploadSubDirectory != null) {
            uploadDirectory = uploadDirectory.chroot(String.format(_uploadSubDirectory, threadId.get()));
        }
        /* Upload directory must exist and have the right permissions.
         */
        ExtendedInode inodeOfUploadDir = installSystemDirectory(uploadDirectory, 0711,
              Collections.emptyList(), Collections.emptyMap());
        if (inodeOfUploadDir.statCache().getUid() != 0) {
            LOGGER.error("Owner must be root: {}", uploadDirectory);
            throw new CacheException("Owner must be root: " + uploadDirectory);
        }
        if ((inodeOfUploadDir.statCache().getMode() & UnixPermission.S_PERMS) != 0711) {
            LOGGER.error("File mode must be 0711: {}", uploadDirectory);
            throw new CacheException("File mode must be 0711: " + uploadDirectory);
        }
        /* Use cryptographically strong pseudo random UUID to create temporary upload directory.
         */
        UUID uuid = UUID.randomUUID();
        _fs.mkdir(inodeOfUploadDir, uuid.toString(), uid, gid, mode, acl, tags);
        return uploadDirectory.child(uuid.toString()).child(path.name());
    } catch (ChimeraFsException e) {
        LOGGER.error("Problem with database: {}", e.getMessage());
        throw new CacheException(CacheException.UNEXPECTED_SYSTEM_EXCEPTION, e.getMessage());
    }
}
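createUploadPath and commitUpload together form a two-phase upload: the client writes into a per-upload temporary directory, and the file is only moved to its final name once the transfer has finished. A hedged sketch of that sequence; 'provider', 'subject' and 'rootPath' are assumed to exist, and the target path, size and policies are illustrative:

// Two-phase upload sketch; error handling omitted for brevity.
FsPath target = FsPath.create("/data/exp/run42/data.root");
FsPath upload = provider.createUploadPath(subject, target, rootPath,
      1_000_000L, AccessLatency.NEARLINE, RetentionPolicy.CUSTODIAL,
      null, EnumSet.noneOf(CreateOption.class));
// ... the client transfers its data into 'upload' ...
provider.commitUpload(subject, upload, target,
      EnumSet.noneOf(CreateOption.class), EnumSet.noneOf(FileAttribute.class));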
Use of diskCacheV111.util.FileExistsCacheException in project dcache by dCache.
The class Storage, method putDone.
@Override
public void putDone(SRMUser user, String localTransferPath, URI surl, boolean overwrite)
      throws SRMException {
    try {
        Subject subject = asDcacheUser(user).getSubject();
        Restriction restriction = asDcacheUser(user).getRestriction();
        FsPath fullPath = getPath(surl);
        checkNonBrokenUpload(localTransferPath);
        EnumSet<CreateOption> options = EnumSet.noneOf(CreateOption.class);
        if (overwrite) {
            options.add(CreateOption.OVERWRITE_EXISTING);
        }
        PnfsCommitUpload msg = new PnfsCommitUpload(subject, restriction,
              FsPath.create(localTransferPath), fullPath, options,
              EnumSet.of(PNFSID, SIZE, STORAGEINFO));
        msg = _pnfsStub.sendAndWait(msg);
        DoorRequestInfoMessage infoMsg = new DoorRequestInfoMessage(getCellAddress());
        infoMsg.setSubject(subject);
        infoMsg.setBillingPath(fullPath.toString());
        infoMsg.setTransferPath(localTransferPath);
        infoMsg.setTransaction(CDC.getSession());
        infoMsg.setPnfsId(msg.getFileAttributes().getPnfsId());
        infoMsg.setResult(0, "");
        infoMsg.setFileSize(msg.getFileAttributes().getSizeIfPresent().orElse(0L));
        infoMsg.setStorageInfo(msg.getFileAttributes().getStorageInfo());
        Origin origin = Subjects.getOrigin(subject);
        if (origin != null) {
            infoMsg.setClient(origin.getAddress().getHostAddress());
        }
        _billingStub.notify(infoMsg);
    } catch (FileNotFoundCacheException e) {
        throw new SRMInvalidPathException(e.getMessage(), e);
    } catch (FileIsNewCacheException | FileCorruptedCacheException e) {
        throw new SRMException(e.getMessage(), e);
    } catch (PermissionDeniedCacheException e) {
        throw new SRMAuthorizationException("Permission denied.", e);
    } catch (FileExistsCacheException e) {
        throw new SRMDuplicationException(surl + " exists.", e);
    } catch (CacheException e) {
        throw new SRMInternalErrorException(e.getMessage(), e);
    } catch (InterruptedException e) {
        throw new SRMInternalErrorException("Operation interrupted", e);
    } catch (NoRouteToCellException e) {
        throw new SRMInternalErrorException("Internal communication failure", e);
    }
}
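The catch chain in putDone is the usual pattern for translating the CacheException hierarchy into SRM-level errors, with FileExistsCacheException becoming SRMDuplicationException. A condensed, hypothetical helper expressing the same mapping (it omits the FileIsNewCacheException and FileCorruptedCacheException cases) might look like:

// Hypothetical helper mirroring the mapping in putDone; not part of the dCache Storage class.
static SRMException toSrmException(CacheException e, URI surl) {
    if (e instanceof FileNotFoundCacheException) {
        return new SRMInvalidPathException(e.getMessage(), e);
    }
    if (e instanceof PermissionDeniedCacheException) {
        return new SRMAuthorizationException("Permission denied.", e);
    }
    if (e instanceof FileExistsCacheException) {
        return new SRMDuplicationException(surl + " exists.", e);
    }
    return new SRMInternalErrorException(e.getMessage(), e);
}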