Usage example of org.dcache.namespace.FileAttribute.TYPE in the dCache project (repository: dcache).
Taken from class RemoteNameSpaceProviderTests, method shouldSucceedWhenGetFileAttributesForExistingEntry.
@Test
public void shouldSucceedWhenGetFileAttributesForExistingEntry() throws Exception {
    // Arrange: the namespace cell will reply with a 1234-byte REGULAR file.
    givenSuccessfulResponse((Modifier<PnfsGetFileAttributes>) (r) ->
          r.setFileAttributes(fileAttributes().withSize(1234L).withType(REGULAR).build()));

    // Act: request TYPE and SIZE for an existing entry.
    FileAttributes result = _namespace.getFileAttributes(ROOT, A_PNFSID, EnumSet.of(TYPE, SIZE));

    // Assert: exactly one request message was sent, carrying the caller's
    // subject, the target PNFS-ID and precisely the two requested attributes.
    PnfsGetFileAttributes request = getSingleSendAndWaitMessage(PnfsGetFileAttributes.class);
    assertThat(request.getReplyRequired(), is(true));
    assertThat(request.getSubject(), is(ROOT));
    assertThat(request.getPnfsId(), is(A_PNFSID));
    assertThat(request.getRequestedAttributes(), hasSize(2));
    assertThat(request.getRequestedAttributes(), hasItem(TYPE));
    assertThat(request.getRequestedAttributes(), hasItem(SIZE));

    // Assert: the values from the reply are surfaced to the caller unchanged.
    assertThat(result.getSize(), is(1234L));
    assertThat(result.getFileType(), is(REGULAR));
}
Usage example of org.dcache.namespace.FileAttribute.TYPE in the dCache project (repository: dcache).
Taken from class ChimeraNameSpaceProvider, method setFileAttributes.
@Override
public FileAttributes setFileAttributes(Subject subject, PnfsId pnfsId, FileAttributes attr,
      Set<FileAttribute> acquire) throws CacheException {
    LOGGER.debug("File attributes update: {}", attr.getDefinedAttributes());
    try {
        // Skip the initial stat for subjects that are exempt from namespace
        // permission checks; the stat is only needed for the permission handler.
        ExtendedInode inode = new ExtendedInode(_fs, pnfsId,
              Subjects.isExemptFromNamespaceChecks(subject) ? NO_STAT : STAT);

        if (!Subjects.isExemptFromNamespaceChecks(subject)) {
            FileAttributes attributes = getFileAttributesForPermissionHandler(inode);
            if (_permissionHandler.canSetAttributes(subject, attributes, attr) != ACCESS_ALLOWED) {
                throw new PermissionDeniedCacheException("Access denied: " + pnfsId);
            }
        }

        /* Update the t_inodes row first (the Stat object) to acquire a FOR UPDATE / FOR NO KEY UPDATE
         * first. If the inserts into secondary table referring t_inodes would be done first, the
         * referential integrity check would obtain a FOR SHARE / FOR KEY SHARE on the t_inodes row which
         * latter would have to be upgraded (potentially leading to deadlocks if that's not possible).
         */
        Stat stat = new Stat();
        for (FileAttribute attribute : attr.getDefinedAttributes()) {
            switch (attribute) {
                case LOCATIONS:
                    // REVISIT: may be we need an explicit indication from pool
                    if (attr.isDefined(SIZE)) {
                        stat.setState(FileState.STORED);
                    }
                    break;
                case SIZE:
                    // REVISIT: pool shouldn't update the files size on flush, but this is required due to space manager accounting
                    if (!attr.isDefined(STORAGEINFO) || !attr.getStorageInfo().isSetAddLocation()) {
                        stat.setSize(attr.getSize());
                    }
                    break;
                case MODE:
                    stat.setMode(attr.getMode());
                    break;
                case CREATION_TIME:
                    stat.setCrTime(attr.getCreationTime());
                    break;
                case CHANGE_TIME:
                    stat.setCTime(attr.getChangeTime());
                    break;
                case MODIFICATION_TIME:
                    stat.setMTime(attr.getModificationTime());
                    break;
                case ACCESS_TIME:
                    stat.setATime(attr.getAccessTime());
                    break;
                case OWNER:
                    stat.setUid(attr.getOwner());
                    break;
                case OWNER_GROUP:
                    stat.setGid(attr.getGroup());
                    break;
                case CHECKSUM:
                    // Handled after the t_inodes update; see the lock-ordering comment above.
                    break;
                case ACCESS_LATENCY:
                    stat.setAccessLatency(attr.getAccessLatency());
                    break;
                case RETENTION_POLICY:
                    stat.setRetentionPolicy(attr.getRetentionPolicy());
                    break;
                case FLAGS:
                    // Handled after the t_inodes update; see the lock-ordering comment above.
                    break;
                case ACL:
                    // Handled after the t_inodes update; see the lock-ordering comment above.
                    break;
                case STORAGEINFO:
                    _extractor.setStorageInfo(inode, attr.getStorageInfo());
                    break;
                case XATTR:
                    // Handled after the t_inodes update; see the lock-ordering comment above.
                    break;
                case LABELS:
                    // Handled after the t_inodes update; see the lock-ordering comment above.
                    break;
                default:
                    throw new UnsupportedOperationException(
                          "Attribute " + attribute + " not supported yet.");
            }
        }

        if (stat.isDefinedAny()) {
            inode.setStat(stat);
        }

        if (attr.isDefined(XATTR)) {
            Map<String, String> xattrs = attr.getXattrs();
            for (Map.Entry<String, String> e : xattrs.entrySet()) {
                _fs.setXattr(inode, e.getKey(),
                      e.getValue().getBytes(StandardCharsets.UTF_8), SetXattrMode.EITHER);
            }
        }

        if (attr.isDefined(LABELS)) {
            for (String label : attr.getLabels()) {
                _fs.addLabel(inode, label);
            }
        }

        if (attr.isDefined(FileAttribute.LOCATIONS)) {
            for (String location : attr.getLocations()) {
                _fs.addInodeLocation(inode, StorageGenericLocation.DISK, location);
            }
        }

        if (attr.isDefined(FileAttribute.CHECKSUM)) {
            // A checksum of a given type may be recorded once; a conflicting
            // value for an already-recorded type indicates data corruption.
            for (Checksum newChecksum : attr.getChecksums()) {
                ChecksumType type = newChecksum.getType();
                Optional<Checksum> existingChecksum = _fs.getInodeChecksums(inode).stream()
                      .filter(c -> c.getType() == type).findFirst();
                if (existingChecksum.isPresent()) {
                    Checksum existing = existingChecksum.get();
                    if (!existing.equals(newChecksum)) {
                        throw new FileCorruptedCacheException(existing, newChecksum);
                    }
                } else {
                    _fs.setInodeChecksum(inode, type.getType(), newChecksum.getValue());
                }
            }
        }

        if (attr.isDefined(FileAttribute.FLAGS)) {
            // Flags live in the level-2 file; merge the supplied entries into
            // the existing cache-info rather than replacing it wholesale.
            FsInode level2 = new ExtendedInode(_fs, pnfsId, NO_STAT).getLevel(2);
            ChimeraCacheInfo cacheInfo = new ChimeraCacheInfo(level2);
            for (Map.Entry<String, String> flag : attr.getFlags().entrySet()) {
                cacheInfo.getFlags().put(flag.getKey(), flag.getValue());
            }
            cacheInfo.writeCacheInfo(level2);
        }

        if (attr.isDefined(FileAttribute.ACL)) {
            ACL acl = attr.getAcl();
            _fs.setACL(inode, acl.getList());
        }

        return getFileAttributes(inode, acquire);
    } catch (FileNotFoundChimeraFsException e) {
        // NOTE(review): the cause is dropped here; consider a (String, Throwable)
        // constructor if FileNotFoundCacheException offers one — confirm.
        throw new FileNotFoundCacheException("No such file or directory: " + pnfsId);
    } catch (IOException e) {
        // Pass the exception as the trailing Throwable argument (no "{}"
        // placeholder) so SLF4J logs the full stack trace; the previous form
        // consumed it as a formatting parameter and lost the trace.
        LOGGER.error("Exception in setFileAttributes", e);
        throw new CacheException(CacheException.UNEXPECTED_SYSTEM_EXCEPTION, e.getMessage());
    }
}
Aggregations (other usages of this symbol).