Use of org.apache.hadoop.fs.XAttr in project hadoop by apache: class FSDirErasureCodingOp, method removeErasureCodingPolicyXAttr.
  /**
   * Remove the erasure coding policy XAttr that is set directly on the
   * given directory, if any.
   *
   * @return the removed XAttrs, or null if no policy is set on the inode
   */
  private static List<XAttr> removeErasureCodingPolicyXAttr(
      final FSNamesystem fsn, final INodesInPath srcIIP) throws IOException {
    FSDirectory fsd = fsn.getFSDirectory();
    assert fsd.hasWriteLock();
    Preconditions.checkNotNull(srcIIP, "INodes cannot be null");
    String src = srcIIP.getPath();
    final INode inode = srcIIP.getLastINode();
    if (inode == null) {
      throw new FileNotFoundException("Path not found: " + srcIIP.getPath());
    }
    if (!inode.isDirectory()) {
      throw new IOException("Cannot unset an erasure coding policy "
          + "on a file " + src);
    }
    // Check whether the directory has a specific erasure coding policy
    // directly on itself.
    final XAttr ecXAttr = getErasureCodingPolicyXAttrForINode(fsn, inode);
    if (ecXAttr == null) {
      return null;
    }
    final List<XAttr> xattrs = Lists.newArrayListWithCapacity(1);
    xattrs.add(ecXAttr);
    FSDirXAttrOp.unprotectedRemoveXAttrs(fsd, srcIIP.getPath(), xattrs);
    return xattrs;
  }
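
The returned list is what a caller records in the edit log after the removal. A minimal caller sketch, assuming the usual resolve-then-check-permission pattern and an FSEditLog.logRemoveXAttrs call; the method name and exact flow here are illustrative, not the verbatim Hadoop code:

  // Illustrative caller sketch (assumed shape): unset the EC policy on a
  // directory and record the removed xattrs in the edit log.
  static HdfsFileStatus unsetErasureCodingPolicy(final FSNamesystem fsn,
      final String srcArg, final FSPermissionChecker pc,
      final boolean logRetryCache) throws IOException {
    assert fsn.hasWriteLock();
    FSDirectory fsd = fsn.getFSDirectory();
    final INodesInPath iip;
    final List<XAttr> xAttrs;
    fsd.writeLock();
    try {
      iip = fsd.resolvePath(pc, srcArg, DirOp.WRITE_LINK);
      if (fsd.isPermissionEnabled()) {
        fsd.checkPathAccess(pc, iip, FsAction.WRITE);
      }
      xAttrs = removeErasureCodingPolicyXAttr(fsn, iip);
    } finally {
      fsd.writeUnlock();
    }
    // A null return means no policy was set directly on this directory.
    if (xAttrs != null) {
      fsn.getEditLog().logRemoveXAttrs(iip.getPath(), xAttrs, logRetryCache);
    }
    return fsd.getAuditFileInfo(iip);
  }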
Use of org.apache.hadoop.fs.XAttr in project hadoop by apache: class FSDirErasureCodingOp, method getErasureCodingPolicyXAttrForINode.
  /**
   * Get the erasure coding policy XAttr set directly on the given inode,
   * or null if the inode is null, a symlink, or carries no such XAttr.
   */
  private static XAttr getErasureCodingPolicyXAttrForINode(
      FSNamesystem fsn, INode inode) throws IOException {
    // INode can be null
    if (inode == null) {
      return null;
    }
    FSDirectory fsd = fsn.getFSDirectory();
    fsd.readLock();
    try {
      // TODO: properly support symlinks
      if (inode.isSymlink()) {
        return null;
      }
      final XAttrFeature xaf = inode.getXAttrFeature();
      if (xaf != null) {
        XAttr xattr = xaf.getXAttr(XATTR_ERASURECODING_POLICY);
        if (xattr != null) {
          return xattr;
        }
      }
    } finally {
      fsd.readUnlock();
    }
    return null;
  }
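
The helper above only inspects the inode itself; the policy that actually applies to a path is inherited from the nearest ancestor that has one set directly. A minimal sketch of that ancestor walk, assuming INodesInPath.length() and getINode(int) semantics; this is an illustrative shape, not the verbatim Hadoop resolution code:

  // Sketch only (assumed shape): find the EC-policy xattr that is effective
  // for a path by walking from the last inode up towards the root.
  private static XAttr getEffectiveErasureCodingPolicyXAttr(
      final FSNamesystem fsn, final INodesInPath iip) throws IOException {
    for (int i = iip.length() - 1; i >= 0; i--) {
      final XAttr xattr =
          getErasureCodingPolicyXAttrForINode(fsn, iip.getINode(i));
      if (xattr != null) {
        return xattr;
      }
    }
    return null;
  }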
Use of org.apache.hadoop.fs.XAttr in project hadoop by apache: class FSDirErasureCodingOp, method setErasureCodingPolicy.
/**
* Set an erasure coding policy on the given path.
*
* @param fsn The namespace
* @param srcArg The path of the target directory.
* @param ecPolicyName The erasure coding policy name to set on the target
* directory.
* @param logRetryCache whether to record RPC ids in editlog for retry
* cache rebuilding
* @return {@link HdfsFileStatus}
* @throws IOException
* @throws HadoopIllegalArgumentException if the policy is not enabled
* @throws AccessControlException if the user does not have write access
*/
  static HdfsFileStatus setErasureCodingPolicy(final FSNamesystem fsn,
      final String srcArg, final String ecPolicyName,
      final FSPermissionChecker pc, final boolean logRetryCache)
      throws IOException, AccessControlException {
    assert fsn.hasWriteLock();
    String src = srcArg;
    FSDirectory fsd = fsn.getFSDirectory();
    final INodesInPath iip;
    List<XAttr> xAttrs;
    fsd.writeLock();
    try {
      ErasureCodingPolicy ecPolicy = fsn.getErasureCodingPolicyManager()
          .getEnabledPolicyByName(ecPolicyName);
      if (ecPolicy == null) {
        final String sysPolicies = Arrays
            .asList(fsn.getErasureCodingPolicyManager().getEnabledPolicies())
            .stream()
            .map(ErasureCodingPolicy::getName)
            .collect(Collectors.joining(", "));
        final String message = String.format("Policy '%s' does not match any "
            + "enabled erasure coding policies: [%s]. The set of enabled "
            + "erasure coding policies can be configured at '%s'.",
            ecPolicyName, sysPolicies,
            DFSConfigKeys.DFS_NAMENODE_EC_POLICIES_ENABLED_KEY);
        throw new HadoopIllegalArgumentException(message);
      }
      iip = fsd.resolvePath(pc, src, DirOp.WRITE_LINK);
      // Write access is required to set erasure coding policy
      if (fsd.isPermissionEnabled()) {
        fsd.checkPathAccess(pc, iip, FsAction.WRITE);
      }
      src = iip.getPath();
      xAttrs = setErasureCodingPolicyXAttr(fsn, iip, ecPolicy);
    } finally {
      fsd.writeUnlock();
    }
    fsn.getEditLog().logSetXAttrs(src, xAttrs, logRetryCache);
    return fsd.getAuditFileInfo(iip);
  }
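
For context, the NameNode-side method above is what a client RPC eventually reaches. A minimal client-side sketch using DistributedFileSystem.setErasureCodingPolicy, assuming fs.defaultFS points at an HDFS 3.x cluster and that the named policy has already been enabled; the path and policy name are illustrative:

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.fs.FileSystem;
  import org.apache.hadoop.fs.Path;
  import org.apache.hadoop.hdfs.DistributedFileSystem;

  public class SetEcPolicyExample {
    public static void main(String[] args) throws Exception {
      Configuration conf = new Configuration();
      try (FileSystem fs = FileSystem.get(conf)) {
        DistributedFileSystem dfs = (DistributedFileSystem) fs;
        Path dir = new Path("/data/ec-dir");   // hypothetical directory
        dfs.mkdirs(dir);
        // The policy must already be enabled on the NameNode (see
        // DFSConfigKeys.DFS_NAMENODE_EC_POLICIES_ENABLED_KEY or
        // the `hdfs ec` CLI, depending on the Hadoop version).
        dfs.setErasureCodingPolicy(dir, "RS-6-3-1024k");
      }
    }
  }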
Use of org.apache.hadoop.fs.XAttr in project hadoop by apache: class XAttrFormat, method getXAttr.
/**
* Get XAttr by name with prefix.
* Unpacks the byte[] until the specific XAttr is found.
*
* @param attrs the packed bytes of XAttrs
* @param prefixedName the XAttr name with prefix
* @return the XAttr
*/
  static XAttr getXAttr(byte[] attrs, String prefixedName) {
    if (prefixedName == null || attrs == null) {
      return null;
    }
    XAttr xAttr = XAttrHelper.buildXAttr(prefixedName);
    for (int i = 0; i < attrs.length; ) {
      // big-endian
      int v = Ints.fromBytes(attrs[i], attrs[i + 1],
          attrs[i + 2], attrs[i + 3]);
      i += 4;
      int ns = (v >> XATTR_NAMESPACE_OFFSET) & XATTR_NAMESPACE_MASK;
      int nid = v & XATTR_NAME_MASK;
      XAttr.NameSpace namespace = XATTR_NAMESPACE_VALUES[ns];
      String name = XAttrStorage.getName(nid);
      int vlen = ((0xff & attrs[i]) << 8) | (0xff & attrs[i + 1]);
      i += 2;
      if (xAttr.getNameSpace() == namespace && xAttr.getName().equals(name)) {
        if (vlen > 0) {
          byte[] value = new byte[vlen];
          System.arraycopy(attrs, i, value, 0, vlen);
          return new XAttr.Builder().setNameSpace(namespace).setName(name)
              .setValue(value).build();
        }
        return xAttr;
      }
      i += vlen;
    }
    return null;
  }
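
The loop above implies a simple packed layout: a 4-byte big-endian header carrying the namespace bits and the interned name id, a 2-byte value length, then the value bytes, repeated per XAttr. A standalone sketch that walks that layout; the bit-field constants are assumptions (the real values are private to XAttrFormat), and name ids are not resolved to strings here:

  static void dumpPackedXAttrs(byte[] attrs) {
    // Assumed bit layout: top bits of the 4-byte header hold the namespace,
    // the remaining bits the interned name id.
    final int NAMESPACE_OFFSET = 30;              // assumed bit position
    final int NAMESPACE_MASK = 0x3;               // assumed namespace width
    final int NAME_MASK = (1 << NAMESPACE_OFFSET) - 1;
    java.nio.ByteBuffer buf = java.nio.ByteBuffer.wrap(attrs); // big-endian
    while (buf.remaining() > 0) {
      int header = buf.getInt();
      int ns = (header >>> NAMESPACE_OFFSET) & NAMESPACE_MASK;
      int nameId = header & NAME_MASK;
      int vlen = buf.getShort() & 0xffff;         // 2-byte unsigned length
      byte[] value = new byte[vlen];
      buf.get(value);
      System.out.printf("namespace=%d nameId=%d valueLength=%d%n",
          ns, nameId, vlen);
    }
  }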
Use of org.apache.hadoop.fs.XAttr in project hadoop by apache: class XAttrPermissionFilter, method filterXAttrsForApi.
  /**
   * Filter the given XAttrs down to those the caller is allowed to see
   * through the public API, based on namespace and caller privileges.
   */
  static List<XAttr> filterXAttrsForApi(FSPermissionChecker pc,
      List<XAttr> xAttrs, boolean isRawPath) {
    assert xAttrs != null : "xAttrs can not be null";
    if (xAttrs.isEmpty()) {
      return xAttrs;
    }
    List<XAttr> filteredXAttrs = Lists.newArrayListWithCapacity(xAttrs.size());
    final boolean isSuperUser = pc.isSuperUser();
    for (XAttr xAttr : xAttrs) {
      if (xAttr.getNameSpace() == XAttr.NameSpace.USER) {
        filteredXAttrs.add(xAttr);
      } else if (xAttr.getNameSpace() == XAttr.NameSpace.TRUSTED
          && isSuperUser) {
        filteredXAttrs.add(xAttr);
      } else if (xAttr.getNameSpace() == XAttr.NameSpace.RAW
          && isSuperUser && isRawPath) {
        filteredXAttrs.add(xAttr);
      } else if (XAttrHelper.getPrefixedName(xAttr)
          .equals(SECURITY_XATTR_UNREADABLE_BY_SUPERUSER)) {
        filteredXAttrs.add(xAttr);
      }
    }
    return filteredXAttrs;
  }
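
The branch order above encodes per-namespace visibility rules. The same rules restated as a single predicate, as a sketch for illustration only; the production code keeps the explicit loop:

  // Sketch: visibility rules restated as one predicate.
  //   USER     -> always visible
  //   TRUSTED  -> superuser only
  //   RAW      -> superuser on a /.reserved/raw path only
  //   others   -> only the special "unreadable by superuser" security xattr
  static boolean isVisibleToCaller(XAttr xAttr, boolean isSuperUser,
      boolean isRawPath) {
    switch (xAttr.getNameSpace()) {
      case USER:
        return true;
      case TRUSTED:
        return isSuperUser;
      case RAW:
        return isSuperUser && isRawPath;
      default:
        return XAttrHelper.getPrefixedName(xAttr)
            .equals(SECURITY_XATTR_UNREADABLE_BY_SUPERUSER);
    }
  }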