Usage of org.apache.hadoop.fs.XAttr in the Apache Hadoop project:
class XAttrHelper, method buildXAttrMap.
/**
 * Build an xattr map from an <code>XAttr</code> list; each map key is the
 * xattr name including its namespace prefix, and the value is the raw
 * xattr value.
 *
 * @param xAttrs list of xattrs to convert, may be null
 * @return map of prefixed xattr name to value, or null when xAttrs is null
 */
public static Map<String, byte[]> buildXAttrMap(List<XAttr> xAttrs) {
  if (xAttrs == null) {
    // Preserve the null-in/null-out contract callers rely on.
    return null;
  }
  // Presize the map: each xattr contributes exactly one entry, so this
  // avoids intermediate rehashing for large xattr lists.
  Map<String, byte[]> xAttrMap =
      Maps.newHashMapWithExpectedSize(xAttrs.size());
  for (XAttr xAttr : xAttrs) {
    String name = getPrefixedName(xAttr);
    byte[] value = xAttr.getValue();
    if (value == null) {
      // Normalize absent values to an empty array so map lookups for a
      // present xattr never return null.
      value = new byte[0];
    }
    xAttrMap.put(name, value);
  }
  return xAttrMap;
}
Usage of org.apache.hadoop.fs.XAttr in the Apache Hadoop project:
class FSDirAttrOp, method setDirStoragePolicy.
/**
 * Set (or clear) the block storage policy xattr on the directory inode
 * resolved from the given path.
 *
 * @param fsd the FSDirectory instance
 * @param iip resolved path to the target directory inode
 * @param policyId storage policy id to apply; the UNSPECIFIED sentinel
 *                 removes any existing storage-policy xattr instead
 * @throws IOException if the xattr update fails
 */
private static void setDirStoragePolicy(FSDirectory fsd, INodesInPath iip, byte policyId) throws IOException {
  final INode target = FSDirectory.resolveLastINode(iip);
  final List<XAttr> existing = XAttrStorage.readINodeXAttrs(target);
  final XAttr policyXAttr = BlockStoragePolicySuite.buildXAttr(policyId);
  final List<XAttr> updated;
  if (policyId == HdfsConstants.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED) {
    // Unspecified policy: strip the storage-policy xattr if it is present.
    updated = FSDirXAttrOp.filterINodeXAttrs(
        existing, Lists.newArrayList(policyXAttr), Lists.newArrayList());
  } else {
    // Otherwise create or replace the storage-policy xattr.
    updated = FSDirXAttrOp.setINodeXAttrs(fsd, existing,
        Arrays.asList(policyXAttr),
        EnumSet.of(XAttrSetFlag.CREATE, XAttrSetFlag.REPLACE));
  }
  XAttrStorage.updateINodeXAttrs(target, updated, iip.getLatestSnapshotId());
}
Usage of org.apache.hadoop.fs.XAttr in the Apache Hadoop project:
class FSDirXAttrOp, method filterINodeXAttrs.
/**
 * Filter XAttrs from a list of existing XAttrs. Removes matched XAttrs from
 * toFilter and puts them into filtered. Upon completion,
 * toFilter contains the filter XAttrs that were not found, while
 * filtered contains the XAttrs that were found.
 *
 * Note: this method mutates both toFilter (matched entries are removed via
 * the iterator) and filtered (matched entries are appended); existingXAttrs
 * itself is never modified.
 *
 * @param existingXAttrs Existing XAttrs to be filtered
 * @param toFilter XAttrs to filter from the existing XAttrs
 * @param filtered Return parameter, XAttrs that were filtered
 * @return List of XAttrs that does not contain filtered XAttrs
 * @throws AccessControlException if an attempt is made to remove the
 *         security.hdfs.unreadable.by.superuser xattr
 */
@VisibleForTesting
static List<XAttr> filterINodeXAttrs(final List<XAttr> existingXAttrs, final List<XAttr> toFilter, final List<XAttr> filtered) throws AccessControlException {
if (existingXAttrs == null || existingXAttrs.isEmpty() || toFilter == null || toFilter.isEmpty()) {
// Nothing to filter (or nothing to filter from): return the input as-is.
return existingXAttrs;
}
// Populate a new list with XAttrs that pass the filter
List<XAttr> newXAttrs = Lists.newArrayListWithCapacity(existingXAttrs.size());
for (XAttr a : existingXAttrs) {
boolean add = true;
for (ListIterator<XAttr> it = toFilter.listIterator(); it.hasNext(); ) {
XAttr filter = it.next();
// The encryption-zone key-id xattr must never be removable, even when
// no matching existing xattr is present; check every filter entry.
Preconditions.checkArgument(!KEYID_XATTR.equalsIgnoreValue(filter), "The encryption zone xattr should never be deleted.");
if (UNREADABLE_BY_SUPERUSER_XATTR.equalsIgnoreValue(filter)) {
throw new AccessControlException("The xattr '" + SECURITY_XATTR_UNREADABLE_BY_SUPERUSER + "' can not be deleted.");
}
if (a.equalsIgnoreValue(filter)) {
add = false;
// Matched: consume this filter entry so toFilter ends up holding only
// the entries that were NOT found among existingXAttrs.
it.remove();
filtered.add(filter);
break;
}
}
if (add) {
newXAttrs.add(a);
}
}
return newXAttrs;
}
Usage of org.apache.hadoop.fs.XAttr in the Apache Hadoop project:
class FSDirXAttrOp, method getXAttrs.
/**
 * Get xattrs for a path, optionally restricted to a requested subset.
 *
 * @param fsd the FSDirectory instance
 * @param srcArg path whose xattrs are read
 * @param xAttrs requested xattrs; null or empty means "return all visible"
 * @return the permission-filtered xattrs; when a subset was requested, the
 *         matching entries in request order
 * @throws IOException if xattrs are disabled, permission checks fail, or a
 *         requested xattr is not present
 */
static List<XAttr> getXAttrs(FSDirectory fsd, final String srcArg, List<XAttr> xAttrs) throws IOException {
  final String src = srcArg;
  checkXAttrsConfigFlag(fsd);
  final FSPermissionChecker pc = fsd.getPermissionChecker();
  final boolean isRawPath = FSDirectory.isReservedRawName(src);
  final boolean getAll = xAttrs == null || xAttrs.isEmpty();
  if (!getAll) {
    // Verify the caller may use the namespaces of the requested xattrs
    // before touching the filesystem.
    XAttrPermissionFilter.checkPermissionForApi(pc, xAttrs, isRawPath);
  }
  final INodesInPath iip = fsd.resolvePath(pc, src, DirOp.READ);
  if (fsd.isPermissionEnabled()) {
    fsd.checkPathAccess(pc, iip, FsAction.READ);
  }
  final List<XAttr> all = FSDirXAttrOp.getXAttrs(fsd, iip);
  // Drop xattrs the caller is not allowed to see through this API.
  final List<XAttr> visible = XAttrPermissionFilter.filterXAttrsForApi(pc, all, isRawPath);
  if (getAll) {
    return visible;
  }
  if (visible == null || visible.isEmpty()) {
    throw new IOException("At least one of the attributes provided was not found.");
  }
  final List<XAttr> toGet = Lists.newArrayListWithCapacity(xAttrs.size());
  for (XAttr requested : xAttrs) {
    // Look for a visible xattr matching the requested namespace and name.
    XAttr match = null;
    for (XAttr candidate : visible) {
      if (requested.getNameSpace() == candidate.getNameSpace()
          && requested.getName().equals(candidate.getName())) {
        match = candidate;
        break;
      }
    }
    if (match == null) {
      throw new IOException("At least one of the attributes provided was not found.");
    }
    toGet.add(match);
  }
  return toGet;
}
Usage of org.apache.hadoop.fs.XAttr in the Apache Hadoop project:
class FSDirErasureCodingOp, method getErasureCodingPolicyForPath.
/**
 * Resolve the effective erasure coding policy for a path by walking the
 * inode chain from the deepest component toward the root under the
 * directory read lock.
 *
 * @param fsd the FSDirectory instance whose lock guards the traversal
 * @param iip resolved path; must not be null
 * @return the effective policy, or null if none applies (including the
 *         symlink case, which is not yet supported)
 * @throws IOException if decoding a stored policy name fails
 */
private static ErasureCodingPolicy getErasureCodingPolicyForPath(FSDirectory fsd, INodesInPath iip) throws IOException {
  Preconditions.checkNotNull(iip, "INodes cannot be null");
  fsd.readLock();
  try {
    final List<INode> chain = iip.getReadOnlyINodes();
    // Walk leaf-first so the nearest explicit setting wins.
    for (int idx = chain.size() - 1; idx >= 0; idx--) {
      final INode node = chain.get(idx);
      if (node == null) {
        continue;
      }
      if (node.isFile()) {
        // A file stores its policy id directly; a negative id means none.
        final byte policyId = node.asFile().getErasureCodingPolicyID();
        if (policyId < 0) {
          return null;
        }
        return ErasureCodingPolicyManager.getPolicyByID(policyId);
      }
      // TODO: properly support symlinks
      if (node.isSymlink()) {
        return null;
      }
      // Directories record the policy as an xattr holding the policy name.
      final XAttrFeature xattrFeature = node.getXAttrFeature();
      if (xattrFeature == null) {
        continue;
      }
      final XAttr ecXAttr = xattrFeature.getXAttr(XATTR_ERASURECODING_POLICY);
      if (ecXAttr == null) {
        continue;
      }
      final DataInputStream in =
          new DataInputStream(new ByteArrayInputStream(ecXAttr.getValue()));
      return ErasureCodingPolicyManager.getPolicyByName(WritableUtils.readString(in));
    }
  } finally {
    fsd.readUnlock();
  }
  return null;
}
Aggregations