Use of org.apache.hadoop.security.AccessControlException in project hadoop by apache.
Class WebHdfsFileSystem, method getDelegationToken.
@Override
public Token<DelegationTokenIdentifier> getDelegationToken(
    final String renewer) throws IOException {
  final HttpOpParam.Op op = GetOpParam.Op.GETDELEGATIONTOKEN;
  Token<DelegationTokenIdentifier> token =
      new FsPathResponseRunner<Token<DelegationTokenIdentifier>>(
          op, null, new RenewerParam(renewer)) {
        @Override
        Token<DelegationTokenIdentifier> decodeResponse(Map<?, ?> json)
            throws IOException {
          return JsonUtilClient.toDelegationToken(json);
        }
      }.run();
  if (token != null) {
    token.setService(tokenServiceName);
  } else {
    if (disallowFallbackToInsecureCluster) {
      throw new AccessControlException(CANT_FALLBACK_TO_INSECURE_MSG);
    }
  }
  return token;
}
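For context, applications normally reach this code through the public FileSystem API rather than by building the response runner themselves. A minimal sketch, assuming a WebHDFS URI and a renewer name that are placeholders only:

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.security.token.Token;

public class FetchDelegationToken {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // The URI and renewer below are placeholders for illustration only.
    FileSystem fs = FileSystem.get(URI.create("webhdfs://namenode:9870"), conf);
    // On a secure cluster this returns a token whose service is set by
    // WebHdfsFileSystem; on an insecure cluster it may return null or throw
    // AccessControlException, depending on the fallback configuration.
    Token<?> token = fs.getDelegationToken("yarn");
    System.out.println(token == null ? "no token (insecure cluster)" : token);
  }
}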
Use of org.apache.hadoop.security.AccessControlException in project hadoop by apache.
Class CacheManager, method listCacheDirectives.
public BatchedListEntries<CacheDirectiveEntry> listCacheDirectives(
    long prevId, CacheDirectiveInfo filter, FSPermissionChecker pc)
    throws IOException {
  assert namesystem.hasReadLock();
  final int NUM_PRE_ALLOCATED_ENTRIES = 16;
  String filterPath = null;
  if (filter.getPath() != null) {
    filterPath = validatePath(filter);
  }
  if (filter.getReplication() != null) {
    throw new InvalidRequestException(
        "Filtering by replication is unsupported.");
  }
  // Querying for a single ID
  final Long id = filter.getId();
  if (id != null) {
    if (!directivesById.containsKey(id)) {
      throw new InvalidRequestException("Did not find requested id " + id);
    }
    // Since we use a tailMap on directivesById, setting prev to id-1 gets
    // us the directive with the id (if present)
    prevId = id - 1;
  }
  ArrayList<CacheDirectiveEntry> replies =
      new ArrayList<CacheDirectiveEntry>(NUM_PRE_ALLOCATED_ENTRIES);
  int numReplies = 0;
  SortedMap<Long, CacheDirective> tailMap = directivesById.tailMap(prevId + 1);
  for (Entry<Long, CacheDirective> cur : tailMap.entrySet()) {
    if (numReplies >= maxListCacheDirectivesNumResponses) {
      return new BatchedListEntries<CacheDirectiveEntry>(replies, true);
    }
    CacheDirective curDirective = cur.getValue();
    CacheDirectiveInfo info = cur.getValue().toInfo();
    // When listing a single ID, any entry with a different ID means we have
    // already returned the requested item and should break out.
    if (id != null && !(info.getId().equals(id))) {
      break;
    }
    if (filter.getPool() != null && !info.getPool().equals(filter.getPool())) {
      continue;
    }
    if (filterPath != null
        && !info.getPath().toUri().getPath().equals(filterPath)) {
      continue;
    }
    boolean hasPermission = true;
    if (pc != null) {
      try {
        pc.checkPermission(curDirective.getPool(), FsAction.READ);
      } catch (AccessControlException e) {
        hasPermission = false;
      }
    }
    if (hasPermission) {
      replies.add(new CacheDirectiveEntry(info, cur.getValue().toStats()));
      numReplies++;
    }
  }
  return new BatchedListEntries<CacheDirectiveEntry>(replies, false);
}
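On the client side this NameNode method is reached through DistributedFileSystem.listCacheDirectives, which pages through the batched results behind a RemoteIterator. A minimal sketch, where the pool name is a placeholder:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.RemoteIterator;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.CacheDirectiveEntry;
import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo;

public class ListCacheDirectives {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    DistributedFileSystem dfs = (DistributedFileSystem) FileSystem.get(conf);
    // Filter by pool; directives in pools the caller cannot READ are
    // silently skipped by CacheManager.listCacheDirectives on the NameNode.
    CacheDirectiveInfo filter = new CacheDirectiveInfo.Builder()
        .setPool("analytics")  // placeholder pool name
        .build();
    RemoteIterator<CacheDirectiveEntry> it = dfs.listCacheDirectives(filter);
    while (it.hasNext()) {
      CacheDirectiveEntry entry = it.next();
      System.out.println(entry.getInfo() + " -> " + entry.getStats());
    }
  }
}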
Use of org.apache.hadoop.security.AccessControlException in project hadoop by apache.
Class FSDirectory, method resolvePath.
/**
 * Resolves a given path into an INodesInPath. All ancestor inodes that
 * exist are validated as traversable directories. Symlinks in the ancestry
 * will generate an UnresolvedLinkException. The returned IIP will be an
 * accessible path that also passed additional sanity checks based on how
 * the path will be used as specified by the DirOp.
 *   READ:   Expands reserved paths and performs permission checks
 *           during traversal. Raw paths are only accessible by a superuser.
 *   WRITE:  In addition to READ checks, ensures the path is not a
 *           snapshot path.
 *   CREATE: In addition to WRITE checks, ensures path does not contain
 *           illegal character sequences.
 *
 * @param pc A permission checker for traversal checks. Pass null for
 *           no permission checks.
 * @param src The path to resolve.
 * @param dirOp The {@link DirOp} that controls additional checks.
 * @param resolveLink If false, only ancestor symlinks will be checked. If
 *           true, the last inode will also be checked.
 * @return if the path indicates an inode, return path after replacing up to
 *         {@code <inodeid>} with the corresponding path of the inode, else
 *         the path in {@code src} as is. If the path refers to a path in
 *         the "raw" directory, return the non-raw pathname.
 * @throws FileNotFoundException
 * @throws AccessControlException
 * @throws ParentNotDirectoryException
 * @throws UnresolvedLinkException
 */
@VisibleForTesting
public INodesInPath resolvePath(FSPermissionChecker pc, String src,
    DirOp dirOp) throws UnresolvedLinkException, FileNotFoundException,
    AccessControlException, ParentNotDirectoryException {
  boolean isCreate = (dirOp == DirOp.CREATE || dirOp == DirOp.CREATE_LINK);
  // prevent creation of new invalid paths
  if (isCreate && !DFSUtil.isValidName(src)) {
    throw new InvalidPathException("Invalid file name: " + src);
  }
  byte[][] components = INode.getPathComponents(src);
  boolean isRaw = isReservedRawName(components);
  if (isPermissionEnabled && pc != null && isRaw) {
    pc.checkSuperuserPrivilege();
  }
  components = resolveComponents(components, this);
  INodesInPath iip = INodesInPath.resolve(rootDir, components, isRaw);
  // Verify that all ancestors are traversable directories. A
  // ParentNotDirectoryException (PNDE) is only surfaced to callers that
  // create namespace entries; every other operation reports it as an
  // AccessControlException.
  try {
    checkTraverse(pc, iip, dirOp);
  } catch (ParentNotDirectoryException pnde) {
    if (!isCreate) {
      throw new AccessControlException(pnde.getMessage());
    }
    throw pnde;
  }
  return iip;
}
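The DirOp level described in the javadoc is chosen by the calling operation. Below is a rough sketch of how a read-only operation living in the same NameNode package might drive resolvePath; the helper class, its method name, and the surrounding locking calls are illustrative assumptions, only resolvePath and DirOp come from the snippet above:

package org.apache.hadoop.hdfs.server.namenode;

import java.io.IOException;

// Hypothetical helper in the NameNode package, for illustration only.
class ResolvePathSketch {
  // Resolves a path for a read operation. An operation that modifies or
  // creates namespace entries would pass DirOp.WRITE or DirOp.CREATE instead.
  static INodesInPath resolveForRead(FSDirectory fsd, String path)
      throws IOException {
    FSPermissionChecker pc = fsd.getPermissionChecker();
    fsd.readLock();
    try {
      return fsd.resolvePath(pc, path, FSDirectory.DirOp.READ);
    } finally {
      fsd.readUnlock();
    }
  }
}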
Use of org.apache.hadoop.security.AccessControlException in project hadoop by apache.
Class FSDirStatAndListingOp, method getFileInfo.
/**
 * Get the file info for a specific file.
 *
 * @param srcArg The string representation of the path to the file
 * @param resolveLink whether to throw UnresolvedLinkException
 *        if src refers to a symlink
 *
 * @return object containing information regarding the file
 *         or null if file not found
 */
static HdfsFileStatus getFileInfo(FSDirectory fsd, String srcArg,
    boolean resolveLink) throws IOException {
  DirOp dirOp = resolveLink ? DirOp.READ : DirOp.READ_LINK;
  FSPermissionChecker pc = fsd.getPermissionChecker();
  final INodesInPath iip;
  if (pc.isSuperUser()) {
    // Path resolution may fail with an AccessControlException (for example
    // when a ParentNotDirectoryException is translated by resolvePath);
    // allow the superuser to receive null instead of the exception.
    try {
      iip = fsd.resolvePath(pc, srcArg, dirOp);
    } catch (AccessControlException ace) {
      return null;
    }
  } else {
    iip = fsd.resolvePath(pc, srcArg, dirOp);
  }
  return getFileInfo(fsd, iip);
}
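From a client's point of view this server-side getFileInfo backs calls such as FileSystem.getFileStatus; a null result here surfaces there as a FileNotFoundException. A minimal sketch, with the path being a placeholder:

import java.io.FileNotFoundException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class StatExample {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.get(new Configuration());
    Path p = new Path("/user/alice/data");  // placeholder path
    try {
      // Follows symlinks (the resolveLink == true case above).
      FileStatus status = fs.getFileStatus(p);
      System.out.println(status.getPath() + " owner=" + status.getOwner());
    } catch (FileNotFoundException e) {
      // Corresponds to getFileInfo returning null on the NameNode.
      System.out.println(p + " does not exist");
    }
  }
}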
Use of org.apache.hadoop.security.AccessControlException in project hadoop by apache.
Class TestStickyBit, method confirmDeletingFiles.
/**
* Test that one user can't delete another user's file when the sticky bit is
* set.
*/
private void confirmDeletingFiles(Configuration conf, Path p)
    throws Exception {
  // Write a file to the new temp directory as a regular user
  Path file = new Path(p, "foo");
  writeFile(hdfsAsUser1, file);

  // Make sure the correct user is the owner
  assertEquals(user1.getShortUserName(),
      hdfsAsUser1.getFileStatus(file).getOwner());

  // Log onto cluster as another user and attempt to delete the file
  try {
    hdfsAsUser2.delete(file, false);
    fail("Shouldn't be able to delete someone else's file with SB on");
  } catch (IOException ioe) {
    assertTrue(ioe instanceof AccessControlException);
    assertTrue(ioe.getMessage().contains("sticky bit"));
    assertTrue(ioe.getMessage().contains("user=" + user2.getUserName()));
    assertTrue(ioe.getMessage().contains("path=\"" + file + "\""));
    assertTrue(ioe.getMessage().contains("parent=\"" + file.getParent() + "\""));
  }
}
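For reference, the sticky bit this test relies on is set on the parent directory ahead of time. A minimal sketch of doing that through the FileSystem API, with the directory path being a placeholder:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.fs.permission.FsPermission;

public class EnableStickyBit {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.get(new Configuration());
    Path dir = new Path("/tmp/shared");  // placeholder directory
    fs.mkdirs(dir);
    // World-writable directory with the sticky bit set (mode 1777), so only
    // the file owner (or the superuser) may delete or rename entries in it.
    fs.setPermission(dir,
        new FsPermission(FsAction.ALL, FsAction.ALL, FsAction.ALL, true));
  }
}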