Example 1 with OzonePrefixPathImpl

Use of org.apache.hadoop.ozone.om.OzonePrefixPathImpl in project ozone by apache.

From class TestOzoneFileSystem, method testListStatusOnLargeDirectoryForACLCheck:

@Test
public void testListStatusOnLargeDirectoryForACLCheck() throws Exception {
    String keyName = "dir1/dir2/testListStatusOnLargeDirectoryForACLCheck";
    Path root = new Path(OZONE_URI_DELIMITER, keyName);
    Set<String> paths = new TreeSet<>();
    int numDirs = LISTING_PAGE_SIZE + LISTING_PAGE_SIZE / 2;
    for (int i = 0; i < numDirs; i++) {
        Path p = new Path(root, String.valueOf(i));
        getFs().mkdirs(p);
        paths.add(keyName + OM_KEY_PREFIX + p.getName());
    }
    // A non-existent key name should result in KEY_NOT_FOUND.
    try {
        new OzonePrefixPathImpl(getVolumeName(), getBucketName(), "invalidKey", cluster.getOzoneManager().getKeyManager());
        Assert.fail("Should throw KEY_NOT_FOUND as the key name does not exist.");
    } catch (OMException ome) {
        Assert.assertEquals(OMException.ResultCodes.KEY_NOT_FOUND, ome.getResult());
    }
    OzonePrefixPathImpl ozonePrefixPath = new OzonePrefixPathImpl(getVolumeName(), getBucketName(), keyName, cluster.getOzoneManager().getKeyManager());
    OzoneFileStatus status = ozonePrefixPath.getOzoneFileStatus();
    Assert.assertNotNull(status);
    Assert.assertEquals(keyName, status.getTrimmedName());
    Assert.assertTrue(status.isDirectory());
    Iterator<? extends OzoneFileStatus> pathItr = ozonePrefixPath.getChildren(keyName);
    Assert.assertTrue("Failed to list keyPath:" + keyName, pathItr.hasNext());
    Set<String> actualPaths = new TreeSet<>();
    while (pathItr.hasNext()) {
        String pathname = pathItr.next().getTrimmedName();
        actualPaths.add(pathname);
        // No sub-paths are expected, so the child iterator should be empty.
        Iterator<? extends OzoneFileStatus> subPathItr = ozonePrefixPath.getChildren(pathname);
        Assert.assertNotNull(subPathItr);
        Assert.assertFalse("Failed to list keyPath: " + pathname, subPathItr.hasNext());
    }
    Assert.assertEquals("ListStatus failed", paths.size(), actualPaths.size());
    for (String pathname : actualPaths) {
        paths.remove(pathname);
    }
    Assert.assertTrue("ListStatus failed:" + paths, paths.isEmpty());
}
Also used : Path(org.apache.hadoop.fs.Path) TreeSet(java.util.TreeSet) OzonePrefixPathImpl(org.apache.hadoop.ozone.om.OzonePrefixPathImpl) OMException(org.apache.hadoop.ozone.om.exceptions.OMException) OzoneFileStatus(org.apache.hadoop.ozone.om.helpers.OzoneFileStatus) Test(org.junit.Test)
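
The same calls exercised by this test can be used to enumerate the direct children of a prefix outside of a test. The sketch below is illustrative only: printDirectChildren is a hypothetical helper and the KeyManager reference is assumed to be obtained elsewhere (for example from the OzoneManager, as above); the OzonePrefixPathImpl calls simply mirror the test.

// Minimal sketch; the helper name and how the KeyManager is obtained are assumptions.
private void printDirectChildren(String volume, String bucket, String keyName,
    KeyManager keyManager) throws IOException {
    OzonePrefixPathImpl prefixPath =
        new OzonePrefixPathImpl(volume, bucket, keyName, keyManager);
    OzoneFileStatus status = prefixPath.getOzoneFileStatus();
    if (!status.isDirectory()) {
        // getChildren() is only valid for directories (see Example 3 below).
        return;
    }
    Iterator<? extends OzoneFileStatus> children = prefixPath.getChildren(keyName);
    while (children.hasNext()) {
        System.out.println(children.next().getTrimmedName());
    }
}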

Example 2 with OzonePrefixPathImpl

Use of org.apache.hadoop.ozone.om.OzonePrefixPathImpl in project ozone by apache.

From class OMClientRequest, method checkACLsWithFSO:

/**
 * Checks ACLs for the given Ozone key in an FSO (FILE_SYSTEM_OPTIMIZED) bucket.
 *
 * @param ozoneManager the OzoneManager used to resolve the key and evaluate ACLs
 * @param volumeName   volume containing the key
 * @param bucketName   bucket containing the key
 * @param keyName      key to check access for
 * @param aclType      ACL right being requested
 * @throws IOException if the key cannot be resolved or access is denied
 */
protected void checkACLsWithFSO(OzoneManager ozoneManager, String volumeName, String bucketName, String keyName, IAccessAuthorizer.ACLType aclType) throws IOException {
    // TODO: Presently not populating sub-paths under a single bucket
    // lock. Need to revisit this to handle any concurrent operations
    // along with this.
    OzonePrefixPathImpl pathViewer = new OzonePrefixPathImpl(volumeName, bucketName, keyName, ozoneManager.getKeyManager());
    OzoneObj obj = OzoneObjInfo.Builder.newBuilder()
        .setResType(OzoneObj.ResourceType.KEY)
        .setStoreType(OzoneObj.StoreType.OZONE)
        .setVolumeName(volumeName)
        .setBucketName(bucketName)
        .setKeyName(keyName)
        .setOzonePrefixPath(pathViewer)
        .build();
    RequestContext.Builder contextBuilder = RequestContext.newBuilder()
        .setAclRights(aclType)
        .setRecursiveAccessCheck(pathViewer.isCheckRecursiveAccess());
    // check Acl
    if (ozoneManager.getAclsEnabled()) {
        String volumeOwner = ozoneManager.getVolumeOwner(obj.getVolumeName(), contextBuilder.getAclRights(), obj.getResourceType());
        String bucketOwner = ozoneManager.getBucketOwner(obj.getVolumeName(), obj.getBucketName(), contextBuilder.getAclRights(), obj.getResourceType());
        UserGroupInformation currentUser = createUGI();
        contextBuilder.setClientUgi(currentUser);
        contextBuilder.setIp(getRemoteAddress());
        contextBuilder.setHost(getHostName());
        contextBuilder.setAclType(IAccessAuthorizer.ACLIdentityType.USER);
        boolean isVolOwner = isOwner(currentUser, volumeOwner);
        IAccessAuthorizer.ACLType parentAclRight = aclType;
        if (isVolOwner) {
            contextBuilder.setOwnerName(volumeOwner);
        } else {
            contextBuilder.setOwnerName(bucketOwner);
        }
        if (ozoneManager.isNativeAuthorizerEnabled()) {
            if (aclType == IAccessAuthorizer.ACLType.CREATE || aclType == IAccessAuthorizer.ACLType.DELETE || aclType == IAccessAuthorizer.ACLType.WRITE_ACL) {
                parentAclRight = IAccessAuthorizer.ACLType.WRITE;
            } else if (aclType == IAccessAuthorizer.ACLType.READ_ACL || aclType == IAccessAuthorizer.ACLType.LIST) {
                parentAclRight = IAccessAuthorizer.ACLType.READ;
            }
        } else {
            parentAclRight = IAccessAuthorizer.ACLType.READ;
        }
        OzoneObj volumeObj = OzoneObjInfo.Builder.newBuilder()
            .setResType(OzoneObj.ResourceType.VOLUME)
            .setStoreType(OzoneObj.StoreType.OZONE)
            .setVolumeName(volumeName)
            .setBucketName(bucketName)
            .setKeyName(keyName)
            .build();
        RequestContext volumeContext = RequestContext.newBuilder()
            .setClientUgi(currentUser)
            .setIp(getRemoteAddress())
            .setHost(getHostName())
            .setAclType(IAccessAuthorizer.ACLIdentityType.USER)
            .setAclRights(parentAclRight)
            .setOwnerName(volumeOwner)
            .build();
        ozoneManager.checkAcls(volumeObj, volumeContext, true);
        ozoneManager.checkAcls(obj, contextBuilder.build(), true);
    }
}
Also used : OzoneObj(org.apache.hadoop.ozone.security.acl.OzoneObj) IAccessAuthorizer(org.apache.hadoop.ozone.security.acl.IAccessAuthorizer) OzonePrefixPathImpl(org.apache.hadoop.ozone.om.OzonePrefixPathImpl) RequestContext(org.apache.hadoop.ozone.security.acl.RequestContext) UserGroupInformation(org.apache.hadoop.security.UserGroupInformation)
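
A concrete OM write request would call this check from its request-validation path before mutating the key. The snippet below is a hypothetical caller, not code from the project: the variables are assumed to be parsed from the incoming request, and DELETE is just one possible IAccessAuthorizer.ACLType; only the checkACLsWithFSO signature matches the method above.

// Hypothetical caller inside an OMClientRequest subclass (sketch only).
// volumeName, bucketName and keyName are assumed to come from the incoming request.
checkACLsWithFSO(ozoneManager, volumeName, bucketName, keyName,
    IAccessAuthorizer.ACLType.DELETE);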

Example 3 with OzonePrefixPathImpl

Use of org.apache.hadoop.ozone.om.OzonePrefixPathImpl in project ozone by apache.

From class TestOMKeyDeleteRequestWithFSO, method testOzonePrefixPathViewer:

@Test
public void testOzonePrefixPathViewer() throws Exception {
    // Add volume, bucket and key entries to OM DB.
    OMRequestTestUtils.addVolumeAndBucketToDB(volumeName, bucketName, omMetadataManager, BucketLayout.FILE_SYSTEM_OPTIMIZED);
    String ozoneKey = addKeyToTable();
    OmKeyInfo omKeyInfo = omMetadataManager.getKeyTable(getBucketLayout()).get(ozoneKey);
    // The key was added to the key table manually above, so it must exist.
    Assert.assertNotNull(omKeyInfo);
    // OzonePrefixPathImpl on a directory
    OzonePrefixPathImpl ozonePrefixPath = new OzonePrefixPathImpl(volumeName, bucketName, "c", keyManager);
    OzoneFileStatus status = ozonePrefixPath.getOzoneFileStatus();
    Assert.assertNotNull(status);
    Assert.assertEquals("c", status.getTrimmedName());
    Assert.assertTrue(status.isDirectory());
    verifyPath(ozonePrefixPath, "c", "c/d");
    verifyPath(ozonePrefixPath, "c/d", "c/d/e");
    verifyPath(ozonePrefixPath, "c/d/e", "c/d/e/file1");
    try {
        ozonePrefixPath.getChildren("c/d/e/file1");
        Assert.fail("Should throw INVALID_KEY_NAME as the given path is a file.");
    } catch (OMException ome) {
        Assert.assertEquals(OMException.ResultCodes.INVALID_KEY_NAME, ome.getResult());
    }
    // OzonePrefixPathImpl on a file
    ozonePrefixPath = new OzonePrefixPathImpl(volumeName, bucketName, "c/d/e/file1", keyManager);
    status = ozonePrefixPath.getOzoneFileStatus();
    Assert.assertNotNull(status);
    Assert.assertEquals("c/d/e/file1", status.getTrimmedName());
    Assert.assertEquals("c/d/e/file1", status.getKeyInfo().getKeyName());
    Assert.assertTrue(status.isFile());
}
Also used : OmKeyInfo(org.apache.hadoop.ozone.om.helpers.OmKeyInfo) OzonePrefixPathImpl(org.apache.hadoop.ozone.om.OzonePrefixPathImpl) OzoneFileStatus(org.apache.hadoop.ozone.om.helpers.OzoneFileStatus) OMException(org.apache.hadoop.ozone.om.exceptions.OMException) Test(org.junit.Test)
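
The verifyPath helper is not part of this excerpt. A minimal sketch consistent with how the test uses it, assuming each listed parent has exactly one child on this path; the assertion messages are invented:

// Sketch of the verifyPath helper referenced above (not shown in the excerpt).
// Asserts that pathName has exactly one child whose trimmed name is expectedPath.
private void verifyPath(OzonePrefixPathImpl ozonePrefixPath, String pathName,
    String expectedPath) throws IOException {
    Iterator<? extends OzoneFileStatus> pathItr = ozonePrefixPath.getChildren(pathName);
    Assert.assertTrue("Failed to list keyPath: " + pathName, pathItr.hasNext());
    Assert.assertEquals(expectedPath, pathItr.next().getTrimmedName());
    Assert.assertFalse("Expected exactly one child under " + pathName, pathItr.hasNext());
}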

Example 4 with OzonePrefixPathImpl

Use of org.apache.hadoop.ozone.om.OzonePrefixPathImpl in project ozone by apache.

From class TestOMKeyDeleteRequestWithFSO, method testRecursiveAccessCheck:

@Test
public void testRecursiveAccessCheck() throws Exception {
    // Add volume, bucket and key entries to OM DB.
    OMRequestTestUtils.addVolumeAndBucketToDB(volumeName, bucketName, omMetadataManager, getBucketLayout());
    // Case 1:
    // We create an empty directory structure.
    String parentKey = "x/y/";
    String key = "x/y/z/";
    addKeyToDirTable(volumeName, bucketName, key);
    // Instantiate PrefixPath for complete key.
    OzonePrefixPathImpl pathViewer = new OzonePrefixPathImpl(volumeName, bucketName, key, ozoneManager.getKeyManager());
    // 'x/y/z' has no sub-directories or sub files - recursive access check
    // should not be enabled for this case.
    Assert.assertFalse(pathViewer.isCheckRecursiveAccess());
    // Instantiate PrefixPath for parent key.
    pathViewer = new OzonePrefixPathImpl(volumeName, bucketName, parentKey, ozoneManager.getKeyManager());
    // 'x/y/' has a sub-directory 'z', hence, we should be performing recursive
    // access check.
    Assert.assertTrue(pathViewer.isCheckRecursiveAccess());
    // Case 2:
    // We create a directory structure with a file as the leaf node.
    // 'c/d/e/file1'.
    String ozoneKey = addKeyToTable();
    OmKeyInfo omKeyInfo = omMetadataManager.getKeyTable(getBucketLayout()).get(ozoneKey);
    // The key was added to the key table manually, so it must exist.
    Assert.assertNotNull(omKeyInfo);
    // Instantiate PrefixPath for parent key 'c/d/'.
    pathViewer = new OzonePrefixPathImpl(volumeName, bucketName, INTERMEDIATE_DIR, ozoneManager.getKeyManager());
    // 'c/d' has a sub-directory 'e', hence, we should be performing recursive
    // access check.
    Assert.assertTrue(pathViewer.isCheckRecursiveAccess());
    // Instantiate PrefixPath for complete directory structure (without file).
    pathViewer = new OzonePrefixPathImpl(volumeName, bucketName, PARENT_DIR, ozoneManager.getKeyManager());
    // 'c/d/e/' has a 'file1' under it, hence, we should be performing recursive
    // access check.
    Assert.assertTrue(pathViewer.isCheckRecursiveAccess());
    // Instantiate PrefixPath for complete file1.
    pathViewer = new OzonePrefixPathImpl(volumeName, bucketName, FILE_KEY, ozoneManager.getKeyManager());
    // Recursive access check is only enabled for directories, hence should be
    // false for file1.
    Assert.assertFalse(pathViewer.isCheckRecursiveAccess());
}
Also used : OmKeyInfo(org.apache.hadoop.ozone.om.helpers.OmKeyInfo) OzonePrefixPathImpl(org.apache.hadoop.ozone.om.OzonePrefixPathImpl) Test(org.junit.Test)
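
The flag asserted here is exactly what checkACLsWithFSO in Example 2 feeds into the ACL request context. A condensed sketch of that wiring, with variable names assumed for illustration:

// Sketch: how isCheckRecursiveAccess() is consumed (mirrors Example 2).
OzonePrefixPathImpl pathViewer = new OzonePrefixPathImpl(volumeName, bucketName,
    keyName, ozoneManager.getKeyManager());
RequestContext.Builder contextBuilder = RequestContext.newBuilder()
    .setAclRights(IAccessAuthorizer.ACLType.DELETE)
    .setRecursiveAccessCheck(pathViewer.isCheckRecursiveAccess());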

Example 5 with OzonePrefixPathImpl

Use of org.apache.hadoop.ozone.om.OzonePrefixPathImpl in project ozone by apache.

From class TestOzoneObj, method getBuilder:

private OzoneObjInfo.Builder getBuilder(String withVolume, String withBucket, String withKey) throws IOException {
    KeyManager mockKeyManager = mock(KeyManager.class);
    OzonePrefixPath prefixPathViewer = new OzonePrefixPathImpl("vol1", "buck1", "file", mockKeyManager);
    return OzoneObjInfo.Builder.newBuilder()
        .setResType(OzoneObj.ResourceType.VOLUME)
        .setStoreType(STORE)
        .setVolumeName(withVolume)
        .setBucketName(withBucket)
        .setKeyName(withKey)
        .setOzonePrefixPath(prefixPathViewer);
}
Also used : OzonePrefixPathImpl(org.apache.hadoop.ozone.om.OzonePrefixPathImpl) KeyManager(org.apache.hadoop.ozone.om.KeyManager)
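
Callers of getBuilder finish the chain with build() to obtain the OzoneObj under test. A hypothetical usage sketch; the asserted values simply echo the arguments passed in:

// Hypothetical usage of the builder returned above (sketch only).
OzoneObj obj = getBuilder("vol1", "buck1", "key1").build();
Assert.assertEquals("vol1", obj.getVolumeName());
Assert.assertEquals("buck1", obj.getBucketName());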

Aggregations

OzonePrefixPathImpl (org.apache.hadoop.ozone.om.OzonePrefixPathImpl) 5
Test (org.junit.Test) 3
OMException (org.apache.hadoop.ozone.om.exceptions.OMException) 2
OmKeyInfo (org.apache.hadoop.ozone.om.helpers.OmKeyInfo) 2
OzoneFileStatus (org.apache.hadoop.ozone.om.helpers.OzoneFileStatus) 2
TreeSet (java.util.TreeSet) 1
Path (org.apache.hadoop.fs.Path) 1
KeyManager (org.apache.hadoop.ozone.om.KeyManager) 1
IAccessAuthorizer (org.apache.hadoop.ozone.security.acl.IAccessAuthorizer) 1
OzoneObj (org.apache.hadoop.ozone.security.acl.OzoneObj) 1
RequestContext (org.apache.hadoop.ozone.security.acl.RequestContext) 1
UserGroupInformation (org.apache.hadoop.security.UserGroupInformation) 1