
Example 1 with AccessControlException

Use of org.apache.hadoop.security.AccessControlException in project hadoop by apache.

From the class TestFSRMStateStore, method verifyFilesUnreadablebyHDFS:

private void verifyFilesUnreadablebyHDFS(MiniDFSCluster cluster, Path root) throws Exception {
    DistributedFileSystem fs = cluster.getFileSystem();
    Queue<Path> paths = new LinkedList<>();
    paths.add(root);
    while (!paths.isEmpty()) {
        Path p = paths.poll();
        FileStatus stat = fs.getFileStatus(p);
        if (!stat.isDirectory()) {
            try {
                LOG.warn("\n\n ##Testing path [" + p + "]\n\n");
                // even the HDFS superuser must be denied read access to state files
                fs.open(p);
                Assert.fail("Super user should not be able to read [" + UserGroupInformation.getCurrentUser() + "] [" + p.getName() + "]");
            } catch (AccessControlException e) {
                // expected: the open is rejected before any stream is handed out
                Assert.assertTrue(e.getMessage().contains("superuser is not allowed to perform this operation"));
            } catch (Exception e) {
                Assert.fail("Should get an AccessControlException here");
            }
        } else {
            // breadth-first descent into subdirectories
            for (FileStatus f : fs.listStatus(p)) {
                paths.add(f.getPath());
            }
        }
    }
}
Also used : Path(org.apache.hadoop.fs.Path) FileStatus(org.apache.hadoop.fs.FileStatus) AccessControlException(org.apache.hadoop.security.AccessControlException) DistributedFileSystem(org.apache.hadoop.hdfs.DistributedFileSystem) LinkedList(java.util.LinkedList) IOException(java.io.IOException)
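
For contrast, here is a minimal sketch (not taken from the Hadoop sources above) of the same assertion pattern run as an ordinary user: the read is attempted under an unprivileged UGI, and anything other than an AccessControlException fails the test. The "guest" user, conf, and restrictedPath are illustrative assumptions; it also needs java.security.PrivilegedExceptionAction in addition to the imports listed above, and a test method declared "throws Exception".

UserGroupInformation guest = UserGroupInformation.createRemoteUser("guest");
guest.doAs(new PrivilegedExceptionAction<Void>() {

    @Override
    public Void run() throws Exception {
        // conf and restrictedPath are assumed to be in scope for this sketch
        FileSystem fs = FileSystem.get(conf);
        try {
            fs.open(restrictedPath);
            Assert.fail("guest should not be able to read " + restrictedPath);
        } catch (AccessControlException expected) {
            // expected: the permission check rejected the open
        }
        return null;
    }
});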

Example 2 with AccessControlException

Use of org.apache.hadoop.security.AccessControlException in project hadoop by apache.

From the class TestAppManager, method testQueueSubmitWithNoPermission:

@Test
public void testQueueSubmitWithNoPermission() throws IOException {
    YarnConfiguration conf = new YarnConfiguration();
    conf.set(YarnConfiguration.RM_SCHEDULER, CapacityScheduler.class.getCanonicalName());
    conf.set(PREFIX + "root.acl_submit_applications", " ");
    conf.set(PREFIX + "root.acl_administer_queue", " ");
    conf.set(PREFIX + "root.default.acl_submit_applications", " ");
    conf.set(PREFIX + "root.default.acl_administer_queue", " ");
    conf.set(YarnConfiguration.YARN_ACL_ENABLE, "true");
    MockRM mockRM = new MockRM(conf);
    ClientRMService rmService = mockRM.getClientRMService();
    SubmitApplicationRequest req = Records.newRecord(SubmitApplicationRequest.class);
    ApplicationSubmissionContext sub = Records.newRecord(ApplicationSubmissionContext.class);
    // appId is assumed to be a field defined elsewhere in the test class
    sub.setApplicationId(appId);
    ResourceRequest resReg = ResourceRequest.newInstance(Priority.newInstance(0), ResourceRequest.ANY, Resource.newInstance(1024, 1), 1);
    sub.setAMContainerResourceRequest(resReg);
    req.setApplicationSubmissionContext(sub);
    sub.setAMContainerSpec(mock(ContainerLaunchContext.class));
    try {
        rmService.submitApplication(req);
        // the single-space ACLs above match no user, so the submission must not succeed
        Assert.fail("Application submission should have been rejected by the queue ACLs");
    } catch (Exception e) {
        e.printStackTrace();
        if (e instanceof YarnException) {
            Assert.assertTrue(e.getCause() instanceof AccessControlException);
        } else {
            Assert.fail("A YarnException was expected: " + e.getMessage());
        }
    } finally {
        mockRM.close();
    }
}
Also used : YarnConfiguration(org.apache.hadoop.yarn.conf.YarnConfiguration) ApplicationSubmissionContext(org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext) AccessControlException(org.apache.hadoop.security.AccessControlException) ResourceRequest(org.apache.hadoop.yarn.api.records.ResourceRequest) ContainerLaunchContext(org.apache.hadoop.yarn.api.records.ContainerLaunchContext) CapacityScheduler(org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler) SubmitApplicationRequest(org.apache.hadoop.yarn.api.protocolrecords.SubmitApplicationRequest) YarnException(org.apache.hadoop.yarn.exceptions.YarnException) IOException(java.io.IOException) Test(org.junit.Test)
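
Two notes on this test. The single-space ACL value matches no user, so the submission must be rejected; and PREFIX is presumably CapacitySchedulerConfiguration.PREFIX ("yarn.scheduler.capacity."), though the snippet does not show its definition. Below is a small illustrative helper, not part of the original test, that captures the wrapper/cause contract the catch block relies on: ClientRMService surfaces the ACL denial as a YarnException whose cause is an AccessControlException.

// Illustrative helper; assumes org.junit.Assert plus the YarnException and
// AccessControlException imports listed above.
private static void assertRejectedByQueueAcls(Exception e) {
    Assert.assertTrue("expected a YarnException wrapper, got " + e,
            e instanceof YarnException);
    Assert.assertTrue("expected an AccessControlException cause, got " + e.getCause(),
            e.getCause() instanceof AccessControlException);
}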

Example 3 with AccessControlException

Use of org.apache.hadoop.security.AccessControlException in project hadoop by apache.

From the class TestCopyMapper, method testPreserve:

@Test(timeout = 40000)
public void testPreserve() {
    try {
        deleteState();
        createSourceData();
        UserGroupInformation tmpUser = UserGroupInformation.createRemoteUser("guest");
        final CopyMapper copyMapper = new CopyMapper();
        final Mapper<Text, CopyListingFileStatus, Text, Text>.Context context = tmpUser.doAs(new PrivilegedAction<Mapper<Text, CopyListingFileStatus, Text, Text>.Context>() {

            @Override
            public Mapper<Text, CopyListingFileStatus, Text, Text>.Context run() {
                try {
                    StubContext stubContext = new StubContext(getConfiguration(), null, 0);
                    return stubContext.getContext();
                } catch (Exception e) {
                    LOG.error("Exception encountered ", e);
                    throw new RuntimeException(e);
                }
            }
        });
        EnumSet<DistCpOptions.FileAttribute> preserveStatus = EnumSet.allOf(DistCpOptions.FileAttribute.class);
        preserveStatus.remove(DistCpOptions.FileAttribute.ACL);
        preserveStatus.remove(DistCpOptions.FileAttribute.XATTR);
        context.getConfiguration().set(DistCpConstants.CONF_LABEL_PRESERVE_STATUS, DistCpUtils.packAttributes(preserveStatus));
        touchFile(SOURCE_PATH + "/src/file");
        mkdirs(TARGET_PATH);
        cluster.getFileSystem().setPermission(new Path(TARGET_PATH), new FsPermission((short) 511));
        final FileSystem tmpFS = tmpUser.doAs(new PrivilegedAction<FileSystem>() {

            @Override
            public FileSystem run() {
                try {
                    return FileSystem.get(configuration);
                } catch (IOException e) {
                    LOG.error("Exception encountered ", e);
                    Assert.fail("Test failed: " + e.getMessage());
                    throw new RuntimeException("Test ought to fail here");
                }
            }
        });
        tmpUser.doAs(new PrivilegedAction<Integer>() {

            @Override
            public Integer run() {
                try {
                    copyMapper.setup(context);
                    copyMapper.map(new Text("/src/file"), new CopyListingFileStatus(tmpFS.getFileStatus(new Path(SOURCE_PATH + "/src/file"))), context);
                    Assert.fail("Expected copy to fail");
                } catch (AccessControlException e) {
                    // expected; the assertTrue(msg, true) below is a no-op, so the
                    // catch clause itself is the real check
                    Assert.assertTrue("Got exception: " + e.getMessage(), true);
                } catch (Exception e) {
                    throw new RuntimeException(e);
                }
                return null;
            }
        });
    } catch (Exception e) {
        LOG.error("Exception encountered ", e);
        Assert.fail("Test failed: " + e.getMessage());
    }
}
Also used : StubContext(org.apache.hadoop.tools.StubContext) Path(org.apache.hadoop.fs.Path) AccessControlException(org.apache.hadoop.security.AccessControlException) Text(org.apache.hadoop.io.Text) IOException(java.io.IOException) Mapper(org.apache.hadoop.mapreduce.Mapper) DistCpOptions(org.apache.hadoop.tools.DistCpOptions) CopyListingFileStatus(org.apache.hadoop.tools.CopyListingFileStatus) FileSystem(org.apache.hadoop.fs.FileSystem) FsPermission(org.apache.hadoop.fs.permission.FsPermission) UserGroupInformation(org.apache.hadoop.security.UserGroupInformation) Test(org.junit.Test)
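
Two details worth noting here. First, the copy run as "guest" is expected to fail, presumably because the preserved-attribute set still includes ownership and permissions, which an unprivileged user cannot apply to the target; that denial surfaces as the AccessControlException caught above. Second, new FsPermission((short) 511) relies on 511 decimal being 0777 octal; a sketch of the equivalent, more self-documenting call (behavior unchanged):

// Equivalent to new FsPermission((short) 511): 511 decimal == 0777 octal,
// i.e. a world-writable target directory.
cluster.getFileSystem().setPermission(new Path(TARGET_PATH),
        new FsPermission((short) 0777));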

Example 4 with AccessControlException

Use of org.apache.hadoop.security.AccessControlException in project hadoop by apache.

From the class TestAggregatedLogDeletionService, method testDeletion:

@Test
public void testDeletion() throws Exception {
    long now = System.currentTimeMillis();
    // retention is configured below as 1800 s: entries 2000 s old are eligible
    // for deletion, entries 1500 s old must be kept
    long toDeleteTime = now - (2000 * 1000);
    long toKeepTime = now - (1500 * 1000);
    String root = "mockfs://foo/";
    String remoteRootLogDir = root + "tmp/logs";
    String suffix = "logs";
    final Configuration conf = new Configuration();
    conf.setClass("fs.mockfs.impl", MockFileSystem.class, FileSystem.class);
    conf.set(YarnConfiguration.LOG_AGGREGATION_ENABLED, "true");
    conf.set(YarnConfiguration.LOG_AGGREGATION_RETAIN_SECONDS, "1800");
    conf.set(YarnConfiguration.NM_REMOTE_APP_LOG_DIR, remoteRootLogDir);
    conf.set(YarnConfiguration.NM_REMOTE_APP_LOG_DIR_SUFFIX, suffix);
    Path rootPath = new Path(root);
    FileSystem rootFs = rootPath.getFileSystem(conf);
    FileSystem mockFs = ((FilterFileSystem) rootFs).getRawFileSystem();
    Path remoteRootLogPath = new Path(remoteRootLogDir);
    Path userDir = new Path(remoteRootLogPath, "me");
    FileStatus userDirStatus = new FileStatus(0, true, 0, 0, toKeepTime, userDir);
    when(mockFs.listStatus(remoteRootLogPath)).thenReturn(new FileStatus[] { userDirStatus });
    ApplicationId appId1 = ApplicationId.newInstance(System.currentTimeMillis(), 1);
    Path userLogDir = new Path(userDir, suffix);
    Path app1Dir = new Path(userLogDir, appId1.toString());
    FileStatus app1DirStatus = new FileStatus(0, true, 0, 0, toDeleteTime, app1Dir);
    ApplicationId appId2 = ApplicationId.newInstance(System.currentTimeMillis(), 2);
    Path app2Dir = new Path(userLogDir, appId2.toString());
    FileStatus app2DirStatus = new FileStatus(0, true, 0, 0, toDeleteTime, app2Dir);
    ApplicationId appId3 = ApplicationId.newInstance(System.currentTimeMillis(), 3);
    Path app3Dir = new Path(userLogDir, appId3.toString());
    FileStatus app3DirStatus = new FileStatus(0, true, 0, 0, toDeleteTime, app3Dir);
    ApplicationId appId4 = ApplicationId.newInstance(System.currentTimeMillis(), 4);
    Path app4Dir = new Path(userLogDir, appId4.toString());
    FileStatus app4DirStatus = new FileStatus(0, true, 0, 0, toDeleteTime, app4Dir);
    ApplicationId appId5 = ApplicationId.newInstance(System.currentTimeMillis(), 5);
    Path app5Dir = new Path(userLogDir, appId5.toString());
    FileStatus app5DirStatus = new FileStatus(0, true, 0, 0, toDeleteTime, app5Dir);
    when(mockFs.listStatus(userLogDir)).thenReturn(new FileStatus[] { app1DirStatus, app2DirStatus, app3DirStatus, app4DirStatus, app5DirStatus });
    when(mockFs.listStatus(app1Dir)).thenReturn(new FileStatus[] {});
    Path app2Log1 = new Path(app2Dir, "host1");
    FileStatus app2Log1Status = new FileStatus(10, false, 1, 1, toDeleteTime, app2Log1);
    Path app2Log2 = new Path(app2Dir, "host2");
    FileStatus app2Log2Status = new FileStatus(10, false, 1, 1, toKeepTime, app2Log2);
    when(mockFs.listStatus(app2Dir)).thenReturn(new FileStatus[] { app2Log1Status, app2Log2Status });
    Path app3Log1 = new Path(app3Dir, "host1");
    FileStatus app3Log1Status = new FileStatus(10, false, 1, 1, toDeleteTime, app3Log1);
    Path app3Log2 = new Path(app3Dir, "host2");
    FileStatus app3Log2Status = new FileStatus(10, false, 1, 1, toDeleteTime, app3Log2);
    when(mockFs.delete(app3Dir, true)).thenThrow(new AccessControlException("Injected Error\nStack Trace :("));
    when(mockFs.listStatus(app3Dir)).thenReturn(new FileStatus[] { app3Log1Status, app3Log2Status });
    Path app4Log1 = new Path(app4Dir, "host1");
    FileStatus app4Log1Status = new FileStatus(10, false, 1, 1, toDeleteTime, app4Log1);
    Path app4Log2 = new Path(app4Dir, "host2");
    FileStatus app4Log2Status = new FileStatus(10, false, 1, 1, toDeleteTime, app4Log2);
    when(mockFs.listStatus(app4Dir)).thenReturn(new FileStatus[] { app4Log1Status, app4Log2Status });
    Path app5Log1 = new Path(app5Dir, "host1");
    FileStatus app5Log1Status = new FileStatus(10, false, 1, 1, toDeleteTime, app5Log1);
    Path app5Log2 = new Path(app5Dir, "host2");
    FileStatus app5Log2Status = new FileStatus(10, false, 1, 1, toKeepTime, app5Log2);
    when(mockFs.listStatus(app5Dir)).thenReturn(new FileStatus[] { app5Log1Status, app5Log2Status });
    final List<ApplicationId> finishedApplications = Collections.unmodifiableList(Arrays.asList(appId1, appId2, appId3, appId4));
    final List<ApplicationId> runningApplications = Collections.unmodifiableList(Arrays.asList(appId5));
    AggregatedLogDeletionService deletionService = new AggregatedLogDeletionService() {

        // sic: the method overridden here really is named creatRMClient in
        // AggregatedLogDeletionService
        @Override
        protected ApplicationClientProtocol creatRMClient() throws IOException {
            try {
                return createMockRMClient(finishedApplications, runningApplications);
            } catch (Exception e) {
                throw new IOException(e);
            }
        }

        @Override
        protected void stopRMClient() {
            // DO NOTHING
        }
    };
    deletionService.init(conf);
    deletionService.start();
    verify(mockFs, timeout(2000)).delete(app1Dir, true);
    verify(mockFs, timeout(2000).times(0)).delete(app2Dir, true);
    verify(mockFs, timeout(2000)).delete(app3Dir, true);
    verify(mockFs, timeout(2000)).delete(app4Dir, true);
    verify(mockFs, timeout(2000).times(0)).delete(app5Dir, true);
    verify(mockFs, timeout(2000)).delete(app5Log1, true);
    verify(mockFs, timeout(2000).times(0)).delete(app5Log2, true);
    deletionService.stop();
}
Also used : Path(org.apache.hadoop.fs.Path) FileStatus(org.apache.hadoop.fs.FileStatus) YarnConfiguration(org.apache.hadoop.yarn.conf.YarnConfiguration) Configuration(org.apache.hadoop.conf.Configuration) AccessControlException(org.apache.hadoop.security.AccessControlException) IOException(java.io.IOException) FileSystem(org.apache.hadoop.fs.FileSystem) FilterFileSystem(org.apache.hadoop.fs.FilterFileSystem) ApplicationId(org.apache.hadoop.yarn.api.records.ApplicationId) Test(org.junit.Test)
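
A detail worth noting in the stubbing near the top of this test: Mockito accepts thenThrow only for checked exceptions that the stubbed method declares. That works here because org.apache.hadoop.security.AccessControlException extends IOException, which FileSystem.delete(Path, boolean) throws. A minimal sketch of the pattern in isolation (the path is illustrative, and the code assumes a test method declared "throws Exception"):

// Legal because AccessControlException is an IOException subclass and
// FileSystem.delete(Path, boolean) declares IOException.
FileSystem fs = mock(FileSystem.class);
Path doomed = new Path("mockfs://foo/tmp/logs/me/logs/someApp");
when(fs.delete(doomed, true))
        .thenThrow(new AccessControlException("Injected Error"));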

Example 5 with AccessControlException

Use of org.apache.hadoop.security.AccessControlException in project hadoop by apache.

From the class TestNonAggregatingLogHandler, method runMockedFailedDirs:

/**
   * Runs a log handler against directories whose getFileStatus calls fail.
   * The function accepts the log handler, sets up the mocks to fail with
   * specific exceptions, and verifies that the deletion service receives the
   * correct calls.
   * 
   * @param logHandler the LogHandler implementation to test
   * 
   * @param appId the application id to use when sending events to the log
   * handler
   * 
   * @param user the user name to use
   * 
   * @param mockDelService a mock of the DeletionService against which the
   * delete calls are verified
   * 
   * @param dirsHandler a spy or mock of the LocalDirsHandlerService used when
   * creating the logHandler. It needs to be a spy so that the
   * getAllLogDirs() call can be intercepted.
   * 
   * @param conf the configuration used
   * 
   * @param spylfs a spy on the AbstractFileSystem object used when creating lfs
   * 
   * @param lfs the FileContext object used to mock the getFileStatus()
   * calls
   * 
   * @param localLogDirs list of the log dirs to run the test against; must
   * have at least 7 entries
   */
public static void runMockedFailedDirs(LogHandler logHandler, ApplicationId appId, String user, DeletionService mockDelService, LocalDirsHandlerService dirsHandler, Configuration conf, AbstractFileSystem spylfs, FileContext lfs, File[] localLogDirs) throws Exception {
    Map<ApplicationAccessType, String> appAcls = new HashMap<ApplicationAccessType, String>();
    if (localLogDirs.length < 7) {
        throw new IllegalArgumentException("Argument localLogDirs must have at least 7 entries");
    }
    Path[] localAppLogDirPaths = new Path[localLogDirs.length];
    for (int i = 0; i < localAppLogDirPaths.length; i++) {
        localAppLogDirPaths[i] = new Path(localLogDirs[i].getAbsolutePath(), appId.toString());
    }
    final List<String> localLogDirPaths = new ArrayList<String>(localLogDirs.length);
    for (int i = 0; i < localLogDirs.length; i++) {
        localLogDirPaths.add(localLogDirs[i].getAbsolutePath());
    }
    // setup mocks
    FsPermission defaultPermission = FsPermission.getDirDefault().applyUMask(lfs.getUMask());
    final FileStatus fs = new FileStatus(0, true, 1, 0, System.currentTimeMillis(), 0, defaultPermission, "", "", new Path(localLogDirs[0].getAbsolutePath()));
    doReturn(fs).when(spylfs).getFileStatus(isA(Path.class));
    doReturn(localLogDirPaths).when(dirsHandler).getLogDirsForCleanup();
    logHandler.handle(new LogHandlerAppStartedEvent(appId, user, null, appAcls));
    // test case where some dirs have the log dir to delete
    // mock some dirs throwing various exceptions
    // verify deletion happens only on the others
    Mockito.doThrow(new FileNotFoundException()).when(spylfs).getFileStatus(eq(localAppLogDirPaths[0]));
    doReturn(fs).when(spylfs).getFileStatus(eq(localAppLogDirPaths[1]));
    Mockito.doThrow(new AccessControlException()).when(spylfs).getFileStatus(eq(localAppLogDirPaths[2]));
    doReturn(fs).when(spylfs).getFileStatus(eq(localAppLogDirPaths[3]));
    Mockito.doThrow(new IOException()).when(spylfs).getFileStatus(eq(localAppLogDirPaths[4]));
    Mockito.doThrow(new UnsupportedFileSystemException("test")).when(spylfs).getFileStatus(eq(localAppLogDirPaths[5]));
    doReturn(fs).when(spylfs).getFileStatus(eq(localAppLogDirPaths[6]));
    logHandler.handle(new LogHandlerAppFinishedEvent(appId));
    testDeletionServiceCall(mockDelService, user, 5000, localAppLogDirPaths[1], localAppLogDirPaths[3], localAppLogDirPaths[6]);
}
Also used : Path(org.apache.hadoop.fs.Path) FileStatus(org.apache.hadoop.fs.FileStatus) HashMap(java.util.HashMap) ArrayList(java.util.ArrayList) FileNotFoundException(java.io.FileNotFoundException) AccessControlException(org.apache.hadoop.security.AccessControlException) IOException(java.io.IOException) LogHandlerAppStartedEvent(org.apache.hadoop.yarn.server.nodemanager.containermanager.loghandler.event.LogHandlerAppStartedEvent) ApplicationAccessType(org.apache.hadoop.yarn.api.records.ApplicationAccessType) LogHandlerAppFinishedEvent(org.apache.hadoop.yarn.server.nodemanager.containermanager.loghandler.event.LogHandlerAppFinishedEvent) UnsupportedFileSystemException(org.apache.hadoop.fs.UnsupportedFileSystemException) FsPermission(org.apache.hadoop.fs.permission.FsPermission)
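
One idiom to note: the stubbing above uses Mockito.doThrow(...).when(spylfs).getFileStatus(...) rather than when(spylfs.getFileStatus(...)).thenThrow(...). With a spy, the when(...) form would invoke the real getFileStatus while the stub is being set up; the doThrow/doReturn form avoids that. A minimal sketch of the idiom on a local spy (badPath is illustrative, and the code assumes a test method declared "throws Exception"):

// Spy on the real local AbstractFileSystem, then stub a single path to fail.
FileContext lfs = FileContext.getLocalFSFileContext();
AbstractFileSystem spylfs = Mockito.spy(lfs.getDefaultFileSystem());
Path badPath = new Path("/tmp/illustrative/appDir");
Mockito.doThrow(new AccessControlException())
        .when(spylfs).getFileStatus(Mockito.eq(badPath));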

Aggregations

AccessControlException (org.apache.hadoop.security.AccessControlException): 165 usages
Path (org.apache.hadoop.fs.Path): 72
IOException (java.io.IOException): 69
Test (org.junit.Test): 60
UserGroupInformation (org.apache.hadoop.security.UserGroupInformation): 44
FsPermission (org.apache.hadoop.fs.permission.FsPermission): 41
SnapshotAccessControlException (org.apache.hadoop.hdfs.protocol.SnapshotAccessControlException): 35
FileSystem (org.apache.hadoop.fs.FileSystem): 33
Configuration (org.apache.hadoop.conf.Configuration): 25
HdfsFileStatus (org.apache.hadoop.hdfs.protocol.HdfsFileStatus): 21
FileNotFoundException (java.io.FileNotFoundException): 19
DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem): 14
FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream): 13
UnsupportedEncodingException (java.io.UnsupportedEncodingException): 11
HashMap (java.util.HashMap): 10
FileStatus (org.apache.hadoop.fs.FileStatus): 10
ClientResponse (com.sun.jersey.api.client.ClientResponse): 9
PrivilegedAction (java.security.PrivilegedAction): 9
ParentNotDirectoryException (org.apache.hadoop.fs.ParentNotDirectoryException): 9
RESTResponse (org.apache.ranger.admin.client.datatype.RESTResponse): 9