Example 51 with FileContext

Use of org.apache.hadoop.fs.FileContext in project hadoop by apache.

From class TestNonAggregatingLogHandler, method testFailedDirLogDeletion.

/*
   * Test to ensure that we handle the cleanup of directories that may not have
   * the application log dirs we're trying to delete or may have other problems.
   * Test creates 7 log dirs, and fails the directory check for 4 of them and
   * then checks to ensure we tried to delete only the ones that passed the
   * check.
   */
@Test
public void testFailedDirLogDeletion() throws Exception {
    File[] localLogDirs = getLocalLogDirFiles(this.getClass().getName(), 7);
    final List<String> localLogDirPaths = new ArrayList<String>(localLogDirs.length);
    for (int i = 0; i < localLogDirs.length; i++) {
        localLogDirPaths.add(localLogDirs[i].getAbsolutePath());
    }
    String localLogDirsString = StringUtils.join(localLogDirPaths, ",");
    conf.set(YarnConfiguration.NM_LOG_DIRS, localLogDirsString);
    conf.setBoolean(YarnConfiguration.LOG_AGGREGATION_ENABLED, false);
    conf.setLong(YarnConfiguration.NM_LOG_RETAIN_SECONDS, 0L);
    LocalDirsHandlerService mockDirsHandler = mock(LocalDirsHandlerService.class);
    NonAggregatingLogHandler rawLogHandler = new NonAggregatingLogHandler(dispatcher, mockDelService, mockDirsHandler, new NMNullStateStoreService());
    NonAggregatingLogHandler logHandler = spy(rawLogHandler);
    AbstractFileSystem spylfs = spy(FileContext.getLocalFSFileContext().getDefaultFileSystem());
    FileContext lfs = FileContext.getFileContext(spylfs, conf);
    doReturn(lfs).when(logHandler).getLocalFileContext(isA(Configuration.class));
    logHandler.init(conf);
    logHandler.start();
    runMockedFailedDirs(logHandler, appId, user, mockDelService, mockDirsHandler, conf, spylfs, lfs, localLogDirs);
    logHandler.close();
}
Also used: AbstractFileSystem (org.apache.hadoop.fs.AbstractFileSystem), Configuration (org.apache.hadoop.conf.Configuration), YarnConfiguration (org.apache.hadoop.yarn.conf.YarnConfiguration), ArrayList (java.util.ArrayList), LocalDirsHandlerService (org.apache.hadoop.yarn.server.nodemanager.LocalDirsHandlerService), File (java.io.File), NMNullStateStoreService (org.apache.hadoop.yarn.server.nodemanager.recovery.NMNullStateStoreService), FileContext (org.apache.hadoop.fs.FileContext), Test (org.junit.Test)
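
The key seam in this test is FileContext.getFileContext(AbstractFileSystem, Configuration): wrapping a Mockito spy of the local AbstractFileSystem lets the test intercept individual file system calls. The following is a minimal, self-contained sketch of that pattern; the stubbed path and exception are illustrative assumptions, not part of the Hadoop test.

import java.io.FileNotFoundException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.AbstractFileSystem;
import org.apache.hadoop.fs.FileContext;
import org.apache.hadoop.fs.Path;

import static org.mockito.Mockito.*;

public class SpyFileContextSketch {
    public static FileContext failingLocalContext(Configuration conf) throws Exception {
        // Spy on the real local file system so unstubbed calls behave normally.
        AbstractFileSystem spylfs =
                spy(FileContext.getLocalFSFileContext().getDefaultFileSystem());
        // Hypothetical bad log dir: any getFileStatus call on it now fails.
        doThrow(new FileNotFoundException("simulated missing log dir"))
                .when(spylfs).getFileStatus(eq(new Path("/tmp/bad-log-dir")));
        // Calls made through this FileContext are routed to the spy.
        return FileContext.getFileContext(spylfs, conf);
    }
}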

Example 52 with FileContext

Use of org.apache.hadoop.fs.FileContext in project hadoop by apache.

From class TestLogAggregationService, method testFailedDirsLocalFileDeletionAfterUpload.

/*
   * Test to make sure we handle cases where the directories we get back from
   * the LocalDirsHandler may have issues including the log dir not being
   * present as well as other issues. The test uses helper functions from
   * TestNonAggregatingLogHandler.
   */
@Test
public void testFailedDirsLocalFileDeletionAfterUpload() throws Exception {
    // setup conf and services
    DeletionService mockDelService = mock(DeletionService.class);
    File[] localLogDirs = TestNonAggregatingLogHandler.getLocalLogDirFiles(this.getClass().getName(), 7);
    final List<String> localLogDirPaths = new ArrayList<String>(localLogDirs.length);
    for (int i = 0; i < localLogDirs.length; i++) {
        localLogDirPaths.add(localLogDirs[i].getAbsolutePath());
    }
    String localLogDirsString = StringUtils.join(localLogDirPaths, ",");
    this.conf.set(YarnConfiguration.NM_LOG_DIRS, localLogDirsString);
    this.conf.set(YarnConfiguration.NM_REMOTE_APP_LOG_DIR, this.remoteRootLogDir.getAbsolutePath());
    this.conf.setLong(YarnConfiguration.NM_DISK_HEALTH_CHECK_INTERVAL_MS, 500);
    ApplicationId application1 = BuilderUtils.newApplicationId(1234, 1);
    ApplicationAttemptId appAttemptId = BuilderUtils.newApplicationAttemptId(application1, 1);
    this.dirsHandler = new LocalDirsHandlerService();
    LocalDirsHandlerService mockDirsHandler = mock(LocalDirsHandlerService.class);
    LogAggregationService logAggregationService = spy(new LogAggregationService(dispatcher, this.context, mockDelService, mockDirsHandler));
    AbstractFileSystem spylfs = spy(FileContext.getLocalFSFileContext().getDefaultFileSystem());
    FileContext lfs = FileContext.getFileContext(spylfs, conf);
    doReturn(lfs).when(logAggregationService).getLocalFileContext(isA(Configuration.class));
    logAggregationService.init(this.conf);
    logAggregationService.start();
    TestNonAggregatingLogHandler.runMockedFailedDirs(logAggregationService, application1, user, mockDelService, mockDirsHandler, conf, spylfs, lfs, localLogDirs);
    logAggregationService.stop();
    assertEquals(0, logAggregationService.getNumAggregators());
    verify(logAggregationService).closeFileSystems(any(UserGroupInformation.class));
    ApplicationEvent[] expectedEvents = new ApplicationEvent[] { new ApplicationEvent(appAttemptId.getApplicationId(), ApplicationEventType.APPLICATION_LOG_HANDLING_INITED), new ApplicationEvent(appAttemptId.getApplicationId(), ApplicationEventType.APPLICATION_LOG_HANDLING_FINISHED) };
    checkEvents(appEventHandler, expectedEvents, true, "getType", "getApplicationID");
}
Also used: Configuration (org.apache.hadoop.conf.Configuration), YarnConfiguration (org.apache.hadoop.yarn.conf.YarnConfiguration), DeletionService (org.apache.hadoop.yarn.server.nodemanager.DeletionService), ArrayList (java.util.ArrayList), ApplicationEvent (org.apache.hadoop.yarn.server.nodemanager.containermanager.application.ApplicationEvent), ApplicationAttemptId (org.apache.hadoop.yarn.api.records.ApplicationAttemptId), LocalDirsHandlerService (org.apache.hadoop.yarn.server.nodemanager.LocalDirsHandlerService), AbstractFileSystem (org.apache.hadoop.fs.AbstractFileSystem), ApplicationId (org.apache.hadoop.yarn.api.records.ApplicationId), File (java.io.File), FileContext (org.apache.hadoop.fs.FileContext), UserGroupInformation (org.apache.hadoop.security.UserGroupInformation), BaseContainerManagerTest (org.apache.hadoop.yarn.server.nodemanager.containermanager.BaseContainerManagerTest), Test (org.junit.Test)
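
Both log handler tests rely on the same doReturn(...).when(spy) seam: the service under test exposes an overridable factory method for its FileContext, and the spy stubs it so the test-controlled context is used instead of the real local file system. A sketch of that seam with a hypothetical TestableService (not a Hadoop class) follows.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileContext;

import static org.mockito.Mockito.*;

public class InjectionSketch {
    // Hypothetical service exposing the same factory seam as
    // NonAggregatingLogHandler and LogAggregationService.
    static class TestableService {
        protected FileContext getLocalFileContext(Configuration conf) throws Exception {
            return FileContext.getLocalFSFileContext();
        }
    }

    public static TestableService withInjectedContext(FileContext lfs) throws Exception {
        TestableService service = spy(new TestableService());
        // doReturn(...).when(...) stubs the spy without invoking the real method.
        doReturn(lfs).when(service).getLocalFileContext(isA(Configuration.class));
        return service;
    }
}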

Example 53 with FileContext

Use of org.apache.hadoop.fs.FileContext in project hadoop by apache.

From class TestFsck, method testFsckSymlink.

/** Test fsck with symlinks in the filesystem. */
@Test
public void testFsckSymlink() throws Exception {
    final DFSTestUtil util = new DFSTestUtil.Builder().setName(getClass().getSimpleName()).setNumFiles(1).build();
    conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 10000L);
    FileSystem fs = null;
    final long precision = 1L;
    conf.setLong(DFSConfigKeys.DFS_NAMENODE_ACCESSTIME_PRECISION_KEY, precision);
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(4).build();
    fs = cluster.getFileSystem();
    final String fileName = "/srcdat";
    util.createFiles(fs, fileName);
    final FileContext fc = FileContext.getFileContext(cluster.getConfiguration(0));
    final Path file = new Path(fileName);
    final Path symlink = new Path("/srcdat-symlink");
    fc.createSymlink(file, symlink, false);
    util.waitReplication(fs, fileName, (short) 3);
    long aTime = fc.getFileStatus(symlink).getAccessTime();
    Thread.sleep(precision);
    setupAuditLogs();
    String outStr = runFsck(conf, 0, true, "/");
    verifyAuditLogs();
    assertEquals(aTime, fc.getFileStatus(symlink).getAccessTime());
    System.out.println(outStr);
    assertTrue(outStr.contains(NamenodeFsck.HEALTHY_STATUS));
    assertTrue(outStr.contains("Total symlinks:\t\t1"));
    util.cleanup(fs, fileName);
}
Also used: Path (org.apache.hadoop.fs.Path), DFSTestUtil (org.apache.hadoop.hdfs.DFSTestUtil), MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster), FileSystem (org.apache.hadoop.fs.FileSystem), DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem), Matchers.anyString (org.mockito.Matchers.anyString), FileContext (org.apache.hadoop.fs.FileContext), Test (org.junit.Test)
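
For reference, the FileContext symlink calls this fsck test exercises are sketched below in isolation. The paths are placeholders, and symlink support is administratively disabled in some Hadoop releases, in which case createSymlink throws UnsupportedOperationException.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileContext;
import org.apache.hadoop.fs.Path;

public class SymlinkSketch {
    public static void main(String[] args) throws Exception {
        FileContext fc = FileContext.getFileContext(new Configuration());
        // Placeholder paths; the target is assumed to exist already.
        Path target = new Path("/srcdat");
        Path link = new Path("/srcdat-symlink");
        // false: do not create missing parent directories for the link.
        fc.createSymlink(target, link, false);
        // getFileStatus() follows the link; getFileLinkStatus() would describe
        // the link itself, and getLinkTarget() returns the stored target path.
        long accessTime = fc.getFileStatus(link).getAccessTime();
        System.out.println(fc.getLinkTarget(link) + " accessTime=" + accessTime);
    }
}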

Example 54 with FileContext

Use of org.apache.hadoop.fs.FileContext in project hadoop by apache.

From class TestPermissionSymlinks, method doRenameLinkTargetNotWritableFC.

private void doRenameLinkTargetNotWritableFC() throws Exception {
    // Rename the link when the target and parent are not writable
    user.doAs(new PrivilegedExceptionAction<Object>() {

        @Override
        public Object run() throws IOException {
            // Obtain a FileContext as the unprivileged test user
            FileContext myfc = FileContext.getFileContext(conf);
            Path newlink = new Path(linkParent, "newlink");
            myfc.rename(link, newlink, Rename.NONE);
            Path linkTarget = myfc.getLinkTarget(newlink);
            assertEquals("Expected link's target to match target!", target, linkTarget);
            return null;
        }
    });
    assertTrue("Expected target to exist", wrapper.exists(target));
}
Also used: Path (org.apache.hadoop.fs.Path), IOException (java.io.IOException), FileContext (org.apache.hadoop.fs.FileContext)
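
The doAs wrapper is the essential part of this example: the FileContext must be created inside run() so that subsequent permission checks are evaluated against the unprivileged user. A standalone sketch of the pattern, with assumed user and group names, follows.

import java.security.PrivilegedExceptionAction;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileContext;
import org.apache.hadoop.security.UserGroupInformation;

public class DoAsSketch {
    public static FileContext fileContextAs(final Configuration conf) throws Exception {
        // Assumed user and group names, mirroring the test's unprivileged user.
        UserGroupInformation ugi = UserGroupInformation.createUserForTesting(
                "testuser", new String[] { "testgroup" });
        return ugi.doAs(new PrivilegedExceptionAction<FileContext>() {
            @Override
            public FileContext run() throws Exception {
                // Created inside run(), so it acts as "testuser" from here on.
                return FileContext.getFileContext(conf);
            }
        });
    }
}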

Example 55 with FileContext

Use of org.apache.hadoop.fs.FileContext in project hadoop by apache.

From class TestPermissionSymlinks, method testAccess.

@Test
public void testAccess() throws Exception {
    fs.setPermission(target, new FsPermission((short) 0002));
    fs.setAcl(target, Arrays.asList(aclEntry(ACCESS, USER, ALL), aclEntry(ACCESS, GROUP, NONE), aclEntry(ACCESS, USER, user.getShortUserName(), WRITE), aclEntry(ACCESS, OTHER, WRITE)));
    FileContext myfc = user.doAs(new PrivilegedExceptionAction<FileContext>() {

        @Override
        public FileContext run() throws IOException {
            return FileContext.getFileContext(conf);
        }
    });
    // Path to targetChild via symlink
    myfc.access(link, FsAction.WRITE);
    try {
        myfc.access(link, FsAction.ALL);
        fail("The access call should have failed.");
    } catch (AccessControlException e) {
        // expected
    }
    Path badPath = new Path(link, "bad");
    try {
        myfc.access(badPath, FsAction.READ);
        fail("The access call should have failed");
    } catch (AccessControlException ace) {
        // expected
        String message = ace.getMessage();
        assertTrue(message, message.contains("is not a directory"));
        assertTrue(message.contains(target.toString()));
        assertFalse(message.contains(badPath.toString()));
    }
}
Also used: Path (org.apache.hadoop.fs.Path), FsPermission (org.apache.hadoop.fs.permission.FsPermission), IOException (java.io.IOException), FileContext (org.apache.hadoop.fs.FileContext), Test (org.junit.Test)
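
FileContext.access() does not return a boolean; it throws AccessControlException when the check fails, which is why the test wraps each call in try/catch. A small hypothetical helper (not part of the Hadoop API) that turns the probe into a boolean:

import org.apache.hadoop.fs.FileContext;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.security.AccessControlException;

public class AccessSketch {
    // access() resolves symlinks, so probing a link actually checks its target.
    public static boolean hasAccess(FileContext fc, Path path, FsAction action)
            throws Exception {
        try {
            fc.access(path, action);
            return true;
        } catch (AccessControlException e) {
            return false;
        }
    }
}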

Aggregations

FileContext (org.apache.hadoop.fs.FileContext): 84
Path (org.apache.hadoop.fs.Path): 71
Test (org.junit.Test): 34
Configuration (org.apache.hadoop.conf.Configuration): 33
IOException (java.io.IOException): 29
File (java.io.File): 16
YarnConfiguration (org.apache.hadoop.yarn.conf.YarnConfiguration): 14
FileStatus (org.apache.hadoop.fs.FileStatus): 13
HashMap (java.util.HashMap): 12
FsPermission (org.apache.hadoop.fs.permission.FsPermission): 10
ArrayList (java.util.ArrayList): 9
FileSystem (org.apache.hadoop.fs.FileSystem): 8
LocalResource (org.apache.hadoop.yarn.api.records.LocalResource): 8
ExecutorService (java.util.concurrent.ExecutorService): 7
ContainerId (org.apache.hadoop.yarn.api.records.ContainerId): 7
URISyntaxException (java.net.URISyntaxException): 6
ConcurrentHashMap (java.util.concurrent.ConcurrentHashMap): 6
ExecutionException (java.util.concurrent.ExecutionException): 6
Future (java.util.concurrent.Future): 6
FSDataInputStream (org.apache.hadoop.fs.FSDataInputStream): 6