Example 6 with FilterFileSystem

Use of org.apache.hadoop.fs.FilterFileSystem in the Apache Hadoop project.

From the class TestChRootedFileSystem, the method testUnsetStoragePolicy verifies that an unsetStoragePolicy call made through a ChRootedFileSystem rooted at /a/b is delegated to the underlying (mock) file system with the chroot prefix prepended to the path.

@Test(timeout = 30000)
public void testUnsetStoragePolicy() throws Exception {
    Path storagePolicyPath = new Path("/storagePolicy");
    Path chRootedStoragePolicyPath = new Path("/a/b/storagePolicy");
    Configuration conf = new Configuration();
    conf.setClass("fs.mockfs.impl", MockFileSystem.class, FileSystem.class);
    URI chrootUri = URI.create("mockfs://foo/a/b");
    ChRootedFileSystem chrootFs = new ChRootedFileSystem(chrootUri, conf);
    FileSystem mockFs = ((FilterFileSystem) chrootFs.getRawFileSystem()).getRawFileSystem();
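    // calls made through the chroot should reach the raw fs with the /a/b prefix prepended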
    chrootFs.unsetStoragePolicy(storagePolicyPath);
    verify(mockFs).unsetStoragePolicy(chRootedStoragePolicyPath);
}
Also used : Path(org.apache.hadoop.fs.Path) Configuration(org.apache.hadoop.conf.Configuration) FileSystem(org.apache.hadoop.fs.FileSystem) ChRootedFileSystem(org.apache.hadoop.fs.viewfs.ChRootedFileSystem) FilterFileSystem(org.apache.hadoop.fs.FilterFileSystem) URI(java.net.URI) Test(org.junit.Test)
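
Note on the shared test fixture: the MockFileSystem class registered under fs.mockfs.impl is not shown on this page, yet the tests stub and verify calls on mockFs with Mockito. That only works if MockFileSystem is a FilterFileSystem whose wrapped ("raw") file system is a Mockito mock. A minimal sketch of that pattern is given below; the implementation is assumed for illustration, not copied from this page.

import java.io.IOException;
import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FilterFileSystem;
import static org.mockito.Mockito.mock;

// Assumed helper: a FilterFileSystem delegating to a Mockito mock, so every call
// made through the "mockfs" scheme can be stubbed with when(...) and checked with
// verify(...). getRawFileSystem() hands the mock back to the test.
class MockFileSystem extends FilterFileSystem {
    MockFileSystem() {
        super(mock(FileSystem.class));
    }

    @Override
    public void initialize(URI name, Configuration conf) throws IOException {
        // skip FilterFileSystem.initialize() so the mock delegate is never touched
    }
}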

Example 7 with FilterFileSystem

Use of org.apache.hadoop.fs.FilterFileSystem in the Apache Hadoop project.

From the class TestAggregatedLogDeletionService, the method testDeletion verifies that the deletion service removes aggregated log directories of finished applications once they are older than the retention period, while keeping recent logs and the logs of still-running applications.

@Test
public void testDeletion() throws Exception {
    long now = System.currentTimeMillis();
    long toDeleteTime = now - (2000 * 1000);
    long toKeepTime = now - (1500 * 1000);
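    // retention is configured as 1800 seconds below, so toDeleteTime is past retention and toKeepTime is not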
    String root = "mockfs://foo/";
    String remoteRootLogDir = root + "tmp/logs";
    String suffix = "logs";
    final Configuration conf = new Configuration();
    conf.setClass("fs.mockfs.impl", MockFileSystem.class, FileSystem.class);
    conf.set(YarnConfiguration.LOG_AGGREGATION_ENABLED, "true");
    conf.set(YarnConfiguration.LOG_AGGREGATION_RETAIN_SECONDS, "1800");
    conf.set(YarnConfiguration.NM_REMOTE_APP_LOG_DIR, remoteRootLogDir);
    conf.set(YarnConfiguration.NM_REMOTE_APP_LOG_DIR_SUFFIX, suffix);
    Path rootPath = new Path(root);
    FileSystem rootFs = rootPath.getFileSystem(conf);
    FileSystem mockFs = ((FilterFileSystem) rootFs).getRawFileSystem();
    Path remoteRootLogPath = new Path(remoteRootLogDir);
    Path userDir = new Path(remoteRootLogPath, "me");
    FileStatus userDirStatus = new FileStatus(0, true, 0, 0, toKeepTime, userDir);
    when(mockFs.listStatus(remoteRootLogPath)).thenReturn(new FileStatus[] { userDirStatus });
    ApplicationId appId1 = ApplicationId.newInstance(System.currentTimeMillis(), 1);
    Path userLogDir = new Path(userDir, suffix);
    Path app1Dir = new Path(userLogDir, appId1.toString());
    FileStatus app1DirStatus = new FileStatus(0, true, 0, 0, toDeleteTime, app1Dir);
    ApplicationId appId2 = ApplicationId.newInstance(System.currentTimeMillis(), 2);
    Path app2Dir = new Path(userLogDir, appId2.toString());
    FileStatus app2DirStatus = new FileStatus(0, true, 0, 0, toDeleteTime, app2Dir);
    ApplicationId appId3 = ApplicationId.newInstance(System.currentTimeMillis(), 3);
    Path app3Dir = new Path(userLogDir, appId3.toString());
    FileStatus app3DirStatus = new FileStatus(0, true, 0, 0, toDeleteTime, app3Dir);
    ApplicationId appId4 = ApplicationId.newInstance(System.currentTimeMillis(), 4);
    Path app4Dir = new Path(userLogDir, appId4.toString());
    FileStatus app4DirStatus = new FileStatus(0, true, 0, 0, toDeleteTime, app4Dir);
    ApplicationId appId5 = ApplicationId.newInstance(System.currentTimeMillis(), 5);
    Path app5Dir = new Path(userLogDir, appId5.toString());
    FileStatus app5DirStatus = new FileStatus(0, true, 0, 0, toDeleteTime, app5Dir);
    when(mockFs.listStatus(userLogDir)).thenReturn(new FileStatus[] { app1DirStatus, app2DirStatus, app3DirStatus, app4DirStatus, app5DirStatus });
    when(mockFs.listStatus(app1Dir)).thenReturn(new FileStatus[] {});
    Path app2Log1 = new Path(app2Dir, "host1");
    FileStatus app2Log1Status = new FileStatus(10, false, 1, 1, toDeleteTime, app2Log1);
    Path app2Log2 = new Path(app2Dir, "host2");
    FileStatus app2Log2Status = new FileStatus(10, false, 1, 1, toKeepTime, app2Log2);
    when(mockFs.listStatus(app2Dir)).thenReturn(new FileStatus[] { app2Log1Status, app2Log2Status });
    Path app3Log1 = new Path(app3Dir, "host1");
    FileStatus app3Log1Status = new FileStatus(10, false, 1, 1, toDeleteTime, app3Log1);
    Path app3Log2 = new Path(app3Dir, "host2");
    FileStatus app3Log2Status = new FileStatus(10, false, 1, 1, toDeleteTime, app3Log2);
    when(mockFs.delete(app3Dir, true)).thenThrow(new AccessControlException("Injected Error\nStack Trace :("));
    when(mockFs.listStatus(app3Dir)).thenReturn(new FileStatus[] { app3Log1Status, app3Log2Status });
    Path app4Log1 = new Path(app4Dir, "host1");
    FileStatus app4Log1Status = new FileStatus(10, false, 1, 1, toDeleteTime, app4Log1);
    Path app4Log2 = new Path(app4Dir, "host2");
    FileStatus app4Log2Status = new FileStatus(10, false, 1, 1, toDeleteTime, app4Log2);
    when(mockFs.listStatus(app4Dir)).thenReturn(new FileStatus[] { app4Log1Status, app4Log2Status });
    Path app5Log1 = new Path(app5Dir, "host1");
    FileStatus app5Log1Status = new FileStatus(10, false, 1, 1, toDeleteTime, app5Log1);
    Path app5Log2 = new Path(app5Dir, "host2");
    FileStatus app5Log2Status = new FileStatus(10, false, 1, 1, toKeepTime, app5Log2);
    when(mockFs.listStatus(app5Dir)).thenReturn(new FileStatus[] { app5Log1Status, app5Log2Status });
    final List<ApplicationId> finishedApplications = Collections.unmodifiableList(Arrays.asList(appId1, appId2, appId3, appId4));
    final List<ApplicationId> runningApplications = Collections.unmodifiableList(Arrays.asList(appId5));
    AggregatedLogDeletionService deletionService = new AggregatedLogDeletionService() {

        @Override
        protected ApplicationClientProtocol creatRMClient() throws IOException {
            try {
                return createMockRMClient(finishedApplications, runningApplications);
            } catch (Exception e) {
                throw new IOException(e);
            }
        }

        @Override
        protected void stopRMClient() {
        // DO NOTHING
        }
    };
    deletionService.init(conf);
    deletionService.start();
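    // app1, app3 and app4 are finished and fully expired, so their directories are deleted;
    // app2 keeps its directory because one of its logs is still recent, and app5 is still
    // running, so only its expired log file (app5Log1) is removed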
    verify(mockFs, timeout(2000)).delete(app1Dir, true);
    verify(mockFs, timeout(2000).times(0)).delete(app2Dir, true);
    verify(mockFs, timeout(2000)).delete(app3Dir, true);
    verify(mockFs, timeout(2000)).delete(app4Dir, true);
    verify(mockFs, timeout(2000).times(0)).delete(app5Dir, true);
    verify(mockFs, timeout(2000)).delete(app5Log1, true);
    verify(mockFs, timeout(2000).times(0)).delete(app5Log2, true);
    deletionService.stop();
}
Also used : Path(org.apache.hadoop.fs.Path) FileStatus(org.apache.hadoop.fs.FileStatus) YarnConfiguration(org.apache.hadoop.yarn.conf.YarnConfiguration) Configuration(org.apache.hadoop.conf.Configuration) AccessControlException(org.apache.hadoop.security.AccessControlException) IOException(java.io.IOException) FileSystem(org.apache.hadoop.fs.FileSystem) FilterFileSystem(org.apache.hadoop.fs.FilterFileSystem) ApplicationId(org.apache.hadoop.yarn.api.records.ApplicationId) Test(org.junit.Test)
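
The deletion decisions above also depend on a createMockRMClient helper that belongs to TestAggregatedLogDeletionService but is not reproduced on this page: it returns an ApplicationClientProtocol whose application reports mark the first list of IDs as finished and the second as running. The sketch below is an assumption of how such a helper can be built purely from Mockito mocks; the real helper may construct the report records differently.

import java.util.List;
import org.apache.hadoop.yarn.api.ApplicationClientProtocol;
import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationReportRequest;
import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationReportResponse;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.ApplicationReport;
import org.apache.hadoop.yarn.api.records.YarnApplicationState;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;

// Assumed sketch: one stubbed report per application ID, with the state chosen so the
// deletion service treats the first group as terminated and the second as still running.
private static ApplicationClientProtocol createMockRMClient(
        List<ApplicationId> finished, List<ApplicationId> running) throws Exception {
    ApplicationClientProtocol rmClient = mock(ApplicationClientProtocol.class);
    stubReports(rmClient, finished, YarnApplicationState.FINISHED);
    stubReports(rmClient, running, YarnApplicationState.RUNNING);
    return rmClient;
}

// Hypothetical helper (not in the original test) that stubs getApplicationReport
// for each application ID with a report in the given state.
private static void stubReports(ApplicationClientProtocol rmClient,
        List<ApplicationId> apps, YarnApplicationState state) throws Exception {
    if (apps == null) {
        return;
    }
    for (ApplicationId appId : apps) {
        ApplicationReport report = mock(ApplicationReport.class);
        when(report.getYarnApplicationState()).thenReturn(state);
        GetApplicationReportResponse response = mock(GetApplicationReportResponse.class);
        when(response.getApplicationReport()).thenReturn(report);
        when(rmClient.getApplicationReport(GetApplicationReportRequest.newInstance(appId)))
            .thenReturn(response);
    }
}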

Example 8 with FilterFileSystem

Use of org.apache.hadoop.fs.FilterFileSystem in the Apache Hadoop project.

From the class TestAggregatedLogDeletionService, the method testCheckInterval verifies that the deletion service rescans the remote log directory at the configured check interval and promptly deletes logs that have since expired.

@Test
public void testCheckInterval() throws Exception {
    long RETENTION_SECS = 10 * 24 * 3600;
    long now = System.currentTimeMillis();
    long toDeleteTime = now - RETENTION_SECS * 1000;
    String root = "mockfs://foo/";
    String remoteRootLogDir = root + "tmp/logs";
    String suffix = "logs";
    Configuration conf = new Configuration();
    conf.setClass("fs.mockfs.impl", MockFileSystem.class, FileSystem.class);
    conf.set(YarnConfiguration.LOG_AGGREGATION_ENABLED, "true");
    conf.set(YarnConfiguration.LOG_AGGREGATION_RETAIN_SECONDS, "864000");
    conf.set(YarnConfiguration.LOG_AGGREGATION_RETAIN_CHECK_INTERVAL_SECONDS, "1");
    conf.set(YarnConfiguration.NM_REMOTE_APP_LOG_DIR, remoteRootLogDir);
    conf.set(YarnConfiguration.NM_REMOTE_APP_LOG_DIR_SUFFIX, suffix);
    // prevent us from picking up the same mockfs instance from another test
    FileSystem.closeAll();
    Path rootPath = new Path(root);
    FileSystem rootFs = rootPath.getFileSystem(conf);
    FileSystem mockFs = ((FilterFileSystem) rootFs).getRawFileSystem();
    Path remoteRootLogPath = new Path(remoteRootLogDir);
    Path userDir = new Path(remoteRootLogPath, "me");
    FileStatus userDirStatus = new FileStatus(0, true, 0, 0, now, userDir);
    when(mockFs.listStatus(remoteRootLogPath)).thenReturn(new FileStatus[] { userDirStatus });
    ApplicationId appId1 = ApplicationId.newInstance(System.currentTimeMillis(), 1);
    Path userLogDir = new Path(userDir, suffix);
    Path app1Dir = new Path(userLogDir, appId1.toString());
    FileStatus app1DirStatus = new FileStatus(0, true, 0, 0, now, app1Dir);
    when(mockFs.listStatus(userLogDir)).thenReturn(new FileStatus[] { app1DirStatus });
    Path app1Log1 = new Path(app1Dir, "host1");
    FileStatus app1Log1Status = new FileStatus(10, false, 1, 1, now, app1Log1);
    when(mockFs.listStatus(app1Dir)).thenReturn(new FileStatus[] { app1Log1Status });
    final List<ApplicationId> finishedApplications = Collections.unmodifiableList(Arrays.asList(appId1));
    AggregatedLogDeletionService deletionSvc = new AggregatedLogDeletionService() {

        @Override
        protected ApplicationClientProtocol creatRMClient() throws IOException {
            try {
                return createMockRMClient(finishedApplications, null);
            } catch (Exception e) {
                throw new IOException(e);
            }
        }

        @Override
        protected void stopRMClient() {
        // DO NOTHING
        }
    };
    deletionSvc.init(conf);
    deletionSvc.start();
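    // with a 1-second check interval the remote root should be scanned repeatedly even before anything expires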
    verify(mockFs, timeout(10000).atLeast(4)).listStatus(any(Path.class));
    verify(mockFs, never()).delete(app1Dir, true);
    // modify the timestamp of the logs and verify it's picked up quickly
    app1DirStatus = new FileStatus(0, true, 0, 0, toDeleteTime, app1Dir);
    app1Log1Status = new FileStatus(10, false, 1, 1, toDeleteTime, app1Log1);
    when(mockFs.listStatus(userLogDir)).thenReturn(new FileStatus[] { app1DirStatus });
    when(mockFs.listStatus(app1Dir)).thenReturn(new FileStatus[] { app1Log1Status });
    verify(mockFs, timeout(10000)).delete(app1Dir, true);
    deletionSvc.stop();
}
Also used : Path(org.apache.hadoop.fs.Path) FileStatus(org.apache.hadoop.fs.FileStatus) YarnConfiguration(org.apache.hadoop.yarn.conf.YarnConfiguration) Configuration(org.apache.hadoop.conf.Configuration) FileSystem(org.apache.hadoop.fs.FileSystem) FilterFileSystem(org.apache.hadoop.fs.FilterFileSystem) IOException(java.io.IOException) ApplicationId(org.apache.hadoop.yarn.api.records.ApplicationId) AccessControlException(org.apache.hadoop.security.AccessControlException) Test(org.junit.Test)
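
The FileSystem.closeAll() call near the top of this test is easy to overlook: FileSystem.get() caches instances per URI scheme and authority (and per user), so without clearing the cache this test could pick up a MockFileSystem instance that an earlier test had already created and stubbed. The following self-contained snippet, written against the local file system rather than the mockfs scheme used above, illustrates that caching behaviour.

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;

public class FileSystemCacheDemo {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // FileSystem.get() returns a cached instance for the same scheme/authority/user.
        FileSystem first = FileSystem.get(URI.create("file:///"), conf);
        FileSystem second = FileSystem.get(URI.create("file:///"), conf);
        System.out.println(first == second);   // true: same cached instance
        // closeAll() closes and evicts every cached instance ...
        FileSystem.closeAll();
        // ... so the next lookup creates a fresh one.
        FileSystem third = FileSystem.get(URI.create("file:///"), conf);
        System.out.println(first == third);    // false: new instance after the cache is cleared
    }
}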

Example 9 with FilterFileSystem

Use of org.apache.hadoop.fs.FilterFileSystem in the Apache Hadoop project.

From the class TestAggregatedLogDeletionService, the method testRefreshLogRetentionSettings verifies that refreshLogRetentionSettings() makes the service pick up new retention and check-interval values without a restart.

@Test
public void testRefreshLogRetentionSettings() throws Exception {
    long now = System.currentTimeMillis();
    // a timestamp 2000 seconds in the past (older than the 1800-second retention)
    long before2000Secs = now - (2000 * 1000);
    // a timestamp 50 seconds in the past (well within retention)
    long before50Secs = now - (50 * 1000);
    String root = "mockfs://foo/";
    String remoteRootLogDir = root + "tmp/logs";
    String suffix = "logs";
    final Configuration conf = new Configuration();
    conf.setClass("fs.mockfs.impl", MockFileSystem.class, FileSystem.class);
    conf.set(YarnConfiguration.LOG_AGGREGATION_ENABLED, "true");
    conf.set(YarnConfiguration.LOG_AGGREGATION_RETAIN_SECONDS, "1800");
    conf.set(YarnConfiguration.LOG_AGGREGATION_RETAIN_CHECK_INTERVAL_SECONDS, "1");
    conf.set(YarnConfiguration.NM_REMOTE_APP_LOG_DIR, remoteRootLogDir);
    conf.set(YarnConfiguration.NM_REMOTE_APP_LOG_DIR_SUFFIX, suffix);
    Path rootPath = new Path(root);
    FileSystem rootFs = rootPath.getFileSystem(conf);
    FileSystem mockFs = ((FilterFileSystem) rootFs).getRawFileSystem();
    Path remoteRootLogPath = new Path(remoteRootLogDir);
    Path userDir = new Path(remoteRootLogPath, "me");
    FileStatus userDirStatus = new FileStatus(0, true, 0, 0, before50Secs, userDir);
    when(mockFs.listStatus(remoteRootLogPath)).thenReturn(new FileStatus[] { userDirStatus });
    Path userLogDir = new Path(userDir, suffix);
    ApplicationId appId1 = ApplicationId.newInstance(System.currentTimeMillis(), 1);
    // set the last-modified time of app1Dir and its files to before2000Secs
    Path app1Dir = new Path(userLogDir, appId1.toString());
    FileStatus app1DirStatus = new FileStatus(0, true, 0, 0, before2000Secs, app1Dir);
    ApplicationId appId2 = ApplicationId.newInstance(System.currentTimeMillis(), 2);
    // set the last-modified time of app2Dir and its files to before50Secs
    Path app2Dir = new Path(userLogDir, appId2.toString());
    FileStatus app2DirStatus = new FileStatus(0, true, 0, 0, before50Secs, app2Dir);
    when(mockFs.listStatus(userLogDir)).thenReturn(new FileStatus[] { app1DirStatus, app2DirStatus });
    Path app1Log1 = new Path(app1Dir, "host1");
    FileStatus app1Log1Status = new FileStatus(10, false, 1, 1, before2000Secs, app1Log1);
    when(mockFs.listStatus(app1Dir)).thenReturn(new FileStatus[] { app1Log1Status });
    Path app2Log1 = new Path(app2Dir, "host1");
    FileStatus app2Log1Status = new FileStatus(10, false, 1, 1, before50Secs, app2Log1);
    when(mockFs.listStatus(app2Dir)).thenReturn(new FileStatus[] { app2Log1Status });
    final List<ApplicationId> finishedApplications = Collections.unmodifiableList(Arrays.asList(appId1, appId2));
    AggregatedLogDeletionService deletionSvc = new AggregatedLogDeletionService() {

        @Override
        protected Configuration createConf() {
            return conf;
        }

        @Override
        protected ApplicationClientProtocol creatRMClient() throws IOException {
            try {
                return createMockRMClient(finishedApplications, null);
            } catch (Exception e) {
                throw new IOException(e);
            }
        }

        @Override
        protected void stopRMClient() {
        // DO NOTHING
        }
    };
    deletionSvc.init(conf);
    deletionSvc.start();
    // app1Dir should be deleted since it is older than the log retention period
    verify(mockFs, timeout(10000)).delete(app1Dir, true);
    // app2Dir is not expected to be deleted since it is still within the retention period
    verify(mockFs, timeout(3000).times(0)).delete(app2Dir, true);
    // now change the retention settings in the configuration
    conf.set(YarnConfiguration.LOG_AGGREGATION_RETAIN_SECONDS, "50");
    conf.set(YarnConfiguration.LOG_AGGREGATION_RETAIN_CHECK_INTERVAL_SECONDS, "2");
    // refreshLogRetentionSettings has not been called yet, so the service should not see the new values
    Assert.assertTrue(2000L != deletionSvc.getCheckIntervalMsecs());
    // refresh the log settings
    deletionSvc.refreshLogRetentionSettings();
    // the check interval should now reflect the new value (2 seconds = 2000 ms)
    Assert.assertTrue(2000L == deletionSvc.getCheckIntervalMsecs());
    // app2Dir should now be deleted since it exceeds the new 50-second retention period
    verify(mockFs, timeout(10000)).delete(app2Dir, true);
    deletionSvc.stop();
}
Also used : Path(org.apache.hadoop.fs.Path) FileStatus(org.apache.hadoop.fs.FileStatus) YarnConfiguration(org.apache.hadoop.yarn.conf.YarnConfiguration) Configuration(org.apache.hadoop.conf.Configuration) FileSystem(org.apache.hadoop.fs.FileSystem) FilterFileSystem(org.apache.hadoop.fs.FilterFileSystem) IOException(java.io.IOException) ApplicationId(org.apache.hadoop.yarn.api.records.ApplicationId) AccessControlException(org.apache.hadoop.security.AccessControlException) Test(org.junit.Test)
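
For reference, the arithmetic behind the assertions in this test is a simple cutoff comparison; the snippet below restates it (illustration only, not code from the test class).

// cutoff = now - retention; anything last modified before the cutoff is eligible for deletion
long retainSeconds = 1800L;                                   // initial LOG_AGGREGATION_RETAIN_SECONDS
long cutoff = System.currentTimeMillis() - retainSeconds * 1000L;
// app1Dir was modified ~2000s ago -> older than the cutoff -> deleted
// app2Dir was modified ~50s ago   -> newer than the cutoff -> kept
// After retention is lowered to 50s and refreshLogRetentionSettings() runs,
// the cutoff moves forward and app2Dir becomes eligible for deletion as well.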

Example 10 with FilterFileSystem

Use of org.apache.hadoop.fs.FilterFileSystem in the Apache Hadoop project.

From the class TestMRApps, the method testSetupDistributedCache verifies that MRApps.setupDistributedCache turns the distributed-cache archive and file entries in the configuration into correctly sized and typed LocalResource entries.

@SuppressWarnings("deprecation")
@Test(timeout = 30000)
public void testSetupDistributedCache() throws Exception {
    Configuration conf = new Configuration();
    conf.setClass("fs.mockfs.impl", MockFileSystem.class, FileSystem.class);
    URI mockUri = URI.create("mockfs://mock/");
    FileSystem mockFs = ((FilterFileSystem) FileSystem.get(mockUri, conf)).getRawFileSystem();
    URI archive = new URI("mockfs://mock/tmp/something.zip");
    Path archivePath = new Path(archive);
    URI file = new URI("mockfs://mock/tmp/something.txt#something");
    Path filePath = new Path(file);
    when(mockFs.resolvePath(archivePath)).thenReturn(archivePath);
    when(mockFs.resolvePath(filePath)).thenReturn(filePath);
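    // register one archive and one file in the distributed cache, along with size, timestamp and visibility metadata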
    DistributedCache.addCacheArchive(archive, conf);
    conf.set(MRJobConfig.CACHE_ARCHIVES_TIMESTAMPS, "10");
    conf.set(MRJobConfig.CACHE_ARCHIVES_SIZES, "10");
    conf.set(MRJobConfig.CACHE_ARCHIVES_VISIBILITIES, "true");
    DistributedCache.addCacheFile(file, conf);
    conf.set(MRJobConfig.CACHE_FILE_TIMESTAMPS, "11");
    conf.set(MRJobConfig.CACHE_FILES_SIZES, "11");
    conf.set(MRJobConfig.CACHE_FILE_VISIBILITIES, "true");
    Map<String, LocalResource> localResources = new HashMap<String, LocalResource>();
    MRApps.setupDistributedCache(conf, localResources);
    assertEquals(2, localResources.size());
    LocalResource lr = localResources.get("something.zip");
    assertNotNull(lr);
    assertEquals(10L, lr.getSize());
    assertEquals(10L, lr.getTimestamp());
    assertEquals(LocalResourceType.ARCHIVE, lr.getType());
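    // the file was added with a "#something" fragment, so its LocalResource is keyed by the fragment rather than the file name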
    lr = localResources.get("something");
    assertNotNull(lr);
    assertEquals(11L, lr.getSize());
    assertEquals(11L, lr.getTimestamp());
    assertEquals(LocalResourceType.FILE, lr.getType());
}
Also used : Path(org.apache.hadoop.fs.Path) YarnConfiguration(org.apache.hadoop.yarn.conf.YarnConfiguration) Configuration(org.apache.hadoop.conf.Configuration) HashMap(java.util.HashMap) FileSystem(org.apache.hadoop.fs.FileSystem) FilterFileSystem(org.apache.hadoop.fs.FilterFileSystem) URI(java.net.URI) LocalResource(org.apache.hadoop.yarn.api.records.LocalResource) Test(org.junit.Test)

Aggregations

Configuration (org.apache.hadoop.conf.Configuration): 15
FileSystem (org.apache.hadoop.fs.FileSystem): 15
FilterFileSystem (org.apache.hadoop.fs.FilterFileSystem): 15
Test (org.junit.Test): 15
Path (org.apache.hadoop.fs.Path): 14
URI (java.net.URI): 12
ChRootedFileSystem (org.apache.hadoop.fs.viewfs.ChRootedFileSystem): 9
YarnConfiguration (org.apache.hadoop.yarn.conf.YarnConfiguration): 6
FileStatus (org.apache.hadoop.fs.FileStatus): 4
IOException (java.io.IOException): 3
HashMap (java.util.HashMap): 3
AccessControlException (org.apache.hadoop.security.AccessControlException): 3
ApplicationId (org.apache.hadoop.yarn.api.records.ApplicationId): 3
LocalResource (org.apache.hadoop.yarn.api.records.LocalResource): 3
AclEntry (org.apache.hadoop.fs.permission.AclEntry): 1