Example 1 with UnsupportedFileSystemException

Use of org.apache.hadoop.fs.UnsupportedFileSystemException in project elasticsearch by elastic.

Class HdfsBlobStoreContainerTests, method createContext:

@SuppressForbidden(reason = "lesser of two evils (the other being a bunch of JNI/classloader nightmares)")
private FileContext createContext(URI uri) {
    // mirrors HdfsRepository.java behaviour
    Configuration cfg = new Configuration(true);
    cfg.setClassLoader(HdfsRepository.class.getClassLoader());
    cfg.reloadConfiguration();
    Constructor<?> ctor;
    Subject subject;
    try {
        Class<?> clazz = Class.forName("org.apache.hadoop.security.User");
        ctor = clazz.getConstructor(String.class);
        ctor.setAccessible(true);
    } catch (ClassNotFoundException | NoSuchMethodException e) {
        throw new RuntimeException(e);
    }
    try {
        Principal principal = (Principal) ctor.newInstance(System.getProperty("user.name"));
        subject = new Subject(false, Collections.singleton(principal), Collections.emptySet(), Collections.emptySet());
    } catch (InstantiationException | IllegalAccessException | InvocationTargetException e) {
        throw new RuntimeException(e);
    }
    // disable file system cache
    cfg.setBoolean("fs.hdfs.impl.disable.cache", true);
    // set file system to TestingFs to avoid a bunch of security
    // checks, similar to what is done in HdfsTests.java
    cfg.set("fs.AbstractFileSystem." + uri.getScheme() + ".impl", TestingFs.class.getName());
    // create the FileContext with our user
    return Subject.doAs(subject, (PrivilegedAction<FileContext>) () -> {
        try {
            TestingFs fs = (TestingFs) AbstractFileSystem.get(uri, cfg);
            return FileContext.getFileContext(fs, cfg);
        } catch (UnsupportedFileSystemException e) {
            throw new RuntimeException(e);
        }
    });
}
Also used: Configuration (org.apache.hadoop.conf.Configuration), Subject (javax.security.auth.Subject), InvocationTargetException (java.lang.reflect.InvocationTargetException), UnsupportedFileSystemException (org.apache.hadoop.fs.UnsupportedFileSystemException), Principal (java.security.Principal), FileContext (org.apache.hadoop.fs.FileContext), SuppressForbidden (org.elasticsearch.common.SuppressForbidden)
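
For context on where this exception originates: FileContext.getFileContext (via AbstractFileSystem.get) throws UnsupportedFileSystemException when no fs.AbstractFileSystem.<scheme>.impl is configured for the requested URI scheme, which is exactly why the test above wires TestingFs into that key. A minimal standalone sketch, assuming a "bogus" scheme with no registered implementation (the class name is illustrative):

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileContext;
import org.apache.hadoop.fs.UnsupportedFileSystemException;

public class UnsupportedSchemeDemo {
    public static void main(String[] args) {
        Configuration conf = new Configuration();
        try {
            // No fs.AbstractFileSystem.bogus.impl is configured, so this call
            // throws UnsupportedFileSystemException instead of returning a FileContext.
            FileContext.getFileContext(URI.create("bogus:///tmp"), conf);
        } catch (UnsupportedFileSystemException e) {
            System.err.println("No AbstractFileSystem registered for scheme 'bogus': " + e.getMessage());
        }
    }
}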

Example 2 with UnsupportedFileSystemException

Use of org.apache.hadoop.fs.UnsupportedFileSystemException in project hadoop by apache.

Class TestNonAggregatingLogHandler, method runMockedFailedDirs:

/**
 * Runs a log handler against directories whose getFileStatus calls fail.
 * The function sets up the mocks to fail with specific exceptions and
 * verifies that the deletion service receives the correct delete calls.
 *
 * @param logHandler the LogHandler implementation to test
 * @param appId the application id to use when sending events to the log handler
 * @param user the user name to use
 * @param mockDelService a mock of the DeletionService against which the delete
 *          calls are verified
 * @param dirsHandler a spy or mock of the LocalDirsHandlerService used when
 *          creating the logHandler; it must be a spy so that the
 *          getLogDirsForCleanup() call can be intercepted
 * @param conf the configuration used
 * @param spylfs a spy on the AbstractFileSystem object used when creating lfs
 * @param lfs the FileContext object used to mock the getFileStatus() calls
 * @param localLogDirs the log dirs to run the test against; must have at
 *          least 7 entries
 */
public static void runMockedFailedDirs(LogHandler logHandler, ApplicationId appId, String user, DeletionService mockDelService, LocalDirsHandlerService dirsHandler, Configuration conf, AbstractFileSystem spylfs, FileContext lfs, File[] localLogDirs) throws Exception {
    Map<ApplicationAccessType, String> appAcls = new HashMap<ApplicationAccessType, String>();
    if (localLogDirs.length < 7) {
        throw new IllegalArgumentException("Argument localLogDirs must be at least of length 7");
    }
    Path[] localAppLogDirPaths = new Path[localLogDirs.length];
    for (int i = 0; i < localAppLogDirPaths.length; i++) {
        localAppLogDirPaths[i] = new Path(localLogDirs[i].getAbsolutePath(), appId.toString());
    }
    final List<String> localLogDirPaths = new ArrayList<String>(localLogDirs.length);
    for (int i = 0; i < localLogDirs.length; i++) {
        localLogDirPaths.add(localLogDirs[i].getAbsolutePath());
    }
    // setup mocks
    FsPermission defaultPermission = FsPermission.getDirDefault().applyUMask(lfs.getUMask());
    final FileStatus fs = new FileStatus(0, true, 1, 0, System.currentTimeMillis(), 0, defaultPermission, "", "", new Path(localLogDirs[0].getAbsolutePath()));
    doReturn(fs).when(spylfs).getFileStatus(isA(Path.class));
    doReturn(localLogDirPaths).when(dirsHandler).getLogDirsForCleanup();
    logHandler.handle(new LogHandlerAppStartedEvent(appId, user, null, appAcls));
    // test case where some dirs have the log dir to delete
    // mock some dirs throwing various exceptions
    // verify deletion happens only on the others
    Mockito.doThrow(new FileNotFoundException()).when(spylfs).getFileStatus(eq(localAppLogDirPaths[0]));
    doReturn(fs).when(spylfs).getFileStatus(eq(localAppLogDirPaths[1]));
    Mockito.doThrow(new AccessControlException()).when(spylfs).getFileStatus(eq(localAppLogDirPaths[2]));
    doReturn(fs).when(spylfs).getFileStatus(eq(localAppLogDirPaths[3]));
    Mockito.doThrow(new IOException()).when(spylfs).getFileStatus(eq(localAppLogDirPaths[4]));
    Mockito.doThrow(new UnsupportedFileSystemException("test")).when(spylfs).getFileStatus(eq(localAppLogDirPaths[5]));
    doReturn(fs).when(spylfs).getFileStatus(eq(localAppLogDirPaths[6]));
    logHandler.handle(new LogHandlerAppFinishedEvent(appId));
    testDeletionServiceCall(mockDelService, user, 5000, localAppLogDirPaths[1], localAppLogDirPaths[3], localAppLogDirPaths[6]);
    return;
}
Also used: Path (org.apache.hadoop.fs.Path), FileStatus (org.apache.hadoop.fs.FileStatus), HashMap (java.util.HashMap), ArrayList (java.util.ArrayList), FileNotFoundException (java.io.FileNotFoundException), AccessControlException (org.apache.hadoop.security.AccessControlException), IOException (java.io.IOException), LogHandlerAppStartedEvent (org.apache.hadoop.yarn.server.nodemanager.containermanager.loghandler.event.LogHandlerAppStartedEvent), ApplicationAccessType (org.apache.hadoop.yarn.api.records.ApplicationAccessType), LogHandlerAppFinishedEvent (org.apache.hadoop.yarn.server.nodemanager.containermanager.loghandler.event.LogHandlerAppFinishedEvent), UnsupportedFileSystemException (org.apache.hadoop.fs.UnsupportedFileSystemException), FsPermission (org.apache.hadoop.fs.permission.FsPermission)
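
The mocking pattern in this test is worth isolating: a Mockito spy on the AbstractFileSystem lets the test decide, per path, whether getFileStatus returns a normal FileStatus or throws. A stripped-down sketch of that idea, assuming a spy and a pre-built FileStatus comparable to the fixtures above (the method name and directory paths are placeholders):

import static org.mockito.Mockito.*;

import org.apache.hadoop.fs.AbstractFileSystem;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.UnsupportedFileSystemException;

void stubPerPathBehaviour(AbstractFileSystem spylfs, FileStatus okStatus) throws Exception {
    Path goodDir = new Path("/log/dir-good");
    Path unsupportedDir = new Path("/log/dir-unsupported");
    // Healthy directory: getFileStatus returns a normal FileStatus.
    doReturn(okStatus).when(spylfs).getFileStatus(eq(goodDir));
    // Broken directory: simulate a file system the handler cannot deal with.
    doThrow(new UnsupportedFileSystemException("test")).when(spylfs).getFileStatus(eq(unsupportedDir));
}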

Example 3 with UnsupportedFileSystemException

Use of org.apache.hadoop.fs.UnsupportedFileSystemException in project hadoop by apache.

Class AppLogAggregatorImpl, method doAppLogAggregationPostCleanUp:

private void doAppLogAggregationPostCleanUp() {
    // Remove the local app-log-dirs
    List<Path> localAppLogDirs = new ArrayList<Path>();
    for (String rootLogDir : dirsHandler.getLogDirsForCleanup()) {
        Path logPath = new Path(rootLogDir, applicationId);
        try {
            // check if log dir exists
            lfs.getFileStatus(logPath);
            localAppLogDirs.add(logPath);
        } catch (UnsupportedFileSystemException ue) {
            LOG.warn("Log dir " + rootLogDir + " is an unsupported file system", ue);
            continue;
        } catch (IOException fe) {
            // the app log dir does not exist or cannot be accessed; skip it
            continue;
        }
    }
    if (localAppLogDirs.size() > 0) {
        this.delService.delete(this.userUgi.getShortUserName(), null, localAppLogDirs.toArray(new Path[localAppLogDirs.size()]));
    }
}
Also used: Path (org.apache.hadoop.fs.Path), ArrayList (java.util.ArrayList), UnsupportedFileSystemException (org.apache.hadoop.fs.UnsupportedFileSystemException), IOException (java.io.IOException)
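
One detail the method above depends on: UnsupportedFileSystemException is a subclass of IOException, so the specific catch block must come before the general one (the reverse order would not compile). A minimal, self-contained sketch of that ordering (the class name is illustrative):

import java.io.IOException;
import org.apache.hadoop.fs.UnsupportedFileSystemException;

public class CatchOrderDemo {
    public static void main(String[] args) {
        try {
            throw new UnsupportedFileSystemException("demo");
        } catch (UnsupportedFileSystemException ue) {
            // Handled here, before the broader IOException handler below.
            System.out.println("unsupported file system: " + ue.getMessage());
        } catch (IOException ioe) {
            System.out.println("generic I/O failure: " + ioe.getMessage());
        }
    }
}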

Example 4 with UnsupportedFileSystemException

Use of org.apache.hadoop.fs.UnsupportedFileSystemException in project hadoop by apache.

Class TestResourceLocalizationService, method testFailedDirsResourceRelease:

/*
   * Test to ensure ResourceLocalizationService can handle local dirs going bad.
   * Test first sets up all the components required, then sends events to fetch
   * a private, app and public resource. It then sends events to clean up the
   * container and the app and ensures the right delete calls were made.
   */
@Test
@SuppressWarnings("unchecked") // mocked generics
public void testFailedDirsResourceRelease() throws Exception {
    // setup components
    File f = new File(basedir.toString());
    String[] sDirs = new String[4];
    List<Path> localDirs = new ArrayList<Path>(sDirs.length);
    for (int i = 0; i < 4; ++i) {
        sDirs[i] = f.getAbsolutePath() + i;
        localDirs.add(new Path(sDirs[i]));
    }
    List<Path> containerLocalDirs = new ArrayList<Path>(localDirs.size());
    List<Path> appLocalDirs = new ArrayList<Path>(localDirs.size());
    List<Path> nmLocalContainerDirs = new ArrayList<Path>(localDirs.size());
    List<Path> nmLocalAppDirs = new ArrayList<Path>(localDirs.size());
    conf.setStrings(YarnConfiguration.NM_LOCAL_DIRS, sDirs);
    conf.setLong(YarnConfiguration.NM_DISK_HEALTH_CHECK_INTERVAL_MS, 500);
    LocalizerTracker mockLocalizerTracker = mock(LocalizerTracker.class);
    DrainDispatcher dispatcher = new DrainDispatcher();
    dispatcher.init(conf);
    dispatcher.start();
    EventHandler<ApplicationEvent> applicationBus = mock(EventHandler.class);
    dispatcher.register(ApplicationEventType.class, applicationBus);
    EventHandler<ContainerEvent> containerBus = mock(EventHandler.class);
    dispatcher.register(ContainerEventType.class, containerBus);
    // Ignore actual localization
    EventHandler<LocalizerEvent> localizerBus = mock(EventHandler.class);
    dispatcher.register(LocalizerEventType.class, localizerBus);
    ContainerExecutor exec = mock(ContainerExecutor.class);
    LocalDirsHandlerService mockDirsHandler = mock(LocalDirsHandlerService.class);
    doReturn(new ArrayList<String>(Arrays.asList(sDirs))).when(mockDirsHandler).getLocalDirsForCleanup();
    DeletionService delService = mock(DeletionService.class);
    // setup mocks
    ResourceLocalizationService rawService = new ResourceLocalizationService(dispatcher, exec, delService, mockDirsHandler, nmContext);
    ResourceLocalizationService spyService = spy(rawService);
    doReturn(mockServer).when(spyService).createServer();
    doReturn(mockLocalizerTracker).when(spyService).createLocalizerTracker(isA(Configuration.class));
    doReturn(lfs).when(spyService).getLocalFileContext(isA(Configuration.class));
    FsPermission defaultPermission = FsPermission.getDirDefault().applyUMask(lfs.getUMask());
    FsPermission nmPermission = ResourceLocalizationService.NM_PRIVATE_PERM.applyUMask(lfs.getUMask());
    final FileStatus fs = new FileStatus(0, true, 1, 0, System.currentTimeMillis(), 0, defaultPermission, "", "", localDirs.get(0));
    final FileStatus nmFs = new FileStatus(0, true, 1, 0, System.currentTimeMillis(), 0, nmPermission, "", "", localDirs.get(0));
    final String user = "user0";
    // init application
    final Application app = mock(Application.class);
    final ApplicationId appId = BuilderUtils.newApplicationId(314159265358979L, 3);
    when(app.getUser()).thenReturn(user);
    when(app.getAppId()).thenReturn(appId);
    when(app.toString()).thenReturn(appId.toString());
    // init container.
    final Container c = getMockContainer(appId, 42, user);
    // setup local app dirs
    List<String> tmpDirs = mockDirsHandler.getLocalDirs();
    for (int i = 0; i < tmpDirs.size(); ++i) {
        Path usersdir = new Path(tmpDirs.get(i), ContainerLocalizer.USERCACHE);
        Path userdir = new Path(usersdir, user);
        Path allAppsdir = new Path(userdir, ContainerLocalizer.APPCACHE);
        Path appDir = new Path(allAppsdir, appId.toString());
        Path containerDir = new Path(appDir, c.getContainerId().toString());
        containerLocalDirs.add(containerDir);
        appLocalDirs.add(appDir);
        Path sysDir = new Path(tmpDirs.get(i), ResourceLocalizationService.NM_PRIVATE_DIR);
        Path appSysDir = new Path(sysDir, appId.toString());
        Path containerSysDir = new Path(appSysDir, c.getContainerId().toString());
        nmLocalContainerDirs.add(containerSysDir);
        nmLocalAppDirs.add(appSysDir);
    }
    try {
        spyService.init(conf);
        spyService.start();
        spyService.handle(new ApplicationLocalizationEvent(LocalizationEventType.INIT_APPLICATION_RESOURCES, app));
        dispatcher.await();
        // Get a handle on the trackers after they're setup with
        // INIT_APP_RESOURCES
        LocalResourcesTracker appTracker = spyService.getLocalResourcesTracker(LocalResourceVisibility.APPLICATION, user, appId);
        LocalResourcesTracker privTracker = spyService.getLocalResourcesTracker(LocalResourceVisibility.PRIVATE, user, appId);
        LocalResourcesTracker pubTracker = spyService.getLocalResourcesTracker(LocalResourceVisibility.PUBLIC, user, appId);
        // init resources
        Random r = new Random();
        long seed = r.nextLong();
        r.setSeed(seed);
        // Send localization requests, one for each type of resource
        final LocalResource privResource = getPrivateMockedResource(r);
        final LocalResourceRequest privReq = new LocalResourceRequest(privResource);
        final LocalResource appResource = getAppMockedResource(r);
        final LocalResourceRequest appReq = new LocalResourceRequest(appResource);
        final LocalResource pubResource = getPublicMockedResource(r);
        final LocalResourceRequest pubReq = new LocalResourceRequest(pubResource);
        Map<LocalResourceVisibility, Collection<LocalResourceRequest>> req = new HashMap<LocalResourceVisibility, Collection<LocalResourceRequest>>();
        req.put(LocalResourceVisibility.PRIVATE, Collections.singletonList(privReq));
        req.put(LocalResourceVisibility.APPLICATION, Collections.singletonList(appReq));
        req.put(LocalResourceVisibility.PUBLIC, Collections.singletonList(pubReq));
        Map<LocalResourceVisibility, Collection<LocalResourceRequest>> req2 = new HashMap<LocalResourceVisibility, Collection<LocalResourceRequest>>();
        req2.put(LocalResourceVisibility.PRIVATE, Collections.singletonList(privReq));
        // Send Request event
        spyService.handle(new ContainerLocalizationRequestEvent(c, req));
        spyService.handle(new ContainerLocalizationRequestEvent(c, req2));
        dispatcher.await();
        int privRsrcCount = 0;
        for (LocalizedResource lr : privTracker) {
            privRsrcCount++;
            Assert.assertEquals("Incorrect reference count", 2, lr.getRefCount());
            Assert.assertEquals(privReq, lr.getRequest());
        }
        Assert.assertEquals(1, privRsrcCount);
        int appRsrcCount = 0;
        for (LocalizedResource lr : appTracker) {
            appRsrcCount++;
            Assert.assertEquals("Incorrect reference count", 1, lr.getRefCount());
            Assert.assertEquals(appReq, lr.getRequest());
        }
        Assert.assertEquals(1, appRsrcCount);
        int pubRsrcCount = 0;
        for (LocalizedResource lr : pubTracker) {
            pubRsrcCount++;
            Assert.assertEquals("Incorrect reference count", 1, lr.getRefCount());
            Assert.assertEquals(pubReq, lr.getRequest());
        }
        Assert.assertEquals(1, pubRsrcCount);
        // stub getFileStatus for each container dir; dir 2 throws IOException
        for (int i = 0; i < containerLocalDirs.size(); ++i) {
            if (i == 2) {
                Mockito.doThrow(new IOException()).when(spylfs).getFileStatus(eq(containerLocalDirs.get(i)));
                Mockito.doThrow(new IOException()).when(spylfs).getFileStatus(eq(nmLocalContainerDirs.get(i)));
            } else {
                doReturn(fs).when(spylfs).getFileStatus(eq(containerLocalDirs.get(i)));
                doReturn(nmFs).when(spylfs).getFileStatus(eq(nmLocalContainerDirs.get(i)));
            }
        }
        // Send Cleanup Event
        spyService.handle(new ContainerLocalizationCleanupEvent(c, req));
        verify(mockLocalizerTracker).cleanupPrivLocalizers("container_314159265358979_0003_01_000042");
        // match cleanup events with the mocks we setup earlier
        for (int i = 0; i < containerLocalDirs.size(); ++i) {
            if (i == 2) {
                try {
                    verify(delService).delete(user, containerLocalDirs.get(i));
                    verify(delService).delete(null, nmLocalContainerDirs.get(i));
                    Assert.fail("deletion attempts for invalid dirs");
                } catch (Throwable e) {
                    continue;
                }
            } else {
                verify(delService).delete(user, containerLocalDirs.get(i));
                verify(delService).delete(null, nmLocalContainerDirs.get(i));
            }
        }
        ArgumentMatcher<ApplicationEvent> matchesAppDestroy = new ArgumentMatcher<ApplicationEvent>() {

            @Override
            public boolean matches(Object o) {
                ApplicationEvent evt = (ApplicationEvent) o;
                return (evt.getType() == ApplicationEventType.APPLICATION_RESOURCES_CLEANEDUP) && appId == evt.getApplicationID();
            }
        };
        dispatcher.await();
        // stub getFileStatus for each app dir; dir 3 fails with IOException / UnsupportedFileSystemException
        for (int i = 0; i < containerLocalDirs.size(); ++i) {
            if (i == 3) {
                Mockito.doThrow(new IOException()).when(spylfs).getFileStatus(eq(appLocalDirs.get(i)));
                Mockito.doThrow(new UnsupportedFileSystemException("test")).when(spylfs).getFileStatus(eq(nmLocalAppDirs.get(i)));
            } else {
                doReturn(fs).when(spylfs).getFileStatus(eq(appLocalDirs.get(i)));
                doReturn(nmFs).when(spylfs).getFileStatus(eq(nmLocalAppDirs.get(i)));
            }
        }
        LocalizationEvent destroyApp = new ApplicationLocalizationEvent(LocalizationEventType.DESTROY_APPLICATION_RESOURCES, app);
        spyService.handle(destroyApp);
        // Waits for APPLICATION_RESOURCES_CLEANEDUP event to be handled.
        dispatcher.await();
        verify(applicationBus).handle(argThat(matchesAppDestroy));
        // verify we got the right delete calls
        for (int i = 0; i < containerLocalDirs.size(); ++i) {
            if (i == 3) {
                try {
                    verify(delService).delete(user, containerLocalDirs.get(i));
                    verify(delService).delete(null, nmLocalContainerDirs.get(i));
                    Assert.fail("deletion attempts for invalid dirs");
                } catch (Throwable e) {
                    continue;
                }
            } else {
                verify(delService).delete(user, appLocalDirs.get(i));
                verify(delService).delete(null, nmLocalAppDirs.get(i));
            }
        }
    } finally {
        dispatcher.stop();
        delService.stop();
    }
}
Also used: DrainDispatcher (org.apache.hadoop.yarn.event.DrainDispatcher), ContainerExecutor (org.apache.hadoop.yarn.server.nodemanager.ContainerExecutor), DefaultContainerExecutor (org.apache.hadoop.yarn.server.nodemanager.DefaultContainerExecutor), FileStatus (org.apache.hadoop.fs.FileStatus), ContainerLocalizationRequestEvent (org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.event.ContainerLocalizationRequestEvent), Configuration (org.apache.hadoop.conf.Configuration), YarnConfiguration (org.apache.hadoop.yarn.conf.YarnConfiguration), HashMap (java.util.HashMap), ArrayList (java.util.ArrayList), ResourceFailedLocalizationEvent (org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.event.ResourceFailedLocalizationEvent), ApplicationLocalizationEvent (org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.event.ApplicationLocalizationEvent), LocalizationEvent (org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.event.LocalizationEvent), ContainerLocalizationEvent (org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.event.ContainerLocalizationEvent), LocalResourceVisibility (org.apache.hadoop.yarn.api.records.LocalResourceVisibility), Container (org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container), Random (java.util.Random), ArgumentMatcher (org.mockito.ArgumentMatcher), UnsupportedFileSystemException (org.apache.hadoop.fs.UnsupportedFileSystemException), FsPermission (org.apache.hadoop.fs.permission.FsPermission), Path (org.apache.hadoop.fs.Path), ContainerEvent (org.apache.hadoop.yarn.server.nodemanager.containermanager.container.ContainerEvent), ApplicationEvent (org.apache.hadoop.yarn.server.nodemanager.containermanager.application.ApplicationEvent), DeletionService (org.apache.hadoop.yarn.server.nodemanager.DeletionService), ContainerLocalizationCleanupEvent (org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.event.ContainerLocalizationCleanupEvent), IOException (java.io.IOException), LocalDirsHandlerService (org.apache.hadoop.yarn.server.nodemanager.LocalDirsHandlerService), LocalResource (org.apache.hadoop.yarn.api.records.LocalResource), LocalizerEvent (org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.event.LocalizerEvent), LocalizerTracker (org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.ResourceLocalizationService.LocalizerTracker), Collection (java.util.Collection), ApplicationId (org.apache.hadoop.yarn.api.records.ApplicationId), File (java.io.File), Application (org.apache.hadoop.yarn.server.nodemanager.containermanager.application.Application), Test (org.junit.Test)

Example 5 with UnsupportedFileSystemException

Use of org.apache.hadoop.fs.UnsupportedFileSystemException in project hadoop by apache.

Class DistributedFileSystem, method getFileChecksum:

@Override
public FileChecksum getFileChecksum(Path f, final long length) throws IOException {
    statistics.incrementReadOps(1);
    storageStatistics.incrementOpCounter(OpType.GET_FILE_CHECKSUM);
    Path absF = fixRelativePart(f);
    return new FileSystemLinkResolver<FileChecksum>() {

        @Override
        public FileChecksum doCall(final Path p) throws IOException {
            return dfs.getFileChecksum(getPathName(p), length);
        }

        @Override
        public FileChecksum next(final FileSystem fs, final Path p) throws IOException {
            if (fs instanceof DistributedFileSystem) {
                return fs.getFileChecksum(p, length);
            } else {
                throw new UnsupportedFileSystemException("getFileChecksum(Path, long) is not supported by " + fs.getClass().getSimpleName());
            }
        }
    }.resolve(this, absF);
}
Also used: Path (org.apache.hadoop.fs.Path), FileSystem (org.apache.hadoop.fs.FileSystem), UnsupportedFileSystemException (org.apache.hadoop.fs.UnsupportedFileSystemException), IOException (java.io.IOException), FileChecksum (org.apache.hadoop.fs.FileChecksum)
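
From the caller's side, the length-limited overload can be treated as optional: if the resolved file system rejects it with UnsupportedFileSystemException, fall back to the whole-file checksum. A hedged sketch, assuming an already-initialized FileSystem handle (the helper name and path are illustrative):

import java.io.IOException;
import org.apache.hadoop.fs.FileChecksum;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.UnsupportedFileSystemException;

FileChecksum checksumOfPrefix(FileSystem fs, Path file, long length) throws IOException {
    try {
        // Checksum over the first 'length' bytes; only some implementations
        // (such as DistributedFileSystem above) support this overload.
        return fs.getFileChecksum(file, length);
    } catch (UnsupportedFileSystemException e) {
        // Fall back to the checksum of the entire file.
        return fs.getFileChecksum(file);
    }
}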

Aggregations

UnsupportedFileSystemException (org.apache.hadoop.fs.UnsupportedFileSystemException): 15
IOException (java.io.IOException): 8
Path (org.apache.hadoop.fs.Path): 8
Configuration (org.apache.hadoop.conf.Configuration): 6
FileSystem (org.apache.hadoop.fs.FileSystem): 4
FileNotFoundException (java.io.FileNotFoundException): 3
URI (java.net.URI): 3
ArrayList (java.util.ArrayList): 3
HashMap (java.util.HashMap): 3
FileContext (org.apache.hadoop.fs.FileContext): 3
AccessControlException (org.apache.hadoop.security.AccessControlException): 3
LocalResource (org.apache.hadoop.yarn.api.records.LocalResource): 3
URISyntaxException (java.net.URISyntaxException): 2
Principal (java.security.Principal): 2
Subject (javax.security.auth.Subject): 2
AbstractFileSystem (org.apache.hadoop.fs.AbstractFileSystem): 2
FileChecksum (org.apache.hadoop.fs.FileChecksum): 2
FileStatus (org.apache.hadoop.fs.FileStatus): 2
FsPermission (org.apache.hadoop.fs.permission.FsPermission): 2
URL (org.apache.hadoop.yarn.api.records.URL): 2