
Example 46 with FileContext

Use of org.apache.hadoop.fs.FileContext in project hadoop by apache: class LocalDirsHandlerService, method serviceInit.

/**
   * Initializes the disk health monitoring timer task and its check interval.
   */
@Override
protected void serviceInit(Configuration config) throws Exception {
    // Clone the configuration as we may do modifications to dirs-list
    Configuration conf = new Configuration(config);
    diskHealthCheckInterval = conf.getLong(YarnConfiguration.NM_DISK_HEALTH_CHECK_INTERVAL_MS, YarnConfiguration.DEFAULT_NM_DISK_HEALTH_CHECK_INTERVAL_MS);
    monitoringTimerTask = new MonitoringTimerTask(conf);
    isDiskHealthCheckerEnabled = conf.getBoolean(YarnConfiguration.NM_DISK_HEALTH_CHECK_ENABLE, true);
    minNeededHealthyDisksFactor = conf.getFloat(YarnConfiguration.NM_MIN_HEALTHY_DISKS_FRACTION, YarnConfiguration.DEFAULT_NM_MIN_HEALTHY_DISKS_FRACTION);
    lastDisksCheckTime = System.currentTimeMillis();
    super.serviceInit(conf);
    FileContext localFs;
    try {
        localFs = FileContext.getLocalFSFileContext(config);
    } catch (IOException e) {
        throw new YarnRuntimeException("Unable to get the local filesystem", e);
    }
    FsPermission perm = new FsPermission((short) 0755);
    boolean createSucceeded = localDirs.createNonExistentDirs(localFs, perm);
    createSucceeded &= logDirs.createNonExistentDirs(localFs, perm);
    if (!createSucceeded) {
        updateDirsAfterTest();
    }
    // Check the disk health immediately to weed out bad directories
    // before other init code attempts to use them.
    checkDirs();
}
Also used:
YarnRuntimeException (org.apache.hadoop.yarn.exceptions.YarnRuntimeException)
YarnConfiguration (org.apache.hadoop.yarn.conf.YarnConfiguration)
Configuration (org.apache.hadoop.conf.Configuration)
IOException (java.io.IOException)
FsPermission (org.apache.hadoop.fs.permission.FsPermission)
FileContext (org.apache.hadoop.fs.FileContext)
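
The pattern above reduces to: resolve the local FileContext from a Configuration, then create any missing directories with explicit permissions. A minimal, self-contained sketch of just that core (the directory path is hypothetical; createNonExistentDirs in the example walks a configured dirs-list and does the equivalent mkdir for each missing entry):

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileContext;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;

public class LocalDirSetupSketch {
    public static void main(String[] args) throws IOException {
        Configuration conf = new Configuration();
        // Same call serviceInit wraps in a YarnRuntimeException on failure.
        FileContext localFs = FileContext.getLocalFSFileContext(conf);
        // 0755: owner rwx, group and other rx, matching the example above.
        FsPermission perm = new FsPermission((short) 0755);
        // Hypothetical directory standing in for one entry of the dirs-list.
        Path dir = new Path("/tmp/nm-local-dir-sketch");
        localFs.mkdir(dir, perm, true); // true: create missing parents
    }
}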

Example 47 with FileContext

Use of org.apache.hadoop.fs.FileContext in project hadoop by apache: class TestDefaultContainerExecutor, method testStartLocalizer.

@Test(timeout = 30000)
public void testStartLocalizer() throws IOException, InterruptedException, YarnException {
    final Path firstDir = new Path(BASE_TMP_PATH, "localDir1");
    List<String> localDirs = new ArrayList<String>();
    final Path secondDir = new Path(BASE_TMP_PATH, "localDir2");
    List<String> logDirs = new ArrayList<String>();
    final Path logDir = new Path(BASE_TMP_PATH, "logDir");
    final Path tokenDir = new Path(BASE_TMP_PATH, "tokenDir");
    FsPermission perms = new FsPermission((short) 0770);
    Configuration conf = new Configuration();
    final FileContext mockLfs = spy(FileContext.getLocalFSFileContext(conf));
    final FileContext.Util mockUtil = spy(mockLfs.util());
    doAnswer(new Answer() {

        @Override
        public Object answer(InvocationOnMock invocationOnMock) throws Throwable {
            return mockUtil;
        }
    }).when(mockLfs).util();
    doAnswer(new Answer() {

        @Override
        public Object answer(InvocationOnMock invocationOnMock) throws Throwable {
            Path dest = (Path) invocationOnMock.getArguments()[1];
            if (dest.toString().contains(firstDir.toString())) {
                // to simulate no space on the first drive
                throw new IOException("No space on this drive " + dest.toString());
            } else {
                // copy token to the second local dir
                DataOutputStream tokenOut = null;
                try {
                    Credentials credentials = new Credentials();
                    tokenOut = mockLfs.create(dest, EnumSet.of(CREATE, OVERWRITE));
                    credentials.writeTokenStorageToStream(tokenOut);
                } finally {
                    if (tokenOut != null) {
                        tokenOut.close();
                    }
                }
            }
            return null;
        }
    }).when(mockUtil).copy(any(Path.class), any(Path.class), anyBoolean(), anyBoolean());
    doAnswer(new Answer() {

        @Override
        public Object answer(InvocationOnMock invocationOnMock) throws Throwable {
            Path p = (Path) invocationOnMock.getArguments()[0];
            // first local directory: simulate a full disk (used == capacity)
            if (p.toString().contains(firstDir.toString())) {
                return new FsStatus(2000, 2000, 0);
            } else {
                // second local directory: healthy, with space remaining
                return new FsStatus(1000, 0, 1000);
            }
        }
    }).when(mockLfs).getFsStatus(any(Path.class));
    DefaultContainerExecutor mockExec = spy(new DefaultContainerExecutor(mockLfs) {

        @Override
        public ContainerLocalizer createContainerLocalizer(String user, String appId, String locId, List<String> localDirs, FileContext localizerFc) throws IOException {
            // Spy on the localizer and make it return valid heart-beat
            // responses even though there is no real NodeManager.
            ContainerLocalizer localizer = super.createContainerLocalizer(user, appId, locId, localDirs, localizerFc);
            ContainerLocalizer spyLocalizer = spy(localizer);
            LocalizationProtocol nmProxy = mock(LocalizationProtocol.class);
            try {
                when(nmProxy.heartbeat(isA(LocalizerStatus.class))).thenReturn(new MockLocalizerHeartbeatResponse(LocalizerAction.DIE, new ArrayList<ResourceLocalizationSpec>()));
            } catch (YarnException e) {
                throw new IOException(e);
            }
            when(spyLocalizer.getProxy(any(InetSocketAddress.class))).thenReturn(nmProxy);
            return spyLocalizer;
        }
    });
    mockExec.setConf(conf);
    localDirs.add(mockLfs.makeQualified(firstDir).toString());
    localDirs.add(mockLfs.makeQualified(secondDir).toString());
    logDirs.add(mockLfs.makeQualified(logDir).toString());
    conf.setStrings(YarnConfiguration.NM_LOCAL_DIRS, localDirs.toArray(new String[localDirs.size()]));
    conf.set(YarnConfiguration.NM_LOG_DIRS, logDir.toString());
    mockLfs.mkdir(tokenDir, perms, true);
    Path nmPrivateCTokensPath = new Path(tokenDir, "test.tokens");
    String appSubmitter = "nobody";
    String appId = "APP_ID";
    String locId = "LOC_ID";
    LocalDirsHandlerService dirsHandler = mock(LocalDirsHandlerService.class);
    when(dirsHandler.getLocalDirs()).thenReturn(localDirs);
    when(dirsHandler.getLogDirs()).thenReturn(logDirs);
    try {
        mockExec.startLocalizer(new LocalizerStartContext.Builder().setNmPrivateContainerTokens(nmPrivateCTokensPath).setNmAddr(null).setUser(appSubmitter).setAppId(appId).setLocId(locId).setDirsHandler(dirsHandler).build());
    } catch (IOException e) {
        Assert.fail("StartLocalizer failed to copy token file: " + StringUtils.stringifyException(e));
    } finally {
        mockExec.deleteAsUser(new DeletionAsUserContext.Builder().setUser(appSubmitter).setSubDir(firstDir).build());
        mockExec.deleteAsUser(new DeletionAsUserContext.Builder().setUser(appSubmitter).setSubDir(secondDir).build());
        mockExec.deleteAsUser(new DeletionAsUserContext.Builder().setUser(appSubmitter).setSubDir(logDir).build());
        deleteTmpFiles();
    }
    // Verify that the calls happen the expected number of times
    verify(mockUtil, times(1)).copy(any(Path.class), any(Path.class), anyBoolean(), anyBoolean());
    verify(mockLfs, times(2)).getFsStatus(any(Path.class));
}
Also used:
Configuration (org.apache.hadoop.conf.Configuration)
YarnConfiguration (org.apache.hadoop.yarn.conf.YarnConfiguration)
MockLocalizerHeartbeatResponse (org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.MockLocalizerHeartbeatResponse)
DataOutputStream (java.io.DataOutputStream)
FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream)
ArrayList (java.util.ArrayList)
FsStatus (org.apache.hadoop.fs.FsStatus)
YarnException (org.apache.hadoop.yarn.exceptions.YarnException)
ResourceLocalizationSpec (org.apache.hadoop.yarn.server.nodemanager.api.ResourceLocalizationSpec)
FsPermission (org.apache.hadoop.fs.permission.FsPermission)
Path (org.apache.hadoop.fs.Path)
IOException (java.io.IOException)
Mockito.doAnswer (org.mockito.Mockito.doAnswer)
Answer (org.mockito.stubbing.Answer)
LocalizationProtocol (org.apache.hadoop.yarn.server.nodemanager.api.LocalizationProtocol)
InvocationOnMock (org.mockito.invocation.InvocationOnMock)
ContainerLocalizer (org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.ContainerLocalizer)
FileContext (org.apache.hadoop.fs.FileContext)
Credentials (org.apache.hadoop.security.Credentials)
Test (org.junit.Test)
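
The load-bearing trick in this test is routing FileContext.util() through a Mockito spy so that copy(...) can be scripted per destination. A condensed sketch of just that pattern, assuming Mockito 2+ (for InvocationOnMock.getArgument) and a hypothetical "/bad" path marker:

import static org.mockito.Mockito.any;
import static org.mockito.Mockito.anyBoolean;
import static org.mockito.Mockito.doAnswer;
import static org.mockito.Mockito.doReturn;
import static org.mockito.Mockito.spy;

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileContext;
import org.apache.hadoop.fs.Path;

public class FileContextSpySketch {
    // Returns a local FileContext whose util().copy(...) fails for any
    // destination containing "/bad", simulating a full drive.
    public static FileContext failingCopyFs() throws IOException {
        FileContext lfs = spy(FileContext.getLocalFSFileContext(new Configuration()));
        FileContext.Util util = spy(lfs.util());
        // Route util() through the spy so the copy stub below takes effect.
        doReturn(util).when(lfs).util();
        doAnswer(inv -> {
            Path dest = inv.getArgument(1);
            if (dest.toString().contains("/bad")) { // hypothetical marker
                throw new IOException("No space on this drive " + dest);
            }
            return true; // copy(...) reports success
        }).when(util).copy(any(Path.class), any(Path.class), anyBoolean(), anyBoolean());
        return lfs;
    }
}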

Example 48 with FileContext

Use of org.apache.hadoop.fs.FileContext in project hadoop by apache: class TestContainerManagerRecovery, method testContainerResizeRecovery.

@Test
public void testContainerResizeRecovery() throws Exception {
    conf.setBoolean(YarnConfiguration.NM_RECOVERY_ENABLED, true);
    conf.setBoolean(YarnConfiguration.NM_RECOVERY_SUPERVISED, true);
    NMStateStoreService stateStore = new NMMemoryStateStoreService();
    stateStore.init(conf);
    stateStore.start();
    Context context = createContext(conf, stateStore);
    ContainerManagerImpl cm = createContainerManager(context, delSrvc);
    cm.init(conf);
    cm.start();
    // add an application by starting a container
    ApplicationId appId = ApplicationId.newInstance(0, 1);
    ApplicationAttemptId attemptId = ApplicationAttemptId.newInstance(appId, 1);
    ContainerId cid = ContainerId.newContainerId(attemptId, 1);
    Map<String, String> containerEnv = Collections.emptyMap();
    Map<String, ByteBuffer> serviceData = Collections.emptyMap();
    Credentials containerCreds = new Credentials();
    DataOutputBuffer dob = new DataOutputBuffer();
    containerCreds.writeTokenStorageToStream(dob);
    ByteBuffer containerTokens = ByteBuffer.wrap(dob.getData(), 0, dob.getLength());
    Map<ApplicationAccessType, String> acls = Collections.emptyMap();
    File tmpDir = new File("target", this.getClass().getSimpleName() + "-tmpDir");
    File scriptFile = Shell.appendScriptExtension(tmpDir, "scriptFile");
    PrintWriter fileWriter = new PrintWriter(scriptFile);
    if (Shell.WINDOWS) {
        fileWriter.println("@ping -n 100 127.0.0.1 >nul");
    } else {
        fileWriter.write("\numask 0");
        fileWriter.write("\nexec sleep 100");
    }
    fileWriter.close();
    FileContext localFS = FileContext.getLocalFSFileContext();
    URL resource_alpha = URL.fromPath(localFS.makeQualified(new Path(scriptFile.getAbsolutePath())));
    LocalResource rsrc_alpha = RecordFactoryProvider.getRecordFactory(null).newRecordInstance(LocalResource.class);
    rsrc_alpha.setResource(resource_alpha);
    rsrc_alpha.setSize(-1);
    rsrc_alpha.setVisibility(LocalResourceVisibility.APPLICATION);
    rsrc_alpha.setType(LocalResourceType.FILE);
    rsrc_alpha.setTimestamp(scriptFile.lastModified());
    String destinationFile = "dest_file";
    Map<String, LocalResource> localResources = new HashMap<>();
    localResources.put(destinationFile, rsrc_alpha);
    List<String> commands = Arrays.asList(Shell.getRunScriptCommand(scriptFile));
    ContainerLaunchContext clc = ContainerLaunchContext.newInstance(localResources, containerEnv, commands, serviceData, containerTokens, acls);
    StartContainersResponse startResponse = startContainer(context, cm, cid, clc, null);
    assertTrue(startResponse.getFailedRequests().isEmpty());
    assertEquals(1, context.getApplications().size());
    Application app = context.getApplications().get(appId);
    assertNotNull(app);
    // make sure the container reaches RUNNING state
    waitForNMContainerState(cm, cid, org.apache.hadoop.yarn.server.nodemanager.containermanager.container.ContainerState.RUNNING);
    Resource targetResource = Resource.newInstance(2048, 2);
    IncreaseContainersResourceResponse increaseResponse = increaseContainersResource(context, cm, cid, targetResource);
    assertTrue(increaseResponse.getFailedRequests().isEmpty());
    // check status
    ContainerStatus containerStatus = getContainerStatus(context, cm, cid);
    assertEquals(targetResource, containerStatus.getCapability());
    // restart and verify container is running and recovered
    // to the correct size
    cm.stop();
    context = createContext(conf, stateStore);
    cm = createContainerManager(context);
    cm.init(conf);
    cm.start();
    assertEquals(1, context.getApplications().size());
    app = context.getApplications().get(appId);
    assertNotNull(app);
    containerStatus = getContainerStatus(context, cm, cid);
    assertEquals(targetResource, containerStatus.getCapability());
}
Also used:
HashMap (java.util.HashMap)
URL (org.apache.hadoop.yarn.api.records.URL)
ContainerStatus (org.apache.hadoop.yarn.api.records.ContainerStatus)
ContainerId (org.apache.hadoop.yarn.api.records.ContainerId)
DataOutputBuffer (org.apache.hadoop.io.DataOutputBuffer)
IncreaseContainersResourceResponse (org.apache.hadoop.yarn.api.protocolrecords.IncreaseContainersResourceResponse)
NMMemoryStateStoreService (org.apache.hadoop.yarn.server.nodemanager.recovery.NMMemoryStateStoreService)
PrintWriter (java.io.PrintWriter)
FileContext (org.apache.hadoop.fs.FileContext)
NMContext (org.apache.hadoop.yarn.server.nodemanager.NodeManager.NMContext)
ContainerLaunchContext (org.apache.hadoop.yarn.api.records.ContainerLaunchContext)
LogAggregationContext (org.apache.hadoop.yarn.api.records.LogAggregationContext)
Context (org.apache.hadoop.yarn.server.nodemanager.Context)
Path (org.apache.hadoop.fs.Path)
StartContainersResponse (org.apache.hadoop.yarn.api.protocolrecords.StartContainersResponse)
Resource (org.apache.hadoop.yarn.api.records.Resource)
LocalResource (org.apache.hadoop.yarn.api.records.LocalResource)
ApplicationAttemptId (org.apache.hadoop.yarn.api.records.ApplicationAttemptId)
ByteBuffer (java.nio.ByteBuffer)
NMStateStoreService (org.apache.hadoop.yarn.server.nodemanager.recovery.NMStateStoreService)
ApplicationAccessType (org.apache.hadoop.yarn.api.records.ApplicationAccessType)
ApplicationId (org.apache.hadoop.yarn.api.records.ApplicationId)
File (java.io.File)
Application (org.apache.hadoop.yarn.server.nodemanager.containermanager.application.Application)
Credentials (org.apache.hadoop.security.Credentials)
Test (org.junit.Test)
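
The FileContext-specific step here is small: qualify a local script path and wrap it in a LocalResource record. A hedged sketch of just those lines, factored into a helper (the class and method names are ours, not Hadoop's):

import java.io.File;
import java.io.IOException;
import org.apache.hadoop.fs.FileContext;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.yarn.api.records.LocalResource;
import org.apache.hadoop.yarn.api.records.LocalResourceType;
import org.apache.hadoop.yarn.api.records.LocalResourceVisibility;
import org.apache.hadoop.yarn.api.records.URL;
import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;

public class LocalResourceSketch {
    // Builds an APPLICATION-visible FILE resource from a local file,
    // mirroring the resource-construction lines in the test above.
    public static LocalResource fromFile(File file) throws IOException {
        FileContext localFs = FileContext.getLocalFSFileContext();
        // Qualify with the local file-system scheme so the NM can resolve it.
        URL url = URL.fromPath(localFs.makeQualified(new Path(file.getAbsolutePath())));
        LocalResource rsrc = RecordFactoryProvider.getRecordFactory(null)
            .newRecordInstance(LocalResource.class);
        rsrc.setResource(url);
        rsrc.setSize(-1); // -1, as in the test: size not pre-validated
        rsrc.setVisibility(LocalResourceVisibility.APPLICATION);
        rsrc.setType(LocalResourceType.FILE);
        rsrc.setTimestamp(file.lastModified());
        return rsrc;
    }
}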

Example 49 with FileContext

Use of org.apache.hadoop.fs.FileContext in project hadoop by apache: class TestAppLogAggregatorImpl, method createAppLogAggregator.

private static AppLogAggregatorInTest createAppLogAggregator(ApplicationId applicationId, String rootLogDir, YarnConfiguration config, long recoveredLogInitedTimeMillis, DeletionService deletionServiceWithFilesToExpect) throws IOException {
    final Dispatcher dispatcher = createNullDispatcher();
    final NodeId nodeId = NodeId.newInstance("localhost", 0);
    final String userId = "AppLogAggregatorTest";
    final UserGroupInformation ugi = UserGroupInformation.createRemoteUser(userId);
    final LocalDirsHandlerService dirsService = createLocalDirsHandlerService(config, rootLogDir);
    final DeletionService deletionService = deletionServiceWithFilesToExpect;
    final LogAggregationContext logAggregationContext = null;
    final Map<ApplicationAccessType, String> appAcls = new HashMap<>();
    final Context context = createContext(config);
    final FileContext fakeLfs = mock(FileContext.class);
    final Path remoteLogDirForApp = new Path(REMOTE_LOG_FILE.getAbsolutePath());
    return new AppLogAggregatorInTest(dispatcher, deletionService, config, applicationId, ugi, nodeId, dirsService, remoteLogDirForApp, appAcls, logAggregationContext, context, fakeLfs, recoveredLogInitedTimeMillis);
}
Also used:
FileContext (org.apache.hadoop.fs.FileContext)
LogAggregationContext (org.apache.hadoop.yarn.api.records.LogAggregationContext)
ContainerLogContext (org.apache.hadoop.yarn.server.api.ContainerLogContext)
Context (org.apache.hadoop.yarn.server.nodemanager.Context)
Path (org.apache.hadoop.fs.Path)
HashMap (java.util.HashMap)
DeletionService (org.apache.hadoop.yarn.server.nodemanager.DeletionService)
Dispatcher (org.apache.hadoop.yarn.event.Dispatcher)
LocalDirsHandlerService (org.apache.hadoop.yarn.server.nodemanager.LocalDirsHandlerService)
ApplicationAccessType (org.apache.hadoop.yarn.api.records.ApplicationAccessType)
NodeId (org.apache.hadoop.yarn.api.records.NodeId)
UserGroupInformation (org.apache.hadoop.security.UserGroupInformation)
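
Unlike the previous examples, this helper never touches a real file system: FileContext is a plain Mockito mock handed to the aggregator under test. A minimal sketch of that pattern (the getFsStatus stub is hypothetical, shown only to illustrate scripting the mock):

import static org.mockito.Mockito.any;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;

import java.io.IOException;
import org.apache.hadoop.fs.FileContext;
import org.apache.hadoop.fs.FsStatus;
import org.apache.hadoop.fs.Path;

public class FakeLfsSketch {
    public static FileContext fakeLfs() throws IOException {
        // Every method is a default-returning stub until told otherwise,
        // which is enough when the collaborator under test is also faked.
        FileContext fakeLfs = mock(FileContext.class);
        // Hypothetical stub: report a healthy drive for any status query.
        when(fakeLfs.getFsStatus(any(Path.class)))
            .thenReturn(new FsStatus(1000, 0, 1000));
        return fakeLfs;
    }
}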

Example 50 with FileContext

Use of org.apache.hadoop.fs.FileContext in project hadoop by apache: class TestLocalResourcesTrackerImpl, method testGetPathForLocalization.

@Test
@SuppressWarnings("unchecked")
public void testGetPathForLocalization() throws Exception {
    FileContext lfs = FileContext.getLocalFSFileContext();
    Path base_path = new Path("target", TestLocalResourcesTrackerImpl.class.getSimpleName());
    final String user = "someuser";
    final ApplicationId appId = ApplicationId.newInstance(1, 1);
    Configuration conf = new YarnConfiguration();
    DrainDispatcher dispatcher = createDispatcher(conf);
    EventHandler<LocalizerEvent> localizerEventHandler = mock(EventHandler.class);
    EventHandler<LocalizerEvent> containerEventHandler = mock(EventHandler.class);
    dispatcher.register(LocalizerEventType.class, localizerEventHandler);
    dispatcher.register(ContainerEventType.class, containerEventHandler);
    NMStateStoreService stateStore = mock(NMStateStoreService.class);
    DeletionService delService = mock(DeletionService.class);
    try {
        LocalResourceRequest req1 = createLocalResourceRequest(user, 1, 1, LocalResourceVisibility.PUBLIC);
        LocalizedResource lr1 = createLocalizedResource(req1, dispatcher);
        ConcurrentMap<LocalResourceRequest, LocalizedResource> localrsrc = new ConcurrentHashMap<LocalResourceRequest, LocalizedResource>();
        localrsrc.put(req1, lr1);
        LocalResourcesTrackerImpl tracker = new LocalResourcesTrackerImpl(user, appId, dispatcher, localrsrc, true, conf, stateStore, null);
        Path conflictPath = new Path(base_path, "10");
        Path qualifiedConflictPath = lfs.makeQualified(conflictPath);
        lfs.mkdir(qualifiedConflictPath, null, true);
        Path rPath = tracker.getPathForLocalization(req1, base_path, delService);
        Assert.assertFalse(lfs.util().exists(rPath));
        verify(delService, times(1)).delete(eq(user), eq(conflictPath));
    } finally {
        lfs.delete(base_path, true);
        if (dispatcher != null) {
            dispatcher.stop();
        }
    }
}
Also used:
Path (org.apache.hadoop.fs.Path)
DrainDispatcher (org.apache.hadoop.yarn.event.DrainDispatcher)
Configuration (org.apache.hadoop.conf.Configuration)
YarnConfiguration (org.apache.hadoop.yarn.conf.YarnConfiguration)
DeletionService (org.apache.hadoop.yarn.server.nodemanager.DeletionService)
NMStateStoreService (org.apache.hadoop.yarn.server.nodemanager.recovery.NMStateStoreService)
LocalizerEvent (org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.event.LocalizerEvent)
ApplicationId (org.apache.hadoop.yarn.api.records.ApplicationId)
ConcurrentHashMap (java.util.concurrent.ConcurrentHashMap)
FileContext (org.apache.hadoop.fs.FileContext)
Test (org.junit.Test)
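
The test exercises a small FileContext vocabulary: makeQualified, mkdir, util().exists, and recursive delete. A standalone sketch of that lifecycle, assuming a scratch directory under target:

import org.apache.hadoop.fs.FileContext;
import org.apache.hadoop.fs.Path;

public class FileContextLifecycleSketch {
    public static void main(String[] args) throws Exception {
        FileContext lfs = FileContext.getLocalFSFileContext();
        Path base = new Path("target", "lifecycle-sketch"); // assumed scratch dir
        Path dir = lfs.makeQualified(new Path(base, "10"));
        // null permission: fall back to the file-system default, as in the test
        lfs.mkdir(dir, null, true);
        boolean exists = lfs.util().exists(dir); // true after mkdir
        System.out.println("created: " + exists);
        lfs.delete(base, true); // recursive cleanup, as in the finally block
    }
}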

Aggregations

FileContext (org.apache.hadoop.fs.FileContext): 84
Path (org.apache.hadoop.fs.Path): 71
Test (org.junit.Test): 34
Configuration (org.apache.hadoop.conf.Configuration): 33
IOException (java.io.IOException): 29
File (java.io.File): 16
YarnConfiguration (org.apache.hadoop.yarn.conf.YarnConfiguration): 14
FileStatus (org.apache.hadoop.fs.FileStatus): 13
HashMap (java.util.HashMap): 12
FsPermission (org.apache.hadoop.fs.permission.FsPermission): 10
ArrayList (java.util.ArrayList): 9
FileSystem (org.apache.hadoop.fs.FileSystem): 8
LocalResource (org.apache.hadoop.yarn.api.records.LocalResource): 8
ExecutorService (java.util.concurrent.ExecutorService): 7
ContainerId (org.apache.hadoop.yarn.api.records.ContainerId): 7
URISyntaxException (java.net.URISyntaxException): 6
ConcurrentHashMap (java.util.concurrent.ConcurrentHashMap): 6
ExecutionException (java.util.concurrent.ExecutionException): 6
Future (java.util.concurrent.Future): 6
FSDataInputStream (org.apache.hadoop.fs.FSDataInputStream): 6
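
As the counts suggest, FileContext rarely appears without Path, Configuration, and a JUnit Test. A minimal sketch of the most common pairing, listing a directory (the path is a placeholder):

import org.apache.hadoop.fs.FileContext;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.RemoteIterator;

public class ListDirSketch {
    public static void main(String[] args) throws Exception {
        FileContext fc = FileContext.getLocalFSFileContext();
        // Placeholder directory; point this at any readable path.
        RemoteIterator<FileStatus> entries = fc.listStatus(new Path("/tmp"));
        while (entries.hasNext()) {
            FileStatus status = entries.next();
            System.out.println(status.getPath() + (status.isDirectory() ? "/" : ""));
        }
    }
}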