Example 11 with AsyncDispatcher

Use of org.apache.hadoop.yarn.event.AsyncDispatcher in project hadoop by apache.

From the class TestAppManager, method mockRMContext:

public RMContext mockRMContext(int n, long time) {
    final List<RMApp> apps = newRMApps(n, time, RMAppState.FINISHED);
    final ConcurrentMap<ApplicationId, RMApp> map = Maps.newConcurrentMap();
    for (RMApp app : apps) {
        map.put(app.getApplicationId(), app);
    }
    Dispatcher rmDispatcher = new AsyncDispatcher();
    ContainerAllocationExpirer containerAllocationExpirer = new ContainerAllocationExpirer(rmDispatcher);
    AMLivelinessMonitor amLivelinessMonitor = new AMLivelinessMonitor(rmDispatcher);
    AMLivelinessMonitor amFinishingMonitor = new AMLivelinessMonitor(rmDispatcher);
    RMApplicationHistoryWriter writer = mock(RMApplicationHistoryWriter.class);
    RMContext context = new RMContextImpl(rmDispatcher, containerAllocationExpirer, amLivelinessMonitor, amFinishingMonitor, null, null, null, null, null) {

        @Override
        public ConcurrentMap<ApplicationId, RMApp> getRMApps() {
            return map;
        }
    };
    ((RMContextImpl) context).setStateStore(mock(RMStateStore.class));
    metricsPublisher = mock(SystemMetricsPublisher.class);
    context.setSystemMetricsPublisher(metricsPublisher);
    context.setRMApplicationHistoryWriter(writer);
    return context;
}
Also used: RMApp (org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp), MockRMApp (org.apache.hadoop.yarn.server.resourcemanager.rmapp.MockRMApp), RMStateStore (org.apache.hadoop.yarn.server.resourcemanager.recovery.RMStateStore), ContainerAllocationExpirer (org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.ContainerAllocationExpirer), RMApplicationHistoryWriter (org.apache.hadoop.yarn.server.resourcemanager.ahs.RMApplicationHistoryWriter), AsyncDispatcher (org.apache.hadoop.yarn.event.AsyncDispatcher), Dispatcher (org.apache.hadoop.yarn.event.Dispatcher), SystemMetricsPublisher (org.apache.hadoop.yarn.server.resourcemanager.metrics.SystemMetricsPublisher), AMLivelinessMonitor (org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.AMLivelinessMonitor), ApplicationId (org.apache.hadoop.yarn.api.records.ApplicationId)
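
For context, here is a minimal, self-contained sketch of the dispatcher pattern these examples rely on: construct an AsyncDispatcher, register an EventHandler per event-type class, drive it through the service lifecycle (init, start, stop), and submit events through getEventHandler(). The DemoEventType enum and DemoEvent class are hypothetical names introduced only for illustration; the AsyncDispatcher, EventHandler, and AbstractEvent APIs are the ones used in the snippets above.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.event.AbstractEvent;
import org.apache.hadoop.yarn.event.AsyncDispatcher;
import org.apache.hadoop.yarn.event.EventHandler;

public class AsyncDispatcherSketch {

    // Hypothetical event type and event class for illustration only.
    enum DemoEventType { STARTED, FINISHED }

    static class DemoEvent extends AbstractEvent<DemoEventType> {
        DemoEvent(DemoEventType type) {
            super(type);
        }
    }

    public static void main(String[] args) {
        AsyncDispatcher dispatcher = new AsyncDispatcher();
        // Route every DemoEventType event to one handler.
        dispatcher.register(DemoEventType.class, new EventHandler<DemoEvent>() {
            @Override
            public void handle(DemoEvent event) {
                System.out.println("handled " + event.getType());
            }
        });
        dispatcher.init(new Configuration());
        dispatcher.start();
        // Events are queued and delivered on the dispatcher's own thread.
        dispatcher.getEventHandler().handle(new DemoEvent(DemoEventType.STARTED));
        dispatcher.stop();
    }
}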

Example 12 with AsyncDispatcher

Use of org.apache.hadoop.yarn.event.AsyncDispatcher in project hadoop by apache.

From the class NodeManager, method serviceInit:

@Override
protected void serviceInit(Configuration conf) throws Exception {
    conf.setBoolean(Dispatcher.DISPATCHER_EXIT_ON_ERROR_KEY, true);
    rmWorkPreservingRestartEnabled = conf.getBoolean(YarnConfiguration.RM_WORK_PRESERVING_RECOVERY_ENABLED, YarnConfiguration.DEFAULT_RM_WORK_PRESERVING_RECOVERY_ENABLED);
    try {
        initAndStartRecoveryStore(conf);
    } catch (IOException e) {
        String recoveryDirName = conf.get(YarnConfiguration.NM_RECOVERY_DIR);
        throw new YarnRuntimeException("Unable to initialize recovery directory at " + recoveryDirName, e);
    }
    NMContainerTokenSecretManager containerTokenSecretManager = new NMContainerTokenSecretManager(conf, nmStore);
    NMTokenSecretManagerInNM nmTokenSecretManager = new NMTokenSecretManagerInNM(nmStore);
    recoverTokens(nmTokenSecretManager, containerTokenSecretManager);
    this.aclsManager = new ApplicationACLsManager(conf);
    ContainerExecutor exec = ReflectionUtils.newInstance(conf.getClass(YarnConfiguration.NM_CONTAINER_EXECUTOR, DefaultContainerExecutor.class, ContainerExecutor.class), conf);
    try {
        exec.init();
    } catch (IOException e) {
        throw new YarnRuntimeException("Failed to initialize container executor", e);
    }
    DeletionService del = createDeletionService(exec);
    addService(del);
    // NodeManager level dispatcher
    this.dispatcher = new AsyncDispatcher("NM Event dispatcher");
    dirsHandler = new LocalDirsHandlerService(metrics);
    nodeHealthChecker = new NodeHealthCheckerService(getNodeHealthScriptRunner(conf), dirsHandler);
    addService(nodeHealthChecker);
    boolean isDistSchedulingEnabled = conf.getBoolean(YarnConfiguration.DIST_SCHEDULING_ENABLED, YarnConfiguration.DEFAULT_DIST_SCHEDULING_ENABLED);
    this.context = createNMContext(containerTokenSecretManager, nmTokenSecretManager, nmStore, isDistSchedulingEnabled, conf);
    ((NMContext) context).setContainerExecutor(exec);
    nodeLabelsProvider = createNodeLabelsProvider(conf);
    if (null == nodeLabelsProvider) {
        nodeStatusUpdater = createNodeStatusUpdater(context, dispatcher, nodeHealthChecker);
    } else {
        addIfService(nodeLabelsProvider);
        nodeStatusUpdater = createNodeStatusUpdater(context, dispatcher, nodeHealthChecker, nodeLabelsProvider);
    }
    nodeResourceMonitor = createNodeResourceMonitor();
    addService(nodeResourceMonitor);
    ((NMContext) context).setNodeResourceMonitor(nodeResourceMonitor);
    containerManager = createContainerManager(context, exec, del, nodeStatusUpdater, this.aclsManager, dirsHandler);
    addService(containerManager);
    ((NMContext) context).setContainerManager(containerManager);
    WebServer webServer = createWebServer(context, containerManager.getContainersMonitor(), this.aclsManager, dirsHandler);
    addService(webServer);
    ((NMContext) context).setWebServer(webServer);
    ((NMContext) context).setQueueableContainerAllocator(new OpportunisticContainerAllocator(context.getContainerTokenSecretManager()));
    dispatcher.register(ContainerManagerEventType.class, containerManager);
    dispatcher.register(NodeManagerEventType.class, this);
    addService(dispatcher);
    pauseMonitor = new JvmPauseMonitor();
    addService(pauseMonitor);
    metrics.getJvmMetrics().setPauseMonitor(pauseMonitor);
    DefaultMetricsSystem.initialize("NodeManager");
    if (YarnConfiguration.timelineServiceV2Enabled(conf)) {
        this.nmCollectorService = createNMCollectorService(context);
        addService(nmCollectorService);
    }
    // StatusUpdater should be added last so that it gets started last,
    // so that we make sure everything is up before registering with RM.
    addService(nodeStatusUpdater);
    ((NMContext) context).setNodeStatusUpdater(nodeStatusUpdater);
    super.serviceInit(conf);
// TODO add local dirs to del
}
Also used: NMTokenSecretManagerInNM (org.apache.hadoop.yarn.server.nodemanager.security.NMTokenSecretManagerInNM), IOException (java.io.IOException), JvmPauseMonitor (org.apache.hadoop.util.JvmPauseMonitor), YarnRuntimeException (org.apache.hadoop.yarn.exceptions.YarnRuntimeException), ApplicationACLsManager (org.apache.hadoop.yarn.server.security.ApplicationACLsManager), AsyncDispatcher (org.apache.hadoop.yarn.event.AsyncDispatcher), WebServer (org.apache.hadoop.yarn.server.nodemanager.webapp.WebServer), NMContainerTokenSecretManager (org.apache.hadoop.yarn.server.nodemanager.security.NMContainerTokenSecretManager), OpportunisticContainerAllocator (org.apache.hadoop.yarn.server.scheduler.OpportunisticContainerAllocator)
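
The serviceInit method above shows the recurring NodeManager wiring: create a single named AsyncDispatcher, register a handler per event-type class, and pass the dispatcher to addService so the surrounding CompositeService starts and stops it. A stripped-down sketch of that pattern, with hypothetical MiniNodeService/NodeEventType/NodeEvent names standing in for the real NodeManager types:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.service.CompositeService;
import org.apache.hadoop.yarn.event.AbstractEvent;
import org.apache.hadoop.yarn.event.AsyncDispatcher;
import org.apache.hadoop.yarn.event.EventHandler;

public class MiniNodeService extends CompositeService {

    // Hypothetical event type and event class.
    enum NodeEventType { RESYNC, SHUTDOWN }

    static class NodeEvent extends AbstractEvent<NodeEventType> {
        NodeEvent(NodeEventType type) {
            super(type);
        }
    }

    private AsyncDispatcher dispatcher;

    public MiniNodeService() {
        super(MiniNodeService.class.getName());
    }

    @Override
    protected void serviceInit(Configuration conf) throws Exception {
        dispatcher = new AsyncDispatcher("Mini event dispatcher");
        dispatcher.register(NodeEventType.class, new EventHandler<NodeEvent>() {
            @Override
            public void handle(NodeEvent event) {
                // React to events delivered on the dispatcher thread.
                System.out.println("received " + event.getType());
            }
        });
        // addService ties the dispatcher's init/start/stop to this service.
        addService(dispatcher);
        super.serviceInit(conf);
    }

    public void post(NodeEvent event) {
        dispatcher.getEventHandler().handle(event);
    }
}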

Example 13 with AsyncDispatcher

Use of org.apache.hadoop.yarn.event.AsyncDispatcher in project hadoop by apache.

From the class TestContainerLogsPage, method testContainerLogPageAccess:

@Test(timeout = 10000)
public void testContainerLogPageAccess() throws IOException {
    // SecureIOUtils requires Native IO to be enabled. This test will run
    // only if it is enabled.
    assumeTrue(NativeIO.isAvailable());
    String user = "randomUser" + System.currentTimeMillis();
    File absLogDir = null, appDir = null, containerDir = null, syslog = null;
    try {
        // target log directory
        absLogDir = new File("target", TestContainerLogsPage.class.getSimpleName() + "LogDir").getAbsoluteFile();
        absLogDir.mkdir();
        Configuration conf = new Configuration();
        conf.set(YarnConfiguration.NM_LOG_DIRS, absLogDir.toURI().toString());
        conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION, "kerberos");
        UserGroupInformation.setConfiguration(conf);
        NodeHealthCheckerService healthChecker = createNodeHealthCheckerService(conf);
        healthChecker.init(conf);
        LocalDirsHandlerService dirsHandler = healthChecker.getDiskHandler();
        // Add an application and the corresponding containers
        RecordFactory recordFactory = RecordFactoryProvider.getRecordFactory(conf);
        long clusterTimeStamp = 1234;
        ApplicationId appId = BuilderUtils.newApplicationId(recordFactory, clusterTimeStamp, 1);
        Application app = mock(Application.class);
        when(app.getAppId()).thenReturn(appId);
        // Making sure that application returns a random user. This is required
        // for SecureIOUtils' file owner check.
        when(app.getUser()).thenReturn(user);
        ApplicationAttemptId appAttemptId = BuilderUtils.newApplicationAttemptId(appId, 1);
        ContainerId container1 = BuilderUtils.newContainerId(recordFactory, appId, appAttemptId, 0);
        // Testing secure read access for log files
        // Creating application and container directory and syslog file.
        appDir = new File(absLogDir, appId.toString());
        appDir.mkdir();
        containerDir = new File(appDir, container1.toString());
        containerDir.mkdir();
        syslog = new File(containerDir, "syslog");
        syslog.createNewFile();
        BufferedOutputStream out = new BufferedOutputStream(new FileOutputStream(syslog));
        out.write("Log file Content".getBytes());
        out.close();
        Context context = mock(Context.class);
        ConcurrentMap<ApplicationId, Application> appMap = new ConcurrentHashMap<ApplicationId, Application>();
        appMap.put(appId, app);
        when(context.getApplications()).thenReturn(appMap);
        ConcurrentHashMap<ContainerId, Container> containers = new ConcurrentHashMap<ContainerId, Container>();
        when(context.getContainers()).thenReturn(containers);
        when(context.getLocalDirsHandler()).thenReturn(dirsHandler);
        MockContainer container = new MockContainer(appAttemptId, new AsyncDispatcher(), conf, user, appId, 1);
        container.setState(ContainerState.RUNNING);
        context.getContainers().put(container1, container);
        ContainersLogsBlock cLogsBlock = new ContainersLogsBlock(context);
        Map<String, String> params = new HashMap<String, String>();
        params.put(YarnWebParams.CONTAINER_ID, container1.toString());
        params.put(YarnWebParams.CONTAINER_LOG_TYPE, "syslog");
        Injector injector = WebAppTests.testPage(ContainerLogsPage.class, ContainersLogsBlock.class, cLogsBlock, params, (Module[]) null);
        PrintWriter spyPw = WebAppTests.getPrintWriter(injector);
        verify(spyPw).write("Exception reading log file. Application submitted by '" + user + "' doesn't own requested log file : syslog");
    } finally {
        if (syslog != null) {
            syslog.delete();
        }
        if (containerDir != null) {
            containerDir.delete();
        }
        if (appDir != null) {
            appDir.delete();
        }
        if (absLogDir != null) {
            absLogDir.delete();
        }
    }
}
Also used: Configuration (org.apache.hadoop.conf.Configuration), YarnConfiguration (org.apache.hadoop.yarn.conf.YarnConfiguration), NodeHealthCheckerService (org.apache.hadoop.yarn.server.nodemanager.NodeHealthCheckerService), ConcurrentHashMap (java.util.concurrent.ConcurrentHashMap), HashMap (java.util.HashMap), Container (org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container), ContainerId (org.apache.hadoop.yarn.api.records.ContainerId), Injector (com.google.inject.Injector), BufferedOutputStream (java.io.BufferedOutputStream), PrintWriter (java.io.PrintWriter), NMContext (org.apache.hadoop.yarn.server.nodemanager.NodeManager.NMContext), Context (org.apache.hadoop.yarn.server.nodemanager.Context), ContainersLogsBlock (org.apache.hadoop.yarn.server.nodemanager.webapp.ContainerLogsPage.ContainersLogsBlock), ApplicationAttemptId (org.apache.hadoop.yarn.api.records.ApplicationAttemptId), LocalDirsHandlerService (org.apache.hadoop.yarn.server.nodemanager.LocalDirsHandlerService), RecordFactory (org.apache.hadoop.yarn.factories.RecordFactory), AsyncDispatcher (org.apache.hadoop.yarn.event.AsyncDispatcher), FileOutputStream (java.io.FileOutputStream), ApplicationId (org.apache.hadoop.yarn.api.records.ApplicationId), Module (com.google.inject.Module), File (java.io.File), Application (org.apache.hadoop.yarn.server.nodemanager.containermanager.application.Application), Test (org.junit.Test)

Example 14 with AsyncDispatcher

Use of org.apache.hadoop.yarn.event.AsyncDispatcher in project hadoop by apache.

From the class TestContainerLogsPage, method testContainerLogDirs:

@Test(timeout = 30000)
public void testContainerLogDirs() throws IOException, YarnException {
    File absLogDir = new File("target", TestNMWebServer.class.getSimpleName() + "LogDir").getAbsoluteFile();
    String logdirwithFile = absLogDir.toURI().toString();
    Configuration conf = new Configuration();
    conf.set(YarnConfiguration.NM_LOG_DIRS, logdirwithFile);
    NodeHealthCheckerService healthChecker = createNodeHealthCheckerService(conf);
    healthChecker.init(conf);
    LocalDirsHandlerService dirsHandler = healthChecker.getDiskHandler();
    NMContext nmContext = new NodeManager.NMContext(null, null, dirsHandler, new ApplicationACLsManager(conf), new NMNullStateStoreService(), false, conf);
    // Add an application and the corresponding containers
    RecordFactory recordFactory = RecordFactoryProvider.getRecordFactory(conf);
    String user = "nobody";
    long clusterTimeStamp = 1234;
    ApplicationId appId = BuilderUtils.newApplicationId(recordFactory, clusterTimeStamp, 1);
    Application app = mock(Application.class);
    when(app.getUser()).thenReturn(user);
    when(app.getAppId()).thenReturn(appId);
    ApplicationAttemptId appAttemptId = BuilderUtils.newApplicationAttemptId(appId, 1);
    ContainerId container1 = BuilderUtils.newContainerId(recordFactory, appId, appAttemptId, 0);
    nmContext.getApplications().put(appId, app);
    MockContainer container = new MockContainer(appAttemptId, new AsyncDispatcher(), conf, user, appId, 1);
    container.setState(ContainerState.RUNNING);
    nmContext.getContainers().put(container1, container);
    List<File> files = null;
    files = ContainerLogsUtils.getContainerLogDirs(container1, user, nmContext);
    Assert.assertTrue(!(files.get(0).toString().contains("file:")));
    // After container is completed, it is removed from nmContext
    nmContext.getContainers().remove(container1);
    Assert.assertNull(nmContext.getContainers().get(container1));
    files = ContainerLogsUtils.getContainerLogDirs(container1, user, nmContext);
    Assert.assertTrue(!(files.get(0).toString().contains("file:")));
    // Create a new context to check if correct container log dirs are fetched
    // on full disk.
    LocalDirsHandlerService dirsHandlerForFullDisk = spy(dirsHandler);
    // good log dirs are empty and nm log dir is in the full log dir list.
    when(dirsHandlerForFullDisk.getLogDirs()).thenReturn(new ArrayList<String>());
    when(dirsHandlerForFullDisk.getLogDirsForRead()).thenReturn(Arrays.asList(new String[] { absLogDir.getAbsolutePath() }));
    nmContext = new NodeManager.NMContext(null, null, dirsHandlerForFullDisk, new ApplicationACLsManager(conf), new NMNullStateStoreService(), false, conf);
    nmContext.getApplications().put(appId, app);
    container.setState(ContainerState.RUNNING);
    nmContext.getContainers().put(container1, container);
    List<File> dirs = ContainerLogsUtils.getContainerLogDirs(container1, user, nmContext);
    File containerLogDir = new File(absLogDir, appId + "/" + container1);
    Assert.assertTrue(dirs.contains(containerLogDir));
}
Also used: NMContext (org.apache.hadoop.yarn.server.nodemanager.NodeManager.NMContext), Configuration (org.apache.hadoop.conf.Configuration), YarnConfiguration (org.apache.hadoop.yarn.conf.YarnConfiguration), NodeHealthCheckerService (org.apache.hadoop.yarn.server.nodemanager.NodeHealthCheckerService), ApplicationAttemptId (org.apache.hadoop.yarn.api.records.ApplicationAttemptId), LocalDirsHandlerService (org.apache.hadoop.yarn.server.nodemanager.LocalDirsHandlerService), NMNullStateStoreService (org.apache.hadoop.yarn.server.nodemanager.recovery.NMNullStateStoreService), NodeManager (org.apache.hadoop.yarn.server.nodemanager.NodeManager), ApplicationACLsManager (org.apache.hadoop.yarn.server.security.ApplicationACLsManager), RecordFactory (org.apache.hadoop.yarn.factories.RecordFactory), AsyncDispatcher (org.apache.hadoop.yarn.event.AsyncDispatcher), ContainerId (org.apache.hadoop.yarn.api.records.ContainerId), ApplicationId (org.apache.hadoop.yarn.api.records.ApplicationId), File (java.io.File), Application (org.apache.hadoop.yarn.server.nodemanager.containermanager.application.Application), Test (org.junit.Test)

Example 15 with AsyncDispatcher

Use of org.apache.hadoop.yarn.event.AsyncDispatcher in project hadoop by apache.

From the class TestContainerLogsPage, method testContainerLogFile:

@Test(timeout = 30000)
public void testContainerLogFile() throws IOException, YarnException {
    File absLogDir = new File("target", TestNMWebServer.class.getSimpleName() + "LogDir").getAbsoluteFile();
    String logdirwithFile = absLogDir.toURI().toString();
    Configuration conf = new Configuration();
    conf.set(YarnConfiguration.NM_LOG_DIRS, logdirwithFile);
    conf.setFloat(YarnConfiguration.NM_MAX_PER_DISK_UTILIZATION_PERCENTAGE, 0.0f);
    LocalDirsHandlerService dirsHandler = new LocalDirsHandlerService();
    dirsHandler.init(conf);
    NMContext nmContext = new NodeManager.NMContext(null, null, dirsHandler, new ApplicationACLsManager(conf), new NMNullStateStoreService(), false, conf);
    // Add an application and the corresponding containers
    String user = "nobody";
    long clusterTimeStamp = 1234;
    ApplicationId appId = BuilderUtils.newApplicationId(clusterTimeStamp, 1);
    Application app = mock(Application.class);
    when(app.getUser()).thenReturn(user);
    when(app.getAppId()).thenReturn(appId);
    ApplicationAttemptId appAttemptId = BuilderUtils.newApplicationAttemptId(appId, 1);
    ContainerId containerId = BuilderUtils.newContainerId(appAttemptId, 1);
    nmContext.getApplications().put(appId, app);
    MockContainer container = new MockContainer(appAttemptId, new AsyncDispatcher(), conf, user, appId, 1);
    container.setState(ContainerState.RUNNING);
    nmContext.getContainers().put(containerId, container);
    File containerLogDir = new File(absLogDir, ContainerLaunch.getRelativeContainerLogDir(appId.toString(), containerId.toString()));
    containerLogDir.mkdirs();
    String fileName = "fileName";
    File containerLogFile = new File(containerLogDir, fileName);
    containerLogFile.createNewFile();
    File file = ContainerLogsUtils.getContainerLogFile(containerId, fileName, user, nmContext);
    Assert.assertEquals(containerLogFile.toURI().toString(), file.toURI().toString());
    FileUtil.fullyDelete(absLogDir);
}
Also used: Configuration (org.apache.hadoop.conf.Configuration), YarnConfiguration (org.apache.hadoop.yarn.conf.YarnConfiguration), ApplicationAttemptId (org.apache.hadoop.yarn.api.records.ApplicationAttemptId), LocalDirsHandlerService (org.apache.hadoop.yarn.server.nodemanager.LocalDirsHandlerService), NMNullStateStoreService (org.apache.hadoop.yarn.server.nodemanager.recovery.NMNullStateStoreService), ApplicationACLsManager (org.apache.hadoop.yarn.server.security.ApplicationACLsManager), NMContext (org.apache.hadoop.yarn.server.nodemanager.NodeManager.NMContext), AsyncDispatcher (org.apache.hadoop.yarn.event.AsyncDispatcher), ContainerId (org.apache.hadoop.yarn.api.records.ContainerId), ApplicationId (org.apache.hadoop.yarn.api.records.ApplicationId), File (java.io.File), Application (org.apache.hadoop.yarn.server.nodemanager.containermanager.application.Application), Test (org.junit.Test)
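
Note that in Examples 13 through 15 the AsyncDispatcher handed to MockContainer is never initialized or started; it only satisfies the constructor signature. A test that actually needs events delivered has to run the dispatcher through the service lifecycle and wait for asynchronous handling. A sketch of such a test (JUnit 4, as in the examples above; the PingEventType/PingEvent names are hypothetical):

import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.event.AbstractEvent;
import org.apache.hadoop.yarn.event.AsyncDispatcher;
import org.apache.hadoop.yarn.event.EventHandler;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;

public class TestDispatcherDelivery {

    // Hypothetical event type and event class.
    enum PingEventType { PING }

    static class PingEvent extends AbstractEvent<PingEventType> {
        PingEvent() {
            super(PingEventType.PING);
        }
    }

    private AsyncDispatcher dispatcher;

    @Before
    public void setUp() {
        dispatcher = new AsyncDispatcher();
        dispatcher.init(new Configuration());
        dispatcher.start();
    }

    @After
    public void tearDown() {
        // Stop the dispatcher thread so the test JVM can exit cleanly.
        if (dispatcher != null) {
            dispatcher.stop();
        }
    }

    @Test(timeout = 10000)
    public void testEventIsDelivered() throws Exception {
        final CountDownLatch delivered = new CountDownLatch(1);
        dispatcher.register(PingEventType.class, new EventHandler<PingEvent>() {
            @Override
            public void handle(PingEvent event) {
                delivered.countDown();
            }
        });
        dispatcher.getEventHandler().handle(new PingEvent());
        Assert.assertTrue("event was not handled in time",
                delivered.await(5, TimeUnit.SECONDS));
    }
}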

Aggregations

AsyncDispatcher (org.apache.hadoop.yarn.event.AsyncDispatcher): 51 usages
Test (org.junit.Test): 32 usages
Configuration (org.apache.hadoop.conf.Configuration): 28 usages
YarnConfiguration (org.apache.hadoop.yarn.conf.YarnConfiguration): 20 usages
ApplicationAttemptId (org.apache.hadoop.yarn.api.records.ApplicationAttemptId): 13 usages
Dispatcher (org.apache.hadoop.yarn.event.Dispatcher): 12 usages
OutputCommitter (org.apache.hadoop.mapreduce.OutputCommitter): 11 usages
JobEvent (org.apache.hadoop.mapreduce.v2.app.job.event.JobEvent): 11 usages
CommitterEventHandler (org.apache.hadoop.mapreduce.v2.app.commit.CommitterEventHandler): 10 usages
ApplicationId (org.apache.hadoop.yarn.api.records.ApplicationId): 9 usages
RMContext (org.apache.hadoop.yarn.server.resourcemanager.RMContext): 9 usages
NodeAddedSchedulerEvent (org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeAddedSchedulerEvent): 8 usages
JobId (org.apache.hadoop.mapreduce.v2.api.records.JobId): 7 usages
JobStartEvent (org.apache.hadoop.mapreduce.v2.app.job.event.JobStartEvent): 7 usages
ContainerId (org.apache.hadoop.yarn.api.records.ContainerId): 7 usages
RMNode (org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode): 7 usages
Before (org.junit.Before): 7 usages
ArrayList (java.util.ArrayList): 6 usages
JobContext (org.apache.hadoop.mapreduce.JobContext): 6 usages
LocalDirsHandlerService (org.apache.hadoop.yarn.server.nodemanager.LocalDirsHandlerService): 6 usages