Example 46 with Dispatcher

use of org.apache.hadoop.yarn.event.Dispatcher in project hadoop by apache.

the class TestContainerLaunch method testPrependDistcache.

@Test
public void testPrependDistcache() throws Exception {
    // Test is only relevant on Windows
    assumeWindows();
    ContainerLaunchContext containerLaunchContext = recordFactory.newRecordInstance(ContainerLaunchContext.class);
    ApplicationId appId = ApplicationId.newInstance(0, 0);
    ApplicationAttemptId appAttemptId = ApplicationAttemptId.newInstance(appId, 1);
    ContainerId cId = ContainerId.newContainerId(appAttemptId, 0);
    Map<String, String> userSetEnv = new HashMap<String, String>();
    userSetEnv.put(Environment.CONTAINER_ID.name(), "user_set_container_id");
    userSetEnv.put(Environment.NM_HOST.name(), "user_set_NM_HOST");
    userSetEnv.put(Environment.NM_PORT.name(), "user_set_NM_PORT");
    userSetEnv.put(Environment.NM_HTTP_PORT.name(), "user_set_NM_HTTP_PORT");
    userSetEnv.put(Environment.LOCAL_DIRS.name(), "user_set_LOCAL_DIR");
    userSetEnv.put(Environment.USER.key(), "user_set_" + Environment.USER.key());
    userSetEnv.put(Environment.LOGNAME.name(), "user_set_LOGNAME");
    userSetEnv.put(Environment.PWD.name(), "user_set_PWD");
    userSetEnv.put(Environment.HOME.name(), "user_set_HOME");
    userSetEnv.put(Environment.CLASSPATH.name(), "APATH");
    containerLaunchContext.setEnvironment(userSetEnv);
    Container container = mock(Container.class);
    when(container.getContainerId()).thenReturn(cId);
    when(container.getLaunchContext()).thenReturn(containerLaunchContext);
    when(container.getLocalizedResources()).thenReturn(null);
    Dispatcher dispatcher = mock(Dispatcher.class);
    EventHandler<Event> eventHandler = new EventHandler<Event>() {

        public void handle(Event event) {
            Assert.assertTrue(event instanceof ContainerExitEvent);
            ContainerExitEvent exitEvent = (ContainerExitEvent) event;
            Assert.assertEquals(ContainerEventType.CONTAINER_EXITED_WITH_FAILURE, exitEvent.getType());
        }
    };
    when(dispatcher.getEventHandler()).thenReturn(eventHandler);
    Configuration conf = new Configuration();
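    // Note: distContext, exec, dirsHandler and containerManager are fields
    // initialized elsewhere in the test class hierarchy (TestContainerLaunch
    // extends BaseContainerManagerTest); they are not part of this snippet.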
    ContainerLaunch launch = new ContainerLaunch(distContext, conf, dispatcher, exec, null, container, dirsHandler, containerManager);
    String testDir = System.getProperty("test.build.data", "target/test-dir");
    Path pwd = new Path(testDir);
    List<Path> appDirs = new ArrayList<Path>();
    List<String> userLocalDirs = new ArrayList<>();
    List<String> containerLogs = new ArrayList<String>();
    Map<Path, List<String>> resources = new HashMap<Path, List<String>>();
    Path userjar = new Path("user.jar");
    List<String> lpaths = new ArrayList<String>();
    lpaths.add("userjarlink.jar");
    resources.put(userjar, lpaths);
    Path nmp = new Path(testDir);
    launch.sanitizeEnv(userSetEnv, pwd, appDirs, userLocalDirs, containerLogs, resources, nmp);
    List<String> result = getJarManifestClasspath(userSetEnv.get(Environment.CLASSPATH.name()));
    Assert.assertTrue(result.size() > 1);
    Assert.assertTrue(result.get(result.size() - 1).endsWith("userjarlink.jar"));
    //Then, with user classpath first
    userSetEnv.put(Environment.CLASSPATH_PREPEND_DISTCACHE.name(), "true");
    cId = ContainerId.newContainerId(appAttemptId, 1);
    when(container.getContainerId()).thenReturn(cId);
    launch = new ContainerLaunch(distContext, conf, dispatcher, exec, null, container, dirsHandler, containerManager);
    launch.sanitizeEnv(userSetEnv, pwd, appDirs, userLocalDirs, containerLogs, resources, nmp);
    result = getJarManifestClasspath(userSetEnv.get(Environment.CLASSPATH.name()));
    Assert.assertTrue(result.size() > 1);
    Assert.assertTrue(result.get(0).endsWith("userjarlink.jar"));
}
Also used : Path(org.apache.hadoop.fs.Path) Configuration(org.apache.hadoop.conf.Configuration) YarnConfiguration(org.apache.hadoop.yarn.conf.YarnConfiguration) HashMap(java.util.HashMap) ContainerExitEvent(org.apache.hadoop.yarn.server.nodemanager.containermanager.container.ContainerExitEvent) ArrayList(java.util.ArrayList) EventHandler(org.apache.hadoop.yarn.event.EventHandler) ContainerLaunchContext(org.apache.hadoop.yarn.api.records.ContainerLaunchContext) ApplicationAttemptId(org.apache.hadoop.yarn.api.records.ApplicationAttemptId) Dispatcher(org.apache.hadoop.yarn.event.Dispatcher) Container(org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container) ContainerId(org.apache.hadoop.yarn.api.records.ContainerId) ContainerEvent(org.apache.hadoop.yarn.server.nodemanager.containermanager.container.ContainerEvent) Event(org.apache.hadoop.yarn.event.Event) ContainerExitEvent(org.apache.hadoop.yarn.server.nodemanager.containermanager.container.ContainerExitEvent) List(java.util.List) ArrayList(java.util.ArrayList) ApplicationId(org.apache.hadoop.yarn.api.records.ApplicationId) BaseContainerManagerTest(org.apache.hadoop.yarn.server.nodemanager.containermanager.BaseContainerManagerTest) Test(org.junit.Test)
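
Note: getJarManifestClasspath is a private helper of TestContainerLaunch and is not included in this snippet. On Windows, sanitizeEnv replaces the long CLASSPATH value with the path of a generated classpath jar whose manifest Class-Path attribute carries the actual entries, so the helper only needs to read that manifest back. A rough sketch of an equivalent helper (readManifestClasspath is a hypothetical name, not the test's method):

import java.io.File;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.jar.Attributes;
import java.util.jar.JarFile;
import java.util.jar.Manifest;

// Open the generated classpath jar and return the entries listed in its
// Class-Path manifest attribute.
private static List<String> readManifestClasspath(String classpathJarPath)
        throws IOException {
    try (JarFile jar = new JarFile(new File(classpathJarPath))) {
        Manifest manifest = jar.getManifest();
        String classPath = manifest.getMainAttributes()
                .getValue(Attributes.Name.CLASS_PATH);
        List<String> entries = new ArrayList<>();
        for (String entry : classPath.split("\\s+")) {
            if (!entry.isEmpty()) {
                entries.add(entry);
            }
        }
        return entries;
    }
}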

Example 47 with Dispatcher

use of org.apache.hadoop.yarn.event.Dispatcher in project hadoop by apache.

the class TestAppLogAggregatorImpl method createAppLogAggregator.

private static AppLogAggregatorInTest createAppLogAggregator(ApplicationId applicationId, String rootLogDir, YarnConfiguration config, long recoveredLogInitedTimeMillis, DeletionService deletionServiceWithFilesToExpect) throws IOException {
    final Dispatcher dispatcher = createNullDispatcher();
    final NodeId nodeId = NodeId.newInstance("localhost", 0);
    final String userId = "AppLogAggregatorTest";
    final UserGroupInformation ugi = UserGroupInformation.createRemoteUser(userId);
    final LocalDirsHandlerService dirsService = createLocalDirsHandlerService(config, rootLogDir);
    final DeletionService deletionService = deletionServiceWithFilesToExpect;
    final LogAggregationContext logAggregationContext = null;
    final Map<ApplicationAccessType, String> appAcls = new HashMap<>();
    final Context context = createContext(config);
    final FileContext fakeLfs = mock(FileContext.class);
    final Path remoteLogDirForApp = new Path(REMOTE_LOG_FILE.getAbsolutePath());
    return new AppLogAggregatorInTest(dispatcher, deletionService, config, applicationId, ugi, nodeId, dirsService, remoteLogDirForApp, appAcls, logAggregationContext, context, fakeLfs, recoveredLogInitedTimeMillis);
}
Also used : FileContext(org.apache.hadoop.fs.FileContext) LogAggregationContext(org.apache.hadoop.yarn.api.records.LogAggregationContext) ContainerLogContext(org.apache.hadoop.yarn.server.api.ContainerLogContext) Context(org.apache.hadoop.yarn.server.nodemanager.Context) Path(org.apache.hadoop.fs.Path) HashMap(java.util.HashMap) DeletionService(org.apache.hadoop.yarn.server.nodemanager.DeletionService) Dispatcher(org.apache.hadoop.yarn.event.Dispatcher) LocalDirsHandlerService(org.apache.hadoop.yarn.server.nodemanager.LocalDirsHandlerService) ApplicationAccessType(org.apache.hadoop.yarn.api.records.ApplicationAccessType) NodeId(org.apache.hadoop.yarn.api.records.NodeId) LogAggregationContext(org.apache.hadoop.yarn.api.records.LogAggregationContext) FileContext(org.apache.hadoop.fs.FileContext) UserGroupInformation(org.apache.hadoop.security.UserGroupInformation)
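
Note: createNullDispatcher(), createLocalDirsHandlerService(), createContext() and REMOTE_LOG_FILE are private helpers and fields of TestAppLogAggregatorImpl that are not shown here. For this test a dispatcher only has to satisfy the interface while discarding every event; a minimal sketch of such a helper (createDiscardingDispatcher is a made-up name):

import org.apache.hadoop.yarn.event.Dispatcher;
import org.apache.hadoop.yarn.event.Event;
import org.apache.hadoop.yarn.event.EventHandler;

// A Dispatcher that accepts registrations and events but drops them all,
// which is sufficient when the code under test never consumes the events.
@SuppressWarnings("rawtypes")
private static Dispatcher createDiscardingDispatcher() {
    return new Dispatcher() {
        @Override
        public EventHandler<Event> getEventHandler() {
            return new EventHandler<Event>() {
                @Override
                public void handle(Event event) {
                    // intentionally ignore every dispatched event
                }
            };
        }

        @Override
        public void register(Class<? extends Enum> eventType, EventHandler handler) {
            // nothing to register in this sketch
        }
    };
}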

Example 48 with Dispatcher

use of org.apache.hadoop.yarn.event.Dispatcher in project hadoop by apache.

the class TestNMWebServicesApps method addAppContainers.

private HashMap<String, String> addAppContainers(Application app) throws IOException {
    Dispatcher dispatcher = new AsyncDispatcher();
    ApplicationAttemptId appAttemptId = BuilderUtils.newApplicationAttemptId(app.getAppId(), 1);
    Container container1 = new MockContainer(appAttemptId, dispatcher, conf, app.getUser(), app.getAppId(), 1);
    Container container2 = new MockContainer(appAttemptId, dispatcher, conf, app.getUser(), app.getAppId(), 2);
    nmContext.getContainers().put(container1.getContainerId(), container1);
    nmContext.getContainers().put(container2.getContainerId(), container2);
    app.getContainers().put(container1.getContainerId(), container1);
    app.getContainers().put(container2.getContainerId(), container2);
    HashMap<String, String> hash = new HashMap<String, String>();
    hash.put(container1.getContainerId().toString(), container1.getContainerId().toString());
    hash.put(container2.getContainerId().toString(), container2.getContainerId().toString());
    return hash;
}
Also used : Container(org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container) GuiceContainer(com.sun.jersey.guice.spi.container.servlet.GuiceContainer) AsyncDispatcher(org.apache.hadoop.yarn.event.AsyncDispatcher) HashMap(java.util.HashMap) ApplicationAttemptId(org.apache.hadoop.yarn.api.records.ApplicationAttemptId) AsyncDispatcher(org.apache.hadoop.yarn.event.AsyncDispatcher) Dispatcher(org.apache.hadoop.yarn.event.Dispatcher)
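
In this snippet the AsyncDispatcher is created but never initialized or started, because the web-services test only needs the container metadata that MockContainer exposes. When a test actually needs events delivered, the dispatcher must be driven through its service lifecycle. A minimal, self-contained sketch (PingEvent and PingEventType are invented names for illustration):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.event.AbstractEvent;
import org.apache.hadoop.yarn.event.AsyncDispatcher;
import org.apache.hadoop.yarn.event.EventHandler;

// Hypothetical event type and event used only for this illustration.
enum PingEventType { PING }

class PingEvent extends AbstractEvent<PingEventType> {
    PingEvent() {
        super(PingEventType.PING);
    }
}

void dispatchOnePing() throws Exception {
    AsyncDispatcher dispatcher = new AsyncDispatcher();
    // AsyncDispatcher is a Service: it must be initialized and started
    // before getEventHandler() will actually deliver anything.
    dispatcher.init(new Configuration());
    dispatcher.start();
    dispatcher.register(PingEventType.class, new EventHandler<PingEvent>() {
        @Override
        public void handle(PingEvent event) {
            System.out.println("handled " + event.getType());
        }
    });
    // Events are delivered asynchronously on the dispatcher's own thread.
    dispatcher.getEventHandler().handle(new PingEvent());
    dispatcher.stop();
}

Tests that must assert on delivered events often use the DrainDispatcher test subclass (see the Aggregations list below), whose await() waits until the event queue is empty.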

Example 49 with Dispatcher

use of org.apache.hadoop.yarn.event.Dispatcher in project hadoop by apache.

the class TestNMWebServer method testNMWebApp.

@Test
public void testNMWebApp() throws IOException, YarnException {
    Configuration conf = new Configuration();
    Context nmContext = new NodeManager.NMContext(null, null, null, null, null, false, conf);
    ResourceView resourceView = new ResourceView() {

        @Override
        public long getVmemAllocatedForContainers() {
            return 0;
        }

        @Override
        public long getPmemAllocatedForContainers() {
            return 0;
        }

        @Override
        public long getVCoresAllocatedForContainers() {
            return 0;
        }

        @Override
        public boolean isVmemCheckEnabled() {
            return true;
        }

        @Override
        public boolean isPmemCheckEnabled() {
            return true;
        }
    };
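    // testRootDir and testLogDir are temporary directories created elsewhere
    // in the test class; they are not part of this snippet.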
    conf.set(YarnConfiguration.NM_LOCAL_DIRS, testRootDir.getAbsolutePath());
    conf.set(YarnConfiguration.NM_LOG_DIRS, testLogDir.getAbsolutePath());
    NodeHealthCheckerService healthChecker = createNodeHealthCheckerService(conf);
    healthChecker.init(conf);
    LocalDirsHandlerService dirsHandler = healthChecker.getDiskHandler();
    WebServer server = new WebServer(nmContext, resourceView, new ApplicationACLsManager(conf), dirsHandler);
    server.init(conf);
    server.start();
    // Add an application and the corresponding containers
    RecordFactory recordFactory = RecordFactoryProvider.getRecordFactory(conf);
    Dispatcher dispatcher = new AsyncDispatcher();
    String user = "nobody";
    long clusterTimeStamp = 1234;
    ApplicationId appId = BuilderUtils.newApplicationId(recordFactory, clusterTimeStamp, 1);
    Application app = mock(Application.class);
    when(app.getUser()).thenReturn(user);
    when(app.getAppId()).thenReturn(appId);
    nmContext.getApplications().put(appId, app);
    ApplicationAttemptId appAttemptId = BuilderUtils.newApplicationAttemptId(appId, 1);
    ContainerId container1 = BuilderUtils.newContainerId(recordFactory, appId, appAttemptId, 0);
    ContainerId container2 = BuilderUtils.newContainerId(recordFactory, appId, appAttemptId, 1);
    NodeManagerMetrics metrics = mock(NodeManagerMetrics.class);
    NMStateStoreService stateStore = new NMNullStateStoreService();
    for (ContainerId containerId : new ContainerId[] { container1, container2 }) {
        // TODO: Use builder utils
        ContainerLaunchContext launchContext = recordFactory.newRecordInstance(ContainerLaunchContext.class);
        long currentTime = System.currentTimeMillis();
        Token containerToken = BuilderUtils.newContainerToken(containerId, 0, "127.0.0.1", 1234, user, BuilderUtils.newResource(1024, 1), currentTime + 10000L, 123, "password".getBytes(), currentTime);
        Context context = mock(Context.class);
        Container container = new ContainerImpl(conf, dispatcher, launchContext, null, metrics, BuilderUtils.newContainerTokenIdentifier(containerToken), context) {

            @Override
            public ContainerState getContainerState() {
                return ContainerState.RUNNING;
            }

        };
        nmContext.getContainers().put(containerId, container);
        //TODO: Gross hack. Fix in code.
        ApplicationId applicationId = containerId.getApplicationAttemptId().getApplicationId();
        nmContext.getApplications().get(applicationId).getContainers().put(containerId, container);
        writeContainerLogs(nmContext, containerId, dirsHandler);
    }
// TODO: Pull logs and test contents.
//    Thread.sleep(1000000);
}
Also used : ContainerLaunchContext(org.apache.hadoop.yarn.api.records.ContainerLaunchContext) Context(org.apache.hadoop.yarn.server.nodemanager.Context) YarnConfiguration(org.apache.hadoop.yarn.conf.YarnConfiguration) Configuration(org.apache.hadoop.conf.Configuration) NodeHealthCheckerService(org.apache.hadoop.yarn.server.nodemanager.NodeHealthCheckerService) Token(org.apache.hadoop.yarn.api.records.Token) ApplicationAttemptId(org.apache.hadoop.yarn.api.records.ApplicationAttemptId) ContainerLaunchContext(org.apache.hadoop.yarn.api.records.ContainerLaunchContext) LocalDirsHandlerService(org.apache.hadoop.yarn.server.nodemanager.LocalDirsHandlerService) Dispatcher(org.apache.hadoop.yarn.event.Dispatcher) AsyncDispatcher(org.apache.hadoop.yarn.event.AsyncDispatcher) NMNullStateStoreService(org.apache.hadoop.yarn.server.nodemanager.recovery.NMNullStateStoreService) NMStateStoreService(org.apache.hadoop.yarn.server.nodemanager.recovery.NMStateStoreService) ResourceView(org.apache.hadoop.yarn.server.nodemanager.ResourceView) ApplicationACLsManager(org.apache.hadoop.yarn.server.security.ApplicationACLsManager) Container(org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container) RecordFactory(org.apache.hadoop.yarn.factories.RecordFactory) AsyncDispatcher(org.apache.hadoop.yarn.event.AsyncDispatcher) ContainerId(org.apache.hadoop.yarn.api.records.ContainerId) ContainerImpl(org.apache.hadoop.yarn.server.nodemanager.containermanager.container.ContainerImpl) NodeManagerMetrics(org.apache.hadoop.yarn.server.nodemanager.metrics.NodeManagerMetrics) ApplicationId(org.apache.hadoop.yarn.api.records.ApplicationId) Application(org.apache.hadoop.yarn.server.nodemanager.containermanager.application.Application) Test(org.junit.Test)
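
Note: writeContainerLogs is a private helper of TestNMWebServer that is not shown here; it populates per-container log files so the web server has something to serve. A hypothetical equivalent (writeFakeContainerLogs is a made-up name; the directory layout assumes the NodeManager's usual <log-dir>/<application-id>/<container-id>/<file> convention):

import java.io.File;
import java.io.FileWriter;
import java.io.IOException;
import java.io.Writer;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.server.nodemanager.LocalDirsHandlerService;

// Drop small fake log files where the NM web UI expects to find them.
private void writeFakeContainerLogs(LocalDirsHandlerService dirsHandler,
        ContainerId containerId) throws IOException {
    String logDir = dirsHandler.getLogDirs().get(0);
    File appLogDir = new File(logDir,
            containerId.getApplicationAttemptId().getApplicationId().toString());
    File containerLogDir = new File(appLogDir, containerId.toString());
    if (!containerLogDir.mkdirs() && !containerLogDir.isDirectory()) {
        throw new IOException("Could not create " + containerLogDir);
    }
    for (String name : new String[] { "stdout", "stderr", "syslog" }) {
        try (Writer writer = new FileWriter(new File(containerLogDir, name))) {
            writer.write("Hello " + name + " for " + containerId + "\n");
        }
    }
}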

Example 50 with Dispatcher

use of org.apache.hadoop.yarn.event.Dispatcher in project hadoop by apache.

the class ResourceManager method setupDispatcher.

/**
   * Register the handlers for alwaysOn services
   */
private Dispatcher setupDispatcher() {
    Dispatcher dispatcher = createDispatcher();
    dispatcher.register(RMFatalEventType.class, new ResourceManager.RMFatalEventDispatcher());
    return dispatcher;
}
Also used : AsyncDispatcher(org.apache.hadoop.yarn.event.AsyncDispatcher) Dispatcher(org.apache.hadoop.yarn.event.Dispatcher) EventDispatcher(org.apache.hadoop.yarn.event.EventDispatcher)
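
Note: createDispatcher() is defined elsewhere in ResourceManager; in the Hadoop source it returns an AsyncDispatcher (the exact constructor arguments vary between versions). A sketch of the factory that setupDispatcher() relies on:

// The returned AsyncDispatcher is itself a Service, so the ResourceManager
// can add it to its composite service lifecycle and start it together with
// the rest of the always-on services.
protected Dispatcher createDispatcher() {
    return new AsyncDispatcher();
}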

Aggregations

Dispatcher (org.apache.hadoop.yarn.event.Dispatcher): 55
Test (org.junit.Test): 35
Configuration (org.apache.hadoop.conf.Configuration): 25
YarnConfiguration (org.apache.hadoop.yarn.conf.YarnConfiguration): 21
Event (org.apache.hadoop.yarn.event.Event): 18
AsyncDispatcher (org.apache.hadoop.yarn.event.AsyncDispatcher): 15
ContainerLaunchContext (org.apache.hadoop.yarn.api.records.ContainerLaunchContext): 14
FileContext (org.apache.hadoop.fs.FileContext): 11
DrainDispatcher (org.apache.hadoop.yarn.event.DrainDispatcher): 11
EventHandler (org.apache.hadoop.yarn.event.EventHandler): 10
NMContext (org.apache.hadoop.yarn.server.nodemanager.NodeManager.NMContext): 10
RMContext (org.apache.hadoop.yarn.server.resourcemanager.RMContext): 10
ApplicationId (org.apache.hadoop.yarn.api.records.ApplicationId): 9
IOException (java.io.IOException): 8
Job (org.apache.hadoop.mapreduce.v2.app.job.Job): 7
ApplicationAttemptId (org.apache.hadoop.yarn.api.records.ApplicationAttemptId): 7
Container (org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container): 7
Path (org.apache.hadoop.fs.Path): 6
JobId (org.apache.hadoop.mapreduce.v2.api.records.JobId): 6
ArrayList (java.util.ArrayList): 5