
Example 16 with AsyncDispatcher

Use of org.apache.hadoop.yarn.event.AsyncDispatcher in project hadoop by apache.

The class TestResourceLocalizationService, method testLocalizationInit:

@Test
public void testLocalizationInit() throws Exception {
    conf.set(CommonConfigurationKeys.FS_PERMISSIONS_UMASK_KEY, "077");
    AsyncDispatcher dispatcher = new AsyncDispatcher();
    dispatcher.init(new Configuration());
    ContainerExecutor exec = mock(ContainerExecutor.class);
    DeletionService delService = spy(new DeletionService(exec));
    delService.init(conf);
    delService.start();
    List<Path> localDirs = new ArrayList<Path>();
    String[] sDirs = new String[4];
    for (int i = 0; i < 4; ++i) {
        localDirs.add(lfs.makeQualified(new Path(basedir, i + "")));
        sDirs[i] = localDirs.get(i).toString();
    }
    conf.setStrings(YarnConfiguration.NM_LOCAL_DIRS, sDirs);
    LocalDirsHandlerService diskhandler = new LocalDirsHandlerService();
    diskhandler.init(conf);
    ResourceLocalizationService locService = spy(new ResourceLocalizationService(dispatcher, exec, delService, diskhandler, nmContext));
    doReturn(lfs).when(locService).getLocalFileContext(isA(Configuration.class));
    try {
        dispatcher.start();
        // initialize ResourceLocalizationService
        locService.init(conf);
        final FsPermission defaultPerm = new FsPermission((short) 0755);
        // verify directory creation
        for (Path p : localDirs) {
            p = new Path((new URI(p.toString())).getPath());
            Path usercache = new Path(p, ContainerLocalizer.USERCACHE);
            verify(spylfs).mkdir(eq(usercache), eq(defaultPerm), eq(true));
            Path publicCache = new Path(p, ContainerLocalizer.FILECACHE);
            verify(spylfs).mkdir(eq(publicCache), eq(defaultPerm), eq(true));
            Path nmPriv = new Path(p, ResourceLocalizationService.NM_PRIVATE_DIR);
            verify(spylfs).mkdir(eq(nmPriv), eq(ResourceLocalizationService.NM_PRIVATE_PERM), eq(true));
        }
    } finally {
        dispatcher.stop();
        delService.stop();
    }
}
Also used: Path (org.apache.hadoop.fs.Path), ContainerExecutor (org.apache.hadoop.yarn.server.nodemanager.ContainerExecutor), DefaultContainerExecutor (org.apache.hadoop.yarn.server.nodemanager.DefaultContainerExecutor), Configuration (org.apache.hadoop.conf.Configuration), YarnConfiguration (org.apache.hadoop.yarn.conf.YarnConfiguration), DeletionService (org.apache.hadoop.yarn.server.nodemanager.DeletionService), ArrayList (java.util.ArrayList), LocalDirsHandlerService (org.apache.hadoop.yarn.server.nodemanager.LocalDirsHandlerService), URI (java.net.URI), AsyncDispatcher (org.apache.hadoop.yarn.event.AsyncDispatcher), FsPermission (org.apache.hadoop.fs.permission.FsPermission), Test (org.junit.Test)
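Every example on this page follows the same AsyncDispatcher lifecycle: construct, init(conf), start(), publish events through getEventHandler(), and stop() in a finally block. A minimal self-contained sketch of that lifecycle, assuming hadoop-yarn-common is on the classpath (DemoEventType, DemoEvent, and DispatcherLifecycleDemo are illustrative names, not Hadoop classes):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.event.AbstractEvent;
import org.apache.hadoop.yarn.event.AsyncDispatcher;
import org.apache.hadoop.yarn.event.EventHandler;

// Illustrative event type and event, used only to show the lifecycle.
enum DemoEventType { PING }

class DemoEvent extends AbstractEvent<DemoEventType> {
    DemoEvent() {
        super(DemoEventType.PING);
    }
}

public class DispatcherLifecycleDemo {
    public static void main(String[] args) throws Exception {
        AsyncDispatcher dispatcher = new AsyncDispatcher();
        // Handlers are registered against the event-type enum class.
        dispatcher.register(DemoEventType.class, new EventHandler<DemoEvent>() {
            @Override
            public void handle(DemoEvent event) {
                System.out.println("got " + event.getType());
            }
        });
        dispatcher.init(new Configuration());
        dispatcher.start();
        try {
            // Events are queued here and delivered on the dispatcher thread.
            dispatcher.getEventHandler().handle(new DemoEvent());
            Thread.sleep(100); // crude wait; plain stop() does not drain the queue by default
        } finally {
            dispatcher.stop();
        }
    }
}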

Example 17 with AsyncDispatcher

Use of org.apache.hadoop.yarn.event.AsyncDispatcher in project hadoop by apache.

The class TestRMDispatcher, method testSchedulerEventDispatcherForPreemptionEvents:

@SuppressWarnings("unchecked")
@Test(timeout = 10000)
public void testSchedulerEventDispatcherForPreemptionEvents() {
    AsyncDispatcher rmDispatcher = new AsyncDispatcher();
    CapacityScheduler sched = spy(new CapacityScheduler());
    YarnConfiguration conf = new YarnConfiguration();
    EventDispatcher schedulerDispatcher = new EventDispatcher(sched, sched.getClass().getName());
    rmDispatcher.register(SchedulerEventType.class, schedulerDispatcher);
    rmDispatcher.init(conf);
    rmDispatcher.start();
    schedulerDispatcher.init(conf);
    schedulerDispatcher.start();
    try {
        ApplicationAttemptId appAttemptId = mock(ApplicationAttemptId.class);
        RMContainer container = mock(RMContainer.class);
        ContainerPreemptEvent event1 = new ContainerPreemptEvent(appAttemptId, container, SchedulerEventType.KILL_RESERVED_CONTAINER);
        rmDispatcher.getEventHandler().handle(event1);
        ContainerPreemptEvent event2 = new ContainerPreemptEvent(appAttemptId, container, SchedulerEventType.MARK_CONTAINER_FOR_KILLABLE);
        rmDispatcher.getEventHandler().handle(event2);
        ContainerPreemptEvent event3 = new ContainerPreemptEvent(appAttemptId, container, SchedulerEventType.MARK_CONTAINER_FOR_PREEMPTION);
        rmDispatcher.getEventHandler().handle(event3);
        // Wait for events to be processed by scheduler dispatcher.
        Thread.sleep(1000);
        verify(sched, times(3)).handle(any(SchedulerEvent.class));
        verify(sched).killReservedContainer(container);
        verify(sched).markContainerForPreemption(appAttemptId, container);
        verify(sched).markContainerForKillable(container);
    } catch (InterruptedException e) {
        Assert.fail();
    } finally {
        schedulerDispatcher.stop();
        rmDispatcher.stop();
    }
}
Also used: EventDispatcher (org.apache.hadoop.yarn.event.EventDispatcher), AsyncDispatcher (org.apache.hadoop.yarn.event.AsyncDispatcher), YarnConfiguration (org.apache.hadoop.yarn.conf.YarnConfiguration), ContainerPreemptEvent (org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.ContainerPreemptEvent), ApplicationAttemptId (org.apache.hadoop.yarn.api.records.ApplicationAttemptId), RMContainer (org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainer), CapacityScheduler (org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler), SchedulerEvent (org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.SchedulerEvent), Test (org.junit.Test)
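The Thread.sleep(1000) above is a fixed wait for asynchronous event delivery. Hadoop's test utilities also include DrainDispatcher, an AsyncDispatcher subclass in the hadoop-yarn-common test jar, whose await() blocks until the event queue is empty. Note that in this particular test the events are forwarded to a second EventDispatcher thread, so draining the first dispatcher alone would not be a drop-in replacement; a sketch of the general pattern, assuming the test jar is on the classpath:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.event.DrainDispatcher;

DrainDispatcher dispatcher = new DrainDispatcher();
// register handlers here, exactly as with a plain AsyncDispatcher
dispatcher.init(new Configuration());
dispatcher.start();
dispatcher.getEventHandler().handle(event1); // event1: any event of a registered type
// Blocks until the internal queue is empty, replacing the fixed Thread.sleep(...).
dispatcher.await();
dispatcher.stop();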

Example 18 with AsyncDispatcher

Use of org.apache.hadoop.yarn.event.AsyncDispatcher in project hadoop by apache.

The class TestContinuousScheduling, method testWithNodeRemoved:

@Test
public void testWithNodeRemoved() throws Exception {
    // Disable continuous scheduling, will invoke continuous
    // scheduling once manually
    scheduler = new FairScheduler();
    conf = super.createConfiguration();
    resourceManager = new MockRM(conf);
    // TODO: This test should really be using MockRM. For now starting stuff
    // that is needed at a bare minimum.
    ((AsyncDispatcher) resourceManager.getRMContext().getDispatcher()).start();
    resourceManager.getRMContext().getStateStore().start();
    // to initialize the master key
    resourceManager.getRMContext().getContainerTokenSecretManager().rollMasterKey();
    scheduler.setRMContext(resourceManager.getRMContext());
    Assert.assertTrue("Continuous scheduling should be disabled.", !scheduler.isContinuousSchedulingEnabled());
    scheduler.init(conf);
    scheduler.start();
    // Add two nodes
    RMNode node1 = MockNodes.newNodeInfo(1, Resources.createResource(8 * 1024, 8), 1, "127.0.0.1");
    NodeAddedSchedulerEvent nodeEvent1 = new NodeAddedSchedulerEvent(node1);
    scheduler.handle(nodeEvent1);
    RMNode node2 = MockNodes.newNodeInfo(1, Resources.createResource(8 * 1024, 8), 2, "127.0.0.2");
    NodeAddedSchedulerEvent nodeEvent2 = new NodeAddedSchedulerEvent(node2);
    scheduler.handle(nodeEvent2);
    Assert.assertEquals("We should have two alive nodes.", 2, scheduler.getNumClusterNodes());
    // Remove one node
    NodeRemovedSchedulerEvent removeNode1 = new NodeRemovedSchedulerEvent(node1);
    scheduler.handle(removeNode1);
    Assert.assertEquals("We should only have one alive node.", 1, scheduler.getNumClusterNodes());
    // Invoke the continuous scheduling once
    try {
        scheduler.continuousSchedulingAttempt();
    } catch (Exception e) {
        fail("Exception happened when doing continuous scheduling. " + e.toString());
    }
}
Also used: RMNode (org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode), NodeAddedSchedulerEvent (org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeAddedSchedulerEvent), AsyncDispatcher (org.apache.hadoop.yarn.event.AsyncDispatcher), NodeRemovedSchedulerEvent (org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeRemovedSchedulerEvent), MockRM (org.apache.hadoop.yarn.server.resourcemanager.MockRM), YarnRuntimeException (org.apache.hadoop.yarn.exceptions.YarnRuntimeException), Test (org.junit.Test)
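A note on the cast in this example: RMContext.getDispatcher() is typed as the Dispatcher interface, while MockRM wires an AsyncDispatcher in underneath, so the test downcasts to reach the service start() method. A slightly more defensive sketch of the same step:

import org.apache.hadoop.yarn.event.AsyncDispatcher;
import org.apache.hadoop.yarn.event.Dispatcher;

Dispatcher d = resourceManager.getRMContext().getDispatcher();
// MockRM's context is backed by an AsyncDispatcher; guard the cast anyway.
if (d instanceof AsyncDispatcher) {
    ((AsyncDispatcher) d).start(); // AsyncDispatcher is a service; start its thread
}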

Example 19 with AsyncDispatcher

Use of org.apache.hadoop.yarn.event.AsyncDispatcher in project hadoop by apache.

The class TestContinuousScheduling, method testInterruptedException:

@Test
public void testInterruptedException() throws Exception {
    // Disable continuous scheduling, will invoke continuous
    // scheduling once manually
    scheduler = new FairScheduler();
    conf = super.createConfiguration();
    resourceManager = new MockRM(conf);
    // TODO: This test should really be using MockRM. For now starting stuff
    // that is needed at a bare minimum.
    ((AsyncDispatcher) resourceManager.getRMContext().getDispatcher()).start();
    resourceManager.getRMContext().getStateStore().start();
    // to initialize the master key
    resourceManager.getRMContext().getContainerTokenSecretManager().rollMasterKey();
    scheduler.setRMContext(resourceManager.getRMContext());
    scheduler.init(conf);
    scheduler.start();
    FairScheduler spyScheduler = spy(scheduler);
    Assert.assertTrue("Continuous scheduling should be disabled.", !spyScheduler.isContinuousSchedulingEnabled());
    // Add one node
    RMNode node1 = MockNodes.newNodeInfo(1, Resources.createResource(8 * 1024, 8), 1, "127.0.0.1");
    NodeAddedSchedulerEvent nodeEvent1 = new NodeAddedSchedulerEvent(node1);
    spyScheduler.handle(nodeEvent1);
    Assert.assertEquals("We should have one alive node.", 1, spyScheduler.getNumClusterNodes());
    InterruptedException ie = new InterruptedException();
    doThrow(new YarnRuntimeException(ie)).when(spyScheduler).attemptScheduling(isA(FSSchedulerNode.class));
    // Invoke the continuous scheduling once
    try {
        spyScheduler.continuousSchedulingAttempt();
        fail("Expected InterruptedException to stop schedulingThread");
    } catch (InterruptedException e) {
        Assert.assertEquals(ie, e);
    }
}
Also used: YarnRuntimeException (org.apache.hadoop.yarn.exceptions.YarnRuntimeException), RMNode (org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode), NodeAddedSchedulerEvent (org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeAddedSchedulerEvent), AsyncDispatcher (org.apache.hadoop.yarn.event.AsyncDispatcher), MockRM (org.apache.hadoop.yarn.server.resourcemanager.MockRM), Test (org.junit.Test)
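The assertion at the end works because the scheduler unwraps the YarnRuntimeException and rethrows the original InterruptedException so its scheduling thread can shut down cleanly. A sketch of that unwrap-and-rethrow idea (illustrative helper, not FairScheduler's actual code):

import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;

// Illustrative helper showing the pattern the test exercises.
static void runOnce(Runnable attempt) throws InterruptedException {
    try {
        attempt.run(); // may throw YarnRuntimeException wrapping an interrupt
    } catch (YarnRuntimeException e) {
        if (e.getCause() instanceof InterruptedException) {
            // Surface the interrupt so the calling thread can stop.
            throw (InterruptedException) e.getCause();
        }
        throw e;
    }
}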

Example 20 with AsyncDispatcher

Use of org.apache.hadoop.yarn.event.AsyncDispatcher in project hadoop by apache.

The class TestKill, method testKillTaskWait:

@Test
public void testKillTaskWait() throws Exception {
    final Dispatcher dispatcher = new AsyncDispatcher() {

        private TaskAttemptEvent cachedKillEvent;

        @Override
        protected void dispatch(Event event) {
            if (event instanceof TaskAttemptEvent) {
                TaskAttemptEvent killEvent = (TaskAttemptEvent) event;
                if (killEvent.getType() == TaskAttemptEventType.TA_KILL) {
                    TaskAttemptId taID = killEvent.getTaskAttemptID();
                    if (taID.getTaskId().getTaskType() == TaskType.REDUCE && taID.getTaskId().getId() == 0 && taID.getId() == 0) {
                        // Task is asking the reduce TA to kill itself. 'Create' a race
                        // condition. Make the task succeed and then inform the task that
                        // TA has succeeded. Once Task gets the TA succeeded event at
                        // KILL_WAIT, then relay the actual kill signal to TA
                        super.dispatch(new TaskAttemptEvent(taID, TaskAttemptEventType.TA_DONE));
                        super.dispatch(new TaskAttemptEvent(taID, TaskAttemptEventType.TA_CONTAINER_COMPLETED));
                        super.dispatch(new TaskTAttemptEvent(taID, TaskEventType.T_ATTEMPT_SUCCEEDED));
                        this.cachedKillEvent = killEvent;
                        return;
                    }
                }
            } else if (event instanceof TaskEvent) {
                TaskEvent taskEvent = (TaskEvent) event;
                if (taskEvent.getType() == TaskEventType.T_ATTEMPT_SUCCEEDED && this.cachedKillEvent != null) {
                    // When the TA comes and reports that it is done, send the
                    // cachedKillEvent
                    super.dispatch(this.cachedKillEvent);
                    return;
                }
            }
            super.dispatch(event);
        }
    };
    MRApp app = new MRApp(1, 1, false, this.getClass().getName(), true) {

        @Override
        public Dispatcher createDispatcher() {
            return dispatcher;
        }
    };
    Job job = app.submit(new Configuration());
    JobId jobId = app.getJobId();
    app.waitForState(job, JobState.RUNNING);
    Assert.assertEquals("Num tasks not correct", 2, job.getTasks().size());
    Iterator<Task> it = job.getTasks().values().iterator();
    Task mapTask = it.next();
    Task reduceTask = it.next();
    app.waitForState(mapTask, TaskState.RUNNING);
    app.waitForState(reduceTask, TaskState.RUNNING);
    TaskAttempt mapAttempt = mapTask.getAttempts().values().iterator().next();
    app.waitForState(mapAttempt, TaskAttemptState.RUNNING);
    TaskAttempt reduceAttempt = reduceTask.getAttempts().values().iterator().next();
    app.waitForState(reduceAttempt, TaskAttemptState.RUNNING);
    // Finish map
    app.getContext().getEventHandler().handle(new TaskAttemptEvent(mapAttempt.getID(), TaskAttemptEventType.TA_DONE));
    app.waitForState(mapTask, TaskState.SUCCEEDED);
    // Now kill the job
    app.getContext().getEventHandler().handle(new JobEvent(jobId, JobEventType.JOB_KILL));
    app.waitForInternalState((JobImpl) job, JobStateInternal.KILLED);
}
Also used: Task (org.apache.hadoop.mapreduce.v2.app.job.Task), Configuration (org.apache.hadoop.conf.Configuration), TaskAttemptId (org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId), TaskAttemptEvent (org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEvent), TaskTAttemptEvent (org.apache.hadoop.mapreduce.v2.app.job.event.TaskTAttemptEvent), Dispatcher (org.apache.hadoop.yarn.event.Dispatcher), AsyncDispatcher (org.apache.hadoop.yarn.event.AsyncDispatcher), JobEvent (org.apache.hadoop.mapreduce.v2.app.job.event.JobEvent), TaskEvent (org.apache.hadoop.mapreduce.v2.app.job.event.TaskEvent), Event (org.apache.hadoop.yarn.event.Event), TaskAttempt (org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt), Job (org.apache.hadoop.mapreduce.v2.app.job.Job), JobId (org.apache.hadoop.mapreduce.v2.api.records.JobId), Test (org.junit.Test)
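The heart of this example is overriding AsyncDispatcher's protected dispatch(Event) hook to hold back one event and release it when a trigger event arrives, delegating everything else to super.dispatch(event). Stripped of the MapReduce specifics, the pattern looks like this (shouldHold and isTrigger are hypothetical predicates standing in for the TA_KILL and T_ATTEMPT_SUCCEEDED checks above):

import org.apache.hadoop.yarn.event.AsyncDispatcher;
import org.apache.hadoop.yarn.event.Event;

AsyncDispatcher dispatcher = new AsyncDispatcher() {
    private Event held; // the event parked until the trigger shows up

    // Hypothetical predicates; a real test inspects event types and IDs
    // the way testKillTaskWait checks for TA_KILL on the reduce attempt.
    private boolean shouldHold(Event e) { return false; }
    private boolean isTrigger(Event e) { return false; }

    @Override
    protected void dispatch(Event event) {
        if (shouldHold(event)) {
            held = event; // park it instead of delivering
            return;
        }
        if (isTrigger(event) && held != null) {
            super.dispatch(held); // release the parked event first
            held = null;
        }
        super.dispatch(event); // normal delivery for everything else
    }
};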

Aggregations

Usage counts for classes that co-occur with AsyncDispatcher across these examples:

AsyncDispatcher (org.apache.hadoop.yarn.event.AsyncDispatcher): 51
Test (org.junit.Test): 32
Configuration (org.apache.hadoop.conf.Configuration): 28
YarnConfiguration (org.apache.hadoop.yarn.conf.YarnConfiguration): 20
ApplicationAttemptId (org.apache.hadoop.yarn.api.records.ApplicationAttemptId): 13
Dispatcher (org.apache.hadoop.yarn.event.Dispatcher): 12
OutputCommitter (org.apache.hadoop.mapreduce.OutputCommitter): 11
JobEvent (org.apache.hadoop.mapreduce.v2.app.job.event.JobEvent): 11
CommitterEventHandler (org.apache.hadoop.mapreduce.v2.app.commit.CommitterEventHandler): 10
ApplicationId (org.apache.hadoop.yarn.api.records.ApplicationId): 9
RMContext (org.apache.hadoop.yarn.server.resourcemanager.RMContext): 9
NodeAddedSchedulerEvent (org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeAddedSchedulerEvent): 8
JobId (org.apache.hadoop.mapreduce.v2.api.records.JobId): 7
JobStartEvent (org.apache.hadoop.mapreduce.v2.app.job.event.JobStartEvent): 7
ContainerId (org.apache.hadoop.yarn.api.records.ContainerId): 7
RMNode (org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode): 7
Before (org.junit.Before): 7
ArrayList (java.util.ArrayList): 6
JobContext (org.apache.hadoop.mapreduce.JobContext): 6
LocalDirsHandlerService (org.apache.hadoop.yarn.server.nodemanager.LocalDirsHandlerService): 6