Example 21 with ControlledClock

Use of org.apache.hadoop.yarn.util.ControlledClock in project hadoop by apache.

The class TestRMContainerAllocator, method testHeartbeatHandler. The test drives the RMContainerAllocator heartbeat loop with a manually advanced ControlledClock, so the assertions on the last heartbeat time do not depend on wall-clock timing.

@Test
public void testHeartbeatHandler() throws Exception {
    LOG.info("Running testHeartbeatHandler");
    Configuration conf = new Configuration();
    conf.setInt(MRJobConfig.MR_AM_TO_RM_HEARTBEAT_INTERVAL_MS, 1);
    ControlledClock clock = new ControlledClock();
    AppContext appContext = mock(AppContext.class);
    when(appContext.getClock()).thenReturn(clock);
    when(appContext.getApplicationID()).thenReturn(ApplicationId.newInstance(1, 1));
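    // Subclass the allocator with register(), the scheduler proxy and heartbeat()
    // stubbed out, so only the clock-driven heartbeat scheduling is exercised.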
    RMContainerAllocator allocator = new RMContainerAllocator(mock(ClientService.class), appContext, new NoopAMPreemptionPolicy()) {

        @Override
        protected void register() {
        }

        @Override
        protected ApplicationMasterProtocol createSchedulerProxy() {
            return mock(ApplicationMasterProtocol.class);
        }

        @Override
        protected synchronized void heartbeat() throws Exception {
        }
    };
    allocator.init(conf);
    allocator.start();
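    // Advance the controlled clock, then poll until the heartbeat thread observes the new time.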
    clock.setTime(5);
    int timeToWaitMs = 5000;
    while (allocator.getLastHeartbeatTime() != 5 && timeToWaitMs > 0) {
        Thread.sleep(10);
        timeToWaitMs -= 10;
    }
    Assert.assertEquals(5, allocator.getLastHeartbeatTime());
    clock.setTime(7);
    timeToWaitMs = 5000;
    while (allocator.getLastHeartbeatTime() != 7 && timeToWaitMs > 0) {
        Thread.sleep(10);
        timeToWaitMs -= 10;
    }
    Assert.assertEquals(7, allocator.getLastHeartbeatTime());
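    // A callback registered via runOnNextHeartbeat() should run on the following heartbeat.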
    final AtomicBoolean callbackCalled = new AtomicBoolean(false);
    allocator.runOnNextHeartbeat(new Runnable() {

        @Override
        public void run() {
            callbackCalled.set(true);
        }
    });
    clock.setTime(8);
    timeToWaitMs = 5000;
    while (allocator.getLastHeartbeatTime() != 8 && timeToWaitMs > 0) {
        Thread.sleep(10);
        timeToWaitMs -= 10;
    }
    Assert.assertEquals(8, allocator.getLastHeartbeatTime());
    Assert.assertTrue(callbackCalled.get());
}
Also used : AtomicBoolean(java.util.concurrent.atomic.AtomicBoolean) Configuration(org.apache.hadoop.conf.Configuration) YarnConfiguration(org.apache.hadoop.yarn.conf.YarnConfiguration) ClientService(org.apache.hadoop.mapreduce.v2.app.client.ClientService) RunningAppContext(org.apache.hadoop.mapreduce.v2.app.MRAppMaster.RunningAppContext) AppContext(org.apache.hadoop.mapreduce.v2.app.AppContext) NoopAMPreemptionPolicy(org.apache.hadoop.mapreduce.v2.app.rm.preemption.NoopAMPreemptionPolicy) ControlledClock(org.apache.hadoop.yarn.util.ControlledClock) Test(org.junit.Test)
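
Both assertions in the test above come down to one property of ControlledClock: once setTime() has been called, getTime() returns exactly that value until the next setTime(), regardless of how much real time passes. A minimal sketch of that contract in isolation, using only the constructor and methods that appear in the example; the test class and method names below are made up for illustration.

import org.apache.hadoop.yarn.util.ControlledClock;
import org.junit.Assert;
import org.junit.Test;

// Hypothetical stand-alone test, not part of the Hadoop source tree.
public class ControlledClockContractTest {

    @Test
    public void testSetTimeControlsGetTime() {
        // Same no-arg constructor as in the allocator test above.
        ControlledClock clock = new ControlledClock();

        // After setTime(), getTime() reports that value until the next
        // setTime(), independent of wall-clock time.
        clock.setTime(5);
        Assert.assertEquals(5, clock.getTime());

        clock.setTime(7);
        Assert.assertEquals(7, clock.getTime());
    }
}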

Example 22 with ControlledClock

Use of org.apache.hadoop.yarn.util.ControlledClock in project hadoop by apache.

The class TestSpeculativeExecutionWithMRApp, method testSpeculateSuccessfulWithoutUpdateEvents. The test wraps the system clock in a ControlledClock and jumps the time forward so that speculative execution of the one unfinished task is triggered without real waiting.

@Test
public void testSpeculateSuccessfulWithoutUpdateEvents() throws Exception {
    Clock actualClock = SystemClock.getInstance();
    final ControlledClock clock = new ControlledClock(actualClock);
    clock.setTime(System.currentTimeMillis());
    MRApp app = new MRApp(NUM_MAPPERS, NUM_REDUCERS, false, "test", true, clock);
    Job job = app.submit(new Configuration(), true, true);
    app.waitForState(job, JobState.RUNNING);
    Map<TaskId, Task> tasks = job.getTasks();
    Assert.assertEquals("Num tasks is not correct", NUM_MAPPERS + NUM_REDUCERS, tasks.size());
    Iterator<Task> taskIter = tasks.values().iterator();
    while (taskIter.hasNext()) {
        app.waitForState(taskIter.next(), TaskState.RUNNING);
    }
    // Jump the clock ahead, then report 80% progress for every running task attempt.
    clock.setTime(System.currentTimeMillis() + 2000);
    EventHandler appEventHandler = app.getContext().getEventHandler();
    for (Map.Entry<TaskId, Task> mapTask : tasks.entrySet()) {
        for (Map.Entry<TaskAttemptId, TaskAttempt> taskAttempt : mapTask.getValue().getAttempts().entrySet()) {
            TaskAttemptStatus status = createTaskAttemptStatus(taskAttempt.getKey(), (float) 0.8, TaskAttemptState.RUNNING);
            TaskAttemptStatusUpdateEvent event = new TaskAttemptStatusUpdateEvent(taskAttempt.getKey(), status);
            appEventHandler.handle(event);
        }
    }
    Random generator = new Random();
    Object[] taskValues = tasks.values().toArray();
    final Task taskToBeSpeculated = (Task) taskValues[generator.nextInt(taskValues.length)];
    // Finish every task except the randomly chosen one, which is left to be speculated.
    for (Map.Entry<TaskId, Task> mapTask : tasks.entrySet()) {
        for (Map.Entry<TaskAttemptId, TaskAttempt> taskAttempt : mapTask.getValue().getAttempts().entrySet()) {
            if (mapTask.getKey() != taskToBeSpeculated.getID()) {
                appEventHandler.handle(new TaskAttemptEvent(taskAttempt.getKey(), TaskAttemptEventType.TA_DONE));
                appEventHandler.handle(new TaskAttemptEvent(taskAttempt.getKey(), TaskAttemptEventType.TA_CONTAINER_COMPLETED));
                app.waitForState(taskAttempt.getValue(), TaskAttemptState.SUCCEEDED);
            }
        }
    }
    GenericTestUtils.waitFor(new Supplier<Boolean>() {

        @Override
        public Boolean get() {
            if (taskToBeSpeculated.getAttempts().size() != 2) {
                clock.setTime(System.currentTimeMillis() + 1000);
                return false;
            } else {
                return true;
            }
        }
    }, 1000, 60000);
    // Finish the first attempt; the second, speculative attempt is then expected to be killed.
    TaskAttempt[] ta = makeFirstAttemptWin(appEventHandler, taskToBeSpeculated);
    verifySpeculationMessage(app, ta);
    app.waitForState(Service.STATE.STOPPED);
}
Also used : Task(org.apache.hadoop.mapreduce.v2.app.job.Task) TaskId(org.apache.hadoop.mapreduce.v2.api.records.TaskId) Configuration(org.apache.hadoop.conf.Configuration) EventHandler(org.apache.hadoop.yarn.event.EventHandler) Clock(org.apache.hadoop.yarn.util.Clock) ControlledClock(org.apache.hadoop.yarn.util.ControlledClock) SystemClock(org.apache.hadoop.yarn.util.SystemClock) Random(java.util.Random) TaskAttempt(org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt) Job(org.apache.hadoop.mapreduce.v2.app.job.Job) MRApp(org.apache.hadoop.mapreduce.v2.app.MRApp) TaskAttemptId(org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId) TaskAttemptStatus(org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptStatusUpdateEvent.TaskAttemptStatus) TaskAttemptEvent(org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEvent) TaskAttemptStatusUpdateEvent(org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptStatusUpdateEvent) Map(java.util.Map) Test(org.junit.Test)
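
Unlike the heartbeat test, this example builds the ControlledClock on top of the real SystemClock and then repeatedly nudges the controlled time forward so the speculator sees enough elapsed runtime to launch a second attempt. A minimal sketch of just that clock setup, assuming nothing beyond the constructors and methods used above; the helper class and method names are made up for illustration.

import org.apache.hadoop.yarn.util.Clock;
import org.apache.hadoop.yarn.util.ControlledClock;
import org.apache.hadoop.yarn.util.SystemClock;

// Hypothetical helper, not part of the Hadoop source tree.
public class SpeculationClockSketch {

    public static ControlledClock newTestClock() {
        // Start from the real clock, as the test above does...
        Clock actualClock = SystemClock.getInstance();
        ControlledClock clock = new ControlledClock(actualClock);

        // ...then pin the controlled time to "now". The test can later jump
        // it forward, e.g. clock.setTime(System.currentTimeMillis() + 1000),
        // so time-based speculation decisions happen without real waiting.
        clock.setTime(System.currentTimeMillis());
        return clock;
    }

    public static void main(String[] args) {
        ControlledClock clock = newTestClock();
        System.out.println("Controlled time: " + clock.getTime());
    }
}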

Aggregations

ControlledClock (org.apache.hadoop.yarn.util.ControlledClock): 22
Test (org.junit.Test): 18
Configuration (org.apache.hadoop.conf.Configuration): 11
YarnConfiguration (org.apache.hadoop.yarn.conf.YarnConfiguration): 11
Job (org.apache.hadoop.mapreduce.v2.app.job.Job): 7
ApplicationAttemptId (org.apache.hadoop.yarn.api.records.ApplicationAttemptId): 7
RMApp (org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp): 6
TaskAttemptId (org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId): 5
MockNM (org.apache.hadoop.yarn.server.resourcemanager.MockNM): 5
JobId (org.apache.hadoop.mapreduce.v2.api.records.JobId): 4
Task (org.apache.hadoop.mapreduce.v2.app.job.Task): 4
TaskAttempt (org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt): 4
TaskAttemptEvent (org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEvent): 4
DrainDispatcher (org.apache.hadoop.yarn.event.DrainDispatcher): 4
FileWriter (java.io.FileWriter): 3
PrintWriter (java.io.PrintWriter): 3
TaskId (org.apache.hadoop.mapreduce.v2.api.records.TaskId): 3
MRApp (org.apache.hadoop.mapreduce.v2.app.MRApp): 3
TaskAttemptStatus (org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptStatusUpdateEvent.TaskAttemptStatus): 3
Container (org.apache.hadoop.yarn.api.records.Container): 3