
Example 41 with Clock

Use of org.apache.hadoop.yarn.util.Clock in the Apache Hadoop project.

Source: the getMockMapTask method of the class TestRecovery.

private MapTaskImpl getMockMapTask(long clusterTimestamp, EventHandler eh) {
    ApplicationId appId = ApplicationId.newInstance(clusterTimestamp, 1);
    JobId jobId = MRBuilderUtils.newJobId(appId, 1);
    int partitions = 2;
    Path remoteJobConfFile = mock(Path.class);
    JobConf conf = new JobConf();
    TaskAttemptListener taskAttemptListener = mock(TaskAttemptListener.class);
    Token<JobTokenIdentifier> jobToken = (Token<JobTokenIdentifier>) mock(Token.class);
    Credentials credentials = null;
    Clock clock = new SystemClock();
    int appAttemptId = 3;
    MRAppMetrics metrics = mock(MRAppMetrics.class);
    Resource minContainerRequirements = mock(Resource.class);
    when(minContainerRequirements.getMemory()).thenReturn(1000);
    ClusterInfo clusterInfo = mock(ClusterInfo.class);
    AppContext appContext = mock(AppContext.class);
    when(appContext.getClusterInfo()).thenReturn(clusterInfo);
    TaskSplitMetaInfo taskSplitMetaInfo = mock(TaskSplitMetaInfo.class);
    MapTaskImpl mapTask = new MapTaskImpl(jobId, partitions, eh, remoteJobConfFile,
        conf, taskSplitMetaInfo, taskAttemptListener, jobToken, credentials, clock,
        appAttemptId, metrics, appContext);
    return mapTask;
}
Also used:
Path (org.apache.hadoop.fs.Path)
SystemClock (org.apache.hadoop.yarn.util.SystemClock)
Resource (org.apache.hadoop.yarn.api.records.Resource)
JobTokenIdentifier (org.apache.hadoop.mapreduce.security.token.JobTokenIdentifier)
Token (org.apache.hadoop.security.token.Token)
Clock (org.apache.hadoop.yarn.util.Clock)
MapTaskImpl (org.apache.hadoop.mapreduce.v2.app.job.impl.MapTaskImpl)
TaskSplitMetaInfo (org.apache.hadoop.mapreduce.split.JobSplit.TaskSplitMetaInfo)
ApplicationId (org.apache.hadoop.yarn.api.records.ApplicationId)
MRAppMetrics (org.apache.hadoop.mapreduce.v2.app.metrics.MRAppMetrics)
JobConf (org.apache.hadoop.mapred.JobConf)
JobId (org.apache.hadoop.mapreduce.v2.api.records.JobId)
Credentials (org.apache.hadoop.security.Credentials)
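
The fixture above wires the task to a real SystemClock, so its timestamps follow wall-clock time. The constructor parameter is typed as the org.apache.hadoop.yarn.util.Clock interface, which declares a single method, long getTime(), so a test can substitute any implementation to make timing deterministic. The sketch below is illustrative only; the ManualClock class and its names are hypothetical, not part of Hadoop.

import org.apache.hadoop.yarn.util.Clock;

// Hypothetical test helper: a Clock whose time is advanced explicitly by the test.
class ManualClock implements Clock {

    private long now; // current "time" in milliseconds, owned by the test

    ManualClock(long start) {
        this.now = start;
    }

    @Override
    public long getTime() {
        return now;
    }

    // Move the clock forward by the given number of milliseconds.
    void advance(long millis) {
        now += millis;
    }
}

Passing such a clock to MapTaskImpl in place of new SystemClock() would pin the task's recorded timestamps to values the test controls, which is essentially what the ControlledClock used in the next example provides out of the box.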

Example 42 with Clock

Use of org.apache.hadoop.yarn.util.Clock in the Apache Hadoop project.

Source: the verifyMillisCounters method of the class TestTaskAttempt.

public void verifyMillisCounters(int mapMemMb, int reduceMemMb, int minContainerSize) throws Exception {
    Clock actualClock = new SystemClock();
    ControlledClock clock = new ControlledClock(actualClock);
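    // Pin the controlled clock to t = 10 ms so the attempts' launch times are deterministic.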
    clock.setTime(10);
    MRApp app = new MRApp(1, 1, false, "testSlotMillisCounterUpdate", true, clock);
    Configuration conf = new Configuration();
    conf.setInt(MRJobConfig.MAP_MEMORY_MB, mapMemMb);
    conf.setInt(MRJobConfig.REDUCE_MEMORY_MB, reduceMemMb);
    conf.setInt(YarnConfiguration.RM_SCHEDULER_MINIMUM_ALLOCATION_MB, minContainerSize);
    app.setClusterInfo(new ClusterInfo(Resource.newInstance(10240, 1)));
    Job job = app.submit(conf);
    app.waitForState(job, JobState.RUNNING);
    Map<TaskId, Task> tasks = job.getTasks();
    Assert.assertEquals("Num tasks is not correct", 2, tasks.size());
    Iterator<Task> taskIter = tasks.values().iterator();
    Task mTask = taskIter.next();
    app.waitForState(mTask, TaskState.RUNNING);
    Task rTask = taskIter.next();
    app.waitForState(rTask, TaskState.RUNNING);
    Map<TaskAttemptId, TaskAttempt> mAttempts = mTask.getAttempts();
    Assert.assertEquals("Num attempts is not correct", 1, mAttempts.size());
    Map<TaskAttemptId, TaskAttempt> rAttempts = rTask.getAttempts();
    Assert.assertEquals("Num attempts is not correct", 1, rAttempts.size());
    TaskAttempt mta = mAttempts.values().iterator().next();
    TaskAttempt rta = rAttempts.values().iterator().next();
    app.waitForState(mta, TaskAttemptState.RUNNING);
    app.waitForState(rta, TaskAttemptState.RUNNING);
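    // Advance the controlled clock by 1 ms before completing the attempts; this 1 ms
    // becomes each attempt's duration and drives the *_MILLIS counter values asserted below.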
    clock.setTime(11);
    app.getContext().getEventHandler().handle(new TaskAttemptEvent(mta.getID(), TaskAttemptEventType.TA_DONE));
    app.getContext().getEventHandler().handle(new TaskAttemptEvent(rta.getID(), TaskAttemptEventType.TA_DONE));
    app.waitForState(job, JobState.SUCCEEDED);
    Assert.assertEquals(11, mta.getFinishTime());
    Assert.assertEquals(10, mta.getLaunchTime());
    Assert.assertEquals(11, rta.getFinishTime());
    Assert.assertEquals(10, rta.getLaunchTime());
    Counters counters = job.getAllCounters();
    Assert.assertEquals((int) Math.ceil((float) mapMemMb / minContainerSize), counters.findCounter(JobCounter.SLOTS_MILLIS_MAPS).getValue());
    Assert.assertEquals((int) Math.ceil((float) reduceMemMb / minContainerSize), counters.findCounter(JobCounter.SLOTS_MILLIS_REDUCES).getValue());
    Assert.assertEquals(1, counters.findCounter(JobCounter.MILLIS_MAPS).getValue());
    Assert.assertEquals(1, counters.findCounter(JobCounter.MILLIS_REDUCES).getValue());
    Assert.assertEquals(mapMemMb, counters.findCounter(JobCounter.MB_MILLIS_MAPS).getValue());
    Assert.assertEquals(reduceMemMb, counters.findCounter(JobCounter.MB_MILLIS_REDUCES).getValue());
    Assert.assertEquals(1, counters.findCounter(JobCounter.VCORES_MILLIS_MAPS).getValue());
    Assert.assertEquals(1, counters.findCounter(JobCounter.VCORES_MILLIS_REDUCES).getValue());
}
Also used:
Task (org.apache.hadoop.mapreduce.v2.app.job.Task)
TaskId (org.apache.hadoop.mapreduce.v2.api.records.TaskId)
SystemClock (org.apache.hadoop.yarn.util.SystemClock)
Configuration (org.apache.hadoop.conf.Configuration)
YarnConfiguration (org.apache.hadoop.yarn.conf.YarnConfiguration)
TaskAttemptId (org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId)
TaskAttemptEvent (org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEvent)
Clock (org.apache.hadoop.yarn.util.Clock)
ControlledClock (org.apache.hadoop.yarn.util.ControlledClock)
ClusterInfo (org.apache.hadoop.mapreduce.v2.app.ClusterInfo)
Counters (org.apache.hadoop.mapreduce.Counters)
TaskAttempt (org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt)
Job (org.apache.hadoop.mapreduce.v2.app.job.Job)
MRApp (org.apache.hadoop.mapreduce.v2.app.MRApp)
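
The counter assertions encode a simple relationship: each attempt's duration is the 1 ms the ControlledClock was advanced by (from 10 to 11); MB_MILLIS scales that duration by the configured memory, VCORES_MILLIS by the number of vcores (one per attempt, as the assertions imply), and SLOTS_MILLIS by the number of minimum-size containers the request rounds up to. The snippet below just recomputes those expectations for hypothetical inputs (mapMemMb = 2048, minContainerSize = 1024); it is a sketch of the arithmetic, not code from the test.

// Illustrative arithmetic only: recomputes the expected map-side counter values
// that the assertions in verifyMillisCounters encode, for hypothetical inputs.
public class MillisCounterArithmetic {
    public static void main(String[] args) {
        long launchTime = 10;     // clock.setTime(10) before the job is submitted
        long finishTime = 11;     // clock.setTime(11) before TA_DONE is sent
        long durationMs = finishTime - launchTime;    // each attempt "ran" for 1 ms

        int mapMemMb = 2048;          // hypothetical MRJobConfig.MAP_MEMORY_MB value
        int minContainerSize = 1024;  // hypothetical RM minimum allocation (MB)
        int vcores = 1;               // one vcore per attempt, as the assertions imply

        long millisMaps = durationMs;                                 // MILLIS_MAPS
        long mbMillisMaps = (long) mapMemMb * durationMs;             // MB_MILLIS_MAPS
        long vcoresMillisMaps = (long) vcores * durationMs;           // VCORES_MILLIS_MAPS
        long slotsMillisMaps =                                        // SLOTS_MILLIS_MAPS
                (long) Math.ceil((float) mapMemMb / minContainerSize) * durationMs;

        System.out.printf("MILLIS_MAPS=%d MB_MILLIS_MAPS=%d VCORES_MILLIS_MAPS=%d SLOTS_MILLIS_MAPS=%d%n",
                millisMaps, mbMillisMaps, vcoresMillisMaps, slotsMillisMaps);
    }
}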

Aggregations

Clock (org.apache.hadoop.yarn.util.Clock): 42
Test (org.junit.Test): 30
SystemClock (org.apache.hadoop.yarn.util.SystemClock): 18
UTCClock (org.apache.hadoop.yarn.util.UTCClock): 17
ReservationSubmissionRequest (org.apache.hadoop.yarn.api.protocolrecords.ReservationSubmissionRequest): 16
IOException (java.io.IOException): 14
YarnException (org.apache.hadoop.yarn.exceptions.YarnException): 14
ReservationId (org.apache.hadoop.yarn.api.records.ReservationId): 12
ApplicationNotFoundException (org.apache.hadoop.yarn.exceptions.ApplicationNotFoundException): 12
ReservationListRequest (org.apache.hadoop.yarn.api.protocolrecords.ReservationListRequest): 10
ReservationListResponse (org.apache.hadoop.yarn.api.protocolrecords.ReservationListResponse): 10
Configuration (org.apache.hadoop.conf.Configuration): 9
BrokenBarrierException (java.util.concurrent.BrokenBarrierException): 8
YarnClient (org.apache.hadoop.yarn.client.api.YarnClient): 8
MiniYARNCluster (org.apache.hadoop.yarn.server.MiniYARNCluster): 8
AccessControlException (java.security.AccessControlException): 7
ApplicationId (org.apache.hadoop.yarn.api.records.ApplicationId): 6
Resource (org.apache.hadoop.yarn.api.records.Resource): 6
TaskAttemptId (org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId): 5
TaskId (org.apache.hadoop.mapreduce.v2.api.records.TaskId): 5