Use of org.apache.hadoop.mapreduce.v2.app.rm.ContainerAllocator in project hadoop by Apache.
The class MRAppBenchmark, method benchmark1().
@Test
public void benchmark1() throws Exception {
  // Adjust for benchmarking. Start with thousands.
  int maps = 100;
  int reduces = 0;
  System.out.println("Running benchmark with maps:" + maps +
      " reduces:" + reduces);
  run(new MRApp(maps, reduces, true, this.getClass().getName(), true) {
    @Override
    protected ContainerAllocator createContainerAllocator(
        ClientService clientService, AppContext context) {
      AMPreemptionPolicy policy = new NoopAMPreemptionPolicy();
      return new RMContainerAllocator(clientService, context, policy) {
        @Override
        protected ApplicationMasterProtocol createSchedulerProxy() {
          return new ApplicationMasterProtocol() {
            @Override
            public RegisterApplicationMasterResponse registerApplicationMaster(
                RegisterApplicationMasterRequest request) throws IOException {
              RegisterApplicationMasterResponse response =
                  Records.newRecord(RegisterApplicationMasterResponse.class);
              response.setMaximumResourceCapability(Resource.newInstance(10240, 1));
              return response;
            }

            @Override
            public FinishApplicationMasterResponse finishApplicationMaster(
                FinishApplicationMasterRequest request) throws IOException {
              FinishApplicationMasterResponse response =
                  Records.newRecord(FinishApplicationMasterResponse.class);
              return response;
            }

            @Override
            public AllocateResponse allocate(AllocateRequest request)
                throws IOException {
              AllocateResponse response = Records.newRecord(AllocateResponse.class);
              List<ResourceRequest> askList = request.getAskList();
              List<Container> containers = new ArrayList<Container>();
              for (ResourceRequest req : askList) {
                if (!ResourceRequest.isAnyLocation(req.getResourceName())) {
                  continue;
                }
                int numContainers = req.getNumContainers();
                for (int i = 0; i < numContainers; i++) {
                  ContainerId containerId = ContainerId.newContainerId(
                      getContext().getApplicationAttemptId(),
                      request.getResponseId() + i);
                  containers.add(Container.newInstance(containerId,
                      NodeId.newInstance("host" + containerId.getContainerId(), 2345),
                      "host" + containerId.getContainerId() + ":5678",
                      req.getCapability(), req.getPriority(), null));
                }
              }
              response.setAllocatedContainers(containers);
              response.setResponseId(request.getResponseId() + 1);
              response.setNumClusterNodes(350);
              return response;
            }
          };
        }
      };
    }
  });
}
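The stubbed allocate() above only honours asks whose resource name is the wildcard location and fabricates one fake Container per requested count. Below is a minimal, hypothetical sketch (not part of MRAppBenchmark; the priority and capability values are made up) of the kind of "ANY" ask that stub responds to:

// Hypothetical, standalone sketch - not taken from the Hadoop sources.
import org.apache.hadoop.yarn.api.records.Priority;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.api.records.ResourceRequest;

public class AnyAskSketch {
  public static void main(String[] args) {
    ResourceRequest anyAsk = ResourceRequest.newInstance(
        Priority.newInstance(0),          // illustrative priority
        ResourceRequest.ANY,              // "*": no host or rack preference
        Resource.newInstance(1024, 1),    // 1 GB, 1 vcore per container
        100);                             // one ask covering all 100 benchmark maps
    // The stubbed allocate() skips every ask for which this check is false.
    System.out.println(ResourceRequest.isAnyLocation(anyAsk.getResourceName()));
  }
}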
Use of org.apache.hadoop.mapreduce.v2.app.rm.ContainerAllocator in project hadoop by Apache.
The class TestStagingCleanup, method testDeletionofStagingOnKillLastTry().
// FIXME:
// This test is disabled because currently, when the shutdown hook is triggered
// on the last retry (from the RM's point of view), cleanup is not performed.
// This should be supported once YARN-2261 is completed.
// @Test (timeout = 30000)
public void testDeletionofStagingOnKillLastTry() throws IOException {
  conf.set(MRJobConfig.MAPREDUCE_JOB_DIR, stagingJobDir);
  fs = mock(FileSystem.class);
  when(fs.delete(any(Path.class), anyBoolean())).thenReturn(true);
  // Staging dir exists
  String user = UserGroupInformation.getCurrentUser().getShortUserName();
  Path stagingDir = MRApps.getStagingAreaDir(conf, user);
  when(fs.exists(stagingDir)).thenReturn(true);
  ApplicationId appId = ApplicationId.newInstance(System.currentTimeMillis(), 0);
  ApplicationAttemptId attemptId = ApplicationAttemptId.newInstance(appId, 1);
  JobId jobid = recordFactory.newRecordInstance(JobId.class);
  jobid.setAppId(appId);
  ContainerAllocator mockAlloc = mock(ContainerAllocator.class);
  // no retry
  MRAppMaster appMaster = new TestMRApp(attemptId, mockAlloc);
  appMaster.init(conf);
  assertTrue("appMaster.isLastAMRetry() is false", appMaster.isLastAMRetry());
  // simulate the process being killed
  MRAppMaster.MRAppMasterShutdownHook hook =
      new MRAppMaster.MRAppMasterShutdownHook(appMaster);
  hook.run();
  assertTrue("MRAppMaster isn't stopped",
      appMaster.isInState(Service.STATE.STOPPED));
  verify(fs).delete(stagingJobPath, true);
}
Use of org.apache.hadoop.mapreduce.v2.app.rm.ContainerAllocator in project hadoop by Apache.
The class TestStagingCleanup, method testPreservePatternMatchedAndFailedStaging().
@Test
public void testPreservePatternMatchedAndFailedStaging() throws IOException {
  conf.set(MRJobConfig.MAPREDUCE_JOB_DIR, stagingJobDir);
  // When PRESERVE_FILES_PATTERN and PRESERVE_FAILED_TASK_FILES are set,
  // files in the staging dir are always kept.
  conf.set(MRJobConfig.PRESERVE_FILES_PATTERN, "JobDir");
  conf.setBoolean(MRJobConfig.PRESERVE_FAILED_TASK_FILES, true);
  fs = mock(FileSystem.class);
  when(fs.delete(any(Path.class), anyBoolean())).thenReturn(true);
  // Staging dir exists
  String user = UserGroupInformation.getCurrentUser().getShortUserName();
  Path stagingDir = MRApps.getStagingAreaDir(conf, user);
  when(fs.exists(stagingDir)).thenReturn(true);
  ApplicationId appId = ApplicationId.newInstance(System.currentTimeMillis(), 0);
  ApplicationAttemptId attemptId = ApplicationAttemptId.newInstance(appId, 1);
  JobId jobid = recordFactory.newRecordInstance(JobId.class);
  jobid.setAppId(appId);
  ContainerAllocator mockAlloc = mock(ContainerAllocator.class);
  Assert.assertTrue(MRJobConfig.DEFAULT_MR_AM_MAX_ATTEMPTS > 1);
  MRAppMaster appMaster = new TestMRApp(attemptId, mockAlloc,
      JobStateInternal.RUNNING, MRJobConfig.DEFAULT_MR_AM_MAX_ATTEMPTS);
  appMaster.init(conf);
  appMaster.start();
  appMaster.shutDownJob();
  // Test whether notifyIsLastAMRetry was called
  Assert.assertEquals(true, ((TestMRApp) appMaster).getTestIsLastAMRetry());
  verify(fs, times(0)).delete(stagingJobPath, true);
}
Use of org.apache.hadoop.mapreduce.v2.app.rm.ContainerAllocator in project hadoop by Apache.
The class TestRMContainerAllocator, method testReportedAppProgressWithOnlyMaps().
@Test
public void testReportedAppProgressWithOnlyMaps() throws Exception {
  LOG.info("Running testReportedAppProgressWithOnlyMaps");
  Configuration conf = new Configuration();
  final MyResourceManager rm = new MyResourceManager(conf);
  rm.start();
  DrainDispatcher rmDispatcher =
      (DrainDispatcher) rm.getRMContext().getDispatcher();
  // Submit the application
  RMApp rmApp = rm.submitApp(1024);
  rmDispatcher.await();
  MockNM amNodeManager = rm.registerNode("amNM:1234", 11264);
  amNodeManager.nodeHeartbeat(true);
  rmDispatcher.await();
  final ApplicationAttemptId appAttemptId =
      rmApp.getCurrentAppAttempt().getAppAttemptId();
  rm.sendAMLaunched(appAttemptId);
  rmDispatcher.await();
  MRApp mrApp = new MRApp(appAttemptId,
      ContainerId.newContainerId(appAttemptId, 0), 10, 0, false,
      this.getClass().getName(), true, 1) {
    @Override
    protected Dispatcher createDispatcher() {
      return new DrainDispatcher();
    }

    protected ContainerAllocator createContainerAllocator(
        ClientService clientService, AppContext context) {
      return new MyContainerAllocator(rm, appAttemptId, context);
    }
  };
  Assert.assertEquals(0.0, rmApp.getProgress(), 0.0);
  mrApp.submit(conf);
  Job job = mrApp.getContext().getAllJobs().entrySet().iterator().next().getValue();
  DrainDispatcher amDispatcher = (DrainDispatcher) mrApp.getDispatcher();
  MyContainerAllocator allocator = (MyContainerAllocator) mrApp.getContainerAllocator();
  mrApp.waitForInternalState((JobImpl) job, JobStateInternal.RUNNING);
  amDispatcher.await();
  // Wait till all map-attempts request for containers
  for (Task t : job.getTasks().values()) {
    mrApp.waitForInternalState(
        (TaskAttemptImpl) t.getAttempts().values().iterator().next(),
        TaskAttemptStateInternal.UNASSIGNED);
  }
  amDispatcher.await();
  allocator.schedule();
  rmDispatcher.await();
  amNodeManager.nodeHeartbeat(true);
  rmDispatcher.await();
  allocator.schedule();
  rmDispatcher.await();
  // Wait for all map-tasks to be running
  for (Task t : job.getTasks().values()) {
    mrApp.waitForState(t, TaskState.RUNNING);
  }
  // Send heartbeat
  allocator.schedule();
  rmDispatcher.await();
  Assert.assertEquals(0.05f, job.getProgress(), 0.001f);
  Assert.assertEquals(0.05f, rmApp.getProgress(), 0.001f);
  Iterator<Task> it = job.getTasks().values().iterator();
  // Finish off 1 map so that map-progress is 10%
  finishNextNTasks(rmDispatcher, amNodeManager, mrApp, it, 1);
  allocator.schedule();
  rmDispatcher.await();
  Assert.assertEquals(0.14f, job.getProgress(), 0.001f);
  Assert.assertEquals(0.14f, rmApp.getProgress(), 0.001f);
  // Finish off 5 more maps so that map-progress is 60%
  finishNextNTasks(rmDispatcher, amNodeManager, mrApp, it, 5);
  allocator.schedule();
  rmDispatcher.await();
  Assert.assertEquals(0.59f, job.getProgress(), 0.001f);
  Assert.assertEquals(0.59f, rmApp.getProgress(), 0.001f);
  // Finish off the remaining maps so that map-progress is 100%
  finishNextNTasks(rmDispatcher, amNodeManager, mrApp, it, 4);
  allocator.schedule();
  rmDispatcher.await();
  Assert.assertEquals(0.95f, job.getProgress(), 0.001f);
  Assert.assertEquals(0.95f, rmApp.getProgress(), 0.001f);
}
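The asserted progress values follow from how the AM weights job progress. Assuming a split of roughly 0.05 for setup and 0.90 for the map phase when there are no reducers (an assumption about JobImpl's internal weighting, not code taken from Hadoop), a quick back-of-the-envelope check reproduces the numbers used above:

// Back-of-the-envelope check of the asserted progress values.
// The 0.05/0.90 weighting is an assumption, not copied from JobImpl.
public class ProgressCheck {
  public static void main(String[] args) {
    float setupWeight = 0.05f;
    float mapWeight = 0.90f;   // maps take the whole 0.9 when reduces == 0
    int totalMaps = 10;
    for (int done : new int[] { 0, 1, 6, 10 }) {
      System.out.printf("%d/%d maps done -> progress %.2f%n",
          done, totalMaps, setupWeight + mapWeight * done / totalMaps);
    }
    // Prints 0.05, 0.14, 0.59 and 0.95, matching the assertions above.
  }
}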
Use of org.apache.hadoop.mapreduce.v2.app.rm.ContainerAllocator in project hadoop by Apache.
The class TestRMContainerAllocator, method testHandlingFinishedContainers().
/**
* MAPREDUCE-6771. Test if RMContainerAllocator generates the events in the
* right order while processing finished containers.
*/
@Test
public void testHandlingFinishedContainers() {
  EventHandler eventHandler = mock(EventHandler.class);
  AppContext context = mock(RunningAppContext.class);
  when(context.getClock()).thenReturn(new ControlledClock());
  when(context.getClusterInfo()).thenReturn(
      new ClusterInfo(Resource.newInstance(10240, 1)));
  when(context.getEventHandler()).thenReturn(eventHandler);
  RMContainerAllocator containerAllocator =
      new RMContainerAllocatorForFinishedContainer(null, context,
          mock(AMPreemptionPolicy.class));
  ContainerStatus finishedContainer = ContainerStatus.newInstance(
      mock(ContainerId.class), ContainerState.COMPLETE, "", 0);
  containerAllocator.processFinishedContainer(finishedContainer);
  InOrder inOrder = inOrder(eventHandler);
  inOrder.verify(eventHandler).handle(isA(TaskAttemptDiagnosticsUpdateEvent.class));
  inOrder.verify(eventHandler).handle(isA(TaskAttemptEvent.class));
  inOrder.verifyNoMoreInteractions();
}
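The ordering check relies on Mockito's InOrder API. A tiny, self-contained sketch of the same verification pattern on a plain List mock (purely illustrative, with no Hadoop dependencies):

// Illustrative only: the Mockito InOrder pattern used above, in isolation.
import static org.mockito.Mockito.inOrder;
import static org.mockito.Mockito.mock;

import java.util.List;
import org.mockito.InOrder;

public class InOrderSketch {
  @SuppressWarnings("unchecked")
  public static void main(String[] args) {
    List<String> events = mock(List.class);
    events.add("diagnostics-update");   // first event
    events.add("attempt-event");        // second event

    InOrder inOrder = inOrder(events);
    inOrder.verify(events).add("diagnostics-update");
    inOrder.verify(events).add("attempt-event");
    inOrder.verifyNoMoreInteractions();  // nothing else happened on the mock
  }
}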