Use of org.apache.hadoop.yarn.event.EventHandler in project hadoop by apache.
From the class TestContainerLaunch, the method testCallFailureWithNullLocalizedResources:
@SuppressWarnings("rawtypes")
@Test(timeout = 10000)
public void testCallFailureWithNullLocalizedResources() {
  Container container = mock(Container.class);
  when(container.getContainerId()).thenReturn(
      ContainerId.newContainerId(
          ApplicationAttemptId.newInstance(
              ApplicationId.newInstance(System.currentTimeMillis(), 1), 1), 1));
  ContainerLaunchContext clc = mock(ContainerLaunchContext.class);
  when(clc.getCommands()).thenReturn(Collections.<String>emptyList());
  when(container.getLaunchContext()).thenReturn(clc);
  when(container.getLocalizedResources()).thenReturn(null);
  Dispatcher dispatcher = mock(Dispatcher.class);
  EventHandler<Event> eventHandler = new EventHandler<Event>() {
    @Override
    public void handle(Event event) {
      Assert.assertTrue(event instanceof ContainerExitEvent);
      ContainerExitEvent exitEvent = (ContainerExitEvent) event;
      Assert.assertEquals(ContainerEventType.CONTAINER_EXITED_WITH_FAILURE,
          exitEvent.getType());
    }
  };
  when(dispatcher.getEventHandler()).thenReturn(eventHandler);
  ContainerLaunch launch = new ContainerLaunch(context, new Configuration(),
      dispatcher, exec, null, container, dirsHandler, containerManager);
  launch.call();
}
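Since EventHandler declares a single handle method, the anonymous class above can also be written as a lambda on Java 8 and later. A minimal sketch of the equivalent handler, with the assertions unchanged from the test:

  EventHandler<Event> eventHandler = event -> {
    // Expect the launch to fail and report CONTAINER_EXITED_WITH_FAILURE.
    Assert.assertTrue(event instanceof ContainerExitEvent);
    Assert.assertEquals(ContainerEventType.CONTAINER_EXITED_WITH_FAILURE,
        ((ContainerExitEvent) event).getType());
  };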
Use of org.apache.hadoop.yarn.event.EventHandler in project hadoop by apache.
From the class TestContainerLaunch, the method testPrependDistcache:
@Test
public void testPrependDistcache() throws Exception {
  // Test is only relevant on Windows
  assumeWindows();
  ContainerLaunchContext containerLaunchContext =
      recordFactory.newRecordInstance(ContainerLaunchContext.class);
  ApplicationId appId = ApplicationId.newInstance(0, 0);
  ApplicationAttemptId appAttemptId = ApplicationAttemptId.newInstance(appId, 1);
  ContainerId cId = ContainerId.newContainerId(appAttemptId, 0);
  Map<String, String> userSetEnv = new HashMap<String, String>();
  userSetEnv.put(Environment.CONTAINER_ID.name(), "user_set_container_id");
  userSetEnv.put(Environment.NM_HOST.name(), "user_set_NM_HOST");
  userSetEnv.put(Environment.NM_PORT.name(), "user_set_NM_PORT");
  userSetEnv.put(Environment.NM_HTTP_PORT.name(), "user_set_NM_HTTP_PORT");
  userSetEnv.put(Environment.LOCAL_DIRS.name(), "user_set_LOCAL_DIR");
  userSetEnv.put(Environment.USER.key(), "user_set_" + Environment.USER.key());
  userSetEnv.put(Environment.LOGNAME.name(), "user_set_LOGNAME");
  userSetEnv.put(Environment.PWD.name(), "user_set_PWD");
  userSetEnv.put(Environment.HOME.name(), "user_set_HOME");
  userSetEnv.put(Environment.CLASSPATH.name(), "APATH");
  containerLaunchContext.setEnvironment(userSetEnv);
  Container container = mock(Container.class);
  when(container.getContainerId()).thenReturn(cId);
  when(container.getLaunchContext()).thenReturn(containerLaunchContext);
  when(container.getLocalizedResources()).thenReturn(null);
  Dispatcher dispatcher = mock(Dispatcher.class);
  EventHandler<Event> eventHandler = new EventHandler<Event>() {
    @Override
    public void handle(Event event) {
      Assert.assertTrue(event instanceof ContainerExitEvent);
      ContainerExitEvent exitEvent = (ContainerExitEvent) event;
      Assert.assertEquals(ContainerEventType.CONTAINER_EXITED_WITH_FAILURE,
          exitEvent.getType());
    }
  };
  when(dispatcher.getEventHandler()).thenReturn(eventHandler);
  Configuration conf = new Configuration();
  ContainerLaunch launch = new ContainerLaunch(distContext, conf, dispatcher,
      exec, null, container, dirsHandler, containerManager);
  String testDir = System.getProperty("test.build.data", "target/test-dir");
  Path pwd = new Path(testDir);
  List<Path> appDirs = new ArrayList<Path>();
  List<String> userLocalDirs = new ArrayList<>();
  List<String> containerLogs = new ArrayList<String>();
  Map<Path, List<String>> resources = new HashMap<Path, List<String>>();
  Path userjar = new Path("user.jar");
  List<String> lpaths = new ArrayList<String>();
  lpaths.add("userjarlink.jar");
  resources.put(userjar, lpaths);
  Path nmp = new Path(testDir);
  launch.sanitizeEnv(userSetEnv, pwd, appDirs, userLocalDirs, containerLogs,
      resources, nmp);
  List<String> result =
      getJarManifestClasspath(userSetEnv.get(Environment.CLASSPATH.name()));
  Assert.assertTrue(result.size() > 1);
  Assert.assertTrue(result.get(result.size() - 1).endsWith("userjarlink.jar"));
  // Then, with user classpath first
  userSetEnv.put(Environment.CLASSPATH_PREPEND_DISTCACHE.name(), "true");
  cId = ContainerId.newContainerId(appAttemptId, 1);
  when(container.getContainerId()).thenReturn(cId);
  launch = new ContainerLaunch(distContext, conf, dispatcher, exec, null,
      container, dirsHandler, containerManager);
  launch.sanitizeEnv(userSetEnv, pwd, appDirs, userLocalDirs, containerLogs,
      resources, nmp);
  result = getJarManifestClasspath(userSetEnv.get(Environment.CLASSPATH.name()));
  Assert.assertTrue(result.size() > 1);
  Assert.assertTrue(result.get(0).endsWith("userjarlink.jar"));
}
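The assertions read the effective classpath back through getJarManifestClasspath, whose body is not shown here. On Windows the node manager folds the long classpath into a manifest-only jar, so a helper of this kind would presumably open that jar and split its Class-Path attribute. The sketch below is a hypothetical reconstruction under that assumption (readManifestClasspath is a placeholder name, not the project's actual implementation):

  import java.io.File;
  import java.io.IOException;
  import java.util.Arrays;
  import java.util.List;
  import java.util.jar.Attributes;
  import java.util.jar.JarFile;

  // Hypothetical helper: open the manifest-only classpath jar whose path is
  // stored in the CLASSPATH environment value and split its Class-Path
  // attribute into individual entries.
  static List<String> readManifestClasspath(String classpathJar) throws IOException {
    try (JarFile jar = new JarFile(new File(classpathJar))) {
      String cp = jar.getManifest().getMainAttributes()
          .getValue(Attributes.Name.CLASS_PATH);
      return Arrays.asList(cp.split("\\s+"));
    }
  }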
Use of org.apache.hadoop.yarn.event.EventHandler in project hadoop by apache.
From the class TestLocalContainerLauncher, the method testKillJob:
@SuppressWarnings("rawtypes")
@Test(timeout = 10000)
public void testKillJob() throws Exception {
  JobConf conf = new JobConf();
  AppContext context = mock(AppContext.class);
  // a simple event handler solely to detect the container cleaned event
  final CountDownLatch isDone = new CountDownLatch(1);
  EventHandler<Event> handler = new EventHandler<Event>() {
    @Override
    public void handle(Event event) {
      LOG.info("handling event " + event.getClass()
          + " with type " + event.getType());
      if (event instanceof TaskAttemptEvent) {
        if (event.getType() == TaskAttemptEventType.TA_CONTAINER_CLEANED) {
          isDone.countDown();
        }
      }
    }
  };
  when(context.getEventHandler()).thenReturn(handler);
  // create and start the launcher
  LocalContainerLauncher launcher =
      new LocalContainerLauncher(context, mock(TaskUmbilicalProtocol.class));
  launcher.init(conf);
  launcher.start();
  // create mocked job, task, and task attempt
  // a single-mapper job
  JobId jobId = MRBuilderUtils.newJobId(System.currentTimeMillis(), 1, 1);
  TaskId taskId = MRBuilderUtils.newTaskId(jobId, 1, TaskType.MAP);
  TaskAttemptId taId = MRBuilderUtils.newTaskAttemptId(taskId, 0);
  Job job = mock(Job.class);
  when(job.getTotalMaps()).thenReturn(1);
  when(job.getTotalReduces()).thenReturn(0);
  Map<JobId, Job> jobs = new HashMap<JobId, Job>();
  jobs.put(jobId, job);
  // app context returns the one and only job
  when(context.getAllJobs()).thenReturn(jobs);
  org.apache.hadoop.mapreduce.v2.app.job.Task ytask =
      mock(org.apache.hadoop.mapreduce.v2.app.job.Task.class);
  when(ytask.getType()).thenReturn(TaskType.MAP);
  when(job.getTask(taskId)).thenReturn(ytask);
  // create a sleeping mapper that runs beyond the test timeout
  MapTask mapTask = mock(MapTask.class);
  when(mapTask.isMapOrReduce()).thenReturn(true);
  when(mapTask.isMapTask()).thenReturn(true);
  TaskAttemptID taskID = TypeConverter.fromYarn(taId);
  when(mapTask.getTaskID()).thenReturn(taskID);
  when(mapTask.getJobID()).thenReturn(taskID.getJobID());
  doAnswer(new Answer<Void>() {
    @Override
    public Void answer(InvocationOnMock invocation) throws Throwable {
      // sleep for a long time
      LOG.info("sleeping for 5 minutes...");
      Thread.sleep(5 * 60 * 1000);
      return null;
    }
  }).when(mapTask).run(isA(JobConf.class), isA(TaskUmbilicalProtocol.class));
  // pump in a task attempt launch event
  ContainerLauncherEvent launchEvent =
      new ContainerRemoteLaunchEvent(taId, null, createMockContainer(), mapTask);
  launcher.handle(launchEvent);
  Thread.sleep(200);
  // now pump in a container clean-up event
  ContainerLauncherEvent cleanupEvent = new ContainerLauncherEvent(taId, null, null, null,
      ContainerLauncher.EventType.CONTAINER_REMOTE_CLEANUP);
  launcher.handle(cleanupEvent);
  // wait for the event to fire: this should be received promptly
  isDone.await();
  launcher.close();
}
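Because the test carries a 10-second timeout, the unbounded isDone.await() relies on JUnit to abort a hung run. A bounded wait with an explicit assertion is a common alternative; a minimal sketch, assuming a java.util.concurrent.TimeUnit import:

  // Fail with a clear message instead of relying solely on the @Test timeout.
  Assert.assertTrue("did not receive TA_CONTAINER_CLEANED in time",
      isDone.await(10, TimeUnit.SECONDS));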
Use of org.apache.hadoop.yarn.event.EventHandler in project hadoop by apache.
From the class TestShuffleProvider, the method testShuffleProviders:
@Test
public void testShuffleProviders() throws Exception {
  ApplicationId appId = ApplicationId.newInstance(1, 1);
  JobId jobId = MRBuilderUtils.newJobId(appId, 1);
  TaskId taskId = MRBuilderUtils.newTaskId(jobId, 1, TaskType.MAP);
  Path jobFile = mock(Path.class);
  EventHandler eventHandler = mock(EventHandler.class);
  TaskAttemptListener taListener = mock(TaskAttemptListener.class);
  when(taListener.getAddress()).thenReturn(new InetSocketAddress("localhost", 0));
  JobConf jobConf = new JobConf();
  jobConf.setClass("fs.file.impl", StubbedFS.class, FileSystem.class);
  jobConf.setBoolean("fs.file.impl.disable.cache", true);
  jobConf.set(JobConf.MAPRED_MAP_TASK_ENV, "");
  jobConf.set(YarnConfiguration.NM_AUX_SERVICES,
      TestShuffleHandler1.MAPREDUCE_TEST_SHUFFLE_SERVICEID + ","
          + TestShuffleHandler2.MAPREDUCE_TEST_SHUFFLE_SERVICEID);
  String serviceName = TestShuffleHandler1.MAPREDUCE_TEST_SHUFFLE_SERVICEID;
  String serviceStr = String.format(YarnConfiguration.NM_AUX_SERVICE_FMT, serviceName);
  jobConf.set(serviceStr, TestShuffleHandler1.class.getName());
  serviceName = TestShuffleHandler2.MAPREDUCE_TEST_SHUFFLE_SERVICEID;
  serviceStr = String.format(YarnConfiguration.NM_AUX_SERVICE_FMT, serviceName);
  jobConf.set(serviceStr, TestShuffleHandler2.class.getName());
  jobConf.set(MRJobConfig.MAPREDUCE_JOB_SHUFFLE_PROVIDER_SERVICES,
      TestShuffleHandler1.MAPREDUCE_TEST_SHUFFLE_SERVICEID + ","
          + TestShuffleHandler2.MAPREDUCE_TEST_SHUFFLE_SERVICEID);
  Credentials credentials = new Credentials();
  Token<JobTokenIdentifier> jobToken = new Token<JobTokenIdentifier>(
      ("tokenid").getBytes(), ("tokenpw").getBytes(),
      new Text("tokenkind"), new Text("tokenservice"));
  TaskAttemptImpl taImpl = new MapTaskAttemptImpl(taskId, 1, eventHandler, jobFile, 1,
      mock(TaskSplitMetaInfo.class), jobConf, taListener, jobToken, credentials,
      SystemClock.getInstance(), null);
  jobConf.set(MRJobConfig.APPLICATION_ATTEMPT_ID, taImpl.getID().toString());
  ContainerLaunchContext launchCtx = TaskAttemptImpl.createContainerLaunchContext(null,
      jobConf, jobToken, taImpl.createRemoteTask(), TypeConverter.fromYarn(jobId),
      mock(WrappedJvmID.class), taListener, credentials);
  Map<String, ByteBuffer> serviceDataMap = launchCtx.getServiceData();
  Assert.assertNotNull("TestShuffleHandler1 is missing",
      serviceDataMap.get(TestShuffleHandler1.MAPREDUCE_TEST_SHUFFLE_SERVICEID));
  Assert.assertNotNull("TestShuffleHandler2 is missing",
      serviceDataMap.get(TestShuffleHandler2.MAPREDUCE_TEST_SHUFFLE_SERVICEID));
  // 2 that we entered + 1 for the built-in shuffle-provider
  Assert.assertTrue("mismatch number of services in map", serviceDataMap.size() == 3);
}
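The aux-service wiring in this test mirrors how a custom shuffle provider is registered with the node manager configuration: declare the service id under NM_AUX_SERVICES, then map that id to the implementing class via NM_AUX_SERVICE_FMT. A minimal sketch, where MyShuffleProvider and the id my_shuffle are placeholder assumptions:

  Configuration conf = new Configuration();
  // Declare the auxiliary service id ...
  conf.set(YarnConfiguration.NM_AUX_SERVICES, "my_shuffle");
  // ... and map that id to the class implementing the service.
  conf.set(String.format(YarnConfiguration.NM_AUX_SERVICE_FMT, "my_shuffle"),
      MyShuffleProvider.class.getName());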
Use of org.apache.hadoop.yarn.event.EventHandler in project hadoop by apache.
From the class TestTaskAttempt, the method testSingleRackRequest:
@Test
public void testSingleRackRequest() throws Exception {
  TaskAttemptImpl.RequestContainerTransition rct =
      new TaskAttemptImpl.RequestContainerTransition(false);
  EventHandler eventHandler = mock(EventHandler.class);
  String[] hosts = new String[3];
  hosts[0] = "host1";
  hosts[1] = "host2";
  hosts[2] = "host3";
  TaskSplitMetaInfo splitInfo = new TaskSplitMetaInfo(hosts, 0, 128 * 1024 * 1024L);
  TaskAttemptImpl mockTaskAttempt = createMapTaskAttemptImplForTest(eventHandler, splitInfo);
  TaskAttemptEvent mockTAEvent = mock(TaskAttemptEvent.class);
  rct.transition(mockTaskAttempt, mockTAEvent);
  ArgumentCaptor<Event> arg = ArgumentCaptor.forClass(Event.class);
  verify(eventHandler, times(2)).handle(arg.capture());
  if (!(arg.getAllValues().get(1) instanceof ContainerRequestEvent)) {
    Assert.fail("Second Event not of type ContainerRequestEvent");
  }
  ContainerRequestEvent cre = (ContainerRequestEvent) arg.getAllValues().get(1);
  String[] requestedRacks = cre.getRacks();
  // Only a single occurrence of /DefaultRack
  assertEquals(1, requestedRacks.length);
}
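All three split hosts resolve to the same default rack, so the container request carries that rack only once. A minimal sketch of the de-duplication idea (resolveRack is a placeholder; the actual resolution in Hadoop goes through the configured rack-awareness mechanism):

  // Collect the distinct racks for a split's hosts; a LinkedHashSet keeps
  // insertion order and drops duplicates, so host1/host2/host3 on the same
  // rack contribute a single entry.
  Set<String> racks = new LinkedHashSet<>();
  for (String host : hosts) {
    racks.add(resolveRack(host)); // placeholder, e.g. always "/default-rack"
  }
  String[] requested = racks.toArray(new String[0]);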