Use of org.apache.hadoop.yarn.conf.YarnConfiguration in project hadoop by apache.
Class TestResourceMgrDelegate, method testGetRootQueues:
/**
 * Tests that getRootQueues makes a request for the (recursive) child queues.
 * @throws IOException
 */
@Test
public void testGetRootQueues() throws IOException, InterruptedException {
  final ApplicationClientProtocol applicationsManager =
      Mockito.mock(ApplicationClientProtocol.class);
  GetQueueInfoResponse response = Mockito.mock(GetQueueInfoResponse.class);
  org.apache.hadoop.yarn.api.records.QueueInfo queueInfo =
      Mockito.mock(org.apache.hadoop.yarn.api.records.QueueInfo.class);
  Mockito.when(response.getQueueInfo()).thenReturn(queueInfo);
  try {
    Mockito.when(applicationsManager.getQueueInfo(
        Mockito.any(GetQueueInfoRequest.class))).thenReturn(response);
  } catch (YarnException e) {
    throw new IOException(e);
  }
  ResourceMgrDelegate delegate = new ResourceMgrDelegate(new YarnConfiguration()) {

    @Override
    protected void serviceStart() throws Exception {
      Assert.assertTrue(this.client instanceof YarnClientImpl);
      ((YarnClientImpl) this.client).setRMClient(applicationsManager);
    }
  };
  delegate.getRootQueues();
  ArgumentCaptor<GetQueueInfoRequest> argument =
      ArgumentCaptor.forClass(GetQueueInfoRequest.class);
  try {
    Mockito.verify(applicationsManager).getQueueInfo(argument.capture());
  } catch (YarnException e) {
    throw new IOException(e);
  }
  Assert.assertTrue("Children of root queue not requested",
      argument.getValue().getIncludeChildQueues());
  Assert.assertTrue("Request wasn't to recurse through children",
      argument.getValue().getRecursive());
}
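For reference, the request shape those two assertions describe can also be built directly with the GetQueueInfoRequest static factory. A minimal sketch, where the queue name "root" and includeApplications=false are illustrative assumptions rather than part of the test above:

import org.apache.hadoop.yarn.api.protocolrecords.GetQueueInfoRequest;

GetQueueInfoRequest rootRequest = GetQueueInfoRequest.newInstance(
    "root",  // queue name -- illustrative assumption
    false,   // includeApplications -- illustrative assumption
    true,    // includeChildQueues, checked by the first assertion above
    true);   // recursive, checked by the second assertion above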
Use of org.apache.hadoop.yarn.conf.YarnConfiguration in project hadoop by apache.
Class TestYARNRunner, method setUp:
@Before
public void setUp() throws Exception {
  resourceMgrDelegate = mock(ResourceMgrDelegate.class);
  conf = new YarnConfiguration();
  conf.set(YarnConfiguration.RM_PRINCIPAL, "mapred/host@REALM");
  clientCache = new ClientCache(conf, resourceMgrDelegate);
  clientCache = spy(clientCache);
  yarnRunner = new YARNRunner(conf, resourceMgrDelegate, clientCache);
  yarnRunner = spy(yarnRunner);
  submissionContext = mock(ApplicationSubmissionContext.class);
  doAnswer(new Answer<ApplicationSubmissionContext>() {

    @Override
    public ApplicationSubmissionContext answer(InvocationOnMock invocation)
        throws Throwable {
      return submissionContext;
    }
  }).when(yarnRunner).createApplicationSubmissionContext(any(Configuration.class),
      any(String.class), any(Credentials.class));
  appId = ApplicationId.newInstance(System.currentTimeMillis(), 1);
  jobId = TypeConverter.fromYarn(appId);
  if (testWorkDir.exists()) {
    FileContext.getLocalFSFileContext().delete(new Path(testWorkDir.toString()), true);
  }
  testWorkDir.mkdirs();
}
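As a side note on the class this page tracks: YarnConfiguration is a regular Hadoop Configuration that additionally loads yarn-default.xml and yarn-site.xml from the classpath, so values set programmatically (as above) layer on top of those resources. A minimal sketch of reading a key back:

YarnConfiguration conf = new YarnConfiguration();
conf.set(YarnConfiguration.RM_PRINCIPAL, "mapred/host@REALM");
// Returns the value set above; for keys never set programmatically the
// lookup falls back to yarn-site.xml / yarn-default.xml, or null.
String principal = conf.get(YarnConfiguration.RM_PRINCIPAL);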
Use of org.apache.hadoop.yarn.conf.YarnConfiguration in project hadoop by apache.
Class TestApplicationHistoryClientService, method testApplications:
@Test
public void testApplications() throws IOException, YarnException {
  ApplicationId appId = ApplicationId.newInstance(0, 1);
  ApplicationId appId1 = ApplicationId.newInstance(0, 2);
  GetApplicationsRequest request = GetApplicationsRequest.newInstance();
  GetApplicationsResponse response = clientService.getApplications(request);
  List<ApplicationReport> appReport = response.getApplicationList();
  Assert.assertNotNull(appReport);
  Assert.assertEquals(appId, appReport.get(1).getApplicationId());
  Assert.assertEquals(appId1, appReport.get(0).getApplicationId());
  // Create a historyManager and set the maximum number of apps that can be
  // loaded to 1.
  Configuration conf = new YarnConfiguration();
  conf.setLong(YarnConfiguration.APPLICATION_HISTORY_MAX_APPS, 1);
  ApplicationHistoryManagerOnTimelineStore historyManager2 =
      new ApplicationHistoryManagerOnTimelineStore(dataManager,
          new ApplicationACLsManager(conf));
  historyManager2.init(conf);
  historyManager2.start();
  @SuppressWarnings("resource")
  ApplicationHistoryClientService clientService2 =
      new ApplicationHistoryClientService(historyManager2);
  response = clientService2.getApplications(request);
  appReport = response.getApplicationList();
  Assert.assertNotNull(appReport);
  Assert.assertTrue(appReport.size() == 1);
  // Expected to get the appReport for the application with appId1.
  Assert.assertEquals(appId1, appReport.get(0).getApplicationId());
}
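The knob exercised here, YarnConfiguration.APPLICATION_HISTORY_MAX_APPS, caps how many application reports the history manager returns, which is why the capped clientService2 hands back only the single report for appId1. A minimal sketch of setting and reading the cap (the fallback value 10000L below is only an illustrative default chosen for the sketch):

Configuration conf = new YarnConfiguration();
conf.setLong(YarnConfiguration.APPLICATION_HISTORY_MAX_APPS, 1);
// Read the cap back; 10000L is just a fallback picked for this sketch.
long maxApps = conf.getLong(YarnConfiguration.APPLICATION_HISTORY_MAX_APPS, 10000L);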
Use of org.apache.hadoop.yarn.conf.YarnConfiguration in project hadoop by apache.
Class TestApplicationHistoryManagerOnTimelineStore, method setup:
@Before
public void setup() throws Exception {
  // Only test the ACLs of the generic history.
  TimelineACLsManager aclsManager = new TimelineACLsManager(new YarnConfiguration());
  aclsManager.setTimelineStore(store);
  TimelineDataManager dataManager = new TimelineDataManager(store, aclsManager);
  dataManager.init(conf);
  ApplicationACLsManager appAclsManager = new ApplicationACLsManager(conf);
  historyManager =
      new ApplicationHistoryManagerOnTimelineStore(dataManager, appAclsManager);
  historyManager.init(conf);
  historyManager.start();
}
Use of org.apache.hadoop.yarn.conf.YarnConfiguration in project hadoop by apache.
Class TestApplicationHistoryServer, method testHostedUIs:
@Test(timeout = 240000)
public void testHostedUIs() throws Exception {
  ApplicationHistoryServer historyServer = new ApplicationHistoryServer();
  Configuration config = new YarnConfiguration();
  config.setClass(YarnConfiguration.TIMELINE_SERVICE_STORE,
      MemoryTimelineStore.class, TimelineStore.class);
  config.setClass(YarnConfiguration.TIMELINE_SERVICE_STATE_STORE_CLASS,
      MemoryTimelineStateStore.class, TimelineStateStore.class);
  config.set(YarnConfiguration.TIMELINE_SERVICE_WEBAPP_ADDRESS, "localhost:0");
  final String UI1 = "UI1";
  String connFileStr = "";
  File diskFile = new File("./pom.xml");
  String diskFileStr = readInputStream(new FileInputStream(diskFile));
  try {
    config.set(YarnConfiguration.TIMELINE_SERVICE_UI_NAMES, UI1);
    config.set(YarnConfiguration.TIMELINE_SERVICE_UI_WEB_PATH_PREFIX + UI1, "/" + UI1);
    config.set(YarnConfiguration.TIMELINE_SERVICE_UI_ON_DISK_PATH_PREFIX + UI1, "./");
    historyServer.init(config);
    historyServer.start();
    URL url = new URL("http://localhost:" + historyServer.getPort() + "/" + UI1
        + "/pom.xml");
    HttpURLConnection conn = (HttpURLConnection) url.openConnection();
    conn.connect();
    assertEquals(HttpURLConnection.HTTP_OK, conn.getResponseCode());
    connFileStr = readInputStream(conn.getInputStream());
  } finally {
    historyServer.stop();
  }
  assertEquals("Web file contents should be the same as on disk contents",
      diskFileStr, connFileStr);
}
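The hosted-UI settings above follow a per-UI suffix pattern: every name listed under TIMELINE_SERVICE_UI_NAMES gets its own web-path and on-disk-path key, formed by appending that name to the prefix constants. A minimal sketch for a hypothetical second UI, where the name "UI2" and the paths are assumptions for illustration:

Configuration config = new YarnConfiguration();
// Names of the UIs the ApplicationHistoryServer should host.
config.set(YarnConfiguration.TIMELINE_SERVICE_UI_NAMES, "UI1,UI2");
// Per-UI keys: prefix constant + UI name.
config.set(YarnConfiguration.TIMELINE_SERVICE_UI_WEB_PATH_PREFIX + "UI2", "/UI2");
config.set(YarnConfiguration.TIMELINE_SERVICE_UI_ON_DISK_PATH_PREFIX + "UI2",
    "/var/lib/ui2");  // directory whose contents are served under /UI2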