Use of org.apache.hadoop.yarn.client.api.YarnClient in project hadoop by apache.
The class TestYarnClient, method testSignalContainer.
@Test
public void testSignalContainer() throws Exception {
  Configuration conf = new Configuration();
  @SuppressWarnings("resource")
  final YarnClient client = new MockYarnClient();
  client.init(conf);
  client.start();
  ApplicationId applicationId = ApplicationId.newInstance(1234, 5);
  ApplicationAttemptId appAttemptId =
      ApplicationAttemptId.newInstance(applicationId, 1);
  ContainerId containerId = ContainerId.newContainerId(appAttemptId, 1);
  SignalContainerCommand command = SignalContainerCommand.OUTPUT_THREAD_DUMP;
  client.signalToContainer(containerId, command);
  final ArgumentCaptor<SignalContainerRequest> signalReqCaptor =
      ArgumentCaptor.forClass(SignalContainerRequest.class);
  verify(((MockYarnClient) client).getRMClient())
      .signalToContainer(signalReqCaptor.capture());
  SignalContainerRequest request = signalReqCaptor.getValue();
  Assert.assertEquals(containerId, request.getContainerId());
  Assert.assertEquals(command, request.getCommand());
}
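The signalToContainer call the test verifies can be issued against a real cluster in much the same way. A minimal sketch, assuming yarn-site.xml for a running cluster is on the classpath; the class name and the container ID string are made up, and the import for SignalContainerCommand assumes the Hadoop 2.8+/3.x package layout.

import org.apache.hadoop.yarn.api.records.ContainerId;
// Assumed package location for the command enum; adjust if your Hadoop line differs.
import org.apache.hadoop.yarn.api.records.SignalContainerCommand;
import org.apache.hadoop.yarn.client.api.YarnClient;
import org.apache.hadoop.yarn.conf.YarnConfiguration;

public class SignalContainerExample {
  public static void main(String[] args) throws Exception {
    // Assumes yarn-site.xml for a live cluster is on the classpath.
    YarnClient client = YarnClient.createYarnClient();
    client.init(new YarnConfiguration());
    client.start();
    try {
      // The container ID would normally come from an application report or
      // "yarn container -list <attemptId>"; this one is invented for the sketch.
      ContainerId containerId =
          ContainerId.fromString("container_1234567890123_0001_01_000002");
      // Ask the NodeManager hosting the container to log a thread dump.
      client.signalToContainer(containerId, SignalContainerCommand.OUTPUT_THREAD_DUMP);
    } finally {
      client.stop();
    }
  }
}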
Use of org.apache.hadoop.yarn.client.api.YarnClient in project hadoop by apache.
The class TestYarnClient, method testClientStop.
@Test
public void testClientStop() {
  Configuration conf = new Configuration();
  ResourceManager rm = new ResourceManager();
  rm.init(conf);
  rm.start();
  YarnClient client = YarnClient.createYarnClient();
  client.init(conf);
  client.start();
  client.stop();
  rm.stop();
}
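testClientStop exercises the standard Hadoop Service lifecycle that every YarnClient goes through: init, start, stop. A minimal sketch of the same lifecycle outside a test, with stop() guaranteed in a finally block; the class name and the assumption that the cluster configuration is on the classpath are mine.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.client.api.YarnClient;
import org.apache.hadoop.yarn.conf.YarnConfiguration;

public class YarnClientLifecycle {
  public static void main(String[] args) {
    // Assumes yarn-site.xml on the classpath points at a running ResourceManager.
    Configuration conf = new YarnConfiguration();
    YarnClient client = YarnClient.createYarnClient();
    client.init(conf);
    client.start();
    try {
      // ... issue client calls here ...
    } finally {
      // stop() is safe to call even if the service never fully started.
      client.stop();
    }
  }
}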
Use of org.apache.hadoop.yarn.client.api.YarnClient in project hadoop by apache.
The class TestYarnClient, method testGetApplications.
@Test(timeout = 10000)
public void testGetApplications() throws YarnException, IOException {
  Configuration conf = new Configuration();
  final YarnClient client = new MockYarnClient();
  client.init(conf);
  client.start();

  List<ApplicationReport> expectedReports =
      ((MockYarnClient) client).getReports();
  List<ApplicationReport> reports = client.getApplications();
  Assert.assertEquals(reports, expectedReports);

  Set<String> appTypes = new HashSet<String>();
  appTypes.add("YARN");
  appTypes.add("NON-YARN");
  reports = client.getApplications(appTypes, null);
  Assert.assertEquals(reports.size(), 2);
  Assert.assertTrue(
      (reports.get(0).getApplicationType().equals("YARN")
          && reports.get(1).getApplicationType().equals("NON-YARN"))
      || (reports.get(1).getApplicationType().equals("YARN")
          && reports.get(0).getApplicationType().equals("NON-YARN")));
  for (ApplicationReport report : reports) {
    Assert.assertTrue(expectedReports.contains(report));
  }

  EnumSet<YarnApplicationState> appStates =
      EnumSet.noneOf(YarnApplicationState.class);
  appStates.add(YarnApplicationState.FINISHED);
  appStates.add(YarnApplicationState.FAILED);
  reports = client.getApplications(null, appStates);
  Assert.assertEquals(reports.size(), 2);
  Assert.assertTrue(
      (reports.get(0).getApplicationType().equals("NON-YARN")
          && reports.get(1).getApplicationType().equals("NON-MAPREDUCE"))
      || (reports.get(1).getApplicationType().equals("NON-YARN")
          && reports.get(0).getApplicationType().equals("NON-MAPREDUCE")));
  for (ApplicationReport report : reports) {
    Assert.assertTrue(expectedReports.contains(report));
  }

  reports = client.getApplications(appTypes, appStates);
  Assert.assertEquals(reports.size(), 1);
  Assert.assertTrue(reports.get(0).getApplicationType().equals("NON-YARN"));
  for (ApplicationReport report : reports) {
    Assert.assertTrue(expectedReports.contains(report));
  }

  client.stop();
}
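The type and state filters exercised above map directly onto the public getApplications(Set, EnumSet) overload. A minimal sketch against a real cluster, assuming yarn-site.xml on the classpath; the "MAPREDUCE" application type and the class name are illustrative.

import java.util.Collections;
import java.util.EnumSet;
import java.util.List;
import java.util.Set;

import org.apache.hadoop.yarn.api.records.ApplicationReport;
import org.apache.hadoop.yarn.api.records.YarnApplicationState;
import org.apache.hadoop.yarn.client.api.YarnClient;
import org.apache.hadoop.yarn.conf.YarnConfiguration;

public class ListApplicationsExample {
  public static void main(String[] args) throws Exception {
    YarnClient client = YarnClient.createYarnClient();
    client.init(new YarnConfiguration());
    client.start();
    try {
      // Only MAPREDUCE applications that have already reached a terminal state.
      Set<String> types = Collections.singleton("MAPREDUCE");
      EnumSet<YarnApplicationState> states =
          EnumSet.of(YarnApplicationState.FINISHED, YarnApplicationState.FAILED,
              YarnApplicationState.KILLED);
      List<ApplicationReport> reports = client.getApplications(types, states);
      for (ApplicationReport report : reports) {
        System.out.println(report.getApplicationId() + " "
            + report.getApplicationType() + " " + report.getYarnApplicationState());
      }
    } finally {
      client.stop();
    }
  }
}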
Use of org.apache.hadoop.yarn.client.api.YarnClient in project hadoop by apache.
The class TestYarnCLI, method testGetQueueInfoPreemptionEnabled.
@Test
public void testGetQueueInfoPreemptionEnabled() throws Exception {
  CapacitySchedulerConfiguration conf = new CapacitySchedulerConfiguration();
  ReservationSystemTestUtil.setupQueueConfiguration(conf);
  conf.setClass(YarnConfiguration.RM_SCHEDULER, CapacityScheduler.class,
      ResourceScheduler.class);
  conf.setBoolean(YarnConfiguration.RM_SCHEDULER_ENABLE_MONITORS, true);
  conf.set(YarnConfiguration.RM_SCHEDULER_MONITOR_POLICIES,
      "org.apache.hadoop.yarn.server.resourcemanager.monitor.capacity."
          + "ProportionalCapacityPreemptionPolicy");
  MiniYARNCluster cluster = new MiniYARNCluster("testReservationAPIs", 2, 1, 1);
  YarnClient yarnClient = null;
  try {
    cluster.init(conf);
    cluster.start();
    final Configuration yarnConf = cluster.getConfig();
    yarnClient = YarnClient.createYarnClient();
    yarnClient.init(yarnConf);
    yarnClient.start();

    QueueCLI cli = new QueueCLI();
    cli.setClient(yarnClient);
    cli.setSysOutPrintStream(sysOut);
    cli.setSysErrPrintStream(sysErr);
    sysOutStream.reset();
    int result = cli.run(new String[] { "-status", "a1" });
    assertEquals(0, result);
    Assert.assertTrue(sysOutStream.toString().contains("Preemption : enabled"));
  } finally {
    // clean-up
    if (yarnClient != null) {
      yarnClient.stop();
    }
    cluster.stop();
    cluster.close();
  }
}
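The CLI output asserted above ultimately comes from the queue information that YarnClient exposes. A minimal sketch that queries a queue directly, assuming a configured cluster on the classpath; the queue name "a1" mirrors the test, the class name is illustrative, and the getPreemptionDisabled() accessor assumes Hadoop 2.7 or later.

import org.apache.hadoop.yarn.api.records.QueueInfo;
import org.apache.hadoop.yarn.client.api.YarnClient;
import org.apache.hadoop.yarn.conf.YarnConfiguration;

public class QueueInfoExample {
  public static void main(String[] args) throws Exception {
    YarnClient client = YarnClient.createYarnClient();
    client.init(new YarnConfiguration());
    client.start();
    try {
      // "a1" is the leaf queue queried by the test above; substitute your own.
      QueueInfo queue = client.getQueueInfo("a1");
      System.out.println("Queue: " + queue.getQueueName());
      System.out.println("State: " + queue.getQueueState());
      System.out.println("Capacity: " + queue.getCapacity());
      System.out.println("Current capacity: " + queue.getCurrentCapacity());
      // Backs the CLI's "Preemption : enabled/disabled" line (Hadoop 2.7+).
      System.out.println("Preemption disabled: " + queue.getPreemptionDisabled());
    } finally {
      client.stop();
    }
  }
}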
Use of org.apache.hadoop.yarn.client.api.YarnClient in project hadoop by apache.
The class TestDistributedShell, method testDSShell.
public void testDSShell(boolean haveDomain, boolean defaultFlow) throws Exception {
  String[] args = {
      "--jar", APPMASTER_JAR,
      "--num_containers", "2",
      "--shell_command", Shell.WINDOWS ? "dir" : "ls",
      "--master_memory", "512",
      "--master_vcores", "2",
      "--container_memory", "128",
      "--container_vcores", "1"
  };
  if (haveDomain) {
    String[] domainArgs = {
        "--domain", "TEST_DOMAIN",
        "--view_acls", "reader_user reader_group",
        "--modify_acls", "writer_user writer_group",
        "--create"
    };
    args = mergeArgs(args, domainArgs);
  }
  boolean isTestingTimelineV2 = false;
  if (timelineVersionWatcher.getTimelineVersion() == 2.0f) {
    isTestingTimelineV2 = true;
    if (!defaultFlow) {
      String[] flowArgs = {
          "--flow_name", "test_flow_name",
          "--flow_version", "test_flow_version",
          "--flow_run_id", "12345678"
      };
      args = mergeArgs(args, flowArgs);
    }
    LOG.info("Setup: Using timeline v2!");
  }

  LOG.info("Initializing DS Client");
  final Client client = new Client(new Configuration(yarnCluster.getConfig()));
  boolean initSuccess = client.init(args);
  Assert.assertTrue(initSuccess);

  LOG.info("Running DS Client");
  final AtomicBoolean result = new AtomicBoolean(false);
  Thread t = new Thread() {
    @Override
    public void run() {
      try {
        result.set(client.run());
      } catch (Exception e) {
        throw new RuntimeException(e);
      }
    }
  };
  t.start();

  YarnClient yarnClient = YarnClient.createYarnClient();
  yarnClient.init(new Configuration(yarnCluster.getConfig()));
  yarnClient.start();

  String hostName = NetUtils.getHostname();
  boolean verified = false;
  String errorMessage = "";
  ApplicationId appId = null;
  ApplicationReport appReport = null;
  while (!verified) {
    List<ApplicationReport> apps = yarnClient.getApplications();
    if (apps.size() == 0) {
      Thread.sleep(10);
      continue;
    }
    appReport = apps.get(0);
    appId = appReport.getApplicationId();
    if (appReport.getHost().equals("N/A")) {
      Thread.sleep(10);
      continue;
    }
    errorMessage = "Expected host name to start with '" + hostName
        + "', was '" + appReport.getHost()
        + "'. Expected rpc port to be '-1', was '"
        + appReport.getRpcPort() + "'.";
    if (checkHostname(appReport.getHost()) && appReport.getRpcPort() == -1) {
      verified = true;
    }
    if (appReport.getYarnApplicationState() == YarnApplicationState.FINISHED
        && appReport.getFinalApplicationStatus() != FinalApplicationStatus.UNDEFINED) {
      break;
    }
  }
  Assert.assertTrue(errorMessage, verified);
  t.join();
  LOG.info("Client run completed for testDSShell. Result=" + result);
  Assert.assertTrue(result.get());

  if (timelineVersionWatcher.getTimelineVersion() == 1.5f) {
    long scanInterval = conf.getLong(
        YarnConfiguration.TIMELINE_SERVICE_ENTITYGROUP_FS_STORE_SCAN_INTERVAL_SECONDS,
        YarnConfiguration.TIMELINE_SERVICE_ENTITYGROUP_FS_STORE_SCAN_INTERVAL_SECONDS_DEFAULT);
    Path doneDir = new Path(
        YarnConfiguration.TIMELINE_SERVICE_ENTITYGROUP_FS_STORE_DONE_DIR_DEFAULT);
    // Wait till the data is moved to done dir, or timeout and fail
    while (true) {
      RemoteIterator<FileStatus> iterApps = fs.listStatusIterator(doneDir);
      if (iterApps.hasNext()) {
        break;
      }
      Thread.sleep(scanInterval * 2);
    }
  }

  TimelineDomain domain = null;
  if (!isTestingTimelineV2) {
    checkTimelineV1(haveDomain);
  } else {
    checkTimelineV2(haveDomain, appId, defaultFlow, appReport);
  }
}
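The busy-wait over getApplications in the test generalizes to a small helper that polls getApplicationReport until the application reaches a terminal state. A minimal sketch under that assumption; the class and method names are illustrative, and the caller is assumed to already hold the ApplicationId.

import java.io.IOException;
import java.util.EnumSet;

import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.ApplicationReport;
import org.apache.hadoop.yarn.api.records.FinalApplicationStatus;
import org.apache.hadoop.yarn.api.records.YarnApplicationState;
import org.apache.hadoop.yarn.client.api.YarnClient;
import org.apache.hadoop.yarn.exceptions.YarnException;

public class AwaitCompletion {
  private static final EnumSet<YarnApplicationState> TERMINAL =
      EnumSet.of(YarnApplicationState.FINISHED, YarnApplicationState.FAILED,
          YarnApplicationState.KILLED);

  /** Polls the ResourceManager until the application leaves its running states. */
  public static FinalApplicationStatus awaitCompletion(YarnClient client,
      ApplicationId appId) throws YarnException, IOException, InterruptedException {
    while (true) {
      ApplicationReport report = client.getApplicationReport(appId);
      if (TERMINAL.contains(report.getYarnApplicationState())) {
        return report.getFinalApplicationStatus();
      }
      Thread.sleep(1000);  // back off briefly between polls
    }
  }
}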