Use of org.apache.hadoop.yarn.server.MiniYARNCluster in project hadoop by apache.
From the class TestYarnClient, method testCreateReservation.
@Test
public void testCreateReservation() throws Exception {
  MiniYARNCluster cluster = setupMiniYARNCluster();
  YarnClient client = setupYarnClient(cluster);
  try {
    Clock clock = new UTCClock();
    long arrival = clock.getTime();
    long duration = 60000;
    long deadline = (long) (arrival + 1.05 * duration);
    ReservationSubmissionRequest sRequest =
        submitReservationTestHelper(client, arrival, deadline, duration);
    // Submit the reservation again with the same request and make sure it
    // passes.
    client.submitReservation(sRequest);
    // Submit the reservation with the same reservation id but different
    // reservation definition, and ensure YarnException is thrown.
    arrival = clock.getTime();
    ReservationDefinition rDef = sRequest.getReservationDefinition();
    rDef.setArrival(arrival + duration);
    sRequest.setReservationDefinition(rDef);
    try {
      client.submitReservation(sRequest);
      Assert.fail("Reservation submission should fail if a duplicate "
          + "reservation id is used, but the reservation definition has been "
          + "updated.");
    } catch (Exception e) {
      Assert.assertTrue(e instanceof YarnException);
    }
  } finally {
    // clean-up
    if (client != null) {
      client.stop();
    }
    cluster.stop();
  }
}
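The helpers setupMiniYARNCluster() and setupYarnClient() are not shown on this page. A minimal, hypothetical sketch of what they could look like follows; the queue setup via ReservationSystemTestUtil, the RM_RESERVATION_SYSTEM_ENABLE flag, and the cluster name are assumptions for illustration, not the actual TestYarnClient code.

// Hypothetical sketch, not the actual TestYarnClient helpers: bring up a small
// MiniYARNCluster with the RM reservation system enabled, then build a
// YarnClient against the cluster's effective configuration.
private MiniYARNCluster setupMiniYARNCluster() throws Exception {
  CapacitySchedulerConfiguration schedConf = new CapacitySchedulerConfiguration();
  // Assumed test utility that defines the reservable queue used by these tests.
  ReservationSystemTestUtil.setupQueueConfiguration(schedConf);
  schedConf.setBoolean(YarnConfiguration.RM_RESERVATION_SYSTEM_ENABLE, true);
  MiniYARNCluster cluster = new MiniYARNCluster("testReservationAPIs", 2, 1, 1);
  cluster.init(schedConf);
  cluster.start();
  return cluster;
}

private YarnClient setupYarnClient(MiniYARNCluster cluster) {
  // Use the configuration the running cluster actually bound to.
  YarnClient client = YarnClient.createYarnClient();
  client.init(cluster.getConfig());
  client.start();
  return client;
}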
Use of org.apache.hadoop.yarn.server.MiniYARNCluster in project hadoop by apache.
From the class TestYarnClient, method testListReservationsByTimeInterval.
@Test
public void testListReservationsByTimeInterval() throws Exception {
  MiniYARNCluster cluster = setupMiniYARNCluster();
  YarnClient client = setupYarnClient(cluster);
  try {
    Clock clock = new UTCClock();
    long arrival = clock.getTime();
    long duration = 60000;
    long deadline = (long) (arrival + 1.05 * duration);
    ReservationSubmissionRequest sRequest =
        submitReservationTestHelper(client, arrival, deadline, duration);
    // List reservations, search by a point in time within the reservation
    // range.
    arrival = clock.getTime();
    ReservationId reservationID = sRequest.getReservationId();
    ReservationListRequest request = ReservationListRequest.newInstance(
        ReservationSystemTestUtil.reservationQ, "", arrival + duration / 2,
        arrival + duration / 2, true);
    ReservationListResponse response = client.listReservations(request);
    Assert.assertNotNull(response);
    Assert.assertEquals(1, response.getReservationAllocationState().size());
    Assert.assertEquals(response.getReservationAllocationState().get(0)
        .getReservationId().getId(), reservationID.getId());
    // List reservations, search by time within reservation interval.
    request = ReservationListRequest.newInstance(
        ReservationSystemTestUtil.reservationQ, "", 1, Long.MAX_VALUE, true);
    response = client.listReservations(request);
    Assert.assertNotNull(response);
    Assert.assertEquals(1, response.getReservationAllocationState().size());
    Assert.assertEquals(response.getReservationAllocationState().get(0)
        .getReservationId().getId(), reservationID.getId());
    // Verify that the full resource allocations exist.
    Assert.assertTrue(response.getReservationAllocationState().get(0)
        .getResourceAllocationRequests().size() > 0);
    // Verify that the full RDL is returned.
    ReservationRequests reservationRequests =
        response.getReservationAllocationState().get(0)
            .getReservationDefinition().getReservationRequests();
    Assert.assertTrue(
        reservationRequests.getInterpreter().toString().equals("R_ALL"));
    Assert.assertTrue(reservationRequests.getReservationResources().get(0)
        .getDuration() == duration);
  } finally {
    // clean-up
    if (client != null) {
      client.stop();
    }
    cluster.stop();
  }
}
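submitReservationTestHelper() is also not shown here. A hedged sketch of how such a helper could create and submit a reservation follows; the createSimpleReservationRequest() utility and its parameters are assumptions for illustration.

// Hypothetical sketch of a submitReservationTestHelper(): ask the RM for a new
// ReservationId, build a simple reservation definition for the test queue,
// submit it, and return the request so the caller can reuse the same id.
private ReservationSubmissionRequest submitReservationTestHelper(
    YarnClient client, long arrival, long deadline, long duration)
    throws IOException, YarnException {
  ReservationId reservationId = client.createReservation().getReservationId();
  ReservationSubmissionRequest sRequest =
      ReservationSystemTestUtil.createSimpleReservationRequest(  // assumed test utility
          reservationId, 4, arrival, deadline, duration);
  ReservationSubmissionResponse response = client.submitReservation(sRequest);
  Assert.assertNotNull(response);
  return sRequest;
}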
Use of org.apache.hadoop.yarn.server.MiniYARNCluster in project hadoop by apache.
From the class TestDistributedScheduling, method doBefore.
@Before
public void doBefore() throws Exception {
  cluster = new MiniYARNCluster("testDistributedSchedulingE2E", 1, 1, 1);
  conf = new YarnConfiguration();
  conf.setBoolean(
      YarnConfiguration.OPPORTUNISTIC_CONTAINER_ALLOCATION_ENABLED, true);
  conf.setBoolean(YarnConfiguration.DIST_SCHEDULING_ENABLED, true);
  conf.setInt(
      YarnConfiguration.NM_OPPORTUNISTIC_CONTAINERS_MAX_QUEUE_LENGTH, 10);
  cluster.init(conf);
  cluster.start();
  yarnConf = cluster.getConfig();
  // the client has to connect to AMRMProxy
  yarnConf.set(YarnConfiguration.RM_SCHEDULER_ADDRESS,
      YarnConfiguration.DEFAULT_AMRM_PROXY_ADDRESS);
  rmClient = YarnClient.createYarnClient();
  rmClient.init(yarnConf);
  rmClient.start();
  // Submit application
  attemptId = createApp(rmClient, cluster, conf);
  appId = attemptId.getApplicationId();
  client = createAMRMProtocol(rmClient, appId, cluster, yarnConf);
}
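doBefore() leaves the cluster, the YarnClient, and the AM-RM proxy running, so a matching teardown is expected after each test. A plausible sketch is below; the doAfter() name and body are assumptions, not the actual TestDistributedScheduling code.

@After
public void doAfter() throws Exception {
  // Stop the client first so no further calls reach the cluster, then shut the
  // MiniYARNCluster down. The AM-RM proxy created by createAMRMProtocol would
  // also be released here (e.g. via RPC.stopProxy) if required.
  if (rmClient != null) {
    rmClient.stop();
  }
  if (cluster != null) {
    cluster.stop();
  }
}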
Use of org.apache.hadoop.yarn.server.MiniYARNCluster in project hadoop by apache.
From the class TestRMFailover, method testRMWebAppRedirect.
@Test
public void testRMWebAppRedirect()
    throws YarnException, InterruptedException, IOException {
  cluster = new MiniYARNCluster(TestRMFailover.class.getName(), 2, 0, 1, 1);
  conf.setBoolean(YarnConfiguration.AUTO_FAILOVER_ENABLED, false);
  cluster.init(conf);
  cluster.start();
  getAdminService(0).transitionToActive(req);
  String rm1Url = "http://0.0.0.0:18088";
  String rm2Url = "http://0.0.0.0:28088";
  String redirectURL = getRedirectURL(rm2Url);
  // if uri is null, RMWebAppFilter will append a slash at the trail of the
  // redirection url
  assertEquals(redirectURL, rm1Url + "/");
  redirectURL = getRedirectURL(rm2Url + "/metrics");
  assertEquals(redirectURL, rm1Url + "/metrics");
  redirectURL = getRedirectURL(rm2Url + "/jmx?param1=value1+x&param2=y");
  assertEquals(rm1Url + "/jmx?param1=value1+x&param2=y", redirectURL);
  // standby RM links /conf, /stacks, /logLevel, /static, /logs,
  // /cluster/cluster as well as the web service
  // /ws/v1/cluster/info should not be redirected to the active RM
  redirectURL = getRedirectURL(rm2Url + "/cluster/cluster");
  assertNull(redirectURL);
  redirectURL = getRedirectURL(rm2Url + "/conf");
  assertNull(redirectURL);
  redirectURL = getRedirectURL(rm2Url + "/stacks");
  assertNull(redirectURL);
  redirectURL = getRedirectURL(rm2Url + "/logLevel");
  assertNull(redirectURL);
  redirectURL = getRedirectURL(rm2Url + "/static");
  assertNull(redirectURL);
  redirectURL = getRedirectURL(rm2Url + "/logs");
  assertNull(redirectURL);
  redirectURL = getRedirectURL(rm2Url + "/ws/v1/cluster/info");
  assertNull(redirectURL);
  redirectURL = getRedirectURL(rm2Url + "/ws/v1/cluster/apps");
  assertEquals(redirectURL, rm1Url + "/ws/v1/cluster/apps");
  redirectURL = getRedirectURL(rm2Url + "/proxy/" + fakeAppId);
  assertNull(redirectURL);
  // Transition the active RM to standby, so both RMs are in standby mode.
  getAdminService(0).transitionToStandby(req);
  // RM2 is expected to send the httpRequest to itself.
  // The header field "Refresh" is expected to be set.
  redirectURL = getRefreshURL(rm2Url);
  assertTrue(redirectURL != null
      && redirectURL.contains(YarnWebParams.NEXT_REFRESH_INTERVAL)
      && redirectURL.contains(rm2Url));
}
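getRedirectURL() is a test helper whose implementation is not shown on this page. A minimal sketch of how it could work, assuming plain java.net APIs: request the URL without following redirects and return the Location header, or null when the standby RM serves the page itself.

// Hypothetical sketch of getRedirectURL(); getRefreshURL() would read the
// "Refresh" response header in the same way.
private static String getRedirectURL(String url) {
  try {
    HttpURLConnection conn = (HttpURLConnection) new URL(url).openConnection();
    conn.setInstanceFollowRedirects(false);  // inspect the redirect instead of following it
    int code = conn.getResponseCode();
    return (code >= 300 && code < 400) ? conn.getHeaderField("Location") : null;
  } catch (IOException e) {
    return null;
  }
}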
Use of org.apache.hadoop.yarn.server.MiniYARNCluster in project hadoop by apache.
From the class TestRMFailover, method setup.
@Before
public void setup() throws IOException {
  fakeAppId = ApplicationId.newInstance(System.currentTimeMillis(), 0);
  conf = new YarnConfiguration();
  conf.setBoolean(YarnConfiguration.RM_HA_ENABLED, true);
  conf.set(YarnConfiguration.RM_HA_IDS, RM1_NODE_ID + "," + RM2_NODE_ID);
  HATestUtil.setRpcAddressForRM(RM1_NODE_ID, RM1_PORT_BASE, conf);
  HATestUtil.setRpcAddressForRM(RM2_NODE_ID, RM2_PORT_BASE, conf);
  conf.setLong(YarnConfiguration.CLIENT_FAILOVER_SLEEPTIME_BASE_MS, 100L);
  conf.setBoolean(YarnConfiguration.YARN_MINICLUSTER_FIXED_PORTS, true);
  conf.setBoolean(YarnConfiguration.YARN_MINICLUSTER_USE_RPC, true);
  cluster = new MiniYARNCluster(TestRMFailover.class.getName(), 2, 1, 1, 1);
}
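Note that setup() only constructs the two-RM cluster; a test is then expected to init and start it and make one ResourceManager active, much as testRMWebAppRedirect does above. A hedged illustration follows; testFailoverSmoke is a made-up name, not an actual TestRMFailover test.

@Test
public void testFailoverSmoke() throws Exception {
  cluster.init(conf);
  cluster.start();
  // Bring RM 0 up as the active ResourceManager via the admin service.
  getAdminService(0).transitionToActive(req);
  assertFalse("RM never turned active", -1 == cluster.getActiveRMIndex());
  // ... exercise client failover between the two RMs here ...
  cluster.stop();
}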