Use of org.apache.hadoop.yarn.conf.YarnConfiguration in project hadoop by apache.
The class TestHedgingRequestRMFailoverProxyProvider, method testHedgingRequestProxyProvider.
@Test
public void testHedgingRequestProxyProvider() throws Exception {
  Configuration conf = new YarnConfiguration();
  conf.setBoolean(YarnConfiguration.RM_HA_ENABLED, true);
  conf.setBoolean(YarnConfiguration.AUTO_FAILOVER_ENABLED, false);
  conf.set(YarnConfiguration.RM_CLUSTER_ID, "cluster1");
  conf.set(YarnConfiguration.RM_HA_IDS, "rm1,rm2,rm3,rm4,rm5");
  conf.set(YarnConfiguration.CLIENT_FAILOVER_PROXY_PROVIDER,
      RequestHedgingRMFailoverProxyProvider.class.getName());
  conf.setLong(YarnConfiguration.RESOURCEMANAGER_CONNECT_RETRY_INTERVAL_MS, 2000);
  try (MiniYARNCluster cluster =
      new MiniYARNCluster("testHedgingRequestProxyProvider", 5, 0, 1, 1)) {
    HATestUtil.setRpcAddressForRM("rm1", 10000, conf);
    HATestUtil.setRpcAddressForRM("rm2", 20000, conf);
    HATestUtil.setRpcAddressForRM("rm3", 30000, conf);
    HATestUtil.setRpcAddressForRM("rm4", 40000, conf);
    HATestUtil.setRpcAddressForRM("rm5", 50000, conf);
    conf.setBoolean(YarnConfiguration.YARN_MINICLUSTER_FIXED_PORTS, true);
    cluster.init(conf);
    cluster.start();
    final YarnClient client = YarnClient.createYarnClient();
    client.init(conf);
    client.start();
    // Transition rm5 to active.
    long start = System.currentTimeMillis();
    makeRMActive(cluster, 4);
    validateActiveRM(client);
    long end = System.currentTimeMillis();
    System.out.println("Client call succeeded at " + end);
    // Should return the response fast.
    Assert.assertTrue(end - start <= 10000);
    // Transition rm5 to standby.
    cluster.getResourceManager(4).getRMContext().getRMAdminService()
        .transitionToStandby(new HAServiceProtocol.StateChangeRequestInfo(
            HAServiceProtocol.RequestSource.REQUEST_BY_USER));
    makeRMActive(cluster, 2);
    validateActiveRM(client);
  }
}
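Stripped of the MiniYARNCluster test harness, the client-side configuration above reduces to a handful of YarnConfiguration keys plus a YarnClient. The following is a minimal sketch, not taken from the Hadoop sources: it assumes two RMs named rm1 and rm2 with placeholder hostnames, and it assumes the per-RM RPC addresses follow the usual "<base key>.<rm-id>" HA suffix convention (the test above sets them via the HATestUtil.setRpcAddressForRM helper instead).

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.client.RequestHedgingRMFailoverProxyProvider;
import org.apache.hadoop.yarn.client.api.YarnClient;
import org.apache.hadoop.yarn.conf.YarnConfiguration;

public class HedgingClientSketch {
  public static void main(String[] args) {
    Configuration conf = new YarnConfiguration();
    conf.setBoolean(YarnConfiguration.RM_HA_ENABLED, true);
    conf.set(YarnConfiguration.RM_CLUSTER_ID, "cluster1");
    conf.set(YarnConfiguration.RM_HA_IDS, "rm1,rm2");
    // Hedge requests across all RMs instead of trying them one by one.
    conf.set(YarnConfiguration.CLIENT_FAILOVER_PROXY_PROVIDER,
        RequestHedgingRMFailoverProxyProvider.class.getName());
    // Placeholder addresses, using the assumed "<base key>.<rm-id>" suffix convention.
    conf.set(YarnConfiguration.RM_ADDRESS + ".rm1", "rm1.example.com:8032");
    conf.set(YarnConfiguration.RM_ADDRESS + ".rm2", "rm2.example.com:8032");

    YarnClient client = YarnClient.createYarnClient();
    client.init(conf);
    client.start();
    // ... use the client; the hedging provider contacts the RMs concurrently
    // and then sticks to whichever one answered as active ...
    client.stop();
  }
}

Because the hedging provider probes all configured RMs concurrently rather than failing over one at a time, the test above can expect a fast response even though four of its five RMs are never active.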
Use of org.apache.hadoop.yarn.conf.YarnConfiguration in project hadoop by apache.
The class TestTimelineClientV2Impl, method setup.
@Before
public void setup() {
  conf = new YarnConfiguration();
  conf.setBoolean(YarnConfiguration.TIMELINE_SERVICE_ENABLED, true);
  conf.setFloat(YarnConfiguration.TIMELINE_SERVICE_VERSION, 2.0f);
  conf.setInt(YarnConfiguration.NUMBER_OF_ASYNC_ENTITIES_TO_MERGE, 3);
  if (!currTestName.getMethodName().contains("testRetryOnConnectionFailure")) {
    client = createTimelineClient(conf);
  }
}
Use of org.apache.hadoop.yarn.conf.YarnConfiguration in project hadoop by apache.
The class TestAsyncDispatcher, method testDrainDispatcherDrainEventsOnStop.
// Test if drain dispatcher drains events on stop.
@SuppressWarnings({ "rawtypes" })
@Test(timeout = 10000)
public void testDrainDispatcherDrainEventsOnStop() throws Exception {
  YarnConfiguration conf = new YarnConfiguration();
  conf.setInt(YarnConfiguration.DISPATCHER_DRAIN_EVENTS_TIMEOUT, 2000);
  BlockingQueue<Event> queue = new LinkedBlockingQueue<Event>();
  DrainDispatcher disp = new DrainDispatcher(queue);
  disp.init(conf);
  disp.register(DummyType.class, new DummyHandler());
  disp.setDrainEventsOnStop();
  disp.start();
  disp.waitForEventThreadToWait();
  dispatchDummyEvents(disp, 2);
  disp.close();
  assertEquals(0, queue.size());
}
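The drain-on-stop behaviour exercised above is available on a plain AsyncDispatcher as well (DrainDispatcher extends it). Below is a minimal sketch, not from the Hadoop sources: the DemoEventType enum, DemoEvent class, and handler are hypothetical placeholders, while DISPATCHER_DRAIN_EVENTS_TIMEOUT, register, setDrainEventsOnStop, init, start, and stop are the same public API the test relies on.

import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.event.AbstractEvent;
import org.apache.hadoop.yarn.event.AsyncDispatcher;
import org.apache.hadoop.yarn.event.EventHandler;

public class DrainOnStopSketch {

  // Hypothetical event type and event class for the sketch.
  enum DemoEventType { PING }

  static class DemoEvent extends AbstractEvent<DemoEventType> {
    DemoEvent() {
      super(DemoEventType.PING);
    }
  }

  public static void main(String[] args) {
    YarnConfiguration conf = new YarnConfiguration();
    // Cap how long stop() may block while draining queued events.
    conf.setInt(YarnConfiguration.DISPATCHER_DRAIN_EVENTS_TIMEOUT, 2000);

    AsyncDispatcher dispatcher = new AsyncDispatcher();
    dispatcher.register(DemoEventType.class, new EventHandler<DemoEvent>() {
      @Override
      public void handle(DemoEvent event) {
        System.out.println("handled " + event.getType());
      }
    });
    // Ask the dispatcher to finish queued events before shutting down.
    dispatcher.setDrainEventsOnStop();
    dispatcher.init(conf);
    dispatcher.start();

    dispatcher.getEventHandler().handle(new DemoEvent());
    // stop() drains the queue, bounded by the timeout configured above.
    dispatcher.stop();
  }
}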
Use of org.apache.hadoop.yarn.conf.YarnConfiguration in project hadoop by apache.
The class TestTimelineClientForATS1_5, method createTimelineClient.
private TimelineClientImpl createTimelineClient(YarnConfiguration conf) {
  TimelineClientImpl client = new TimelineClientImpl() {
    @Override
    protected TimelineWriter createTimelineWriter(Configuration conf,
        UserGroupInformation authUgi, Client client, URI resURI) throws IOException {
      TimelineWriter timelineWriter =
          new FileSystemTimelineWriter(conf, authUgi, client, resURI) {
            // Stub out the HTTP POST so the writer always sees a 200 OK response.
            public ClientResponse doPostingObject(Object object, String path) {
              ClientResponse response = mock(ClientResponse.class);
              when(response.getStatusInfo()).thenReturn(ClientResponse.Status.OK);
              return response;
            }
          };
      spyTimelineWriter = spy(timelineWriter);
      return spyTimelineWriter;
    }
  };
  client.init(conf);
  client.start();
  return client;
}
Use of org.apache.hadoop.yarn.conf.YarnConfiguration in project hadoop by apache.
The class TestCommonNodeLabelsManager, method testNodeLabelsDisabled.
@Test(timeout = 5000)
public void testNodeLabelsDisabled() throws IOException {
  DummyCommonNodeLabelsManager mgr = new DummyCommonNodeLabelsManager();
  Configuration conf = new YarnConfiguration();
  conf.setBoolean(YarnConfiguration.NODE_LABELS_ENABLED, false);
  mgr.init(conf);
  mgr.start();
  boolean caught = false;
  // add labels
  try {
    mgr.addToCluserNodeLabelsWithDefaultExclusivity(ImmutableSet.of("x"));
  } catch (IOException e) {
    assertNodeLabelsDisabledErrorMessage(e);
    caught = true;
  }
  // check exception caught
  Assert.assertTrue(caught);
  caught = false;
  // remove labels
  try {
    mgr.removeFromClusterNodeLabels(ImmutableSet.of("x"));
  } catch (IOException e) {
    assertNodeLabelsDisabledErrorMessage(e);
    caught = true;
  }
  // check exception caught
  Assert.assertTrue(caught);
  caught = false;
  // add labels to node
  try {
    mgr.addLabelsToNode(ImmutableMap.of(NodeId.newInstance("host", 0),
        CommonNodeLabelsManager.EMPTY_STRING_SET));
  } catch (IOException e) {
    assertNodeLabelsDisabledErrorMessage(e);
    caught = true;
  }
  // check exception caught
  Assert.assertTrue(caught);
  caught = false;
  // remove labels from node
  try {
    mgr.removeLabelsFromNode(ImmutableMap.of(NodeId.newInstance("host", 0),
        CommonNodeLabelsManager.EMPTY_STRING_SET));
  } catch (IOException e) {
    assertNodeLabelsDisabledErrorMessage(e);
    caught = true;
  }
  // check exception caught
  Assert.assertTrue(caught);
  caught = false;
  // replace labels on node
  try {
    mgr.replaceLabelsOnNode(ImmutableMap.of(NodeId.newInstance("host", 0),
        CommonNodeLabelsManager.EMPTY_STRING_SET));
  } catch (IOException e) {
    assertNodeLabelsDisabledErrorMessage(e);
    caught = true;
  }
  // check exception caught
  Assert.assertTrue(caught);
  caught = false;
  mgr.close();
}
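For contrast, here is a hypothetical companion snippet (not part of the test above) sketching the enabled path inside the same test class: with NODE_LABELS_ENABLED set to true, the same DummyCommonNodeLabelsManager helper is expected to accept the label operations instead of throwing the "node labels disabled" IOException. The method name and the "x" label are placeholders; ImmutableSet, ImmutableMap, NodeId, and the manager helper are the ones already used above.

private void sketchNodeLabelsEnabled() throws IOException {
  DummyCommonNodeLabelsManager mgr = new DummyCommonNodeLabelsManager();
  Configuration conf = new YarnConfiguration();
  conf.setBoolean(YarnConfiguration.NODE_LABELS_ENABLED, true);
  mgr.init(conf);
  mgr.start();
  // Register the label in the cluster, then attach it to a node (port 0 = whole host).
  mgr.addToCluserNodeLabelsWithDefaultExclusivity(ImmutableSet.of("x"));
  mgr.addLabelsToNode(ImmutableMap.<NodeId, Set<String>>of(
      NodeId.newInstance("host", 0), ImmutableSet.of("x")));
  mgr.close();
}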