Use of org.apache.hadoop.yarn.server.resourcemanager.RMContext in project hadoop by apache.
The class TestSystemMetricsPublisherForV2, method setup.
@BeforeClass
public static void setup() throws Exception {
  if (testRootDir.exists()) {
    // clean up beforehand
    FileContext.getLocalFSFileContext()
        .delete(new Path(testRootDir.getAbsolutePath()), true);
  }
  RMContext rmContext = mock(RMContext.class);
  rmAppsMapInContext = new ConcurrentHashMap<ApplicationId, RMApp>();
  when(rmContext.getRMApps()).thenReturn(rmAppsMapInContext);
  rmTimelineCollectorManager = new RMTimelineCollectorManager(rmContext);
  when(rmContext.getRMTimelineCollectorManager())
      .thenReturn(rmTimelineCollectorManager);
  Configuration conf = getTimelineV2Conf();
  conf.setClass(YarnConfiguration.TIMELINE_SERVICE_WRITER_CLASS,
      FileSystemTimelineWriterImpl.class, TimelineWriter.class);
  rmTimelineCollectorManager.init(conf);
  rmTimelineCollectorManager.start();
  dispatcher.init(conf);
  dispatcher.start();
  metricsPublisher = new TimelineServiceV2Publisher(rmContext) {
    @Override
    protected Dispatcher getDispatcher() {
      return dispatcher;
    }
  };
  metricsPublisher.init(conf);
  metricsPublisher.start();
}
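The setup above follows a common Mockito pattern: mock the heavyweight RMContext, back its getters with real, lightweight collaborators built in the test, and start only the services the test exercises. A minimal, self-contained sketch of that pattern, with hypothetical Context/App/CollectorManager types standing in for the Hadoop classes:

import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

public class ContextMockSketch {
  // Hypothetical collaborator types standing in for RMApp and
  // RMTimelineCollectorManager.
  interface App { }
  interface Context {
    Map<String, App> getApps();
    CollectorManager getCollectorManager();
  }
  static class CollectorManager {
    private final Context context;
    CollectorManager(Context context) { this.context = context; }
  }

  public static void main(String[] args) {
    Context context = mock(Context.class);
    // A real map, so code under test observes apps the test registers later.
    Map<String, App> apps = new ConcurrentHashMap<>();
    when(context.getApps()).thenReturn(apps);
    // A real manager built against the mocked context, then exposed back
    // through it, mirroring how setup() wires RMTimelineCollectorManager
    // into RMContext.
    CollectorManager manager = new CollectorManager(context);
    when(context.getCollectorManager()).thenReturn(manager);
  }
}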
Use of org.apache.hadoop.yarn.server.resourcemanager.RMContext in project hadoop by apache.
The class TestReservations, method testFindNodeToUnreserve.
@Test
public void testFindNodeToUnreserve() throws Exception {
  CapacitySchedulerConfiguration csConf = new CapacitySchedulerConfiguration();
  setup(csConf);
  final String user_0 = "user_0";
  final ApplicationAttemptId appAttemptId_0 =
      TestUtils.getMockApplicationAttemptId(0, 0);
  LeafQueue a = stubLeafQueue((LeafQueue) queues.get(A));
  FiCaSchedulerApp app_0 = new FiCaSchedulerApp(appAttemptId_0, user_0, a,
      mock(ActiveUsersManager.class), spyRMContext);
  String host_1 = "host_1";
  FiCaSchedulerNode node_1 =
      TestUtils.getMockNode(host_1, DEFAULT_RACK, 0, 8 * GB);
  // Set up resource requests
  Priority p = TestUtils.createMockPriority(5);
  SchedulerRequestKey priorityMap = toSchedulerKey(p);
  Resource capability = Resources.createResource(2 * GB, 0);
  RMApplicationHistoryWriter writer = mock(RMApplicationHistoryWriter.class);
  SystemMetricsPublisher publisher = mock(SystemMetricsPublisher.class);
  RMContext rmContext = mock(RMContext.class);
  ContainerAllocationExpirer expirer = mock(ContainerAllocationExpirer.class);
  DrainDispatcher drainDispatcher = new DrainDispatcher();
  when(rmContext.getContainerAllocationExpirer()).thenReturn(expirer);
  when(rmContext.getDispatcher()).thenReturn(drainDispatcher);
  when(rmContext.getRMApplicationHistoryWriter()).thenReturn(writer);
  when(rmContext.getSystemMetricsPublisher()).thenReturn(publisher);
  when(rmContext.getYarnConfiguration()).thenReturn(new YarnConfiguration());
  ApplicationAttemptId appAttemptId =
      BuilderUtils.newApplicationAttemptId(app_0.getApplicationId(), 1);
  ContainerId containerId = BuilderUtils.newContainerId(appAttemptId, 1);
  Container container = TestUtils.getMockContainer(containerId,
      node_1.getNodeID(), Resources.createResource(2 * GB),
      priorityMap.getPriority());
  RMContainer rmContainer = new RMContainerImpl(container,
      SchedulerRequestKey.extractFrom(container), appAttemptId,
      node_1.getNodeID(), "user", rmContext);
  // Nothing reserved yet
  RMContainer toUnreserveContainer = app_0.findNodeToUnreserve(
      csContext.getClusterResource(), node_1, priorityMap, capability);
  assertNull(toUnreserveContainer);
  // Reserved, but the scheduler doesn't know about that node
  app_0.reserve(node_1, priorityMap, rmContainer, container);
  node_1.reserveResource(app_0, priorityMap, rmContainer);
  toUnreserveContainer = app_0.findNodeToUnreserve(
      csContext.getClusterResource(), node_1, priorityMap, capability);
  assertNull(toUnreserveContainer);
}
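The test exercises two-sided reservation bookkeeping: the application records the reservation (app_0.reserve) and the node records it too (node_1.reserveResource), yet findNodeToUnreserve still returns null because the scheduler itself has no entry for the node. A rough, self-contained sketch of that null-on-unknown-node guard, with simplified stand-in types rather than the actual Hadoop classes:

import java.util.HashMap;
import java.util.Map;

public class UnreserveSketch {
  static class Node {
    Object reservedContainer;  // null when nothing is reserved here
  }

  // Nodes the scheduler currently knows about, keyed by node id.
  private final Map<String, Node> schedulerNodes = new HashMap<>();

  // Mirrors the guard behaviour the test asserts: return null when nothing
  // is reserved, and also when the node is not registered with the scheduler,
  // even if a reservation was recorded on the node object itself.
  Object findNodeToUnreserve(String nodeId, Node node) {
    if (node.reservedContainer == null) {
      return null;  // nothing reserved
    }
    if (!schedulerNodes.containsKey(nodeId)) {
      return null;  // the scheduler doesn't know this node
    }
    return node.reservedContainer;
  }
}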
Use of org.apache.hadoop.yarn.server.resourcemanager.RMContext in project hadoop by apache.
The class TestCapacityScheduler, method testResourceUpdateDecommissioningNode.
@Test
public void testResourceUpdateDecommissioningNode() throws Exception {
  // Mock the RMNodeResourceUpdate event handler to update the SchedulerNode
  // to have 0 available resource
  RMContext spyContext = Mockito.spy(resourceManager.getRMContext());
  Dispatcher mockDispatcher = mock(AsyncDispatcher.class);
  when(mockDispatcher.getEventHandler()).thenReturn(new EventHandler<Event>() {
    @Override
    public void handle(Event event) {
      if (event instanceof RMNodeResourceUpdateEvent) {
        RMNodeResourceUpdateEvent resourceEvent =
            (RMNodeResourceUpdateEvent) event;
        resourceManager.getResourceScheduler()
            .getSchedulerNode(resourceEvent.getNodeId())
            .updateTotalResource(resourceEvent.getResourceOption().getResource());
      }
    }
  });
  Mockito.doReturn(mockDispatcher).when(spyContext).getDispatcher();
  ((CapacityScheduler) resourceManager.getResourceScheduler())
      .setRMContext(spyContext);
  ((AsyncDispatcher) mockDispatcher).start();
  // Register a node
  String host_0 = "host_0";
  org.apache.hadoop.yarn.server.resourcemanager.NodeManager nm_0 =
      registerNode(host_0, 1234, 2345, NetworkTopology.DEFAULT_RACK,
          Resources.createResource(8 * GB, 4));
  // ResourceRequest priorities
  Priority priority_0 = Priority.newInstance(0);
  // Submit an application
  Application application_0 = new Application("user_0", "a1", resourceManager);
  application_0.submit();
  application_0.addNodeManager(host_0, 1234, nm_0);
  Resource capability_0_0 = Resources.createResource(1 * GB, 1);
  application_0.addResourceRequestSpec(priority_0, capability_0_0);
  Task task_0_0 = new Task(application_0, priority_0, new String[] { host_0 });
  application_0.addTask(task_0_0);
  // Send resource requests to the scheduler
  application_0.schedule();
  nodeUpdate(nm_0);
  // Kick off another heartbeat with the node state mocked to DECOMMISSIONING;
  // this should update the scheduler nodes to have 0 available resource
  RMNode spyNode = Mockito.spy(
      resourceManager.getRMContext().getRMNodes().get(nm_0.getNodeId()));
  when(spyNode.getState()).thenReturn(NodeState.DECOMMISSIONING);
  resourceManager.getResourceScheduler()
      .handle(new NodeUpdateSchedulerEvent(spyNode));
  // Get allocations from the scheduler
  application_0.schedule();
  // Check that the used resource is 1 GB, 1 core
  Assert.assertEquals(1 * GB, nm_0.getUsed().getMemorySize());
  Resource usedResource = resourceManager.getResourceScheduler()
      .getSchedulerNode(nm_0.getNodeId()).getAllocatedResource();
  Assert.assertEquals(1 * GB, usedResource.getMemorySize());
  Assert.assertEquals(1, usedResource.getVirtualCores());
  // Check that the total resource of the scheduler node also changed to
  // 1 GB, 1 core
  Resource totalResource = resourceManager.getResourceScheduler()
      .getSchedulerNode(nm_0.getNodeId()).getTotalResource();
  Assert.assertEquals(1 * GB, totalResource.getMemorySize());
  Assert.assertEquals(1, totalResource.getVirtualCores());
  // Check that the available resource is 0 memory, 0 cores
  Resource availableResource = resourceManager.getResourceScheduler()
      .getSchedulerNode(nm_0.getNodeId()).getUnallocatedResource();
  Assert.assertEquals(0, availableResource.getMemorySize());
  Assert.assertEquals(0, availableResource.getVirtualCores());
}
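The key trick in this test is replacing the asynchronous dispatcher with a handler that applies RMNodeResourceUpdateEvent synchronously, so assertions can run immediately after the heartbeat instead of racing an event queue. A minimal sketch of that synchronous-handler pattern, with simplified stand-ins for the event types:

import java.util.function.Consumer;

public class SyncDispatcherSketch {
  // Simplified stand-ins for Event / RMNodeResourceUpdateEvent.
  interface Event { }
  static class ResourceUpdateEvent implements Event {
    final String nodeId;
    final long newTotalMemory;
    ResourceUpdateEvent(String nodeId, long newTotalMemory) {
      this.nodeId = nodeId;
      this.newTotalMemory = newTotalMemory;
    }
  }

  public static void main(String[] args) {
    // Instead of queueing, the test handler applies the update inline, so
    // state is visible to assertions as soon as handle() returns.
    Consumer<Event> handler = event -> {
      if (event instanceof ResourceUpdateEvent) {
        ResourceUpdateEvent e = (ResourceUpdateEvent) event;
        System.out.println("node " + e.nodeId
            + " total memory now " + e.newTotalMemory);
      }
    };
    handler.accept(new ResourceUpdateEvent("host_0", 1024L));
  }
}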
Use of org.apache.hadoop.yarn.server.resourcemanager.RMContext in project hadoop by apache.
The class TestFSAppAttempt, method testHeadroom.
@Test
public void testHeadroom() {
  final FairScheduler mockScheduler = Mockito.mock(FairScheduler.class);
  Mockito.when(mockScheduler.getClock()).thenReturn(scheduler.getClock());
  final FSLeafQueue mockQueue = Mockito.mock(FSLeafQueue.class);
  final Resource queueMaxResources = Resource.newInstance(5 * 1024, 3);
  final Resource queueFairShare = Resources.createResource(4096, 2);
  final Resource queueUsage = Resource.newInstance(2048, 2);
  final Resource queueStarvation =
      Resources.subtract(queueFairShare, queueUsage);
  final Resource queueMaxResourcesAvailable =
      Resources.subtract(queueMaxResources, queueUsage);
  final Resource clusterResource = Resources.createResource(8192, 8);
  final Resource clusterUsage = Resources.createResource(2048, 2);
  final Resource clusterAvailable =
      Resources.subtract(clusterResource, clusterUsage);
  final QueueMetrics fakeRootQueueMetrics = Mockito.mock(QueueMetrics.class);
  Mockito.when(mockQueue.getMaxShare()).thenReturn(queueMaxResources);
  Mockito.when(mockQueue.getFairShare()).thenReturn(queueFairShare);
  Mockito.when(mockQueue.getResourceUsage()).thenReturn(queueUsage);
  Mockito.when(mockScheduler.getClusterResource()).thenReturn(clusterResource);
  Mockito.when(fakeRootQueueMetrics.getAllocatedResources())
      .thenReturn(clusterUsage);
  Mockito.when(mockScheduler.getRootQueueMetrics())
      .thenReturn(fakeRootQueueMetrics);
  ApplicationAttemptId applicationAttemptId = createAppAttemptId(1, 1);
  RMContext rmContext = resourceManager.getRMContext();
  FSAppAttempt schedulerApp = new FSAppAttempt(mockScheduler,
      applicationAttemptId, "user1", mockQueue, null, rmContext);
  // The min of memory and CPU across cluster and queue is used with
  // DominantResourceFairnessPolicy
  Mockito.when(mockQueue.getPolicy()).thenReturn(
      SchedulingPolicy.getInstance(DominantResourceFairnessPolicy.class));
  verifyHeadroom(schedulerApp,
      min(queueStarvation.getMemorySize(),
          clusterAvailable.getMemorySize(),
          queueMaxResourcesAvailable.getMemorySize()),
      min(queueStarvation.getVirtualCores(),
          clusterAvailable.getVirtualCores(),
          queueMaxResourcesAvailable.getVirtualCores()));
  // Fair and FIFO ignore the queue's CPU, so the cluster's available CPU is used
  Mockito.when(mockQueue.getPolicy()).thenReturn(
      SchedulingPolicy.getInstance(FairSharePolicy.class));
  verifyHeadroom(schedulerApp,
      min(queueStarvation.getMemorySize(),
          clusterAvailable.getMemorySize(),
          queueMaxResourcesAvailable.getMemorySize()),
      Math.min(clusterAvailable.getVirtualCores(),
          queueMaxResourcesAvailable.getVirtualCores()));
  Mockito.when(mockQueue.getPolicy()).thenReturn(
      SchedulingPolicy.getInstance(FifoPolicy.class));
  verifyHeadroom(schedulerApp,
      min(queueStarvation.getMemorySize(),
          clusterAvailable.getMemorySize(),
          queueMaxResourcesAvailable.getMemorySize()),
      Math.min(clusterAvailable.getVirtualCores(),
          queueMaxResourcesAvailable.getVirtualCores()));
}
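The three-argument min in the assertions is a small helper on the test class; under a dominant-resource policy the per-resource headroom is the minimum of queue starvation (fair share minus usage), cluster availability, and the queue's remaining max share. A hedged sketch of that arithmetic using the figures from this test (the helper names below are assumptions, not the exact Hadoop code):

public class HeadroomSketch {
  // Three-way minimum, as the test's min(a, b, c) helper presumably does.
  static long min(long a, long b, long c) {
    return Math.min(a, Math.min(b, c));
  }

  // Expected memory headroom under a dominant-resource-style policy.
  static long expectedMemoryHeadroom(long queueFairShare, long queueUsage,
      long clusterTotal, long clusterUsed, long queueMax) {
    long queueStarvation = queueFairShare - queueUsage;    // 4096 - 2048
    long clusterAvailable = clusterTotal - clusterUsed;    // 8192 - 2048
    long queueMaxAvailable = queueMax - queueUsage;        // 5120 - 2048
    return min(queueStarvation, clusterAvailable, queueMaxAvailable);
  }

  public static void main(String[] args) {
    // With the figures from the test: min(2048, 6144, 3072) = 2048 MB.
    System.out.println(
        expectedMemoryHeadroom(4096, 2048, 8192, 2048, 5 * 1024));
  }
}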
Use of org.apache.hadoop.yarn.server.resourcemanager.RMContext in project hadoop by apache.
The class TestFSLeafQueue, method testConcurrentAccess.
@Test
public void testConcurrentAccess() {
  conf.set(FairSchedulerConfiguration.ASSIGN_MULTIPLE, "false");
  resourceManager = new MockRM(conf);
  resourceManager.start();
  scheduler = (FairScheduler) resourceManager.getResourceScheduler();
  String queueName = "root.queue1";
  final FSLeafQueue schedulable =
      scheduler.getQueueManager().getLeafQueue(queueName, true);
  ApplicationAttemptId applicationAttemptId = createAppAttemptId(1, 1);
  RMContext rmContext = resourceManager.getRMContext();
  final FSAppAttempt app = new FSAppAttempt(scheduler, applicationAttemptId,
      "user1", schedulable, null, rmContext);
  // This needs to stay in sync with the number of runnables declared below
  int testThreads = 2;
  List<Runnable> runnables = new ArrayList<Runnable>();
  // Add applications to modify the list
  runnables.add(new Runnable() {
    @Override
    public void run() {
      for (int i = 0; i < 500; i++) {
        schedulable.addAppSchedulable(app);
      }
    }
  });
  // Iterate over the list a couple of times in a different thread
  runnables.add(new Runnable() {
    @Override
    public void run() {
      for (int i = 0; i < 500; i++) {
        schedulable.getResourceUsage();
      }
    }
  });
  final List<Throwable> exceptions =
      Collections.synchronizedList(new ArrayList<Throwable>());
  final ExecutorService threadPool =
      HadoopExecutors.newFixedThreadPool(testThreads);
  try {
    final CountDownLatch allExecutorThreadsReady =
        new CountDownLatch(testThreads);
    final CountDownLatch startBlocker = new CountDownLatch(1);
    final CountDownLatch allDone = new CountDownLatch(testThreads);
    for (final Runnable submittedTestRunnable : runnables) {
      threadPool.submit(new Runnable() {
        public void run() {
          allExecutorThreadsReady.countDown();
          try {
            startBlocker.await();
            submittedTestRunnable.run();
          } catch (final Throwable e) {
            exceptions.add(e);
          } finally {
            allDone.countDown();
          }
        }
      });
    }
    // Wait until all threads are ready
    allExecutorThreadsReady.await();
    // Start all test runners
    startBlocker.countDown();
    int testTimeout = 2;
    assertTrue("Timeout waiting for more than " + testTimeout + " seconds",
        allDone.await(testTimeout, TimeUnit.SECONDS));
  } catch (InterruptedException ie) {
    exceptions.add(ie);
  } finally {
    threadPool.shutdownNow();
  }
  assertTrue("Test failed with exception(s): " + exceptions,
      exceptions.isEmpty());
}
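The latch choreography above (a ready latch, a start gate, and a done latch) is a reusable harness for racing runnables against one another. A condensed, self-contained version of the same pattern using plain java.util.concurrent (HadoopExecutors is assumed to be a thin wrapper over Executors here):

import java.util.List;
import java.util.concurrent.*;

public class ConcurrencyHarnessSketch {
  // Runs all runnables concurrently, released by a single start gate.
  static void runConcurrently(List<Runnable> runnables, long timeoutSec)
      throws InterruptedException {
    int n = runnables.size();
    CountDownLatch ready = new CountDownLatch(n);
    CountDownLatch start = new CountDownLatch(1);
    CountDownLatch done = new CountDownLatch(n);
    ExecutorService pool = Executors.newFixedThreadPool(n);
    try {
      for (Runnable r : runnables) {
        pool.submit(() -> {
          ready.countDown();         // signal this worker is parked
          try {
            start.await();           // wait at the common start gate
            r.run();
          } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
          } finally {
            done.countDown();
          }
        });
      }
      ready.await();                 // all workers parked at the gate
      start.countDown();             // release them together
      if (!done.await(timeoutSec, TimeUnit.SECONDS)) {
        throw new AssertionError("Timed out after " + timeoutSec + "s");
      }
    } finally {
      pool.shutdownNow();
    }
  }
}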