Use of org.apache.hadoop.yarn.event.AsyncDispatcher in project hadoop by apache.
The class TestContinuousScheduling, method testInterruptedException.
@Test
public void testInterruptedException() throws Exception {
  // Disable continuous scheduling, will invoke continuous
  // scheduling once manually
  scheduler = new FairScheduler();
  conf = super.createConfiguration();
  resourceManager = new MockRM(conf);
  // TODO: This test should really be using MockRM. For now starting stuff
  // that is needed at a bare minimum.
  ((AsyncDispatcher) resourceManager.getRMContext().getDispatcher()).start();
  resourceManager.getRMContext().getStateStore().start();
  // to initialize the master key
  resourceManager.getRMContext().getContainerTokenSecretManager().rollMasterKey();
  scheduler.setRMContext(resourceManager.getRMContext());
  scheduler.init(conf);
  scheduler.start();
  FairScheduler spyScheduler = spy(scheduler);
  Assert.assertTrue("Continuous scheduling should be disabled.",
      !spyScheduler.isContinuousSchedulingEnabled());
  // Add one node
  RMNode node1 = MockNodes.newNodeInfo(1, Resources.createResource(8 * 1024, 8), 1, "127.0.0.1");
  NodeAddedSchedulerEvent nodeEvent1 = new NodeAddedSchedulerEvent(node1);
  spyScheduler.handle(nodeEvent1);
  Assert.assertEquals("We should have one alive node.", 1, spyScheduler.getNumClusterNodes());
  InterruptedException ie = new InterruptedException();
  doThrow(new YarnRuntimeException(ie)).when(spyScheduler)
      .attemptScheduling(isA(FSSchedulerNode.class));
  // Invoke the continuous scheduling once
  try {
    spyScheduler.continuousSchedulingAttempt();
    fail("Expected InterruptedException to stop schedulingThread");
  } catch (InterruptedException e) {
    Assert.assertEquals(ie, e);
  }
}
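The test only needs to start the AsyncDispatcher it pulls out of the RMContext, but it helps to keep the dispatcher's full service lifecycle in mind. Below is a minimal, hypothetical sketch (not code from the Hadoop tests): TestEventType and TestEvent are made-up illustrations, while AsyncDispatcher, AbstractEvent, EventHandler and Configuration are the real classes from org.apache.hadoop.yarn.event and org.apache.hadoop.conf.

// Hypothetical event type and event used only for this sketch.
enum TestEventType { PING }

class TestEvent extends AbstractEvent<TestEventType> {
  TestEvent() {
    super(TestEventType.PING);
  }
}

AsyncDispatcher dispatcher = new AsyncDispatcher();
// Route every TestEventType event to this handler.
dispatcher.register(TestEventType.class, new EventHandler<TestEvent>() {
  @Override
  public void handle(TestEvent event) {
    System.out.println("handled " + event.getType());
  }
});
dispatcher.init(new Configuration()); // AsyncDispatcher is an AbstractService
dispatcher.start();                   // starts the internal dispatch thread
dispatcher.getEventHandler().handle(new TestEvent()); // enqueued, handled asynchronously
dispatcher.stop();                    // shuts the dispatch thread down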
Use of org.apache.hadoop.yarn.event.AsyncDispatcher in project hadoop by apache.
The class TestFairScheduler, method setUp.
@Before
public void setUp() throws IOException {
  scheduler = new FairScheduler();
  conf = createConfiguration();
  resourceManager = new MockRM(conf);
  ((AsyncDispatcher) resourceManager.getRMContext().getDispatcher()).start();
  resourceManager.getRMContext().getStateStore().start();
  // to initialize the master key
  resourceManager.getRMContext().getContainerTokenSecretManager().rollMasterKey();
  scheduler.setRMContext(resourceManager.getRMContext());
}
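A setUp like this normally needs a matching tearDown so the started dispatcher thread and the MockRM services are not leaked between test methods. The following is only a hedged sketch of what such a tearDown could look like, not the actual code of TestFairScheduler:

@After
public void tearDown() {
  if (scheduler != null) {
    scheduler.stop();       // stops the FairScheduler service
    scheduler = null;
  }
  if (resourceManager != null) {
    resourceManager.stop(); // stops the MockRM, including its AsyncDispatcher
    resourceManager = null;
  }
}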
Use of org.apache.hadoop.yarn.event.AsyncDispatcher in project hadoop by apache.
The class TestFifoScheduler, method testNodeLocalAssignment.
@Test(timeout = 2000)
public void testNodeLocalAssignment() throws Exception {
  AsyncDispatcher dispatcher = new InlineDispatcher();
  Configuration conf = new Configuration();
  RMContainerTokenSecretManager containerTokenSecretManager =
      new RMContainerTokenSecretManager(conf);
  containerTokenSecretManager.rollMasterKey();
  NMTokenSecretManagerInRM nmTokenSecretManager = new NMTokenSecretManagerInRM(conf);
  nmTokenSecretManager.rollMasterKey();
  RMApplicationHistoryWriter writer = mock(RMApplicationHistoryWriter.class);
  FifoScheduler scheduler = new FifoScheduler();
  RMContext rmContext = new RMContextImpl(dispatcher, null, null, null, null, null,
      containerTokenSecretManager, nmTokenSecretManager, null, scheduler);
  rmContext.setSystemMetricsPublisher(mock(SystemMetricsPublisher.class));
  rmContext.setRMApplicationHistoryWriter(mock(RMApplicationHistoryWriter.class));
  ((RMContextImpl) rmContext).setYarnConfiguration(new YarnConfiguration());
  scheduler.setRMContext(rmContext);
  scheduler.init(conf);
  scheduler.start();
  scheduler.reinitialize(new Configuration(), rmContext);
  RMNode node0 = MockNodes.newNodeInfo(1, Resources.createResource(1024 * 64), 1, "127.0.0.1");
  NodeAddedSchedulerEvent nodeEvent1 = new NodeAddedSchedulerEvent(node0);
  scheduler.handle(nodeEvent1);
  int _appId = 1;
  int _appAttemptId = 1;
  ApplicationAttemptId appAttemptId = createAppAttemptId(_appId, _appAttemptId);
  createMockRMApp(appAttemptId, rmContext);
  AppAddedSchedulerEvent appEvent =
      new AppAddedSchedulerEvent(appAttemptId.getApplicationId(), "queue1", "user1");
  scheduler.handle(appEvent);
  AppAttemptAddedSchedulerEvent attemptEvent =
      new AppAttemptAddedSchedulerEvent(appAttemptId, false);
  scheduler.handle(attemptEvent);
  int memory = 64;
  int nConts = 3;
  int priority = 20;
  List<ResourceRequest> ask = new ArrayList<ResourceRequest>();
  ResourceRequest nodeLocal = createResourceRequest(memory, node0.getHostName(), priority, nConts);
  ResourceRequest rackLocal = createResourceRequest(memory, node0.getRackName(), priority, nConts);
  ResourceRequest any = createResourceRequest(memory, ResourceRequest.ANY, priority, nConts);
  ask.add(nodeLocal);
  ask.add(rackLocal);
  ask.add(any);
  scheduler.allocate(appAttemptId, ask, new ArrayList<ContainerId>(), null, null,
      NULL_UPDATE_REQUESTS);
  NodeUpdateSchedulerEvent node0Update = new NodeUpdateSchedulerEvent(node0);
  // Before the node update event, there are 3 local requests outstanding
  Assert.assertEquals(3, nodeLocal.getNumContainers());
  scheduler.handle(node0Update);
  // After the node update event, check that there are no more local requests
  // outstanding
  Assert.assertEquals(0, nodeLocal.getNumContainers());
  // Also check that the containers were scheduled
  SchedulerAppReport info = scheduler.getSchedulerAppInfo(appAttemptId);
  Assert.assertEquals(3, info.getLiveContainers().size());
  scheduler.stop();
}
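Here the AsyncDispatcher variable actually holds an InlineDispatcher, a test-only subclass that delivers each event on the calling thread, so the effects of scheduler.handle(...) are visible immediately and the assertions need no waiting or synchronization. Conceptually it behaves like the following sketch (an illustration, not the real InlineDispatcher source):

// Sketch of a synchronous test dispatcher; the Hadoop tests use the
// project's InlineDispatcher instead.
class SyncDispatcher extends AsyncDispatcher {
  @Override
  public EventHandler<Event> getEventHandler() {
    return new EventHandler<Event>() {
      @Override
      public void handle(Event event) {
        dispatch(event); // deliver on the caller's thread instead of enqueueing
      }
    };
  }
}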
Use of org.apache.hadoop.yarn.event.AsyncDispatcher in project hadoop by apache.
The class TestDelegationTokenRenewer, method setUp.
@Before
public void setUp() throws Exception {
  counter = new AtomicInteger(0);
  conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION, "kerberos");
  UserGroupInformation.setConfiguration(conf);
  eventQueue = new LinkedBlockingQueue<Event>();
  dispatcher = new AsyncDispatcher(eventQueue);
  Renewer.reset();
  delegationTokenRenewer = createNewDelegationTokenRenewer(conf, counter);
  RMContext mockContext = mock(RMContext.class);
  ClientRMService mockClientRMService = mock(ClientRMService.class);
  when(mockContext.getSystemCredentialsForApps())
      .thenReturn(new ConcurrentHashMap<ApplicationId, ByteBuffer>());
  when(mockContext.getDelegationTokenRenewer()).thenReturn(delegationTokenRenewer);
  when(mockContext.getDispatcher()).thenReturn(dispatcher);
  when(mockContext.getClientRMService()).thenReturn(mockClientRMService);
  InetSocketAddress sockAddr = InetSocketAddress.createUnresolved("localhost", 1234);
  when(mockClientRMService.getBindAddress()).thenReturn(sockAddr);
  delegationTokenRenewer.setRMContext(mockContext);
  delegationTokenRenewer.init(conf);
  delegationTokenRenewer.start();
}
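Note that this dispatcher is built around the test's own LinkedBlockingQueue and is never started in setUp, so anything sent through getEventHandler() simply accumulates in eventQueue where the test can inspect it. A hedged sketch of that pattern (TestEvent is the hypothetical event from the first sketch above, not part of this test):

// Events land in the shared queue because the dispatch thread was never started.
dispatcher.getEventHandler().handle(new TestEvent());
Event queued = eventQueue.poll(5, TimeUnit.SECONDS);
Assert.assertNotNull("event should have been enqueued", queued);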
Use of org.apache.hadoop.yarn.event.AsyncDispatcher in project hadoop by apache.
The class TestFifoScheduler, method testResourceUpdateDecommissioningNode.
@Test
public void testResourceUpdateDecommissioningNode() throws Exception {
  // Mock the RMNodeResourceUpdate event handler to update SchedulerNode
  // to have 0 available resource
  RMContext spyContext = Mockito.spy(resourceManager.getRMContext());
  Dispatcher mockDispatcher = mock(AsyncDispatcher.class);
  when(mockDispatcher.getEventHandler()).thenReturn(new EventHandler<Event>() {
    @Override
    public void handle(Event event) {
      if (event instanceof RMNodeResourceUpdateEvent) {
        RMNodeResourceUpdateEvent resourceEvent = (RMNodeResourceUpdateEvent) event;
        resourceManager.getResourceScheduler()
            .getSchedulerNode(resourceEvent.getNodeId())
            .updateTotalResource(resourceEvent.getResourceOption().getResource());
      }
    }
  });
  Mockito.doReturn(mockDispatcher).when(spyContext).getDispatcher();
  ((FifoScheduler) resourceManager.getResourceScheduler()).setRMContext(spyContext);
  ((AsyncDispatcher) mockDispatcher).start();
  // Register node
  String host_0 = "host_0";
  org.apache.hadoop.yarn.server.resourcemanager.NodeManager nm_0 = registerNode(host_0, 1234,
      2345, NetworkTopology.DEFAULT_RACK, Resources.createResource(8 * GB, 4));
  // ResourceRequest priorities
  Priority priority_0 = Priority.newInstance(0);
  // Submit an application
  Application application_0 = new Application("user_0", "a1", resourceManager);
  application_0.submit();
  application_0.addNodeManager(host_0, 1234, nm_0);
  Resource capability_0_0 = Resources.createResource(1 * GB, 1);
  application_0.addResourceRequestSpec(priority_0, capability_0_0);
  Task task_0_0 = new Task(application_0, priority_0, new String[] { host_0 });
  application_0.addTask(task_0_0);
  // Send resource requests to the scheduler
  application_0.schedule();
  RMNode node = resourceManager.getRMContext().getRMNodes().get(nm_0.getNodeId());
  // Send a heartbeat to kick the tires on the Scheduler
  NodeUpdateSchedulerEvent nodeUpdate = new NodeUpdateSchedulerEvent(node);
  resourceManager.getResourceScheduler().handle(nodeUpdate);
  // Kick off another heartbeat with the node state mocked to decommissioning
  // This should update the schedulernodes to have 0 available resource
  RMNode spyNode = Mockito.spy(resourceManager.getRMContext().getRMNodes().get(nm_0.getNodeId()));
  when(spyNode.getState()).thenReturn(NodeState.DECOMMISSIONING);
  resourceManager.getResourceScheduler().handle(new NodeUpdateSchedulerEvent(spyNode));
  // Get allocations from the scheduler
  application_0.schedule();
  // Check the used resource is 1 GB 1 core
  // Assert.assertEquals(1 * GB, nm_0.getUsed().getMemory());
  Resource usedResource = resourceManager.getResourceScheduler()
      .getSchedulerNode(nm_0.getNodeId()).getAllocatedResource();
  Assert.assertEquals(usedResource.getMemorySize(), 1 * GB);
  Assert.assertEquals(usedResource.getVirtualCores(), 1);
  // Check total resource of scheduler node is also changed to 1 GB 1 core
  Resource totalResource = resourceManager.getResourceScheduler()
      .getSchedulerNode(nm_0.getNodeId()).getTotalResource();
  Assert.assertEquals(totalResource.getMemorySize(), 1 * GB);
  Assert.assertEquals(totalResource.getVirtualCores(), 1);
  // Check the available resource is 0/0
  Resource availableResource = resourceManager.getResourceScheduler()
      .getSchedulerNode(nm_0.getNodeId()).getUnallocatedResource();
  Assert.assertEquals(availableResource.getMemorySize(), 0);
  Assert.assertEquals(availableResource.getVirtualCores(), 0);
}
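The same Mockito pattern, mocking AsyncDispatcher and stubbing getEventHandler(), can also be used to capture every event a component emits instead of acting on it. A hedged sketch (not part of the Hadoop test):

Dispatcher captureDispatcher = mock(AsyncDispatcher.class);
final List<Event> seen = new ArrayList<Event>();
when(captureDispatcher.getEventHandler()).thenReturn(new EventHandler<Event>() {
  @Override
  public void handle(Event event) {
    seen.add(event); // record the event instead of dispatching it asynchronously
  }
});
// A component wired with captureDispatcher can then be asserted against, e.g.
// that it sent an RMNodeResourceUpdateEvent during the scenario under test.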