use of org.apache.hadoop.yarn.client.api.AMRMClient.ContainerRequest in project alluxio by Alluxio.
the class ContainerAllocatorTest method allocateFirstHostAnswer.
/**
 * Creates an Answer to an addContainerRequest method. The Answer picks the first node requested
 * and allocates a container on it, sending a callback to the specified container allocator.
 */
private Answer<Void> allocateFirstHostAnswer(final ContainerAllocator containerAllocator) {
  return new Answer<Void>() {
    @Override
    public Void answer(InvocationOnMock invocation) throws Throwable {
      ContainerRequest containerRequest = invocation.getArgumentAt(0, ContainerRequest.class);
      Container container = Records.newRecord(Container.class);
      container.setNodeId(NodeId.newInstance(containerRequest.getNodes().get(0), 0));
      containerAllocator.allocateContainer(container);
      return null;
    }
  };
}
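For context, a minimal sketch of how an Answer like this is typically hooked up with Mockito. The mockRMClient mock and its wiring are assumptions for illustration, not code from the Alluxio test.

import static org.mockito.Mockito.any;
import static org.mockito.Mockito.doAnswer;
import static org.mockito.Mockito.mock;

// Hypothetical mock of the resource-manager client the allocator under test talks to.
AMRMClientAsync<ContainerRequest> mockRMClient = mock(AMRMClientAsync.class);
// Whenever the allocator submits a container ask, immediately "allocate" a container
// on the first requested host via the Answer defined above.
doAnswer(allocateFirstHostAnswer(containerAllocator))
    .when(mockRMClient).addContainerRequest(any(ContainerRequest.class));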
use of org.apache.hadoop.yarn.client.api.AMRMClient.ContainerRequest in project hadoop by apache.
the class ApplicationMaster method run.
/**
 * Main run function for the application master.
 *
 * @throws YarnException
 * @throws IOException
 * @throws InterruptedException
 */
@SuppressWarnings({ "unchecked" })
public void run() throws YarnException, IOException, InterruptedException {
  LOG.info("Starting ApplicationMaster");
  // Note: the Credentials, Token, UserGroupInformation and DataOutputBuffer
  // classes are marked as LimitedPrivate
  Credentials credentials = UserGroupInformation.getCurrentUser().getCredentials();
  DataOutputBuffer dob = new DataOutputBuffer();
  credentials.writeTokenStorageToStream(dob);
  // Now remove the AM->RM token so that containers cannot access it.
  Iterator<Token<?>> iter = credentials.getAllTokens().iterator();
  LOG.info("Executing with tokens:");
  while (iter.hasNext()) {
    Token<?> token = iter.next();
    LOG.info(token);
    if (token.getKind().equals(AMRMTokenIdentifier.KIND_NAME)) {
      iter.remove();
    }
  }
  allTokens = ByteBuffer.wrap(dob.getData(), 0, dob.getLength());
  // Create appSubmitterUgi and add original tokens to it
  String appSubmitterUserName = System.getenv(ApplicationConstants.Environment.USER.name());
  appSubmitterUgi = UserGroupInformation.createRemoteUser(appSubmitterUserName);
  appSubmitterUgi.addCredentials(credentials);
  AMRMClientAsync.AbstractCallbackHandler allocListener = new RMCallbackHandler();
  amRMClient = AMRMClientAsync.createAMRMClientAsync(1000, allocListener);
  amRMClient.init(conf);
  amRMClient.start();
  containerListener = createNMCallbackHandler();
  nmClientAsync = new NMClientAsyncImpl(containerListener);
  nmClientAsync.init(conf);
  nmClientAsync.start();
  startTimelineClient(conf);
  if (timelineServiceV2Enabled) {
    // need to bind timelineClient
    amRMClient.registerTimelineV2Client(timelineV2Client);
  }
  if (timelineServiceV2Enabled) {
    publishApplicationAttemptEventOnTimelineServiceV2(DSEvent.DS_APP_ATTEMPT_START);
  } else if (timelineServiceV1Enabled) {
    publishApplicationAttemptEvent(timelineClient, appAttemptID.toString(),
        DSEvent.DS_APP_ATTEMPT_START, domainId, appSubmitterUgi);
  }
  // Setup local RPC Server to accept status requests directly from clients
  // TODO need to setup a protocol for client to be able to communicate to
  // the RPC server
  // TODO use the rpc port info to register with the RM for the client to
  // send requests to this app master
  // Register self with ResourceManager
  // This will start heartbeating to the RM
  appMasterHostname = NetUtils.getHostname();
  RegisterApplicationMasterResponse response = amRMClient.registerApplicationMaster(
      appMasterHostname, appMasterRpcPort, appMasterTrackingUrl);
  // Dump out information about cluster capability as seen by the
  // resource manager
  long maxMem = response.getMaximumResourceCapability().getMemorySize();
  LOG.info("Max mem capability of resources in this cluster " + maxMem);
  int maxVCores = response.getMaximumResourceCapability().getVirtualCores();
  LOG.info("Max vcores capability of resources in this cluster " + maxVCores);
  // A resource ask cannot exceed the max.
  if (containerMemory > maxMem) {
    LOG.info("Container memory specified above max threshold of cluster."
        + " Using max value: specified=" + containerMemory + ", max=" + maxMem);
    containerMemory = maxMem;
  }
  if (containerVirtualCores > maxVCores) {
    LOG.info("Container virtual cores specified above max threshold of cluster."
        + " Using max value: specified=" + containerVirtualCores + ", max=" + maxVCores);
    containerVirtualCores = maxVCores;
  }
  List<Container> previousAMRunningContainers = response.getContainersFromPreviousAttempts();
  LOG.info(appAttemptID + " received " + previousAMRunningContainers.size()
      + " previous attempts' running containers on AM registration.");
  for (Container container : previousAMRunningContainers) {
    launchedContainers.add(container.getId());
  }
  numAllocatedContainers.addAndGet(previousAMRunningContainers.size());
  int numTotalContainersToRequest = numTotalContainers - previousAMRunningContainers.size();
  // Ask the RM for the containers still needed beyond those recovered from previous
  // attempts. The AM keeps heartbeating until every container is launched and the shell
  // command has executed on it (regardless of success/failure).
  for (int i = 0; i < numTotalContainersToRequest; ++i) {
    ContainerRequest containerAsk = setupContainerAskForRM();
    amRMClient.addContainerRequest(containerAsk);
  }
  numRequestedContainers.set(numTotalContainers);
}
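The setupContainerAskForRM helper called in the loop above is not shown here. A minimal sketch of what such a helper typically builds follows; containerMemory and containerVirtualCores appear in the method above, while the requestPriority field is an assumption for illustration.

private ContainerRequest setupContainerAskForRM() {
  // Ask for a container of the configured size at the configured priority.
  // Passing null for both nodes and racks means no locality constraint.
  Priority priority = Priority.newInstance(requestPriority);
  Resource capability = Resource.newInstance(containerMemory, containerVirtualCores);
  ContainerRequest request = new ContainerRequest(capability, null, null, priority);
  LOG.info("Requested container ask: " + request);
  return request;
}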
use of org.apache.hadoop.yarn.client.api.AMRMClient.ContainerRequest in project hadoop by apache.
the class TestAMRMClientContainerRequest method testLocalityRelaxationDifferentLevels.
@Test(expected = InvalidContainerRequestException.class)
public void testLocalityRelaxationDifferentLevels() {
  AMRMClientImpl<ContainerRequest> client = new AMRMClientImpl<ContainerRequest>();
  Configuration conf = new Configuration();
  conf.setClass(CommonConfigurationKeysPublic.NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY,
      MyResolver.class, DNSToSwitchMapping.class);
  client.init(conf);
  Resource capability = Resource.newInstance(1024, 1);
  ContainerRequest request1 = new ContainerRequest(capability,
      new String[] { "host1", "host2" }, null, Priority.newInstance(1), false);
  client.addContainerRequest(request1);
  ContainerRequest request2 = new ContainerRequest(capability, null,
      new String[] { "rack1" }, Priority.newInstance(1), true);
  client.addContainerRequest(request2);
}
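The conflict above is scoped to a single priority. A sketch (not part of the Hadoop test) of the same two asks made valid by giving the rack-level request its own priority, reusing the client and capability set up in the test:

ContainerRequest nodeAsk = new ContainerRequest(capability,
    new String[] { "host1", "host2" }, null, Priority.newInstance(1), false);
ContainerRequest rackAsk = new ContainerRequest(capability, null,
    new String[] { "rack1" }, Priority.newInstance(2), true);
// Requests at different priorities are tracked independently, so the differing
// relaxLocality settings no longer conflict and both asks are accepted.
client.addContainerRequest(nodeAsk);
client.addContainerRequest(rackAsk);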
use of org.apache.hadoop.yarn.client.api.AMRMClient.ContainerRequest in project hadoop by apache.
the class TestAMRMClientContainerRequest method testDifferentLocalityRelaxationSamePriority.
@Test(expected = InvalidContainerRequestException.class)
public void testDifferentLocalityRelaxationSamePriority() {
  AMRMClientImpl<ContainerRequest> client = new AMRMClientImpl<ContainerRequest>();
  Configuration conf = new Configuration();
  conf.setClass(CommonConfigurationKeysPublic.NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY,
      MyResolver.class, DNSToSwitchMapping.class);
  client.init(conf);
  Resource capability = Resource.newInstance(1024, 1);
  ContainerRequest request1 = new ContainerRequest(capability,
      new String[] { "host1", "host2" }, null, Priority.newInstance(1), false);
  client.addContainerRequest(request1);
  ContainerRequest request2 = new ContainerRequest(capability,
      new String[] { "host3" }, null, Priority.newInstance(1), true);
  client.addContainerRequest(request2);
}
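Here the two node-level asks are rejected only because their relaxLocality flags differ at the same priority. A sketch (not part of the test) of the second request with locality relaxation disabled as well, which the client accepts:

ContainerRequest request2Fixed = new ContainerRequest(capability,
    new String[] { "host3" }, null, Priority.newInstance(1), false);
// Same priority, matching relaxLocality: no InvalidContainerRequestException.
client.addContainerRequest(request2Fixed);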
use of org.apache.hadoop.yarn.client.api.AMRMClient.ContainerRequest in project hadoop by apache.
the class TestAMRMClientContainerRequest method testDisableLocalityRelaxation.
@Test
public void testDisableLocalityRelaxation() {
  AMRMClientImpl<ContainerRequest> client = new AMRMClientImpl<ContainerRequest>();
  Configuration conf = new Configuration();
  conf.setClass(CommonConfigurationKeysPublic.NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY,
      MyResolver.class, DNSToSwitchMapping.class);
  client.init(conf);
  Resource capability = Resource.newInstance(1024, 1);
  ContainerRequest nodeLevelRequest = new ContainerRequest(capability,
      new String[] { "host1", "host2" }, null, Priority.newInstance(1), false);
  client.addContainerRequest(nodeLevelRequest);
  verifyResourceRequest(client, nodeLevelRequest, ResourceRequest.ANY, false);
  verifyResourceRequest(client, nodeLevelRequest, "/rack1", false);
  verifyResourceRequest(client, nodeLevelRequest, "host1", true);
  verifyResourceRequest(client, nodeLevelRequest, "host2", true);
  // Make sure we don't get any errors with two node-level requests at the
  // same priority
  ContainerRequest nodeLevelRequest2 = new ContainerRequest(capability,
      new String[] { "host2", "host3" }, null, Priority.newInstance(1), false);
  client.addContainerRequest(nodeLevelRequest2);
  AMRMClient.ContainerRequest rackLevelRequest = new AMRMClient.ContainerRequest(capability,
      null, new String[] { "/rack3", "/rack4" }, Priority.newInstance(2), false);
  client.addContainerRequest(rackLevelRequest);
  verifyResourceRequest(client, rackLevelRequest, ResourceRequest.ANY, false);
  verifyResourceRequest(client, rackLevelRequest, "/rack3", true);
  verifyResourceRequest(client, rackLevelRequest, "/rack4", true);
  // Make sure we don't get any errors with two rack-level requests at the
  // same priority
  AMRMClient.ContainerRequest rackLevelRequest2 = new AMRMClient.ContainerRequest(capability,
      null, new String[] { "/rack4", "/rack5" }, Priority.newInstance(2), false);
  client.addContainerRequest(rackLevelRequest2);
  ContainerRequest bothLevelRequest = new ContainerRequest(capability,
      new String[] { "host3", "host4" }, new String[] { "rack1", "/otherrack" },
      Priority.newInstance(3), false);
  client.addContainerRequest(bothLevelRequest);
  verifyResourceRequest(client, bothLevelRequest, ResourceRequest.ANY, false);
  verifyResourceRequest(client, bothLevelRequest, "rack1", true);
  verifyResourceRequest(client, bothLevelRequest, "/otherrack", true);
  verifyResourceRequest(client, bothLevelRequest, "host3", true);
  verifyResourceRequest(client, bothLevelRequest, "host4", true);
  // Make sure we don't get any errors with two both-level requests at the
  // same priority
  ContainerRequest bothLevelRequest2 = new ContainerRequest(capability,
      new String[] { "host4", "host5" }, new String[] { "rack1", "/otherrack2" },
      Priority.newInstance(3), false);
  client.addContainerRequest(bothLevelRequest2);
}
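The verifyResourceRequest helper used throughout this test is not shown here; it checks the relaxLocality flag of the ResourceRequests that AMRMClientImpl expands each ask into per priority, resource name, and capability. A rough, weaker stand-in built only on the public getMatchingRequests API might look like the following (an illustration, not the test's actual helper; it uses java.util.List/Collection and org.junit.Assert.assertFalse, and it only confirms an ask was recorded at a location, not the relaxLocality flag of the expanded request):

private static void assertAskRecordedAt(AMRMClientImpl<ContainerRequest> client,
    ContainerRequest request, String location) {
  // getMatchingRequests returns the outstanding ContainerRequests stored under the
  // given priority, resource name (host, rack, or ResourceRequest.ANY), and capability;
  // an empty result means no ask was recorded at that location.
  List<? extends Collection<ContainerRequest>> matches =
      client.getMatchingRequests(request.getPriority(), location, request.getCapability());
  assertFalse("no container ask recorded at " + location, matches.isEmpty());
}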