Use of org.apache.hadoop.yarn.api.records.Container in project hadoop by apache.
The class ApplicationMasterService, method registerApplicationMaster.
@Override
public RegisterApplicationMasterResponse registerApplicationMaster(
    RegisterApplicationMasterRequest request) throws YarnException, IOException {
  AMRMTokenIdentifier amrmTokenIdentifier = YarnServerSecurityUtils.authorizeRequest();
  ApplicationAttemptId applicationAttemptId = amrmTokenIdentifier.getApplicationAttemptId();
  ApplicationId appID = applicationAttemptId.getApplicationId();
  AllocateResponseLock lock = responseMap.get(applicationAttemptId);
  if (lock == null) {
    RMAuditLogger.logFailure(this.rmContext.getRMApps().get(appID).getUser(),
        AuditConstants.REGISTER_AM,
        "Application doesn't exist in cache " + applicationAttemptId,
        "ApplicationMasterService", "Error in registering application master",
        appID, applicationAttemptId);
    throwApplicationDoesNotExistInCacheException(applicationAttemptId);
  }
  // Allow only one thread in AM to do registerApp at a time.
  synchronized (lock) {
    AllocateResponse lastResponse = lock.getAllocateResponse();
    if (hasApplicationMasterRegistered(applicationAttemptId)) {
      String message = "Application Master is already registered : " + appID;
      LOG.warn(message);
      RMAuditLogger.logFailure(this.rmContext.getRMApps().get(appID).getUser(),
          AuditConstants.REGISTER_AM, "", "ApplicationMasterService", message,
          appID, applicationAttemptId);
      throw new InvalidApplicationMasterRequestException(message);
    }
    this.amLivelinessMonitor.receivedPing(applicationAttemptId);
    RMApp app = this.rmContext.getRMApps().get(appID);
    // Reset the response id to 0 so we can identify whether the application
    // master has registered for the respective attempt id.
    lastResponse.setResponseId(0);
    lock.setAllocateResponse(lastResponse);
    LOG.info("AM registration " + applicationAttemptId);
    this.rmContext.getDispatcher().getEventHandler().handle(
        new RMAppAttemptRegistrationEvent(applicationAttemptId, request.getHost(),
            request.getRpcPort(), request.getTrackingUrl()));
    RMAuditLogger.logSuccess(app.getUser(), AuditConstants.REGISTER_AM,
        "ApplicationMasterService", appID, applicationAttemptId);
    // Pick up min/max resource from scheduler...
    RegisterApplicationMasterResponse response =
        recordFactory.newRecordInstance(RegisterApplicationMasterResponse.class);
    response.setMaximumResourceCapability(
        rScheduler.getMaximumResourceCapability(app.getQueue()));
    response.setApplicationACLs(app.getRMAppAttempt(applicationAttemptId)
        .getSubmissionContext().getAMContainerSpec().getApplicationACLs());
    response.setQueue(app.getQueue());
    if (UserGroupInformation.isSecurityEnabled()) {
      LOG.info("Setting client token master key");
      response.setClientToAMTokenMasterKey(java.nio.ByteBuffer.wrap(
          rmContext.getClientToAMTokenSecretManager()
              .getMasterKey(applicationAttemptId).getEncoded()));
    }
    // For work-preserving AM restart, hand back the previous attempts' containers
    // and corresponding NM tokens.
    if (app.getApplicationSubmissionContext()
        .getKeepContainersAcrossApplicationAttempts()) {
      List<Container> transferredContainers =
          rScheduler.getTransferredContainers(applicationAttemptId);
      if (!transferredContainers.isEmpty()) {
        response.setContainersFromPreviousAttempts(transferredContainers);
        List<NMToken> nmTokens = new ArrayList<NMToken>();
        for (Container container : transferredContainers) {
          try {
            NMToken token = rmContext.getNMTokenSecretManager()
                .createAndGetNMToken(app.getUser(), applicationAttemptId, container);
            if (null != token) {
              nmTokens.add(token);
            }
          } catch (IllegalArgumentException e) {
            // If the cause is a DNS issue, rethrow the UnknownHostException so it
            // will be automatically retried by RMProxy in the RPC layer.
            if (e.getCause() instanceof UnknownHostException) {
              throw (UnknownHostException) e.getCause();
            }
          }
        }
        response.setNMTokensFromPreviousAttempts(nmTokens);
        LOG.info("Application " + appID + " retrieved "
            + transferredContainers.size() + " containers from previous"
            + " attempts and " + nmTokens.size() + " NM tokens.");
      }
    }
    response.setSchedulerResourceTypes(rScheduler.getSchedulingResourceTypes());
    return response;
  }
}
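The handler above sits behind the ApplicationMasterProtocol. For context, here is a minimal client-side sketch (not taken from the Hadoop source) of how an ApplicationMaster typically drives this RPC through the AMRMClient library and reads back the fields populated above. It assumes it runs inside an AM container with a valid AMRM token; the host, port, and tracking URL are placeholder values.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.api.protocolrecords.RegisterApplicationMasterResponse;
import org.apache.hadoop.yarn.api.records.Container;
import org.apache.hadoop.yarn.client.api.AMRMClient;
import org.apache.hadoop.yarn.conf.YarnConfiguration;

public class RegisterAmSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new YarnConfiguration();
    // Client handle for the ApplicationMasterProtocol served by ApplicationMasterService.
    AMRMClient<AMRMClient.ContainerRequest> amRMClient = AMRMClient.createAMRMClient();
    amRMClient.init(conf);
    amRMClient.start();
    // Placeholder host/port/tracking URL; this call lands in registerApplicationMaster above.
    RegisterApplicationMasterResponse response = amRMClient.registerApplicationMaster(
        "am-host.example.com", 0, "http://am-host.example.com:8080/");
    // Fields set by the ResourceManager in the handler shown above.
    System.out.println("Queue: " + response.getQueue());
    System.out.println("Max capability: " + response.getMaximumResourceCapability());
    for (Container c : response.getContainersFromPreviousAttempts()) {
      System.out.println("Container kept from previous attempt: " + c.getId());
    }
    // A real AM would also call unregisterApplicationMaster(...) before stopping.
    amRMClient.stop();
  }
}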
Use of org.apache.hadoop.yarn.api.records.Container in project hadoop by apache.
The class OpportunisticContainerAllocatorAMService, method handleNewContainers.
private void handleNewContainers(List<Container> allocContainers, boolean isRemotelyAllocated) {
  for (Container container : allocContainers) {
    // Create RMContainer
    RMContainer rmContainer = SchedulerUtils.createOpportunisticRmContainer(
        rmContext, container, isRemotelyAllocated);
    rmContainer.handle(
        new RMContainerEvent(container.getId(), RMContainerEventType.ACQUIRED));
  }
}
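handleNewContainers consumes plain Container records and wraps them as tracked RMContainers. Below is a minimal sketch (not from the Hadoop source) of how such a Container record is assembled with the factories in org.apache.hadoop.yarn.api.records; every identifier, address, and size is a placeholder value.

import java.util.Arrays;
import java.util.List;
import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.Container;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.api.records.NodeId;
import org.apache.hadoop.yarn.api.records.Priority;
import org.apache.hadoop.yarn.api.records.Resource;

public class ContainerRecordSketch {
  public static void main(String[] args) {
    // Placeholder application, attempt, and container identifiers.
    ApplicationId appId = ApplicationId.newInstance(System.currentTimeMillis(), 1);
    ApplicationAttemptId attemptId = ApplicationAttemptId.newInstance(appId, 1);
    ContainerId containerId = ContainerId.newContainerId(attemptId, 1L);
    Container container = Container.newInstance(
        containerId,
        NodeId.newInstance("nm-host.example.com", 45454),
        "nm-host.example.com:8042",           // NM HTTP address (placeholder)
        Resource.newInstance(1024, 1),        // 1 GB, 1 vcore (placeholder)
        Priority.newInstance(0),
        null);                                // container token omitted in this sketch
    List<Container> allocated = Arrays.asList(container);
    System.out.println("Allocated: " + allocated);
  }
}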
Use of org.apache.hadoop.yarn.api.records.Container in project hadoop by apache.
The class Application, method assign.
private synchronized void assign(SchedulerRequestKey schedulerKey, NodeType type,
    List<Container> containers) throws IOException, YarnException {
  for (Iterator<Container> i = containers.iterator(); i.hasNext(); ) {
    Container container = i.next();
    String host = container.getNodeId().toString();
    if (Resources.equals(requestSpec.get(schedulerKey), container.getResource())) {
      // See which task can use this container
      for (Iterator<Task> t = tasks.get(schedulerKey).iterator(); t.hasNext(); ) {
        Task task = t.next();
        if (task.getState() == State.PENDING && task.canSchedule(type, host)) {
          NodeManager nodeManager = getNodeManager(host);
          task.start(nodeManager, container.getId());
          i.remove();
          // Track application resource usage
          Resources.addTo(used, container.getResource());
          LOG.info("Assigned container (" + container + ") of type " + type
              + " to task " + task.getTaskId() + " at priority "
              + schedulerKey.getPriority() + " on node " + nodeManager.getHostName()
              + ", currently using " + used + " resources");
          // Update resource requests
          updateResourceRequests(requests.get(schedulerKey), type, task);
          // Launch the container
          StartContainerRequest scRequest = StartContainerRequest.newInstance(
              createCLC(), container.getContainerToken());
          List<StartContainerRequest> list = new ArrayList<StartContainerRequest>();
          list.add(scRequest);
          StartContainersRequest allRequests = StartContainersRequest.newInstance(list);
          nodeManager.startContainers(allRequests);
          break;
        }
      }
    }
  }
}
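The test helper above launches containers by hand-building a StartContainersRequest against its mock NodeManager. Outside a test harness, the same step usually goes through the NMClient library; the sketch below is not from the Hadoop source and assumes an already-allocated Container plus a placeholder shell command standing in for createCLC().

import java.util.Collections;
import org.apache.hadoop.yarn.api.records.Container;
import org.apache.hadoop.yarn.api.records.ContainerLaunchContext;
import org.apache.hadoop.yarn.client.api.NMClient;
import org.apache.hadoop.yarn.conf.YarnConfiguration;

public class LaunchContainerSketch {
  static void launch(Container container) throws Exception {
    NMClient nmClient = NMClient.createNMClient();
    nmClient.init(new YarnConfiguration());
    nmClient.start();
    // Rough equivalent of createCLC() above: what to run inside the container.
    ContainerLaunchContext clc = ContainerLaunchContext.newInstance(
        Collections.emptyMap(),                // local resources
        Collections.emptyMap(),                // environment
        Collections.singletonList("sleep 30"), // placeholder command
        null, null, null);
    // NMClient reads the NodeId and container token out of the Container record.
    nmClient.startContainer(container, clc);
    nmClient.stop();
  }
}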
Use of org.apache.hadoop.yarn.api.records.Container in project hadoop by apache.
The class Application, method getResources.
public synchronized List<Container> getResources() throws IOException {
  if (LOG.isDebugEnabled()) {
    LOG.debug("getResources begin:" + " application=" + applicationId
        + " #ask=" + ask.size());
    for (ResourceRequest request : ask) {
      LOG.debug("getResources:" + " application=" + applicationId
          + " ask-request=" + request);
    }
  }
  // Get resources from the ResourceManager
  Allocation allocation = resourceManager.getResourceScheduler().allocate(
      applicationAttemptId, new ArrayList<ResourceRequest>(ask),
      new ArrayList<ContainerId>(), null, null, new ContainerUpdates());
  if (LOG.isInfoEnabled()) {
    LOG.info("-=======" + applicationAttemptId + System.lineSeparator()
        + "----------" + resourceManager.getRMContext().getRMApps()
            .get(applicationId).getRMAppAttempt(applicationAttemptId));
  }
  List<Container> containers = allocation.getContainers();
  // Clear state for next interaction with ResourceManager
  ask.clear();
  if (LOG.isDebugEnabled()) {
    LOG.debug("getResources() for " + applicationId + ":" + " ask=" + ask.size()
        + " received=" + containers.size());
  }
  return containers;
}
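getResources() calls the scheduler directly because this Application class is a test helper. The corresponding AM-side flow goes through AMRMClient: add a ContainerRequest, heartbeat with allocate(), and collect the returned Container records. The sketch below is not from the Hadoop source; the resource size, priority, and the already-started amRMClient are assumptions.

import java.util.List;
import org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse;
import org.apache.hadoop.yarn.api.records.Container;
import org.apache.hadoop.yarn.api.records.Priority;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.client.api.AMRMClient;
import org.apache.hadoop.yarn.client.api.AMRMClient.ContainerRequest;

public class AllocateSketch {
  static List<Container> requestContainers(AMRMClient<ContainerRequest> amRMClient)
      throws Exception {
    // Ask for one 1 GB / 1 vcore container anywhere in the cluster (placeholder sizes).
    ContainerRequest request = new ContainerRequest(
        Resource.newInstance(1024, 1), null, null, Priority.newInstance(0));
    amRMClient.addContainerRequest(request);
    // One heartbeat; a real AM calls allocate() periodically until satisfied.
    AllocateResponse response = amRMClient.allocate(0.1f);
    return response.getAllocatedContainers();
  }
}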
Use of org.apache.hadoop.yarn.api.records.Container in project hadoop by apache.
The class NodeManager, method getContainerStatuses.
@Override
public synchronized GetContainerStatusesResponse getContainerStatuses(
    GetContainerStatusesRequest request) throws YarnException {
  List<ContainerStatus> statuses = new ArrayList<ContainerStatus>();
  for (ContainerId containerId : request.getContainerIds()) {
    // Look up the requested container among those of the owning application.
    List<Container> appContainers =
        containers.get(containerId.getApplicationAttemptId().getApplicationId());
    Container container = null;
    for (Container c : appContainers) {
      if (c.getId().equals(containerId)) {
        container = c;
      }
    }
    // Only report containers whose status has a state recorded.
    if (container != null && containerStatusMap.get(container).getState() != null) {
      statuses.add(containerStatusMap.get(container));
    }
  }
  return GetContainerStatusesResponse.newInstance(statuses, null);
}
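This mock NodeManager answers getContainerStatuses from its in-memory maps. From the client side, the same information is usually fetched per container through NMClient, as in this sketch (not from the Hadoop source), which assumes an already-started NMClient and an allocated Container.

import org.apache.hadoop.yarn.api.records.Container;
import org.apache.hadoop.yarn.api.records.ContainerStatus;
import org.apache.hadoop.yarn.client.api.NMClient;

public class ContainerStatusSketch {
  static ContainerStatus queryStatus(NMClient nmClient, Container container) throws Exception {
    // The Container record carries both the ContainerId and the NodeId needed here.
    return nmClient.getContainerStatus(container.getId(), container.getNodeId());
  }
}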