Use of org.apache.hadoop.yarn.api.records.Container in project hadoop by apache.
The class Application, method assign.
private synchronized void assign(SchedulerRequestKey schedulerKey, NodeType type,
    List<Container> containers) throws IOException, YarnException {
  for (Iterator<Container> i = containers.iterator(); i.hasNext();) {
    Container container = i.next();
    String host = container.getNodeId().toString();
    if (Resources.equals(requestSpec.get(schedulerKey), container.getResource())) {
      // See which task can use this container
      for (Iterator<Task> t = tasks.get(schedulerKey).iterator(); t.hasNext();) {
        Task task = t.next();
        if (task.getState() == State.PENDING && task.canSchedule(type, host)) {
          NodeManager nodeManager = getNodeManager(host);
          task.start(nodeManager, container.getId());
          i.remove();
          // Track application resource usage
          Resources.addTo(used, container.getResource());
          LOG.info("Assigned container (" + container + ") of type " + type
              + " to task " + task.getTaskId() + " at priority "
              + schedulerKey.getPriority() + " on node " + nodeManager.getHostName()
              + ", currently using " + used + " resources");
          // Update resource requests
          updateResourceRequests(requests.get(schedulerKey), type, task);
          // Launch the container
          StartContainerRequest scRequest = StartContainerRequest.newInstance(
              createCLC(), container.getContainerToken());
          List<StartContainerRequest> list = new ArrayList<StartContainerRequest>();
          list.add(scRequest);
          StartContainersRequest allRequests = StartContainersRequest.newInstance(list);
          nodeManager.startContainers(allRequests);
          break;
        }
      }
    }
  }
}
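
For context, the launch step above boils down to wrapping a ContainerLaunchContext and the container's token in a StartContainersRequest. A minimal standalone sketch of that step, assuming container was allocated by the ResourceManager and containerManager is a ContainerManagementProtocol proxy for the container's node (both names are placeholders, not part of the snippet above):

// Sketch only: build and send a launch request for one allocated container.
ContainerLaunchContext clc = ContainerLaunchContext.newInstance(
    Collections.<String, LocalResource>emptyMap(),  // no local resources
    Collections.<String, String>emptyMap(),         // no environment overrides
    Arrays.asList("sleep", "60"),                   // command the container runs
    null, null, null);                              // no service data, tokens or ACLs
StartContainerRequest scRequest =
    StartContainerRequest.newInstance(clc, container.getContainerToken());
StartContainersRequest allRequests =
    StartContainersRequest.newInstance(Collections.singletonList(scRequest));
containerManager.startContainers(allRequests);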
Use of org.apache.hadoop.yarn.api.records.Container in project hadoop by apache.
The class Application, method getResources.
public synchronized List<Container> getResources() throws IOException {
  if (LOG.isDebugEnabled()) {
    LOG.debug("getResources begin:" + " application=" + applicationId
        + " #ask=" + ask.size());
    for (ResourceRequest request : ask) {
      LOG.debug("getResources:" + " application=" + applicationId
          + " ask-request=" + request);
    }
  }
  // Get resources from the ResourceManager
  Allocation allocation = resourceManager.getResourceScheduler().allocate(
      applicationAttemptId, new ArrayList<ResourceRequest>(ask),
      new ArrayList<ContainerId>(), null, null, new ContainerUpdates());
  if (LOG.isInfoEnabled()) {
    LOG.info("-=======" + applicationAttemptId + System.lineSeparator()
        + "----------" + resourceManager.getRMContext().getRMApps()
            .get(applicationId).getRMAppAttempt(applicationAttemptId));
  }
  List<Container> containers = allocation.getContainers();
  // Clear state for next interaction with ResourceManager
  ask.clear();
  if (LOG.isDebugEnabled()) {
    LOG.debug("getResources() for " + applicationId + ":" + " ask=" + ask.size()
        + " received=" + containers.size());
  }
  return containers;
}
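
Because the scheduler hands out containers asynchronously, an allocate call may return fewer containers than were asked for, so callers typically poll. A minimal polling sketch, assuming application is an instance of the test Application class above, numContainers is a hypothetical target count, and the loop runs inside a method that declares IOException and InterruptedException:

// Sketch only: keep asking until enough containers have been allocated.
List<Container> assigned = new ArrayList<Container>();
while (assigned.size() < numContainers) {
  assigned.addAll(application.getResources());  // may return an empty list
  Thread.sleep(200);                            // back off before asking again
}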
Use of org.apache.hadoop.yarn.api.records.Container in project hadoop by apache.
The class NodeManager, method getContainerStatuses.
@Override
public synchronized GetContainerStatusesResponse getContainerStatuses(
    GetContainerStatusesRequest request) throws YarnException {
  List<ContainerStatus> statuses = new ArrayList<ContainerStatus>();
  for (ContainerId containerId : request.getContainerIds()) {
    List<Container> appContainers =
        containers.get(containerId.getApplicationAttemptId().getApplicationId());
    Container container = null;
    for (Container c : appContainers) {
      if (c.getId().equals(containerId)) {
        container = c;
      }
    }
    if (container != null && containerStatusMap.get(container).getState() != null) {
      statuses.add(containerStatusMap.get(container));
    }
  }
  return GetContainerStatusesResponse.newInstance(statuses, null);
}
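
The caller side is symmetric: build a GetContainerStatusesRequest from the container IDs of interest and read the statuses off the response. A brief sketch, assuming nodeManager is the test NodeManager above and containerId identifies a container it has already started (both placeholders):

// Sketch only: query the status of a single container.
GetContainerStatusesRequest statusRequest =
    GetContainerStatusesRequest.newInstance(Collections.singletonList(containerId));
GetContainerStatusesResponse statusResponse =
    nodeManager.getContainerStatuses(statusRequest);
for (ContainerStatus status : statusResponse.getContainerStatuses()) {
  LOG.info(status.getContainerId() + " is in state " + status.getState());
}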
Use of org.apache.hadoop.yarn.api.records.Container in project hadoop by apache.
The class NodeManager, method startContainers.
@Override
public synchronized StartContainersResponse startContainers(
    StartContainersRequest requests) throws YarnException {
  for (StartContainerRequest request : requests.getStartContainerRequests()) {
    Token containerToken = request.getContainerToken();
    ContainerTokenIdentifier tokenId = null;
    try {
      tokenId = BuilderUtils.newContainerTokenIdentifier(containerToken);
    } catch (IOException e) {
      throw RPCUtil.getRemoteException(e);
    }
    ContainerId containerID = tokenId.getContainerID();
    ApplicationId applicationId =
        containerID.getApplicationAttemptId().getApplicationId();
    List<Container> applicationContainers = containers.get(applicationId);
    if (applicationContainers == null) {
      applicationContainers = new ArrayList<Container>();
      containers.put(applicationId, applicationContainers);
    }
    // Sanity check
    for (Container container : applicationContainers) {
      if (container.getId().compareTo(containerID) == 0) {
        throw new IllegalStateException("Container " + containerID
            + " already setup on node " + containerManagerAddress);
      }
    }
    Container container = BuilderUtils.newContainer(containerID, this.nodeId,
        nodeHttpAddress, // DKDC - Doesn't matter
        tokenId.getResource(), // DKDC - Doesn't matter
        null, // DKDC - Doesn't matter
        null);
    ContainerStatus containerStatus = BuilderUtils.newContainerStatus(
        container.getId(), ContainerState.NEW, "", -1000, container.getResource());
    applicationContainers.add(container);
    containerStatusMap.put(container, containerStatus);
    Resources.subtractFrom(available, tokenId.getResource());
    Resources.addTo(used, tokenId.getResource());
    if (LOG.isDebugEnabled()) {
      LOG.debug("startContainer:" + " node=" + containerManagerAddress
          + " application=" + applicationId + " container=" + container
          + " available=" + available + " used=" + used);
    }
  }
  StartContainersResponse response =
      StartContainersResponse.newInstance(null, null, null);
  return response;
}
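
The available/used bookkeeping above relies on the org.apache.hadoop.yarn.util.resource.Resources helpers, which mutate their first argument in place. A small standalone sketch of that accounting, with made-up capacities:

// Sketch only: how a node's headroom shrinks as containers are started.
Resource total = Resource.newInstance(8 * 1024, 8);  // 8 GB, 8 vcores
Resource available = Resources.clone(total);
Resource used = Resource.newInstance(0, 0);

Resource containerSize = Resource.newInstance(1024, 1);  // one container's allocation
Resources.subtractFrom(available, containerSize);        // less headroom on the node
Resources.addTo(used, containerSize);                    // more resources in use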
Use of org.apache.hadoop.yarn.api.records.Container in project hadoop by apache.
The class MockNM, method containerIncreaseStatus.
public void containerIncreaseStatus(Container container) throws Exception {
  ContainerStatus containerStatus = BuilderUtils.newContainerStatus(
      container.getId(), ContainerState.RUNNING, "Success", 0,
      container.getResource());
  List<Container> increasedConts = Collections.singletonList(container);
  nodeHeartbeat(Collections.singletonList(containerStatus), increasedConts,
      true, ++responseId);
}
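
A test might use this to acknowledge a container resize that the ResourceManager has already approved. A hedged sketch, assuming nm is a MockNM registered with a running MockRM and containerId refers to an already-running container (both placeholders):

// Sketch only: report an increased container back to the RM via heartbeat.
Container increased = Container.newInstance(
    containerId, nm.getNodeId(), "localhost:0",
    Resource.newInstance(2 * 1024, 1),  // the new, larger allocation
    Priority.newInstance(0), null);     // priority and token are irrelevant here
nm.containerIncreaseStatus(increased);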