Example 66 with Resource

Use of org.apache.hadoop.yarn.api.records.Resource in project hadoop by apache.

From class RMAdminCLI, method updateNodeResource:

private int updateNodeResource(String nodeIdStr, int memSize, int cores, int overCommitTimeout) throws IOException, YarnException {
    // check resource value first
    if (invalidResourceValue(memSize, cores)) {
        throw new IllegalArgumentException("Invalid resource value: " + "(" + memSize + "," + cores + ") for updateNodeResource.");
    }
    // Build the update request and send it to the ResourceManager admin protocol
    ResourceManagerAdministrationProtocol adminProtocol = createAdminProtocol();
    UpdateNodeResourceRequest request = recordFactory.newRecordInstance(UpdateNodeResourceRequest.class);
    NodeId nodeId = NodeId.fromString(nodeIdStr);
    Resource resource = Resources.createResource(memSize, cores);
    Map<NodeId, ResourceOption> resourceMap = new HashMap<NodeId, ResourceOption>();
    resourceMap.put(nodeId, ResourceOption.newInstance(resource, overCommitTimeout));
    request.setNodeResourceMap(resourceMap);
    adminProtocol.updateNodeResource(request);
    return 0;
}
Also used: ResourceManagerAdministrationProtocol (org.apache.hadoop.yarn.server.api.ResourceManagerAdministrationProtocol), HashMap (java.util.HashMap), ResourceOption (org.apache.hadoop.yarn.api.records.ResourceOption), NodeId (org.apache.hadoop.yarn.api.records.NodeId), Resource (org.apache.hadoop.yarn.api.records.Resource), UpdateNodeResourceRequest (org.apache.hadoop.yarn.server.api.protocolrecords.UpdateNodeResourceRequest)
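
For context, the request body above is just a map from NodeId to ResourceOption. Below is a minimal standalone sketch of building that map; the host, port, memory, and vcore values are illustrative assumptions, not taken from the Hadoop source.

import java.util.HashMap;
import java.util.Map;
import org.apache.hadoop.yarn.api.records.NodeId;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.api.records.ResourceOption;

public class UpdateNodeResourceSketch {
    public static void main(String[] args) {
        // Hypothetical node address and sizes; -1 disables the over-commit timeout.
        NodeId nodeId = NodeId.fromString("host1.example.com:45454");
        Resource resource = Resource.newInstance(4096, 8); // assumed 4096 MB, 8 vcores
        Map<NodeId, ResourceOption> resourceMap = new HashMap<NodeId, ResourceOption>();
        resourceMap.put(nodeId, ResourceOption.newInstance(resource, -1));
        System.out.println(resourceMap);
    }
}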

Example 67 with Resource

Use of org.apache.hadoop.yarn.api.records.Resource in project hadoop by apache.

From class Resources, method multiplyAndRoundUp:

public static Resource multiplyAndRoundUp(Resource lhs, double by) {
    Resource out = clone(lhs);
    out.setMemorySize((long) Math.ceil(lhs.getMemorySize() * by));
    out.setVirtualCores((int) Math.ceil(lhs.getVirtualCores() * by));
    return out;
}
Also used: Resource (org.apache.hadoop.yarn.api.records.Resource)
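
A short usage sketch with assumed values; note that Math.ceil rounds each dimension up independently, so the vcore count rounds up even when the product is fractional.

import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.util.resource.Resources;

public class MultiplyAndRoundUpSketch {
    public static void main(String[] args) {
        Resource r = Resource.newInstance(4096, 3); // assumed 4096 MB, 3 vcores
        Resource half = Resources.multiplyAndRoundUp(r, 0.5);
        // Memory: ceil(4096 * 0.5) = 2048; vcores: ceil(3 * 0.5) = 2.
        System.out.println(half.getMemorySize() + " MB, " + half.getVirtualCores() + " vcores");
    }
}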

Example 68 with Resource

Use of org.apache.hadoop.yarn.api.records.Resource in project hadoop by apache.

From class TestYarnServerApiClasses, method getIncreasedContainer:

private Container getIncreasedContainer(int containerID, int appAttemptId, int memory, int vCores) {
    ContainerId containerId = getContainerId(containerID, appAttemptId);
    Resource capability = Resource.newInstance(memory, vCores);
    return Container.newInstance(containerId, null, null, capability, null, null);
}
Also used: ContainerId (org.apache.hadoop.yarn.api.records.ContainerId), Resource (org.apache.hadoop.yarn.api.records.Resource)
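
The getContainerId helper is not shown above. Here is a self-contained sketch under the assumption that it simply wraps ContainerId.newContainerId; all ids and resource sizes are illustrative.

import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.Container;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.api.records.Resource;

public class IncreasedContainerSketch {
    public static void main(String[] args) {
        ApplicationId appId = ApplicationId.newInstance(0L, 1);
        ApplicationAttemptId attemptId = ApplicationAttemptId.newInstance(appId, 2);
        ContainerId containerId = ContainerId.newContainerId(attemptId, 3);
        Resource capability = Resource.newInstance(2048, 4); // assumed 2048 MB, 4 vcores
        // NodeId, node HTTP address, priority, and token are left null, as in the test helper.
        Container container = Container.newInstance(containerId, null, null, capability, null, null);
        System.out.println(container.getId() + " -> " + container.getResource());
    }
}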

Example 69 with Resource

Use of org.apache.hadoop.yarn.api.records.Resource in project hadoop by apache.

From class TestNMLeveldbStateStoreService, method createContainerRequest:

private StartContainerRequest createContainerRequest(ContainerId containerId) {
    LocalResource lrsrc = LocalResource.newInstance(URL.newInstance("hdfs", "somehost", 12345, "/some/path/to/rsrc"), LocalResourceType.FILE, LocalResourceVisibility.APPLICATION, 123L, 1234567890L);
    Map<String, LocalResource> localResources = new HashMap<String, LocalResource>();
    localResources.put("rsrc", lrsrc);
    Map<String, String> env = new HashMap<String, String>();
    env.put("somevar", "someval");
    List<String> containerCmds = new ArrayList<String>();
    containerCmds.add("somecmd");
    containerCmds.add("somearg");
    Map<String, ByteBuffer> serviceData = new HashMap<String, ByteBuffer>();
    serviceData.put("someservice", ByteBuffer.wrap(new byte[] { 0x1, 0x2, 0x3 }));
    ByteBuffer containerTokens = ByteBuffer.wrap(new byte[] { 0x7, 0x8, 0x9, 0xa });
    Map<ApplicationAccessType, String> acls = new HashMap<ApplicationAccessType, String>();
    acls.put(ApplicationAccessType.VIEW_APP, "viewuser");
    acls.put(ApplicationAccessType.MODIFY_APP, "moduser");
    ContainerLaunchContext clc = ContainerLaunchContext.newInstance(localResources, env, containerCmds, serviceData, containerTokens, acls);
    Resource containerRsrc = Resource.newInstance(1357, 3);
    ContainerTokenIdentifier containerTokenId = new ContainerTokenIdentifier(containerId, "host", "user", containerRsrc, 9876543210L, 42, 2468, Priority.newInstance(7), 13579);
    Token containerToken = Token.newInstance(containerTokenId.getBytes(), ContainerTokenIdentifier.KIND.toString(), "password".getBytes(), "tokenservice");
    return StartContainerRequest.newInstance(clc, containerToken);
}
Also used: HashMap (java.util.HashMap), ArrayList (java.util.ArrayList), Resource (org.apache.hadoop.yarn.api.records.Resource), LocalResource (org.apache.hadoop.yarn.api.records.LocalResource), Token (org.apache.hadoop.yarn.api.records.Token), ContainerLaunchContext (org.apache.hadoop.yarn.api.records.ContainerLaunchContext), ByteBuffer (java.nio.ByteBuffer), ContainerTokenIdentifier (org.apache.hadoop.yarn.security.ContainerTokenIdentifier), ApplicationAccessType (org.apache.hadoop.yarn.api.records.ApplicationAccessType)
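
Since the state store persists the token's identifier as bytes, a hedged round-trip sketch may help. It assumes ContainerTokenIdentifier keeps a no-argument constructor for deserialization, and all field values are illustrative (mirroring the test above).

import java.io.ByteArrayInputStream;
import java.io.DataInputStream;
import java.io.IOException;
import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.api.records.Priority;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.security.ContainerTokenIdentifier;

public class TokenIdentifierRoundTripSketch {
    public static void main(String[] args) throws IOException {
        ContainerId cid = ContainerId.newContainerId(
                ApplicationAttemptId.newInstance(ApplicationId.newInstance(0L, 1), 1), 1);
        ContainerTokenIdentifier original = new ContainerTokenIdentifier(cid, "host", "user",
                Resource.newInstance(1357, 3), 9876543210L, 42, 2468, Priority.newInstance(7), 13579);
        // Deserialize from the same bytes that Token.newInstance received above.
        ContainerTokenIdentifier decoded = new ContainerTokenIdentifier();
        decoded.readFields(new DataInputStream(new ByteArrayInputStream(original.getBytes())));
        System.out.println(decoded.getContainerID()); // matches cid
    }
}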

Example 70 with Resource

Use of org.apache.hadoop.yarn.api.records.Resource in project hadoop by apache.

From class TestContainersMonitor, method testContainerKillOnMemoryOverflow:

@Test
public void testContainerKillOnMemoryOverflow() throws IOException, InterruptedException, YarnException {
    if (!ProcfsBasedProcessTree.isAvailable()) {
        return;
    }
    containerManager.start();
    File scriptFile = new File(tmpDir, "scriptFile.sh");
    PrintWriter fileWriter = new PrintWriter(scriptFile);
    File processStartFile = new File(tmpDir, "start_file.txt").getAbsoluteFile();
    // umask 0 so that the start file is readable by the test
    fileWriter.write("\numask 0");
    fileWriter.write("\necho Hello World! > " + processStartFile);
    fileWriter.write("\necho $$ >> " + processStartFile);
    fileWriter.write("\nsleep 15");
    fileWriter.close();
    ContainerLaunchContext containerLaunchContext = recordFactory.newRecordInstance(ContainerLaunchContext.class);
    // ////// Construct the Container-id
    ApplicationId appId = ApplicationId.newInstance(0, 0);
    ApplicationAttemptId appAttemptId = ApplicationAttemptId.newInstance(appId, 1);
    ContainerId cId = ContainerId.newContainerId(appAttemptId, 0);
    URL resource_alpha = URL.fromPath(localFS.makeQualified(new Path(scriptFile.getAbsolutePath())));
    LocalResource rsrc_alpha = recordFactory.newRecordInstance(LocalResource.class);
    rsrc_alpha.setResource(resource_alpha);
    rsrc_alpha.setSize(-1);
    rsrc_alpha.setVisibility(LocalResourceVisibility.APPLICATION);
    rsrc_alpha.setType(LocalResourceType.FILE);
    rsrc_alpha.setTimestamp(scriptFile.lastModified());
    String destinationFile = "dest_file";
    Map<String, LocalResource> localResources = new HashMap<String, LocalResource>();
    localResources.put(destinationFile, rsrc_alpha);
    containerLaunchContext.setLocalResources(localResources);
    List<String> commands = new ArrayList<String>();
    commands.add("/bin/bash");
    commands.add(scriptFile.getAbsolutePath());
    containerLaunchContext.setCommands(commands);
    Resource r = BuilderUtils.newResource(0, 0);
    ContainerTokenIdentifier containerIdentifier = new ContainerTokenIdentifier(cId, context.getNodeId().toString(), user, r, System.currentTimeMillis() + 120000, 123, DUMMY_RM_IDENTIFIER, Priority.newInstance(0), 0);
    Token containerToken = BuilderUtils.newContainerToken(context.getNodeId(), containerManager.getContext().getContainerTokenSecretManager().createPassword(containerIdentifier), containerIdentifier);
    StartContainerRequest scRequest = StartContainerRequest.newInstance(containerLaunchContext, containerToken);
    List<StartContainerRequest> list = new ArrayList<StartContainerRequest>();
    list.add(scRequest);
    StartContainersRequest allRequests = StartContainersRequest.newInstance(list);
    containerManager.startContainers(allRequests);
    int timeoutSecs = 0;
    while (!processStartFile.exists() && timeoutSecs++ < 20) {
        Thread.sleep(1000);
        LOG.info("Waiting for process start-file to be created");
    }
    Assert.assertTrue("ProcessStartFile doesn't exist!", processStartFile.exists());
    // Now verify the contents of the file
    BufferedReader reader = new BufferedReader(new FileReader(processStartFile));
    Assert.assertEquals("Hello World!", reader.readLine());
    // Get the pid of the process
    String pid = reader.readLine().trim();
    // No more lines
    Assert.assertEquals(null, reader.readLine());
    BaseContainerManagerTest.waitForContainerState(containerManager, cId, ContainerState.COMPLETE, 60);
    List<ContainerId> containerIds = new ArrayList<ContainerId>();
    containerIds.add(cId);
    GetContainerStatusesRequest gcsRequest = GetContainerStatusesRequest.newInstance(containerIds);
    ContainerStatus containerStatus = containerManager.getContainerStatuses(gcsRequest).getContainerStatuses().get(0);
    Assert.assertEquals(ContainerExitStatus.KILLED_EXCEEDED_VMEM, containerStatus.getExitStatus());
    String expectedMsgPattern = "Container \\[pid=" + pid + ",containerID=" + cId + "\\] is running beyond virtual memory limits. Current usage: " + "[0-9.]+ ?[KMGTPE]?B of [0-9.]+ ?[KMGTPE]?B physical memory used; " + "[0-9.]+ ?[KMGTPE]?B of [0-9.]+ ?[KMGTPE]?B virtual memory used. " + "Killing container.\nDump of the process-tree for " + cId + " :\n";
    Pattern pat = Pattern.compile(expectedMsgPattern);
    Assert.assertEquals("Expected message pattern is: " + expectedMsgPattern + "\n\nObserved message is: " + containerStatus.getDiagnostics(), true, pat.matcher(containerStatus.getDiagnostics()).find());
    // Assert that the process is not alive anymore
    Assert.assertFalse("Process is still alive!", exec.signalContainer(new ContainerSignalContext.Builder().setUser(user).setPid(pid).setSignal(Signal.NULL).build()));
}
Also used: HashMap (java.util.HashMap), GetContainerStatusesRequest (org.apache.hadoop.yarn.api.protocolrecords.GetContainerStatusesRequest), ArrayList (java.util.ArrayList), Token (org.apache.hadoop.yarn.api.records.Token), URL (org.apache.hadoop.yarn.api.records.URL), ContainerTokenIdentifier (org.apache.hadoop.yarn.security.ContainerTokenIdentifier), ContainerStatus (org.apache.hadoop.yarn.api.records.ContainerStatus), ContainerId (org.apache.hadoop.yarn.api.records.ContainerId), FileReader (java.io.FileReader), PrintWriter (java.io.PrintWriter), Path (org.apache.hadoop.fs.Path), StartContainersRequest (org.apache.hadoop.yarn.api.protocolrecords.StartContainersRequest), Pattern (java.util.regex.Pattern), Resource (org.apache.hadoop.yarn.api.records.Resource), LocalResource (org.apache.hadoop.yarn.api.records.LocalResource), ContainerLaunchContext (org.apache.hadoop.yarn.api.records.ContainerLaunchContext), ApplicationAttemptId (org.apache.hadoop.yarn.api.records.ApplicationAttemptId), StartContainerRequest (org.apache.hadoop.yarn.api.protocolrecords.StartContainerRequest), BufferedReader (java.io.BufferedReader), ApplicationId (org.apache.hadoop.yarn.api.records.ApplicationId), File (java.io.File), BaseContainerManagerTest (org.apache.hadoop.yarn.server.nodemanager.containermanager.BaseContainerManagerTest), Test (org.junit.Test)
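
The KILLED_EXCEEDED_VMEM exit status asserted above is only produced when virtual-memory checking is enabled. As a side sketch, these are the standard YarnConfiguration knobs involved; the ratio value shown is illustrative.

import org.apache.hadoop.yarn.conf.YarnConfiguration;

public class VmemCheckConfigSketch {
    public static void main(String[] args) {
        YarnConfiguration conf = new YarnConfiguration();
        // Enable the virtual-memory check; the vmem limit is pmem * ratio.
        conf.setBoolean(YarnConfiguration.NM_VMEM_CHECK_ENABLED, true);
        conf.setFloat(YarnConfiguration.NM_VMEM_PMEM_RATIO, 2.1f); // assumed ratio
        System.out.println(conf.get(YarnConfiguration.NM_VMEM_CHECK_ENABLED));
    }
}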

Aggregations

Resource (org.apache.hadoop.yarn.api.records.Resource): 500
Test (org.junit.Test): 190
NodeId (org.apache.hadoop.yarn.api.records.NodeId): 89
ApplicationAttemptId (org.apache.hadoop.yarn.api.records.ApplicationAttemptId): 82
Priority (org.apache.hadoop.yarn.api.records.Priority): 80
ContainerId (org.apache.hadoop.yarn.api.records.ContainerId): 67
HashMap (java.util.HashMap): 62
ApplicationId (org.apache.hadoop.yarn.api.records.ApplicationId): 57
RMContainer (org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainer): 55
FiCaSchedulerApp (org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerApp): 53
ArrayList (java.util.ArrayList): 49
ResourceLimits (org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceLimits): 48
FiCaSchedulerNode (org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerNode): 45
YarnConfiguration (org.apache.hadoop.yarn.conf.YarnConfiguration): 43
Container (org.apache.hadoop.yarn.api.records.Container): 42
ResourceRequest (org.apache.hadoop.yarn.api.records.ResourceRequest): 42
Configuration (org.apache.hadoop.conf.Configuration): 34
IOException (java.io.IOException): 33
LocalResource (org.apache.hadoop.yarn.api.records.LocalResource): 33
Map (java.util.Map): 29