Example 76 with NodeId

use of org.apache.hadoop.yarn.api.records.NodeId in project hadoop by apache.

the class TestAHSWebServices method testContainerLogsForFinishedApps.

@Test(timeout = 10000)
public void testContainerLogsForFinishedApps() throws Exception {
    String fileName = "syslog";
    String user = "user1";
    NodeId nodeId = NodeId.newInstance("test host", 100);
    NodeId nodeId2 = NodeId.newInstance("host2", 1234);
    ApplicationId appId = ApplicationId.newInstance(0, 1);
    ApplicationAttemptId appAttemptId = ApplicationAttemptId.newInstance(appId, 1);
    ContainerId containerId1 = ContainerId.newContainerId(appAttemptId, 1);
    ContainerId containerId100 = ContainerId.newContainerId(appAttemptId, 100);
    TestContainerLogsUtils.createContainerLogFileInRemoteFS(conf, fs, rootLogDir,
        containerId1, nodeId, fileName, user, ("Hello." + containerId1), true);
    TestContainerLogsUtils.createContainerLogFileInRemoteFS(conf, fs, rootLogDir,
        containerId100, nodeId2, fileName, user, ("Hello." + containerId100), false);
    // test whether we can find the container log in the remote directory
    // when the containerInfo for this container can be fetched from AHS.
    WebResource r = resource();
    ClientResponse response = r.path("ws").path("v1").path("applicationhistory")
        .path("containerlogs").path(containerId1.toString()).path(fileName)
        .queryParam("user.name", user).accept(MediaType.TEXT_PLAIN)
        .get(ClientResponse.class);
    String responseText = response.getEntity(String.class);
    assertTrue(responseText.contains("Hello." + containerId1));
    // Do the same test with new API
    r = resource();
    response = r.path("ws").path("v1").path("applicationhistory")
        .path("containers").path(containerId1.toString()).path("logs")
        .path(fileName).queryParam("user.name", user)
        .accept(MediaType.TEXT_PLAIN).get(ClientResponse.class);
    responseText = response.getEntity(String.class);
    assertTrue(responseText.contains("Hello." + containerId1));
    // test whether we can find the container log in the remote directory
    // when the containerInfo for this container cannot be fetched from AHS.
    r = resource();
    response = r.path("ws").path("v1").path("applicationhistory")
        .path("containerlogs").path(containerId100.toString()).path(fileName)
        .queryParam("user.name", user).accept(MediaType.TEXT_PLAIN)
        .get(ClientResponse.class);
    responseText = response.getEntity(String.class);
    assertTrue(responseText.contains("Hello." + containerId100));
    // Do the same test with new API
    r = resource();
    response = r.path("ws").path("v1").path("applicationhistory")
        .path("containers").path(containerId100.toString()).path("logs")
        .path(fileName).queryParam("user.name", user)
        .accept(MediaType.TEXT_PLAIN).get(ClientResponse.class);
    responseText = response.getEntity(String.class);
    assertTrue(responseText.contains("Hello." + containerId100));
    // create an application that cannot be found in AHS
    ApplicationId appId100 = ApplicationId.newInstance(0, 100);
    ApplicationAttemptId appAttemptId100 = ApplicationAttemptId.newInstance(appId100, 1);
    ContainerId containerId1ForApp100 = ContainerId.newContainerId(appAttemptId100, 1);
    TestContainerLogsUtils.createContainerLogFileInRemoteFS(conf, fs, rootLogDir,
        containerId1ForApp100, nodeId, fileName, user,
        ("Hello." + containerId1ForApp100), true);
    r = resource();
    response = r.path("ws").path("v1").path("applicationhistory")
        .path("containerlogs").path(containerId1ForApp100.toString())
        .path(fileName).queryParam("user.name", user)
        .accept(MediaType.TEXT_PLAIN).get(ClientResponse.class);
    responseText = response.getEntity(String.class);
    assertTrue(responseText.contains("Hello." + containerId1ForApp100));
    int fullTextSize = responseText.getBytes().length;
    String tailEndSeparator = StringUtils.repeat("*", "End of LogType:syslog".length() + 50) + "\n\n";
    int tailTextSize = "\nEnd of LogType:syslog\n".getBytes().length + tailEndSeparator.getBytes().length;
    String logMessage = "Hello." + containerId1ForApp100;
    int fileContentSize = logMessage.getBytes().length;
    // specify how many bytes we should get from the logs:
    // a positive number returns the first n bytes of the container log
    r = resource();
    response = r.path("ws").path("v1").path("applicationhistory")
        .path("containerlogs").path(containerId1ForApp100.toString())
        .path(fileName).queryParam("user.name", user).queryParam("size", "5")
        .accept(MediaType.TEXT_PLAIN).get(ClientResponse.class);
    responseText = response.getEntity(String.class);
    assertEquals(responseText.getBytes().length, (fullTextSize - fileContentSize) + 5);
    assertTrue(fullTextSize >= responseText.getBytes().length);
    assertEquals(new String(responseText.getBytes(), (fullTextSize - fileContentSize - tailTextSize), 5), new String(logMessage.getBytes(), 0, 5));
    // specify how many bytes we should get from the logs:
    // a negative number returns the last n bytes of the container log
    r = resource();
    response = r.path("ws").path("v1").path("applicationhistory")
        .path("containerlogs").path(containerId1ForApp100.toString())
        .path(fileName).queryParam("user.name", user).queryParam("size", "-5")
        .accept(MediaType.TEXT_PLAIN).get(ClientResponse.class);
    responseText = response.getEntity(String.class);
    assertEquals(responseText.getBytes().length, (fullTextSize - fileContentSize) + 5);
    assertTrue(fullTextSize >= responseText.getBytes().length);
    assertEquals(new String(responseText.getBytes(), (fullTextSize - fileContentSize - tailTextSize), 5), new String(logMessage.getBytes(), fileContentSize - 5, 5));
    // if we request more bytes than the actual file size,
    // we get the full log
    r = resource();
    response = r.path("ws").path("v1").path("applicationhistory")
        .path("containerlogs").path(containerId1ForApp100.toString())
        .path(fileName).queryParam("user.name", user).queryParam("size", "10000")
        .accept(MediaType.TEXT_PLAIN).get(ClientResponse.class);
    responseText = response.getEntity(String.class);
    assertEquals(responseText.getBytes().length, fullTextSize);
    r = resource();
    response = r.path("ws").path("v1").path("applicationhistory")
        .path("containerlogs").path(containerId1ForApp100.toString())
        .path(fileName).queryParam("user.name", user).queryParam("size", "-10000")
        .accept(MediaType.TEXT_PLAIN).get(ClientResponse.class);
    responseText = response.getEntity(String.class);
    assertEquals(responseText.getBytes().length, fullTextSize);
}
Also used : ClientResponse (com.sun.jersey.api.client.ClientResponse), ContainerId (org.apache.hadoop.yarn.api.records.ContainerId), NodeId (org.apache.hadoop.yarn.api.records.NodeId), WebResource (com.sun.jersey.api.client.WebResource), ApplicationAttemptId (org.apache.hadoop.yarn.api.records.ApplicationAttemptId), ApplicationId (org.apache.hadoop.yarn.api.records.ApplicationId), Test (org.junit.Test)
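
The test exercises two URL shapes for the same file: the legacy containerlogs path and the newer containers/{id}/logs path. Below is a minimal sketch that fetches both with plain java.net instead of the Jersey test client; the base address, class name, and helper methods are illustrative assumptions, not part of the test.

import java.io.IOException;
import java.io.InputStream;
import java.net.HttpURLConnection;
import java.net.URL;
import java.nio.charset.StandardCharsets;

public class AhsLogFetchSketch {
    // Illustrative base address; a real deployment would take it from the
    // timeline service webapp address in the cluster configuration.
    private static final String BASE =
        "http://localhost:8188/ws/v1/applicationhistory";

    // Legacy path: /containerlogs/{containerId}/{fileName}
    static String fetchLegacy(String containerId, String fileName, String user)
            throws IOException {
        return get(BASE + "/containerlogs/" + containerId + "/" + fileName
            + "?user.name=" + user);
    }

    // Newer path: /containers/{containerId}/logs/{fileName}
    static String fetchNew(String containerId, String fileName, String user)
            throws IOException {
        return get(BASE + "/containers/" + containerId + "/logs/" + fileName
            + "?user.name=" + user);
    }

    private static String get(String url) throws IOException {
        HttpURLConnection conn = (HttpURLConnection) new URL(url).openConnection();
        conn.setRequestProperty("Accept", "text/plain");
        try (InputStream in = conn.getInputStream()) {
            return new String(in.readAllBytes(), StandardCharsets.UTF_8);
        }
    }
}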

Example 77 with NodeId

use of org.apache.hadoop.yarn.api.records.NodeId in project hadoop by apache.

the class TestLogsCLI method testListNodeInfo.

@Test(timeout = 15000)
public void testListNodeInfo() throws Exception {
    String remoteLogRootDir = "target/logs/";
    Configuration configuration = new Configuration();
    configuration.setBoolean(YarnConfiguration.LOG_AGGREGATION_ENABLED, true);
    configuration.set(YarnConfiguration.NM_REMOTE_APP_LOG_DIR, remoteLogRootDir);
    configuration.setBoolean(YarnConfiguration.YARN_ACL_ENABLE, true);
    configuration.set(YarnConfiguration.YARN_ADMIN_ACL, "admin");
    ApplicationId appId = ApplicationId.newInstance(0, 1);
    ApplicationAttemptId appAttemptId = ApplicationAttemptId.newInstance(appId, 1);
    List<ContainerId> containerIds = new ArrayList<ContainerId>();
    ContainerId containerId1 = ContainerId.newContainerId(appAttemptId, 1);
    ContainerId containerId2 = ContainerId.newContainerId(appAttemptId, 2);
    containerIds.add(containerId1);
    containerIds.add(containerId2);
    List<NodeId> nodeIds = new ArrayList<NodeId>();
    NodeId nodeId1 = NodeId.newInstance("localhost1", 1234);
    NodeId nodeId2 = NodeId.newInstance("localhost2", 2345);
    nodeIds.add(nodeId1);
    nodeIds.add(nodeId2);
    String rootLogDir = "target/LocalLogs";
    FileSystem fs = FileSystem.get(configuration);
    createContainerLogs(configuration, remoteLogRootDir, rootLogDir, fs, appId, containerIds, nodeIds);
    YarnClient mockYarnClient = createMockYarnClient(YarnApplicationState.FINISHED,
        UserGroupInformation.getCurrentUser().getShortUserName());
    LogsCLI cli = new LogsCLIForTest(mockYarnClient);
    cli.setConf(configuration);
    cli.run(new String[] { "-applicationId", appId.toString(), "-list_nodes" });
    assertTrue(sysOutStream.toString().contains(LogAggregationUtils.getNodeString(nodeId1)));
    assertTrue(sysOutStream.toString().contains(LogAggregationUtils.getNodeString(nodeId2)));
    sysOutStream.reset();
    fs.delete(new Path(remoteLogRootDir), true);
    fs.delete(new Path(rootLogDir), true);
}
Also used : Path (org.apache.hadoop.fs.Path), Configuration (org.apache.hadoop.conf.Configuration), YarnConfiguration (org.apache.hadoop.yarn.conf.YarnConfiguration), ArrayList (java.util.ArrayList), Matchers.anyString (org.mockito.Matchers.anyString), ApplicationAttemptId (org.apache.hadoop.yarn.api.records.ApplicationAttemptId), YarnClient (org.apache.hadoop.yarn.client.api.YarnClient), ContainerId (org.apache.hadoop.yarn.api.records.ContainerId), FileSystem (org.apache.hadoop.fs.FileSystem), LocalFileSystem (org.apache.hadoop.fs.LocalFileSystem), NodeId (org.apache.hadoop.yarn.api.records.NodeId), ApplicationId (org.apache.hadoop.yarn.api.records.ApplicationId), Test (org.junit.Test)
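
The -list_nodes output is built from NodeId values recovered from aggregated log file names on the remote file system. A minimal sketch of the NodeId round-trip involved; the underscore-separated form is an assumption about how LogAggregationUtils.getNodeString encodes a node for file names, so verify it against your Hadoop version.

import org.apache.hadoop.yarn.api.records.NodeId;

public class NodeIdRoundTrip {
    public static void main(String[] args) {
        NodeId nodeId = NodeId.newInstance("localhost1", 1234);
        String wire = nodeId.toString();           // "localhost1:1234"
        NodeId parsed = NodeId.fromString(wire);   // parses "host:port" back
        System.out.println(parsed.equals(nodeId)); // true: equality is host + port
        // File names on the remote FS cannot safely contain ':', so the node
        // string used there is assumed to swap it for '_':
        System.out.println(wire.replace(":", "_")); // "localhost1_1234"
    }
}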

Example 78 with NodeId

use of org.apache.hadoop.yarn.api.records.NodeId in project hadoop by apache.

the class TestRMAdminCLI method testRefreshNodesGracefulBeforeTimeout.

@Test
public void testRefreshNodesGracefulBeforeTimeout() throws Exception {
    // graceful decommission before timeout
    String[] args = { "-refreshNodes", "-g", "1", "-client" };
    CheckForDecommissioningNodesResponse response = Records.newRecord(CheckForDecommissioningNodesResponse.class);
    HashSet<NodeId> decomNodes = new HashSet<NodeId>();
    response.setDecommissioningNodes(decomNodes);
    when(admin.checkForDecommissioningNodes(
        any(CheckForDecommissioningNodesRequest.class))).thenReturn(response);
    assertEquals(0, rmAdminCLI.run(args));
    verify(admin).refreshNodes(RefreshNodesRequest.newInstance(DecommissionType.GRACEFUL, 1));
    verify(admin, never()).refreshNodes(RefreshNodesRequest.newInstance(DecommissionType.FORCEFUL));
}
Also used : CheckForDecommissioningNodesRequest (org.apache.hadoop.yarn.server.api.protocolrecords.CheckForDecommissioningNodesRequest), NodeId (org.apache.hadoop.yarn.api.records.NodeId), CheckForDecommissioningNodesResponse (org.apache.hadoop.yarn.server.api.protocolrecords.CheckForDecommissioningNodesResponse), HashSet (java.util.HashSet), Test (org.junit.Test)
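
The -g (graceful) flow first asks the RM which nodes are still decommissioning and only falls back to a forceful refresh if any remain at the timeout; with the empty set stubbed above, the FORCEFUL call is never made. A paraphrased sketch of that client-side polling; the class, method, and parameter names are illustrative, and the real logic lives in RMAdminCLI and differs in detail.

import java.util.Set;
import java.util.function.Supplier;
import org.apache.hadoop.yarn.api.records.NodeId;

public class GracefulDrainSketch {
    // Returns true if every decommissioning node drained before the deadline.
    static boolean allDrained(Supplier<Set<NodeId>> check,
            long timeoutMs, long pollMs) throws InterruptedException {
        long deadline = System.currentTimeMillis() + timeoutMs;
        while (System.currentTimeMillis() < deadline) {
            if (check.get().isEmpty()) {
                return true;  // nothing left to wait for
            }
            Thread.sleep(pollMs);
        }
        return false;  // caller would then issue a FORCEFUL refreshNodes
    }
}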

Example 79 with NodeId

use of org.apache.hadoop.yarn.api.records.NodeId in project hadoop by apache.

the class TestRMAdminCLI method testUpdateNodeResource.

@Test(timeout = 500)
public void testUpdateNodeResource() throws Exception {
    String nodeIdStr = "0.0.0.0:0";
    int memSize = 2048;
    int cores = 2;
    String[] args = { "-updateNodeResource", nodeIdStr, Integer.toString(memSize), Integer.toString(cores) };
    assertEquals(0, rmAdminCLI.run(args));
    ArgumentCaptor<UpdateNodeResourceRequest> argument = ArgumentCaptor.forClass(UpdateNodeResourceRequest.class);
    verify(admin).updateNodeResource(argument.capture());
    UpdateNodeResourceRequest request = argument.getValue();
    Map<NodeId, ResourceOption> resourceMap = request.getNodeResourceMap();
    NodeId nodeId = NodeId.fromString(nodeIdStr);
    Resource expectedResource = Resources.createResource(memSize, cores);
    ResourceOption resource = resourceMap.get(nodeId);
    assertNotNull("resource for " + nodeIdStr + " shouldn't be null.", resource);
    assertEquals("resource value for " + nodeIdStr + " is not as expected.", ResourceOption.newInstance(expectedResource, ResourceOption.OVER_COMMIT_TIMEOUT_MILLIS_DEFAULT), resource);
}
Also used : ResourceOption (org.apache.hadoop.yarn.api.records.ResourceOption), NodeId (org.apache.hadoop.yarn.api.records.NodeId), Resource (org.apache.hadoop.yarn.api.records.Resource), UpdateNodeResourceRequest (org.apache.hadoop.yarn.server.api.protocolrecords.UpdateNodeResourceRequest), Test (org.junit.Test)
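
The lookup at the end works because NodeId defines equality by host and port, so a NodeId parsed from the same "host:port" string retrieves entries keyed by a different but equal instance. A minimal standalone sketch of that property:

import java.util.HashMap;
import java.util.Map;
import org.apache.hadoop.yarn.api.records.NodeId;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.api.records.ResourceOption;

public class NodeIdMapKeySketch {
    public static void main(String[] args) {
        Map<NodeId, ResourceOption> resourceMap = new HashMap<>();
        resourceMap.put(NodeId.newInstance("0.0.0.0", 0),
            ResourceOption.newInstance(Resource.newInstance(2048, 2),
                ResourceOption.OVER_COMMIT_TIMEOUT_MILLIS_DEFAULT));
        // A separately parsed NodeId is equal to the original key,
        // so the map lookup succeeds.
        ResourceOption found = resourceMap.get(NodeId.fromString("0.0.0.0:0"));
        System.out.println(found != null); // true
    }
}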

Example 80 with NodeId

use of org.apache.hadoop.yarn.api.records.NodeId in project hadoop by apache.

the class ApplicationHistoryManagerOnTimelineStore method convertToContainerReport.

private static ContainerReport convertToContainerReport(TimelineEntity entity, String serverHttpAddress, String user) {
    int allocatedMem = 0;
    int allocatedVcore = 0;
    String allocatedHost = null;
    int allocatedPort = -1;
    int allocatedPriority = 0;
    long createdTime = 0;
    long finishedTime = 0;
    String diagnosticsInfo = null;
    int exitStatus = ContainerExitStatus.INVALID;
    ContainerState state = null;
    String nodeHttpAddress = null;
    Map<String, Object> entityInfo = entity.getOtherInfo();
    if (entityInfo != null) {
        if (entityInfo.containsKey(ContainerMetricsConstants.ALLOCATED_MEMORY_INFO)) {
            allocatedMem = (Integer) entityInfo.get(ContainerMetricsConstants.ALLOCATED_MEMORY_INFO);
        }
        if (entityInfo.containsKey(ContainerMetricsConstants.ALLOCATED_VCORE_INFO)) {
            allocatedVcore = (Integer) entityInfo.get(ContainerMetricsConstants.ALLOCATED_VCORE_INFO);
        }
        if (entityInfo.containsKey(ContainerMetricsConstants.ALLOCATED_HOST_INFO)) {
            allocatedHost = entityInfo.get(ContainerMetricsConstants.ALLOCATED_HOST_INFO).toString();
        }
        if (entityInfo.containsKey(ContainerMetricsConstants.ALLOCATED_PORT_INFO)) {
            allocatedPort = (Integer) entityInfo.get(ContainerMetricsConstants.ALLOCATED_PORT_INFO);
        }
        if (entityInfo.containsKey(ContainerMetricsConstants.ALLOCATED_PRIORITY_INFO)) {
            allocatedPriority = (Integer) entityInfo.get(ContainerMetricsConstants.ALLOCATED_PRIORITY_INFO);
        }
        if (entityInfo.containsKey(ContainerMetricsConstants.ALLOCATED_HOST_HTTP_ADDRESS_INFO)) {
            nodeHttpAddress = (String) entityInfo.get(ContainerMetricsConstants.ALLOCATED_HOST_HTTP_ADDRESS_INFO);
        }
    }
    List<TimelineEvent> events = entity.getEvents();
    if (events != null) {
        for (TimelineEvent event : events) {
            if (event.getEventType().equals(ContainerMetricsConstants.CREATED_EVENT_TYPE)) {
                createdTime = event.getTimestamp();
            } else if (event.getEventType().equals(ContainerMetricsConstants.FINISHED_EVENT_TYPE)) {
                finishedTime = event.getTimestamp();
                Map<String, Object> eventInfo = event.getEventInfo();
                if (eventInfo == null) {
                    continue;
                }
                if (eventInfo.containsKey(ContainerMetricsConstants.DIAGNOSTICS_INFO)) {
                    diagnosticsInfo = eventInfo.get(ContainerMetricsConstants.DIAGNOSTICS_INFO).toString();
                }
                if (eventInfo.containsKey(ContainerMetricsConstants.EXIT_STATUS_INFO)) {
                    exitStatus = (Integer) eventInfo.get(ContainerMetricsConstants.EXIT_STATUS_INFO);
                }
                if (eventInfo.containsKey(ContainerMetricsConstants.STATE_INFO)) {
                    state = ContainerState.valueOf(eventInfo.get(ContainerMetricsConstants.STATE_INFO).toString());
                }
            }
        }
    }
    ContainerId containerId = ContainerId.fromString(entity.getEntityId());
    String logUrl = null;
    NodeId allocatedNode = null;
    if (allocatedHost != null) {
        allocatedNode = NodeId.newInstance(allocatedHost, allocatedPort);
        logUrl = WebAppUtils.getAggregatedLogURL(serverHttpAddress,
            allocatedNode.toString(), containerId.toString(),
            containerId.toString(), user);
    }
    return ContainerReport.newInstance(ContainerId.fromString(entity.getEntityId()),
        Resource.newInstance(allocatedMem, allocatedVcore), allocatedNode,
        Priority.newInstance(allocatedPriority), createdTime, finishedTime,
        diagnosticsInfo, logUrl, exitStatus, state, nodeHttpAddress);
}
Also used : TimelineEvent (org.apache.hadoop.yarn.api.records.timeline.TimelineEvent), ContainerId (org.apache.hadoop.yarn.api.records.ContainerId), NodeId (org.apache.hadoop.yarn.api.records.NodeId), ContainerState (org.apache.hadoop.yarn.api.records.ContainerState), Map (java.util.Map), HashMap (java.util.HashMap), LinkedHashMap (java.util.LinkedHashMap)
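
Note that allocatedNode and logUrl stay null whenever the timeline entity carried no allocated-host info, so consumers of the resulting ContainerReport should null-check both. A small defensive sketch on the consumer side; describeNode is an illustrative helper, not a Hadoop API.

import org.apache.hadoop.yarn.api.records.ContainerReport;
import org.apache.hadoop.yarn.api.records.NodeId;

public class ContainerReportSketch {
    // Renders the assigned node, tolerating reports built without host info.
    static String describeNode(ContainerReport report) {
        NodeId node = report.getAssignedNode();
        return node == null
            ? "<unallocated>"
            : node.getHost() + ":" + node.getPort();
    }
}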

Aggregations

NodeId (org.apache.hadoop.yarn.api.records.NodeId): 257
Test (org.junit.Test): 137
Resource (org.apache.hadoop.yarn.api.records.Resource): 89
ApplicationAttemptId (org.apache.hadoop.yarn.api.records.ApplicationAttemptId): 74
ContainerId (org.apache.hadoop.yarn.api.records.ContainerId): 59
YarnConfiguration (org.apache.hadoop.yarn.conf.YarnConfiguration): 46
Container (org.apache.hadoop.yarn.api.records.Container): 44
ArrayList (java.util.ArrayList): 43
Configuration (org.apache.hadoop.conf.Configuration): 40
ApplicationId (org.apache.hadoop.yarn.api.records.ApplicationId): 40
RMContainer (org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainer): 40
HashSet (java.util.HashSet): 39
Set (java.util.Set): 36
HashMap (java.util.HashMap): 35
FiCaSchedulerNode (org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerNode): 34
Priority (org.apache.hadoop.yarn.api.records.Priority): 32
FiCaSchedulerApp (org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerApp): 31
IOException (java.io.IOException): 29
ResourceLimits (org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceLimits): 29
RMNode (org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode): 28