Use of org.apache.hadoop.yarn.api.records.NodeId in project hadoop by apache.
From the class GetLabelsToNodesResponsePBImpl, method initLabelsToNodes:
private void initLabelsToNodes() {
  if (this.labelsToNodes != null) {
    return;
  }
  GetLabelsToNodesResponseProtoOrBuilder p = viaProto ? proto : builder;
  List<LabelsToNodeIdsProto> list = p.getLabelsToNodesList();
  this.labelsToNodes = new HashMap<String, Set<NodeId>>();
  for (LabelsToNodeIdsProto c : list) {
    Set<NodeId> setNodes = new HashSet<NodeId>();
    for (NodeIdProto n : c.getNodeIdList()) {
      NodeId node = new NodeIdPBImpl(n);
      setNodes.add(node);
    }
    if (!setNodes.isEmpty()) {
      this.labelsToNodes.put(c.getNodeLabels(), setNodes);
    }
  }
}
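This is the usual lazy-initialization pattern of the YARN protobuf record implementations: the Java-side map is rebuilt from the proto only on first access and then cached in this.labelsToNodes. A minimal sketch of how a getter would typically rely on it, assuming the conventional getLabelsToNodes accessor (the getter body below is an illustration, not copied from the class):

@Override
public Map<String, Set<NodeId>> getLabelsToNodes() {
  // Populates this.labelsToNodes from the proto on first call; a no-op afterwards.
  initLabelsToNodes();
  return this.labelsToNodes;
}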
Use of org.apache.hadoop.yarn.api.records.NodeId in project hadoop by apache.
From the class GetLabelsToNodesResponsePBImpl, method addLabelsToNodesToProto:
private void addLabelsToNodesToProto() {
  maybeInitBuilder();
  builder.clearLabelsToNodes();
  if (labelsToNodes == null) {
    return;
  }
  Iterable<LabelsToNodeIdsProto> iterable = new Iterable<LabelsToNodeIdsProto>() {

    @Override
    public Iterator<LabelsToNodeIdsProto> iterator() {
      return new Iterator<LabelsToNodeIdsProto>() {

        Iterator<Entry<String, Set<NodeId>>> iter =
            labelsToNodes.entrySet().iterator();

        @Override
        public void remove() {
          throw new UnsupportedOperationException();
        }

        @Override
        public LabelsToNodeIdsProto next() {
          Entry<String, Set<NodeId>> now = iter.next();
          Set<NodeIdProto> nodeProtoSet = new HashSet<NodeIdProto>();
          for (NodeId n : now.getValue()) {
            nodeProtoSet.add(convertToProtoFormat(n));
          }
          return LabelsToNodeIdsProto.newBuilder()
              .setNodeLabels(now.getKey())
              .addAllNodeId(nodeProtoSet)
              .build();
        }

        @Override
        public boolean hasNext() {
          return iter.hasNext();
        }
      };
    }
  };
  builder.addAllLabelsToNodes(iterable);
}
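The anonymous Iterable lets the protobuf builder pull each LabelsToNodeIdsProto lazily instead of materializing the whole list up front. The iterator depends on a convertToProtoFormat helper; a minimal sketch of that helper, assuming the usual PBImpl convention of unwrapping the proto from the record implementation (not copied from the class):

private NodeIdProto convertToProtoFormat(NodeId t) {
  // NodeId records are backed by NodeIdPBImpl, which exposes the underlying proto.
  return ((NodeIdPBImpl) t).getProto();
}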
Use of org.apache.hadoop.yarn.api.records.NodeId in project hadoop by apache.
From the class TestAHSWebServices, method testContainerLogsForFinishedApps:
@Test(timeout = 10000)
public void testContainerLogsForFinishedApps() throws Exception {
  String fileName = "syslog";
  String user = "user1";
  NodeId nodeId = NodeId.newInstance("test host", 100);
  NodeId nodeId2 = NodeId.newInstance("host2", 1234);
  ApplicationId appId = ApplicationId.newInstance(0, 1);
  ApplicationAttemptId appAttemptId = ApplicationAttemptId.newInstance(appId, 1);
  ContainerId containerId1 = ContainerId.newContainerId(appAttemptId, 1);
  ContainerId containerId100 = ContainerId.newContainerId(appAttemptId, 100);
  TestContainerLogsUtils.createContainerLogFileInRemoteFS(conf, fs, rootLogDir,
      containerId1, nodeId, fileName, user, ("Hello." + containerId1), true);
  TestContainerLogsUtils.createContainerLogFileInRemoteFS(conf, fs, rootLogDir,
      containerId100, nodeId2, fileName, user, ("Hello." + containerId100), false);
  // test whether we can find the container log in the remote directory if
  // the containerInfo for this container can be fetched from AHS.
  WebResource r = resource();
  ClientResponse response = r.path("ws").path("v1").path("applicationhistory")
      .path("containerlogs").path(containerId1.toString()).path(fileName)
      .queryParam("user.name", user).accept(MediaType.TEXT_PLAIN)
      .get(ClientResponse.class);
  String responseText = response.getEntity(String.class);
  assertTrue(responseText.contains("Hello." + containerId1));
  // Do the same test with new API
  r = resource();
  response = r.path("ws").path("v1").path("applicationhistory")
      .path("containers").path(containerId1.toString()).path("logs").path(fileName)
      .queryParam("user.name", user).accept(MediaType.TEXT_PLAIN)
      .get(ClientResponse.class);
  responseText = response.getEntity(String.class);
  assertTrue(responseText.contains("Hello." + containerId1));
  // test whether we can find the container log in the remote directory if
  // the containerInfo for this container cannot be fetched from AHS.
  r = resource();
  response = r.path("ws").path("v1").path("applicationhistory")
      .path("containerlogs").path(containerId100.toString()).path(fileName)
      .queryParam("user.name", user).accept(MediaType.TEXT_PLAIN)
      .get(ClientResponse.class);
  responseText = response.getEntity(String.class);
  assertTrue(responseText.contains("Hello." + containerId100));
  // Do the same test with new API
  r = resource();
  response = r.path("ws").path("v1").path("applicationhistory")
      .path("containers").path(containerId100.toString()).path("logs").path(fileName)
      .queryParam("user.name", user).accept(MediaType.TEXT_PLAIN)
      .get(ClientResponse.class);
  responseText = response.getEntity(String.class);
  assertTrue(responseText.contains("Hello." + containerId100));
  // create an application which cannot be found in AHS
  ApplicationId appId100 = ApplicationId.newInstance(0, 100);
  ApplicationAttemptId appAttemptId100 = ApplicationAttemptId.newInstance(appId100, 1);
  ContainerId containerId1ForApp100 = ContainerId.newContainerId(appAttemptId100, 1);
  TestContainerLogsUtils.createContainerLogFileInRemoteFS(conf, fs, rootLogDir,
      containerId1ForApp100, nodeId, fileName, user,
      ("Hello." + containerId1ForApp100), true);
  r = resource();
  response = r.path("ws").path("v1").path("applicationhistory")
      .path("containerlogs").path(containerId1ForApp100.toString()).path(fileName)
      .queryParam("user.name", user).accept(MediaType.TEXT_PLAIN)
      .get(ClientResponse.class);
  responseText = response.getEntity(String.class);
  assertTrue(responseText.contains("Hello." + containerId1ForApp100));
  int fullTextSize = responseText.getBytes().length;
  String tailEndSeparator =
      StringUtils.repeat("*", "End of LogType:syslog".length() + 50) + "\n\n";
  int tailTextSize = "\nEnd of LogType:syslog\n".getBytes().length
      + tailEndSeparator.getBytes().length;
  String logMessage = "Hello." + containerId1ForApp100;
  int fileContentSize = logMessage.getBytes().length;
  // specify how many bytes we should get from logs
  // if we specify a positive number, it would get the first n bytes from
  // container log
  r = resource();
  response = r.path("ws").path("v1").path("applicationhistory")
      .path("containerlogs").path(containerId1ForApp100.toString()).path(fileName)
      .queryParam("user.name", user).queryParam("size", "5")
      .accept(MediaType.TEXT_PLAIN).get(ClientResponse.class);
  responseText = response.getEntity(String.class);
  assertEquals(responseText.getBytes().length, (fullTextSize - fileContentSize) + 5);
  assertTrue(fullTextSize >= responseText.getBytes().length);
  assertEquals(new String(responseText.getBytes(),
      (fullTextSize - fileContentSize - tailTextSize), 5),
      new String(logMessage.getBytes(), 0, 5));
  // specify how many bytes we should get from logs
  // if we specify a negative number, it would get the last n bytes from
  // container log
  r = resource();
  response = r.path("ws").path("v1").path("applicationhistory")
      .path("containerlogs").path(containerId1ForApp100.toString()).path(fileName)
      .queryParam("user.name", user).queryParam("size", "-5")
      .accept(MediaType.TEXT_PLAIN).get(ClientResponse.class);
  responseText = response.getEntity(String.class);
  assertEquals(responseText.getBytes().length, (fullTextSize - fileContentSize) + 5);
  assertTrue(fullTextSize >= responseText.getBytes().length);
  assertEquals(new String(responseText.getBytes(),
      (fullTextSize - fileContentSize - tailTextSize), 5),
      new String(logMessage.getBytes(), fileContentSize - 5, 5));
  // specify a size larger than the actual file size;
  // we would get the full logs
  r = resource();
  response = r.path("ws").path("v1").path("applicationhistory")
      .path("containerlogs").path(containerId1ForApp100.toString()).path(fileName)
      .queryParam("user.name", user).queryParam("size", "10000")
      .accept(MediaType.TEXT_PLAIN).get(ClientResponse.class);
  responseText = response.getEntity(String.class);
  assertEquals(responseText.getBytes().length, fullTextSize);
  r = resource();
  response = r.path("ws").path("v1").path("applicationhistory")
      .path("containerlogs").path(containerId1ForApp100.toString()).path(fileName)
      .queryParam("user.name", user).queryParam("size", "-10000")
      .accept(MediaType.TEXT_PLAIN).get(ClientResponse.class);
  responseText = response.getEntity(String.class);
  assertEquals(responseText.getBytes().length, fullTextSize);
}
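The size assertions reduce to simple arithmetic: the header and tail text around the log payload stay the same, and only the payload is capped at the requested number of bytes. A hedged sketch of that expectation as a standalone helper (the helper name and signature are illustrative, not part of the test):

// Expected response length when requesting n bytes of an m-byte log:
// everything except the payload is kept, and the payload is capped at |n|.
static int expectedResponseSize(int fullTextSize, int fileContentSize, int requestedBytes) {
  int payload = Math.min(Math.abs(requestedBytes), fileContentSize);
  return (fullTextSize - fileContentSize) + payload;
}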
Use of org.apache.hadoop.yarn.api.records.NodeId in project hadoop by apache.
From the class ResourceTrackerService, method unRegisterNodeManager:
@SuppressWarnings("unchecked")
@Override
public UnRegisterNodeManagerResponse unRegisterNodeManager(UnRegisterNodeManagerRequest request) throws YarnException, IOException {
UnRegisterNodeManagerResponse response = recordFactory.newRecordInstance(UnRegisterNodeManagerResponse.class);
NodeId nodeId = request.getNodeId();
RMNode rmNode = this.rmContext.getRMNodes().get(nodeId);
if (rmNode == null) {
LOG.info("Node not found, ignoring the unregister from node id : " + nodeId);
return response;
}
LOG.info("Node with node id : " + nodeId + " has shutdown, hence unregistering the node.");
this.nmLivelinessMonitor.unregister(nodeId);
this.rmContext.getDispatcher().getEventHandler().handle(new RMNodeEvent(nodeId, RMNodeEventType.SHUTDOWN));
return response;
}
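On the ResourceManager side the unregister path is simple: look the node up by NodeId, drop it from the liveliness monitor, and dispatch a SHUTDOWN event to the node's state machine. A hedged sketch of how a NodeManager-side caller might build the request through the generic record factory (the exact construction in the real NodeStatusUpdater may differ):

UnRegisterNodeManagerRequest request =
    Records.newRecord(UnRegisterNodeManagerRequest.class);
request.setNodeId(nodeId);  // the NodeId of the NM that is shutting down
resourceTracker.unRegisterNodeManager(request);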
Use of org.apache.hadoop.yarn.api.records.NodeId in project hadoop by apache.
From the class AMLauncher, method getContainerMgrProxy:
// Protected. For tests.
protected ContainerManagementProtocol getContainerMgrProxy(final ContainerId containerId) {
  final NodeId node = masterContainer.getNodeId();
  final InetSocketAddress containerManagerConnectAddress =
      NetUtils.createSocketAddrForHost(node.getHost(), node.getPort());
  final YarnRPC rpc = getYarnRPC();
  UserGroupInformation currentUser = UserGroupInformation
      .createRemoteUser(containerId.getApplicationAttemptId().toString());
  String user = rmContext.getRMApps()
      .get(containerId.getApplicationAttemptId().getApplicationId()).getUser();
  org.apache.hadoop.yarn.api.records.Token token =
      rmContext.getNMTokenSecretManager().createNMToken(
          containerId.getApplicationAttemptId(), node, user);
  currentUser.addToken(ConverterUtils.convertFromYarn(token, containerManagerConnectAddress));
  return NMProxy.createNMProxy(conf, ContainerManagementProtocol.class, currentUser, rpc,
      containerManagerConnectAddress);
}
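NodeId is essentially a host/port pair, which is why the AM launcher can turn the master container's NodeId directly into the socket address of the NodeManager's ContainerManagementProtocol endpoint. A minimal illustration of that conversion (host and port values are made up):

NodeId node = NodeId.newInstance("nm-host.example.com", 45454);
InetSocketAddress addr =
    NetUtils.createSocketAddrForHost(node.getHost(), node.getPort());
// addr now points at the NodeManager's container management endpoint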