Example use of org.apache.hadoop.yarn.client.api.YarnClient in project cdap by caskdata:
class YarnTokenUtils, method obtainToken.
/**
 * Obtains a YARN ResourceManager delegation token and stores it in the given {@link Credentials}.
 * No-op when Hadoop security (Kerberos) is disabled.
 *
 * @param configuration the YARN configuration used to locate and talk to the ResourceManager
 * @param credentials the credentials object to add the delegation token to; mutated in place
 * @return the same Credentials instance as the one given in parameter.
 * @throws RuntimeException wrapping any failure while contacting the ResourceManager
 */
public static Credentials obtainToken(YarnConfiguration configuration, Credentials credentials) {
if (!UserGroupInformation.isSecurityEnabled()) {
// Without Kerberos there is no delegation token to fetch.
return credentials;
}
try {
YarnClient yarnClient = YarnClient.createYarnClient();
yarnClient.init(configuration);
yarnClient.start();
try {
// The current user renews its own token.
Text renewer = new Text(UserGroupInformation.getCurrentUser().getShortUserName());
org.apache.hadoop.yarn.api.records.Token rmDelegationToken = yarnClient.getRMDelegationToken(renewer);
// TODO: The following logic should be replaced with call to ClientRMProxy.getRMDelegationTokenService after
// CDAP-4825 is resolved
List<String> services = new ArrayList<>();
if (HAUtil.isHAEnabled(configuration)) {
// If HA is enabled, we need to enumerate all RM hosts
// and add the corresponding service name to the token service
// Copy the yarn conf since we need to modify it to get the RM addresses
YarnConfiguration yarnConf = new YarnConfiguration(configuration);
for (String rmId : HAUtil.getRMHAIds(configuration)) {
// Selecting the RM id makes getSocketAddr resolve that specific RM's address.
yarnConf.set(YarnConfiguration.RM_HA_ID, rmId);
InetSocketAddress address = yarnConf.getSocketAddr(YarnConfiguration.RM_ADDRESS, YarnConfiguration.DEFAULT_RM_ADDRESS, YarnConfiguration.DEFAULT_RM_PORT);
services.add(SecurityUtil.buildTokenService(address).toString());
}
} else {
services.add(SecurityUtil.buildTokenService(YarnUtils.getRMAddress(configuration)).toString());
}
// Service address is intentionally null here; the real service is set below.
Token<TokenIdentifier> token = ConverterUtils.convertFromYarn(rmDelegationToken, null);
// A comma-separated service list lets the token match any of the HA RMs.
token.setService(new Text(Joiner.on(',').join(services)));
credentials.addToken(new Text(token.getService()), token);
// OK to log, it won't log the credential, only information about the token.
LOG.debug("Added RM delegation token: {}", token);
} finally {
// Always release the client's RPC resources.
yarnClient.stop();
}
return credentials;
} catch (Exception e) {
throw Throwables.propagate(e);
}
}
Example use of org.apache.hadoop.yarn.client.api.YarnClient in project cdap by caskdata:
class YarnNodes, method collect.
@Override
public synchronized void collect() throws Exception {
  reset();
  List<NodeReport> reports;
  YarnClient client = createYARNClient();
  try {
    reports = client.getNodeReports();
  } finally {
    // Release the client's RPC resources whether or not the fetch succeeded.
    client.stop();
  }
  // Bucket every node (and its container count) by how usable it currently is.
  for (NodeReport report : reports) {
    int containers = report.getNumContainers();
    switch (report.getNodeState()) {
      case RUNNING:
        healthyNodes++;
        healthyContainers += containers;
        break;
      case UNHEALTHY:
      case DECOMMISSIONED:
      case LOST:
        unusableNodes++;
        unusableContainers += containers;
        break;
      case NEW:
      case REBOOTED:
        newNodes++;
        newContainers += containers;
        break;
    }
  }
}
Example use of org.apache.hadoop.yarn.client.api.YarnClient in project cdap by caskdata:
class YarnQueues, method collect.
@Override
public synchronized void collect() throws Exception {
  reset();
  List<QueueInfo> queueInfos;
  YarnClient client = createYARNClient();
  try {
    queueInfos = client.getAllQueues();
  } finally {
    // Release the client's RPC resources whether or not the fetch succeeded.
    client.stop();
  }
  // Tally queues by their current state.
  for (QueueInfo info : queueInfos) {
    switch (info.getQueueState()) {
      case RUNNING:
        running++;
        break;
      case STOPPED:
        stopped++;
        break;
    }
  }
}
Example use of org.apache.hadoop.yarn.client.api.YarnClient in project cdap by caskdata:
class YarnResources, method collect.
@Override
public synchronized void collect() throws Exception {
  reset();
  List<NodeReport> reports;
  YarnClient client = createYARNClient();
  try {
    reports = client.getNodeReports();
  } finally {
    // Release the client's RPC resources whether or not the fetch succeeded.
    client.stop();
  }
  // Sum total and used memory/vcores over all usable nodes.
  for (NodeReport report : reports) {
    NodeId id = report.getNodeId();
    LOG.debug("Got report for node {}", id);
    if (report.getNodeState().isUnusable()) {
      // Unusable nodes (lost, decommissioned, ...) contribute no capacity.
      continue;
    }
    Resource capability = report.getCapability();
    // some versions of hadoop return null, others do not
    if (capability != null) {
      LOG.debug("node {} resource capability: memory = {}, vcores = {}", id, capability.getMemory(), capability.getVirtualCores());
      totalMemory += capability.getMemory();
      totalVCores += capability.getVirtualCores();
    }
    Resource used = report.getUsed();
    if (used != null) {
      LOG.debug("node {} resources used: memory = {}, vcores = {}", id, used.getMemory(), used.getVirtualCores());
      usedMemory += used.getMemory();
      usedVCores += used.getVirtualCores();
    }
  }
}
Example use of org.apache.hadoop.yarn.client.api.YarnClient in project h2o-3 by h2oai:
class H2OYarnDiagnostic, method run.
/**
 * Queries YARN for cluster metrics, node reports and queue information, then prints a
 * capacity diagnosis report to stdout. Prints an error diagnosis and returns early when
 * the configured queue ({@code this.queueName}) does not exist.
 *
 * @throws IOException if communication with the ResourceManager fails
 * @throws YarnException if YARN reports an application-level error
 */
private void run() throws IOException, YarnException {
  YarnClient yarnClient = YarnClient.createYarnClient();
  yarnClient.init(conf);
  yarnClient.start();
  // Fix: the client was previously never stopped, leaking RPC resources on every
  // invocation (including the early return below). Always stop it in a finally block.
  try {
    List<NodeReport> clusterNodeReports = yarnClient.getNodeReports();
    List<QueueInfo> rootQueues = yarnClient.getRootQueueInfos();
    QueueInfo queueInfo = yarnClient.getQueueInfo(this.queueName);
    if (queueInfo == null) {
      printErrorDiagnosis("Queue not found (" + this.queueName + ")");
      return;
    }
    System.out.println("");
    printYarnClusterMetrics(yarnClient);
    System.out.println("");
    printClusterNodeReports(clusterNodeReports);
    System.out.println("");
    printQueueInfo(queueInfo);
    System.out.println("");
    printQueueCapacity(clusterNodeReports, queueInfo, rootQueues);
    System.out.println("");
    printDiagnosis(clusterNodeReports);
  } finally {
    yarnClient.stop();
  }
}
Aggregations