Use of org.apache.hadoop.yarn.api.records.NodeId in project hadoop by apache.
The class TestYARNTokenIdentifier, method testNMTokenIdentifier.
@Test
public void testNMTokenIdentifier() throws IOException {
  ApplicationAttemptId appAttemptId =
      ApplicationAttemptId.newInstance(ApplicationId.newInstance(1, 1), 1);
  NodeId nodeId = NodeId.newInstance("host0", 0);
  String applicationSubmitter = "usr0";
  int masterKeyId = 1;
  NMTokenIdentifier token =
      new NMTokenIdentifier(appAttemptId, nodeId, applicationSubmitter, masterKeyId);
  // Serialize the token, then deserialize it into a fresh instance.
  NMTokenIdentifier anotherToken = new NMTokenIdentifier();
  byte[] tokenContent = token.getBytes();
  DataInputBuffer dib = new DataInputBuffer();
  dib.reset(tokenContent, tokenContent.length);
  anotherToken.readFields(dib);
  // Verify the whole record equals the original record.
  Assert.assertEquals("Token is not the same after serialization "
      + "and deserialization.", token, anotherToken);
  // Verify all properties match the original (expected value first).
  Assert.assertEquals("appAttemptId from proto is not the same as the original token",
      appAttemptId, anotherToken.getApplicationAttemptId());
  Assert.assertEquals("NodeId from proto is not the same as the original token",
      nodeId, anotherToken.getNodeId());
  Assert.assertEquals("applicationSubmitter from proto is not the same as the original token",
      applicationSubmitter, anotherToken.getApplicationSubmitter());
  Assert.assertEquals("masterKeyId from proto is not the same as the original token",
      masterKeyId, anotherToken.getKeyId());
}
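The round trip above (getBytes, then readFields over a DataInputBuffer) works for any Writable-backed token identifier. A minimal sketch of the pattern as a reusable helper; the TokenRoundTrip class and roundTrip method are hypothetical, not part of Hadoop:

import java.io.IOException;
import org.apache.hadoop.io.DataInputBuffer;
import org.apache.hadoop.security.token.TokenIdentifier;

final class TokenRoundTrip {
  // Hypothetical helper: serialize a token identifier to bytes, then
  // rehydrate an empty instance of the same type via readFields().
  static <T extends TokenIdentifier> T roundTrip(T original, T empty)
      throws IOException {
    byte[] bytes = original.getBytes();  // Writable serialization
    DataInputBuffer in = new DataInputBuffer();
    in.reset(bytes, bytes.length);       // wrap the serialized bytes
    empty.readFields(in);                // populate the empty instance
    return empty;
  }
}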
Use of org.apache.hadoop.yarn.api.records.NodeId in project hadoop by apache.
The class ContainerManagerImpl, method serviceStart.
@Override
protected void serviceStart() throws Exception {
  // Enqueue user dirs in deletion context
  Configuration conf = getConfig();
  final InetSocketAddress initialAddress = conf.getSocketAddr(
      YarnConfiguration.NM_BIND_HOST,
      YarnConfiguration.NM_ADDRESS,
      YarnConfiguration.DEFAULT_NM_ADDRESS,
      YarnConfiguration.DEFAULT_NM_PORT);
  boolean usingEphemeralPort = (initialAddress.getPort() == 0);
  if (context.getNMStateStore().canRecover() && usingEphemeralPort) {
    throw new IllegalArgumentException("Cannot support recovery with an "
        + "ephemeral server port. Check the setting of "
        + YarnConfiguration.NM_ADDRESS);
  }
  // If recovering, delay opening the RPC service until the recovery
  // of resources and containers has completed; otherwise requests from
  // clients during recovery can interfere with the recovery process.
  final boolean delayedRpcServerStart = context.getNMStateStore().canRecover();
  Configuration serverConf = new Configuration(conf);
  // Always enforce token-based authentication for this server.
  serverConf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION,
      SaslRpcServer.AuthMethod.TOKEN.toString());
  YarnRPC rpc = YarnRPC.create(conf);
  server = rpc.getServer(ContainerManagementProtocol.class, this,
      initialAddress, serverConf, this.context.getNMTokenSecretManager(),
      conf.getInt(YarnConfiguration.NM_CONTAINER_MGR_THREAD_COUNT,
          YarnConfiguration.DEFAULT_NM_CONTAINER_MGR_THREAD_COUNT));
  // Enable service authorization?
  if (conf.getBoolean(
      CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHORIZATION, false)) {
    refreshServiceAcls(conf, new NMPolicyProvider());
  }
  LOG.info("Blocking new container-requests as container manager rpc"
      + " server is still starting.");
  this.setBlockNewContainerRequests(true);
  String bindHost = conf.get(YarnConfiguration.NM_BIND_HOST);
  String nmAddress = conf.getTrimmed(YarnConfiguration.NM_ADDRESS);
  String hostOverride = null;
  if (bindHost != null && !bindHost.isEmpty()
      && nmAddress != null && !nmAddress.isEmpty()) {
    // A bind-host case with an address: to support overriding the first
    // hostname found when querying for our hostname with the specified
    // address, combine the specified address with the actual port
    // listened on by the server.
    hostOverride = nmAddress.split(":")[0];
  }
  // Set up the node ID.
  InetSocketAddress connectAddress;
  if (delayedRpcServerStart) {
    connectAddress = NetUtils.getConnectAddress(initialAddress);
  } else {
    server.start();
    connectAddress = NetUtils.getConnectAddress(server);
  }
  NodeId nodeId = buildNodeId(connectAddress, hostOverride);
  ((NodeManager.NMContext) context).setNodeId(nodeId);
  this.context.getNMTokenSecretManager().setNodeId(nodeId);
  this.context.getContainerTokenSecretManager().setNodeId(nodeId);
  // Start the remaining services.
  super.serviceStart();
  if (delayedRpcServerStart) {
    waitForRecoveredContainers();
    server.start();
    // Check that the node ID is as previously advertised.
    connectAddress = NetUtils.getConnectAddress(server);
    NodeId serverNode = buildNodeId(connectAddress, hostOverride);
    if (!serverNode.equals(nodeId)) {
      throw new IOException("Node mismatch after server started, expected '"
          + nodeId + "' but found '" + serverNode + "'");
    }
  }
  LOG.info("ContainerManager started at " + connectAddress);
  LOG.info("ContainerManager bound to " + initialAddress);
}
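buildNodeId is a private helper of ContainerManagerImpl and is not shown here. A plausible sketch of its logic, under the assumption that it prefers the host override derived from yarn.nodemanager.address and always keeps the port the server actually bound to:

// Sketch only (an assumption, not the verbatim Hadoop source).
private NodeId buildNodeId(InetSocketAddress connectAddress,
    String hostOverride) {
  if (hostOverride != null) {
    // Advertise the configured host but keep the real listening port.
    return NodeId.newInstance(hostOverride, connectAddress.getPort());
  }
  return NodeId.newInstance(
      connectAddress.getAddress().getCanonicalHostName(),
      connectAddress.getPort());
}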
Use of org.apache.hadoop.yarn.api.records.NodeId in project hadoop by apache.
The class AdminService, method refreshNodesResources.
@Override
public RefreshNodesResourcesResponse refreshNodesResources(
    RefreshNodesResourcesRequest request) throws YarnException, StandbyException {
  final String operation = "refreshNodesResources";
  UserGroupInformation user = checkAcls(operation);
  final String msg = "refresh nodes.";
  checkRMStatus(user.getShortUserName(), operation, msg);
  RefreshNodesResourcesResponse response =
      recordFactory.newRecordInstance(RefreshNodesResourcesResponse.class);
  try {
    Configuration conf = getConfig();
    Configuration configuration = new Configuration(conf);
    DynamicResourceConfiguration newConf;
    InputStream drInputStream =
        this.rmContext.getConfigurationProvider().getConfigurationInputStream(
            configuration, YarnConfiguration.DR_CONFIGURATION_FILE);
    if (drInputStream != null) {
      newConf = new DynamicResourceConfiguration(configuration, drInputStream);
    } else {
      newConf = new DynamicResourceConfiguration(configuration);
    }
    if (newConf.getNodes() != null && newConf.getNodes().length != 0) {
      Map<NodeId, ResourceOption> nodeResourceMap = newConf.getNodeResourceMap();
      UpdateNodeResourceRequest updateRequest =
          UpdateNodeResourceRequest.newInstance(nodeResourceMap);
      updateNodeResource(updateRequest);
    }
    // Refresh dynamic resources in the ResourceTrackerService.
    this.rmContext.getResourceTrackerService()
        .updateDynamicResourceConfiguration(newConf);
    RMAuditLogger.logSuccess(user.getShortUserName(), operation, "AdminService");
    return response;
  } catch (IOException ioe) {
    throw logAndWrapException(ioe, user.getShortUserName(), operation, msg);
  }
}
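When the node-to-resource mapping is known up front, the same UpdateNodeResourceRequest can be assembled by hand inside a method body. A minimal sketch; the host, port, and capacity values are illustrative:

// Illustrative values only: worker1.example.com, port 45454, 8 GB / 8 vcores.
Map<NodeId, ResourceOption> nodeResourceMap = new HashMap<NodeId, ResourceOption>();
NodeId node = NodeId.newInstance("worker1.example.com", 45454);
Resource newCapacity = Resource.newInstance(8192, 8);
nodeResourceMap.put(node, ResourceOption.newInstance(newCapacity,
    ResourceOption.OVER_COMMIT_TIMEOUT_MILLIS_DEFAULT));
UpdateNodeResourceRequest updateRequest =
    UpdateNodeResourceRequest.newInstance(nodeResourceMap);
updateNodeResource(updateRequest); // same AdminService call as above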
Use of org.apache.hadoop.yarn.api.records.NodeId in project hadoop by apache.
The class NodesListManager, method setDecomissionedNMs.
private void setDecomissionedNMs() {
  // Register every excluded host as an inactive, decommissioned node.
  Set<String> excludeList = hostsReader.getExcludedHosts();
  for (final String host : excludeList) {
    NodeId nodeId = createUnknownNodeId(host);
    RMNodeImpl rmNode = new RMNodeImpl(nodeId, rmContext, host, -1, -1,
        new UnknownNode(host), Resource.newInstance(0, 0), "unknown");
    rmContext.getInactiveRMNodes().put(nodeId, rmNode);
    rmNode.handle(new RMNodeEvent(nodeId, RMNodeEventType.DECOMMISSION));
  }
}
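createUnknownNodeId is defined elsewhere in NodesListManager. A minimal sketch of what it presumably does: an excluded host has no known NodeManager port yet, so a placeholder port marks the node as unknown (the -1 value is an assumption):

// Sketch only (an assumption, not the verbatim Hadoop source).
public static NodeId createUnknownNodeId(String host) {
  return NodeId.newInstance(host, -1);
}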
Use of org.apache.hadoop.yarn.api.records.NodeId in project hadoop by apache.
The class NodesListManager, method serviceInit.
@Override
protected void serviceInit(Configuration conf) throws Exception {
  this.conf = conf;
  int nodeIpCacheTimeout = conf.getInt(
      YarnConfiguration.RM_NODE_IP_CACHE_EXPIRY_INTERVAL_SECS,
      YarnConfiguration.DEFAULT_RM_NODE_IP_CACHE_EXPIRY_INTERVAL_SECS);
  if (nodeIpCacheTimeout <= 0) {
    resolver = new DirectResolver();
  } else {
    resolver = new CachedResolver(SystemClock.getInstance(), nodeIpCacheTimeout);
    addIfService(resolver);
  }
  // Read the hosts/exclude files to restrict access to the RM.
  try {
    this.includesFile = conf.get(
        YarnConfiguration.RM_NODES_INCLUDE_FILE_PATH,
        YarnConfiguration.DEFAULT_RM_NODES_INCLUDE_FILE_PATH);
    this.excludesFile = conf.get(
        YarnConfiguration.RM_NODES_EXCLUDE_FILE_PATH,
        YarnConfiguration.DEFAULT_RM_NODES_EXCLUDE_FILE_PATH);
    this.hostsReader = createHostsFileReader(this.includesFile, this.excludesFile);
    setDecomissionedNMs();
    printConfiguredHosts();
  } catch (YarnException ex) {
    disableHostsFileReader(ex);
  } catch (IOException ioe) {
    disableHostsFileReader(ioe);
  }
  final int nodeRemovalTimeout = conf.getInt(
      YarnConfiguration.RM_NODEMANAGER_UNTRACKED_REMOVAL_TIMEOUT_MSEC,
      YarnConfiguration.DEFAULT_RM_NODEMANAGER_UNTRACKED_REMOVAL_TIMEOUT_MSEC);
  nodeRemovalCheckInterval = Math.min(nodeRemovalTimeout / 2, 600000);
  removalTimer = new Timer("Node Removal Timer");
  removalTimer.schedule(new TimerTask() {
    @Override
    public void run() {
      long now = Time.monotonicNow();
      for (Map.Entry<NodeId, RMNode> entry :
          rmContext.getInactiveRMNodes().entrySet()) {
        NodeId nodeId = entry.getKey();
        RMNode rmNode = entry.getValue();
        if (isUntrackedNode(rmNode.getHostName())) {
          if (rmNode.getUntrackedTimeStamp() == 0) {
            rmNode.setUntrackedTimeStamp(now);
          } else if (now - rmNode.getUntrackedTimeStamp() > nodeRemovalTimeout) {
            RMNode result = rmContext.getInactiveRMNodes().remove(nodeId);
            if (result != null) {
              decrInactiveNMMetrics(rmNode);
              LOG.info("Removed " + result.getState().toString() + " node "
                  + result.getHostName() + " from inactive nodes list");
            }
          }
        } else {
          rmNode.setUntrackedTimeStamp(0);
        }
      }
    }
  }, nodeRemovalCheckInterval, nodeRemovalCheckInterval);
  super.serviceInit(conf);
}
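Both NodesListManager snippets key the inactive-nodes map by NodeId, which relies on NodeId's value semantics: equals() and hashCode() are defined over host and port, and toString() yields host:port. A minimal illustration, assuming NodeId.fromString is available in this Hadoop version (older code used ConverterUtils.toNodeId):

NodeId a = NodeId.newInstance("host0", 8041);
NodeId b = NodeId.fromString("host0:8041"); // parse "host:port"
Map<NodeId, String> nodes = new HashMap<NodeId, String>();
nodes.put(a, "inactive");
System.out.println(a.equals(b));  // true: same host and port
System.out.println(nodes.get(b)); // "inactive": value-based lookup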