Example usage of org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNodeEventType in the Apache Hadoop project:
class NodesListManager, method handleExcludeNodeList.
// Handle excluded nodes based on following rules:
// Recommission DECOMMISSIONED or DECOMMISSIONING nodes no longer excluded;
// Gracefully decommission excluded nodes that are not already
// DECOMMISSIONED nor DECOMMISSIONING; Take no action for excluded nodes
// that are already DECOMMISSIONED or DECOMMISSIONING.
private void handleExcludeNodeList(boolean graceful, Integer timeout) {
  // DECOMMISSIONED/DECOMMISSIONING nodes that need to be re-commissioned.
  List<RMNode> nodesToRecom = new ArrayList<RMNode>();
  // Nodes that need to be decommissioned (gracefully or forcefully).
  List<RMNode> nodesToDecom = new ArrayList<RMNode>();
  Set<String> includes = new HashSet<String>();
  // Maps excluded host -> optional per-host decommission timeout (may be null).
  Map<String, Integer> excludes = new HashMap<String, Integer>();
  hostsReader.getHostDetails(includes, excludes);
  for (RMNode n : this.rmContext.getRMNodes().values()) {
    NodeState s = n.getState();
    // An invalid node (either due to explicit exclude or not include)
    // should be excluded.
    boolean isExcluded = !isValidNode(
        n.getHostName(), includes, excludes.keySet());
    String nodeStr = "node " + n.getNodeID() + " with state " + s;
    if (!isExcluded) {
      // Note that no action is needed for DECOMMISSIONED node.
      if (s == NodeState.DECOMMISSIONING) {
        LOG.info("Recommission " + nodeStr);
        nodesToRecom.add(n);
      }
      // Otherwise no-action needed.
    } else {
      // exclude is true.
      if (graceful) {
        // Use the per-node timeout if one exists, otherwise the request
        // timeout.
        Integer timeoutToUse =
            resolveDecommissionTimeout(n.getHostName(), excludes, timeout);
        if (s != NodeState.DECOMMISSIONED && s != NodeState.DECOMMISSIONING) {
          LOG.info("Gracefully decommission " + nodeStr);
          nodesToDecom.add(n);
        } else if (s == NodeState.DECOMMISSIONING
            && !Objects.equals(n.getDecommissioningTimeout(), timeoutToUse)) {
          // Already draining but with a different deadline: re-issue the
          // event so the new timeout takes effect.
          LOG.info("Update " + nodeStr + " timeout to be " + timeoutToUse);
          nodesToDecom.add(n);
        } else {
          LOG.info("No action for " + nodeStr);
        }
      } else {
        if (s != NodeState.DECOMMISSIONED) {
          LOG.info("Forcefully decommission " + nodeStr);
          nodesToDecom.add(n);
        }
      }
    }
  }
  // Dispatch the re-commission events first, then the decommission events.
  for (RMNode n : nodesToRecom) {
    RMNodeEvent e =
        new RMNodeEvent(n.getNodeID(), RMNodeEventType.RECOMMISSION);
    this.rmContext.getDispatcher().getEventHandler().handle(e);
  }
  for (RMNode n : nodesToDecom) {
    RMNodeEvent e;
    if (graceful) {
      Integer timeoutToUse =
          resolveDecommissionTimeout(n.getHostName(), excludes, timeout);
      e = new RMNodeDecommissioningEvent(n.getNodeID(), timeoutToUse);
    } else {
      // Untracked hosts are shut down rather than hard-decommissioned.
      RMNodeEventType eventType = isUntrackedNode(n.getHostName())
          ? RMNodeEventType.SHUTDOWN : RMNodeEventType.DECOMMISSION;
      e = new RMNodeEvent(n.getNodeID(), eventType);
    }
    this.rmContext.getDispatcher().getEventHandler().handle(e);
  }
  updateInactiveNodes();
}

/**
 * Resolve the graceful-decommission timeout for a host: the per-host
 * timeout from the excludes map when one is set, otherwise the
 * request-level timeout. Note that a host present in the map with a
 * {@code null} value falls back to the request timeout, which is why
 * {@code Map.getOrDefault} cannot be used here.
 *
 * @param hostName host whose timeout is being resolved
 * @param excludes excluded host -> optional per-host timeout
 * @param requestTimeout timeout supplied with the refresh request
 * @return the timeout to apply (may be null if both sources are null)
 */
private Integer resolveDecommissionTimeout(
    String hostName, Map<String, Integer> excludes, Integer requestTimeout) {
  Integer perHostTimeout = excludes.get(hostName);
  return (perHostTimeout != null) ? perHostTimeout : requestTimeout;
}
Example usage of org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNodeEventType in the Apache Hadoop project:
class NodesListManager, method refreshNodesForcefully.
/**
 * Forcefully decommission any node that is currently in the
 * DECOMMISSIONING state. Untracked hosts receive a SHUTDOWN event
 * instead of a DECOMMISSION event.
 */
public void refreshNodesForcefully() {
  for (Entry<NodeId, RMNode> nodeEntry : rmContext.getRMNodes().entrySet()) {
    // Only nodes mid-drain need a forced transition; skip everything else.
    if (nodeEntry.getValue().getState() != NodeState.DECOMMISSIONING) {
      continue;
    }
    NodeId nodeId = nodeEntry.getKey();
    RMNodeEventType eventType;
    if (isUntrackedNode(nodeId.getHost())) {
      eventType = RMNodeEventType.SHUTDOWN;
    } else {
      eventType = RMNodeEventType.DECOMMISSION;
    }
    this.rmContext.getDispatcher().getEventHandler()
        .handle(new RMNodeEvent(nodeId, eventType));
  }
}
Aggregations