Use of java.util.Set in project hadoop by apache.
The class NodeCLI, method run.
@Override
public int run(String[] args) throws Exception {
  Options opts = new Options();
  opts.addOption(HELP_CMD, false, "Displays help for all commands.");
  opts.addOption(STATUS_CMD, true, "Prints the status report of the node.");
  opts.addOption(LIST_CMD, false, "List all running nodes. "
      + "Supports optional use of -states to filter nodes "
      + "based on node state, and -all to list all nodes, "
      + "-showDetails to display more details about each node.");
  Option nodeStateOpt = new Option(NODE_STATE_CMD, true,
      "Works with -list to filter nodes based on input comma-separated "
      + "list of node states. " + getAllValidNodeStates());
  nodeStateOpt.setValueSeparator(',');
  nodeStateOpt.setArgs(Option.UNLIMITED_VALUES);
  nodeStateOpt.setArgName("States");
  opts.addOption(nodeStateOpt);
  Option allOpt = new Option(NODE_ALL, false, "Works with -list to list all nodes.");
  opts.addOption(allOpt);
  Option showDetailsOpt = new Option(NODE_SHOW_DETAILS, false,
      "Works with -list to show more details about each node.");
  opts.addOption(showDetailsOpt);
  opts.getOption(STATUS_CMD).setArgName("NodeId");
  // Normalize any case variant of "-all" to the canonical lowercase form
  // so the parser recognizes it.
  if (args != null && args.length > 0) {
    for (int i = args.length - 1; i >= 0; i--) {
      if (args[i].equalsIgnoreCase("-" + NODE_ALL)) {
        args[i] = "-" + NODE_ALL;
      }
    }
  }
  int exitCode = -1;
  CommandLine cliParser = null;
  try {
    cliParser = new GnuParser().parse(opts, args);
  } catch (MissingArgumentException ex) {
    sysout.println("Missing argument for options");
    printUsage(opts);
    return exitCode;
  }
  if (cliParser.hasOption("status")) {
    if (args.length != 2) {
      printUsage(opts);
      return exitCode;
    }
    printNodeStatus(cliParser.getOptionValue("status"));
  } else if (cliParser.hasOption("list")) {
    Set<NodeState> nodeStates = new HashSet<NodeState>();
    if (cliParser.hasOption(NODE_ALL)) {
      for (NodeState state : NodeState.values()) {
        nodeStates.add(state);
      }
    } else if (cliParser.hasOption(NODE_STATE_CMD)) {
      String[] types = cliParser.getOptionValues(NODE_STATE_CMD);
      if (types != null) {
        for (String type : types) {
          if (!type.trim().isEmpty()) {
            try {
              nodeStates.add(NodeState.valueOf(
                  org.apache.hadoop.util.StringUtils.toUpperCase(type.trim())));
            } catch (IllegalArgumentException ex) {
              sysout.println("The node state " + type + " is invalid.");
              sysout.println(getAllValidNodeStates());
              return exitCode;
            }
          }
        }
      }
    } else {
      nodeStates.add(NodeState.RUNNING);
    }
    // List node details, with more information if -showDetails was given.
    if (cliParser.hasOption(NODE_SHOW_DETAILS)) {
      listDetailedClusterNodes(nodeStates);
    } else {
      listClusterNodes(nodeStates);
    }
  } else if (cliParser.hasOption(HELP_CMD)) {
    printUsage(opts);
    return 0;
  } else {
    syserr.println("Invalid Command Usage : ");
    printUsage(opts);
  }
  return 0;
}
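The comma-separated -states handling above relies on commons-cli's value separator. A minimal standalone sketch (assuming commons-cli on the classpath; the literal option name "states" stands in for NODE_STATE_CMD) showing how setValueSeparator(',') turns "-states RUNNING,UNHEALTHY" into separate values:

import org.apache.commons.cli.*;

public class StatesOptionDemo {
  public static void main(String[] args) throws Exception {
    Options opts = new Options();
    Option states = new Option("states", true, "node states filter");
    states.setValueSeparator(',');
    states.setArgs(Option.UNLIMITED_VALUES);
    opts.addOption(states);
    CommandLine cli = new GnuParser().parse(opts,
        new String[] { "-states", "RUNNING,UNHEALTHY" });
    // getOptionValues returns each comma-separated token as its own value
    for (String s : cli.getOptionValues("states")) {
      System.out.println(s); // prints RUNNING, then UNHEALTHY
    }
  }
}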
Use of java.util.Set in project hadoop by apache.
The class RMAdminCLI, method handleReplaceLabelsOnNodes.
private int handleReplaceLabelsOnNodes(String[] args, String cmd,
    boolean isHAEnabled) throws IOException, YarnException, ParseException {
  Options opts = new Options();
  opts.addOption("replaceLabelsOnNode", true, "Replace label on node.");
  opts.addOption("failOnUnknownNodes", false, "Fail on unknown nodes.");
  opts.addOption("directlyAccessNodeLabelStore", false, "Directly access node label store.");
  int exitCode = -1;
  CommandLine cliParser = null;
  try {
    cliParser = new GnuParser().parse(opts, args);
  } catch (MissingArgumentException ex) {
    System.err.println(NO_MAPPING_ERR_MSG);
    printUsage(args[0], isHAEnabled);
    return exitCode;
  }
  Map<NodeId, Set<String>> map =
      buildNodeLabelsMapFromStr(cliParser.getOptionValue("replaceLabelsOnNode"));
  return replaceLabelsOnNodes(map,
      cliParser.hasOption("failOnUnknownNodes"),
      cliParser.hasOption("directlyAccessNodeLabelStore"));
}
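buildNodeLabelsMapFromStr itself is not shown here. As a rough illustration only, a hedged sketch of building the Map<NodeId, Set<String>> that replaceLabelsOnNodes consumes, assuming a "host[:port]=label1,label2 ..." input format; the helper name, the default port, and the error handling are all hypothetical, not the real parser:

import java.util.*;
import org.apache.hadoop.yarn.api.records.NodeId;

public class NodeLabelsParseDemo {
  // Hypothetical stand-in for buildNodeLabelsMapFromStr, not the real parser.
  static Map<NodeId, Set<String>> parseNodeToLabels(String arg) {
    Map<NodeId, Set<String>> map = new HashMap<>();
    for (String entry : arg.trim().split("\\s+")) {
      String[] kv = entry.split("=", 2);
      String host = kv[0];
      // default port when none is given (assumption)
      int port = 0;
      int colon = host.indexOf(':');
      if (colon >= 0) {
        port = Integer.parseInt(host.substring(colon + 1));
        host = host.substring(0, colon);
      }
      Set<String> labels = new HashSet<>();
      if (kv.length == 2 && !kv[1].isEmpty()) {
        labels.addAll(Arrays.asList(kv[1].split(",")));
      }
      map.put(NodeId.newInstance(host, port), labels);
    }
    return map;
  }

  public static void main(String[] args) {
    System.out.println(parseNodeToLabels("node1:8041=gpu,ssd node2=gpu"));
  }
}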
Use of java.util.Set in project hadoop by apache.
The class ContainerUpdateContext, method matchContainerToOutstandingIncreaseReq.
/**
* Check if a new container is to be matched up against an outstanding
* Container increase request.
* @param node SchedulerNode.
* @param schedulerKey SchedulerRequestKey.
* @param rmContainer RMContainer.
* @return ContainerId.
*/
public ContainerId matchContainerToOutstandingIncreaseReq(SchedulerNode node,
    SchedulerRequestKey schedulerKey, RMContainer rmContainer) {
  ContainerId retVal = null;
  Container container = rmContainer.getContainer();
  Map<Resource, Map<NodeId, Set<ContainerId>>> resourceMap =
      outstandingIncreases.get(schedulerKey);
  if (resourceMap != null) {
    Map<NodeId, Set<ContainerId>> locationMap =
        resourceMap.get(container.getResource());
    if (locationMap != null) {
      Set<ContainerId> containerIds = locationMap.get(container.getNodeId());
      if (containerIds != null && !containerIds.isEmpty()) {
        retVal = containerIds.iterator().next();
      }
    }
  }
  // No match was found: add the resource requests back so they can be reallocated.
  if (resourceMap != null && retVal == null) {
    Map<SchedulerRequestKey, Map<String, ResourceRequest>> reqsToUpdate =
        new HashMap<>();
    Map<String, ResourceRequest> resMap = createResourceRequests(rmContainer,
        node, schedulerKey, rmContainer.getContainer().getResource());
    reqsToUpdate.put(schedulerKey, resMap);
    appSchedulingInfo.addToPlacementSets(true, reqsToUpdate);
    return UNDEFINED;
  }
  return retVal;
}
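The lookup above walks a three-level index: SchedulerRequestKey to Resource to NodeId to a set of container ids. A self-contained sketch of the same shape, with String keys standing in for the YARN record classes to keep it runnable on its own:

import java.util.*;

public class NestedIndexDemo {
  public static void main(String[] args) {
    // resource -> node -> container ids (one SchedulerRequestKey's slice of the index)
    Map<String, Map<String, Set<String>>> index = new HashMap<>();
    index.computeIfAbsent("<memory:2048, vCores:2>", r -> new HashMap<>())
        .computeIfAbsent("node1:8041", n -> new HashSet<>())
        .add("container_001");
    // The lookup mirrors matchContainerToOutstandingIncreaseReq:
    Set<String> ids = index
        .getOrDefault("<memory:2048, vCores:2>", Collections.emptyMap())
        .getOrDefault("node1:8041", Collections.emptySet());
    System.out.println(ids.isEmpty() ? "no match" : ids.iterator().next());
  }
}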
Use of java.util.Set in project hadoop by apache.
The class ContainerUpdateContext, method checkAndAddToOutstandingIncreases.
/**
* Add the container to outstanding increases.
* @param rmContainer RMContainer.
* @param schedulerNode SchedulerNode.
* @param updateRequest UpdateContainerRequest.
* @return true if the update was added to outstanding increases.
*/
public synchronized boolean checkAndAddToOutstandingIncreases(
    RMContainer rmContainer, SchedulerNode schedulerNode,
    UpdateContainerRequest updateRequest) {
  Container container = rmContainer.getContainer();
  SchedulerRequestKey schedulerKey = SchedulerRequestKey.create(updateRequest,
      rmContainer.getAllocatedSchedulerKey());
  Map<Resource, Map<NodeId, Set<ContainerId>>> resourceMap =
      outstandingIncreases.get(schedulerKey);
  if (resourceMap == null) {
    resourceMap = new HashMap<>();
    outstandingIncreases.put(schedulerKey, resourceMap);
  } else {
    // Updating the Resource for an existing increase container
    if (ContainerUpdateType.INCREASE_RESOURCE ==
        updateRequest.getContainerUpdateType()) {
      cancelPreviousRequest(schedulerNode, schedulerKey);
    } else {
      return false;
    }
  }
  Resource resToIncrease = getResourceToIncrease(updateRequest, rmContainer);
  Map<NodeId, Set<ContainerId>> locationMap = resourceMap.get(resToIncrease);
  if (locationMap == null) {
    locationMap = new HashMap<>();
    resourceMap.put(resToIncrease, locationMap);
  }
  Set<ContainerId> containerIds = locationMap.get(container.getNodeId());
  if (containerIds == null) {
    containerIds = new HashSet<>();
    locationMap.put(container.getNodeId(), containerIds);
  }
  if (outstandingDecreases.containsKey(container.getId())) {
    return false;
  }
  containerIds.add(container.getId());
  if (!Resources.isNone(resToIncrease)) {
    Map<SchedulerRequestKey, Map<String, ResourceRequest>> updateResReqs =
        new HashMap<>();
    Map<String, ResourceRequest> resMap = createResourceRequests(rmContainer,
        schedulerNode, schedulerKey, resToIncrease);
    updateResReqs.put(schedulerKey, resMap);
    appSchedulingInfo.addToPlacementSets(false, updateResReqs);
  }
  return true;
}
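The two null-check-then-put blocks for locationMap and containerIds can be written more compactly with computeIfAbsent. A behavior-equivalent sketch, using the same variables as the method above:

Map<NodeId, Set<ContainerId>> locationMap =
    resourceMap.computeIfAbsent(resToIncrease, r -> new HashMap<>());
Set<ContainerId> containerIds =
    locationMap.computeIfAbsent(container.getNodeId(), n -> new HashSet<>());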
Use of java.util.Set in project hadoop by apache.
The class CombineFileInputFormat, method getMoreSplits.
/**
 * Return all the splits for the specified set of paths.
 */
private void getMoreSplits(JobContext job, List<FileStatus> stats,
    long maxSize, long minSizeNode, long minSizeRack,
    List<InputSplit> splits) throws IOException {
  Configuration conf = job.getConfiguration();
  // all blocks for all the files in the input set
  OneFileInfo[] files;
  // mapping from a rack name to the list of blocks it has
  HashMap<String, List<OneBlockInfo>> rackToBlocks =
      new HashMap<String, List<OneBlockInfo>>();
  // mapping from a block to the nodes on which it has replicas
  HashMap<OneBlockInfo, String[]> blockToNodes =
      new HashMap<OneBlockInfo, String[]>();
  // mapping from a node to the list of blocks that it contains
  HashMap<String, Set<OneBlockInfo>> nodeToBlocks =
      new HashMap<String, Set<OneBlockInfo>>();
  files = new OneFileInfo[stats.size()];
  if (stats.size() == 0) {
    return;
  }
  // populate all the blocks for all files
  long totLength = 0;
  int i = 0;
  for (FileStatus stat : stats) {
    files[i] = new OneFileInfo(stat, conf, isSplitable(job, stat.getPath()),
        rackToBlocks, blockToNodes, nodeToBlocks, rackToNodes, maxSize);
    totLength += files[i].getLength();
    // advance to the next slot; without this, every file lands in files[0]
    i++;
  }
  createSplits(nodeToBlocks, blockToNodes, rackToBlocks, totLength, maxSize,
      minSizeNode, minSizeRack, splits);
}
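A self-contained sketch of how a single block would populate the three indexes built above, with Strings standing in for OneBlockInfo (which is an inner class of CombineFileInputFormat in the real code); illustrative only:

import java.util.*;

public class BlockIndexDemo {
  public static void main(String[] args) {
    Map<String, List<String>> rackToBlocks = new HashMap<>();
    Map<String, String[]> blockToNodes = new HashMap<>();
    Map<String, Set<String>> nodeToBlocks = new HashMap<>();

    String block = "blk_1001";
    String[] replicas = { "node1", "node2" };

    // block -> nodes holding its replicas
    blockToNodes.put(block, replicas);
    // rack -> blocks it hosts
    rackToBlocks.computeIfAbsent("/rack1", r -> new ArrayList<>()).add(block);
    // node -> blocks it holds
    for (String node : replicas) {
      nodeToBlocks.computeIfAbsent(node, n -> new HashSet<>()).add(block);
    }
    System.out.println(nodeToBlocks); // {node1=[blk_1001], node2=[blk_1001]}
  }
}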