Use of org.apache.hadoop.hdfs.server.diskbalancer.DiskBalancerException in project hadoop by apache.
The class DiskBalancer, method submitPlan.
/**
 * Takes a client submitted plan and converts it into a set of work items that
 * can be executed by the blockMover.
 *
 * @param planId - A SHA-1 of the plan string
 * @param planVersion - version of the plan string - for future use.
 * @param planFileName - Plan file name
 * @param planData - Plan data in json format
 * @param force - Skip some validations and execute the plan file.
 * @throws DiskBalancerException if the disk balancer is disabled, another plan
 *                               is already in progress, or the plan fails verification
 */
public void submitPlan(String planId, long planVersion, String planFileName,
                       String planData, boolean force) throws DiskBalancerException {
  lock.lock();
  try {
    checkDiskBalancerEnabled();
    if ((this.future != null) && (!this.future.isDone())) {
      LOG.error("Disk Balancer - Executing another plan, submitPlan failed.");
      throw new DiskBalancerException("Executing another plan",
          DiskBalancerException.Result.PLAN_ALREADY_IN_PROGRESS);
    }
    NodePlan nodePlan = verifyPlan(planId, planVersion, planData, force);
    createWorkPlan(nodePlan);
    this.planID = planId;
    this.planFile = planFileName;
    this.currentResult = Result.PLAN_UNDER_PROGRESS;
    executePlan();
  } finally {
    lock.unlock();
  }
}
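Every failure path in this method surfaces as a DiskBalancerException carrying a Result code that callers can inspect with getResult(). The following is a minimal caller sketch, not taken from the Hadoop source: the DiskBalancer instance, plan ID, and plan JSON are assumed to be supplied elsewhere, the file name is a placeholder, and the literal version 1L mirrors DiskBalancerCLI.PLAN_VERSION.

import org.apache.hadoop.hdfs.server.datanode.DiskBalancer;
import org.apache.hadoop.hdfs.server.diskbalancer.DiskBalancerException;

// Hypothetical helper: treats an in-progress plan as a non-fatal condition
// instead of an error.
static void submitIfIdle(DiskBalancer diskBalancer, String planId,
                         String planData) throws DiskBalancerException {
  try {
    // planId is the SHA-1 hex digest of planData; 1L matches DiskBalancerCLI.PLAN_VERSION.
    diskBalancer.submitPlan(planId, 1L, "datanode.plan.json", planData, false);
  } catch (DiskBalancerException ex) {
    if (ex.getResult()
        == DiskBalancerException.Result.PLAN_ALREADY_IN_PROGRESS) {
      // Another plan is still executing on this datanode; wait for it to
      // finish (or cancel it) before resubmitting.
      return;
    }
    throw ex;
  }
}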
Use of org.apache.hadoop.hdfs.server.diskbalancer.DiskBalancerException in project hadoop by apache.
The class DiskBalancer, method getStorageIDToVolumeBasePathMap.
/**
 * Returns a map from volume UUID (storage ID) to volume base path.
 *
 * @return Map of storage ID to volume base path
 * @throws DiskBalancerException if the volume references cannot be read
 */
private Map<String, String> getStorageIDToVolumeBasePathMap()
    throws DiskBalancerException {
  Map<String, String> storageIDToVolBasePathMap = new HashMap<>();
  FsDatasetSpi.FsVolumeReferences references;
  try {
    try (AutoCloseableLock lock = this.dataset.acquireDatasetLock()) {
      references = this.dataset.getFsVolumeReferences();
      for (int ndx = 0; ndx < references.size(); ndx++) {
        FsVolumeSpi vol = references.get(ndx);
        storageIDToVolBasePathMap.put(vol.getStorageID(),
            vol.getBaseURI().getPath());
      }
      references.close();
    }
  } catch (IOException ex) {
    LOG.error("Disk Balancer - Internal Error.", ex);
    throw new DiskBalancerException("Internal error", ex,
        DiskBalancerException.Result.INTERNAL_ERROR);
  }
  return storageIDToVolBasePathMap;
}
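The volume references are closed explicitly inside the locked block, so an exception raised while iterating would skip the close() call. Assuming FsVolumeReferences can participate in try-with-resources (its explicit close() above suggests it is Closeable), an equivalent sketch of the same lookup could lean on try-with-resources instead; this is not the code as it appears in Hadoop, and dataset stands in for the datanode's FsDatasetSpi instance.

import java.io.IOException;
import java.util.HashMap;
import java.util.Map;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
import org.apache.hadoop.util.AutoCloseableLock;

// Sketch only: builds the same storage-ID-to-base-path map, but lets
// try-with-resources release the dataset lock and the volume references.
static Map<String, String> volumeBasePathsOf(FsDatasetSpi<?> dataset)
    throws IOException {
  Map<String, String> map = new HashMap<>();
  try (AutoCloseableLock lock = dataset.acquireDatasetLock();
       FsDatasetSpi.FsVolumeReferences refs = dataset.getFsVolumeReferences()) {
    for (int ndx = 0; ndx < refs.size(); ndx++) {
      FsVolumeSpi vol = refs.get(ndx);
      map.put(vol.getStorageID(), vol.getBaseURI().getPath());
    }
  }
  return map;
}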
Use of org.apache.hadoop.hdfs.server.diskbalancer.DiskBalancerException in project hadoop by apache.
The class Command, method getNodes.
/**
 * Returns a list of DiskBalancer nodes from the cluster. An empty argument
 * yields an empty list; unknown node names cause an exception rather than a
 * null return.
 *
 * @param listArg String File URL or a comma separated list of node names.
 * @return List of DiskBalancerDataNode
 * @throws IOException if any of the requested nodes cannot be found in the cluster
 */
protected List<DiskBalancerDataNode> getNodes(String listArg)
    throws IOException {
  Set<String> nodeNames = null;
  List<DiskBalancerDataNode> nodeList = Lists.newArrayList();
  List<String> invalidNodeList = Lists.newArrayList();
  if ((listArg == null) || listArg.isEmpty()) {
    return nodeList;
  }
  nodeNames = getNodeList(listArg);
  DiskBalancerDataNode node = null;
  if (!nodeNames.isEmpty()) {
    for (String name : nodeNames) {
      node = getNode(name);
      if (node != null) {
        nodeList.add(node);
      } else {
        invalidNodeList.add(name);
      }
    }
  }
  if (!invalidNodeList.isEmpty()) {
    String invalidNodes = StringUtils.join(invalidNodeList.toArray(), ",");
    String warnMsg = String.format("The node(s) '%s' not found. "
        + "Please make sure that '%s' exists in the cluster.",
        invalidNodes, invalidNodes);
    throw new DiskBalancerException(warnMsg,
        DiskBalancerException.Result.INVALID_NODE);
  }
  return nodeList;
}
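Because getNodes() is protected, it is only reachable from Command subclasses. The fragment below is hypothetical (the node names are placeholders and the surrounding subclass is not shown); it illustrates how a caller can distinguish the INVALID_NODE case from other I/O failures, which works because DiskBalancerException extends IOException.

import java.io.IOException;
import java.util.List;
import org.apache.hadoop.hdfs.server.diskbalancer.DiskBalancerException;
import org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerDataNode;

// Hypothetical fragment inside a Command subclass.
void resolveNodes() throws IOException {
  try {
    // "dn1,dn2,badnode" is a placeholder argument; a hosts file URL also works.
    List<DiskBalancerDataNode> nodes = getNodes("dn1,dn2,badnode");
    // ... operate on the resolved nodes ...
  } catch (DiskBalancerException ex) {
    if (ex.getResult() == DiskBalancerException.Result.INVALID_NODE) {
      // The message lists the hostnames that are not part of the cluster.
      System.err.println(ex.getMessage());
    } else {
      throw ex;
    }
  }
}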
Use of org.apache.hadoop.hdfs.server.diskbalancer.DiskBalancerException in project hadoop by apache.
The class ExecuteCommand, method submitPlan.
/**
 * Submits a plan to the data node identified in the plan data.
 *
 * @param planFile - Plan file name
 * @param planData - Plan data in json format
 * @throws IOException if the plan cannot be submitted to the datanode
 */
private void submitPlan(final String planFile, final String planData)
    throws IOException {
  Preconditions.checkNotNull(planData);
  NodePlan plan = NodePlan.parseJson(planData);
  String dataNodeAddress = plan.getNodeName() + ":" + plan.getPort();
  Preconditions.checkNotNull(dataNodeAddress);
  ClientDatanodeProtocol dataNode = getDataNodeProxy(dataNodeAddress);
  String planHash = DigestUtils.shaHex(planData);
  try {
    // TODO : Support skipping date check.
    dataNode.submitDiskBalancerPlan(planHash, DiskBalancerCLI.PLAN_VERSION,
        planFile, planData, false);
  } catch (DiskBalancerException ex) {
    LOG.error("Submitting plan on {} failed. Result: {}, Message: {}",
        plan.getNodeName(), ex.getResult().toString(), ex.getMessage());
    throw ex;
  }
}
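The planHash passed as the plan ID is simply the SHA-1 hex digest of the plan JSON, so the same identifier can be reproduced outside the CLI, for example to query or cancel a previously submitted plan. A small sketch follows; the file name is a placeholder, and DigestUtils.shaHex is the same commons-codec call used above (newer codec releases name it sha1Hex).

import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Paths;
import org.apache.commons.codec.digest.DigestUtils;

// "datanode.plan.json" is a placeholder for a plan file produced by the plan command.
String planData = new String(
    Files.readAllBytes(Paths.get("datanode.plan.json")), StandardCharsets.UTF_8);
// The resulting hex string is the planID the datanode tracks this plan under.
String planHash = DigestUtils.shaHex(planData);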
Use of org.apache.hadoop.hdfs.server.diskbalancer.DiskBalancerException in project hadoop by apache.
The class DiskBalancer, method createWorkPlan.
/**
 * Converts a node plan into the DiskBalancerWorkItems that the Datanode can
 * execute.
 *
 * @param plan - Node Plan
 * @throws DiskBalancerException if a volume referenced by the plan cannot be found
 */
private void createWorkPlan(NodePlan plan) throws DiskBalancerException {
  Preconditions.checkState(lock.isHeldByCurrentThread());
  // Cleanup any residual work in the map.
  workMap.clear();
  Map<String, String> storageIDToVolBasePathMap =
      getStorageIDToVolumeBasePathMap();
  for (Step step : plan.getVolumeSetPlans()) {
    String sourceVolUuid = step.getSourceVolume().getUuid();
    String destVolUuid = step.getDestinationVolume().getUuid();
    String sourceVolBasePath = storageIDToVolBasePathMap.get(sourceVolUuid);
    if (sourceVolBasePath == null) {
      final String errMsg = "Disk Balancer - Unable to find volume: "
          + step.getSourceVolume().getPath() + ". SubmitPlan failed.";
      LOG.error(errMsg);
      throw new DiskBalancerException(errMsg,
          DiskBalancerException.Result.INVALID_VOLUME);
    }
    String destVolBasePath = storageIDToVolBasePathMap.get(destVolUuid);
    if (destVolBasePath == null) {
      final String errMsg = "Disk Balancer - Unable to find volume: "
          + step.getDestinationVolume().getPath() + ". SubmitPlan failed.";
      LOG.error(errMsg);
      throw new DiskBalancerException(errMsg,
          DiskBalancerException.Result.INVALID_VOLUME);
    }
    VolumePair volumePair = new VolumePair(sourceVolUuid, sourceVolBasePath,
        destVolUuid, destVolBasePath);
    createWorkPlan(volumePair, step);
  }
}
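The two INVALID_VOLUME branches fire whenever a step references a volume UUID that the preceding lookup did not find on the datanode. A hypothetical pre-check, not part of DiskBalancer, that applies the same test to a whole NodePlan before it is submitted might look like this; volumeBasePaths is assumed to be the storage-ID-to-base-path map returned by getStorageIDToVolumeBasePathMap.

import java.util.Map;
import org.apache.hadoop.hdfs.server.diskbalancer.DiskBalancerException;
import org.apache.hadoop.hdfs.server.diskbalancer.planner.NodePlan;
import org.apache.hadoop.hdfs.server.diskbalancer.planner.Step;

// Hypothetical helper: rejects a plan whose steps name unknown volume UUIDs.
static void checkPlanVolumes(NodePlan plan, Map<String, String> volumeBasePaths)
    throws DiskBalancerException {
  for (Step step : plan.getVolumeSetPlans()) {
    for (String uuid : new String[] {step.getSourceVolume().getUuid(),
                                     step.getDestinationVolume().getUuid()}) {
      if (!volumeBasePaths.containsKey(uuid)) {
        throw new DiskBalancerException("Unknown volume: " + uuid,
            DiskBalancerException.Result.INVALID_VOLUME);
      }
    }
  }
}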