Usage example of org.apache.hadoop.hdfs.server.diskbalancer.planner.NodePlan in the Apache Hadoop project: class TestDiskBalancerRPC, method testCancelPlan.
@Test
public void testCancelPlan() throws Exception {
  RpcTestHelper helper = new RpcTestHelper().invoke();
  DataNode node = helper.getDataNode();
  String hash = helper.getPlanHash();
  int version = helper.getPlanVersion();
  NodePlan nodePlan = helper.getPlan();
  // Submit a valid plan first, then verify it can be cancelled by its hash.
  node.submitDiskBalancerPlan(hash, version, PLAN_FILE, nodePlan.toJson(), false);
  node.cancelDiskBalancePlan(hash);
}
Usage example of org.apache.hadoop.hdfs.server.diskbalancer.planner.NodePlan in the Apache Hadoop project: class TestDiskBalancerRPC, method testSubmitPlanWithInvalidVersion.
@Test
public void testSubmitPlanWithInvalidVersion() throws Exception {
  RpcTestHelper helper = new RpcTestHelper().invoke();
  DataNode node = helper.getDataNode();
  String hash = helper.getPlanHash();
  // Bump the version past what the helper produced so submission is rejected.
  int badVersion = helper.getPlanVersion() + 1;
  NodePlan nodePlan = helper.getPlan();
  thrown.expect(DiskBalancerException.class);
  thrown.expect(new DiskBalancerResultVerifier(Result.INVALID_PLAN_VERSION));
  node.submitDiskBalancerPlan(hash, badVersion, PLAN_FILE, nodePlan.toJson(), false);
}
Usage example of org.apache.hadoop.hdfs.server.diskbalancer.planner.NodePlan in the Apache Hadoop project: class TestDiskBalancerRPC, method testSubmitPlan.
@Test
public void testSubmitPlan() throws Exception {
  RpcTestHelper helper = new RpcTestHelper().invoke();
  DataNode node = helper.getDataNode();
  String hash = helper.getPlanHash();
  int version = helper.getPlanVersion();
  NodePlan nodePlan = helper.getPlan();
  // A freshly generated plan with a matching hash and version must be accepted.
  node.submitDiskBalancerPlan(hash, version, PLAN_FILE, nodePlan.toJson(), false);
}
Usage example of org.apache.hadoop.hdfs.server.diskbalancer.planner.NodePlan in the Apache Hadoop project: class CancelCommand, method cancelPlan.
/**
 * Cancels a running plan on the DataNode that the plan targets. The plan
 * JSON is re-hashed locally so the hash sent over RPC matches the one
 * computed when the plan was submitted.
 *
 * @param planData - JSON representation of the plan to cancel.
 * @throws IOException if the RPC to the DataNode fails.
 */
private void cancelPlan(String planData) throws IOException {
  Preconditions.checkNotNull(planData);
  NodePlan plan = NodePlan.parseJson(planData);
  // Validate the node name itself; the previous checkNotNull on the
  // concatenated address string could never fire, because string
  // concatenation never yields null (a null name becomes "null:port").
  Preconditions.checkNotNull(plan.getNodeName());
  String dataNodeAddress = plan.getNodeName() + ":" + plan.getPort();
  ClientDatanodeProtocol dataNode = getDataNodeProxy(dataNodeAddress);
  // sha1Hex is the non-deprecated replacement for shaHex; both compute the
  // same SHA-1 hex digest, so the hash value sent to the DataNode is
  // unchanged.
  String planHash = DigestUtils.sha1Hex(planData);
  try {
    dataNode.cancelDiskBalancePlan(planHash);
  } catch (DiskBalancerException ex) {
    LOG.error("Cancelling plan on {} failed. Result: {}, Message: {}",
        plan.getNodeName(), ex.getResult().toString(), ex.getMessage());
    throw ex;
  }
}
Usage example of org.apache.hadoop.hdfs.server.diskbalancer.planner.NodePlan in the Apache Hadoop project: class DiskBalancerCluster, method computePlan.
/**
 * Compute plan takes a node and constructs a planner that creates a plan that
 * we would like to follow.
 * <p/>
 * This function creates a thread pool and executes a planner on each node
 * that we are supposed to plan for. Each of these planners return a NodePlan
 * that we can persist or schedule for execution with a diskBalancer
 * Executor.
 *
 * @param thresholdPercent - in percentage
 * @return list of NodePlans; empty if there are no nodes to process
 */
public List<NodePlan> computePlan(double thresholdPercent) {
  List<NodePlan> planList = new LinkedList<>();
  if (nodesToProcess == null) {
    LOG.warn("Nodes to process is null. No nodes processed.");
    return planList;
  }
  int poolSize = computePoolSize(nodesToProcess.size());
  ExecutorService executorService = Executors.newFixedThreadPool(poolSize);
  try {
    List<Future<NodePlan>> futureList = new LinkedList<>();
    for (int x = 0; x < nodesToProcess.size(); x++) {
      final DiskBalancerDataNode node = nodesToProcess.get(x);
      final Planner planner = PlannerFactory
          .getPlanner(PlannerFactory.GREEDY_PLANNER, node, thresholdPercent);
      futureList.add(executorService.submit(new Callable<NodePlan>() {
        @Override
        public NodePlan call() throws Exception {
          assert planner != null;
          return planner.plan(node);
        }
      }));
    }
    for (Future<NodePlan> f : futureList) {
      try {
        planList.add(f.get());
      } catch (InterruptedException e) {
        LOG.error("Compute Node plan was cancelled or interrupted : ", e);
        // Restore the interrupt status so callers can observe the interrupt.
        Thread.currentThread().interrupt();
      } catch (ExecutionException e) {
        LOG.error("Unable to compute plan : ", e);
      }
    }
  } finally {
    // The pool was previously never shut down, leaking its worker threads
    // on every call; shutdown() lets queued tasks finish and then releases
    // the threads.
    executorService.shutdown();
  }
  return planList;
}
End of aggregated usage examples for NodePlan.