Example usage of org.apache.ignite.hadoop.mapreduce.IgniteHadoopWeightedMapReducePlanner in the Apache Ignite project: class HadoopWeightedMapReducePlannerTest, method testOneIgfsSplitAffinity.
/**
 * Verifies that a single IGFS split is assigned to exactly one mapper — the split's affinity node.
 *
 * @throws Exception If failed.
 */
public void testOneIgfsSplitAffinity() throws Exception {
    IgfsMock igfs = LocationsBuilder.create().add(0, NODE_1).add(50, NODE_2).add(100, NODE_3).buildIgfs();

    // Single split residing entirely on HOST_1, addressed via the IGFS scheme.
    List<HadoopInputSplit> inputSplits = new ArrayList<>();
    inputSplits.add(new HadoopFileBlock(new String[] { HOST_1 }, URI.create("igfs://igfs@/file"), 0, 50));

    final int expReducers = 4;

    HadoopPlannerMockJob mockJob = new HadoopPlannerMockJob(inputSplits, expReducers);

    HadoopMapReducePlan plan = createPlanner(igfs).preparePlan(mockJob, NODES, null);

    // The only mapper must be the affinity node of the split.
    assert plan.mappers() == 1;
    assert plan.mapperNodeIds().size() == 1;
    assert plan.mapperNodeIds().contains(ID_1);

    checkPlanMappers(plan, inputSplits, NODES, false);
    checkPlanReducers(plan, NODES, expReducers, false);
}
Example usage of org.apache.ignite.hadoop.mapreduce.IgniteHadoopWeightedMapReducePlanner in the Apache Ignite project: class HadoopWeightedMapReducePlannerTest, method createPlanner.
/**
 * Builds a weighted map-reduce planner wired to the given IGFS mock.
 *
 * @param igfs IGFS mock instance.
 * @return Planner ready for use in tests.
 */
private static IgniteHadoopWeightedMapReducePlanner createPlanner(IgfsMock igfs) {
    IgniteHadoopWeightedMapReducePlanner res = new IgniteHadoopWeightedMapReducePlanner();

    // Inject a mock Ignite instance into the planner's private "ignite" field via reflection.
    GridTestUtils.setFieldValue(res, HadoopAbstractMapReducePlanner.class, "ignite", new IgfsIgniteMock(null, igfs));

    return res;
}
Example usage of org.apache.ignite.hadoop.mapreduce.IgniteHadoopWeightedMapReducePlanner in the Apache Ignite project: class HadoopWeightedMapReducePlannerTest, method testHdfsSplitsReplication.
/**
 * Verifies planning of HDFS splits with replication factor 3 (each block lists three hosts).
 *
 * @throws Exception If failed.
 */
public void testHdfsSplitsReplication() throws Exception {
    IgfsMock igfs = LocationsBuilder.create().add(0, NODE_1).add(50, NODE_2).add(100, NODE_3).buildIgfs();

    // NOTE(review): URI scheme "hfds" looks like a typo for "hdfs" — verify original intent before changing.
    final List<HadoopInputSplit> inputSplits = new ArrayList<>();

    inputSplits.add(new HadoopFileBlock(new String[] { HOST_1, HOST_2, HOST_3 }, URI.create("hfds://" + HOST_1 + "/x"), 0, 50));
    inputSplits.add(new HadoopFileBlock(new String[] { HOST_2, HOST_3, HOST_4 }, URI.create("hfds://" + HOST_2 + "/x"), 50, 100));
    inputSplits.add(new HadoopFileBlock(new String[] { HOST_3, HOST_4, HOST_5 }, URI.create("hfds://" + HOST_3 + "/x"), 100, 37));

    // The following splits belong to hosts that are outside the Ignite topology entirely,
    // so they should be assigned to the least loaded nodes:
    inputSplits.add(new HadoopFileBlock(new String[] { HOST_4, HOST_5, HOST_1 }, URI.create("hfds://" + HOST_4 + "/x"), 138, 2));
    inputSplits.add(new HadoopFileBlock(new String[] { HOST_5, HOST_1, HOST_2 }, URI.create("hfds://" + HOST_5 + "/x"), 140, 3));

    final int expReducers = 8;

    HadoopPlannerMockJob mockJob = new HadoopPlannerMockJob(inputSplits, expReducers);

    final HadoopMapReducePlan plan = createPlanner(igfs).preparePlan(mockJob, NODES, null);

    checkPlanMappers(plan, inputSplits, NODES, true);
    checkPlanReducers(plan, NODES, expReducers, true);
}
Example usage of org.apache.ignite.hadoop.mapreduce.IgniteHadoopWeightedMapReducePlanner in the Apache Ignite project: class HadoopWeightedPlannerMapReduceTest, method createHadoopConfiguration.
/** {@inheritDoc} */
@Override
protected HadoopConfiguration createHadoopConfiguration() {
    HadoopConfiguration cfg = new HadoopConfiguration();

    // Configure the weighted planner with its default settings.
    cfg.setMapReducePlanner(new IgniteHadoopWeightedMapReducePlanner());

    return cfg;
}
Example usage of org.apache.ignite.hadoop.mapreduce.IgniteHadoopWeightedMapReducePlanner in the Apache Ignite project: class HadoopWeightedMapReducePlannerTest, method testHdfsSplitsAffinity.
/**
 * Verifies affinity-based planning of HDFS splits, each located on a single host.
 *
 * @throws Exception If failed.
 */
public void testHdfsSplitsAffinity() throws Exception {
    IgfsMock igfs = LocationsBuilder.create().add(0, NODE_1).add(50, NODE_2).add(100, NODE_3).buildIgfs();

    // NOTE(review): URI scheme "hfds" looks like a typo for "hdfs" — verify original intent before changing.
    final List<HadoopInputSplit> inputSplits = new ArrayList<>();

    inputSplits.add(new HadoopFileBlock(new String[] { HOST_1 }, URI.create("hfds://" + HOST_1 + "/x"), 0, 50));
    inputSplits.add(new HadoopFileBlock(new String[] { HOST_2 }, URI.create("hfds://" + HOST_2 + "/x"), 50, 100));
    inputSplits.add(new HadoopFileBlock(new String[] { HOST_3 }, URI.create("hfds://" + HOST_3 + "/x"), 100, 37));

    // The following splits belong to hosts that are outside the Ignite topology entirely,
    // so they should be assigned to the least loaded nodes:
    inputSplits.add(new HadoopFileBlock(new String[] { HOST_4 }, URI.create("hfds://" + HOST_4 + "/x"), 138, 2));
    inputSplits.add(new HadoopFileBlock(new String[] { HOST_5 }, URI.create("hfds://" + HOST_5 + "/x"), 140, 3));

    final int expReducers = 7;

    HadoopPlannerMockJob mockJob = new HadoopPlannerMockJob(inputSplits, expReducers);

    final HadoopMapReducePlan plan = createPlanner(igfs).preparePlan(mockJob, NODES, null);

    checkPlanMappers(plan, inputSplits, NODES, true);
    checkPlanReducers(plan, NODES, expReducers, true);
}
Aggregations