Use of org.apache.ignite.internal.processors.hadoop.HadoopFileBlock in project ignite by apache.
The class HadoopV2Context, method getInputSplit.
/** {@inheritDoc} */
@Override public InputSplit getInputSplit() {
    if (inputSplit == null) {
        HadoopInputSplit split = ctx.taskInfo().inputSplit();

        if (split == null)
            return null;

        if (split instanceof HadoopFileBlock) {
            HadoopFileBlock fileBlock = (HadoopFileBlock)split;

            // Map the Ignite file block directly onto a Hadoop FileSplit; host locations are not needed here.
            inputSplit = new FileSplit(new Path(fileBlock.file()), fileBlock.start(), fileBlock.length(), null);
        }
        else {
            try {
                // Otherwise deserialize the native Hadoop split held by the task context.
                inputSplit = (InputSplit)((HadoopV2TaskContext)ctx).getNativeSplit(split);
            }
            catch (IgniteCheckedException e) {
                throw new IllegalStateException(e);
            }
        }
    }

    return inputSplit;
}
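The method above lazily converts Ignite's split abstraction into Hadoop's: a HadoopFileBlock becomes a FileSplit built from the block's file URI, start offset and length. A minimal sketch of that mapping in isolation, assuming a hypothetical helper class SplitConversionSketch and example host/path values (only the HadoopFileBlock accessors file(), start() and length() come from the snippet above):

import java.net.URI;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapreduce.lib.input.FileSplit;
import org.apache.ignite.internal.processors.hadoop.HadoopFileBlock;

/** Hypothetical helper illustrating the block-to-split mapping used in getInputSplit(). */
final class SplitConversionSketch {
    /** Converts an Ignite file block to a Hadoop FileSplit; hosts are passed as null, as above. */
    static FileSplit toFileSplit(HadoopFileBlock fileBlock) {
        return new FileSplit(new Path(fileBlock.file()), fileBlock.start(), fileBlock.length(), null);
    }

    public static void main(String[] args) {
        // A 50-byte block of hdfs://example/x starting at offset 0, reported on "host1".
        HadoopFileBlock block = new HadoopFileBlock(new String[] {"host1"}, URI.create("hdfs://example/x"), 0, 50);

        // FileSplit.toString() prints the path together with start and length.
        System.out.println(toFileSplit(block));
    }
}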
Use of org.apache.ignite.internal.processors.hadoop.HadoopFileBlock in project ignite by apache.
The class HadoopWeightedMapReducePlannerTest, method testHdfsSplitsAffinity.
/**
 * Test affinity of HDFS splits.
 *
 * @throws Exception If failed.
 */
public void testHdfsSplitsAffinity() throws Exception {
    // Mock IGFS where NODE_1, NODE_2 and NODE_3 own the ranges starting at offsets 0, 50 and 100.
    IgfsMock igfs = LocationsBuilder.create().add(0, NODE_1).add(50, NODE_2).add(100, NODE_3).buildIgfs();

    final List<HadoopInputSplit> splits = new ArrayList<>();

    splits.add(new HadoopFileBlock(new String[] { HOST_1 }, URI.create("hdfs://" + HOST_1 + "/x"), 0, 50));
    splits.add(new HadoopFileBlock(new String[] { HOST_2 }, URI.create("hdfs://" + HOST_2 + "/x"), 50, 100));
    splits.add(new HadoopFileBlock(new String[] { HOST_3 }, URI.create("hdfs://" + HOST_3 + "/x"), 100, 37));

    // The following splits belong to hosts that are not part of the Ignite topology at all.
    // This means that these splits should be assigned to the least loaded nodes:
    splits.add(new HadoopFileBlock(new String[] { HOST_4 }, URI.create("hdfs://" + HOST_4 + "/x"), 138, 2));
    splits.add(new HadoopFileBlock(new String[] { HOST_5 }, URI.create("hdfs://" + HOST_5 + "/x"), 140, 3));

    final int expReducers = 7;

    HadoopPlannerMockJob job = new HadoopPlannerMockJob(splits, expReducers);

    IgniteHadoopWeightedMapReducePlanner planner = createPlanner(igfs);

    final HadoopMapReducePlan plan = planner.preparePlan(job, NODES, null);

    checkPlanMappers(plan, splits, NODES, true);

    checkPlanReducers(plan, NODES, expReducers, true);
}
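The test builds a topology where each node owns a contiguous offset range, then expects the planner to place each split on the node owning its range, with splits from unknown hosts spread over the least loaded nodes. A simplified, hypothetical sketch of that range-to-node lookup (not the planner's actual algorithm, and AffinitySketch plus all values below are illustrative only):

import java.util.NavigableMap;
import java.util.TreeMap;

/** Hypothetical sketch of mapping split start offsets to owning nodes, mirroring LocationsBuilder above. */
final class AffinitySketch {
    public static void main(String[] args) {
        // Start offset of each owned range -> owning node, as configured in the test.
        NavigableMap<Long, String> owners = new TreeMap<>();

        owners.put(0L, "NODE_1");   // [0, 50)
        owners.put(50L, "NODE_2");  // [50, 100)
        owners.put(100L, "NODE_3"); // [100, ...)

        // Each split {start, length}; a split lands on the node owning the range containing its start.
        long[][] splits = {{0, 50}, {50, 100}, {100, 37}};

        for (long[] s : splits)
            System.out.println("split at " + s[0] + ", len " + s[1] + " -> " + owners.floorEntry(s[0]).getValue());
    }
}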