Use of org.apache.ignite.internal.processors.hadoop.planner.HadoopMapReducePlanGroup in project Ignite by Apache.
The class IgniteHadoopWeightedMapReducePlanner, method affinityNodesForSplit.
/**
* Get affinity nodes for the given input split.
* <p>
* Order in the returned collection is significant: nodes containing more data for the split go first,
* so the first nodes in the collection are preferred for scheduling.
*
* @param split Split.
* @param top Topology.
* @return Affinity nodes.
* @throws IgniteCheckedException If failed.
*/
private Collection<UUID> affinityNodesForSplit(HadoopInputSplit split, HadoopMapReducePlanTopology top)
    throws IgniteCheckedException {
    Collection<UUID> igfsNodeIds = igfsAffinityNodesForSplit(split);

    if (igfsNodeIds != null)
        return igfsNodeIds;

    Map<NodeIdAndLength, UUID> res = new TreeMap<>();

    for (String host : split.hosts()) {
        long len = split instanceof HadoopFileBlock ? ((HadoopFileBlock)split).length() : 0L;

        HadoopMapReducePlanGroup grp = top.groupForHost(host);

        if (grp != null) {
            for (int i = 0; i < grp.nodeCount(); i++) {
                UUID nodeId = grp.nodeId(i);

                res.put(new NodeIdAndLength(nodeId, len), nodeId);
            }
        }
    }

    return new LinkedHashSet<>(res.values());
}
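The "more data first" ordering comes from the NodeIdAndLength key of the TreeMap. That key class is internal to the planner and not shown here; the following is a minimal hypothetical sketch of how such a key could implement the ordering, with a node-ID tie-breaker so distinct nodes never compare as equal:

import java.util.UUID;

/** Hypothetical sketch of a TreeMap key that sorts nodes holding more split data first. */
class NodeIdAndLength implements Comparable<NodeIdAndLength> {
    private final UUID id;
    private final long len;

    NodeIdAndLength(UUID id, long len) {
        this.id = id;
        this.len = len;
    }

    @Override public int compareTo(NodeIdAndLength other) {
        // Longer blocks sort first, so TreeMap iteration favors data-local nodes.
        int res = Long.compare(other.len, len);

        // Tie-break by node ID so distinct nodes are never treated as duplicates.
        return res != 0 ? res : id.compareTo(other.id);
    }

    @Override public boolean equals(Object o) {
        return o instanceof NodeIdAndLength && compareTo((NodeIdAndLength)o) == 0;
    }

    @Override public int hashCode() {
        return 31 * id.hashCode() + Long.hashCode(len);
    }
}

With such a key, the LinkedHashSet built from res.values() preserves the TreeMap's descending-length iteration order, which is exactly what the javadoc promises.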
Use of org.apache.ignite.internal.processors.hadoop.planner.HadoopMapReducePlanGroup in project Ignite by Apache.
The class IgniteHadoopWeightedMapReducePlanner, method assignRemoteReducers.
/**
* Assign remote reducers, giving preference to the least loaded groups.
*
* @param cnt Count.
* @param top Topology.
* @param mappers Mappers.
* @param resMap Reducers result map.
*/
private void assignRemoteReducers(int cnt, HadoopMapReducePlanTopology top, Mappers mappers,
    Map<UUID, Integer> resMap) {
    TreeSet<HadoopMapReducePlanGroup> set = new TreeSet<>(new GroupWeightComparator());

    set.addAll(top.groups());

    while (cnt-- > 0) {
        // The least loaded machine.
        HadoopMapReducePlanGroup grp = set.first();

        // Look for nodes with assigned splits.
        List<UUID> splitNodeIds = null;

        for (int i = 0; i < grp.nodeCount(); i++) {
            UUID nodeId = grp.nodeId(i);

            if (mappers.nodeToSplits.containsKey(nodeId)) {
                if (splitNodeIds == null)
                    splitNodeIds = new ArrayList<>(2);

                splitNodeIds.add(nodeId);
            }
        }

        // Select best node.
        UUID id;
        int newWeight;

        if (splitNodeIds != null) {
            id = splitNodeIds.get(ThreadLocalRandom.current().nextInt(splitNodeIds.size()));
            newWeight = grp.weight() + locReducerWeight;
        }
        else {
            id = grp.nodeId(ThreadLocalRandom.current().nextInt(grp.nodeCount()));
            newWeight = grp.weight() + rmtReducerWeight;
        }

        // Re-add entry with new weight.
        boolean rmv = set.remove(grp);

        assert rmv;

        grp.weight(newWeight);

        boolean add = set.add(grp);

        assert add;

        // Update result map.
        Integer res = resMap.get(id);

        resMap.put(id, res == null ? 1 : res + 1);
    }
}
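GroupWeightComparator is not reproduced on this page. A plausible minimal sketch, assuming the real class orders groups by ascending weight with some stable tie-breaker, could look like this:

import java.util.Comparator;

import org.apache.ignite.internal.processors.hadoop.planner.HadoopMapReducePlanGroup;

/**
 * Hypothetical sketch of a comparator sorting groups by ascending weight,
 * so that TreeSet.first() yields the least loaded group. A tie-breaker is
 * required because TreeSet drops elements that compare as equal.
 */
class GroupWeightComparator implements Comparator<HadoopMapReducePlanGroup> {
    @Override public int compare(HadoopMapReducePlanGroup g1, HadoopMapReducePlanGroup g2) {
        int res = Integer.compare(g1.weight(), g2.weight());

        // Identity-based tie-break (illustrative only; the real planner may
        // compare another stable group attribute instead).
        return res != 0 ? res : Integer.compare(System.identityHashCode(g1), System.identityHashCode(g2));
    }
}

Note the remove-then-re-add pattern around grp.weight(newWeight) in the loop above: mutating a field that participates in the comparator while the element is still inside the TreeSet would corrupt the set's ordering, so the group must be taken out before its weight changes.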
Use of org.apache.ignite.internal.processors.hadoop.planner.HadoopMapReducePlanGroup in project Ignite by Apache.
The class IgniteHadoopWeightedMapReducePlanner, method assignLocalReducers.
/**
* Assign local split reducers.
*
* @param split Split.
* @param cnt Reducer count.
* @param top Topology.
* @param mappers Mappers.
* @param resMap Reducers result map.
* @return Number of locally assigned reducers.
*/
private int assignLocalReducers(HadoopInputSplit split, int cnt, HadoopMapReducePlanTopology top, Mappers mappers,
    Map<UUID, Integer> resMap) {
    // Dereference node.
    UUID nodeId = mappers.splitToNode.get(split);

    assert nodeId != null;

    // Dereference group.
    HadoopMapReducePlanGroup grp = top.groupForId(nodeId);

    assert grp != null;

    // Assign more reducers to the node until threshold is reached.
    int res = 0;

    while (res < cnt && grp.weight() < preferLocReducerThresholdWeight) {
        res++;

        grp.weight(grp.weight() + locReducerWeight);
    }

    // Update result map.
    if (res > 0) {
        Integer reducerCnt = resMap.get(nodeId);

        resMap.put(nodeId, reducerCnt == null ? res : reducerCnt + res);
    }

    return res;
}
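The effect of the threshold loop is easiest to see with concrete numbers. A standalone miniature follows; all weights below are made up purely for illustration:

/** Standalone demo of the threshold loop in assignLocalReducers (hypothetical numbers). */
public class LocalReducerThresholdDemo {
    public static void main(String[] args) {
        int weight = 90;               // Current group weight.
        int locReducerWeight = 30;     // Assumed cost of one local reducer.
        int threshold = 200;           // Assumed preferLocReducerThresholdWeight.
        int cnt = 10;                  // Reducers still left to place.

        int res = 0;

        // Same loop shape as assignLocalReducers: keep placing local reducers
        // while the group weight stays below the threshold.
        while (res < cnt && weight < threshold) {
            res++;
            weight += locReducerWeight;
        }

        // The check passes at weights 90, 120, 150 and 180, so four reducers
        // are placed locally and the final weight is 210.
        System.out.println("assigned=" + res + ", weight=" + weight);
    }
}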
Use of org.apache.ignite.internal.processors.hadoop.planner.HadoopMapReducePlanGroup in project Ignite by Apache.
The class IgniteHadoopWeightedMapReducePlanner, method bestMapperNode.
/**
* Find best mapper node.
*
* @param affIds Affinity node IDs.
* @param top Topology.
* @return ID of the selected mapper node.
*/
private UUID bestMapperNode(@Nullable Collection<UUID> affIds, HadoopMapReducePlanTopology top) {
    // Priority node.
    UUID prioAffId = F.first(affIds);

    // Find the group with the least weight.
    HadoopMapReducePlanGroup resGrp = null;
    MapperPriority resPrio = MapperPriority.NORMAL;
    int resWeight = Integer.MAX_VALUE;

    for (HadoopMapReducePlanGroup grp : top.groups()) {
        MapperPriority prio = groupPriority(grp, affIds, prioAffId);

        int weight = grp.weight() + (prio == MapperPriority.NORMAL ? rmtMapperWeight : locMapperWeight);

        if (resGrp == null || weight < resWeight || (weight == resWeight && prio.value() > resPrio.value())) {
            resGrp = grp;
            resPrio = prio;
            resWeight = weight;
        }
    }

    assert resGrp != null;

    // Update group weight for further runs.
    resGrp.weight(resWeight);

    // Return the best node from the group.
    return bestMapperNodeForGroup(resGrp, resPrio, affIds, prioAffId);
}
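Tie-breaking between equally weighted groups relies on the MapperPriority enum, which is not shown on this page. A minimal sketch consistent with the usage above (three levels ordered by an integer value()) might be:

/**
 * Hypothetical sketch of the priority levels consulted in bestMapperNode():
 * a higher value() wins when two groups have the same projected weight.
 */
enum MapperPriority {
    /** Group contains no affinity nodes for the split. */
    NORMAL(0),

    /** Group contains at least one affinity node. */
    HIGH(1),

    /** Group contains the top-priority affinity node (prioAffId). */
    HIGHEST(2);

    private final int val;

    MapperPriority(int val) {
        this.val = val;
    }

    /** @return Numeric priority used for comparisons. */
    public int value() {
        return val;
    }
}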
Use of org.apache.ignite.internal.processors.hadoop.planner.HadoopMapReducePlanGroup in project Ignite by Apache.
The class HadoopAbstractMapReducePlanner, method topology.
/**
* Create plan topology.
*
* @param nodes Topology nodes.
* @return Plan topology.
*/
protected static HadoopMapReducePlanTopology topology(Collection<ClusterNode> nodes) {
    Map<String, HadoopMapReducePlanGroup> macsMap = new HashMap<>(nodes.size());

    Map<UUID, HadoopMapReducePlanGroup> idToGrp = new HashMap<>(nodes.size());
    Map<String, HadoopMapReducePlanGroup> hostToGrp = new HashMap<>(nodes.size());

    for (ClusterNode node : nodes) {
        String macs = node.attribute(ATTR_MACS);

        HadoopMapReducePlanGroup grp = macsMap.get(macs);

        if (grp == null) {
            grp = new HadoopMapReducePlanGroup(node, macs);

            macsMap.put(macs, grp);
        }
        else
            grp.add(node);

        idToGrp.put(node.id(), grp);

        for (String host : node.addresses()) {
            HadoopMapReducePlanGroup hostGrp = hostToGrp.get(host);

            if (hostGrp == null)
                hostToGrp.put(host, grp);
            else
                assert hostGrp == grp;
        }
    }

    return new HadoopMapReducePlanTopology(new ArrayList<>(macsMap.values()), idToGrp, hostToGrp);
}
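The key idea here is that nodes reporting identical ATTR_MACS values are treated as one physical machine and collapse into a single group. A dependency-free miniature of that grouping rule, with made-up node names and MAC strings:

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

/** Standalone illustration of the MACs-based grouping performed by topology(). */
public class MacsGroupingDemo {
    public static void main(String[] args) {
        // Node name -> ATTR_MACS attribute (hypothetical values).
        Map<String, String> nodeMacs = new HashMap<>();
        nodeMacs.put("node-a", "00:1A:2B");
        nodeMacs.put("node-b", "00:1A:2B"); // Same machine as node-a.
        nodeMacs.put("node-c", "00:9F:8E");

        // Same structure as macsMap above: one group per distinct MACs value.
        Map<String, List<String>> macsMap = new HashMap<>();

        for (Map.Entry<String, String> e : nodeMacs.entrySet())
            macsMap.computeIfAbsent(e.getValue(), k -> new ArrayList<>()).add(e.getKey());

        // Prints two groups: [node-a, node-b] and [node-c] (map order may vary).
        macsMap.values().forEach(System.out::println);
    }
}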