Use of edu.iu.dsc.tws.comms.routing.PartitionRouter in project twister2 by DSC-SPIDAL.
The init method of the class MPIDataFlowPartition.
/**
 * Initialize
 */
public void init(Config cfg, MessageType t, TaskPlan taskPlan, int edge) {
  this.thisSources = TaskPlanUtils.getTasksOfThisExecutor(taskPlan, sources);
  LOG.info(String.format("%d setup loadbalance routing %s",
      taskPlan.getThisExecutor(), thisSources));
  this.thisTasks = taskPlan.getTasksOfThisExecutor();
  this.router = new PartitionRouter(taskPlan, sources, destinations);
  Map<Integer, Set<Integer>> internal = router.getInternalSendTasks(0);
  Map<Integer, Set<Integer>> external = router.getExternalSendTasks(0);
  this.instancePlan = taskPlan;
  this.type = t;
  LOG.log(Level.FINE, String.format("%d adding internal/external routing",
      taskPlan.getThisExecutor()));
  // note: the unconditional break below means only the first source's
  // internal/external destinations are collected
  for (int s : thisSources) {
    Set<Integer> integerSetMap = internal.get(s);
    if (integerSetMap != null) {
      this.dests.internal.addAll(integerSetMap);
    }
    Set<Integer> integerSetMap1 = external.get(s);
    if (integerSetMap1 != null) {
      this.dests.external.addAll(integerSetMap1);
    }
    LOG.fine(String.format("%d adding internal/external routing %d",
        taskPlan.getThisExecutor(), s));
    break;
  }
  LOG.log(Level.FINE, String.format("%d done adding internal/external routing",
      taskPlan.getThisExecutor()));
  // TODO : Does this send the correct receiveExpectedTaskIds for partition communication
  if (this.finalReceiver != null && isLastReceiver()) {
    this.finalReceiver.init(cfg, this, receiveExpectedTaskIds());
  }
  // per-source bounded queues and serializers for the send path
  Map<Integer, ArrayBlockingQueue<Pair<Object, MPISendMessage>>> pendingSendMessagesPerSource = new HashMap<>();
  Map<Integer, Queue<Pair<Object, MPIMessage>>> pendingReceiveMessagesPerSource = new HashMap<>();
  Map<Integer, Queue<MPIMessage>> pendingReceiveDeSerializations = new HashMap<>();
  Map<Integer, MessageSerializer> serializerMap = new HashMap<>();
  Map<Integer, MessageDeSerializer> deSerializerMap = new HashMap<>();
  Set<Integer> srcs = TaskPlanUtils.getTasksOfThisExecutor(taskPlan, sources);
  for (int s : srcs) {
    // later look at how not to allocate pairs for this each time
    ArrayBlockingQueue<Pair<Object, MPISendMessage>> pendingSendMessages =
        new ArrayBlockingQueue<Pair<Object, MPISendMessage>>(MPIContext.sendPendingMax(cfg));
    pendingSendMessagesPerSource.put(s, pendingSendMessages);
    serializerMap.put(s, new MPIMessageSerializer(new KryoSerializer()));
  }
  int maxReceiveBuffers = MPIContext.receiveBufferCount(cfg);
  int receiveExecutorsSize = receivingExecutors().size();
  if (receiveExecutorsSize == 0) {
    receiveExecutorsSize = 1;
  }
  // per-executor bounded queues and deserializers for the receive path
  Set<Integer> execs = router.receivingExecutors();
  for (int e : execs) {
    int capacity = maxReceiveBuffers * 2 * receiveExecutorsSize;
    Queue<Pair<Object, MPIMessage>> pendingReceiveMessages =
        new ArrayBlockingQueue<Pair<Object, MPIMessage>>(capacity);
    pendingReceiveMessagesPerSource.put(e, pendingReceiveMessages);
    pendingReceiveDeSerializations.put(e, new ArrayBlockingQueue<MPIMessage>(capacity));
    deSerializerMap.put(e, new MPIMessageDeSerializer(new KryoSerializer()));
  }
  for (int src : srcs) {
    for (int dest : destinations) {
      sendRoutingParameters(src, dest);
    }
  }
  delegete.setCompletionListener(completionListener);
  delegete.init(cfg, t, taskPlan, edge, router.receivingExecutors(), router.isLastReceiver(),
      this, pendingSendMessagesPerSource, pendingReceiveMessagesPerSource,
      pendingReceiveDeSerializations, serializerMap, deSerializerMap, isKeyed);
  delegete.setKeyType(keyType);
}
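All three usages in this listing follow the same routing split: build a PartitionRouter over the task placement, then partition each source's destinations into in-process (internal) and cross-worker (external) targets. The standalone sketch below models that split with plain collections; it assumes a toy task-to-worker placement, and the names RoutingSplitSketch and taskToWorker are illustrative, not part of the twister2 API.

import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;

// Hypothetical standalone model (not the twister2 API) of the internal/external
// split that PartitionRouter computes: a destination is "internal" when it runs
// on the same worker as the source, "external" otherwise.
public class RoutingSplitSketch {
  public static void main(String[] args) {
    // taskId -> workerId; an assumed toy placement of 4 tasks on 2 workers
    Map<Integer, Integer> taskToWorker = new HashMap<>();
    taskToWorker.put(0, 0);
    taskToWorker.put(1, 0);
    taskToWorker.put(2, 1);
    taskToWorker.put(3, 1);

    int source = 0;                               // a source task on worker 0
    Set<Integer> destinations = Set.of(1, 2, 3);  // partition fan-out targets

    Set<Integer> internal = new HashSet<>();
    Set<Integer> external = new HashSet<>();
    int sourceWorker = taskToWorker.get(source);
    for (int dest : destinations) {
      if (taskToWorker.get(dest) == sourceWorker) {
        internal.add(dest);  // deliverable in-process, no network send
      } else {
        external.add(dest);  // must be serialized and sent to another worker
      }
    }
    System.out.println("internal: " + internal);  // [1]
    System.out.println("external: " + external);  // [2, 3]
  }
}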
Use of edu.iu.dsc.tws.comms.routing.PartitionRouter in project twister2 by DSC-SPIDAL.
The init method of the class MToNSimple.
/**
 * Initialize
 */
public void init(Config cfg, MessageType t, LogicalPlan logicalPlan, int ed) {
  this.edge = ed;
  Set<Integer> thisSources = TaskPlanUtils.getTasksOfThisWorker(logicalPlan, sources);
  int executor = logicalPlan.getThisWorker();
  LOG.log(Level.FINE, String.format("%d setup loadbalance routing %s %s",
      logicalPlan.getThisWorker(), sources, destinations));
  this.router = new PartitionRouter(logicalPlan, sources, destinations);
  Map<Integer, Set<Integer>> internal = router.getInternalSendTasks();
  Map<Integer, Set<Integer>> external = router.getExternalSendTasks();
  this.instancePlan = logicalPlan;
  this.dataType = t;
  if (this.receiveType == null) {
    this.receiveType = dataType;
  }
  LOG.log(Level.FINE, String.format("%d adding internal/external routing",
      logicalPlan.getThisWorker()));
  // note: the unconditional break below means only the first source's
  // internal/external destinations are collected
  for (int s : thisSources) {
    Set<Integer> integerSetMap = internal.get(s);
    if (integerSetMap != null) {
      this.internalDestinations.addAll(integerSetMap);
    }
    Set<Integer> integerSetMap1 = external.get(s);
    if (integerSetMap1 != null) {
      this.externalDestinations.addAll(integerSetMap1);
    }
    LOG.fine(String.format("%d adding internal/external routing %d",
        logicalPlan.getThisWorker(), s));
    break;
  }
  LOG.log(Level.FINE, String.format("%d done adding internal/external routing",
      logicalPlan.getThisWorker()));
  this.finalReceiver.init(cfg, this, receiveExpectedTaskIds());
  this.partialReceiver.init(cfg, this, router.partialExpectedTaskIds());
  Map<Integer, ArrayBlockingQueue<OutMessage>> pendingSendMessagesPerSource = new HashMap<>();
  Map<Integer, Queue<InMessage>> pendingReceiveMessagesPerSource = new HashMap<>();
  Map<Integer, Queue<InMessage>> pendingReceiveDeSerializations = new HashMap<>();
  Map<Integer, MessageSerializer> serializerMap = new HashMap<>();
  Map<Integer, MessageDeSerializer> deSerializerMap = new HashMap<>();
  Set<Integer> srcs = TaskPlanUtils.getTasksOfThisWorker(logicalPlan, sources);
  Set<Integer> tempsrcs = TaskPlanUtils.getTasksOfThisWorker(logicalPlan, sources);
  // need to set minus tasks as well: each source also registers a negative pseudo-id
  for (Integer src : tempsrcs) {
    srcs.add((src * -1) - 1);
  }
  for (int s : srcs) {
    // later look at how not to allocate pairs for this each time
    pendingSendMessagesPerSource.put(s,
        new ArrayBlockingQueue<>(CommunicationContext.sendPendingMax(cfg)));
    serializerMap.put(s, Serializers.get(isKeyed, this.messageSchema));
  }
  int maxReceiveBuffers = CommunicationContext.receiveBufferCount(cfg);
  int receiveExecutorsSize = receivingExecutors().size();
  if (receiveExecutorsSize == 0) {
    receiveExecutorsSize = 1;
  }
  for (int ex : sources) {
    int capacity = maxReceiveBuffers * 2 * receiveExecutorsSize;
    pendingReceiveMessagesPerSource.put(ex, new ArrayBlockingQueue<>(capacity));
    pendingReceiveDeSerializations.put(ex, new ArrayBlockingQueue<>(capacity));
    deSerializerMap.put(ex, Deserializers.get(isKeyed, this.messageSchema));
  }
  for (int src : srcs) {
    routingParamCache.put(src, new Int2ObjectOpenHashMap<>());
    for (int dest : destinations) {
      sendRoutingParameters(src, dest);
    }
  }
  delegete.init(cfg, dataType, receiveType, keyType, receiveKeyType, logicalPlan, edge,
      router.receivingExecutors(), this, pendingSendMessagesPerSource,
      pendingReceiveMessagesPerSource, pendingReceiveDeSerializations,
      serializerMap, deSerializerMap, isKeyed);
}
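One detail specific to MToNSimple is the minus-task mapping above: each real source id src also registers a pseudo-source (src * -1) - 1, presumably so the partial (intermediate) operation gets its own queue and serializer keys that cannot collide with the real non-negative task ids. A small standalone check of the mapping follows; MinusTaskIds is an illustrative name, not twister2 code.

// Demonstrates the id mapping used above: src -> (src * -1) - 1 sends each
// non-negative task id to a unique negative id (0 -> -1, 1 -> -2, ...), so the
// extra queue keys cannot collide with real task ids. The formula is its own
// inverse. (Standalone illustration, not twister2 code.)
public class MinusTaskIds {
  static int toMinusId(int src) {
    return (src * -1) - 1;
  }

  public static void main(String[] args) {
    for (int src : new int[]{0, 1, 5}) {
      int minus = toMinusId(src);
      // applying the formula twice recovers the original id
      System.out.printf("%d -> %d -> %d%n", src, minus, toMinusId(minus));
    }
    // prints: 0 -> -1 -> 0, 1 -> -2 -> 1, 5 -> -6 -> 5
  }
}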
Use of edu.iu.dsc.tws.comms.routing.PartitionRouter in project twister2 by DSC-SPIDAL.
The init method of the class MPIDataFlowLoadBalance.
/**
 * Initialize
 * @param cfg the configuration
 * @param t the message data type
 * @param taskPlan the task plan of this executor
 * @param edge the graph edge id used by this operation
 */
public void init(Config cfg, MessageType t, TaskPlan taskPlan, int edge) {
  this.thisSources = TaskPlanUtils.getTasksOfThisExecutor(taskPlan, sources);
  LOG.info(String.format("%d setup loadbalance routing %s",
      taskPlan.getThisExecutor(), thisSources));
  this.thisTasks = taskPlan.getTasksOfThisExecutor();
  this.router = new PartitionRouter(taskPlan, sources, destinations);
  Map<Integer, Set<Integer>> internal = router.getInternalSendTasks(0);
  Map<Integer, Set<Integer>> external = router.getExternalSendTasks(0);
  this.instancePlan = taskPlan;
  this.type = t;
  LOG.info(String.format("%d adding internal/external routing", taskPlan.getThisExecutor()));
  try {
    // as in the other initializers, the break limits this to the first source
    for (int s : thisSources) {
      Set<Integer> integerSetMap = internal.get(s);
      if (integerSetMap != null) {
        this.dests.internal.addAll(integerSetMap);
      }
      Set<Integer> integerSetMap1 = external.get(s);
      if (integerSetMap1 != null) {
        this.dests.external.addAll(integerSetMap1);
      }
      LOG.info(String.format("%d adding internal/external routing %d",
          taskPlan.getThisExecutor(), s));
      break;
    }
  } catch (Throwable te) {
    te.printStackTrace();
  }
  LOG.info(String.format("%d done adding internal/external routing",
      taskPlan.getThisExecutor()));
  if (this.finalReceiver != null && isLastReceiver()) {
    this.finalReceiver.init(cfg, this, receiveExpectedTaskIds());
  }
  Map<Integer, ArrayBlockingQueue<Pair<Object, MPISendMessage>>> pendingSendMessagesPerSource = new HashMap<>();
  Map<Integer, Queue<Pair<Object, MPIMessage>>> pendingReceiveMessagesPerSource = new HashMap<>();
  Map<Integer, Queue<MPIMessage>> pendingReceiveDeSerializations = new HashMap<>();
  Map<Integer, MessageSerializer> serializerMap = new HashMap<>();
  Map<Integer, MessageDeSerializer> deSerializerMap = new HashMap<>();
  Set<Integer> srcs = TaskPlanUtils.getTasksOfThisExecutor(taskPlan, sources);
  for (int s : srcs) {
    // later look at how not to allocate pairs for this each time
    ArrayBlockingQueue<Pair<Object, MPISendMessage>> pendingSendMessages =
        new ArrayBlockingQueue<Pair<Object, MPISendMessage>>(MPIContext.sendPendingMax(cfg));
    pendingSendMessagesPerSource.put(s, pendingSendMessages);
    serializerMap.put(s, new MPIMessageSerializer(new KryoSerializer()));
  }
  int maxReceiveBuffers = MPIContext.receiveBufferCount(cfg);
  int receiveExecutorsSize = receivingExecutors().size();
  if (receiveExecutorsSize == 0) {
    receiveExecutorsSize = 1;
  }
  Set<Integer> execs = router.receivingExecutors();
  for (int e : execs) {
    int capacity = maxReceiveBuffers * 2 * receiveExecutorsSize;
    Queue<Pair<Object, MPIMessage>> pendingReceiveMessages =
        new ArrayBlockingQueue<Pair<Object, MPIMessage>>(capacity);
    pendingReceiveMessagesPerSource.put(e, pendingReceiveMessages);
    pendingReceiveDeSerializations.put(e, new ArrayBlockingQueue<MPIMessage>(capacity));
    deSerializerMap.put(e, new MPIMessageDeSerializer(new KryoSerializer()));
  }
  delegete.init(cfg, t, taskPlan, edge, router.receivingExecutors(), router.isLastReceiver(),
      this, pendingSendMessagesPerSource, pendingReceiveMessagesPerSource,
      pendingReceiveDeSerializations, serializerMap, deSerializerMap, false);
}
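Both MPI variants size their per-executor receive queues the same way: twice the configured receive-buffer count, multiplied by the number of receiving executors, with a floor of one executor so the queue capacity never reaches zero (ArrayBlockingQueue rejects capacities below one). A minimal sketch of that rule follows; ReceiveQueueSizing is an illustrative name, not twister2 code.

import java.util.concurrent.ArrayBlockingQueue;

// Capacity rule used by both MPI initializers above: 2 * receiveBufferCount
// per receiving executor, floored at one executor so the capacity stays valid.
public class ReceiveQueueSizing {
  static int capacity(int maxReceiveBuffers, int receivingExecutors) {
    int executors = Math.max(1, receivingExecutors);
    return maxReceiveBuffers * 2 * executors;
  }

  public static void main(String[] args) {
    System.out.println(capacity(4, 0));  // 8: no remote executors, floor kicks in
    // the snippets allocate one bounded queue of this size per executor:
    ArrayBlockingQueue<Object> q = new ArrayBlockingQueue<>(capacity(4, 3));
    System.out.println(q.remainingCapacity());  // 24
  }
}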