Usage of edu.iu.dsc.tws.comms.routing.BinaryTreeRouter in the twister2 project by DSC-SPIDAL: the init method of the MPIDataFlowBroadcast class.
/**
 * Initializes this broadcast operation: builds the binary-tree route from the
 * source to the destinations, wires up the mandatory final receiver, and
 * allocates the per-source send queues and per-executor receive queues that
 * are handed to the channel delegate.
 *
 * @param cfg   configuration; buffer counts and pending-queue limits are read from it
 * @param t     the message type carried on this edge
 * @param tPlan task plan describing which tasks run on which executor
 * @param ed    the edge (stream) identifier this operation communicates on
 * @throws RuntimeException if no final receiver has been set before init
 */
public void init(Config cfg, MessageType t, TaskPlan tPlan, int ed) {
  this.config = cfg;
  this.instancePlan = tPlan;
  this.type = t;
  this.edge = ed;
  this.executor = tPlan.getThisExecutor();

  // we will only have one distinct route
  router = new BinaryTreeRouter(cfg, tPlan, source, destinations);

  if (this.finalReceiver != null) {
    this.finalReceiver.init(cfg, this, receiveExpectedTaskIds());
  } else {
    throw new RuntimeException("Final receiver is required");
  }

  LOG.log(Level.FINE, String.format("%d bcast sources %d dest %s send tasks: %s",
      executor, source, destinations, router.sendQueueIds()));

  Map<Integer, Queue<Pair<Object, MPIMessage>>> pendingReceiveMessagesPerSource = new HashMap<>();
  Map<Integer, Queue<MPIMessage>> pendingReceiveDeSerializations = new HashMap<>();
  Map<Integer, MessageSerializer> serializerMap = new HashMap<>();
  Map<Integer, MessageDeSerializer> deSerializerMap = new HashMap<>();

  // One bounded send queue and one serializer per source task on this executor.
  Set<Integer> srcs = router.sendQueueIds();
  for (int s : srcs) {
    // later look at how not to allocate pairs for this each time
    ArrayBlockingQueue<Pair<Object, MPISendMessage>> pendingSendMessages =
        new ArrayBlockingQueue<>(MPIContext.sendPendingMax(cfg));
    pendingSendMessagesPerSource.put(s, pendingSendMessages);
    serializerMap.put(s, new MPIMessageSerializer(new KryoSerializer()));
  }

  int maxReceiveBuffers = MPIContext.receiveBufferCount(cfg);
  int receiveExecutorsSize = receivingExecutors().size();
  if (receiveExecutorsSize == 0) {
    // avoid a zero-capacity queue when nothing is received from other executors
    receiveExecutorsSize = 1;
  }

  // Capacity is loop-invariant: compute it once instead of per executor.
  int capacity = maxReceiveBuffers * 2 * receiveExecutorsSize;
  Set<Integer> execs = router.receivingExecutors();
  for (int e : execs) {
    Queue<Pair<Object, MPIMessage>> pendingReceiveMessages = new ArrayBlockingQueue<>(capacity);
    pendingReceiveMessagesPerSource.put(e, pendingReceiveMessages);
    pendingReceiveDeSerializations.put(e, new ArrayBlockingQueue<>(capacity));
    deSerializerMap.put(e, new MPIMessageDeSerializer(new KryoSerializer()));
  }

  // Pre-compute routing parameters so the send path does not build them lazily.
  for (Integer s : srcs) {
    routingParametersCache.put(s, sendRoutingParameters(s, 0));
  }

  delegete.init(cfg, t, tPlan, ed, router.receivingExecutors(), router.isLastReceiver(), this,
      pendingSendMessagesPerSource, pendingReceiveMessagesPerSource,
      pendingReceiveDeSerializations, serializerMap, deSerializerMap, false);
}
Usage of edu.iu.dsc.tws.comms.routing.BinaryTreeRouter in the twister2 project by DSC-SPIDAL: the init method of the TreeBroadcast class.
/**
 * Initializes this tree broadcast: builds the binary-tree route from the
 * source to the destinations, wires up the mandatory final receiver, and
 * allocates the per-source send queues and receive queues passed to the
 * channel delegate (keyed variant when a key type is present).
 *
 * @param cfg       configuration; buffer counts and pending-queue limits are read from it
 * @param dType     data type sent on this edge
 * @param recvDType data type expected on receive
 * @param kType     key type, or null for an un-keyed broadcast
 * @param recvKType key type expected on receive
 * @param tPlan     logical plan describing which tasks run on which worker
 * @param ed        the edge (stream) identifier this operation communicates on
 * @throws RuntimeException if no final receiver has been set before init
 */
public void init(Config cfg, MessageType dType, MessageType recvDType, MessageType kType,
                 MessageType recvKType, LogicalPlan tPlan, int ed) {
  this.config = cfg;
  this.instancePlan = tPlan;
  this.dataType = dType;
  this.recvDataType = recvDType;
  this.keyType = kType;
  this.recvKeyType = recvKType;
  this.edge = ed;
  this.executor = tPlan.getThisWorker();
  this.currentReceiveMessage = new ArrayBlockingQueue<>(CommunicationContext.sendPendingMax(cfg));

  // we will only have one distinct route
  router = new BinaryTreeRouter(cfg, tPlan, source, destinations);

  if (this.finalReceiver != null) {
    this.finalReceiver.init(cfg, this, receiveExpectedTaskIds());
  } else {
    throw new RuntimeException("Final receiver is required");
  }

  LOG.log(Level.FINE, String.format("%d bcast sources %d dest %s send tasks: %s",
      executor, source, destinations, router.sendQueueIds()));

  thisSources = TaskPlanUtils.getTasksOfThisWorker(tPlan, sourceSet);

  Map<Integer, Queue<InMessage>> pendingReceiveMessagesPerSource = new HashMap<>();
  Map<Integer, Queue<InMessage>> pendingReceiveDeSerializations = new HashMap<>();
  Map<Integer, MessageSerializer> serializerMap = new HashMap<>();
  Map<Integer, MessageDeSerializer> deSerializerMap = new HashMap<>();

  // One bounded send queue and one serializer per source task on this worker.
  Set<Integer> srcs = router.sendQueueIds();
  for (int s : srcs) {
    // later look at how not to allocate pairs for this each time
    ArrayBlockingQueue<OutMessage> pendingSendMessages =
        new ArrayBlockingQueue<>(CommunicationContext.sendPendingMax(cfg));
    pendingSendMessagesPerSource.put(s, pendingSendMessages);
    serializerMap.put(s, Serializers.get(kType != null, this.messageSchema));
  }

  int maxReceiveBuffers = CommunicationContext.receiveBufferCount(cfg);
  int receiveExecutorsSize = receivingExecutors().size();
  if (receiveExecutorsSize == 0) {
    // avoid a zero-capacity queue when nothing is received from other workers
    receiveExecutorsSize = 1;
  }

  // Capacity is loop-invariant: compute it once instead of per iteration.
  int capacity = maxReceiveBuffers * 2 * receiveExecutorsSize;
  Set<Integer> execs = router.getReceiveSources();
  // NOTE(review): every iteration puts under the single broadcast `source`
  // key, so later iterations overwrite earlier ones; the sibling
  // MPIDataFlowBroadcast.init keys by the loop variable `e` instead.
  // Preserved as-is — confirm whether `e` was intended here.
  for (int e : execs) {
    Queue<InMessage> pendingReceiveMessages = new ArrayBlockingQueue<>(capacity);
    pendingReceiveMessagesPerSource.put(source, pendingReceiveMessages);
    pendingReceiveDeSerializations.put(source, new ArrayBlockingQueue<>(capacity));
    deSerializerMap.put(source, Deserializers.get(kType != null, this.messageSchema));
  }

  calculateRoutingParameters();
  // Pre-compute routing parameters so the send path does not build them lazily.
  for (Integer s : srcs) {
    routingParametersCache.put(s, sendRoutingParameters(s, 0));
  }

  if (this.keyType != null) {
    delegate.init(cfg, dType, recvDataType, kType, recvKType, tPlan, ed,
        router.receivingExecutors(), this, pendingSendMessagesPerSource,
        pendingReceiveMessagesPerSource, pendingReceiveDeSerializations,
        serializerMap, deSerializerMap, true);
  } else {
    delegate.init(cfg, dType, recvDataType, tPlan, ed, router.receivingExecutors(), this,
        pendingSendMessagesPerSource, pendingReceiveMessagesPerSource,
        pendingReceiveDeSerializations, serializerMap, deSerializerMap, false);
  }
}
Aggregations