
Example 1 with InvertedBinaryTreeRouter

Use of edu.iu.dsc.tws.comms.routing.InvertedBinaryTreeRouter in project twister2 by DSC-SPIDAL.

From the class MPIDataFlowGather, the method init:

/**
 * Initialize the gather operation: create the inverted binary tree routing,
 * initialize the partial and final receivers, and set up the per-source send
 * queues and per-executor receive queues before initializing the delegate.
 *
 * @param cfg      the configuration
 * @param t        the type of the messages being gathered
 * @param taskPlan the task plan mapping tasks to executors
 * @param edge     the edge of the task graph this operation communicates on
 */
public void init(Config cfg, MessageType t, TaskPlan taskPlan, int edge) {
    this.type = t;
    this.instancePlan = taskPlan;
    this.executor = taskPlan.getThisExecutor();
    // we only have one path
    this.router = new InvertedBinaryTreeRouter(cfg, taskPlan, destination, sources, index);
    // initialize the receive
    if (this.partialReceiver != null && !isLastReceiver()) {
        partialReceiver.init(cfg, this, receiveExpectedTaskIds());
    }
    if (this.finalReceiver != null && isLastReceiver()) {
        this.finalReceiver.init(cfg, this, receiveExpectedTaskIds());
    }
    Map<Integer, ArrayBlockingQueue<Pair<Object, MPISendMessage>>> pendingSendMessagesPerSource = new HashMap<>();
    Map<Integer, Queue<Pair<Object, MPIMessage>>> pendingReceiveMessagesPerSource = new HashMap<>();
    Map<Integer, Queue<MPIMessage>> pendingReceiveDeSerializations = new HashMap<>();
    Map<Integer, MessageSerializer> serializerMap = new HashMap<>();
    Map<Integer, MessageDeSerializer> deSerializerMap = new HashMap<>();
    Set<Integer> srcs = router.sendQueueIds();
    for (int s : srcs) {
        // later look at how not to allocate pairs for this each time
        ArrayBlockingQueue<Pair<Object, MPISendMessage>> pendingSendMessages =
            new ArrayBlockingQueue<>(MPIContext.sendPendingMax(cfg));
        pendingSendMessagesPerSource.put(s, pendingSendMessages);
        serializerMap.put(s, new MPIMultiMessageSerializer(new KryoSerializer(), executor));
    }
    int maxReceiveBuffers = MPIContext.receiveBufferCount(cfg);
    int receiveExecutorsSize = receivingExecutors().size();
    if (receiveExecutorsSize == 0) {
        receiveExecutorsSize = 1;
    }
    Set<Integer> execs = router.receivingExecutors();
    for (int e : execs) {
        int capacity = maxReceiveBuffers * 2 * receiveExecutorsSize;
        Queue<Pair<Object, MPIMessage>> pendingReceiveMessages = new ArrayBlockingQueue<>(capacity);
        pendingReceiveMessagesPerSource.put(e, pendingReceiveMessages);
        pendingReceiveDeSerializations.put(e, new ArrayBlockingQueue<>(capacity));
        deSerializerMap.put(e, new MPIMultiMessageDeserializer(new KryoSerializer(), executor));
    }
    Set<Integer> sourcesOfThisExec = TaskPlanUtils.getTasksOfThisExecutor(taskPlan, sources);
    for (int s : sourcesOfThisExec) {
        sendRoutingParameters(s, pathToUse);
        partialSendRoutingParameters(s, pathToUse);
    }
    delegete.init(cfg, t, taskPlan, edge, router.receivingExecutors(), router.isLastReceiver(),
        this, pendingSendMessagesPerSource, pendingReceiveMessagesPerSource,
        pendingReceiveDeSerializations, serializerMap, deSerializerMap, isKeyed);
    delegete.setKeyType(keyType);
}
Also used: MessageSerializer (edu.iu.dsc.tws.comms.mpi.io.MessageSerializer), MPIMultiMessageSerializer (edu.iu.dsc.tws.comms.mpi.io.MPIMultiMessageSerializer), MPIMultiMessageDeserializer (edu.iu.dsc.tws.comms.mpi.io.MPIMultiMessageDeserializer), MessageDeSerializer (edu.iu.dsc.tws.comms.mpi.io.MessageDeSerializer), KryoSerializer (edu.iu.dsc.tws.comms.utils.KryoSerializer), InvertedBinaryTreeRouter (edu.iu.dsc.tws.comms.routing.InvertedBinaryTreeRouter), HashMap (java.util.HashMap), Queue (java.util.Queue), ArrayBlockingQueue (java.util.concurrent.ArrayBlockingQueue), Pair (org.apache.commons.lang3.tuple.Pair)
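
Both examples follow the same wiring pattern: one bounded send queue and serializer per local source task, and one bounded receive queue and deserializer per sending executor, with the receive capacity scaled by the executor count. Below is a minimal, self-contained sketch of that pattern, using plain Strings as payloads; SEND_PENDING_MAX and the hard-coded id sets are illustrative stand-ins for the values twister2 derives from MPIContext and the router, not twister2 API.

import java.util.HashMap;
import java.util.Map;
import java.util.Queue;
import java.util.Set;
import java.util.concurrent.ArrayBlockingQueue;

public class QueueWiringSketch {

    // stand-in for MPIContext.sendPendingMax(cfg)
    private static final int SEND_PENDING_MAX = 16;

    public static void main(String[] args) {
        // stand-ins for router.sendQueueIds() and router.receivingExecutors()
        Set<Integer> sources = Set.of(0, 1, 2);
        Set<Integer> executors = Set.of(10, 11);
        // stand-in for MPIContext.receiveBufferCount(cfg)
        int maxReceiveBuffers = 4;

        // one bounded send queue per local source task
        Map<Integer, Queue<String>> pendingSendPerSource = new HashMap<>();
        for (int s : sources) {
            pendingSendPerSource.put(s, new ArrayBlockingQueue<>(SEND_PENDING_MAX));
        }

        // receive capacity scales with the number of sending executors,
        // floored at 1 so the queues are never zero-sized
        int receiveExecutors = Math.max(executors.size(), 1);
        int capacity = maxReceiveBuffers * 2 * receiveExecutors;

        // one bounded receive queue per sending executor
        Map<Integer, Queue<String>> pendingReceivePerExecutor = new HashMap<>();
        for (int e : executors) {
            pendingReceivePerExecutor.put(e, new ArrayBlockingQueue<>(capacity));
        }

        System.out.println("send queues for sources " + pendingSendPerSource.keySet()
            + ", receive capacity per executor: " + capacity);
    }
}

The bounded ArrayBlockingQueue is what gives the design back-pressure: once a pending-send queue fills up, further offers fail until the progress engine drains it, so a fast producer cannot run ahead of the network unboundedly.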

Example 2 with InvertedBinaryTreeRouter

Use of edu.iu.dsc.tws.comms.routing.InvertedBinaryTreeRouter in project twister2 by DSC-SPIDAL.

From the class MPIDataFlowReduce, the method init:

/**
 * Initialize the reduce operation: create the inverted binary tree routing,
 * initialize the partial and final receivers, and set up the per-source send
 * queues and per-executor receive queues before initializing the delegate.
 *
 * @param cfg      the configuration
 * @param t        the type of the messages being reduced
 * @param taskPlan the task plan mapping tasks to executors
 * @param edge     the edge of the task graph this operation communicates on
 */
public void init(Config cfg, MessageType t, TaskPlan taskPlan, int edge) {
    this.instancePlan = taskPlan;
    this.config = cfg;
    this.type = t;
    this.executor = instancePlan.getThisExecutor();
    // we only have one path
    this.router = new InvertedBinaryTreeRouter(cfg, taskPlan, destination, sources, index);
    // initialize the receive
    if (this.partialReceiver != null && !isLastReceiver()) {
        partialReceiver.init(cfg, this, receiveExpectedTaskIds());
    }
    if (this.finalReceiver != null && isLastReceiver()) {
        this.finalReceiver.init(cfg, this, receiveExpectedTaskIds());
    }
    LOG.log(Level.FINE, String.format("%d reduce sources %s dest %d send tasks: %s",
        executor, sources, destination, router.sendQueueIds()));
    Map<Integer, ArrayBlockingQueue<Pair<Object, MPISendMessage>>> pendingSendMessagesPerSource = new HashMap<>();
    Map<Integer, Queue<Pair<Object, MPIMessage>>> pendingReceiveMessagesPerSource = new HashMap<>();
    Map<Integer, Queue<MPIMessage>> pendingReceiveDeSerializations = new HashMap<>();
    Map<Integer, MessageSerializer> serializerMap = new HashMap<>();
    Map<Integer, MessageDeSerializer> deSerializerMap = new HashMap<>();
    Set<Integer> srcs = router.sendQueueIds();
    for (int s : srcs) {
        // later look at how not to allocate pairs for this each time
        ArrayBlockingQueue<Pair<Object, MPISendMessage>> pendingSendMessages =
            new ArrayBlockingQueue<>(MPIContext.sendPendingMax(cfg));
        pendingSendMessagesPerSource.put(s, pendingSendMessages);
        serializerMap.put(s, new MPIMessageSerializer(new KryoSerializer()));
    }
    int maxReceiveBuffers = MPIContext.receiveBufferCount(cfg);
    int receiveExecutorsSize = receivingExecutors().size();
    if (receiveExecutorsSize == 0) {
        receiveExecutorsSize = 1;
    }
    Set<Integer> execs = router.receivingExecutors();
    for (int e : execs) {
        int capacity = maxReceiveBuffers * 2 * receiveExecutorsSize;
        Queue<Pair<Object, MPIMessage>> pendingReceiveMessages = new ArrayBlockingQueue<>(capacity);
        pendingReceiveMessagesPerSource.put(e, pendingReceiveMessages);
        pendingReceiveDeSerializations.put(e, new ArrayBlockingQueue<>(capacity));
        deSerializerMap.put(e, new MPIMessageDeSerializer(new KryoSerializer()));
    }
    Set<Integer> sourcesOfThisExec = TaskPlanUtils.getTasksOfThisExecutor(taskPlan, sources);
    for (int s : sourcesOfThisExec) {
        sendRoutingParameters(s, pathToUse);
        partialSendRoutingParameters(s, pathToUse);
    }
    this.delegete.setCompletionListener(completionListener);
    delegete.init(cfg, t, taskPlan, edge, router.receivingExecutors(), router.isLastReceiver(),
        this, pendingSendMessagesPerSource, pendingReceiveMessagesPerSource,
        pendingReceiveDeSerializations, serializerMap, deSerializerMap, false);
}
Also used: MPIMessageSerializer (edu.iu.dsc.tws.comms.mpi.io.MPIMessageSerializer), MPIMessageDeSerializer (edu.iu.dsc.tws.comms.mpi.io.MPIMessageDeSerializer), MessageSerializer (edu.iu.dsc.tws.comms.mpi.io.MessageSerializer), MessageDeSerializer (edu.iu.dsc.tws.comms.mpi.io.MessageDeSerializer), KryoSerializer (edu.iu.dsc.tws.comms.utils.KryoSerializer), InvertedBinaryTreeRouter (edu.iu.dsc.tws.comms.routing.InvertedBinaryTreeRouter), HashMap (java.util.HashMap), Queue (java.util.Queue), ArrayBlockingQueue (java.util.concurrent.ArrayBlockingQueue), Pair (org.apache.commons.lang3.tuple.Pair)
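
The reduce wiring is nearly identical to the gather wiring; the two visible differences are the serializer pair it registers (MPIMessageSerializer/MPIMessageDeSerializer instead of the multi-message variants) and the keyed flag passed to the delegate (hard-coded false instead of isKeyed). A hedged sketch of that selection, using hypothetical stand-in types rather than the real twister2 classes:

// Hypothetical stand-ins; the real types are MPIMultiMessageSerializer,
// MPIMessageSerializer, and their deserializer counterparts.
interface Serializer {
}

final class MultiMessageSerializer implements Serializer {
    // stand-in for MPIMultiMessageSerializer, which handles grouped, possibly keyed messages
}

final class SingleMessageSerializer implements Serializer {
    // stand-in for MPIMessageSerializer, which handles single values
}

enum Op { GATHER, REDUCE }

public class SerializerChoiceSketch {

    static Serializer serializerFor(Op op) {
        // gather registers the multi-message serializer; reduce the single-message one
        return op == Op.GATHER ? new MultiMessageSerializer() : new SingleMessageSerializer();
    }

    static boolean keyedFlagFor(Op op, boolean isKeyed) {
        // gather forwards the operation's isKeyed flag to the delegate; reduce hard-codes false
        return op == Op.GATHER && isKeyed;
    }

    public static void main(String[] args) {
        for (Op op : Op.values()) {
            System.out.println(op + " -> " + serializerFor(op).getClass().getSimpleName()
                + ", keyed=" + keyedFlagFor(op, true));
        }
    }
}

This matches the intuition of the two collectives: a gather must keep individual (possibly keyed) messages distinguishable as they move up the tree, while a reduce combines values at each step and only ever ships a single merged message.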

Aggregations

MessageDeSerializer (edu.iu.dsc.tws.comms.mpi.io.MessageDeSerializer): 2 uses
MessageSerializer (edu.iu.dsc.tws.comms.mpi.io.MessageSerializer): 2 uses
InvertedBinaryTreeRouter (edu.iu.dsc.tws.comms.routing.InvertedBinaryTreeRouter): 2 uses
KryoSerializer (edu.iu.dsc.tws.comms.utils.KryoSerializer): 2 uses
HashMap (java.util.HashMap): 2 uses
Queue (java.util.Queue): 2 uses
ArrayBlockingQueue (java.util.concurrent.ArrayBlockingQueue): 2 uses
Pair (org.apache.commons.lang3.tuple.Pair): 2 uses
MPIMessageDeSerializer (edu.iu.dsc.tws.comms.mpi.io.MPIMessageDeSerializer): 1 use
MPIMessageSerializer (edu.iu.dsc.tws.comms.mpi.io.MPIMessageSerializer): 1 use
MPIMultiMessageDeserializer (edu.iu.dsc.tws.comms.mpi.io.MPIMultiMessageDeserializer): 1 use
MPIMultiMessageSerializer (edu.iu.dsc.tws.comms.mpi.io.MPIMultiMessageSerializer): 1 use