
Example 1 with PartitionRouter

Use of edu.iu.dsc.tws.comms.routing.PartitionRouter in project twister2 by DSC-SPIDAL.

The init method of the class MPIDataFlowPartition.

/**
 * Initialize
 */
public void init(Config cfg, MessageType t, TaskPlan taskPlan, int edge) {
    this.thisSources = TaskPlanUtils.getTasksOfThisExecutor(taskPlan, sources);
    LOG.info(String.format("%d setup loadbalance routing %s", taskPlan.getThisExecutor(), thisSources));
    this.thisTasks = taskPlan.getTasksOfThisExecutor();
    this.router = new PartitionRouter(taskPlan, sources, destinations);
    Map<Integer, Set<Integer>> internal = router.getInternalSendTasks(0);
    Map<Integer, Set<Integer>> external = router.getExternalSendTasks(0);
    this.instancePlan = taskPlan;
    this.type = t;
    LOG.log(Level.FINE, String.format("%d adding internal/external routing", taskPlan.getThisExecutor()));
    for (int s : thisSources) {
        Set<Integer> integerSetMap = internal.get(s);
        if (integerSetMap != null) {
            this.dests.internal.addAll(integerSetMap);
        }
        Set<Integer> integerSetMap1 = external.get(s);
        if (integerSetMap1 != null) {
            this.dests.external.addAll(integerSetMap1);
        }
        LOG.fine(String.format("%d adding internal/external routing %d", taskPlan.getThisExecutor(), s));
        // the loop exits after the first local source: partition routing is the
        // same for every source, so one pass fills dests.internal / dests.external
        break;
    }
    LOG.log(Level.FINE, String.format("%d done adding internal/external routing", taskPlan.getThisExecutor()));
    // TODO : Does this send the correct receiveExpectedTaskIds for partition communication
    if (this.finalReceiver != null && isLastReceiver()) {
        this.finalReceiver.init(cfg, this, receiveExpectedTaskIds());
    }
    Map<Integer, ArrayBlockingQueue<Pair<Object, MPISendMessage>>> pendingSendMessagesPerSource = new HashMap<>();
    Map<Integer, Queue<Pair<Object, MPIMessage>>> pendingReceiveMessagesPerSource = new HashMap<>();
    Map<Integer, Queue<MPIMessage>> pendingReceiveDeSerializations = new HashMap<>();
    Map<Integer, MessageSerializer> serializerMap = new HashMap<>();
    Map<Integer, MessageDeSerializer> deSerializerMap = new HashMap<>();
    Set<Integer> srcs = TaskPlanUtils.getTasksOfThisExecutor(taskPlan, sources);
    for (int s : srcs) {
        // later look at how not to allocate pairs for this each time
        ArrayBlockingQueue<Pair<Object, MPISendMessage>> pendingSendMessages = new ArrayBlockingQueue<Pair<Object, MPISendMessage>>(MPIContext.sendPendingMax(cfg));
        pendingSendMessagesPerSource.put(s, pendingSendMessages);
        serializerMap.put(s, new MPIMessageSerializer(new KryoSerializer()));
    }
    int maxReceiveBuffers = MPIContext.receiveBufferCount(cfg);
    int receiveExecutorsSize = receivingExecutors().size();
    if (receiveExecutorsSize == 0) {
        receiveExecutorsSize = 1;
    }
    Set<Integer> execs = router.receivingExecutors();
    for (int e : execs) {
        int capacity = maxReceiveBuffers * 2 * receiveExecutorsSize;
        Queue<Pair<Object, MPIMessage>> pendingReceiveMessages = new ArrayBlockingQueue<Pair<Object, MPIMessage>>(capacity);
        pendingReceiveMessagesPerSource.put(e, pendingReceiveMessages);
        pendingReceiveDeSerializations.put(e, new ArrayBlockingQueue<MPIMessage>(capacity));
        deSerializerMap.put(e, new MPIMessageDeSerializer(new KryoSerializer()));
    }
    for (int src : srcs) {
        for (int dest : destinations) {
            sendRoutingParameters(src, dest);
        }
    }
    delegete.setCompletionListener(completionListener);
    delegete.init(cfg, t, taskPlan, edge, router.receivingExecutors(), router.isLastReceiver(), this, pendingSendMessagesPerSource, pendingReceiveMessagesPerSource, pendingReceiveDeSerializations, serializerMap, deSerializerMap, isKeyed);
    delegete.setKeyType(keyType);
}
Also used : Set (java.util.Set) MPIMessageDeSerializer (edu.iu.dsc.tws.comms.mpi.io.MPIMessageDeSerializer) MessageSerializer (edu.iu.dsc.tws.comms.mpi.io.MessageSerializer) MPIMessageSerializer (edu.iu.dsc.tws.comms.mpi.io.MPIMessageSerializer) HashMap (java.util.HashMap) ArrayBlockingQueue (java.util.concurrent.ArrayBlockingQueue) PartitionRouter (edu.iu.dsc.tws.comms.routing.PartitionRouter) Queue (java.util.Queue) Pair (org.apache.commons.lang3.tuple.Pair) KryoSerializer (edu.iu.dsc.tws.comms.utils.KryoSerializer) MessageDeSerializer (edu.iu.dsc.tws.comms.mpi.io.MessageDeSerializer)
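
The useful pattern in this init is the split that PartitionRouter computes up front: destinations living in the same executor (internal) can be handed over in-process, while the remaining ones (external) must be serialized and shipped over the network. Below is a minimal, self-contained sketch of that dispatch decision, using plain JDK collections and made-up task ids; nothing in it is twister2 API:

import java.util.Set;

public class RoutingSplitSketch {
    public static void main(String[] args) {
        // hypothetical destination sets, playing the role of dests.internal / dests.external
        Set<Integer> internal = Set.of(0, 1);    // tasks on this executor
        Set<Integer> external = Set.of(4, 5, 6); // tasks on remote executors

        for (int dest : new int[] {0, 4, 5}) {
            if (internal.contains(dest)) {
                System.out.println(dest + ": deliver in-process, no serialization");
            } else if (external.contains(dest)) {
                System.out.println(dest + ": enqueue for serialization and a network send");
            }
        }
    }
}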

Example 2 with PartitionRouter

Use of edu.iu.dsc.tws.comms.routing.PartitionRouter in project twister2 by DSC-SPIDAL.

The init method of the class MToNSimple.

/**
 * Initialize
 */
public void init(Config cfg, MessageType t, LogicalPlan logicalPlan, int ed) {
    this.edge = ed;
    Set<Integer> thisSources = TaskPlanUtils.getTasksOfThisWorker(logicalPlan, sources);
    int executor = logicalPlan.getThisWorker();
    LOG.log(Level.FINE, String.format("%d setup loadbalance routing %s %s", logicalPlan.getThisWorker(), sources, destinations));
    this.router = new PartitionRouter(logicalPlan, sources, destinations);
    Map<Integer, Set<Integer>> internal = router.getInternalSendTasks();
    Map<Integer, Set<Integer>> external = router.getExternalSendTasks();
    this.instancePlan = logicalPlan;
    this.dataType = t;
    if (this.receiveType == null) {
        this.receiveType = dataType;
    }
    LOG.log(Level.FINE, String.format("%d adding internal/external routing", logicalPlan.getThisWorker()));
    for (int s : thisSources) {
        Set<Integer> integerSetMap = internal.get(s);
        if (integerSetMap != null) {
            this.internalDestinations.addAll(integerSetMap);
        }
        Set<Integer> integerSetMap1 = external.get(s);
        if (integerSetMap1 != null) {
            this.externalDestinations.addAll(integerSetMap1);
        }
        LOG.fine(String.format("%d adding internal/external routing %d", logicalPlan.getThisWorker(), s));
        // exits after the first source; all local sources share the same
        // internal/external destination sets under partition routing
        break;
    }
    LOG.log(Level.FINE, String.format("%d done adding internal/external routing", logicalPlan.getThisWorker()));
    this.finalReceiver.init(cfg, this, receiveExpectedTaskIds());
    this.partialReceiver.init(cfg, this, router.partialExpectedTaskIds());
    Map<Integer, ArrayBlockingQueue<OutMessage>> pendingSendMessagesPerSource = new HashMap<>();
    Map<Integer, Queue<InMessage>> pendingReceiveMessagesPerSource = new HashMap<>();
    Map<Integer, Queue<InMessage>> pendingReceiveDeSerializations = new HashMap<>();
    Map<Integer, MessageSerializer> serializerMap = new HashMap<>();
    Map<Integer, MessageDeSerializer> deSerializerMap = new HashMap<>();
    Set<Integer> srcs = TaskPlanUtils.getTasksOfThisWorker(logicalPlan, sources);
    Set<Integer> tempsrcs = TaskPlanUtils.getTasksOfThisWorker(logicalPlan, sources);
    // also register the mirrored negative ids ((src * -1) - 1); the partial send
    // path uses these so it never collides with a real source id
    for (Integer src : tempsrcs) {
        srcs.add((src * -1) - 1);
    }
    for (int s : srcs) {
        // later look at how not to allocate pairs for this each time
        pendingSendMessagesPerSource.put(s, new ArrayBlockingQueue<>(CommunicationContext.sendPendingMax(cfg)));
        serializerMap.put(s, Serializers.get(isKeyed, this.messageSchema));
    }
    int maxReceiveBuffers = CommunicationContext.receiveBufferCount(cfg);
    int receiveExecutorsSize = receivingExecutors().size();
    if (receiveExecutorsSize == 0) {
        receiveExecutorsSize = 1;
    }
    for (int ex : sources) {
        int capacity = maxReceiveBuffers * 2 * receiveExecutorsSize;
        pendingReceiveMessagesPerSource.put(ex, new ArrayBlockingQueue<>(capacity));
        pendingReceiveDeSerializations.put(ex, new ArrayBlockingQueue<>(capacity));
        deSerializerMap.put(ex, Deserializers.get(isKeyed, this.messageSchema));
    }
    for (int src : srcs) {
        routingParamCache.put(src, new Int2ObjectOpenHashMap<>());
        for (int dest : destinations) {
            sendRoutingParameters(src, dest);
        }
    }
    delegete.init(cfg, dataType, receiveType, keyType, receiveKeyType, logicalPlan, edge, router.receivingExecutors(), this, pendingSendMessagesPerSource, pendingReceiveMessagesPerSource, pendingReceiveDeSerializations, serializerMap, deSerializerMap, isKeyed);
}
Also used : Set (java.util.Set) MessageSerializer (edu.iu.dsc.tws.api.comms.packing.MessageSerializer) HashMap (java.util.HashMap) Int2ObjectOpenHashMap (it.unimi.dsi.fastutil.ints.Int2ObjectOpenHashMap) MessageDeSerializer (edu.iu.dsc.tws.api.comms.packing.MessageDeSerializer) ArrayBlockingQueue (java.util.concurrent.ArrayBlockingQueue) PartitionRouter (edu.iu.dsc.tws.comms.routing.PartitionRouter) Queue (java.util.Queue)
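
The one non-obvious step above is that mirrored-id trick: for each real source id src, the method also registers (src * -1) - 1, giving the partial send path its own pending queue and serializer without ever colliding with a real (non-negative) task id. The mapping is its own inverse, which a short sketch confirms; the helper name is made up, only the arithmetic comes from the code above:

public class MirroredIdSketch {
    // hypothetical helper capturing the (src * -1) - 1 convention
    static int mirror(int taskId) {
        return (taskId * -1) - 1;
    }

    public static void main(String[] args) {
        for (int src : new int[] {0, 3, 7}) {
            int m = mirror(src);
            // real ids are >= 0, mirrored ids are <= -1, and mirror(mirror(x)) == x
            System.out.printf("src=%d mirrored=%d back=%d%n", src, m, mirror(m));
        }
    }
}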

Example 3 with PartitionRouter

Use of edu.iu.dsc.tws.comms.routing.PartitionRouter in project twister2 by DSC-SPIDAL.

The init method of the class MPIDataFlowLoadBalance.

/**
 * Initialize the operation
 *
 * @param cfg the configuration
 * @param t the data type of messages on this edge
 * @param taskPlan the task plan for this executor
 * @param edge the graph edge this operation communicates on
 */
public void init(Config cfg, MessageType t, TaskPlan taskPlan, int edge) {
    this.thisSources = TaskPlanUtils.getTasksOfThisExecutor(taskPlan, sources);
    LOG.info(String.format("%d setup loadbalance routing %s", taskPlan.getThisExecutor(), thisSources));
    this.thisTasks = taskPlan.getTasksOfThisExecutor();
    this.router = new PartitionRouter(taskPlan, sources, destinations);
    Map<Integer, Set<Integer>> internal = router.getInternalSendTasks(0);
    Map<Integer, Set<Integer>> external = router.getExternalSendTasks(0);
    this.instancePlan = taskPlan;
    this.type = t;
    LOG.info(String.format("%d adding internal/external routing", taskPlan.getThisExecutor()));
    try {
        for (int s : thisSources) {
            Set<Integer> integerSetMap = internal.get(s);
            if (integerSetMap != null) {
                this.dests.internal.addAll(integerSetMap);
            }
            Set<Integer> integerSetMap1 = external.get(s);
            if (integerSetMap1 != null) {
                this.dests.external.addAll(integerSetMap1);
            }
            LOG.info(String.format("%d adding internal/external routing %d", taskPlan.getThisExecutor(), s));
            // exits after the first source; all local sources share the same
            // internal/external destination sets
            break;
        }
    } catch (Throwable te) {
        te.printStackTrace();
    }
    LOG.info(String.format("%d done adding internal/external routing", taskPlan.getThisExecutor()));
    if (this.finalReceiver != null && isLastReceiver()) {
        this.finalReceiver.init(cfg, this, receiveExpectedTaskIds());
    }
    Map<Integer, ArrayBlockingQueue<Pair<Object, MPISendMessage>>> pendingSendMessagesPerSource = new HashMap<>();
    Map<Integer, Queue<Pair<Object, MPIMessage>>> pendingReceiveMessagesPerSource = new HashMap<>();
    Map<Integer, Queue<MPIMessage>> pendingReceiveDeSerializations = new HashMap<>();
    Map<Integer, MessageSerializer> serializerMap = new HashMap<>();
    Map<Integer, MessageDeSerializer> deSerializerMap = new HashMap<>();
    Set<Integer> srcs = TaskPlanUtils.getTasksOfThisExecutor(taskPlan, sources);
    for (int s : srcs) {
        // later look at how not to allocate pairs for this each time
        ArrayBlockingQueue<Pair<Object, MPISendMessage>> pendingSendMessages = new ArrayBlockingQueue<Pair<Object, MPISendMessage>>(MPIContext.sendPendingMax(cfg));
        pendingSendMessagesPerSource.put(s, pendingSendMessages);
        serializerMap.put(s, new MPIMessageSerializer(new KryoSerializer()));
    }
    int maxReceiveBuffers = MPIContext.receiveBufferCount(cfg);
    int receiveExecutorsSize = receivingExecutors().size();
    if (receiveExecutorsSize == 0) {
        receiveExecutorsSize = 1;
    }
    Set<Integer> execs = router.receivingExecutors();
    for (int e : execs) {
        int capacity = maxReceiveBuffers * 2 * receiveExecutorsSize;
        Queue<Pair<Object, MPIMessage>> pendingReceiveMessages = new ArrayBlockingQueue<Pair<Object, MPIMessage>>(capacity);
        pendingReceiveMessagesPerSource.put(e, pendingReceiveMessages);
        pendingReceiveDeSerializations.put(e, new ArrayBlockingQueue<MPIMessage>(capacity));
        deSerializerMap.put(e, new MPIMessageDeSerializer(new KryoSerializer()));
    }
    delegete.init(cfg, t, taskPlan, edge, router.receivingExecutors(), router.isLastReceiver(), this, pendingSendMessagesPerSource, pendingReceiveMessagesPerSource, pendingReceiveDeSerializations, serializerMap, deSerializerMap, false);
}
Also used : Set (java.util.Set) MPIMessageDeSerializer (edu.iu.dsc.tws.comms.mpi.io.MPIMessageDeSerializer) MessageSerializer (edu.iu.dsc.tws.comms.mpi.io.MessageSerializer) MPIMessageSerializer (edu.iu.dsc.tws.comms.mpi.io.MPIMessageSerializer) HashMap (java.util.HashMap) ArrayBlockingQueue (java.util.concurrent.ArrayBlockingQueue) PartitionRouter (edu.iu.dsc.tws.comms.routing.PartitionRouter) Queue (java.util.Queue) Pair (org.apache.commons.lang3.tuple.Pair) KryoSerializer (edu.iu.dsc.tws.comms.utils.KryoSerializer) MessageDeSerializer (edu.iu.dsc.tws.comms.mpi.io.MessageDeSerializer)
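
All three examples size the receive-side queues the same way: capacity = maxReceiveBuffers * 2 * receiveExecutorsSize, with the executor count clamped to at least 1 beforehand, since ArrayBlockingQueue rejects a capacity below 1. A standalone sketch of just that sizing step, with illustrative values standing in for the config lookups:

import java.util.Queue;
import java.util.concurrent.ArrayBlockingQueue;

public class ReceiveCapacitySketch {
    public static void main(String[] args) {
        int maxReceiveBuffers = 4;     // stand-in for MPIContext.receiveBufferCount(cfg)
        int receiveExecutorsSize = 0;  // e.g. no remote executors on this worker

        // clamp exactly as the init methods do; a zero capacity would make
        // new ArrayBlockingQueue<>(0) throw IllegalArgumentException
        if (receiveExecutorsSize == 0) {
            receiveExecutorsSize = 1;
        }
        int capacity = maxReceiveBuffers * 2 * receiveExecutorsSize;
        Queue<Object> pendingReceives = new ArrayBlockingQueue<>(capacity);
        System.out.println("receive queue capacity: " + capacity); // prints 8
    }
}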

Aggregations

PartitionRouter (edu.iu.dsc.tws.comms.routing.PartitionRouter): 3 uses
HashMap (java.util.HashMap): 3 uses
Queue (java.util.Queue): 3 uses
Set (java.util.Set): 3 uses
ArrayBlockingQueue (java.util.concurrent.ArrayBlockingQueue): 3 uses
MPIMessageDeSerializer (edu.iu.dsc.tws.comms.mpi.io.MPIMessageDeSerializer): 2 uses
MPIMessageSerializer (edu.iu.dsc.tws.comms.mpi.io.MPIMessageSerializer): 2 uses
MessageDeSerializer (edu.iu.dsc.tws.comms.mpi.io.MessageDeSerializer): 2 uses
MessageSerializer (edu.iu.dsc.tws.comms.mpi.io.MessageSerializer): 2 uses
KryoSerializer (edu.iu.dsc.tws.comms.utils.KryoSerializer): 2 uses
Pair (org.apache.commons.lang3.tuple.Pair): 2 uses
MessageDeSerializer (edu.iu.dsc.tws.api.comms.packing.MessageDeSerializer): 1 use
MessageSerializer (edu.iu.dsc.tws.api.comms.packing.MessageSerializer): 1 use
Int2ObjectOpenHashMap (it.unimi.dsi.fastutil.ints.Int2ObjectOpenHashMap): 1 use