use of edu.iu.dsc.tws.comms.core.TaskPlan in project twister2 by DSC-SPIDAL.
The class WordAggregate, method init.
/**
 * Initializes this aggregating receiver for the given dataflow operation.
 * Records the id of the executor this instance runs on and logs the
 * expected task ids for the final aggregation.
 *
 * @param cfg twister2 configuration (unused here, required by the interface)
 * @param op the dataflow operation this receiver is attached to
 * @param expectedIds expected task ids, keyed by target then by path
 */
@Override
public void init(Config cfg, DataFlowOperation op, Map<Integer, Map<Integer, List<Integer>>> expectedIds) {
  // Fetch the task plan once and reuse it; the original fetched it twice.
  TaskPlan plan = op.getTaskPlan();
  this.executor = plan.getThisExecutor();
  LOG.fine(String.format("%d final expected task ids %s", plan.getThisExecutor(), expectedIds));
}
use of edu.iu.dsc.tws.comms.core.TaskPlan in project twister2 by DSC-SPIDAL.
The class WordCountUtils, method createWordCountPlan.
/**
 * Creates the task plan for the word count example.
 *
 * <p>Tasks are assigned in contiguous ranges of {@code noOfTasks / noOfProcs}
 * per container (each container is treated as one executor), and containers
 * running on the same worker node are grouped together.
 * NOTE(review): integer division silently drops remainder tasks when
 * {@code noOfTasks} is not a multiple of the container count — confirm
 * callers always pass a multiple.
 *
 * @param cfg twister2 configuration (currently unused, kept for API consistency)
 * @param plan the resource plan from the scheduler
 * @param noOfTasks the total number of tasks to distribute
 * @return the task plan mapping executors to task ids and node groups to executors
 */
public static TaskPlan createWordCountPlan(Config cfg, ResourcePlan plan, int noOfTasks) {
  int noOfProcs = plan.noOfContainers();
  LOG.log(Level.INFO, "No of containers: " + noOfProcs);
  Map<Integer, Set<Integer>> executorToGraphNodes = new HashMap<>();
  Map<Integer, Set<Integer>> groupsToExecutors = new HashMap<>();
  int thisExecutor = plan.getThisId();

  // Group the containers by the worker (node) name they are placed on.
  List<ResourceContainer> containers = plan.getContainers();
  Map<String, List<ResourceContainer>> containersPerNode = new HashMap<>();
  for (ResourceContainer c : containers) {
    String name = (String) c.getProperty(SchedulerContext.WORKER_NAME);
    containersPerNode.computeIfAbsent(name, k -> new ArrayList<>()).add(c);
  }

  // Assign a contiguous block of task ids to each executor.
  int taskPerExecutor = noOfTasks / noOfProcs;
  for (int i = 0; i < noOfProcs; i++) {
    Set<Integer> nodesOfExecutor = new HashSet<>();
    for (int j = 0; j < taskPerExecutor; j++) {
      nodesOfExecutor.add(i * taskPerExecutor + j);
    }
    executorToGraphNodes.put(i, nodesOfExecutor);
  }

  // We take each container as an executor; all executors on one node form a group.
  int groupId = 0;
  for (Map.Entry<String, List<ResourceContainer>> e : containersPerNode.entrySet()) {
    Set<Integer> executorsOfGroup = new HashSet<>();
    for (ResourceContainer c : e.getValue()) {
      executorsOfGroup.add(c.getId());
    }
    groupsToExecutors.put(groupId, executorsOfGroup);
    groupId++;
  }

  LOG.fine(String.format("%d Executor To Graph: %s", plan.getThisId(), executorToGraphNodes));
  LOG.fine(String.format("%d Groups to executors: %s", plan.getThisId(), groupsToExecutors));
  return new TaskPlan(executorToGraphNodes, groupsToExecutors, thisExecutor);
}
use of edu.iu.dsc.tws.comms.core.TaskPlan in project twister2 by DSC-SPIDAL.
The class PingPongCommunication, method init.
/**
 * Initialize the container: builds the task plan, creates the network and
 * a direct dataflow operation from task 0 to task 1, and then drives
 * communication progress (container 0 also starts a producer thread).
 *
 * @param cfg twister2 configuration
 * @param containerId id of this container (0 sends, 1 receives)
 * @param plan the resource plan handed in by the scheduler
 */
public void init(Config cfg, int containerId, ResourcePlan plan) {
LOG.log(Level.INFO, "Starting the example with container id: " + plan.getThisId());
this.status = Status.INIT;
// lets create the task plan
TaskPlan taskPlan = Utils.createTaskPlan(cfg, plan);
// set up the network using the task plan
TWSNetwork network = new TWSNetwork(cfg, taskPlan);
TWSCommunication channel = network.getDataFlowTWSCommunication();
// we are sending messages from 0th task to 1st task
Set<Integer> sources = new HashSet<>();
sources.add(0);
int dests = 1;
Map<String, Object> newCfg = new HashMap<>();
LOG.info("Setting up reduce dataflow operation");
// this method calls the init method
// NOTE(review): original author flagged this as possibly wrong — confirm
// whether direct() should perform the init itself
direct = channel.direct(newCfg, MessageType.OBJECT, 0, sources, dests, new PingPongReceive());
if (containerId == 0) {
// the map thread where data is produced
Thread mapThread = new Thread(new MapWorker());
LOG.log(Level.INFO, "Starting map thread");
mapThread.start();
// we need to progress the communication
// NOTE(review): this loop never terminates — the sender spins forever
// even after the map thread finishes; confirm this is intentional for
// the example
while (true) {
// progress the channel
channel.progress();
// we should progress the communication directive
direct.progress();
Thread.yield();
}
} else if (containerId == 1) {
// receiver: keep progressing until PingPongReceive flips status to
// LOAD_RECEIVE_FINISHED
while (status != Status.LOAD_RECEIVE_FINISHED) {
channel.progress();
direct.progress();
}
}
}
use of edu.iu.dsc.tws.comms.core.TaskPlan in project twister2 by DSC-SPIDAL.
The class PingPongCommunicationTaskBased, method init.
/**
 * Initialize the container: builds the task plan and network, creates a
 * direct dataflow operation from task 0 to task 1, hands it to a
 * fixed-thread task executor, and submits the producer task on container 0.
 *
 * @param cfg twister2 configuration
 * @param containerId id of this container (0 produces, 1 receives)
 * @param plan the resource plan handed in by the scheduler
 */
public void init(Config cfg, int containerId, ResourcePlan plan) {
LOG.log(Level.INFO, "Starting the example with container id: " + plan.getThisId());
// Creates task an task executor instance to be used in this container
taskExecutor = new TaskExecutorFixedThread();
this.status = Status.INIT;
// lets create the task plan
TaskPlan taskPlan = Utils.createTaskPlan(cfg, plan);
// set up the network using the task plan
TWSNetwork network = new TWSNetwork(cfg, taskPlan);
TWSCommunication channel = network.getDataFlowTWSCommunication();
// we are sending messages from 0th task to 1st task
Set<Integer> sources = new HashSet<>();
sources.add(0);
int dests = 1;
Map<String, Object> newCfg = new HashMap<>();
LOG.info("Setting up reduce dataflow operation");
// this method calls the init method
// NOTE(review): original author flagged this as possibly wrong — confirm
// TODO: Does the task generate the communication or is it done by a controller for examples
// the direct comm between task 0 and 1 is it done by the container or the the task
// TODO: if the task creates the dataflowop does the task progress it or the executor
// TODO : FOR NOW the dataflowop is created at container and sent to task
LinkedQueue<Message> pongQueue = new LinkedQueue<Message>();
// queue id 0 carries the pong messages between the tasks
taskExecutor.registerQueue(0, pongQueue);
direct = channel.direct(newCfg, MessageType.OBJECT, 0, sources, dests, new PingPongReceive());
taskExecutor.initCommunication(channel, direct);
if (containerId == 0) {
// the map thread where data is produced
LOG.log(Level.INFO, "Starting map thread");
taskExecutor.registerTask(new MapWorker(0, direct));
LOG.log(Level.INFO, "registered map thread");
taskExecutor.submitTask(0);
// progres() is the executor's progress-loop entry point (project API name)
taskExecutor.progres();
} else if (containerId == 1) {
// receiver side only needs to drive the executor's progress loop
taskExecutor.progres();
}
}
use of edu.iu.dsc.tws.comms.core.TaskPlan in project twister2 by DSC-SPIDAL.
The class SimpleMultiTaskGraph, method init.
/**
 * Init method to submit the task to the executor.
 *
 * <p>Builds the task plan and a direct dataflow operation, generates a
 * two-edge dataflow task graph (source -> sink -> sink1), parses it into a
 * schedule, and submits the parsed tasks as a pipelined execution.
 *
 * @param cfg twister2 configuration
 * @param containerId id of this container
 * @param plan the resource plan from the scheduler
 */
public void init(Config cfg, int containerId, ResourcePlan plan) {
  LOG.log(Level.INFO, "Starting the example with container id: " + plan.getThisId());
  taskExecutor = new TaskExecutorFixedThread();
  this.status = SimpleMultiTaskGraph.Status.INIT;

  // Build the task plan and wire up the direct comms from task 0 to task 1.
  TaskPlan taskPlan = Utils.createTaskPlan(cfg, plan);
  TWSNetwork network = new TWSNetwork(cfg, taskPlan);
  TWSCommunication channel = network.getDataFlowTWSCommunication();
  Set<Integer> sources = new HashSet<>();
  sources.add(0);
  int destination = 1;
  Map<String, Object> newCfg = new HashMap<>();
  LinkedQueue<Message> pongQueue = new LinkedQueue<Message>();
  taskExecutor.registerQueue(0, pongQueue);
  direct = channel.direct(newCfg, MessageType.OBJECT, 0, sources, destination, new SimpleMultiTaskGraph.PingPongReceive());
  taskExecutor.initCommunication(channel, direct);

  // For dataflow task graph generation, call the dataflow task graph generator.
  MapWorker sourceTask = new MapWorker(0, direct);
  ReceiveWorker sinkTask = new ReceiveWorker(1);
  // later we can add a different task
  ReceiveWorker sinkTask1 = new ReceiveWorker(2);
  dataflowTaskGraph = new DataflowTaskGraphGenerator()
      .generateDataflowGraph(sourceTask, sinkTask, direct)
      .generateDataflowGraph(sinkTask, sinkTask1, direct);
  if (dataflowTaskGraph != null) {
    dataflowTaskGraphParser = new DataflowTaskGraphParser(dataflowTaskGraph);
    parsedTaskSet = dataflowTaskGraphParser.dataflowTaskGraphParseAndSchedule();
  }

  // The task graph scheduler is constructed; submit the parsed tasks.
  // BUG FIX: guard against parsedTaskSet being null — it is only assigned
  // when graph generation succeeds, so the original isEmpty() call could
  // throw a NullPointerException.
  // NOTE(review): the containerId > 1 condition looks suspicious for a
  // two-container example — confirm which container is meant to submit.
  if (parsedTaskSet != null && !parsedTaskSet.isEmpty() && containerId > 1) {
    List<Task> taskList = new ArrayList<>(parsedTaskSet);
    LOG.info("Submitting Pipeline Task:" + taskList.size());
    taskExecutionOptimizer = new TaskExecutionOptimizer(taskExecutor);
    PipelinedTask pipelinedTask = new PipelinedTask(1, taskList);
    pipelinedTask.execute();
  }
}
Aggregations