Use of edu.iu.dsc.tws.api.compute.schedule.elements.WorkerSchedulePlan in project twister2 by DSC-SPIDAL.
The class ExecutionPlanBuilder, method build:
@Override
public ExecutionPlan build(Config cfg, ComputeGraph taskGraph, TaskSchedulePlan taskSchedule) {
// we need to build the task plan
LogicalPlan logicalPlan = TaskPlanBuilder.build(workerId, workerInfoList, taskSchedule, taskIdGenerator);
ParallelOperationFactory opFactory = new ParallelOperationFactory(cfg, network, logicalPlan);
Map<Integer, WorkerSchedulePlan> containersMap = taskSchedule.getContainersMap();
WorkerSchedulePlan conPlan = containersMap.get(workerId);
if (conPlan == null) {
LOG.log(Level.INFO, "Cannot find worker in the task plan: " + workerId);
return null;
}
ExecutionPlan execution = new ExecutionPlan();
Set<TaskInstancePlan> instancePlan = conPlan.getTaskInstances();
long tasksVersion = 0L;
if (CheckpointingContext.isCheckpointingEnabled(cfg)) {
Set<Integer> globalTasks = Collections.emptySet();
if (workerId == 0) {
globalTasks = containersMap.values().stream()
    .flatMap(containerPlan -> containerPlan.getTaskInstances().stream())
    .filter(ip -> taskGraph.vertex(ip.getTaskName()).getTask() instanceof CheckpointableTask
        && !(taskGraph.vertex(ip.getTaskName()).getTask() instanceof CheckpointingSGatherSink))
    .map(TaskInstancePlan::getTaskId)
    .collect(Collectors.toSet());
}
try {
Checkpoint.FamilyInitializeResponse familyInitializeResponse = this.checkpointingClient
    .initFamily(workerId, containersMap.size(), taskGraph.getGraphName(), globalTasks);
tasksVersion = familyInitializeResponse.getVersion();
} catch (BlockingSendException e) {
throw new RuntimeException("Failed to register tasks with Checkpoint Manager", e);
}
LOG.info("Tasks will start with version " + tasksVersion);
}
// for each task we are going to create the communications
for (TaskInstancePlan ip : instancePlan) {
Vertex v = taskGraph.vertex(ip.getTaskName());
Map<String, Set<String>> inEdges = new HashMap<>();
Map<String, String> outEdges = new HashMap<>();
if (v == null) {
throw new RuntimeException("Non-existing task scheduled: " + ip.getTaskName());
}
INode node = v.getTask();
if (node instanceof ICompute || node instanceof ISource) {
// let's get the communication
Set<Edge> edges = taskGraph.outEdges(v);
// now let's create the communication object
for (Edge e : edges) {
Vertex child = taskGraph.childOfTask(v, e.getName());
// let's figure out the parent's task ids
Set<Integer> srcTasks = taskIdGenerator.getTaskIds(v, ip.getTaskId());
Set<Integer> tarTasks = taskIdGenerator.getTaskIds(child, getTaskIdOfTask(child.getName(), taskSchedule));
Map<Integer, Integer> srcGlobalToIndex = taskIdGenerator.getGlobalTaskToIndex(v, ip.getTaskId());
Map<Integer, Integer> tarGlobaToIndex = taskIdGenerator.getGlobalTaskToIndex(child, getTaskIdOfTask(child.getName(), taskSchedule));
createCommunication(child, e, v, srcTasks, tarTasks, srcGlobalToIndex, tarGlobaToIndex);
outEdges.put(e.getName(), child.getName());
}
}
if (node instanceof ICompute) {
// let's get the parent tasks
Set<Edge> parentEdges = taskGraph.inEdges(v);
for (Edge e : parentEdges) {
Vertex parent = taskGraph.getParentOfTask(v, e.getName());
// let's figure out the parent's task ids
Set<Integer> srcTasks = taskIdGenerator.getTaskIds(parent, getTaskIdOfTask(parent.getName(), taskSchedule));
Set<Integer> tarTasks = taskIdGenerator.getTaskIds(v, ip.getTaskId());
Map<Integer, Integer> srcGlobalToIndex = taskIdGenerator.getGlobalTaskToIndex(parent, getTaskIdOfTask(parent.getName(), taskSchedule));
Map<Integer, Integer> tarGlobalToIndex = taskIdGenerator.getGlobalTaskToIndex(v, ip.getTaskId());
createCommunication(v, e, parent, srcTasks, tarTasks, srcGlobalToIndex, tarGlobalToIndex);
// if we are a grouped edge, we have to use the group name
String inEdge;
if (e.getTargetEdge() == null) {
inEdge = e.getName();
} else {
inEdge = e.getTargetEdge();
}
Set<String> parents = inEdges.get(inEdge);
if (parents == null) {
parents = new HashSet<>();
}
parents.add(inEdge);
inEdges.put(inEdge, parents);
}
}
// let's create the instance
INodeInstance iNodeInstance = createInstances(cfg, taskGraph.getGraphName(), ip, v, taskGraph.getOperationMode(), inEdges, outEdges, taskSchedule, tasksVersion);
// add to execution
execution.addNodes(v.getName(), taskIdGenerator.generateGlobalTaskId(ip.getTaskId(), ip.getTaskIndex()), iNodeInstance);
}
// now let's create the queues and start the execution
for (Table.Cell<String, String, Communication> cell : parOpTable.cellSet()) {
Communication c = cell.getValue();
// let's create the communication operation
OperationMode operationMode = taskGraph.getOperationMode();
IParallelOperation op;
assert c != null;
c.build();
if (c.getEdge().size() == 1) {
op = opFactory.build(c.getEdge(0), c.getSourceTasks(), c.getTargetTasks(), operationMode, c.srcGlobalToIndex, c.tarGlobalToIndex);
} else if (c.getEdge().size() > 1) {
// just join op for now. Could change in the future
// here the sources should be separated out for left and right edge
Set<Integer> sourceTasks = c.getSourceTasks();
Set<Integer> leftSources = new HashSet<>();
Set<Integer> rightSources = new HashSet<>();
if (!sourceTasks.isEmpty()) {
// just to safely do .get() calls without isPresent()
int minBin = (sourceTasks.stream().min(Integer::compareTo).get() / TaskIdGenerator.TASK_OFFSET) * TaskIdGenerator.TASK_OFFSET;
for (Integer source : sourceTasks) {
if ((source / TaskIdGenerator.TASK_OFFSET) * TaskIdGenerator.TASK_OFFSET == minBin) {
leftSources.add(source);
} else {
rightSources.add(source);
}
}
}
// now determine which task is connected to which edge
Edge leftEdge = c.getEdge(0);
Edge rightEdge = c.getEdge(1);
op = opFactory.build(leftEdge, rightEdge, leftSources, rightSources, c.getTargetTasks(), operationMode, c.srcGlobalToIndex, c.tarGlobalToIndex);
} else {
throw new RuntimeException("Cannot have communication with 0 edges");
}
// now let's check the sources and targets that are in this executor
Set<Integer> sourcesOfThisWorker = intersectionOfTasks(conPlan, c.getSourceTasks());
Set<Integer> targetsOfThisWorker = intersectionOfTasks(conPlan, c.getTargetTasks());
// we use the target edge as the group name
String targetEdge;
if (c.getEdge().size() > 1) {
targetEdge = c.getEdge(0).getTargetEdge();
} else {
targetEdge = c.getEdge(0).getName();
}
// so along with the operation mode, the windowing mode must be tested
if (operationMode == OperationMode.STREAMING) {
for (Integer i : sourcesOfThisWorker) {
boolean found = false;
// we can have multiple source tasks for an operation
for (int sIndex = 0; sIndex < c.getSourceTask().size(); sIndex++) {
String sourceTask = c.getSourceTask().get(sIndex);
if (streamingTaskInstances.contains(sourceTask, i)) {
TaskStreamingInstance taskStreamingInstance = streamingTaskInstances.get(sourceTask, i);
taskStreamingInstance.registerOutParallelOperation(c.getEdge(sIndex).getName(), op);
op.registerSync(i, taskStreamingInstance);
found = true;
} else if (streamingSourceInstances.contains(sourceTask, i)) {
SourceStreamingInstance sourceStreamingInstance = streamingSourceInstances.get(sourceTask, i);
sourceStreamingInstance.registerOutParallelOperation(c.getEdge(sIndex).getName(), op);
found = true;
}
if (!found) {
throw new RuntimeException("Not found: " + c.getSourceTask());
}
}
}
// we only have one target task always
for (Integer i : targetsOfThisWorker) {
if (streamingTaskInstances.contains(c.getTargetTask(), i)) {
TaskStreamingInstance taskStreamingInstance = streamingTaskInstances.get(c.getTargetTask(), i);
op.register(i, taskStreamingInstance.getInQueue());
taskStreamingInstance.registerInParallelOperation(targetEdge, op);
op.registerSync(i, taskStreamingInstance);
} else {
throw new RuntimeException("Not found: " + c.getTargetTask());
}
}
execution.addOps(op);
}
if (operationMode == OperationMode.BATCH) {
for (Integer i : sourcesOfThisWorker) {
boolean found = false;
// we can have multiple source tasks for an operation
for (int sIndex = 0; sIndex < c.getSourceTask().size(); sIndex++) {
String sourceTask = c.getSourceTask().get(sIndex);
if (batchTaskInstances.contains(sourceTask, i)) {
TaskBatchInstance taskBatchInstance = batchTaskInstances.get(sourceTask, i);
taskBatchInstance.registerOutParallelOperation(c.getEdge(sIndex).getName(), op);
found = true;
} else if (batchSourceInstances.contains(sourceTask, i)) {
SourceBatchInstance sourceBatchInstance = batchSourceInstances.get(sourceTask, i);
sourceBatchInstance.registerOutParallelOperation(c.getEdge(sIndex).getName(), op);
found = true;
}
}
if (!found) {
throw new RuntimeException("Not found: " + c.getSourceTask());
}
}
for (Integer i : targetsOfThisWorker) {
if (batchTaskInstances.contains(c.getTargetTask(), i)) {
TaskBatchInstance taskBatchInstance = batchTaskInstances.get(c.getTargetTask(), i);
op.register(i, taskBatchInstance.getInQueue());
taskBatchInstance.registerInParallelOperation(targetEdge, op);
op.registerSync(i, taskBatchInstance);
} else {
throw new RuntimeException("Not found: " + c.getTargetTask());
}
}
execution.addOps(op);
}
}
return execution;
}
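For reference, here is a minimal sketch of the plan lookup that opens build() above: find this worker's WorkerSchedulePlan in the TaskSchedulePlan and walk its TaskInstancePlans. It uses only constructors and accessors that appear in these listings (getContainersMap, getTaskInstances, the TaskInstancePlan/WorkerSchedulePlan/TaskSchedulePlan/Resource constructors); the class name PlanLookupSketch and the assumption that the companion classes share WorkerSchedulePlan's package are mine.

    import java.util.Collections;
    import java.util.HashSet;
    import java.util.Map;
    import java.util.Set;

    import edu.iu.dsc.tws.api.compute.schedule.elements.Resource;
    import edu.iu.dsc.tws.api.compute.schedule.elements.TaskInstancePlan;
    import edu.iu.dsc.tws.api.compute.schedule.elements.TaskSchedulePlan;
    import edu.iu.dsc.tws.api.compute.schedule.elements.WorkerSchedulePlan;

    public final class PlanLookupSketch {

      private PlanLookupSketch() {
      }

      // Mirrors the null check at the top of build(): a worker that does not
      // appear in the plan simply has nothing to execute.
      public static Set<TaskInstancePlan> instancesOnWorker(TaskSchedulePlan schedule, int workerId) {
        Map<Integer, WorkerSchedulePlan> containers = schedule.getContainersMap();
        WorkerSchedulePlan myPlan = containers.get(workerId);
        return myPlan == null ? Collections.emptySet() : myPlan.getTaskInstances();
      }

      public static void main(String[] args) {
        // Build a tiny plan for worker 0 using the constructors shown above.
        Resource small = new Resource(512.0, 1000.0, 1.0);
        Set<TaskInstancePlan> instances = new HashSet<>();
        instances.add(new TaskInstancePlan("source", 0, 0, small));
        instances.add(new TaskInstancePlan("sink", 1, 0, small));
        WorkerSchedulePlan worker0 =
            new WorkerSchedulePlan(0, instances, new Resource(2048.0, 4000.0, 4.0));
        Set<WorkerSchedulePlan> workers = new HashSet<>();
        workers.add(worker0);
        TaskSchedulePlan schedule = new TaskSchedulePlan(0, workers);

        for (TaskInstancePlan ip : instancesOnWorker(schedule, 0)) {
          System.out.println(ip.getTaskName() + " -> id " + ip.getTaskId() + ", index " + ip.getTaskIndex());
        }
        System.out.println("worker 5 has " + instancesOnWorker(schedule, 5).size() + " instances");
      }
    }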
Use of edu.iu.dsc.tws.api.compute.schedule.elements.WorkerSchedulePlan in project twister2 by DSC-SPIDAL.
The class RoundRobinBatchTaskScheduler, method schedule:
/**
* This is the base method which receives the dataflow taskgraph and the worker plan to allocate
* the task instances to the appropriate workers with their required ram, disk, and cpu values.
*/
@Override
public TaskSchedulePlan schedule(ComputeGraph computeGraph, WorkerPlan workerPlan) {
Map<Integer, List<TaskInstanceId>> containerInstanceMap;
Map<Integer, WorkerSchedulePlan> containerPlans = new LinkedHashMap<>();
for (int i = 0; i < workerPlan.getNumberOfWorkers(); i++) {
roundRobinAllocation.put(i, new ArrayList<>());
}
// Retrieve the batch task instances (it may be a single task vertex or a batch of task vertices)
Set<Vertex> taskVertexSet = new LinkedHashSet<>(computeGraph.getTaskVertexSet());
TaskVertexParser taskGraphParser = new TaskVertexParser();
List<Set<Vertex>> taskVertexList = taskGraphParser.parseVertexSet(computeGraph);
for (Set<Vertex> vertexSet : taskVertexList) {
if (vertexSet.size() > 1) {
containerInstanceMap = roundRobinBatchSchedulingAlgorithm(computeGraph, vertexSet);
} else {
Vertex vertex = vertexSet.iterator().next();
containerInstanceMap = roundRobinBatchSchedulingAlgorithm(computeGraph, vertex);
}
TaskInstanceMapCalculation instanceMapCalculation = new TaskInstanceMapCalculation(this.instanceRAM, this.instanceCPU, this.instanceDisk);
Map<Integer, Map<TaskInstanceId, Double>> instancesRamMap = instanceMapCalculation.getInstancesRamMapInContainer(containerInstanceMap, taskVertexSet);
Map<Integer, Map<TaskInstanceId, Double>> instancesDiskMap = instanceMapCalculation.getInstancesDiskMapInContainer(containerInstanceMap, taskVertexSet);
Map<Integer, Map<TaskInstanceId, Double>> instancesCPUMap = instanceMapCalculation.getInstancesCPUMapInContainer(containerInstanceMap, taskVertexSet);
for (int containerId : containerInstanceMap.keySet()) {
double containerRAMValue = TaskSchedulerContext.containerRamPadding(config);
double containerDiskValue = TaskSchedulerContext.containerDiskPadding(config);
double containerCpuValue = TaskSchedulerContext.containerCpuPadding(config);
List<TaskInstanceId> taskTaskInstanceIds = containerInstanceMap.get(containerId);
Map<TaskInstanceId, TaskInstancePlan> taskInstancePlanMap = new HashMap<>();
for (TaskInstanceId id : taskTaskInstanceIds) {
double instanceRAMValue = instancesRamMap.get(containerId).get(id);
double instanceDiskValue = instancesDiskMap.get(containerId).get(id);
double instanceCPUValue = instancesCPUMap.get(containerId).get(id);
Resource instanceResource = new Resource(instanceRAMValue, instanceDiskValue, instanceCPUValue);
taskInstancePlanMap.put(id, new TaskInstancePlan(id.getTaskName(), id.getTaskId(), id.getTaskIndex(), instanceResource));
containerRAMValue += instanceRAMValue;
containerDiskValue += instanceDiskValue;
containerCpuValue += instanceCPUValue;
}
Worker worker = workerPlan.getWorker(containerId);
Resource containerResource;
if (worker != null && worker.getCpu() > 0 && worker.getDisk() > 0 && worker.getRam() > 0) {
containerResource = new Resource((double) worker.getRam(), (double) worker.getDisk(), (double) worker.getCpu());
} else {
containerResource = new Resource(containerRAMValue, containerDiskValue, containerCpuValue);
}
WorkerSchedulePlan taskWorkerSchedulePlan;
if (containerPlans.containsKey(containerId)) {
taskWorkerSchedulePlan = containerPlans.get(containerId);
taskWorkerSchedulePlan.getTaskInstances().addAll(taskInstancePlanMap.values());
} else {
taskWorkerSchedulePlan = new WorkerSchedulePlan(containerId, new HashSet<>(taskInstancePlanMap.values()), containerResource);
containerPlans.put(containerId, taskWorkerSchedulePlan);
}
}
}
return new TaskSchedulePlan(0, new HashSet<>(containerPlans.values()));
}
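The scheduler above delegates the actual placement to roundRobinBatchSchedulingAlgorithm, which is not shown here. Purely as an illustration of the round-robin idea, and not the project's implementation, instances can be dealt out to workers by cycling a counter; the class and method names below are made up for the sketch, and the real algorithm additionally tracks resources and vertex batches.

    import java.util.ArrayList;
    import java.util.LinkedHashMap;
    import java.util.List;
    import java.util.Map;

    public final class RoundRobinSketch {

      private RoundRobinSketch() {
      }

      // Instance k (counted across all tasks) goes to worker k % numberOfWorkers,
      // producing the same Map<Integer, List<...>> shape as roundRobinAllocation above.
      public static Map<Integer, List<String>> assign(Map<String, Integer> taskParallelism,
                                                      int numberOfWorkers) {
        Map<Integer, List<String>> allocation = new LinkedHashMap<>();
        for (int i = 0; i < numberOfWorkers; i++) {
          allocation.put(i, new ArrayList<>());
        }
        int globalIndex = 0;
        for (Map.Entry<String, Integer> entry : taskParallelism.entrySet()) {
          for (int taskIndex = 0; taskIndex < entry.getValue(); taskIndex++) {
            allocation.get(globalIndex % numberOfWorkers).add(entry.getKey() + "[" + taskIndex + "]");
            globalIndex++;
          }
        }
        return allocation;
      }

      public static void main(String[] args) {
        Map<String, Integer> graph = new LinkedHashMap<>();
        graph.put("source", 4);
        graph.put("compute", 4);
        graph.put("sink", 2);
        // With 2 workers: worker 0 gets source[0], source[2], compute[0], compute[2], sink[0];
        // worker 1 gets the remaining instances.
        System.out.println(assign(graph, 2));
      }
    }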
Use of edu.iu.dsc.tws.api.compute.schedule.elements.WorkerSchedulePlan in project twister2 by DSC-SPIDAL.
The class DataLocalityStreamingTaskScheduler, method schedule:
/**
* This is the base method for the data locality aware task scheduling for scheduling the
* streaming task instances. It retrieves the task vertex set of the task graph and sends the set
* to the data locality aware scheduling algorithm, which schedules the streaming task instances
* closer to the data nodes.
*/
@Override
public TaskSchedulePlan schedule(ComputeGraph graph, WorkerPlan workerPlan) {
// Represents task schedule plan Id
int taskSchedulePlanId = 0;
Set<WorkerSchedulePlan> workerSchedulePlans = new HashSet<>();
Set<Vertex> taskVertexSet = graph.getTaskVertexSet();
Map<Integer, List<TaskInstanceId>> containerInstanceMap = dataLocalityStreamingSchedulingAlgorithm(graph, workerPlan.getNumberOfWorkers(), workerPlan);
TaskInstanceMapCalculation instanceMapCalculation = new TaskInstanceMapCalculation(this.instanceRAM, this.instanceCPU, this.instanceDisk);
Map<Integer, Map<TaskInstanceId, Double>> instancesRamMap = instanceMapCalculation.getInstancesRamMapInContainer(containerInstanceMap, taskVertexSet);
Map<Integer, Map<TaskInstanceId, Double>> instancesDiskMap = instanceMapCalculation.getInstancesDiskMapInContainer(containerInstanceMap, taskVertexSet);
Map<Integer, Map<TaskInstanceId, Double>> instancesCPUMap = instanceMapCalculation.getInstancesCPUMapInContainer(containerInstanceMap, taskVertexSet);
for (int containerId : containerInstanceMap.keySet()) {
double containerRAMValue = TaskSchedulerContext.containerRamPadding(config);
double containerDiskValue = TaskSchedulerContext.containerDiskPadding(config);
double containerCpuValue = TaskSchedulerContext.containerCpuPadding(config);
List<TaskInstanceId> taskTaskInstanceIds = containerInstanceMap.get(containerId);
Map<TaskInstanceId, TaskInstancePlan> taskInstancePlanMap = new HashMap<>();
for (TaskInstanceId id : taskTaskInstanceIds) {
double instanceRAMValue = instancesRamMap.get(containerId).get(id);
double instanceDiskValue = instancesDiskMap.get(containerId).get(id);
double instanceCPUValue = instancesCPUMap.get(containerId).get(id);
Resource instanceResource = new Resource(instanceRAMValue, instanceDiskValue, instanceCPUValue);
taskInstancePlanMap.put(id, new TaskInstancePlan(id.getTaskName(), id.getTaskId(), id.getTaskIndex(), instanceResource));
containerRAMValue += instanceRAMValue;
containerDiskValue += instanceDiskValue;
containerCpuValue += instanceCPUValue;
}
Worker worker = workerPlan.getWorker(containerId);
Resource containerResource;
if (worker != null && worker.getCpu() > 0 && worker.getDisk() > 0 && worker.getRam() > 0) {
containerResource = new Resource((double) worker.getRam(), (double) worker.getDisk(), (double) worker.getCpu());
} else {
containerResource = new Resource(containerRAMValue, containerDiskValue, containerCpuValue);
}
WorkerSchedulePlan taskWorkerSchedulePlan = new WorkerSchedulePlan(containerId, new HashSet<>(taskInstancePlanMap.values()), containerResource);
workerSchedulePlans.add(taskWorkerSchedulePlan);
}
return new TaskSchedulePlan(taskSchedulePlanId, workerSchedulePlans);
}
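The per-container accounting in this scheduler (and in the round-robin one above) follows one pattern: start from the configured padding, add each instance's requirement along each dimension, and only then consider the worker's declared resources. Below is a condensed sketch of just the accumulation step, using only the Resource constructor that appears in the listings; the helper name and the flat-array parameter shape are assumptions, and each dimension accumulates its own value, including CPU.

    import edu.iu.dsc.tws.api.compute.schedule.elements.Resource;

    public final class ContainerAccountingSketch {

      private ContainerAccountingSketch() {
      }

      // Sum the padding plus every instance requirement per dimension and wrap the
      // totals in a Resource, as the schedulers above do before the worker override.
      public static Resource accumulate(double[] instanceRam, double[] instanceDisk, double[] instanceCpu,
                                        double ramPadding, double diskPadding, double cpuPadding) {
        double ram = ramPadding;
        double disk = diskPadding;
        double cpu = cpuPadding;
        for (int i = 0; i < instanceRam.length; i++) {
          ram += instanceRam[i];
          disk += instanceDisk[i];
          cpu += instanceCpu[i];
        }
        return new Resource(ram, disk, cpu);
      }

      public static void main(String[] args) {
        // Two instances, each needing 512 RAM, 1000 disk, 1 CPU (units as configured),
        // on top of a padding of 128 / 200 / 0.5: totals are 1152 / 2200 / 2.5.
        Resource total = accumulate(new double[]{512, 512}, new double[]{1000, 1000},
            new double[]{1, 1}, 128, 200, 0.5);
        System.out.println(total);
      }
    }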
Use of edu.iu.dsc.tws.api.compute.schedule.elements.WorkerSchedulePlan in project twister2 by DSC-SPIDAL.
The class UserDefinedTaskScheduler, method schedule:
/**
* This is the base method which receives the dataflow taskgraph and the worker plan to allocate
* the task instances to the appropriate workers with their required ram, disk, and cpu values.
*
* @return TaskSchedulePlan
*/
@Override
public TaskSchedulePlan schedule(ComputeGraph graph, WorkerPlan workerPlan) {
int taskSchedulePlanId = 0;
// Allocate the task instances into the containers/workers
Set<WorkerSchedulePlan> workerSchedulePlans = new LinkedHashSet<>();
// To get the vertex set from the taskgraph
Set<Vertex> taskVertexSet = graph.getTaskVertexSet();
// Allocate the task instances into the logical containers.
Map<Integer, List<TaskInstanceId>> userDefinedContainerInstanceMap = userDefinedSchedulingAlgorithm(graph, workerPlan.getNumberOfWorkers());
TaskInstanceMapCalculation instanceMapCalculation = new TaskInstanceMapCalculation(this.instanceRAM, this.instanceCPU, this.instanceDisk);
Map<Integer, Map<TaskInstanceId, Double>> instancesRamMap = instanceMapCalculation.getInstancesRamMapInContainer(userDefinedContainerInstanceMap, taskVertexSet);
Map<Integer, Map<TaskInstanceId, Double>> instancesDiskMap = instanceMapCalculation.getInstancesDiskMapInContainer(userDefinedContainerInstanceMap, taskVertexSet);
Map<Integer, Map<TaskInstanceId, Double>> instancesCPUMap = instanceMapCalculation.getInstancesCPUMapInContainer(userDefinedContainerInstanceMap, taskVertexSet);
for (int containerId : userDefinedContainerInstanceMap.keySet()) {
double containerRAMValue = TaskSchedulerContext.containerRamPadding(config);
double containerDiskValue = TaskSchedulerContext.containerDiskPadding(config);
double containerCpuValue = TaskSchedulerContext.containerCpuPadding(config);
List<TaskInstanceId> taskTaskInstanceIds = userDefinedContainerInstanceMap.get(containerId);
Map<TaskInstanceId, TaskInstancePlan> taskInstancePlanMap = new HashMap<>();
for (TaskInstanceId id : taskTaskInstanceIds) {
double instanceRAMValue = instancesRamMap.get(containerId).get(id);
double instanceDiskValue = instancesDiskMap.get(containerId).get(id);
double instanceCPUValue = instancesCPUMap.get(containerId).get(id);
Resource instanceResource = new Resource(instanceRAMValue, instanceDiskValue, instanceCPUValue);
taskInstancePlanMap.put(id, new TaskInstancePlan(id.getTaskName(), id.getTaskId(), id.getTaskIndex(), instanceResource));
containerRAMValue += instanceRAMValue;
containerDiskValue += instanceDiskValue;
containerCpuValue += instanceCPUValue;
}
Worker worker = workerPlan.getWorker(containerId);
Resource containerResource;
// Create the container resource value based on the worker plan
if (worker != null && worker.getCpu() > 0 && worker.getDisk() > 0 && worker.getRam() > 0) {
containerResource = new Resource((double) worker.getRam(), (double) worker.getDisk(), (double) worker.getCpu());
LOG.fine("Worker (if loop):" + containerId + "\tRam:" + worker.getRam() + "\tDisk:" + worker.getDisk() + "\tCpu:" + worker.getCpu());
} else {
containerResource = new Resource(containerRAMValue, containerDiskValue, containerCpuValue);
LOG.fine("Worker (else loop):" + containerId + "\tRam:" + containerRAMValue + "\tDisk:" + containerDiskValue + "\tCpu:" + containerCpuValue);
}
// Schedule the task instance plan into the task container plan.
WorkerSchedulePlan taskWorkerSchedulePlan = new WorkerSchedulePlan(containerId, new HashSet<>(taskInstancePlanMap.values()), containerResource);
workerSchedulePlans.add(taskWorkerSchedulePlan);
}
return new TaskSchedulePlan(taskSchedulePlanId, workerSchedulePlans);
}
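All of the schedulers in this section repeat the same branch when choosing the container Resource: prefer the worker's declared ram/disk/cpu when all three are known, otherwise fall back to the accumulated values. A small extraction of that decision is sketched below; it assumes Worker lives alongside WorkerSchedulePlan (the import path is a guess) and uses only the getters seen in the listings.

    import edu.iu.dsc.tws.api.compute.schedule.elements.Resource;
    import edu.iu.dsc.tws.api.compute.schedule.elements.Worker;

    public final class ContainerResourceChoice {

      private ContainerResourceChoice() {
      }

      // Prefer the worker's declared resources when every dimension is positive;
      // otherwise use the padded sum computed from the scheduled instances.
      public static Resource choose(Worker worker, Resource computed) {
        if (worker != null && worker.getCpu() > 0 && worker.getDisk() > 0 && worker.getRam() > 0) {
          return new Resource((double) worker.getRam(), (double) worker.getDisk(), (double) worker.getCpu());
        }
        return computed;
      }
    }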
Use of edu.iu.dsc.tws.api.compute.schedule.elements.WorkerSchedulePlan in project twister2 by DSC-SPIDAL.
The class TaskSchedulePlanBuilder, method getContainers:
/**
* Get the containers based on the task schedule plan
*/
private Map<Integer, Container> getContainers(TaskSchedulePlan previoustaskschedulePlan) throws TaskSchedulerException {
Map<Integer, Container> containerMap = new HashMap<>();
Resource resource = previoustaskschedulePlan.getMaxContainerResources();
for (WorkerSchedulePlan currentWorkerSchedulePlan : previoustaskschedulePlan.getContainers()) {
Container container = new Container(currentWorkerSchedulePlan.getContainerId(), resource, requestedContainerPadding);
for (TaskInstancePlan instancePlan : currentWorkerSchedulePlan.getTaskInstances()) {
try {
addToContainer(container, instancePlan, taskIndexes, taskIds);
} catch (TaskSchedulerException e) {
throw new TaskSchedulerException(String.format("Insufficient container resources to add instancePlan %s to container %s", instancePlan, container), e);
}
}
containerMap.put(currentWorkerSchedulePlan.getContainerId(), container);
}
return containerMap;
}
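getContainers() turns a previous TaskSchedulePlan into per-container working state keyed by container id. Going the other way is sometimes useful when inspecting a plan; the sketch below indexes which container ids host each task, using only getContainers(), getContainerId(), getTaskInstances(), and getTaskName() from the listings (the class name is made up, and the companion classes are assumed to sit in WorkerSchedulePlan's package).

    import java.util.HashMap;
    import java.util.HashSet;
    import java.util.Map;
    import java.util.Set;

    import edu.iu.dsc.tws.api.compute.schedule.elements.TaskInstancePlan;
    import edu.iu.dsc.tws.api.compute.schedule.elements.TaskSchedulePlan;
    import edu.iu.dsc.tws.api.compute.schedule.elements.WorkerSchedulePlan;

    public final class PlanIndexSketch {

      private PlanIndexSketch() {
      }

      // For each task name, collect the container ids that host at least one of its instances.
      public static Map<String, Set<Integer>> taskToContainers(TaskSchedulePlan plan) {
        Map<String, Set<Integer>> index = new HashMap<>();
        for (WorkerSchedulePlan worker : plan.getContainers()) {
          for (TaskInstancePlan instance : worker.getTaskInstances()) {
            index.computeIfAbsent(instance.getTaskName(), k -> new HashSet<>())
                .add(worker.getContainerId());
          }
        }
        return index;
      }
    }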