use of edu.iu.dsc.tws.api.compute.schedule.elements.Resource in project twister2 by DSC-SPIDAL.
the class DataLocalityBatchTaskScheduler method schedule.
/**
 * This is the base method for data locality aware scheduling of batch task instances. It
 * retrieves the task vertex set of the task graph and sends it to the data locality aware
 * scheduling algorithm, which allocates the batch task instances closer to the data nodes.
*/
@Override
public TaskSchedulePlan schedule(ComputeGraph graph, WorkerPlan workerPlan) {
  LinkedHashMap<Integer, WorkerSchedulePlan> containerPlans = new LinkedHashMap<>();
  for (int i = 0; i < workerPlan.getNumberOfWorkers(); i++) {
    dataLocalityAwareAllocation.put(i, new ArrayList<>());
  }
  LinkedHashSet<Vertex> taskVertexSet = new LinkedHashSet<>(graph.getTaskVertexSet());
  TaskVertexParser taskVertexParser = new TaskVertexParser();
  List<Set<Vertex>> taskVertexList = taskVertexParser.parseVertexSet(graph);
  for (Set<Vertex> vertexSet : taskVertexList) {
    Map<Integer, List<TaskInstanceId>> containerInstanceMap;
    if (vertexSet.size() > 1) {
      containerInstanceMap = dataLocalityBatchSchedulingAlgorithm(graph, vertexSet, workerPlan);
    } else {
      Vertex vertex = vertexSet.iterator().next();
      containerInstanceMap = dataLocalityBatchSchedulingAlgorithm(graph, vertex, workerPlan);
    }
    TaskInstanceMapCalculation instanceMapCalculation =
        new TaskInstanceMapCalculation(this.instanceRAM, this.instanceCPU, this.instanceDisk);
    Map<Integer, Map<TaskInstanceId, Double>> instancesRamMap =
        instanceMapCalculation.getInstancesRamMapInContainer(containerInstanceMap, taskVertexSet);
    Map<Integer, Map<TaskInstanceId, Double>> instancesDiskMap =
        instanceMapCalculation.getInstancesDiskMapInContainer(containerInstanceMap, taskVertexSet);
    Map<Integer, Map<TaskInstanceId, Double>> instancesCPUMap =
        instanceMapCalculation.getInstancesCPUMapInContainer(containerInstanceMap, taskVertexSet);
    for (int containerId : containerInstanceMap.keySet()) {
      // Container totals start at the configured padding values and grow with each instance.
      double containerRAMValue = TaskSchedulerContext.containerRamPadding(config);
      double containerDiskValue = TaskSchedulerContext.containerDiskPadding(config);
      double containerCpuValue = TaskSchedulerContext.containerCpuPadding(config);
      List<TaskInstanceId> taskTaskInstanceIds = containerInstanceMap.get(containerId);
      Map<TaskInstanceId, TaskInstancePlan> taskInstancePlanMap = new HashMap<>();
      for (TaskInstanceId id : taskTaskInstanceIds) {
        double instanceRAMValue = instancesRamMap.get(containerId).get(id);
        double instanceDiskValue = instancesDiskMap.get(containerId).get(id);
        double instanceCPUValue = instancesCPUMap.get(containerId).get(id);
        Resource instanceResource =
            new Resource(instanceRAMValue, instanceDiskValue, instanceCPUValue);
        taskInstancePlanMap.put(id, new TaskInstancePlan(
            id.getTaskName(), id.getTaskId(), id.getTaskIndex(), instanceResource));
        containerRAMValue += instanceRAMValue;
        containerDiskValue += instanceDiskValue;
        containerCpuValue += instanceCPUValue;
      }
      Worker worker = workerPlan.getWorker(containerId);
      Resource containerResource;
      // Prefer the resource values declared in the worker plan; otherwise fall back
      // to the accumulated instance values.
      if (worker != null && worker.getCpu() > 0 && worker.getDisk() > 0 && worker.getRam() > 0) {
        containerResource = new Resource(
            (double) worker.getRam(), (double) worker.getDisk(), (double) worker.getCpu());
      } else {
        containerResource = new Resource(containerRAMValue, containerDiskValue, containerCpuValue);
      }
      WorkerSchedulePlan taskWorkerSchedulePlan;
      if (containerPlans.containsKey(containerId)) {
        taskWorkerSchedulePlan = containerPlans.get(containerId);
        taskWorkerSchedulePlan.getTaskInstances().addAll(taskInstancePlanMap.values());
      } else {
        taskWorkerSchedulePlan = new WorkerSchedulePlan(
            containerId, new HashSet<>(taskInstancePlanMap.values()), containerResource);
        containerPlans.put(containerId, taskWorkerSchedulePlan);
      }
    }
  }
  TaskSchedulePlan taskSchedulePlan =
      new TaskSchedulePlan(0, new HashSet<>(containerPlans.values()));
  Map<Integer, WorkerSchedulePlan> containersMap = taskSchedulePlan.getContainersMap();
  for (Map.Entry<Integer, WorkerSchedulePlan> entry : containersMap.entrySet()) {
    Integer integer = entry.getKey();
    WorkerSchedulePlan workerSchedulePlan = entry.getValue();
    Set<TaskInstancePlan> containerPlanTaskInstances = workerSchedulePlan.getTaskInstances();
    LOG.fine("Task Details for Container Id:" + integer
        + "\tsize:" + containerPlanTaskInstances.size());
    for (TaskInstancePlan ip : containerPlanTaskInstances) {
      LOG.fine("TaskId:" + ip.getTaskId() + "\tTask Index:" + ip.getTaskIndex()
          + "\tTask Name:" + ip.getTaskName());
    }
  }
  return taskSchedulePlan;
}
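As a rough sketch of the plan structure this method produces, the fragment below assembles and reads back a tiny plan by hand, using only the constructors and accessors that already appear in the snippets on this page. The import paths for TaskInstancePlan, WorkerSchedulePlan, and TaskSchedulePlan are assumptions: they are assumed to sit alongside Resource in the same package.

// Hypothetical sketch; only Resource's package is confirmed by this page, the other
// imports assume the plan classes live in the same edu.iu...schedule.elements package.
import java.util.HashSet;
import java.util.Map;
import java.util.Set;

import edu.iu.dsc.tws.api.compute.schedule.elements.Resource;
import edu.iu.dsc.tws.api.compute.schedule.elements.TaskInstancePlan;
import edu.iu.dsc.tws.api.compute.schedule.elements.TaskSchedulePlan;
import edu.iu.dsc.tws.api.compute.schedule.elements.WorkerSchedulePlan;

public class PlanAssemblySketch {
  public static void main(String[] args) {
    // One task instance with made-up resource values (ram, disk, cpu).
    Resource instanceResource = new Resource(512.0, 1024.0, 2.0);
    TaskInstancePlan instance = new TaskInstancePlan("source", 0, 0, instanceResource);

    // Wrap it in a worker (container) plan with a made-up container resource.
    Set<TaskInstancePlan> instances = new HashSet<>();
    instances.add(instance);
    WorkerSchedulePlan worker0 = new WorkerSchedulePlan(0, instances, new Resource(2048.0, 4096.0, 8.0));

    Set<WorkerSchedulePlan> workers = new HashSet<>();
    workers.add(worker0);
    TaskSchedulePlan plan = new TaskSchedulePlan(0, workers);

    // Read the plan back the same way the logging loop above does.
    for (Map.Entry<Integer, WorkerSchedulePlan> entry : plan.getContainersMap().entrySet()) {
      for (TaskInstancePlan ip : entry.getValue().getTaskInstances()) {
        System.out.println("worker " + entry.getKey() + ": "
            + ip.getTaskName() + "[" + ip.getTaskIndex() + "]");
      }
    }
  }
}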
use of edu.iu.dsc.tws.api.compute.schedule.elements.Resource in project twister2 by DSC-SPIDAL.
the class Container method assertHasSpace.
private void assertHasSpace(Resource resourceValue) throws TaskSchedulerException {
  Resource usedResources = this.getTotalUsedResources();
  double newRam = usedResources.getRam() + resourceValue.getRam() + paddingPercentage;
  double newDisk = usedResources.getDisk() + resourceValue.getDisk() + paddingPercentage;
  double newCpu = usedResources.getCpu() + resourceValue.getCpu() + paddingPercentage;
  if (newRam > this.resource.getRam()) {
    throw new TaskSchedulerException(String.format(
        "Adding %s bytes of ram to existing %s bytes with %d percent padding "
            + "would exceed capacity %s",
        resourceValue.getRam(), usedResources.getRam(), paddingPercentage,
        this.resource.getRam()));
  }
  if (newDisk > this.resource.getDisk()) {
    throw new TaskSchedulerException(String.format(
        "Adding %s bytes of disk to existing %s bytes with %s percent padding "
            + "would exceed capacity %s",
        resourceValue.getDisk(), usedResources.getDisk(), paddingPercentage,
        this.resource.getDisk()));
  }
  if (newCpu > this.resource.getCpu()) {
    throw new TaskSchedulerException(String.format(
        "Adding %s cores to existing %s cores with %d percent padding "
            + "would exceed capacity %s",
        resourceValue.getCpu(), usedResources.getCpu(), paddingPercentage,
        this.resource.getCpu()));
  }
}
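A self-contained sketch of the same style of capacity check, using only the Resource accessors shown above. The capacity, usage, and padding numbers are made up, a plain IllegalStateException stands in for TaskSchedulerException, and the padding value is added as-is, mirroring the snippet.

import edu.iu.dsc.tws.api.compute.schedule.elements.Resource;

// Standalone capacity-check sketch with made-up numbers (ram, disk, cpu).
public class CapacityCheckSketch {
  public static void main(String[] args) {
    Resource capacity = new Resource(4096.0, 16384.0, 32.0);  // container capacity
    Resource used = new Resource(3072.0, 8192.0, 4.0);        // already allocated
    Resource requested = new Resource(512.0, 1024.0, 2.0);    // instance being added
    int paddingPercentage = 10;                               // added directly, as in the snippet

    if (used.getRam() + requested.getRam() + paddingPercentage > capacity.getRam()) {
      throw new IllegalStateException("ram would exceed capacity " + capacity.getRam());
    }
    if (used.getDisk() + requested.getDisk() + paddingPercentage > capacity.getDisk()) {
      throw new IllegalStateException("disk would exceed capacity " + capacity.getDisk());
    }
    if (used.getCpu() + requested.getCpu() + paddingPercentage > capacity.getCpu()) {
      throw new IllegalStateException("cpu would exceed capacity " + capacity.getCpu());
    }
    System.out.println("instance fits in the container");
  }
}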
use of edu.iu.dsc.tws.api.compute.schedule.elements.Resource in project twister2 by DSC-SPIDAL.
the class FirstFitStreamingTaskScheduler method initialize.
/**
 * This method initializes the configuration values received from the user and assigns the
 * default task instance and container instance values.
*/
@Override
public void initialize(Config cfg) {
  this.config = cfg;
  double instanceRAM = TaskSchedulerContext.taskInstanceRam(this.config);
  double instanceDisk = TaskSchedulerContext.taskInstanceDisk(this.config);
  double instanceCPU = TaskSchedulerContext.taskInstanceCpu(this.config);
  this.paddingPercentage = TaskSchedulerContext.containerPaddingPercentage(this.config);
  this.defaultResourceValue = new Resource(instanceRAM, instanceDisk, instanceCPU);
  int defaultNoOfTaskInstances =
      TaskSchedulerContext.defaultTaskInstancesPerContainer(this.config);
  instanceRAM = this.defaultResourceValue.getRam() * defaultNoOfTaskInstances;
  instanceDisk = this.defaultResourceValue.getDisk() * defaultNoOfTaskInstances;
  instanceCPU = this.defaultResourceValue.getCpu() * defaultNoOfTaskInstances;
  this.maxContainerResourceValue = new Resource(
      (double) Math.round(TaskScheduleUtils.increaseBy(instanceRAM, paddingPercentage)),
      (double) Math.round(TaskScheduleUtils.increaseBy(instanceDisk, paddingPercentage)),
      (double) Math.round(TaskScheduleUtils.increaseBy(instanceCPU, paddingPercentage)));
  LOG.fine("Instance default values:" + "RamValue:" + instanceRAM + "\t"
      + "DiskValue:" + instanceDisk + "\t" + "CPUValue:" + instanceCPU);
  LOG.fine("Container default values:"
      + "RamValue:" + this.maxContainerResourceValue.getRam() + "\t"
      + "DiskValue:" + this.maxContainerResourceValue.getDisk() + "\t"
      + "CPUValue:" + this.maxContainerResourceValue.getCpu());
}
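A minimal sketch of the container sizing arithmetic above. The local increaseBy helper is an assumption about what TaskScheduleUtils.increaseBy does (grow a value by the given percentage); the per-instance defaults and padding below are made up.

// Standalone sizing sketch; increaseBy here is an assumed equivalent of
// TaskScheduleUtils.increaseBy, not the twister2 implementation.
public class ContainerSizingSketch {
  static double increaseBy(double value, double percentage) {
    return value + (value * percentage) / 100.0;
  }

  public static void main(String[] args) {
    double instanceRam = 512.0;    // hypothetical per-instance defaults
    double instanceDisk = 1024.0;
    double instanceCpu = 2.0;
    int instancesPerContainer = 4;
    int paddingPercentage = 10;

    double containerRam = Math.round(increaseBy(instanceRam * instancesPerContainer, paddingPercentage));
    double containerDisk = Math.round(increaseBy(instanceDisk * instancesPerContainer, paddingPercentage));
    double containerCpu = Math.round(increaseBy(instanceCpu * instancesPerContainer, paddingPercentage));

    // 512 * 4 = 2048, plus 10% padding = 2252.8, rounded to 2253
    System.out.println(containerRam + " / " + containerDisk + " / " + containerCpu);
  }
}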
use of edu.iu.dsc.tws.api.compute.schedule.elements.Resource in project twister2 by DSC-SPIDAL.
the class FirstFitStreamingTaskScheduler method getSortedRAMInstances.
/**
 * This method sorts the task instances in decreasing order of their required ram
 * configuration values.
*/
private ArrayList<RequiredRam> getSortedRAMInstances(Set<String> taskNameSet) {
  ArrayList<RequiredRam> ramRequirements = new ArrayList<>();
  TreeSet<Vertex> orderedTaskSet = new TreeSet<>(new VertexComparator());
  orderedTaskSet.addAll(this.taskVertexSet);
  Map<String, Double> taskRamMap = taskAttributes.getTaskRamMap(this.taskVertexSet);
  for (String taskName : taskNameSet) {
    Resource resource = TaskScheduleUtils.getResourceRequirement(
        taskName, taskRamMap, this.defaultResourceValue,
        this.maxContainerResourceValue, this.paddingPercentage);
    ramRequirements.add(new RequiredRam(taskName, resource.getRam()));
  }
  ramRequirements.sort(Collections.reverseOrder());
  return ramRequirements;
}
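The descending sort relies on RequiredRam's natural ordering. Below is a plain-Java sketch of the same idea with a stand-in (task name, ram) class; it assumes, as the reverseOrder() call suggests, that RequiredRam's natural ordering is by the ram value.

import java.util.ArrayList;
import java.util.Collections;
import java.util.List;

// Stand-in for RequiredRam: a (taskName, ram) pair ordered by ram.
public class RamSortSketch {
  static class TaskRam implements Comparable<TaskRam> {
    final String taskName;
    final double ram;

    TaskRam(String taskName, double ram) {
      this.taskName = taskName;
      this.ram = ram;
    }

    @Override
    public int compareTo(TaskRam other) {
      return Double.compare(this.ram, other.ram);
    }
  }

  public static void main(String[] args) {
    List<TaskRam> ramRequirements = new ArrayList<>();
    ramRequirements.add(new TaskRam("source", 512.0));
    ramRequirements.add(new TaskRam("sink", 1024.0));
    ramRequirements.add(new TaskRam("compute", 768.0));
    // Same call as in the snippet: reverse the natural (ascending) ordering.
    ramRequirements.sort(Collections.reverseOrder());
    for (TaskRam t : ramRequirements) {
      System.out.println(t.taskName + " -> " + t.ram);  // sink, compute, source
    }
  }
}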
use of edu.iu.dsc.tws.api.compute.schedule.elements.Resource in project twister2 by DSC-SPIDAL.
the class RoundRobinTaskScheduler method schedule.
/**
 * This is the base method, which receives the dataflow task graph and the worker plan and
 * allocates the task instances to the appropriate workers with their required ram, disk,
 * and cpu values.
*
* @return TaskSchedulePlan
*/
@Override
public TaskSchedulePlan schedule(ComputeGraph computeGraph, WorkerPlan workerPlan) {
  // Allocate the task instances into the containers/workers
  Set<WorkerSchedulePlan> workerSchedulePlans = new LinkedHashSet<>();
  // Get the vertex set from the task graph
  Set<Vertex> taskVertexSet = new LinkedHashSet<>(computeGraph.getTaskVertexSet());
  // Allocate the task instances into the logical containers.
  Map<Integer, List<TaskInstanceId>> roundRobinContainerInstanceMap =
      roundRobinSchedulingAlgorithm(computeGraph, workerPlan.getNumberOfWorkers());
  TaskInstanceMapCalculation instanceMapCalculation =
      new TaskInstanceMapCalculation(this.instanceRAM, this.instanceCPU, this.instanceDisk);
  Map<Integer, Map<TaskInstanceId, Double>> instancesRamMap =
      instanceMapCalculation.getInstancesRamMapInContainer(roundRobinContainerInstanceMap, taskVertexSet);
  Map<Integer, Map<TaskInstanceId, Double>> instancesDiskMap =
      instanceMapCalculation.getInstancesDiskMapInContainer(roundRobinContainerInstanceMap, taskVertexSet);
  Map<Integer, Map<TaskInstanceId, Double>> instancesCPUMap =
      instanceMapCalculation.getInstancesCPUMapInContainer(roundRobinContainerInstanceMap, taskVertexSet);
  for (int containerId : roundRobinContainerInstanceMap.keySet()) {
    // Container totals start at the configured padding values and grow with each instance.
    double containerRAMValue = TaskSchedulerContext.containerRamPadding(config);
    double containerDiskValue = TaskSchedulerContext.containerDiskPadding(config);
    double containerCpuValue = TaskSchedulerContext.containerCpuPadding(config);
    List<TaskInstanceId> taskTaskInstanceIds = roundRobinContainerInstanceMap.get(containerId);
    Map<TaskInstanceId, TaskInstancePlan> taskInstancePlanMap = new HashMap<>();
    for (TaskInstanceId id : taskTaskInstanceIds) {
      double instanceRAMValue = instancesRamMap.get(containerId).get(id);
      double instanceDiskValue = instancesDiskMap.get(containerId).get(id);
      double instanceCPUValue = instancesCPUMap.get(containerId).get(id);
      Resource instanceResource =
          new Resource(instanceRAMValue, instanceDiskValue, instanceCPUValue);
      taskInstancePlanMap.put(id, new TaskInstancePlan(
          id.getTaskName(), id.getTaskId(), id.getTaskIndex(), instanceResource));
      containerRAMValue += instanceRAMValue;
      containerDiskValue += instanceDiskValue;
      containerCpuValue += instanceCPUValue;
    }
    Worker worker = workerPlan.getWorker(containerId);
    Resource containerResource;
    // Create the container resource value based on the worker plan
    if (worker != null && worker.getCpu() > 0 && worker.getDisk() > 0 && worker.getRam() > 0) {
      containerResource = new Resource(
          (double) worker.getRam(), (double) worker.getDisk(), (double) worker.getCpu());
    } else {
      containerResource = new Resource(containerRAMValue, containerDiskValue, containerCpuValue);
    }
    // Schedule the task instance plan into the task container plan.
    WorkerSchedulePlan taskWorkerSchedulePlan = new WorkerSchedulePlan(
        containerId, new LinkedHashSet<>(taskInstancePlanMap.values()), containerResource);
    workerSchedulePlans.add(taskWorkerSchedulePlan);
  }
  return new TaskSchedulePlan(0, workerSchedulePlans);
}
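The roundRobinSchedulingAlgorithm call itself is not shown on this page. As an illustration of the round-robin idea only (not the actual twister2 implementation), task instances can be dealt out to workers by cycling through worker ids, as in this plain-Java sketch with made-up task names:

import java.util.ArrayList;
import java.util.Arrays;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;

// Illustration of round-robin assignment: instance i of the flattened task list
// goes to worker (i % numberOfWorkers).
public class RoundRobinSketch {
  public static void main(String[] args) {
    int numberOfWorkers = 3;
    List<String> taskInstances = Arrays.asList(
        "source_0", "source_1", "compute_0", "compute_1", "sink_0");

    Map<Integer, List<String>> assignment = new LinkedHashMap<>();
    for (int i = 0; i < numberOfWorkers; i++) {
      assignment.put(i, new ArrayList<>());
    }
    for (int i = 0; i < taskInstances.size(); i++) {
      assignment.get(i % numberOfWorkers).add(taskInstances.get(i));
    }
    // worker 0 -> [source_0, compute_1]; worker 1 -> [source_1, sink_0]; worker 2 -> [compute_0]
    assignment.forEach((worker, tasks) -> System.out.println("worker " + worker + " -> " + tasks));
  }
}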