
Example 11 with TaskInstanceId

use of edu.iu.dsc.tws.api.compute.schedule.elements.TaskInstanceId in project twister2 by DSC-SPIDAL.

the class TaskInstanceMapCalculation method getInstancesCPUMapInContainer.

/**
 * Receives the container-to-instance allocation map and calculates the CPU value to
 * assign to each task instance in every container.
 * @param containerInstanceAllocationMap mapping from container id to the task instances allocated to it
 * @param taskVertexSet task vertices of the graph, used to look up user-specified CPU values
 * @return mapping from container id to the CPU value assigned to each of its task instances
 */
public Map<Integer, Map<TaskInstanceId, Double>> getInstancesCPUMapInContainer(Map<Integer, List<TaskInstanceId>> containerInstanceAllocationMap, Set<Vertex> taskVertexSet) {
    Map<String, Double> taskCpuMap = taskAttributes.getTaskCPUMap(taskVertexSet);
    HashMap<Integer, Map<TaskInstanceId, Double>> instancesCpuContainerMap = new HashMap<>();
    for (int containerId : containerInstanceAllocationMap.keySet()) {
        Double usedCPUValue = 0.0;
        List<TaskInstanceId> taskInstanceIds = containerInstanceAllocationMap.get(containerId);
        Map<TaskInstanceId, Double> containerCPUMap = new HashMap<>();
        instancesCpuContainerMap.put(containerId, containerCPUMap);
        List<TaskInstanceId> instancesToBeCalculated = new ArrayList<>();
        for (TaskInstanceId taskInstanceId : taskInstanceIds) {
            String taskName = taskInstanceId.getTaskName();
            if (taskCpuMap.containsKey(taskName)) {
                Double taskCpuValue = taskCpuMap.get(taskName);
                containerCPUMap.put(taskInstanceId, taskCpuValue);
                usedCPUValue += taskCpuValue;
            } else {
                instancesToBeCalculated.add(taskInstanceId);
            }
        }
        Double containerCpuValue = getContainerCpuValue(containerInstanceAllocationMap);
        int instancesAllocationSize = instancesToBeCalculated.size();
        if (instancesAllocationSize != 0) {
            Double instanceRequiredCpu = 0.0;
            if (!containerCpuValue.equals(NOT_SPECIFIED_NUMBER_VALUE)) {
                double remainingCpu = containerCpuValue - DEFAULT_CPU_PADDING_PER_CONTAINER - usedCPUValue;
                instanceRequiredCpu = remainingCpu / instancesAllocationSize;
            }
            for (TaskInstanceId taskInstanceId : instancesToBeCalculated) {
                containerCPUMap.put(taskInstanceId, instanceRequiredCpu);
            }
            LOG.fine("Instances Required CPU:\t" + instanceRequiredCpu);
        }
    }
    return instancesCpuContainerMap;
}
Also used : HashMap(java.util.HashMap) ArrayList(java.util.ArrayList) TaskInstanceId(edu.iu.dsc.tws.api.compute.schedule.elements.TaskInstanceId) Map(java.util.Map)
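A minimal driver sketch for this method. The TaskInstanceMapCalculation constructor order (instanceRAM, instanceCPU, instanceDisk) and the TaskInstanceId(taskName, taskId, taskIndex) constructor are taken from the other snippets on this page; the concrete numbers, the "source"/"sink" task names, and the computeGraph variable are illustrative assumptions, and java.util.Arrays is assumed in addition to the imports listed above.

// Hypothetical driver; numeric values and task names are made up for illustration.
Set<Vertex> taskVertexSet = new LinkedHashSet<>(computeGraph.getTaskVertexSet());

Map<Integer, List<TaskInstanceId>> allocation = new HashMap<>();
allocation.put(0, Arrays.asList(
    new TaskInstanceId("source", 0, 0),
    new TaskInstanceId("sink", 1, 0)));

TaskInstanceMapCalculation calculation =
    new TaskInstanceMapCalculation(1024.0 /* ram */, 1.0 /* cpu */, 10.0 /* disk */);
Map<Integer, Map<TaskInstanceId, Double>> cpuMap =
    calculation.getInstancesCPUMapInContainer(allocation, taskVertexSet);
// cpuMap.get(0) now holds the CPU value chosen for each of the two instances in container 0.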

Example 12 with TaskInstanceId

use of edu.iu.dsc.tws.api.compute.schedule.elements.TaskInstanceId in project twister2 by DSC-SPIDAL.

the class TaskInstanceMapCalculation method getInstancesDiskMapInContainer.

/**
 * Receives the container-to-instance allocation map and calculates the disk value to
 * assign to each task instance in every container.
 * @param containerInstanceAllocationMap mapping from container id to the task instances allocated to it
 * @param taskVertexSet task vertices of the graph, used to look up user-specified disk values
 * @return mapping from container id to the disk value assigned to each of its task instances
 */
public Map<Integer, Map<TaskInstanceId, Double>> getInstancesDiskMapInContainer(Map<Integer, List<TaskInstanceId>> containerInstanceAllocationMap, Set<Vertex> taskVertexSet) {
    Map<String, Double> diskMap = taskAttributes.getTaskDiskMap(taskVertexSet);
    HashMap<Integer, Map<TaskInstanceId, Double>> instancesDiskContainerMap = new HashMap<>();
    for (int containerId : containerInstanceAllocationMap.keySet()) {
        Double usedDiskValue = 0.0;
        List<TaskInstanceId> taskInstanceIds = containerInstanceAllocationMap.get(containerId);
        Map<TaskInstanceId, Double> containerDisk = new HashMap<>();
        instancesDiskContainerMap.put(containerId, containerDisk);
        List<TaskInstanceId> instancesToBeCalculated = new ArrayList<>();
        for (TaskInstanceId taskInstanceId : taskInstanceIds) {
            String taskName = taskInstanceId.getTaskName();
            if (diskMap.containsKey(taskName)) {
                Double diskValue = diskMap.get(taskName);
                containerDisk.put(taskInstanceId, diskValue);
                usedDiskValue += diskValue;
            } else {
                instancesToBeCalculated.add(taskInstanceId);
            }
        }
        Double containerDiskValue = getContainerDiskValue(containerInstanceAllocationMap);
        int instancesAllocationSize = instancesToBeCalculated.size();
        if (instancesAllocationSize != 0) {
            double instanceRequiredDisk = 0.0;
            if (!containerDiskValue.equals(NOT_SPECIFIED_NUMBER_VALUE)) {
                double remainingDisk = containerDiskValue - DEFAULT_DISK_PADDING_PER_CONTAINER - usedDiskValue;
                instanceRequiredDisk = remainingDisk / instancesAllocationSize;
            }
            for (TaskInstanceId taskInstanceId : instancesToBeCalculated) {
                containerDisk.put(taskInstanceId, instanceRequiredDisk);
            }
            LOG.fine("Instances Required Disk:\t" + instanceRequiredDisk);
        }
    }
    return instancesDiskContainerMap;
}
Also used : HashMap(java.util.HashMap) ArrayList(java.util.ArrayList) TaskInstanceId(edu.iu.dsc.tws.api.compute.schedule.elements.TaskInstanceId) Map(java.util.Map)
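A short worked example of the fallback branch above, to make the division concrete. The padding value is a stand-in; the real DEFAULT_DISK_PADDING_PER_CONTAINER constant depends on the scheduler defaults.

// Illustrative numbers only; the padding is a stand-in for DEFAULT_DISK_PADDING_PER_CONTAINER.
double containerDiskValue = 100.0;  // disk declared for the container
double usedDiskValue = 40.0;        // already claimed by instances with explicit disk values
double padding = 12.0;              // stand-in for DEFAULT_DISK_PADDING_PER_CONTAINER
int instancesAllocationSize = 3;    // instances without an explicit disk requirement

double remainingDisk = containerDiskValue - padding - usedDiskValue;    // 48.0
double instanceRequiredDisk = remainingDisk / instancesAllocationSize;  // 16.0 per instance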

Example 13 with TaskInstanceId

use of edu.iu.dsc.tws.api.compute.schedule.elements.TaskInstanceId in project twister2 by DSC-SPIDAL.

the class BatchTaskScheduler method schedule.

/**
 * This is the base method: it receives the dataflow task graph and the worker plan and
 * allocates the task instances to the appropriate workers with their required ram, disk,
 * and cpu values.
 *
 * @param computeGraph dataflow task graph to be scheduled
 * @param workerPlan   worker plan describing the available workers
 * @return the task schedule plan mapping task instances to workers
 */
@Override
public TaskSchedulePlan schedule(ComputeGraph computeGraph, WorkerPlan workerPlan) {
    // Allocate the task instances into the containers/workers
    Set<WorkerSchedulePlan> workerSchedulePlans = new LinkedHashSet<>();
    // Get the vertex set from the task graph
    Set<Vertex> taskVertexSet = new LinkedHashSet<>(computeGraph.getTaskVertexSet());
    // Allocate the task instances into the logical containers.
    Map<Integer, List<TaskInstanceId>> batchContainerInstanceMap = batchSchedulingAlgorithm(computeGraph, workerPlan.getNumberOfWorkers());
    TaskInstanceMapCalculation instanceMapCalculation = new TaskInstanceMapCalculation(this.instanceRAM, this.instanceCPU, this.instanceDisk);
    Map<Integer, Map<TaskInstanceId, Double>> instancesRamMap = instanceMapCalculation.getInstancesRamMapInContainer(batchContainerInstanceMap, taskVertexSet);
    Map<Integer, Map<TaskInstanceId, Double>> instancesDiskMap = instanceMapCalculation.getInstancesDiskMapInContainer(batchContainerInstanceMap, taskVertexSet);
    Map<Integer, Map<TaskInstanceId, Double>> instancesCPUMap = instanceMapCalculation.getInstancesCPUMapInContainer(batchContainerInstanceMap, taskVertexSet);
    for (int containerId : batchContainerInstanceMap.keySet()) {
        double containerRAMValue = TaskSchedulerContext.containerRamPadding(config);
        double containerDiskValue = TaskSchedulerContext.containerDiskPadding(config);
        double containerCpuValue = TaskSchedulerContext.containerCpuPadding(config);
        List<TaskInstanceId> taskTaskInstanceIds = batchContainerInstanceMap.get(containerId);
        Map<TaskInstanceId, TaskInstancePlan> taskInstancePlanMap = new HashMap<>();
        for (TaskInstanceId id : taskTaskInstanceIds) {
            double instanceRAMValue = instancesRamMap.get(containerId).get(id);
            double instanceDiskValue = instancesDiskMap.get(containerId).get(id);
            double instanceCPUValue = instancesCPUMap.get(containerId).get(id);
            Resource instanceResource = new Resource(instanceRAMValue, instanceDiskValue, instanceCPUValue);
            taskInstancePlanMap.put(id, new TaskInstancePlan(id.getTaskName(), id.getTaskId(), id.getTaskIndex(), instanceResource));
            containerRAMValue += instanceRAMValue;
            containerDiskValue += instanceDiskValue;
            containerCpuValue += instanceCPUValue;
        }
        Worker worker = workerPlan.getWorker(containerId);
        Resource containerResource;
        // Create the container resource value based on the worker plan
        if (worker != null && worker.getCpu() > 0 && worker.getDisk() > 0 && worker.getRam() > 0) {
            containerResource = new Resource((double) worker.getRam(), (double) worker.getDisk(), (double) worker.getCpu());
        } else {
            containerResource = new Resource(containerRAMValue, containerDiskValue, containerCpuValue);
        }
        // Schedule the task instance plan into the task container plan.
        WorkerSchedulePlan taskWorkerSchedulePlan = new WorkerSchedulePlan(containerId, new LinkedHashSet<>(taskInstancePlanMap.values()), containerResource);
        workerSchedulePlans.add(taskWorkerSchedulePlan);
        if (dependentGraphs && index == 0) {
            workerIdList.add(containerId);
        }
    }
    index++;
    TaskSchedulePlan taskSchedulePlan = new TaskSchedulePlan(0, workerSchedulePlans);
    if (workerId == 0) {
        Map<Integer, WorkerSchedulePlan> containersMap = taskSchedulePlan.getContainersMap();
        for (Map.Entry<Integer, WorkerSchedulePlan> entry : containersMap.entrySet()) {
            Integer integer = entry.getKey();
            WorkerSchedulePlan workerSchedulePlan = entry.getValue();
            Set<TaskInstancePlan> containerPlanTaskInstances = workerSchedulePlan.getTaskInstances();
            LOG.fine("Graph Name:" + computeGraph.getGraphName() + "\tcontainer id:" + integer);
            for (TaskInstancePlan ip : containerPlanTaskInstances) {
                LOG.fine("Task Id:" + ip.getTaskId() + "\tIndex" + ip.getTaskIndex() + "\tName:" + ip.getTaskName());
            }
        }
    }
    return taskSchedulePlan;
}
Also used : LinkedHashSet(java.util.LinkedHashSet) Vertex(edu.iu.dsc.tws.api.compute.graph.Vertex) HashMap(java.util.HashMap) LinkedHashMap(java.util.LinkedHashMap) TaskSchedulePlan(edu.iu.dsc.tws.api.compute.schedule.elements.TaskSchedulePlan) WorkerSchedulePlan(edu.iu.dsc.tws.api.compute.schedule.elements.WorkerSchedulePlan) TaskInstanceMapCalculation(edu.iu.dsc.tws.tsched.spi.taskschedule.TaskInstanceMapCalculation) TaskInstancePlan(edu.iu.dsc.tws.api.compute.schedule.elements.TaskInstancePlan) Worker(edu.iu.dsc.tws.api.compute.schedule.elements.Worker) ArrayList(java.util.ArrayList) List(java.util.List) Resource(edu.iu.dsc.tws.api.compute.schedule.elements.Resource) TaskInstanceId(edu.iu.dsc.tws.api.compute.schedule.elements.TaskInstanceId) Map(java.util.Map)
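A hedged driver sketch for schedule(...). Only calls that appear in the snippet above are used (schedule, getContainersMap, getTaskInstances and the TaskInstancePlan getters); the scheduler, computeGraph, and workerPlan variables are assumed to have been built and configured elsewhere.

// Hypothetical driver: scheduler, computeGraph, and workerPlan are assumptions.
TaskSchedulePlan plan = scheduler.schedule(computeGraph, workerPlan);

// Inspect the resulting plan with the same accessors the logging block above relies on.
for (Map.Entry<Integer, WorkerSchedulePlan> entry : plan.getContainersMap().entrySet()) {
    int containerId = entry.getKey();
    for (TaskInstancePlan ip : entry.getValue().getTaskInstances()) {
        System.out.println("container " + containerId + " -> " + ip.getTaskName()
            + "[" + ip.getTaskIndex() + "]");
    }
}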

Example 14 with TaskInstanceId

use of edu.iu.dsc.tws.api.compute.schedule.elements.TaskInstanceId in project twister2 by DSC-SPIDAL.

the class BatchTaskScheduler method batchSchedulingAlgorithm.

private Map<Integer, List<TaskInstanceId>> batchSchedulingAlgorithm(ComputeGraph graph, int numberOfContainers) throws TaskSchedulerException {
    Set<Vertex> taskVertexSet = new LinkedHashSet<>(graph.getTaskVertexSet());
    TreeSet<Vertex> orderedTaskSet = new TreeSet<>(new VertexComparator());
    orderedTaskSet.addAll(taskVertexSet);
    IntStream.range(0, numberOfContainers).forEach(i1 -> batchTaskAllocation.put(i1, new ArrayList<>()));
    int globalTaskIndex = 0;
    if (dependentGraphs) {
        for (Vertex vertex : taskVertexSet) {
            INode iNode = vertex.getTask();
            if (iNode instanceof Receptor) {
                validateReceptor(graph, vertex);
            }
            dependentTaskWorkerAllocation(graph, vertex, numberOfContainers, globalTaskIndex);
            globalTaskIndex++;
        }
    } else {
        for (Vertex vertex : taskVertexSet) {
            INode iNode = vertex.getTask();
            if (iNode instanceof Collector) {
                ((Collector) iNode).getCollectibleNames().forEach(key -> collectibleNameMap.put(key, vertex.getParallelism()));
            } else if (iNode instanceof Receptor) {
                ((Receptor) iNode).getReceivableNames().forEach(key -> receivableNameMap.put(key, vertex.getParallelism()));
                validateParallelism();
            }
            independentTaskWorkerAllocation(graph, vertex, numberOfContainers, globalTaskIndex);
            globalTaskIndex++;
        }
    }
    return batchTaskAllocation;
}
Also used : LinkedHashSet(java.util.LinkedHashSet) IntStream(java.util.stream.IntStream) TaskInstanceId(edu.iu.dsc.tws.api.compute.schedule.elements.TaskInstanceId) ComputeGraph(edu.iu.dsc.tws.api.compute.graph.ComputeGraph) TaskAttributes(edu.iu.dsc.tws.tsched.utils.TaskAttributes) INode(edu.iu.dsc.tws.api.compute.nodes.INode) HashMap(java.util.HashMap) Config(edu.iu.dsc.tws.api.config.Config) TaskSchedulerContext(edu.iu.dsc.tws.tsched.spi.common.TaskSchedulerContext) Resource(edu.iu.dsc.tws.api.compute.schedule.elements.Resource) TreeSet(java.util.TreeSet) Twister2RuntimeException(edu.iu.dsc.tws.api.exceptions.Twister2RuntimeException) ArrayList(java.util.ArrayList) LinkedHashMap(java.util.LinkedHashMap) WorkerSchedulePlan(edu.iu.dsc.tws.api.compute.schedule.elements.WorkerSchedulePlan) TaskInstanceMapCalculation(edu.iu.dsc.tws.tsched.spi.taskschedule.TaskInstanceMapCalculation) TaskSchedulePlan(edu.iu.dsc.tws.api.compute.schedule.elements.TaskSchedulePlan) Receptor(edu.iu.dsc.tws.api.compute.modifiers.Receptor) Map(java.util.Map) TaskInstancePlan(edu.iu.dsc.tws.api.compute.schedule.elements.TaskInstancePlan) Collector(edu.iu.dsc.tws.api.compute.modifiers.Collector) Set(java.util.Set) Logger(java.util.logging.Logger) Vertex(edu.iu.dsc.tws.api.compute.graph.Vertex) List(java.util.List) ITaskScheduler(edu.iu.dsc.tws.api.compute.schedule.ITaskScheduler) Worker(edu.iu.dsc.tws.api.compute.schedule.elements.Worker) WorkerPlan(edu.iu.dsc.tws.api.compute.schedule.elements.WorkerPlan) Edge(edu.iu.dsc.tws.api.compute.graph.Edge) Comparator(java.util.Comparator) TaskSchedulerException(edu.iu.dsc.tws.api.compute.exceptions.TaskSchedulerException)
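A minimal sketch of how the per-container allocation map is pre-seeded before any vertex is placed, mirroring the IntStream.range(...) call above. The map type follows the method's return type; the container count is an illustrative assumption.

// Mirrors the pre-seeding step above; numberOfContainers is an illustrative value.
Map<Integer, List<TaskInstanceId>> batchTaskAllocation = new LinkedHashMap<>();
int numberOfContainers = 4;
IntStream.range(0, numberOfContainers)
         .forEach(i -> batchTaskAllocation.put(i, new ArrayList<>()));
// batchTaskAllocation is now {0=[], 1=[], 2=[], 3=[]}; each vertex walk then appends
// TaskInstanceId(taskName, globalTaskIndex, taskIndex) entries to these lists.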

Example 15 with TaskInstanceId

use of edu.iu.dsc.tws.api.compute.schedule.elements.TaskInstanceId in project twister2 by DSC-SPIDAL.

the class BatchTaskScheduler method dependentTaskWorkerAllocation.

/**
 * Allocates task instances for multiple dependent task graphs. The worker ids scheduled
 * for the first task graph are kept in the worker id list and reused when placing the
 * instances of the subsequent task graphs. This method is invoked when the task graphs
 * are connected to each other.
 */
private void dependentTaskWorkerAllocation(ComputeGraph graph, Vertex vertex, int numberOfContainers, int globalTaskIndex) {
    int totalTaskInstances;
    if (graph.getNodeConstraints().isEmpty()) {
        totalTaskInstances = taskAttributes.getTotalNumberOfInstances(vertex);
        String task = vertex.getName();
        int containerIndex;
        for (int i = 0; i < totalTaskInstances; i++) {
            if (workerIdList.size() == 0) {
                containerIndex = i % numberOfContainers;
            } else {
                containerIndex = i % workerIdList.size();
            }
            batchTaskAllocation.get(containerIndex).add(new TaskInstanceId(task, globalTaskIndex, i));
        }
    } else {
        totalTaskInstances = taskAttributes.getTotalNumberOfInstances(vertex, graph.getNodeConstraints());
        int instancesPerWorker = taskAttributes.getInstancesPerWorker(graph.getGraphConstraints());
        int maxTaskInstancesPerContainer = 0;
        int containerIndex;
        for (int i = 0; i < totalTaskInstances; i++) {
            if (workerIdList.size() == 0) {
                containerIndex = i % numberOfContainers;
            } else {
                containerIndex = i % workerIdList.size();
            }
            if (maxTaskInstancesPerContainer < instancesPerWorker) {
                batchTaskAllocation.get(containerIndex).add(new TaskInstanceId(vertex.getName(), globalTaskIndex, i));
                ++maxTaskInstancesPerContainer;
            } else {
                throw new TaskSchedulerException("Task scheduling is not possible for the present "
                    + "configuration, please check the number of workers and the maximum "
                    + "instances per worker");
            }
        }
    }
}
Also used : TaskInstanceId(edu.iu.dsc.tws.api.compute.schedule.elements.TaskInstanceId) TaskSchedulerException(edu.iu.dsc.tws.api.compute.exceptions.TaskSchedulerException)
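The container index selection in both branches above is a plain round-robin; the standalone illustration below (not the twister2 API) shows the resulting placement, assuming two workers were recorded for the first graph.

// Standalone illustration of the containerIndex selection above; values are made up.
int totalTaskInstances = 5;
int numberOfContainers = 3;
List<Integer> workerIdList = Arrays.asList(0, 1);   // workers chosen for the first graph

for (int i = 0; i < totalTaskInstances; i++) {
    int containerIndex = workerIdList.isEmpty()
        ? i % numberOfContainers       // first graph: spread over all containers
        : i % workerIdList.size();     // dependent graphs: reuse the first graph's workers
    // instance i goes to container containerIndex:
    // i = 0,1,2,3,4 -> containerIndex = 0,1,0,1,0 when workerIdList has two entries
}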

Aggregations

TaskInstanceId (edu.iu.dsc.tws.api.compute.schedule.elements.TaskInstanceId): 21
HashMap (java.util.HashMap): 15
Map (java.util.Map): 15
ArrayList (java.util.ArrayList): 13
LinkedHashMap (java.util.LinkedHashMap): 12
Vertex (edu.iu.dsc.tws.api.compute.graph.Vertex): 10
List (java.util.List): 10
Resource (edu.iu.dsc.tws.api.compute.schedule.elements.Resource): 9
TaskInstancePlan (edu.iu.dsc.tws.api.compute.schedule.elements.TaskInstancePlan): 9
LinkedHashSet (java.util.LinkedHashSet): 9
TaskSchedulerException (edu.iu.dsc.tws.api.compute.exceptions.TaskSchedulerException): 8
WorkerSchedulePlan (edu.iu.dsc.tws.api.compute.schedule.elements.WorkerSchedulePlan): 8
TaskSchedulePlan (edu.iu.dsc.tws.api.compute.schedule.elements.TaskSchedulePlan): 7
Worker (edu.iu.dsc.tws.api.compute.schedule.elements.Worker): 7
TaskInstanceMapCalculation (edu.iu.dsc.tws.tsched.spi.taskschedule.TaskInstanceMapCalculation): 7
TreeSet (java.util.TreeSet): 5
TaskAttributes (edu.iu.dsc.tws.tsched.utils.TaskAttributes): 4
HashSet (java.util.HashSet): 4
DataTransferTimeCalculator (edu.iu.dsc.tws.tsched.utils.DataTransferTimeCalculator): 3
Set (java.util.Set): 3