
Example 16 with ExecutionPlan

Use of edu.iu.dsc.tws.api.compute.executor.ExecutionPlan in project twister2 by DSC-SPIDAL.

From the class TeraSort, method execute.

@Override
public void execute(WorkerEnvironment workerEnv) {
    int workerID = workerEnv.getWorkerId();
    ComputeEnvironment cEnv = ComputeEnvironment.init(workerEnv);
    Config config = workerEnv.getConfig();
    resultsRecorder = new BenchmarkResultsRecorder(config, workerID == 0);
    Timing.setDefaultTimingUnit(TimingUnit.MILLI_SECONDS);
    final String filePath = config.getStringValue(ARG_INPUT_FILE, null);
    final int keySize = config.getIntegerValue(ARG_KEY_SIZE, 10);
    final int valueSize = config.getIntegerValue(ARG_VALUE_SIZE, 90);
    // Sampling graph: built only when the input is file based
    TaskPartitioner taskPartitioner;
    if (filePath != null) {
        ComputeGraphBuilder samplingGraph = ComputeGraphBuilder.newBuilder(config);
        samplingGraph.setMode(OperationMode.BATCH);
        Sampler samplerTask = new Sampler();
        samplingGraph.addSource(TASK_SAMPLER, samplerTask, config.getIntegerValue(ARG_TASKS_SOURCES, 4));
        SamplerReduce samplerReduce = new SamplerReduce();
        samplingGraph
            .addCompute(TASK_SAMPLER_REDUCE, samplerReduce, config.getIntegerValue(ARG_RESOURCE_INSTANCES, 4))
            .allreduce(TASK_SAMPLER)
            .viaEdge(EDGE)
            .withReductionFunction(byte[].class, (minMax1, minMax2) -> {
            byte[] min1 = Arrays.copyOfRange(minMax1, 0, keySize);
            byte[] max1 = Arrays.copyOfRange(minMax1, keySize, minMax1.length);
            byte[] min2 = Arrays.copyOfRange(minMax2, 0, keySize);
            byte[] max2 = Arrays.copyOfRange(minMax2, keySize, minMax2.length);
            byte[] newMinMax = new byte[keySize * 2];
            byte[] min = min1;
            byte[] max = max1;
            if (ByteArrayComparator.getInstance().compare(min1, min2) > 0) {
                min = min2;
            }
            if (ByteArrayComparator.getInstance().compare(max1, max2) < 0) {
                max = max2;
            }
            System.arraycopy(min, 0, newMinMax, 0, keySize);
            System.arraycopy(max, 0, newMinMax, keySize, keySize);
            return newMinMax;
        });
        ComputeGraph sampleGraphBuild = samplingGraph.build();
        ExecutionPlan sampleTaskPlan = cEnv.getTaskExecutor().plan(sampleGraphBuild);
        cEnv.getTaskExecutor().execute(sampleGraphBuild, sampleTaskPlan);
        DataObject<byte[]> output = cEnv.getTaskExecutor().getOutput("sample-reduce");
        LOG.info("Sample output received");
        taskPartitioner = new TaskPartitionerForSampledData(output.getPartitions()[0].getConsumer().next(), keySize);
    } else {
        taskPartitioner = new TaskPartitionerForRandom();
    }
    // Sort Graph
    ComputeGraphBuilder teraSortTaskGraph = ComputeGraphBuilder.newBuilder(config);
    teraSortTaskGraph.setMode(OperationMode.BATCH);
    BaseSource dataSource;
    if (filePath == null) {
        dataSource = new RandomDataSource();
    } else {
        dataSource = new FileDataSource();
    }
    teraSortTaskGraph.addSource(TASK_SOURCE, dataSource, config.getIntegerValue(ARG_TASKS_SOURCES, 4));
    Receiver receiver = new Receiver();
    KeyedGatherConfig keyedGatherConfig = teraSortTaskGraph
        .addCompute(TASK_RECV, receiver, config.getIntegerValue(ARG_TASKS_SINKS, 4))
        .keyedGather(TASK_SOURCE)
        .viaEdge(EDGE)
        .withDataType(MessageTypes.BYTE_ARRAY)
        .withKeyType(MessageTypes.BYTE_ARRAY)
        .withTaskPartitioner(taskPartitioner)
        .useDisk(true)
        .sortBatchByKey(ByteArrayComparator.getInstance())
        .groupBatchByKey(false);
    if (config.getBooleanValue(ARG_FIXED_SCHEMA, false)) {
        LOG.info("Using fixed schema feature with message size : " + (keySize + valueSize) + " and key size : " + keySize);
        keyedGatherConfig.withMessageSchema(MessageSchema.ofSize(keySize + valueSize, keySize));
    }
    ComputeGraph computeGraph = teraSortTaskGraph.build();
    ExecutionPlan executionPlan = cEnv.getTaskExecutor().plan(computeGraph);
    cEnv.getTaskExecutor().execute(computeGraph, executionPlan);
    cEnv.close();
    LOG.info("Finished Sorting...");
}
Also used : BaseSource(edu.iu.dsc.tws.api.compute.nodes.BaseSource) KeyedGatherConfig(edu.iu.dsc.tws.task.impl.ops.KeyedGatherConfig) Config(edu.iu.dsc.tws.api.config.Config) JobConfig(edu.iu.dsc.tws.api.JobConfig) ComputeGraph(edu.iu.dsc.tws.api.compute.graph.ComputeGraph) BenchmarkResultsRecorder(edu.iu.dsc.tws.examples.utils.bench.BenchmarkResultsRecorder) ComputeGraphBuilder(edu.iu.dsc.tws.task.impl.ComputeGraphBuilder) ComputeEnvironment(edu.iu.dsc.tws.task.ComputeEnvironment) ExecutionPlan(edu.iu.dsc.tws.api.compute.executor.ExecutionPlan) TaskPartitioner(edu.iu.dsc.tws.api.compute.TaskPartitioner)
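
The allreduce function above merges two packed [min | max] key ranges coming from different sampler tasks. The standalone sketch below walks through the same merge logic outside the task graph; the unsignedCompare helper is a hypothetical stand-in for twister2's ByteArrayComparator, and the fixed keySize of 4 is used only for illustration.

import java.util.Arrays;

public class MinMaxMergeSketch {

    // Hypothetical stand-in for ByteArrayComparator: unsigned lexicographic order.
    static int unsignedCompare(byte[] a, byte[] b) {
        for (int i = 0; i < Math.min(a.length, b.length); i++) {
            int cmp = Integer.compare(a[i] & 0xFF, b[i] & 0xFF);
            if (cmp != 0) {
                return cmp;
            }
        }
        return Integer.compare(a.length, b.length);
    }

    // Merge two packed [min | max] arrays into a new packed [min | max] array,
    // mirroring the allreduce function of the sampling graph.
    static byte[] merge(byte[] minMax1, byte[] minMax2, int keySize) {
        byte[] min1 = Arrays.copyOfRange(minMax1, 0, keySize);
        byte[] max1 = Arrays.copyOfRange(minMax1, keySize, minMax1.length);
        byte[] min2 = Arrays.copyOfRange(minMax2, 0, keySize);
        byte[] max2 = Arrays.copyOfRange(minMax2, keySize, minMax2.length);
        byte[] min = unsignedCompare(min1, min2) > 0 ? min2 : min1;
        byte[] max = unsignedCompare(max1, max2) < 0 ? max2 : max1;
        byte[] newMinMax = new byte[keySize * 2];
        System.arraycopy(min, 0, newMinMax, 0, keySize);
        System.arraycopy(max, 0, newMinMax, keySize, keySize);
        return newMinMax;
    }

    public static void main(String[] args) {
        int keySize = 4;
        byte[] range1 = {1, 2, 3, 4, 9, 9, 9, 9};  // min = [1,2,3,4], max = [9,9,9,9]
        byte[] range2 = {0, 5, 5, 5, 8, 8, 8, 8};  // min = [0,5,5,5], max = [8,8,8,8]
        // Expected merged range: min = [0,5,5,5], max = [9,9,9,9]
        System.out.println(Arrays.toString(merge(range1, range2, keySize)));
    }
}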

Example 17 with ExecutionPlan

Use of edu.iu.dsc.tws.api.compute.executor.ExecutionPlan in project twister2 by DSC-SPIDAL.

From the class BatchTaskSchedulerExample, method execute.

@Override
public void execute(WorkerEnvironment workerEnv) {
    int workerId = workerEnv.getWorkerId();
    Config config = workerEnv.getConfig();
    long startTime = System.currentTimeMillis();
    LOG.log(Level.FINE, "Task worker starting: " + workerId);
    ComputeEnvironment cEnv = ComputeEnvironment.init(workerEnv);
    TaskExecutor taskExecutor = cEnv.getTaskExecutor();
    // Independent graph; it has a collector
    ComputeGraph firstGraph = buildFirstGraph(2, config);
    // Dependent graph; it has a collector
    ComputeGraph secondGraph = buildSecondGraph(4, config);
    // Dependent graph; it has a receptor to receive input from the first or second graph
    ComputeGraph thirdGraph = buildThirdGraph(4, config);
    ComputeGraph[] computeGraphs = new ComputeGraph[] { firstGraph, secondGraph, thirdGraph };
    // Get the execution plan for the first task graph
    ExecutionPlan firstGraphExecutionPlan = taskExecutor.plan(firstGraph);
    // Get the execution plan for the second task graph
    ExecutionPlan secondGraphExecutionPlan = taskExecutor.plan(secondGraph);
    // Get the execution plan for the third task graph
    ExecutionPlan thirdGraphExecutionPlan = taskExecutor.plan(thirdGraph);
    taskExecutor.execute(firstGraph, firstGraphExecutionPlan);
    taskExecutor.execute(secondGraph, secondGraphExecutionPlan);
    taskExecutor.execute(thirdGraph, thirdGraphExecutionPlan);
    // This block tests all three graphs planned together as a dependent set
    /*Map<String, ExecutionPlan> taskExecutionPlan = taskExecutor.plan(computeGraphs);
    for (Map.Entry<String, ExecutionPlan> planEntry : taskExecutionPlan.entrySet()) {
      String graphName = planEntry.getKey();
      if (graphName.equals(computeGraphs[0].getGraphName())) {
        taskExecutor.execute(computeGraphs[0], planEntry.getValue());
      } else if (graphName.equals(computeGraphs[1].getGraphName())) {
        taskExecutor.execute(computeGraphs[1], planEntry.getValue());
      } else {
        taskExecutor.execute(computeGraphs[2], planEntry.getValue());
      }
    }*/
    cEnv.close();
    long endTime = System.currentTimeMillis();
    LOG.info("Total Execution Time: " + (endTime - startTime));
}
Also used : ComputeEnvironment(edu.iu.dsc.tws.task.ComputeEnvironment) TaskExecutor(edu.iu.dsc.tws.task.impl.TaskExecutor) ExecutionPlan(edu.iu.dsc.tws.api.compute.executor.ExecutionPlan) Config(edu.iu.dsc.tws.api.config.Config) JobConfig(edu.iu.dsc.tws.api.JobConfig) ComputeGraph(edu.iu.dsc.tws.api.compute.graph.ComputeGraph)
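
The commented-out block above plans all three graphs in a single call and then matches each plan back to its graph by name. A more compact sketch of the same idea, assuming (as that block does) that plan(ComputeGraph...) returns a Map<String, ExecutionPlan> keyed by graph name:

// Sketch only: relies on the Map-returning plan(...) overload used in the
// commented-out block above (requires java.util.Map).
Map<String, ExecutionPlan> plans = taskExecutor.plan(computeGraphs);
for (ComputeGraph graph : computeGraphs) {
    taskExecutor.execute(graph, plans.get(graph.getGraphName()));
}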

Example 18 with ExecutionPlan

Use of edu.iu.dsc.tws.api.compute.executor.ExecutionPlan in project twister2 by DSC-SPIDAL.

From the class TaskExecutor, method createExecution.

/**
 * Plan the graph and create an executor for it without starting the execution. The
 * returned executor is registered with this task executor; the caller decides when
 * to run the graph.
 *
 * @param graph the dataflow graph
 * @return the executor created for the graph
 */
public IExecutor createExecution(ComputeGraph graph) {
    ExecutionPlan plan = plan(graph);
    IExecutor ex = executor.getExecutor(config, plan, graph.getOperationMode(),
        new ExecutionHookImpl(config, dataObjectMap, plan, currentExecutors));
    currentExecutors.add(ex);
    return ex;
}
Also used : ExecutionPlan(edu.iu.dsc.tws.api.compute.executor.ExecutionPlan) IExecutor(edu.iu.dsc.tws.api.compute.executor.IExecutor)
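
Unlike execute, createExecution only plans the graph and hands back the executor; nothing runs until the caller drives it. A possible usage sketch is shown below; the execute() and closeExecution() calls on IExecutor are assumptions and should be checked against the IExecutor interface in the twister2 version in use.

// Sketch: reuse a pre-planned execution across several iterations.
// The IExecutor method names here are assumptions, not confirmed API.
IExecutor exec = taskExecutor.createExecution(graph);
for (int i = 0; i < iterations; i++) {
    exec.execute();
}
exec.closeExecution();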

Example 19 with ExecutionPlan

Use of edu.iu.dsc.tws.api.compute.executor.ExecutionPlan in project twister2 by DSC-SPIDAL.

From the class TaskExecutor, method execute.

/**
 * Plan and execute a graph. This call blocks until the execution finishes: for
 * streaming computations it does not return, while for batch computations it returns
 * once the execution is done.
 *
 * @param graph the dataflow graph
 */
public void execute(ComputeGraph graph) {
    ExecutionPlan plan = plan(graph);
    execute(config, graph, plan);
}
Also used : ExecutionPlan(edu.iu.dsc.tws.api.compute.executor.ExecutionPlan)

Example 20 with ExecutionPlan

Use of edu.iu.dsc.tws.api.compute.executor.ExecutionPlan in project twister2 by DSC-SPIDAL.

From the class MultiStageGraph, method execute.

@Override
public void execute() {
    GeneratorTask g = new GeneratorTask();
    ReduceTask rt = new ReduceTask();
    PartitionTask r = new PartitionTask();
    ComputeGraphBuilder builder = ComputeGraphBuilder.newBuilder(config);
    builder.addSource("source", g, 4);
    ComputeConnection pc = builder.addCompute("compute", r, 4);
    pc.partition("source").viaEdge("partition-edge").withDataType(MessageTypes.OBJECT);
    ComputeConnection rc = builder.addCompute("sink", rt, 1);
    rc.reduce("compute").viaEdge("compute-edge").withReductionFunction((object1, object2) -> object1);
    builder.setMode(OperationMode.BATCH);
    ComputeGraph graph = builder.build();
    graph.setGraphName("MultiTaskGraph");
    ExecutionPlan plan = taskExecutor.plan(graph);
    taskExecutor.execute(graph, plan);
}
Also used : ExecutionPlan(edu.iu.dsc.tws.api.compute.executor.ExecutionPlan) ComputeGraph(edu.iu.dsc.tws.api.compute.graph.ComputeGraph) ComputeGraphBuilder(edu.iu.dsc.tws.task.impl.ComputeGraphBuilder) ComputeConnection(edu.iu.dsc.tws.task.impl.ComputeConnection)
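
The reduction lambda in this graph simply keeps the first operand, which is enough to exercise the multi-stage wiring but discards data. Below is a sketch of a combining variant; the assumption that each "compute" task emits an int[] partial result is purely illustrative and not part of the original example.

// Illustrative only: assumes the reduced values are int[] partial results.
rc.reduce("compute").viaEdge("compute-edge").withReductionFunction((object1, object2) -> {
    int[] left = (int[]) object1;
    int[] right = (int[]) object2;
    int[] merged = new int[left.length];
    for (int i = 0; i < left.length; i++) {
        // element-wise sum of the two partial results
        merged[i] = left[i] + right[i];
    }
    return merged;
});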

Aggregations

ExecutionPlan (edu.iu.dsc.tws.api.compute.executor.ExecutionPlan): 37 usages
ComputeGraph (edu.iu.dsc.tws.api.compute.graph.ComputeGraph): 33 usages
ComputeConnection (edu.iu.dsc.tws.task.impl.ComputeConnection): 13 usages
DataObject (edu.iu.dsc.tws.api.dataset.DataObject): 10 usages
ComputeGraphBuilder (edu.iu.dsc.tws.task.impl.ComputeGraphBuilder): 10 usages
IExecutor (edu.iu.dsc.tws.api.compute.executor.IExecutor): 9 usages
Config (edu.iu.dsc.tws.api.config.Config): 8 usages
ComputeEnvironment (edu.iu.dsc.tws.task.ComputeEnvironment): 7 usages
JobConfig (edu.iu.dsc.tws.api.JobConfig): 5 usages
Communicator (edu.iu.dsc.tws.api.comms.Communicator): 4 usages
TaskSchedulePlan (edu.iu.dsc.tws.api.compute.schedule.elements.TaskSchedulePlan): 4 usages
DataObjectSink (edu.iu.dsc.tws.task.dataobjects.DataObjectSink): 4 usages
DataObjectSource (edu.iu.dsc.tws.task.dataobjects.DataObjectSource): 4 usages
TaskExecutor (edu.iu.dsc.tws.task.impl.TaskExecutor): 4 usages
HashMap (java.util.HashMap): 4 usages
WorkerPlan (edu.iu.dsc.tws.api.compute.schedule.elements.WorkerPlan): 3 usages
ReduceAggregator (edu.iu.dsc.tws.examples.ml.svm.aggregate.ReduceAggregator): 3 usages
SVMReduce (edu.iu.dsc.tws.examples.ml.svm.aggregate.SVMReduce): 3 usages
CheckpointingClient (edu.iu.dsc.tws.api.checkpointing.CheckpointingClient): 2 usages
TWSChannel (edu.iu.dsc.tws.api.comms.channel.TWSChannel): 2 usages