use of edu.iu.dsc.tws.task.impl.ComputeConnection in project twister2 by DSC-SPIDAL.
the class MultiComputeTasksGraphExample method execute.
@Override
public void execute() {
  LOG.log(Level.INFO, "Task worker starting: " + workerId);

  ComputeGraphBuilder builder = ComputeGraphBuilder.newBuilder(config);
  int parallel = Integer.parseInt((String) config.get(DataObjectConstants.PARALLELISM_VALUE));

  SourceTask sourceTask = new SourceTask();
  FirstComputeTask firstComputeTask = new FirstComputeTask();
  SecondComputeTask secondComputeTask = new SecondComputeTask();
  ReduceTask reduceTask = new ReduceTask();

  String dataDirectory = (String) config.get(DataObjectConstants.DINPUT_DIRECTORY) + workerId;
  String centroidDirectory = (String) config.get(DataObjectConstants.CINPUT_DIRECTORY) + workerId;
  int dimension = Integer.parseInt((String) config.get(DataObjectConstants.DIMENSIONS));
  int numFiles = Integer.parseInt((String) config.get(DataObjectConstants.NUMBER_OF_FILES));
  int dsize = Integer.parseInt((String) config.get(DataObjectConstants.DSIZE));
  int csize = Integer.parseInt((String) config.get(DataObjectConstants.CSIZE));
  String type = config.getStringValue(DataObjectConstants.FILE_TYPE);

  LOG.info("Input Values:" + dataDirectory + centroidDirectory + dimension + numFiles);
  KMeansUtils.generateDataPoints(config, dimension, numFiles, dsize, csize,
      dataDirectory, centroidDirectory, type);

  // Adding the user-defined constraints to the graph
  Map<String, String> sourceTaskConstraintsMap = new HashMap<>();
  // sourceTaskConstraintsMap.put(Context.TWISTER2_MAX_TASK_INSTANCES_PER_WORKER, "2");
  Map<String, String> computeTaskConstraintsMap = new HashMap<>();
  // computeTaskConstraintsMap.put(Context.TWISTER2_MAX_TASK_INSTANCES_PER_WORKER, "2");
  Map<String, String> sinkTaskConstraintsMap = new HashMap<>();
  // sinkTaskConstraintsMap.put(Context.TWISTER2_MAX_TASK_INSTANCES_PER_WORKER, "2");

  builder.addSource("source", sourceTask, parallel);
  ComputeConnection firstComputeConnection =
      builder.addCompute("firstcompute", firstComputeTask, parallel);
  ComputeConnection secondComputeConnection =
      builder.addCompute("secondcompute", secondComputeTask, parallel);
  ComputeConnection reduceConnection = builder.addCompute("compute", reduceTask, parallel);

  // Both compute tasks read the source over their own direct edges.
  firstComputeConnection.direct("source")
      .viaEdge("fdirect")
      .withDataType(MessageTypes.OBJECT);
  secondComputeConnection.direct("source")
      .viaEdge("sdirect")
      .withDataType(MessageTypes.OBJECT);

  // The final compute receives an allreduce from each compute task over separate edges.
  reduceConnection.allreduce("firstcompute")
      .viaEdge("freduce")
      .withReductionFunction(new Aggregator())
      .withDataType(MessageTypes.OBJECT)
      .connect()
      .allreduce("secondcompute")
      .viaEdge("sreduce")
      .withReductionFunction(new Aggregator())
      .withDataType(MessageTypes.OBJECT);

  builder.setMode(OperationMode.BATCH);

  // Adding graph and node level constraints
  // builder.addNodeConstraints("source", sourceTaskConstraintsMap);
  // builder.addNodeConstraints("firstcompute", computeTaskConstraintsMap);
  // builder.addNodeConstraints("secondcompute", computeTaskConstraintsMap);
  // builder.addNodeConstraints("sink", sinkTaskConstraintsMap);
  builder.addGraphConstraints(Context.TWISTER2_MAX_TASK_INSTANCES_PER_WORKER, "4");

  ComputeGraph graph = builder.build();
  LOG.info("%%% Graph Constraints:%%%" + graph.getGraphConstraints());

  ExecutionPlan plan = taskExecutor.plan(graph);
  taskExecutor.execute(graph, plan);
}
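The constraint maps above are created but left empty, and only the graph-level limit is actually applied; the commented-out addNodeConstraints calls show how the same maps would be attached to individual nodes. A minimal sketch of that per-node variant, reusing only the builder calls that appear (commented out) in the snippet; the "2" limit is illustrative, not taken from the original:

  // Sketch: cap each named task at 2 instances per worker, while the whole
  // graph stays capped at 4 instances per worker (values are illustrative).
  sourceTaskConstraintsMap.put(Context.TWISTER2_MAX_TASK_INSTANCES_PER_WORKER, "2");
  computeTaskConstraintsMap.put(Context.TWISTER2_MAX_TASK_INSTANCES_PER_WORKER, "2");
  builder.addNodeConstraints("source", sourceTaskConstraintsMap);
  builder.addNodeConstraints("firstcompute", computeTaskConstraintsMap);
  builder.addNodeConstraints("secondcompute", computeTaskConstraintsMap);
  builder.addGraphConstraints(Context.TWISTER2_MAX_TASK_INSTANCES_PER_WORKER, "4");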
use of edu.iu.dsc.tws.task.impl.ComputeConnection in project twister2 by DSC-SPIDAL.
the class SvmSgdAdvancedRunner method executeTrainingGraph.
/**
 * Executes the training graph. Training runs in parallel according to the configured
 * parallelism. In this implementation the data-loading parallelism and the compute
 * (training) parallelism are the same; they can be set independently, but keeping them
 * equal is the usual model and is advised, since dynamic parallelism during training is
 * not yet fully tested in the Twister2 framework.
 *
 * @return Twister2 DataObject{@literal <double[]>} containing the reduced weight vector
 */
public DataObject<double[]> executeTrainingGraph() {
  DataObject<double[]> trainedWeight = null;

  dataStreamer = new InputDataStreamer(this.operationMode, svmJobParameters.isDummy(),
      this.binaryBatchModel);
  svmCompute = new SVMCompute(this.binaryBatchModel, this.operationMode);
  svmReduce = new SVMReduce(this.operationMode);

  trainingBuilder.addSource(Constants.SimpleGraphConfig.DATASTREAMER_SOURCE, dataStreamer,
      dataStreamerParallelism);
  ComputeConnection svmComputeConnection = trainingBuilder.addCompute(
      Constants.SimpleGraphConfig.SVM_COMPUTE, svmCompute, svmComputeParallelism);
  ComputeConnection svmReduceConnection = trainingBuilder.addCompute(
      Constants.SimpleGraphConfig.SVM_REDUCE, svmReduce, reduceParallelism);

  svmComputeConnection.direct(Constants.SimpleGraphConfig.DATASTREAMER_SOURCE)
      .viaEdge(Constants.SimpleGraphConfig.DATA_EDGE)
      .withDataType(MessageTypes.OBJECT);
  // svmReduceConnection
  //     .reduce(Constants.SimpleGraphConfig.SVM_COMPUTE, Constants.SimpleGraphConfig.REDUCE_EDGE,
  //         new ReduceAggregator(), DataType.OBJECT);
  svmReduceConnection.allreduce(Constants.SimpleGraphConfig.SVM_COMPUTE)
      .viaEdge(Constants.SimpleGraphConfig.REDUCE_EDGE)
      .withReductionFunction(new ReduceAggregator())
      .withDataType(MessageTypes.OBJECT);

  trainingBuilder.setMode(operationMode);
  ComputeGraph graph = trainingBuilder.build();
  graph.setGraphName("training-graph");

  ExecutionPlan plan = taskExecutor.plan(graph);
  taskExecutor.addInput(graph, plan, Constants.SimpleGraphConfig.DATASTREAMER_SOURCE,
      Constants.SimpleGraphConfig.INPUT_DATA, trainingData);
  taskExecutor.addInput(graph, plan, Constants.SimpleGraphConfig.DATASTREAMER_SOURCE,
      Constants.SimpleGraphConfig.INPUT_WEIGHT_VECTOR, inputWeightVector);
  taskExecutor.execute(graph, plan);
  LOG.info("Task Graph Executed !!! ");

  if (workerId == 0) {
    trainedWeight = retrieveWeightVectorFromTaskGraph(graph, plan);
    this.trainedWeightVector = trainedWeight;
  }
  return trainedWeight;
}
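The commented-out block above is the older plain-reduce wiring; the live code uses allreduce, so every SVM compute instance receives the reduced weight vector rather than a single reduce task. A rough sketch of the plain-reduce alternative in the fluent style used elsewhere in these examples; that reduce exposes withReductionFunction exactly as allreduce does is an assumption here, not something the snippet confirms:

  // Sketch only: deliver the aggregated result to the single SVM_REDUCE task
  // instead of broadcasting it back to all compute instances.
  svmReduceConnection.reduce(Constants.SimpleGraphConfig.SVM_COMPUTE)
      .viaEdge(Constants.SimpleGraphConfig.REDUCE_EDGE)
      .withReductionFunction(new ReduceAggregator())   // assumed to mirror the allreduce API
      .withDataType(MessageTypes.OBJECT);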
use of edu.iu.dsc.tws.task.impl.ComputeConnection in project twister2 by DSC-SPIDAL.
the class KMeansDataGeneratorTest method testUniqueSchedules1.
@Test
public void testUniqueSchedules1() throws IOException {
  Config config = getConfig();
  String dinputDirectory = "/tmp/testdinput";
  int numFiles = 1;
  int dsize = 20;
  int dimension = 2;
  int parallelismValue = 2;

  KMeansDataGenerator.generateData("txt", new Path(dinputDirectory), numFiles, dsize, 100,
      dimension, config);

  ComputeGraphBuilder computeGraphBuilder = ComputeGraphBuilder.newBuilder(config);
  computeGraphBuilder.setTaskGraphName("kmeans");

  DataObjectSource sourceTask = new DataObjectSource("direct", dinputDirectory);
  DataObjectSink sinkTask = new DataObjectSink();
  computeGraphBuilder.addSource("source", sourceTask, parallelismValue);
  ComputeConnection computeConnection1 = computeGraphBuilder.addCompute("sink", sinkTask,
      parallelismValue);
  computeConnection1.direct("source").viaEdge("direct").withDataType(MessageTypes.OBJECT);
  computeGraphBuilder.setMode(OperationMode.BATCH);

  LocalTextInputPartitioner localTextInputPartitioner = new LocalTextInputPartitioner(
      new Path(dinputDirectory), parallelismValue, config);
  DataSource<String, ?> source = new DataSource<>(config, localTextInputPartitioner,
      parallelismValue);

  InputSplit<String> inputSplit;
  for (int i = 0; i < parallelismValue; i++) {
    inputSplit = source.getNextSplit(i);
    Assert.assertNotNull(inputSplit);
  }
}
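The test only asserts that a split exists for each of the parallelismValue task indices. To check what a split actually contains, the final loop could instead read every record, following the reachedEnd()/nextRecord() loop that Twister2's DataObjectSource uses; the sketch below assumes that read loop (IOException is already covered by the test method's throws clause) and only asserts that some records came back:

  // Sketch: read all records from every split instead of just checking for null.
  int records = 0;
  for (int i = 0; i < parallelismValue; i++) {
    InputSplit<String> split = source.getNextSplit(i);
    while (split != null) {
      while (!split.reachedEnd()) {
        if (split.nextRecord(null) != null) {
          records++;
        }
      }
      split = source.getNextSplit(i);
    }
  }
  Assert.assertTrue(records > 0);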
use of edu.iu.dsc.tws.task.impl.ComputeConnection in project twister2 by DSC-SPIDAL.
the class TaskGraphBuildTest method createGraph.
private ComputeGraph createGraph() {
  TestSource testSource = new TestSource();
  TestSink1 testCompute = new TestSink1();
  TestSink2 testSink = new TestSink2();

  ComputeGraphBuilder computeGraphBuilder = ComputeGraphBuilder.newBuilder(getConfig());
  computeGraphBuilder.addSource("source", testSource, 4);
  ComputeConnection computeConnection = computeGraphBuilder.addCompute("compute", testCompute, 4);
  computeConnection.partition("source")
      .viaEdge(TaskConfigurations.DEFAULT_EDGE)
      .withDataType(MessageTypes.OBJECT);
  ComputeConnection rc = computeGraphBuilder.addCompute("sink", testSink, 1);
  rc.allreduce("compute")
      .viaEdge(TaskConfigurations.DEFAULT_EDGE)
      .withReductionFunction(new Aggregator())
      .withDataType(MessageTypes.OBJECT);

  ComputeGraph graph = computeGraphBuilder.build();
  return graph;
}
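The compute nodes here come from test fixtures (TestSource, TestSink1, TestSink2) whose bodies are not shown. For orientation, a minimal pass-through compute node that forwards its input over the default edge might look roughly like the sketch below; the class name is invented, and the BaseCompute/IMessage types and their import paths are assumed from the Twister2 task API rather than taken from the snippet:

  // Sketch of a pass-through compute node (class name and exact imports are illustrative).
  public static class ForwardingCompute extends BaseCompute {
    @Override
    public boolean execute(IMessage message) {
      // Forward the incoming content downstream over the default edge.
      context.write(TaskConfigurations.DEFAULT_EDGE, message.getContent());
      return true;
    }
  }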
use of edu.iu.dsc.tws.task.impl.ComputeConnection in project twister2 by DSC-SPIDAL.
the class RoundRobinTaskSchedulerTest method createGraphWithGraphConstraints.
private ComputeGraph createGraphWithGraphConstraints(int parallel) {
  TaskSchedulerClassTest.TestSource testSource = new TaskSchedulerClassTest.TestSource();
  TaskSchedulerClassTest.TestSink testSink = new TaskSchedulerClassTest.TestSink();

  ComputeGraphBuilder builder = ComputeGraphBuilder.newBuilder(Config.newBuilder().build());
  builder.addSource("source", testSource, parallel);
  ComputeConnection c = builder.addCompute("sink", testSink, parallel);
  c.reduce("source").viaEdge("edge").withOperation(Op.SUM, MessageTypes.INTEGER_ARRAY);

  builder.setMode(OperationMode.STREAMING);
  builder.addGraphConstraints(Context.TWISTER2_MAX_TASK_INSTANCES_PER_WORKER, "16");
  ComputeGraph graph = builder.build();
  return graph;
}
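This helper only builds the constrained streaming graph; in the surrounding test it would typically be handed to the round-robin scheduler together with a worker plan. A rough sketch of that step follows; the WorkerPlan/Worker construction and the scheduler's initialize/schedule signatures are assumptions based on the Twister2 scheduler API, not shown in this snippet:

  // Sketch: schedule the constrained graph over four workers (API details assumed).
  ComputeGraph graph = createGraphWithGraphConstraints(16);

  WorkerPlan workerPlan = new WorkerPlan();
  for (int i = 0; i < 4; i++) {
    workerPlan.addWorker(new Worker(i));
  }

  RoundRobinTaskScheduler scheduler = new RoundRobinTaskScheduler();
  scheduler.initialize(Config.newBuilder().build());
  TaskSchedulePlan plan = scheduler.schedule(graph, workerPlan);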