Use of edu.iu.dsc.tws.examples.ml.svm.compute.SVMCompute in project twister2 by DSC-SPIDAL.
In the class SvmSgdAdvancedRunner, method executeTrainingGraph:
/**
 * This method executes the training graph.
 * Training is done in parallel, depending on the given parallelism factor.
 * In this implementation the data-loading parallelism and the data-computing (training)
 * parallelism are the same. Keeping them equal is the general model; the parallelism can
 * be increased as needed, but it is advised to keep these values equal, since dynamic
 * parallelism in training has not yet been fully tested in the Twister2 framework.
 *
 * @return Twister2 DataObject{@literal <double[]>} containing the reduced weight vector
 */
public DataObject<double[]> executeTrainingGraph() {
  DataObject<double[]> trainedWeight = null;
  dataStreamer = new InputDataStreamer(this.operationMode, svmJobParameters.isDummy(),
      this.binaryBatchModel);
  svmCompute = new SVMCompute(this.binaryBatchModel, this.operationMode);
  svmReduce = new SVMReduce(this.operationMode);
  // build the training graph: source -> compute -> reduce
  trainingBuilder.addSource(Constants.SimpleGraphConfig.DATASTREAMER_SOURCE, dataStreamer,
      dataStreamerParallelism);
  ComputeConnection svmComputeConnection = trainingBuilder
      .addCompute(Constants.SimpleGraphConfig.SVM_COMPUTE, svmCompute, svmComputeParallelism);
  ComputeConnection svmReduceConnection = trainingBuilder
      .addCompute(Constants.SimpleGraphConfig.SVM_REDUCE, svmReduce, reduceParallelism);
  // data streamer -> SVM compute over a direct edge
  svmComputeConnection.direct(Constants.SimpleGraphConfig.DATASTREAMER_SOURCE)
      .viaEdge(Constants.SimpleGraphConfig.DATA_EDGE)
      .withDataType(MessageTypes.OBJECT);
  // svmReduceConnection
  //     .reduce(Constants.SimpleGraphConfig.SVM_COMPUTE, Constants.SimpleGraphConfig.REDUCE_EDGE,
  //         new ReduceAggregator(), DataType.OBJECT);
  // SVM compute -> SVM reduce over an allreduce edge, combined by ReduceAggregator
  svmReduceConnection.allreduce(Constants.SimpleGraphConfig.SVM_COMPUTE)
      .viaEdge(Constants.SimpleGraphConfig.REDUCE_EDGE)
      .withReductionFunction(new ReduceAggregator())
      .withDataType(MessageTypes.OBJECT);
  trainingBuilder.setMode(operationMode);
  ComputeGraph graph = trainingBuilder.build();
  graph.setGraphName("training-graph");
  ExecutionPlan plan = taskExecutor.plan(graph);
  // feed the training data and the initial weight vector into the source task
  taskExecutor.addInput(graph, plan, Constants.SimpleGraphConfig.DATASTREAMER_SOURCE,
      Constants.SimpleGraphConfig.INPUT_DATA, trainingData);
  taskExecutor.addInput(graph, plan, Constants.SimpleGraphConfig.DATASTREAMER_SOURCE,
      Constants.SimpleGraphConfig.INPUT_WEIGHT_VECTOR, inputWeightVector);
  taskExecutor.execute(graph, plan);
  LOG.info("Task Graph Executed !!! ");
  if (workerId == 0) {
    trainedWeight = retrieveWeightVectorFromTaskGraph(graph, plan);
    this.trainedWeightVector = trainedWeight;
  }
  return trainedWeight;
}
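
The retrieveWeightVectorFromTaskGraph call above is not shown on this page. A minimal sketch of how the reduced weights could be read back from the SVM_REDUCE task, assuming the same taskExecutor.getOutput / DataPartitionConsumer pattern used by SvmSgdRunner below (the helper name and logging are hypothetical):

// Hypothetical sketch, not the actual SvmSgdAdvancedRunner implementation.
private DataObject<double[]> retrieveWeightVectorSketch(ComputeGraph graph, ExecutionPlan plan) {
  // read the output of the reduce task from the executed plan
  DataObject<double[]> reduced = taskExecutor.getOutput(graph, plan,
      Constants.SimpleGraphConfig.SVM_REDUCE);
  DataPartitionConsumer<double[]> consumer = reduced.getPartitions()[0].getConsumer();
  if (consumer.hasNext()) {
    LOG.info("Reduced weight vector : " + Arrays.toString(consumer.next()));
  }
  return reduced;
}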
Use of edu.iu.dsc.tws.examples.ml.svm.compute.SVMCompute in project twister2 by DSC-SPIDAL.
In the class SvmSgdRunner, method initializeExecute:
/**
 * Initializes and executes the training task graph.
 */
public void initializeExecute() {
  ComputeGraphBuilder builder = ComputeGraphBuilder.newBuilder(config);
  this.operationMode = this.svmJobParameters.isStreaming()
      ? OperationMode.STREAMING : OperationMode.BATCH;
  DataStreamer dataStreamer = new DataStreamer(this.operationMode, svmJobParameters.isDummy(),
      this.binaryBatchModel);
  SVMCompute svmCompute = new SVMCompute(this.binaryBatchModel, this.operationMode);
  SVMReduce svmReduce = new SVMReduce(this.operationMode);
  // build the graph: source -> compute -> reduce
  builder.addSource(Constants.SimpleGraphConfig.DATASTREAMER_SOURCE, dataStreamer,
      dataStreamerParallelism);
  ComputeConnection svmComputeConnection = builder
      .addCompute(Constants.SimpleGraphConfig.SVM_COMPUTE, svmCompute, svmComputeParallelism);
  ComputeConnection svmReduceConnection = builder
      .addCompute(Constants.SimpleGraphConfig.SVM_REDUCE, svmReduce, reduceParallelism);
  // data streamer -> SVM compute over a direct edge
  svmComputeConnection.direct(Constants.SimpleGraphConfig.DATASTREAMER_SOURCE)
      .viaEdge(Constants.SimpleGraphConfig.DATA_EDGE)
      .withDataType(MessageTypes.OBJECT);
  // SVM compute -> SVM reduce over a reduce edge, combined by ReduceAggregator
  svmReduceConnection.reduce(Constants.SimpleGraphConfig.SVM_COMPUTE)
      .viaEdge(Constants.SimpleGraphConfig.REDUCE_EDGE)
      .withReductionFunction(new ReduceAggregator())
      .withDataType(MessageTypes.OBJECT);
  builder.setMode(operationMode);
  ComputeGraph graph = builder.build();
  ExecutionPlan plan = taskExecutor.plan(graph);
  taskExecutor.execute(graph, plan);
  LOG.info("Task Graph Executed !!! ");
  if (operationMode.equals(OperationMode.BATCH)) {
    // in batch mode, read the aggregated weight vector from the reduce task's output
    DataObject<double[]> dataSet = taskExecutor.getOutput(graph, plan,
        Constants.SimpleGraphConfig.SVM_REDUCE);
    DataPartition<double[]> values = dataSet.getPartitions()[0];
    DataPartitionConsumer<double[]> dataPartitionConsumer = values.getConsumer();
    // LOG.info("Final Receive : " + dataPartitionConsumer.hasNext());
    while (dataPartitionConsumer.hasNext()) {
      LOG.info("Final Aggregated Values Are:"
          + Arrays.toString(dataPartitionConsumer.next()));
    }
  }
}
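
Both runners hand the REDUCE_EDGE a ReduceAggregator as the reduction function; the structural difference is that SvmSgdAdvancedRunner uses allreduce (every parallel task receives the combined result) while SvmSgdRunner uses reduce. The ReduceAggregator implementation itself is not shown on this page; conceptually it combines the partial double[] weight vectors produced by the parallel SVMCompute instances, for example element-wise. A minimal, hypothetical sketch of such a combination (not the actual ReduceAggregator code):

// Hypothetical sketch only -- the real ReduceAggregator is defined elsewhere in the
// twister2 examples and may combine the vectors differently (e.g. by averaging).
static double[] combineWeightVectors(double[] w1, double[] w2) {
  double[] combined = new double[w1.length];
  for (int i = 0; i < w1.length; i++) {
    combined[i] = w1[i] + w2[i]; // element-wise combination of partial weight vectors
  }
  return combined;
}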