Use of com.alibaba.alink.common.comqueue.communication.AllReduce in project Alink by alibaba.
The class IterativeComQueueTest, method testICQLinearRegression1.
@Test
public void testICQLinearRegression1() throws Exception {
    final int m = 1000;
    final int n = 20;
    List<Tuple2<DenseVector, Double>> data = new ArrayList<>();
    for (int i = 0; i < m; ++i) {
        DenseVector feature = DenseVector.rand(n);
        data.add(Tuple2.of(feature.append(1.0), feature.dot(DenseVector.ones(n))));
    }
    DataSet<Tuple2<DenseVector, Double>> trainData = MLEnvironmentFactory.getDefault()
        .getExecutionEnvironment()
        .fromCollection(data)
        .rebalance();
    DataSet<DenseVector> initialCoefs = MLEnvironmentFactory.getDefault()
        .getExecutionEnvironment()
        .fromCollection(Collections.singletonList(DenseVector.zeros(n + 1)));
    DataSet<Double> sampleCount = DataSetUtils.countElementsPerPartition(trainData)
        .sum(1)
        .map(new MapFunction<Tuple2<Integer, Long>, Double>() {
            private static final long serialVersionUID = 4461084761046487279L;

            @Override
            public Double map(Tuple2<Integer, Long> value) throws Exception {
                return value.f1.doubleValue();
            }
        });
    final double learningRate = 1.0;
    final double decayLrRate = 0.8;
    final double decayStep = 5;
    DataSet<Row> model = new IterativeComQueue()
        .setMaxIter(1000)
        .initWithPartitionedData(TRAIN_DATA, trainData)
        .initWithBroadcastData(COEFS, initialCoefs)
        .initWithBroadcastData(SAMPLE_COUNT, sampleCount)
        .add(new UpdateCoefs(learningRate, decayStep, decayLrRate))
        .add(new AllReduce(COEFS_ARRAY))
        .closeWith(new SerializeModel())
        .exec();
    List<Row> modelL = model.collect();
    Assert.assertEquals(data.get(0).f1, data.get(0).f0.dot((Vector) modelL.get(0).getField(0)), 2.0);
}
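For context, AllReduce element-wise sums a double[] buffer that each worker keeps in its ComContext under the given key (COEFS_ARRAY above). The sketch below shows the pattern, assuming Alink's ComputeFunction/ComContext API; the buffer key and the update logic are illustrative stand-ins, not the real UpdateCoefs class.

import com.alibaba.alink.common.comqueue.ComContext;
import com.alibaba.alink.common.comqueue.ComputeFunction;

public class SketchUpdateCoefs extends ComputeFunction {
    private static final long serialVersionUID = 1L;

    private final double learningRate;
    private final double decayStep;
    private final double decayLrRate;

    public SketchUpdateCoefs(double learningRate, double decayStep, double decayLrRate) {
        this.learningRate = learningRate;
        this.decayStep = decayStep;
        this.decayLrRate = decayLrRate;
    }

    @Override
    public void calc(ComContext context) {
        // Step numbers are 1-based; decay the learning rate every decayStep iterations.
        double lr = learningRate * Math.pow(decayLrRate, Math.floor((context.getStepNo() - 1) / decayStep));
        // "coefsArray" is a hypothetical key standing in for the test's COEFS_ARRAY constant.
        double[] buffer = context.getObj("coefsArray");
        // Each worker would write its lr-scaled local sub-gradient into buffer here.
        // The AllReduce step that follows element-wise sums the buffer across all
        // workers, so every worker sees the same global update at the next step.
    }
}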
Use of com.alibaba.alink.common.comqueue.communication.AllReduce in project Alink by alibaba.
The class LdaTrainBatchOp, method online.
private void online(Tuple2<DataSet<Vector>, DataSet<BaseVectorSummary>> dataAndStat, int numTopic, int numIter,
                    double alpha, double beta, DataSet<DocCountVectorizerModelData> resDocCountModel,
                    int gammaShape, Integer seed) {
    if (beta == -1) {
        beta = 1.0 / numTopic;
    }
    if (alpha == -1) {
        alpha = 1.0 / numTopic;
    }
    double learningOffset = getParams().get(ONLINE_LEARNING_OFFSET);
    double learningDecay = getParams().get(LEARNING_DECAY);
    double subSamplingRate = getParams().get(SUBSAMPLING_RATE);
    boolean optimizeDocConcentration = getParams().get(OPTIMIZE_DOC_CONCENTRATION);
    DataSet<Vector> data = dataAndStat.f0;
    DataSet<Tuple2<Long, Integer>> shape = dataAndStat.f1.map(new MapFunction<BaseVectorSummary, Tuple2<Long, Integer>>() {
        private static final long serialVersionUID = 1305270477796787466L;

        @Override
        public Tuple2<Long, Integer> map(BaseVectorSummary srt) {
            return new Tuple2<>(srt.count(), srt.vectorSize());
        }
    });
    DataSet<Tuple2<DenseMatrix, DenseMatrix>> initModel = data
        .mapPartition(new OnlineInit(numTopic, gammaShape, alpha, seed))
        .name("init lambda")
        .withBroadcastSet(shape, LdaVariable.shape);
    DataSet<Row> ldaModelData = new IterativeComQueue()
        .initWithPartitionedData(LdaVariable.data, data)
        .initWithBroadcastData(LdaVariable.shape, shape)
        .initWithBroadcastData(LdaVariable.initModel, initModel)
        .add(new OnlineCorpusStep(numTopic, subSamplingRate, gammaShape, seed))
        .add(new AllReduce(LdaVariable.wordTopicStat))
        .add(new AllReduce(LdaVariable.logPhatPart))
        .add(new AllReduce(LdaVariable.nonEmptyWordCount))
        .add(new AllReduce(LdaVariable.nonEmptyDocCount))
        .add(new UpdateLambdaAndAlpha(numTopic, learningOffset, learningDecay, subSamplingRate, optimizeDocConcentration, beta))
        .add(new OnlineLogLikelihood(beta, numTopic, numIter, gammaShape, seed))
        .add(new AllReduce(LdaVariable.logLikelihood))
        .closeWith(new BuildOnlineLdaModel(numTopic, beta))
        .setMaxIter(numIter)
        .exec();
    DataSet<Row> model = ldaModelData
        .flatMap(new BuildResModel(seed))
        .withBroadcastSet(resDocCountModel, "DocCountModel");
    setOutput(model, new LdaModelDataConverter().getModelSchema());
    saveWordTopicModelAndPerplexity(model, numTopic, true);
}
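The four AllReduce steps sum the per-worker sufficient statistics (word-topic counts, log-phat parts, and the non-empty word/document counts) so that UpdateLambdaAndAlpha can fold them into the topic-word matrix lambda. Assuming it follows the standard online variational Bayes update of Hoffman et al., which the learningOffset and learningDecay parameters suggest, the blend looks like the sketch below; this is an illustration, not Alink's actual code.

public class OnlineLdaBlendSketch {
    /** rho_t = (learningOffset + stepNo)^(-learningDecay): later minibatches move lambda less. */
    public static void blendLambda(double[][] lambda, double[][] lambdaHat,
                                   long stepNo, double learningOffset, double learningDecay) {
        double rho = Math.pow(learningOffset + stepNo, -learningDecay);
        for (int k = 0; k < lambda.length; ++k) {
            for (int w = 0; w < lambda[k].length; ++w) {
                // Convex combination of the old lambda and the minibatch estimate.
                lambda[k][w] = (1 - rho) * lambda[k][w] + rho * lambdaHat[k][w];
            }
        }
    }
}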
Use of com.alibaba.alink.common.comqueue.communication.AllReduce in project Alink by alibaba.
The class Lbfgs, method optimize.
/**
 * Optimizer API.
 *
 * @return the coefficients of the linear problem.
 */
@Override
public DataSet<Tuple2<DenseVector, double[]>> optimize() {
    // Get parameters.
    int maxIter = params.get(LinearTrainParams.MAX_ITER);
    int numSearchStep = params.get(HasNumSearchStepDv4.NUM_SEARCH_STEP);
    checkInitCoef();
    /**
     * Solve the problem iteratively:
     * trainData is the distributed samples;
     * initCoef is the initial model coefficients, broadcast to every worker;
     * objFuncSet is the objective function in DataSet format.
     *
     * .add(new PreallocateCoefficient(OptimVariable.currentCoef))         allocate memory for the current coefficients
     * .add(new PreallocateCoefficient(OptimVariable.minCoef))             allocate memory for the min-loss coefficients
     * .add(new PreallocateConvergenceInfo(OptimVariable.convergenceInfo)) allocate memory for the loss curve
     * .add(new PreallocateVector(OptimVariable.dir ...))                  allocate memory for the descent direction
     * .add(new PreallocateVector(OptimVariable.grad))                     allocate memory for the gradient
     * .add(new PreallocateSkyk())                                         allocate memory for sK and yK
     * .add(new CalcGradient())                                            calculate the local sub-gradient
     * .add(new AllReduce(OptimVariable.gradAllReduce))                    sum all sub-gradients with AllReduce
     * .add(new CalDirection())                                            use the summed gradient to compute the descent direction
     * .add(new CalcLosses(OptimMethod.LBFGS, numSearchStep))              calculate local losses for the line search
     * .add(new AllReduce(OptimVariable.lossAllReduce))                    sum all losses with AllReduce
     * .add(new UpdateModel(...))                                          update the coefficients
     * .setCompareCriterionOfNode0(new IterTermination())                  decide whether to stop the iteration
     */
    DataSet<Row> model = new IterativeComQueue()
        .initWithPartitionedData(OptimVariable.trainData, trainData)
        .initWithBroadcastData(OptimVariable.model, coefVec)
        .initWithBroadcastData(OptimVariable.objFunc, objFuncSet)
        .add(new PreallocateCoefficient(OptimVariable.currentCoef))
        .add(new PreallocateCoefficient(OptimVariable.minCoef))
        .add(new PreallocateConvergenceInfo(OptimVariable.convergenceInfo, maxIter))
        .add(new PreallocateVector(OptimVariable.dir, new double[] { 0.0, OptimVariable.learningRate }))
        .add(new PreallocateVector(OptimVariable.grad))
        .add(new PreallocateSkyk(OptimVariable.numCorrections))
        .add(new CalcGradient())
        .add(new AllReduce(OptimVariable.gradAllReduce))
        .add(new CalDirection(OptimVariable.numCorrections))
        .add(new CalcLosses(LinearTrainParams.OptimMethod.LBFGS, numSearchStep))
        .add(new AllReduce(OptimVariable.lossAllReduce))
        .add(new UpdateModel(params, OptimVariable.grad, LinearTrainParams.OptimMethod.LBFGS, numSearchStep))
        .setCompareCriterionOfNode0(new IterTermination())
        .closeWith(new OutputModel())
        .setMaxIter(maxIter)
        .exec();
    return model.mapPartition(new ParseRowModel());
}
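CalDirection consumes the AllReduce-summed gradient together with the stored sK/yK correction pairs to produce the quasi-Newton descent direction. The classic L-BFGS two-loop recursion it presumably implements (Nocedal & Wright, Algorithm 7.4) is sketched below; this is the generic textbook version, not Alink's CalDirection.

public class LbfgsDirectionSketch {
    /**
     * s[i] = x_{i+1} - x_i, y[i] = g_{i+1} - g_i, most recent correction last.
     * Assumes at least one stored correction pair.
     */
    public static double[] twoLoopDirection(double[] grad, double[][] s, double[][] y) {
        int m = s.length;
        double[] q = grad.clone();
        double[] alpha = new double[m];
        for (int i = m - 1; i >= 0; --i) {
            double rho = 1.0 / dot(y[i], s[i]);
            alpha[i] = rho * dot(s[i], q);
            axpy(-alpha[i], y[i], q);          // q -= alpha_i * y_i
        }
        // Initial Hessian scaling H0 = gamma * I with gamma = (s·y) / (y·y).
        double gamma = dot(s[m - 1], y[m - 1]) / dot(y[m - 1], y[m - 1]);
        for (int j = 0; j < q.length; ++j) {
            q[j] *= gamma;
        }
        for (int i = 0; i < m; ++i) {
            double rho = 1.0 / dot(y[i], s[i]);
            double beta = rho * dot(y[i], q);
            axpy(alpha[i] - beta, s[i], q);    // q += (alpha_i - beta) * s_i
        }
        for (int j = 0; j < q.length; ++j) {
            q[j] = -q[j];                      // descent direction d = -H * g
        }
        return q;
    }

    static double dot(double[] a, double[] b) {
        double sum = 0.0;
        for (int i = 0; i < a.length; ++i) { sum += a[i] * b[i]; }
        return sum;
    }

    static void axpy(double a, double[] x, double[] target) {
        for (int i = 0; i < x.length; ++i) { target[i] += a * x[i]; }
    }
}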
Use of com.alibaba.alink.common.comqueue.communication.AllReduce in project Alink by alibaba.
The class Newton, method optimize.
/**
 * Optimizer API.
 *
 * @return the coefficients of the linear problem.
 */
@Override
public DataSet<Tuple2<DenseVector, double[]>> optimize() {
    // Get parameters.
    int maxIter = params.get(LinearTrainParams.MAX_ITER);
    double epsilon = params.get(LinearTrainParams.EPSILON);
    checkInitCoef();
    /**
     * Solve the problem iteratively:
     * trainData is the distributed samples;
     * initCoef is the initial model coefficients, broadcast to every worker;
     * objFuncSet is the objective function in DataSet format.
     *
     * .add(new PreallocateCoefficient(OptimVariable.currentCoef))         allocate memory for the current coefficients
     * .add(new PreallocateCoefficient(OptimVariable.minCoef))             allocate memory for the min-loss coefficients
     * .add(new PreallocateConvergenceInfo(OptimVariable.convergenceInfo)) allocate memory for the loss curve
     * .add(new PreallocateVector(OptimVariable.dir ...))                  allocate memory for the gradient
     * .add(new PreallocateMatrix(OptimVariable.hessian, ...))             allocate memory for the Hessian matrix
     * .add(new CalcGradientAndHessian())                                  calculate the local sub-gradient and Hessian
     * .add(new AllReduce(OptimVariable.gradHessAllReduce))                sum all sub-gradients and Hessians with AllReduce
     * .add(new GetGradeintAndHessian())                                   read back the summed gradient and Hessian
     * .add(new UpdateModel(maxIter, epsilon))                             update the coefficients with the gradient and Hessian
     * .setCompareCriterionOfNode0(new IterTermination())                  decide whether to stop the iteration
     */
    DataSet<Row> model = new IterativeComQueue()
        .initWithPartitionedData(OptimVariable.trainData, trainData)
        .initWithBroadcastData(OptimVariable.model, coefVec)
        .initWithBroadcastData(OptimVariable.objFunc, objFuncSet)
        .add(new PreallocateCoefficient(OptimVariable.currentCoef))
        .add(new PreallocateCoefficient(OptimVariable.minCoef))
        .add(new PreallocateConvergenceInfo(OptimVariable.convergenceInfo, maxIter))
        .add(new PreallocateVector(OptimVariable.dir, new double[2]))
        .add(new PreallocateMatrix(OptimVariable.hessian, MAX_FEATURE_NUM))
        .add(new CalcGradientAndHessian())
        .add(new AllReduce(OptimVariable.gradHessAllReduce))
        .add(new GetGradeintAndHessian())
        .add(new UpdateModel(maxIter, epsilon))
        .setCompareCriterionOfNode0(new IterTermination())
        .closeWith(new OutputModel())
        .setMaxIter(maxIter)
        .exec();
    return model.mapPartition(new ParseRowModel());
}
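Here a single AllReduce (gradHessAllReduce) carries both the summed gradient and the summed Hessian, after which the Newton step solves H * d = g and moves the coefficients by d. Below is a plain Gaussian-elimination sketch of that solve, for illustration only; it is not Alink's UpdateModel, which additionally tracks loss and convergence.

public class NewtonStepSketch {
    /** Solves hessian * d = grad by Gaussian elimination with partial pivoting. */
    public static double[] solveNewtonStep(double[][] hessian, double[] grad) {
        int n = grad.length;
        double[][] a = new double[n][];
        for (int i = 0; i < n; ++i) {
            a[i] = hessian[i].clone();      // work on copies, keep inputs intact
        }
        double[] d = grad.clone();
        for (int col = 0; col < n; ++col) {
            // Partial pivoting for numerical stability.
            int pivot = col;
            for (int r = col + 1; r < n; ++r) {
                if (Math.abs(a[r][col]) > Math.abs(a[pivot][col])) { pivot = r; }
            }
            double[] rowTmp = a[col]; a[col] = a[pivot]; a[pivot] = rowTmp;
            double valTmp = d[col]; d[col] = d[pivot]; d[pivot] = valTmp;
            // Eliminate the column below the pivot.
            for (int r = col + 1; r < n; ++r) {
                double factor = a[r][col] / a[col][col];
                for (int c = col; c < n; ++c) { a[r][c] -= factor * a[col][c]; }
                d[r] -= factor * d[col];
            }
        }
        // Back substitution.
        for (int row = n - 1; row >= 0; --row) {
            for (int c = row + 1; c < n; ++c) { d[row] -= a[row][c] * d[c]; }
            d[row] /= a[row][row];
        }
        return d;  // Newton update: x_{k+1} = x_k - d
    }
}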
Use of com.alibaba.alink.common.comqueue.communication.AllReduce in project Alink by alibaba.
The class Owlqn, method optimize.
/**
 * Optimizer API.
 *
 * @return the coefficients of the linear problem.
 */
@Override
public DataSet<Tuple2<DenseVector, double[]>> optimize() {
    // Get parameters.
    int maxIter = params.get(LinearTrainParams.MAX_ITER);
    checkInitCoef();
    int numSearchStep = params.get(HasNumSearchStepDv4.NUM_SEARCH_STEP);
    /**
     * Solve the problem iteratively:
     * trainData is the distributed samples;
     * initCoef is the initial model coefficients, broadcast to every worker;
     * objFuncSet is the objective function in DataSet format.
     *
     * .add(new PreallocateCoefficient(OptimVariable.currentCoef))         allocate memory for the current coefficients
     * .add(new PreallocateCoefficient(OptimVariable.minCoef))             allocate memory for the min-loss coefficients
     * .add(new PreallocateConvergenceInfo(OptimVariable.convergenceInfo)) allocate memory for the loss curve
     * .add(new PreallocateVector(OptimVariable.dir ...))                  allocate memory for the descent direction
     * .add(new PreallocateVector(OptimVariable.grad))                     allocate memory for the gradient
     * .add(new PreallocateVector(OptimVariable.pseGrad))                  allocate memory for the pseudo-gradient
     * .add(new PreallocateSkyk())                                         allocate memory for sK and yK
     * .add(new CalcGradient())                                            calculate the local sub-gradient
     * .add(new AllReduce(OptimVariable.gradAllReduce))                    sum all sub-gradients with AllReduce
     * .add(new CalDirection())                                            use the summed gradient to compute the descent direction
     * .add(new CalcLosses(OptimMethod.OWLQN, numSearchStep))              calculate local losses for the line search
     * .add(new AllReduce(OptimVariable.lossAllReduce))                    sum all losses with AllReduce
     * .add(new UpdateModel(...))                                          update the coefficients
     * .setCompareCriterionOfNode0(new IterTermination())                  decide whether to stop the iteration
     */
    DataSet<Row> model = new IterativeComQueue()
        .initWithPartitionedData(OptimVariable.trainData, trainData)
        .initWithBroadcastData(OptimVariable.model, coefVec)
        .initWithBroadcastData(OptimVariable.objFunc, objFuncSet)
        .add(new PreallocateCoefficient(OptimVariable.currentCoef))
        .add(new PreallocateCoefficient(OptimVariable.minCoef))
        .add(new PreallocateConvergenceInfo(OptimVariable.convergenceInfo, maxIter))
        .add(new PreallocateVector(OptimVariable.dir, new double[] { 0.0, OptimVariable.learningRate }))
        .add(new PreallocateVector(OptimVariable.grad))
        .add(new PreallocateVector(OptimVariable.pseGrad))
        .add(new PreallocateSkyk(OptimVariable.numCorrections))
        .add(new CalcGradient())
        .add(new AllReduce(OptimVariable.gradAllReduce))
        .add(new CalDirection(params.get(HasL1.L_1), OptimVariable.numCorrections))
        .add(new CalcLosses(LinearTrainParams.OptimMethod.OWLQN, numSearchStep))
        .add(new AllReduce(OptimVariable.lossAllReduce))
        .add(new UpdateModel(params, OptimVariable.grad, LinearTrainParams.OptimMethod.OWLQN, numSearchStep))
        .setCompareCriterionOfNode0(new IterTermination())
        .closeWith(new OutputModel())
        .setMaxIter(maxIter)
        .exec();
    return model.mapPartition(new ParseRowModel());
}
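OWL-QN is L-BFGS adapted to L1 regularization: the extra pseGrad buffer holds a pseudo-gradient that substitutes for the true gradient at the points where the L1 term is non-differentiable (Andrew & Gao, 2007). The sketch below shows that rule as an illustration, not Alink's actual pseudo-gradient code.

public class OwlqnPseudoGradientSketch {
    /** Pseudo-gradient of f(x) + l1 * ||x||_1 given the gradient of the smooth part f. */
    public static double[] pseudoGradient(double[] coef, double[] grad, double l1) {
        double[] pseudo = new double[coef.length];
        for (int i = 0; i < coef.length; ++i) {
            if (coef[i] > 0) {
                pseudo[i] = grad[i] + l1;
            } else if (coef[i] < 0) {
                pseudo[i] = grad[i] - l1;
            } else if (grad[i] + l1 < 0) {
                pseudo[i] = grad[i] + l1;   // right derivative is negative: move right
            } else if (grad[i] - l1 > 0) {
                pseudo[i] = grad[i] - l1;   // left derivative is positive: move left
            } else {
                pseudo[i] = 0.0;            // subgradient interval contains 0: stay at 0
            }
        }
        return pseudo;
    }
}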