Example 1 with AllReduce

Use of com.alibaba.alink.common.comqueue.communication.AllReduce in project Alink by alibaba.

From class IterativeComQueueTest, method testICQLinearRegression1.

@Test
public void testICQLinearRegression1() throws Exception {
    final int m = 1000;
    final int n = 20;
    List<Tuple2<DenseVector, Double>> data = new ArrayList<>();
    for (int i = 0; i < m; ++i) {
        DenseVector feature = DenseVector.rand(n);
        data.add(Tuple2.of(feature.append(1.0), feature.dot(DenseVector.ones(n))));
    }
    DataSet<Tuple2<DenseVector, Double>> trainData = MLEnvironmentFactory.getDefault()
        .getExecutionEnvironment()
        .fromCollection(data)
        .rebalance();
    DataSet<DenseVector> initialCoefs = MLEnvironmentFactory.getDefault()
        .getExecutionEnvironment()
        .fromCollection(Collections.singletonList(DenseVector.zeros(n + 1)));
    DataSet<Double> sampleCount = DataSetUtils.countElementsPerPartition(trainData).sum(1).map(new MapFunction<Tuple2<Integer, Long>, Double>() {

        private static final long serialVersionUID = 4461084761046487279L;

        @Override
        public Double map(Tuple2<Integer, Long> value) throws Exception {
            return value.f1.doubleValue();
        }
    });
    final double learningRate = 1.0;
    final double decayLrRate = 0.8;
    final double decayStep = 5;
    DataSet<Row> model = new IterativeComQueue()
        .setMaxIter(1000)
        .initWithPartitionedData(TRAIN_DATA, trainData)
        .initWithBroadcastData(COEFS, initialCoefs)
        .initWithBroadcastData(SAMPLE_COUNT, sampleCount)
        .add(new UpdateCoefs(learningRate, decayStep, decayLrRate))
        .add(new AllReduce(COEFS_ARRAY))
        .closeWith(new SerializeModel())
        .exec();
    List<Row> modelL = model.collect();
    Assert.assertEquals(data.get(0).f1, data.get(0).f0.dot((Vector) modelL.get(0).getField(0)), 2.0);
}
Also used: ArrayList (java.util.ArrayList), Vector (com.alibaba.alink.common.linalg.Vector), DenseVector (com.alibaba.alink.common.linalg.DenseVector), AllReduce (com.alibaba.alink.common.comqueue.communication.AllReduce), Tuple2 (org.apache.flink.api.java.tuple.Tuple2), Row (org.apache.flink.types.Row), ICQTest (com.alibaba.alink.testutil.categories.ICQTest), Test (org.junit.Test)
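
Every example on this page follows the same handshake: a ComputeFunction writes a double[] buffer into the shared ComContext under a well-known key, and an AllReduce step registered with that key sums the buffer element-wise across all workers before the next step runs. The following is a minimal sketch of that pattern, assuming the public ComputeFunction, CompleteResultFunction, and ComContext base classes from com.alibaba.alink.common.comqueue and a default MLEnvironment; the class names and the SUM_BUFFER key are illustrative, not part of Alink, and the sketch has not been verified against a running cluster.

import java.util.Collections;
import java.util.List;

import org.apache.flink.types.Row;

import com.alibaba.alink.common.comqueue.ComContext;
import com.alibaba.alink.common.comqueue.CompleteResultFunction;
import com.alibaba.alink.common.comqueue.ComputeFunction;
import com.alibaba.alink.common.comqueue.IterativeComQueue;
import com.alibaba.alink.common.comqueue.communication.AllReduce;

public class AllReduceSketch {

    // Hypothetical buffer key; the examples on this page use constants such as COEFS_ARRAY.
    private static final String SUM_BUFFER = "sumBuffer";

    // Each worker fills a local double[]; the AllReduce step that follows
    // replaces its contents with the element-wise sum over all workers.
    private static class LocalContribution extends ComputeFunction {
        @Override
        public void calc(ComContext context) {
            double[] buffer = context.getObj(SUM_BUFFER);
            if (buffer == null) {
                buffer = new double[1];
                context.putObj(SUM_BUFFER, buffer);
            }
            buffer[0] = context.getTaskId() + 1.0;
        }
    }

    // Runs after the final iteration; only node 0 emits the reduced value.
    private static class EmitSum extends CompleteResultFunction {
        @Override
        public List<Row> calc(ComContext context) {
            if (context.getTaskId() != 0) {
                return null;
            }
            double[] buffer = context.getObj(SUM_BUFFER);
            return Collections.singletonList(Row.of(buffer[0]));
        }
    }

    public static void main(String[] args) throws Exception {
        List<Row> result = new IterativeComQueue()
            .add(new LocalContribution())
            .add(new AllReduce(SUM_BUFFER))
            .closeWith(new EmitSum())
            .setMaxIter(1)
            .exec()
            .collect();
        // With N workers this prints 1 + 2 + ... + N.
        System.out.println(result.get(0));
    }
}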

Example 2 with AllReduce

Use of com.alibaba.alink.common.comqueue.communication.AllReduce in project Alink by alibaba.

From class LdaTrainBatchOp, method online.

private void online(Tuple2<DataSet<Vector>, DataSet<BaseVectorSummary>> dataAndStat, int numTopic, int numIter, double alpha, double beta, DataSet<DocCountVectorizerModelData> resDocCountModel, int gammaShape, Integer seed) {
    if (beta == -1) {
        beta = 1.0 / numTopic;
    }
    if (alpha == -1) {
        alpha = 1.0 / numTopic;
    }
    double learningOffset = getParams().get(ONLINE_LEARNING_OFFSET);
    double learningDecay = getParams().get(LEARNING_DECAY);
    double subSamplingRate = getParams().get(SUBSAMPLING_RATE);
    boolean optimizeDocConcentration = getParams().get(OPTIMIZE_DOC_CONCENTRATION);
    DataSet<Vector> data = dataAndStat.f0;
    DataSet<Tuple2<Long, Integer>> shape = dataAndStat.f1.map(new MapFunction<BaseVectorSummary, Tuple2<Long, Integer>>() {

        private static final long serialVersionUID = 1305270477796787466L;

        @Override
        public Tuple2<Long, Integer> map(BaseVectorSummary srt) {
            return new Tuple2<>(srt.count(), srt.vectorSize());
        }
    });
    DataSet<Tuple2<DenseMatrix, DenseMatrix>> initModel = data
        .mapPartition(new OnlineInit(numTopic, gammaShape, alpha, seed))
        .name("init lambda")
        .withBroadcastSet(shape, LdaVariable.shape);
    DataSet<Row> ldaModelData = new IterativeComQueue()
        .initWithPartitionedData(LdaVariable.data, data)
        .initWithBroadcastData(LdaVariable.shape, shape)
        .initWithBroadcastData(LdaVariable.initModel, initModel)
        .add(new OnlineCorpusStep(numTopic, subSamplingRate, gammaShape, seed))
        .add(new AllReduce(LdaVariable.wordTopicStat))
        .add(new AllReduce(LdaVariable.logPhatPart))
        .add(new AllReduce(LdaVariable.nonEmptyWordCount))
        .add(new AllReduce(LdaVariable.nonEmptyDocCount))
        .add(new UpdateLambdaAndAlpha(numTopic, learningOffset, learningDecay,
            subSamplingRate, optimizeDocConcentration, beta))
        .add(new OnlineLogLikelihood(beta, numTopic, numIter, gammaShape, seed))
        .add(new AllReduce(LdaVariable.logLikelihood))
        .closeWith(new BuildOnlineLdaModel(numTopic, beta))
        .setMaxIter(numIter)
        .exec();
    DataSet<Row> model = ldaModelData.flatMap(new BuildResModel(seed)).withBroadcastSet(resDocCountModel, "DocCountModel");
    setOutput(model, new LdaModelDataConverter().getModelSchema());
    saveWordTopicModelAndPerplexity(model, numTopic, true);
}
Also used: IterativeComQueue (com.alibaba.alink.common.comqueue.IterativeComQueue), AllReduce (com.alibaba.alink.common.comqueue.communication.AllReduce), UpdateLambdaAndAlpha (com.alibaba.alink.operator.common.clustering.lda.UpdateLambdaAndAlpha), OnlineLogLikelihood (com.alibaba.alink.operator.common.clustering.lda.OnlineLogLikelihood), BuildOnlineLdaModel (com.alibaba.alink.operator.common.clustering.lda.BuildOnlineLdaModel), Tuple2 (org.apache.flink.api.java.tuple.Tuple2), OnlineCorpusStep (com.alibaba.alink.operator.common.clustering.lda.OnlineCorpusStep), LdaModelDataConverter (com.alibaba.alink.operator.common.clustering.LdaModelDataConverter), BaseVectorSummary (com.alibaba.alink.operator.common.statistics.basicstatistic.BaseVectorSummary), Row (org.apache.flink.types.Row), Vector (com.alibaba.alink.common.linalg.Vector), SparseVector (com.alibaba.alink.common.linalg.SparseVector)
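
Each of the four consecutive AllReduce stages in this queue sums one named double[] buffer (word-topic statistics, log-phat partials, non-empty word and document counts), so the stage that follows sees global totals rather than per-worker partials. As a hedged illustration of how a step downstream of the AllReduce stages can read those totals through the ComContext accessors (ReadReducedStats and the avgMassPerDoc key are hypothetical stand-ins, not Alink classes):

import com.alibaba.alink.common.comqueue.ComContext;
import com.alibaba.alink.common.comqueue.ComputeFunction;
import com.alibaba.alink.operator.common.clustering.lda.LdaVariable;

// Illustrative stand-in for a post-AllReduce step such as UpdateLambdaAndAlpha:
// by the time calc() runs, these buffers hold sums over all workers.
public class ReadReducedStats extends ComputeFunction {
    @Override
    public void calc(ComContext context) {
        double[] wordTopicStat = context.getObj(LdaVariable.wordTopicStat);
        double[] nonEmptyDocCount = context.getObj(LdaVariable.nonEmptyDocCount);
        double docs = nonEmptyDocCount[0];
        double statMass = 0.0;
        for (double v : wordTopicStat) {
            statMass += v;
        }
        // A made-up derived quantity, stored for later steps under a hypothetical key.
        double avgMassPerDoc = docs > 0 ? statMass / docs : 0.0;
        context.putObj("avgMassPerDoc", new double[] {avgMassPerDoc});
    }
}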

Example 3 with AllReduce

Use of com.alibaba.alink.common.comqueue.communication.AllReduce in project Alink by alibaba.

From class Lbfgs, method optimize.

/**
 * Optimizer API.
 *
 * @return the coefficients of the linear problem.
 */
@Override
public DataSet<Tuple2<DenseVector, double[]>> optimize() {
    // get parameters.
    int maxIter = params.get(LinearTrainParams.MAX_ITER);
    int numSearchStep = params.get(HasNumSearchStepDv4.NUM_SEARCH_STEP);
    checkInitCoef();
    /**
     * Solve the problem iteratively.
     * trainData is the distributed set of samples.
     * initCoef is the initial model coefficient, which will be broadcast to every worker.
     * objFuncSet is the objective function in DataSet format.
     *
     * .add(new PreallocateCoefficient(OptimVariable.currentCoef))   allocate memory for the current coefficient
     * .add(new PreallocateCoefficient(OptimVariable.minCoef))       allocate memory for the min-loss coefficient
     * .add(new PreallocateConvergenceInfo(OptimVariable.convergenceInfo, maxIter))  allocate memory for loss values
     * .add(new PreallocateVector(OptimVariable.dir ...))            allocate memory for the descent direction
     * .add(new PreallocateVector(OptimVariable.grad))               allocate memory for the gradient
     * .add(new PreallocateSkyk(...))                                allocate memory for sK and yK
     * .add(new CalcGradient())                                      calculate the local sub-gradient
     * .add(new AllReduce(OptimVariable.gradAllReduce))              sum the sub-gradients of all workers with allReduce
     * .add(new CalDirection(...))                                   use the summed gradient to compute the descent direction
     * .add(new CalcLosses(OptimMethod.LBFGS, numSearchStep))        calculate local losses for the line search
     * .add(new AllReduce(OptimVariable.lossAllReduce))              sum the losses of all workers with allReduce
     * .add(new UpdateModel(params, ...))                            update the coefficient
     * .setCompareCriterionOfNode0(new IterTermination())            check the stopping criterion on node 0
     */
    DataSet<Row> model = new IterativeComQueue()
        .initWithPartitionedData(OptimVariable.trainData, trainData)
        .initWithBroadcastData(OptimVariable.model, coefVec)
        .initWithBroadcastData(OptimVariable.objFunc, objFuncSet)
        .add(new PreallocateCoefficient(OptimVariable.currentCoef))
        .add(new PreallocateCoefficient(OptimVariable.minCoef))
        .add(new PreallocateConvergenceInfo(OptimVariable.convergenceInfo, maxIter))
        .add(new PreallocateVector(OptimVariable.dir, new double[] {0.0, OptimVariable.learningRate}))
        .add(new PreallocateVector(OptimVariable.grad))
        .add(new PreallocateSkyk(OptimVariable.numCorrections))
        .add(new CalcGradient())
        .add(new AllReduce(OptimVariable.gradAllReduce))
        .add(new CalDirection(OptimVariable.numCorrections))
        .add(new CalcLosses(LinearTrainParams.OptimMethod.LBFGS, numSearchStep))
        .add(new AllReduce(OptimVariable.lossAllReduce))
        .add(new UpdateModel(params, OptimVariable.grad, LinearTrainParams.OptimMethod.LBFGS, numSearchStep))
        .setCompareCriterionOfNode0(new IterTermination())
        .closeWith(new OutputModel())
        .setMaxIter(maxIter)
        .exec();
    return model.mapPartition(new ParseRowModel());
}
Also used: IterTermination (com.alibaba.alink.operator.common.optim.subfunc.IterTermination), PreallocateCoefficient (com.alibaba.alink.operator.common.optim.subfunc.PreallocateCoefficient), IterativeComQueue (com.alibaba.alink.common.comqueue.IterativeComQueue), AllReduce (com.alibaba.alink.common.comqueue.communication.AllReduce), CalcGradient (com.alibaba.alink.operator.common.optim.subfunc.CalcGradient), OutputModel (com.alibaba.alink.operator.common.optim.subfunc.OutputModel), ParseRowModel (com.alibaba.alink.operator.common.optim.subfunc.ParseRowModel), PreallocateVector (com.alibaba.alink.operator.common.optim.subfunc.PreallocateVector), CalcLosses (com.alibaba.alink.operator.common.optim.subfunc.CalcLosses), PreallocateConvergenceInfo (com.alibaba.alink.operator.common.optim.subfunc.PreallocateConvergenceInfo), PreallocateSkyk (com.alibaba.alink.operator.common.optim.subfunc.PreallocateSkyk), Row (org.apache.flink.types.Row), UpdateModel (com.alibaba.alink.operator.common.optim.subfunc.UpdateModel)
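
ParseRowModel turns the queue's output rows back into a Tuple2 of the trained coefficient vector and the convergence information recorded during the iteration. A short usage sketch, assuming a hypothetical Lbfgs instance named lbfgs constructed elsewhere and the same imports the examples on this page already use:

DataSet<Tuple2<DenseVector, double[]>> optimized = lbfgs.optimize();
optimized.map(new MapFunction<Tuple2<DenseVector, double[]>, String>() {
    @Override
    public String map(Tuple2<DenseVector, double[]> value) {
        // f0: trained coefficients; f1: loss curve collected via
        // PreallocateConvergenceInfo and UpdateModel during the iteration.
        return "coef size = " + value.f0.size()
            + ", convergence entries = " + value.f1.length;
    }
}).print();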

Example 4 with AllReduce

Use of com.alibaba.alink.common.comqueue.communication.AllReduce in project Alink by alibaba.

From class Newton, method optimize.

/**
 * Optimizer API.
 *
 * @return the coefficients of the linear problem.
 */
@Override
public DataSet<Tuple2<DenseVector, double[]>> optimize() {
    // get parameters.
    int maxIter = params.get(LinearTrainParams.MAX_ITER);
    double epsilon = params.get(LinearTrainParams.EPSILON);
    checkInitCoef();
    /**
     * Solve the problem iteratively.
     * trainData is the distributed set of samples.
     * initCoef is the initial model coefficient, which will be broadcast to every worker.
     * objFuncSet is the objective function in DataSet format.
     *
     * .add(new PreallocateCoefficient(OptimVariable.currentCoef))   allocate memory for the current coefficient
     * .add(new PreallocateCoefficient(OptimVariable.minCoef))       allocate memory for the min-loss coefficient
     * .add(new PreallocateConvergenceInfo(OptimVariable.convergenceInfo, maxIter))  allocate memory for loss values
     * .add(new PreallocateVector(OptimVariable.dir ...))            allocate memory for the gradient
     * .add(new PreallocateMatrix(OptimVariable.hessian, ...))       allocate memory for the Hessian matrix
     * .add(new CalcGradientAndHessian())                            calculate the local sub-gradient and Hessian
     * .add(new AllReduce(OptimVariable.gradHessAllReduce))          sum the sub-gradients and Hessians of all workers with allReduce
     * .add(new GetGradeintAndHessian())                             fetch the summed gradient and Hessian
     * .add(new UpdateModel(maxIter, epsilon))                       update the coefficient with gradient and Hessian
     * .setCompareCriterionOfNode0(new IterTermination())            check the stopping criterion on node 0
     */
    DataSet<Row> model = new IterativeComQueue()
        .initWithPartitionedData(OptimVariable.trainData, trainData)
        .initWithBroadcastData(OptimVariable.model, coefVec)
        .initWithBroadcastData(OptimVariable.objFunc, objFuncSet)
        .add(new PreallocateCoefficient(OptimVariable.currentCoef))
        .add(new PreallocateCoefficient(OptimVariable.minCoef))
        .add(new PreallocateConvergenceInfo(OptimVariable.convergenceInfo, maxIter))
        .add(new PreallocateVector(OptimVariable.dir, new double[2]))
        .add(new PreallocateMatrix(OptimVariable.hessian, MAX_FEATURE_NUM))
        .add(new CalcGradientAndHessian())
        .add(new AllReduce(OptimVariable.gradHessAllReduce))
        .add(new GetGradeintAndHessian())
        .add(new UpdateModel(maxIter, epsilon))
        .setCompareCriterionOfNode0(new IterTermination())
        .closeWith(new OutputModel())
        .setMaxIter(maxIter)
        .exec();
    return model.mapPartition(new ParseRowModel());
}
Also used: IterTermination (com.alibaba.alink.operator.common.optim.subfunc.IterTermination), PreallocateCoefficient (com.alibaba.alink.operator.common.optim.subfunc.PreallocateCoefficient), IterativeComQueue (com.alibaba.alink.common.comqueue.IterativeComQueue), PreallocateMatrix (com.alibaba.alink.operator.common.optim.subfunc.PreallocateMatrix), AllReduce (com.alibaba.alink.common.comqueue.communication.AllReduce), OutputModel (com.alibaba.alink.operator.common.optim.subfunc.OutputModel), ParseRowModel (com.alibaba.alink.operator.common.optim.subfunc.ParseRowModel), PreallocateVector (com.alibaba.alink.operator.common.optim.subfunc.PreallocateVector), PreallocateConvergenceInfo (com.alibaba.alink.operator.common.optim.subfunc.PreallocateConvergenceInfo), Row (org.apache.flink.types.Row)
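
setCompareCriterionOfNode0 lets node 0 stop the loop before maxIter is reached; IterTermination is the stock criterion used here. Below is a minimal sketch of a custom criterion, assuming the CompareCriterionFunction base class in com.alibaba.alink.common.comqueue returns true to terminate; the lossHistory key and the delta rule are hypothetical, and IterTermination's actual logic may differ.

import com.alibaba.alink.common.comqueue.ComContext;
import com.alibaba.alink.common.comqueue.CompareCriterionFunction;

// Hypothetical criterion: stop once the loss drop between two consecutive
// iterations falls below a tolerance. Buffer name and layout are illustrative.
public class LossDeltaTermination extends CompareCriterionFunction {
    private final double tol;

    public LossDeltaTermination(double tol) {
        this.tol = tol;
    }

    @Override
    public boolean calc(ComContext context) {
        double[] losses = context.getObj("lossHistory");
        int step = context.getStepNo();
        if (losses == null || step < 2) {
            return false; // not enough history yet, keep iterating
        }
        return Math.abs(losses[step - 1] - losses[step - 2]) < tol;
    }
}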

Example 5 with AllReduce

Use of com.alibaba.alink.common.comqueue.communication.AllReduce in project Alink by alibaba.

From class Owlqn, method optimize.

/**
 * Optimizer API.
 *
 * @return the coefficients of the linear problem.
 */
@Override
public DataSet<Tuple2<DenseVector, double[]>> optimize() {
    // get parameters.
    int maxIter = params.get(LinearTrainParams.MAX_ITER);
    checkInitCoef();
    int numSearchStep = params.get(HasNumSearchStepDv4.NUM_SEARCH_STEP);
    /**
     * Solve the problem iteratively.
     * trainData is the distributed set of samples.
     * initCoef is the initial model coefficient, which will be broadcast to every worker.
     * objFuncSet is the objective function in DataSet format.
     *
     * .add(new PreallocateCoefficient(OptimVariable.currentCoef))   allocate memory for the current coefficient
     * .add(new PreallocateCoefficient(OptimVariable.minCoef))       allocate memory for the min-loss coefficient
     * .add(new PreallocateConvergenceInfo(OptimVariable.convergenceInfo, maxIter))  allocate memory for loss values
     * .add(new PreallocateVector(OptimVariable.dir ...))            allocate memory for the descent direction
     * .add(new PreallocateVector(OptimVariable.grad))               allocate memory for the gradient
     * .add(new PreallocateVector(OptimVariable.pseGrad))            allocate memory for the pseudo-gradient
     * .add(new PreallocateSkyk(...))                                allocate memory for sK and yK
     * .add(new CalcGradient())                                      calculate the local sub-gradient
     * .add(new AllReduce(OptimVariable.gradAllReduce))              sum the sub-gradients of all workers with allReduce
     * .add(new CalDirection(...))                                   use the summed gradient to compute the descent direction
     * .add(new CalcLosses(OptimMethod.OWLQN, numSearchStep))        calculate local losses for the line search
     * .add(new AllReduce(OptimVariable.lossAllReduce))              sum the losses of all workers with allReduce
     * .add(new UpdateModel(params, ...))                            update the coefficient
     * .setCompareCriterionOfNode0(new IterTermination())            check the stopping criterion on node 0
     */
    DataSet<Row> model = new IterativeComQueue()
        .initWithPartitionedData(OptimVariable.trainData, trainData)
        .initWithBroadcastData(OptimVariable.model, coefVec)
        .initWithBroadcastData(OptimVariable.objFunc, objFuncSet)
        .add(new PreallocateCoefficient(OptimVariable.currentCoef))
        .add(new PreallocateCoefficient(OptimVariable.minCoef))
        .add(new PreallocateConvergenceInfo(OptimVariable.convergenceInfo, maxIter))
        .add(new PreallocateVector(OptimVariable.dir, new double[] {0.0, OptimVariable.learningRate}))
        .add(new PreallocateVector(OptimVariable.grad))
        .add(new PreallocateVector(OptimVariable.pseGrad))
        .add(new PreallocateSkyk(OptimVariable.numCorrections))
        .add(new CalcGradient())
        .add(new AllReduce(OptimVariable.gradAllReduce))
        .add(new CalDirection(params.get(HasL1.L_1), OptimVariable.numCorrections))
        .add(new CalcLosses(LinearTrainParams.OptimMethod.OWLQN, numSearchStep))
        .add(new AllReduce(OptimVariable.lossAllReduce))
        .add(new UpdateModel(params, OptimVariable.grad, LinearTrainParams.OptimMethod.OWLQN, numSearchStep))
        .setCompareCriterionOfNode0(new IterTermination())
        .closeWith(new OutputModel())
        .setMaxIter(maxIter)
        .exec();
    return model.mapPartition(new ParseRowModel());
}
Also used: IterTermination (com.alibaba.alink.operator.common.optim.subfunc.IterTermination), PreallocateCoefficient (com.alibaba.alink.operator.common.optim.subfunc.PreallocateCoefficient), IterativeComQueue (com.alibaba.alink.common.comqueue.IterativeComQueue), AllReduce (com.alibaba.alink.common.comqueue.communication.AllReduce), CalcGradient (com.alibaba.alink.operator.common.optim.subfunc.CalcGradient), OutputModel (com.alibaba.alink.operator.common.optim.subfunc.OutputModel), ParseRowModel (com.alibaba.alink.operator.common.optim.subfunc.ParseRowModel), PreallocateVector (com.alibaba.alink.operator.common.optim.subfunc.PreallocateVector), CalcLosses (com.alibaba.alink.operator.common.optim.subfunc.CalcLosses), PreallocateConvergenceInfo (com.alibaba.alink.operator.common.optim.subfunc.PreallocateConvergenceInfo), PreallocateSkyk (com.alibaba.alink.operator.common.optim.subfunc.PreallocateSkyk), Row (org.apache.flink.types.Row), UpdateModel (com.alibaba.alink.operator.common.optim.subfunc.UpdateModel)
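
OWL-QN differs from plain L-BFGS by handling the non-differentiable L1 term through a pseudo-gradient, which is why this queue adds PreallocateVector(OptimVariable.pseGrad) and passes the L1 weight into CalDirection. A hedged sketch of the textbook OWL-QN pseudo-gradient for a single coordinate; Alink's actual implementation inside CalDirection may differ in detail:

// Textbook OWL-QN pseudo-gradient of f(w) + l1 * |w| at one coordinate,
// where grad is the gradient of the smooth part f.
static double pseudoGradient(double grad, double weight, double l1) {
    if (weight > 0) {
        return grad + l1; // |w| is differentiable away from zero
    }
    if (weight < 0) {
        return grad - l1;
    }
    // At w == 0, pick a one-sided derivative that still allows descent, if any.
    if (grad + l1 < 0) {
        return grad + l1; // moving in the positive direction decreases the objective
    }
    if (grad - l1 > 0) {
        return grad - l1; // moving in the negative direction decreases the objective
    }
    return 0.0; // zero lies in the subdifferential: the coordinate stays at 0
}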

Aggregations

AllReduce (com.alibaba.alink.common.comqueue.communication.AllReduce): 11
Row (org.apache.flink.types.Row): 11
IterativeComQueue (com.alibaba.alink.common.comqueue.IterativeComQueue): 9
IterTermination (com.alibaba.alink.operator.common.optim.subfunc.IterTermination): 5
OutputModel (com.alibaba.alink.operator.common.optim.subfunc.OutputModel): 5
ParseRowModel (com.alibaba.alink.operator.common.optim.subfunc.ParseRowModel): 5
PreallocateCoefficient (com.alibaba.alink.operator.common.optim.subfunc.PreallocateCoefficient): 5
PreallocateConvergenceInfo (com.alibaba.alink.operator.common.optim.subfunc.PreallocateConvergenceInfo): 5
PreallocateVector (com.alibaba.alink.operator.common.optim.subfunc.PreallocateVector): 5
Vector (com.alibaba.alink.common.linalg.Vector): 4
Tuple2 (org.apache.flink.api.java.tuple.Tuple2): 4
DenseVector (com.alibaba.alink.common.linalg.DenseVector): 3
CalcGradient (com.alibaba.alink.operator.common.optim.subfunc.CalcGradient): 3
CalcLosses (com.alibaba.alink.operator.common.optim.subfunc.CalcLosses): 3
UpdateModel (com.alibaba.alink.operator.common.optim.subfunc.UpdateModel): 3
SparseVector (com.alibaba.alink.common.linalg.SparseVector): 2
LdaModelDataConverter (com.alibaba.alink.operator.common.clustering.LdaModelDataConverter): 2
PreallocateSkyk (com.alibaba.alink.operator.common.optim.subfunc.PreallocateSkyk): 2
BaseVectorSummary (com.alibaba.alink.operator.common.statistics.basicstatistic.BaseVectorSummary): 2
ICQTest (com.alibaba.alink.testutil.categories.ICQTest): 2