Search in sources :

Example 11 with BaseVectorSummary

use of com.alibaba.alink.operator.common.statistics.basicstatistic.BaseVectorSummary in project Alink by alibaba.

In the class KMeansTrainBatchOp, the method linkFrom.

@Override
public KMeansTrainBatchOp linkFrom(BatchOperator<?>... inputs) {
    // Trains k-means on the first input operator and sets the model rows as output.
    BatchOperator<?> input = checkAndGetFirst(inputs);
    final int maxIter = this.getMaxIter();
    final double tol = this.getEpsilon();
    final String vectorColName = this.getVectorCol();
    final DistanceType distanceType = getDistanceType();
    FastDistance distance = distanceType.getFastDistance();

    // f0: the raw training vectors; f1: a single summary of the vector column.
    Tuple2<DataSet<Vector>, DataSet<BaseVectorSummary>> summarized =
        StatisticsHelper.summaryHelper(input, null, vectorColName);

    // Derive the dimensionality from the summary; fail fast on an empty dataset.
    DataSet<Integer> vectorSize = summarized.f1.map(new MapFunction<BaseVectorSummary, Integer>() {

        private static final long serialVersionUID = 4184586558834055401L;

        @Override
        public Integer map(BaseVectorSummary summary) {
            Preconditions.checkArgument(summary.count() > 0, "The train dataset is empty!");
            return summary.vectorSize();
        }
    });

    // Rebalance the vectors, then wrap each one into FastDistanceVectorData so the
    // distance implementation can precompute per-vector auxiliary values.
    DataSet<FastDistanceVectorData> trainData = summarized.f0
        .rebalance()
        .map(new RichMapFunction<Vector, FastDistanceVectorData>() {

            private static final long serialVersionUID = -7443226889326704768L;

            // Global dimensionality, received via the VECTOR_SIZE broadcast set.
            private int dim;

            @Override
            public void open(Configuration params) {
                dim = (int) this.getRuntimeContext().getBroadcastVariable(VECTOR_SIZE).get(0);
            }

            @Override
            public FastDistanceVectorData map(Vector vec) {
                // A sparse vector may carry an undefined size; pin it to the global dimension.
                if (vec instanceof SparseVector) {
                    ((SparseVector) vec).setSize(dim);
                }
                return distance.prepareVectorData(Row.of(vec), 0);
            }
        })
        .withBroadcastSet(vectorSize, VECTOR_SIZE);

    // Initialize the centroids, then refine them with the iterative ICQ loop.
    DataSet<FastDistanceMatrixData> initCentroid = KMeansInitCentroids.initKmeansCentroids(
        trainData, distance, this.getParams(), vectorSize, getRandomSeed());
    DataSet<Row> finalCentroid = iterateICQ(initCentroid, trainData, vectorSize, maxIter, tol, distance,
        HasKMeansWithHaversineDistanceType.DistanceType.valueOf(distanceType.name()), vectorColName, null, null);

    this.setOutput(finalCentroid, new KMeansModelDataConverter().getModelSchema());
    return this;
}
Also used : Configuration(org.apache.flink.configuration.Configuration) DataSet(org.apache.flink.api.java.DataSet) FastDistanceVectorData(com.alibaba.alink.operator.common.distance.FastDistanceVectorData) HasKMeansWithHaversineDistanceType(com.alibaba.alink.params.shared.clustering.HasKMeansWithHaversineDistanceType) SparseVector(com.alibaba.alink.common.linalg.SparseVector) FastDistance(com.alibaba.alink.operator.common.distance.FastDistance) BaseVectorSummary(com.alibaba.alink.operator.common.statistics.basicstatistic.BaseVectorSummary) Vector(com.alibaba.alink.common.linalg.Vector) SparseVector(com.alibaba.alink.common.linalg.SparseVector) KMeansModelDataConverter(com.alibaba.alink.operator.common.clustering.kmeans.KMeansModelDataConverter) FastDistanceMatrixData(com.alibaba.alink.operator.common.distance.FastDistanceMatrixData) RichMapFunction(org.apache.flink.api.common.functions.RichMapFunction) Row(org.apache.flink.types.Row)

Example 12 with BaseVectorSummary

use of com.alibaba.alink.operator.common.statistics.basicstatistic.BaseVectorSummary in project Alink by alibaba.

In the class LdaTrainBatchOp, the method gibbsSample.

/**
 * Runs EM-style collapsed Gibbs sampling over the vectorized corpus and emits the LDA model.
 */
private void gibbsSample(Tuple2<DataSet<Vector>, DataSet<BaseVectorSummary>> dataAndStat, int numTopic, int numIter, double alpha, double beta, DataSet<DocCountVectorizerModelData> resDocCountModel, Integer seed) {
    // -1 is a sentinel for "use the default prior". Both defaults carry a "+ 1"
    // offset; presumably intentional add-one smoothing — TODO confirm for beta.
    if (beta == -1) {
        beta = 0.01 + 1;
    }
    if (alpha == -1) {
        alpha = 50.0 / numTopic + 1;
    }
    DataSet<Vector> corpus = dataAndStat.f0;
    // Vocabulary size equals the dimensionality of the word-count vectors.
    DataSet<Integer> vocabularySize = dataAndStat.f1.map(new MapFunction<BaseVectorSummary, Integer>() {

        private static final long serialVersionUID = -7170259222827300492L;

        @Override
        public Integer map(BaseVectorSummary summary) {
            return summary.vectorSize();
        }
    });
    // Each iteration: a corpus sweep, an AllReduce of word-topic counts, a
    // log-likelihood step, and an AllReduce of the log-likelihood.
    DataSet<Row> emModelRows = new IterativeComQueue()
        .initWithPartitionedData(LdaVariable.data, corpus)
        .initWithBroadcastData(LdaVariable.vocabularySize, vocabularySize)
        .add(new EmCorpusStep(numTopic, alpha, beta, seed))
        .add(new AllReduce(LdaVariable.nWordTopics))
        .add(new EmLogLikelihood(numTopic, alpha, beta, numIter))
        .add(new AllReduce(LdaVariable.logLikelihood))
        .closeWith(new BuildEmLdaModel(numTopic, alpha, beta))
        .setMaxIter(numIter)
        .exec();
    // Attach the vocabulary (doc-count model) to finish building the model rows.
    DataSet<Row> modelRows = emModelRows
        .flatMap(new BuildResModel(seed))
        .withBroadcastSet(resDocCountModel, "DocCountModel");
    setOutput(modelRows, new LdaModelDataConverter().getModelSchema());
    saveWordTopicModelAndPerplexity(modelRows, numTopic, false);
}
Also used : IterativeComQueue(com.alibaba.alink.common.comqueue.IterativeComQueue) AllReduce(com.alibaba.alink.common.comqueue.communication.AllReduce) BuildEmLdaModel(com.alibaba.alink.operator.common.clustering.lda.BuildEmLdaModel) EmCorpusStep(com.alibaba.alink.operator.common.clustering.lda.EmCorpusStep) EmLogLikelihood(com.alibaba.alink.operator.common.clustering.lda.EmLogLikelihood) LdaModelDataConverter(com.alibaba.alink.operator.common.clustering.LdaModelDataConverter) BaseVectorSummary(com.alibaba.alink.operator.common.statistics.basicstatistic.BaseVectorSummary) Row(org.apache.flink.types.Row) Vector(com.alibaba.alink.common.linalg.Vector) SparseVector(com.alibaba.alink.common.linalg.SparseVector)

Example 13 with BaseVectorSummary

use of com.alibaba.alink.operator.common.statistics.basicstatistic.BaseVectorSummary in project Alink by alibaba.

In the class LdaTrainBatchOp, the method linkFrom.

@Override
public LdaTrainBatchOp linkFrom(BatchOperator<?>... inputs) {
    // Trains an LDA model: vectorizes documents, optionally re-orders the data
    // deterministically when a random seed is given, then dispatches to the
    // EM (Gibbs sampling) or Online optimizer.
    BatchOperator<?> in = checkAndGetFirst(inputs);
    int parallelism = BatchOperator.getExecutionEnvironmentFromOps(in).getParallelism();
    long mlEnvId = getMLEnvironmentId();
    int numTopic = getTopicNum();
    int numIter = getNumIter();
    Integer seed = getRandomSeed();
    // A null seed means "no reproducibility requested"; skip the shuffle below.
    boolean setSeed = true;
    if (seed == null) {
        setSeed = false;
    }
    String vectorColName = getSelectedCol();
    Method optimizer = getMethod();
    // Vocabulary / document-count model, broadcast to the vectorization step.
    final DataSet<DocCountVectorizerModelData> resDocCountModel = DocCountVectorizerTrainBatchOp.generateDocCountModel(getParams(), in);
    int index = TableUtil.findColIndexWithAssertAndHint(in.getColNames(), vectorColName);
    // Replace the document column with its sparse word-count vector.
    DataSet<Row> resRow = in.getDataSet().flatMap(new Document2Vector(index)).withBroadcastSet(resDocCountModel, "DocCountModel");
    TypeInformation<?>[] types = in.getColTypes().clone();
    types[index] = TypeInformation.of(SparseVector.class);
    BatchOperator trainData = new TableSourceBatchOp(DataSetConversionUtil.toTable(mlEnvId, resRow, in.getColNames(), types)).setMLEnvironmentId(mlEnvId);
    // f0: vectorized documents; f1: column summary (gives the vocabulary size).
    Tuple2<DataSet<Vector>, DataSet<BaseVectorSummary>> dataAndStat = StatisticsHelper.summaryHelper(trainData, null, vectorColName);
    if (setSeed) {
        // Deterministic shuffle: hash each vector with the seed, route by hash to
        // a fixed partition, then sort within each partition so the data order is
        // reproducible across runs regardless of upstream ordering.
        DataSet<Tuple2<Long, Vector>> hashValue = dataAndStat.f0.map(new MapHashValue(seed)).partitionCustom(new Partitioner<Long>() {

            private static final long serialVersionUID = 5179898093029365608L;

            @Override
            public int partition(Long key, int numPartitions) {
                return (int) (Math.abs(key) % ((long) numPartitions));
            }
        }, 0);
        dataAndStat.f0 = hashValue.mapPartition(new MapPartitionFunction<Tuple2<Long, Vector>, Vector>() {

            private static final long serialVersionUID = -550512476573928350L;

            @Override
            public void mapPartition(Iterable<Tuple2<Long, Vector>> values, Collector<Vector> out) throws Exception {
                List<Tuple2<Long, Vector>> listValues = Lists.newArrayList(values);
                listValues.sort(new Comparator<Tuple2<Long, Vector>>() {

                    @Override
                    public int compare(Tuple2<Long, Vector> o1, Tuple2<Long, Vector> o2) {
                        // Primary order: hash value; tie-break on the vector's
                        // string form so equal hashes still sort deterministically.
                        int compare1 = o1.f0.compareTo(o2.f0);
                        if (compare1 == 0) {
                            String o1s = o1.f1.toString();
                            String o2s = o2.f1.toString();
                            return o1s.compareTo(o2s);
                        }
                        return compare1;
                    }
                });
                listValues.forEach(x -> out.collect(x.f1));
            }
        }).setParallelism(parallelism);
    }
    double beta = getParams().get(BETA);
    double alpha = getParams().get(ALPHA);
    // Shape parameter for the Online optimizer's gamma initialization.
    int gammaShape = 250;
    switch(optimizer) {
        case EM:
            gibbsSample(dataAndStat, numTopic, numIter, alpha, beta, resDocCountModel, seed);
            break;
        case Online:
            online(dataAndStat, numTopic, numIter, alpha, beta, resDocCountModel, gammaShape, seed);
            break;
        default:
            throw new NotImplementedException("Optimizer not support.");
    }
    return this;
}
Also used : LdaUtil(com.alibaba.alink.operator.common.clustering.lda.LdaUtil) OnlineLogLikelihood(com.alibaba.alink.operator.common.clustering.lda.OnlineLogLikelihood) Arrays(java.util.Arrays) Tuple2(org.apache.flink.api.java.tuple.Tuple2) OnlineCorpusStep(com.alibaba.alink.operator.common.clustering.lda.OnlineCorpusStep) LdaTrainParams(com.alibaba.alink.params.clustering.LdaTrainParams) MapFunction(org.apache.flink.api.common.functions.MapFunction) WithModelInfoBatchOp(com.alibaba.alink.common.lazy.WithModelInfoBatchOp) DataSet(org.apache.flink.api.java.DataSet) NotImplementedException(org.apache.commons.lang.NotImplementedException) TypeInformation(org.apache.flink.api.common.typeinfo.TypeInformation) Vector(com.alibaba.alink.common.linalg.Vector) RichMapPartitionFunction(org.apache.flink.api.common.functions.RichMapPartitionFunction) Table(org.apache.flink.table.api.Table) AllReduce(com.alibaba.alink.common.comqueue.communication.AllReduce) LdaModelMapper(com.alibaba.alink.operator.common.clustering.LdaModelMapper) UpdateLambdaAndAlpha(com.alibaba.alink.operator.common.clustering.lda.UpdateLambdaAndAlpha) EmCorpusStep(com.alibaba.alink.operator.common.clustering.lda.EmCorpusStep) List(java.util.List) IterativeComQueue(com.alibaba.alink.common.comqueue.IterativeComQueue) DataSetConversionUtil(com.alibaba.alink.common.utils.DataSetConversionUtil) LdaModelDataConverter(com.alibaba.alink.operator.common.clustering.LdaModelDataConverter) BaseVectorSummary(com.alibaba.alink.operator.common.statistics.basicstatistic.BaseVectorSummary) HashFunction(org.apache.flink.shaded.guava18.com.google.common.hash.HashFunction) MapPartitionFunction(org.apache.flink.api.common.functions.MapPartitionFunction) Row(org.apache.flink.types.Row) DocCountVectorizerModelMapper(com.alibaba.alink.operator.common.nlp.DocCountVectorizerModelMapper) LdaVariable(com.alibaba.alink.operator.common.clustering.lda.LdaVariable) 
Hashing.murmur3_128(org.apache.flink.shaded.guava18.com.google.common.hash.Hashing.murmur3_128) RichFlatMapFunction(org.apache.flink.api.common.functions.RichFlatMapFunction) BuildOnlineLdaModel(com.alibaba.alink.operator.common.clustering.lda.BuildOnlineLdaModel) TableUtil(com.alibaba.alink.common.utils.TableUtil) HashMap(java.util.HashMap) DocCountVectorizerTrainBatchOp(com.alibaba.alink.operator.batch.nlp.DocCountVectorizerTrainBatchOp) BuildEmLdaModel(com.alibaba.alink.operator.common.clustering.lda.BuildEmLdaModel) ArrayList(java.util.ArrayList) Partitioner(org.apache.flink.api.common.functions.Partitioner) DocCountVectorizerModelData(com.alibaba.alink.operator.common.nlp.DocCountVectorizerModelData) Lists(com.google.common.collect.Lists) RichMapFunction(org.apache.flink.api.common.functions.RichMapFunction) Collector(org.apache.flink.util.Collector) DenseMatrix(com.alibaba.alink.common.linalg.DenseMatrix) BatchOperator(com.alibaba.alink.operator.batch.BatchOperator) EmLogLikelihood(com.alibaba.alink.operator.common.clustering.lda.EmLogLikelihood) Types(org.apache.flink.api.common.typeinfo.Types) LdaModelData(com.alibaba.alink.operator.common.clustering.LdaModelData) Configuration(org.apache.flink.configuration.Configuration) RowCollector(com.alibaba.alink.common.utils.RowCollector) TableSourceBatchOp(com.alibaba.alink.operator.batch.source.TableSourceBatchOp) StatisticsHelper(com.alibaba.alink.operator.common.statistics.StatisticsHelper) RandomDataGenerator(org.apache.commons.math3.random.RandomDataGenerator) FeatureType(com.alibaba.alink.operator.common.nlp.FeatureType) SparseVector(com.alibaba.alink.common.linalg.SparseVector) Comparator(java.util.Comparator) Params(org.apache.flink.ml.api.misc.param.Params) DataSet(org.apache.flink.api.java.DataSet) NotImplementedException(org.apache.commons.lang.NotImplementedException) SparseVector(com.alibaba.alink.common.linalg.SparseVector) TableSourceBatchOp(com.alibaba.alink.operator.batch.source.TableSourceBatchOp) 
TypeInformation(org.apache.flink.api.common.typeinfo.TypeInformation) Comparator(java.util.Comparator) DocCountVectorizerModelData(com.alibaba.alink.operator.common.nlp.DocCountVectorizerModelData) List(java.util.List) ArrayList(java.util.ArrayList) Vector(com.alibaba.alink.common.linalg.Vector) SparseVector(com.alibaba.alink.common.linalg.SparseVector) BatchOperator(com.alibaba.alink.operator.batch.BatchOperator) NotImplementedException(org.apache.commons.lang.NotImplementedException) Tuple2(org.apache.flink.api.java.tuple.Tuple2) Row(org.apache.flink.types.Row)

Example 14 with BaseVectorSummary

use of com.alibaba.alink.operator.common.statistics.basicstatistic.BaseVectorSummary in project Alink by alibaba.

In the class BisectingKMeansTrainBatchOp, the method linkFrom.

/**
 * The bisecting kmeans algorithm has nested loops. In the outer loop, cluster centers
 * are split. In the inner loop, the split centers are iteratively refined.
 * However, there lacks nested loop semantic in Flink, so we have to flatten the nested loop
 * in our implementation.
 */
@Override
public BisectingKMeansTrainBatchOp linkFrom(BatchOperator<?>... inputs) {
    BatchOperator<?> in = checkAndGetFirst(inputs);
    // get the input parameter's value
    final DistanceType distanceType = getDistanceType();
    final int k = this.getK();
    final int maxIter = this.getMaxIter();
    final String vectorColName = this.getVectorCol();
    final int minDivisibleClusterSize = this.getMinDivisibleClusterSize();
    ContinuousDistance distance = distanceType.getFastDistance();
    // f0: the raw training vectors; f1: a single summary of the vector column.
    Tuple2<DataSet<Vector>, DataSet<BaseVectorSummary>> vectorsAndStat = StatisticsHelper.summaryHelper(in, null, vectorColName);
    // Dimensionality of the training vectors; fail fast on an empty dataset.
    DataSet<Integer> dim = vectorsAndStat.f1.map(new MapFunction<BaseVectorSummary, Integer>() {

        private static final long serialVersionUID = 5358843841535961680L;

        @Override
        public Integer map(BaseVectorSummary value) {
            Preconditions.checkArgument(value.count() > 0, "The train dataset is empty!");
            return value.vectorSize();
        }
    });
    // tuple: sampleId, features, assignment
    // Every sample starts assigned to the root cluster (ROOT_INDEX).
    DataSet<Tuple3<Long, Vector, Long>> initialAssignment = DataSetUtils.zipWithUniqueId(vectorsAndStat.f0).map(new RichMapFunction<Tuple2<Long, Vector>, Tuple3<Long, Vector, Long>>() {

        private static final long serialVersionUID = -6036596630416015773L;

        // Global dimensionality, received via the VECTOR_SIZE broadcast set.
        private int vectorSize;

        @Override
        public void open(Configuration params) {
            vectorSize = (int) this.getRuntimeContext().getBroadcastVariable(VECTOR_SIZE).get(0);
        }

        @Override
        public Tuple3<Long, Vector, Long> map(Tuple2<Long, Vector> value) {
            // A sparse vector may carry an undefined size; pin it to the global dimension.
            if (value.f1 instanceof SparseVector) {
                ((SparseVector) value.f1).setSize(vectorSize);
            }
            return Tuple3.of(value.f0, value.f1, ROOT_INDEX);
        }
    }).withBroadcastSet(dim, VECTOR_SIZE);
    // Per-cluster statistics keyed by cluster id; project(2, 1) = (assignment, features).
    DataSet<Tuple2<Long, ClusterSummary>> clustersSummaries = summary(initialAssignment.project(2, 1), dim, distanceType);
    // Attach the iteration bookkeeping (outer bisection step + inner refinement step).
    DataSet<Tuple3<Long, ClusterSummary, IterInfo>> clustersSummariesAndIterInfo = clustersSummaries.map(new MapFunction<Tuple2<Long, ClusterSummary>, Tuple3<Long, ClusterSummary, IterInfo>>() {

        private static final long serialVersionUID = -3883958936263294331L;

        @Override
        public Tuple3<Long, ClusterSummary, IterInfo> map(Tuple2<Long, ClusterSummary> value) {
            return Tuple3.of(value.f0, value.f1, new IterInfo(maxIter));
        }
    }).withForwardedFields("f0;f1");
    // Single flattened Flink iteration that emulates the nested bisection loop;
    // termination is driven by the stopCriterion dataset below, not this bound.
    IterativeDataSet<Tuple3<Long, ClusterSummary, IterInfo>> loop = clustersSummariesAndIterInfo.iterate(Integer.MAX_VALUE);
    // All records share the same IterInfo, so one representative record suffices.
    DataSet<Tuple1<IterInfo>> iterInfo = loop.<Tuple1<IterInfo>>project(2).first(1);
    // Get all cluster summaries. Split clusters if at the first step of inner iterations.
    DataSet<Tuple3<Long, ClusterSummary, IterInfo>> allClusters = getOrSplitClusters(loop, k, minDivisibleClusterSize, getRandomSeed());
    DataSet<Long> divisibleClusterIndices = getDivisibleClusterIndices(allClusters);
    DataSet<Tuple2<Long, DenseVector>> newClusterCenters = getNewClusterCenters(allClusters);
    // Re-assign only the points that belong to divisible clusters.
    DataSet<Tuple3<Long, Vector, Long>> newAssignment = updateAssignment(initialAssignment, divisibleClusterIndices, newClusterCenters, distance, iterInfo);
    DataSet<Tuple2<Long, ClusterSummary>> newClusterSummaries = summary(newAssignment.project(2, 1), dim, distanceType);
    DataSet<Tuple3<Long, ClusterSummary, IterInfo>> updatedClusterSummariesWithIterInfo = updateClusterSummariesAndIterInfo(allClusters, newClusterSummaries);
    // Empty dataset terminates the Flink iteration: emit a record only while
    // we are NOT at the last inner step of the last bisection step.
    DataSet<Integer> stopCriterion = iterInfo.flatMap(new FlatMapFunction<Tuple1<IterInfo>, Integer>() {

        private static final long serialVersionUID = -4258243788034193744L;

        @Override
        public void flatMap(Tuple1<IterInfo> value, Collector<Integer> out) {
            if (!(value.f0.atLastInnerIterStep() && value.f0.atLastBisectionStep())) {
                out.collect(0);
            }
        }
    });
    DataSet<Tuple2<Long, ClusterSummary>> finalClusterSummaries = loop.closeWith(updatedClusterSummariesWithIterInfo, stopCriterion).project(0, 1);
    // Model rows are written by a single task so the model is emitted in one piece.
    DataSet<Row> modelRows = finalClusterSummaries.mapPartition(new SaveModel(distanceType, vectorColName, k)).withBroadcastSet(dim, VECTOR_SIZE).setParallelism(1);
    this.setOutput(modelRows, new BisectingKMeansModelDataConverter().getModelSchema());
    return this;
}
Also used : Configuration(org.apache.flink.configuration.Configuration) DataSet(org.apache.flink.api.java.DataSet) IterativeDataSet(org.apache.flink.api.java.operators.IterativeDataSet) SparseVector(com.alibaba.alink.common.linalg.SparseVector) MapFunction(org.apache.flink.api.common.functions.MapFunction) FlatMapFunction(org.apache.flink.api.common.functions.FlatMapFunction) RichMapFunction(org.apache.flink.api.common.functions.RichMapFunction) BisectingKMeansModelDataConverter(com.alibaba.alink.operator.common.clustering.BisectingKMeansModelDataConverter) BaseVectorSummary(com.alibaba.alink.operator.common.statistics.basicstatistic.BaseVectorSummary) Vector(com.alibaba.alink.common.linalg.Vector) DenseVector(com.alibaba.alink.common.linalg.DenseVector) SparseVector(com.alibaba.alink.common.linalg.SparseVector) ClusterSummary(com.alibaba.alink.operator.common.clustering.BisectingKMeansModelData.ClusterSummary) ContinuousDistance(com.alibaba.alink.operator.common.distance.ContinuousDistance) Tuple1(org.apache.flink.api.java.tuple.Tuple1) RichMapFunction(org.apache.flink.api.common.functions.RichMapFunction) Tuple2(org.apache.flink.api.java.tuple.Tuple2) Tuple3(org.apache.flink.api.java.tuple.Tuple3) Row(org.apache.flink.types.Row)

Example 15 with BaseVectorSummary

use of com.alibaba.alink.operator.common.statistics.basicstatistic.BaseVectorSummary in project Alink by alibaba.

In the class VectorMinMaxScalerModelDataConverter, the method serializeModel.

/**
 * Serialize the model data to "Tuple3<Params, List<String>, List<Row>>".
 *
 * @param modelData The model data to serialize: f0 = target output min, f1 = target
 *                  output max, f2 = per-dimension statistics of the training vectors.
 * @return The serialization result: meta params plus the JSON-encoded per-dimension
 *         mins and maxs; the row list is always empty for this model.
 */
public Tuple3<Params, Iterable<String>, Iterable<Row>> serializeModel(Tuple3<Double, Double, BaseVectorSummary> modelData) {
    double min = modelData.f0;
    double max = modelData.f1;
    BaseVectorSummary summary = modelData.f2;
    // Per-dimension extrema, densified so they serialize as plain double arrays.
    double[] eMins = toDenseArray(summary.min());
    double[] eMaxs = toDenseArray(summary.max());
    List<String> data = new ArrayList<>();
    data.add(JsonConverter.toJson(eMins));
    data.add(JsonConverter.toJson(eMaxs));
    Params meta = new Params()
        .set(VectorMinMaxScalerTrainParams.MIN, min)
        .set(VectorMinMaxScalerTrainParams.MAX, max)
        .set(VectorMinMaxScalerTrainParams.SELECTED_COL, vectorColName);
    return Tuple3.of(meta, data, new ArrayList<>());
}

/**
 * Extracts the backing double array of a vector, converting sparse to dense first.
 * (Fully-qualified parameter type avoids adding an import to the file.)
 */
private static double[] toDenseArray(com.alibaba.alink.common.linalg.Vector v) {
    if (v instanceof DenseVector) {
        return ((DenseVector) v).getData();
    }
    return ((SparseVector) v).toDenseVector().getData();
}
Also used : BaseVectorSummary(com.alibaba.alink.operator.common.statistics.basicstatistic.BaseVectorSummary) ArrayList(java.util.ArrayList) VectorMinMaxScalerTrainParams(com.alibaba.alink.params.dataproc.vector.VectorMinMaxScalerTrainParams) Params(org.apache.flink.ml.api.misc.param.Params) SparseVector(com.alibaba.alink.common.linalg.SparseVector) DenseVector(com.alibaba.alink.common.linalg.DenseVector)

Aggregations

BaseVectorSummary (com.alibaba.alink.operator.common.statistics.basicstatistic.BaseVectorSummary)24 Row (org.apache.flink.types.Row)13 Vector (com.alibaba.alink.common.linalg.Vector)11 DenseVector (com.alibaba.alink.common.linalg.DenseVector)9 SparseVector (com.alibaba.alink.common.linalg.SparseVector)9 BatchOperator (com.alibaba.alink.operator.batch.BatchOperator)9 DataSet (org.apache.flink.api.java.DataSet)9 Tuple2 (org.apache.flink.api.java.tuple.Tuple2)8 Test (org.junit.Test)8 ArrayList (java.util.ArrayList)7 Params (org.apache.flink.ml.api.misc.param.Params)5 MemSourceBatchOp (com.alibaba.alink.operator.batch.source.MemSourceBatchOp)4 IterativeComQueue (com.alibaba.alink.common.comqueue.IterativeComQueue)3 AllReduce (com.alibaba.alink.common.comqueue.communication.AllReduce)3 VectorSummarizerBatchOp (com.alibaba.alink.operator.batch.statistics.VectorSummarizerBatchOp)3 LdaModelDataConverter (com.alibaba.alink.operator.common.clustering.LdaModelDataConverter)3 MapFunction (org.apache.flink.api.common.functions.MapFunction)3 RichMapFunction (org.apache.flink.api.common.functions.RichMapFunction)3 Configuration (org.apache.flink.configuration.Configuration)3 DenseMatrix (com.alibaba.alink.common.linalg.DenseMatrix)2