use of com.alibaba.alink.operator.common.statistics.basicstatistic.BaseVectorSummary in project Alink by alibaba.
the class KMeansTrainBatchOp method linkFrom.
@Override
public KMeansTrainBatchOp linkFrom(BatchOperator<?>... inputs) {
    BatchOperator<?> in = checkAndGetFirst(inputs);
    final int maxIter = this.getMaxIter();
    final double tol = this.getEpsilon();
    final String vectorColName = this.getVectorCol();
    final DistanceType distanceType = getDistanceType();
    FastDistance distance = distanceType.getFastDistance();
    Tuple2<DataSet<Vector>, DataSet<BaseVectorSummary>> statistics =
        StatisticsHelper.summaryHelper(in, null, vectorColName);
    DataSet<Integer> vectorSize = statistics.f1.map(new MapFunction<BaseVectorSummary, Integer>() {
        private static final long serialVersionUID = 4184586558834055401L;

        @Override
        public Integer map(BaseVectorSummary value) {
            Preconditions.checkArgument(value.count() > 0, "The train dataset is empty!");
            return value.vectorSize();
        }
    });
    DataSet<FastDistanceVectorData> data = statistics.f0.rebalance()
        .map(new RichMapFunction<Vector, FastDistanceVectorData>() {
            private static final long serialVersionUID = -7443226889326704768L;
            private int vectorSize;

            @Override
            public void open(Configuration params) {
                vectorSize = (int) this.getRuntimeContext().getBroadcastVariable(VECTOR_SIZE).get(0);
            }

            @Override
            public FastDistanceVectorData map(Vector value) {
                if (value instanceof SparseVector) {
                    ((SparseVector) value).setSize(vectorSize);
                }
                return distance.prepareVectorData(Row.of(value), 0);
            }
        }).withBroadcastSet(vectorSize, VECTOR_SIZE);
    DataSet<FastDistanceMatrixData> initCentroid = KMeansInitCentroids.initKmeansCentroids(
        data, distance, this.getParams(), vectorSize, getRandomSeed());
    DataSet<Row> finalCentroid = iterateICQ(initCentroid, data, vectorSize, maxIter, tol, distance,
        HasKMeansWithHaversineDistanceType.DistanceType.valueOf(distanceType.name()),
        vectorColName, null, null);
    this.setOutput(finalCentroid, new KMeansModelDataConverter().getModelSchema());
    return this;
}
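For context, here is a minimal sketch of how this operator might be linked from an in-memory source. The column name "vec", the sample vectors, and the parameter values are illustrative assumptions, not part of the Alink source above.

// Hypothetical usage sketch: train a KMeans model on a tiny in-memory dataset.
import com.alibaba.alink.operator.batch.BatchOperator;
import com.alibaba.alink.operator.batch.clustering.KMeansTrainBatchOp;
import com.alibaba.alink.operator.batch.source.MemSourceBatchOp;

public class KMeansTrainExample {
    public static void main(String[] args) throws Exception {
        // Two well-separated groups of 2-d points, written as Alink vector strings.
        Object[][] rows = new Object[][] {
            {"0.0 0.0"}, {"0.1 0.1"}, {"9.0 9.0"}, {"9.1 9.1"}
        };
        BatchOperator<?> source = new MemSourceBatchOp(rows, new String[] {"vec"});
        KMeansTrainBatchOp kmeans = new KMeansTrainBatchOp()
            .setVectorCol("vec")   // the vector column read via getVectorCol() above
            .setK(2)
            .setMaxIter(20);
        kmeans.linkFrom(source).print();  // prints the serialized model rows
    }
}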
use of com.alibaba.alink.operator.common.statistics.basicstatistic.BaseVectorSummary in project Alink by alibaba.
the class LdaTrainBatchOp method gibbsSample.
private void gibbsSample(Tuple2<DataSet<Vector>, DataSet<BaseVectorSummary>> dataAndStat,
                         int numTopic, int numIter, double alpha, double beta,
                         DataSet<DocCountVectorizerModelData> resDocCountModel, Integer seed) {
    if (beta == -1) {
        beta = 0.01 + 1;
    }
    if (alpha == -1) {
        alpha = 50.0 / numTopic + 1;
    }
    DataSet<Vector> data = dataAndStat.f0;
    DataSet<Integer> colNum = dataAndStat.f1.map(new MapFunction<BaseVectorSummary, Integer>() {
        private static final long serialVersionUID = -7170259222827300492L;

        @Override
        public Integer map(BaseVectorSummary srt) {
            return srt.vectorSize();
        }
    });
    DataSet<Row> ldaModelData = new IterativeComQueue()
        .initWithPartitionedData(LdaVariable.data, data)
        .initWithBroadcastData(LdaVariable.vocabularySize, colNum)
        .add(new EmCorpusStep(numTopic, alpha, beta, seed))
        .add(new AllReduce(LdaVariable.nWordTopics))
        .add(new EmLogLikelihood(numTopic, alpha, beta, numIter))
        .add(new AllReduce(LdaVariable.logLikelihood))
        .closeWith(new BuildEmLdaModel(numTopic, alpha, beta))
        .setMaxIter(numIter)
        .exec();
    DataSet<Row> model = ldaModelData.flatMap(new BuildResModel(seed))
        .withBroadcastSet(resDocCountModel, "DocCountModel");
    setOutput(model, new LdaModelDataConverter().getModelSchema());
    saveWordTopicModelAndPerplexity(model, numTopic, false);
}
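Here, as in every snippet on this page, BaseVectorSummary is consumed only to recover the vector dimension (for LDA, the vocabulary size). A standalone sketch of that recurring pattern follows; the helper class name is hypothetical and the import paths are assumed from the Alink source tree.

// Hypothetical helper: derive the vocabulary size (vector dimension) from a
// BaseVectorSummary, mirroring the map over dataAndStat.f1 above.
import com.alibaba.alink.common.linalg.Vector;
import com.alibaba.alink.operator.batch.BatchOperator;
import com.alibaba.alink.operator.common.statistics.StatisticsHelper;
import com.alibaba.alink.operator.common.statistics.basicstatistic.BaseVectorSummary;
import org.apache.flink.api.common.functions.MapFunction;
import org.apache.flink.api.java.DataSet;
import org.apache.flink.api.java.tuple.Tuple2;

public class VocabularySizeUtil {
    public static DataSet<Integer> vocabularySize(BatchOperator<?> trainData, String vectorCol) {
        // summaryHelper returns the parsed vectors plus a one-element summary dataset.
        Tuple2<DataSet<Vector>, DataSet<BaseVectorSummary>> vectorsAndStat =
            StatisticsHelper.summaryHelper(trainData, null, vectorCol);
        return vectorsAndStat.f1.map(new MapFunction<BaseVectorSummary, Integer>() {
            @Override
            public Integer map(BaseVectorSummary summary) {
                // vectorSize() reports the dimension of the summarized vectors.
                return summary.vectorSize();
            }
        });
    }
}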
use of com.alibaba.alink.operator.common.statistics.basicstatistic.BaseVectorSummary in project Alink by alibaba.
the class LdaTrainBatchOp method linkFrom.
@Override
public LdaTrainBatchOp linkFrom(BatchOperator<?>... inputs) {
    BatchOperator<?> in = checkAndGetFirst(inputs);
    int parallelism = BatchOperator.getExecutionEnvironmentFromOps(in).getParallelism();
    long mlEnvId = getMLEnvironmentId();
    int numTopic = getTopicNum();
    int numIter = getNumIter();
    Integer seed = getRandomSeed();
    boolean setSeed = true;
    if (seed == null) {
        setSeed = false;
    }
    String vectorColName = getSelectedCol();
    Method optimizer = getMethod();
    final DataSet<DocCountVectorizerModelData> resDocCountModel =
        DocCountVectorizerTrainBatchOp.generateDocCountModel(getParams(), in);
    int index = TableUtil.findColIndexWithAssertAndHint(in.getColNames(), vectorColName);
    DataSet<Row> resRow = in.getDataSet().flatMap(new Document2Vector(index))
        .withBroadcastSet(resDocCountModel, "DocCountModel");
    TypeInformation<?>[] types = in.getColTypes().clone();
    types[index] = TypeInformation.of(SparseVector.class);
    BatchOperator trainData = new TableSourceBatchOp(
        DataSetConversionUtil.toTable(mlEnvId, resRow, in.getColNames(), types))
        .setMLEnvironmentId(mlEnvId);
    Tuple2<DataSet<Vector>, DataSet<BaseVectorSummary>> dataAndStat =
        StatisticsHelper.summaryHelper(trainData, null, vectorColName);
    if (setSeed) {
        DataSet<Tuple2<Long, Vector>> hashValue = dataAndStat.f0
            .map(new MapHashValue(seed))
            .partitionCustom(new Partitioner<Long>() {
                private static final long serialVersionUID = 5179898093029365608L;

                @Override
                public int partition(Long key, int numPartitions) {
                    return (int) (Math.abs(key) % ((long) numPartitions));
                }
            }, 0);
        dataAndStat.f0 = hashValue.mapPartition(
            new MapPartitionFunction<Tuple2<Long, Vector>, Vector>() {
                private static final long serialVersionUID = -550512476573928350L;

                @Override
                public void mapPartition(Iterable<Tuple2<Long, Vector>> values,
                                         Collector<Vector> out) throws Exception {
                    List<Tuple2<Long, Vector>> listValues = Lists.newArrayList(values);
                    listValues.sort(new Comparator<Tuple2<Long, Vector>>() {
                        @Override
                        public int compare(Tuple2<Long, Vector> o1, Tuple2<Long, Vector> o2) {
                            int compare1 = o1.f0.compareTo(o2.f0);
                            if (compare1 == 0) {
                                String o1s = o1.f1.toString();
                                String o2s = o2.f1.toString();
                                return o1s.compareTo(o2s);
                            }
                            return compare1;
                        }
                    });
                    listValues.forEach(x -> out.collect(x.f1));
                }
            }).setParallelism(parallelism);
    }
    double beta = getParams().get(BETA);
    double alpha = getParams().get(ALPHA);
    int gammaShape = 250;
    switch (optimizer) {
        case EM:
            gibbsSample(dataAndStat, numTopic, numIter, alpha, beta, resDocCountModel, seed);
            break;
        case Online:
            online(dataAndStat, numTopic, numIter, alpha, beta, resDocCountModel, gammaShape, seed);
            break;
        default:
            throw new NotImplementedException("Optimizer is not supported.");
    }
    return this;
}
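A hypothetical way to invoke this operator end to end is sketched below. The column name "doc", the sample documents, the parameter values, the import path for LdaTrainBatchOp, and the "em" method string are assumptions for illustration, not confirmed by the source above.

// Hypothetical usage sketch: train an EM LDA model on three tiny documents.
import com.alibaba.alink.operator.batch.BatchOperator;
import com.alibaba.alink.operator.batch.clustering.LdaTrainBatchOp;
import com.alibaba.alink.operator.batch.source.MemSourceBatchOp;

public class LdaTrainExample {
    public static void main(String[] args) throws Exception {
        Object[][] docs = new Object[][] {
            {"a b b c c c"}, {"a a b c"}, {"c c c d d"}
        };
        BatchOperator<?> source = new MemSourceBatchOp(docs, new String[] {"doc"});
        LdaTrainBatchOp lda = new LdaTrainBatchOp()
            .setSelectedCol("doc")  // read via getSelectedCol() above
            .setTopicNum(2)
            .setNumIter(20)
            .setMethod("em");       // assumed value; selects the gibbsSample branch
        lda.linkFrom(source).print();
    }
}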
use of com.alibaba.alink.operator.common.statistics.basicstatistic.BaseVectorSummary in project Alink by alibaba.
the class BisectingKMeansTrainBatchOp method linkFrom.
/**
 * The bisecting k-means algorithm has nested loops. In the outer loop, cluster centers
 * are split. In the inner loop, the split centers are iteratively refined.
 * However, Flink lacks nested-loop semantics, so we have to flatten the nested loops
 * in our implementation.
 */
@Override
public BisectingKMeansTrainBatchOp linkFrom(BatchOperator<?>... inputs) {
    BatchOperator<?> in = checkAndGetFirst(inputs);
    // get the values of the input parameters
    final DistanceType distanceType = getDistanceType();
    final int k = this.getK();
    final int maxIter = this.getMaxIter();
    final String vectorColName = this.getVectorCol();
    final int minDivisibleClusterSize = this.getMinDivisibleClusterSize();
    ContinuousDistance distance = distanceType.getFastDistance();
    Tuple2<DataSet<Vector>, DataSet<BaseVectorSummary>> vectorsAndStat =
        StatisticsHelper.summaryHelper(in, null, vectorColName);
    DataSet<Integer> dim = vectorsAndStat.f1.map(new MapFunction<BaseVectorSummary, Integer>() {
        private static final long serialVersionUID = 5358843841535961680L;

        @Override
        public Integer map(BaseVectorSummary value) {
            Preconditions.checkArgument(value.count() > 0, "The train dataset is empty!");
            return value.vectorSize();
        }
    });
    // tuple: sampleId, features, assignment
    DataSet<Tuple3<Long, Vector, Long>> initialAssignment = DataSetUtils.zipWithUniqueId(vectorsAndStat.f0)
        .map(new RichMapFunction<Tuple2<Long, Vector>, Tuple3<Long, Vector, Long>>() {
            private static final long serialVersionUID = -6036596630416015773L;
            private int vectorSize;

            @Override
            public void open(Configuration params) {
                vectorSize = (int) this.getRuntimeContext().getBroadcastVariable(VECTOR_SIZE).get(0);
            }

            @Override
            public Tuple3<Long, Vector, Long> map(Tuple2<Long, Vector> value) {
                if (value.f1 instanceof SparseVector) {
                    ((SparseVector) value.f1).setSize(vectorSize);
                }
                return Tuple3.of(value.f0, value.f1, ROOT_INDEX);
            }
        }).withBroadcastSet(dim, VECTOR_SIZE);
    DataSet<Tuple2<Long, ClusterSummary>> clustersSummaries =
        summary(initialAssignment.project(2, 1), dim, distanceType);
    DataSet<Tuple3<Long, ClusterSummary, IterInfo>> clustersSummariesAndIterInfo = clustersSummaries
        .map(new MapFunction<Tuple2<Long, ClusterSummary>, Tuple3<Long, ClusterSummary, IterInfo>>() {
            private static final long serialVersionUID = -3883958936263294331L;

            @Override
            public Tuple3<Long, ClusterSummary, IterInfo> map(Tuple2<Long, ClusterSummary> value) {
                return Tuple3.of(value.f0, value.f1, new IterInfo(maxIter));
            }
        }).withForwardedFields("f0;f1");
    IterativeDataSet<Tuple3<Long, ClusterSummary, IterInfo>> loop =
        clustersSummariesAndIterInfo.iterate(Integer.MAX_VALUE);
    DataSet<Tuple1<IterInfo>> iterInfo = loop.<Tuple1<IterInfo>>project(2).first(1);
    // Get all cluster summaries. Split clusters if at the first step of the inner iterations.
    DataSet<Tuple3<Long, ClusterSummary, IterInfo>> allClusters =
        getOrSplitClusters(loop, k, minDivisibleClusterSize, getRandomSeed());
    DataSet<Long> divisibleClusterIndices = getDivisibleClusterIndices(allClusters);
    DataSet<Tuple2<Long, DenseVector>> newClusterCenters = getNewClusterCenters(allClusters);
    DataSet<Tuple3<Long, Vector, Long>> newAssignment =
        updateAssignment(initialAssignment, divisibleClusterIndices, newClusterCenters, distance, iterInfo);
    DataSet<Tuple2<Long, ClusterSummary>> newClusterSummaries =
        summary(newAssignment.project(2, 1), dim, distanceType);
    DataSet<Tuple3<Long, ClusterSummary, IterInfo>> updatedClusterSummariesWithIterInfo =
        updateClusterSummariesAndIterInfo(allClusters, newClusterSummaries);
    DataSet<Integer> stopCriterion = iterInfo.flatMap(new FlatMapFunction<Tuple1<IterInfo>, Integer>() {
        private static final long serialVersionUID = -4258243788034193744L;

        @Override
        public void flatMap(Tuple1<IterInfo> value, Collector<Integer> out) {
            if (!(value.f0.atLastInnerIterStep() && value.f0.atLastBisectionStep())) {
                out.collect(0);
            }
        }
    });
    DataSet<Tuple2<Long, ClusterSummary>> finalClusterSummaries =
        loop.closeWith(updatedClusterSummariesWithIterInfo, stopCriterion).project(0, 1);
    DataSet<Row> modelRows = finalClusterSummaries
        .mapPartition(new SaveModel(distanceType, vectorColName, k))
        .withBroadcastSet(dim, VECTOR_SIZE)
        .setParallelism(1);
    this.setOutput(modelRows, new BisectingKMeansModelDataConverter().getModelSchema());
    return this;
}
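As with the plain KMeans operator, a minimal hypothetical invocation might look like the following; the column name "vec", the sample points, and the parameter values are assumptions for illustration.

// Hypothetical usage sketch: bisecting k-means on three small point groups.
import com.alibaba.alink.operator.batch.BatchOperator;
import com.alibaba.alink.operator.batch.clustering.BisectingKMeansTrainBatchOp;
import com.alibaba.alink.operator.batch.source.MemSourceBatchOp;

public class BisectingKMeansTrainExample {
    public static void main(String[] args) throws Exception {
        Object[][] rows = new Object[][] {
            {"0 0"}, {"0.1 0"}, {"8 8"}, {"8 8.1"}, {"16 0"}, {"16 0.1"}
        };
        BatchOperator<?> source = new MemSourceBatchOp(rows, new String[] {"vec"});
        BisectingKMeansTrainBatchOp bisecting = new BisectingKMeansTrainBatchOp()
            .setVectorCol("vec")  // read via getVectorCol() above
            .setK(3)              // clusters are split until k leaf clusters exist
            .setMaxIter(10);      // inner-loop refinement steps per split
        bisecting.linkFrom(source).print();
    }
}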
use of com.alibaba.alink.operator.common.statistics.basicstatistic.BaseVectorSummary in project Alink by alibaba.
the class VectorMinMaxScalerModelDataConverter method serializeModel.
/**
 * Serialize the model data to "Tuple3<Params, Iterable<String>, Iterable<Row>>".
 *
 * @param modelData The model data to serialize.
 * @return The serialization result.
 */
public Tuple3<Params, Iterable<String>, Iterable<Row>> serializeModel(
    Tuple3<Double, Double, BaseVectorSummary> modelData) {
    double min = modelData.f0;
    double max = modelData.f1;
    BaseVectorSummary summary = modelData.f2;
    double[] eMins;
    double[] eMaxs;
    if (summary.min() instanceof DenseVector) {
        eMins = ((DenseVector) summary.min()).getData();
    } else {
        eMins = ((SparseVector) summary.min()).toDenseVector().getData();
    }
    if (summary.max() instanceof DenseVector) {
        eMaxs = ((DenseVector) summary.max()).getData();
    } else {
        eMaxs = ((SparseVector) summary.max()).toDenseVector().getData();
    }
    List<String> data = new ArrayList<>();
    data.add(JsonConverter.toJson(eMins));
    data.add(JsonConverter.toJson(eMaxs));
    Params meta = new Params()
        .set(VectorMinMaxScalerTrainParams.MIN, min)
        .set(VectorMinMaxScalerTrainParams.MAX, max)
        .set(VectorMinMaxScalerTrainParams.SELECTED_COL, vectorColName);
    return Tuple3.of(meta, data, new ArrayList<>());
}
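The serialized per-element minima and maxima drive the usual min-max transform at prediction time: each element is rescaled from [eMins[i], eMaxs[i]] to [min, max]. The plain-Java sketch below illustrates that standard formula; it is not Alink's actual predictor code, and the class and method names are hypothetical.

// Illustrative min-max rescaling using the per-element mins/maxs serialized above.
public class MinMaxScaleSketch {
    /** Rescale x[i] from [eMins[i], eMaxs[i]] to [min, max]. */
    static double[] scale(double[] x, double[] eMins, double[] eMaxs, double min, double max) {
        double[] y = new double[x.length];
        for (int i = 0; i < x.length; i++) {
            double range = eMaxs[i] - eMins[i];
            // A constant column has zero range; map it to the midpoint of [min, max].
            double ratio = range == 0 ? 0.5 : (x[i] - eMins[i]) / range;
            y[i] = ratio * (max - min) + min;
        }
        return y;
    }

    public static void main(String[] args) {
        double[] scaled = scale(new double[] {5.0, 2.0},
            new double[] {0.0, 2.0}, new double[] {10.0, 2.0}, 0.0, 1.0);
        System.out.println(java.util.Arrays.toString(scaled)); // [0.5, 0.5]
    }
}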