Use of com.alibaba.alink.common.linalg.SparseVector in project Alink by alibaba.
Class QuantileDiscretizerModelMapperTest, method testDropLast:
@Test
public void testDropLast() throws Exception {
	Params params = new Params()
		.set(QuantileDiscretizerPredictParams.ENCODE, HasEncodeWithoutWoe.Encode.VECTOR)
		.set(QuantileDiscretizerPredictParams.SELECTED_COLS, new String[] { "col2", "col3" })
		.set(QuantileDiscretizerPredictParams.DROP_LAST, true);

	QuantileDiscretizerModelMapper mapper = new QuantileDiscretizerModelMapper(modelSchema, dataSchema, params);
	mapper.loadModel(model);

	assertEquals(mapper.map(defaultRow),
		Row.of("a", new SparseVector(3, new int[] { 0 }, new double[] { 1.0 }), new SparseVector(3)));
	assertEquals(mapper.map(nullElseRow),
		Row.of("b", new SparseVector(3, new int[] { 2 }, new double[] { 1.0 }),
			new SparseVector(3, new int[] { 1 }, new double[] { 1.0 })));
}
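For readers unfamiliar with the encoding asserted above, here is a minimal standalone sketch (not part of the Alink test suite; the class name is invented for illustration) of the two SparseVector shapes involved: a one-hot bucket vector, and the all-zero vector produced for the dropped last bucket when DROP_LAST is true.

import com.alibaba.alink.common.linalg.SparseVector;

// Minimal sketch, using only the SparseVector constructors that appear in the test above.
public class DropLastEncodingSketch {
	public static void main(String[] args) {
		// Bucket 0 of 3 buckets -> one-hot vector with a single non-zero at index 0.
		SparseVector firstBucket = new SparseVector(3, new int[] { 0 }, new double[] { 1.0 });
		// With DROP_LAST=true the last bucket carries no non-zero entry at all.
		SparseVector droppedLastBucket = new SparseVector(3);

		// get(i) returns 0.0 for indices that have no stored value.
		System.out.println(firstBucket.get(0));       // 1.0
		System.out.println(droppedLastBucket.get(2)); // 0.0
	}
}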
Use of com.alibaba.alink.common.linalg.SparseVector in project Alink by alibaba.
Class EvaluationUtilTest, method assertBinaryMetrics:
private void assertBinaryMetrics(BaseMetricsSummary baseMetric) {
	Assert.assertTrue(baseMetric instanceof BinaryMetricsSummary);
	BinaryMetricsSummary metrics = (BinaryMetricsSummary) baseMetric;
	Assert.assertEquals(5, metrics.total);
	Assert.assertEquals(2.987, metrics.logLoss, 0.01);

	Assert.assertEquals(metrics.positiveBin.length, 100000);
	SparseVector vec = new SparseVector(100000, new int[] { 70000, 80000, 90000 }, new double[] { 1, 1, 1 });
	for (int i = 0; i < metrics.positiveBin.length; i++) {
		Assert.assertEquals((int) vec.get(i), metrics.positiveBin[i]);
	}

	Assert.assertEquals(metrics.negativeBin.length, 100000);
	vec = new SparseVector(100000, new int[] { 60000, 75000 }, new double[] { 1, 1 });
	for (int i = 0; i < metrics.negativeBin.length; i++) {
		Assert.assertEquals((int) vec.get(i), metrics.negativeBin[i]);
	}
}
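The loops above rely on SparseVector.get(i) returning 0.0 for indices without a stored value, which lets a SparseVector act as a sparse "expected histogram" against a dense bin array. A minimal standalone sketch of that comparison pattern (the class and array names are invented for illustration):

import com.alibaba.alink.common.linalg.SparseVector;

public class SparseExpectedHistogramSketch {
	public static void main(String[] args) {
		// Dense histogram with three non-empty bins, mirroring positiveBin above.
		int[] bins = new int[100000];
		bins[70000] = 1;
		bins[80000] = 1;
		bins[90000] = 1;

		// The expected counts, stored sparsely: only the non-zero bins are listed.
		SparseVector expected =
			new SparseVector(100000, new int[] { 70000, 80000, 90000 }, new double[] { 1, 1, 1 });

		boolean match = true;
		for (int i = 0; i < bins.length; i++) {
			// get(i) is 0.0 everywhere except the three listed indices.
			match &= (int) expected.get(i) == bins[i];
		}
		System.out.println(match); // true
	}
}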
Use of com.alibaba.alink.common.linalg.SparseVector in project Alink by alibaba.
Class SparseVectorSummaryTest, method summarizer:
private SparseVectorSummary summarizer() {
	SparseVector[] data = new SparseVector[] {
		new SparseVector(5, new int[] { 0, 1, 2 }, new double[] { 1.0, -1.0, 3.0 }),
		new SparseVector(5, new int[] { 1, 2, 3 }, new double[] { 2.0, -2.0, 3.0 }),
		new SparseVector(5, new int[] { 2, 3, 4 }, new double[] { 3.0, -3.0, 3.0 }),
		new SparseVector(5, new int[] { 0, 2, 3 }, new double[] { 4.0, -4.0, 3.0 }),
		new SparseVector(5, new int[] { 0, 1, 4 }, new double[] { 5.0, -5.0, 3.0 })
	};
	SparseVectorSummarizer summarizer = new SparseVectorSummarizer();
	for (SparseVector aData : data) {
		summarizer.visit(aData);
	}
	return (SparseVectorSummary) summarizer.toSummary();
}
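A hedged usage sketch of the visit/toSummary pattern above. It only calls count() and vectorSize(), the two summary accessors that also appear in the training operators below; the import paths for the summarizer classes are assumptions for illustration.

import com.alibaba.alink.common.linalg.SparseVector;
// Package paths assumed for illustration.
import com.alibaba.alink.operator.common.statistics.basicstatistic.SparseVectorSummarizer;
import com.alibaba.alink.operator.common.statistics.basicstatistic.SparseVectorSummary;

public class SummaryUsageSketch {
	public static void main(String[] args) {
		SparseVectorSummarizer summarizer = new SparseVectorSummarizer();
		summarizer.visit(new SparseVector(5, new int[] { 0, 2 }, new double[] { 1.0, 3.0 }));
		summarizer.visit(new SparseVector(5, new int[] { 1, 4 }, new double[] { 2.0, 4.0 }));

		SparseVectorSummary summary = (SparseVectorSummary) summarizer.toSummary();
		System.out.println(summary.count());      // 2 visited vectors
		System.out.println(summary.vectorSize()); // 5
	}
}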
Use of com.alibaba.alink.common.linalg.SparseVector in project Alink by alibaba.
Class KMeansTrainBatchOp, method linkFrom:
@Override
public KMeansTrainBatchOp linkFrom(BatchOperator<?>... inputs) {
	BatchOperator<?> in = checkAndGetFirst(inputs);

	final int maxIter = this.getMaxIter();
	final double tol = this.getEpsilon();
	final String vectorColName = this.getVectorCol();
	final DistanceType distanceType = getDistanceType();
	FastDistance distance = distanceType.getFastDistance();

	Tuple2<DataSet<Vector>, DataSet<BaseVectorSummary>> statistics =
		StatisticsHelper.summaryHelper(in, null, vectorColName);

	DataSet<Integer> vectorSize = statistics.f1.map(new MapFunction<BaseVectorSummary, Integer>() {
		private static final long serialVersionUID = 4184586558834055401L;

		@Override
		public Integer map(BaseVectorSummary value) {
			Preconditions.checkArgument(value.count() > 0, "The train dataset is empty!");
			return value.vectorSize();
		}
	});

	DataSet<FastDistanceVectorData> data = statistics.f0
		.rebalance()
		.map(new RichMapFunction<Vector, FastDistanceVectorData>() {
			private static final long serialVersionUID = -7443226889326704768L;
			private int vectorSize;

			@Override
			public void open(Configuration params) {
				vectorSize = (int) this.getRuntimeContext().getBroadcastVariable(VECTOR_SIZE).get(0);
			}

			@Override
			public FastDistanceVectorData map(Vector value) {
				if (value instanceof SparseVector) {
					((SparseVector) value).setSize(vectorSize);
				}
				return distance.prepareVectorData(Row.of(value), 0);
			}
		})
		.withBroadcastSet(vectorSize, VECTOR_SIZE);

	DataSet<FastDistanceMatrixData> initCentroid =
		KMeansInitCentroids.initKmeansCentroids(data, distance, this.getParams(), vectorSize, getRandomSeed());

	DataSet<Row> finalCentroid = iterateICQ(initCentroid, data, vectorSize, maxIter, tol, distance,
		HasKMeansWithHaversineDistanceType.DistanceType.valueOf(distanceType.name()),
		vectorColName, null, null);

	this.setOutput(finalCentroid, new KMeansModelDataConverter().getModelSchema());
	return this;
}
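The only SparseVector-specific step above is the setSize call inside the rebalanced map: a sparse sample may know only its non-zero indices, so the dimension computed by the summary (broadcast as VECTOR_SIZE) is stamped onto it before the FastDistance data is prepared. Here is a minimal sketch of that step in isolation, assuming SparseVector accepts -1 as "dimension not yet known" (as for a sparse input without an explicit size); the class name and the hard-coded vectorSize are invented for illustration.

import com.alibaba.alink.common.linalg.SparseVector;

public class SetSizeSketch {
	public static void main(String[] args) {
		// Assumed: -1 marks an undetermined dimension; only the non-zero entries are known.
		SparseVector sample = new SparseVector(-1, new int[] { 2, 7 }, new double[] { 1.0, 3.0 });

		// In linkFrom this value is read from the VECTOR_SIZE broadcast variable in open().
		int vectorSize = 10;
		sample.setSize(vectorSize);

		System.out.println(sample.size()); // 10
	}
}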
Use of com.alibaba.alink.common.linalg.SparseVector in project Alink by alibaba.
Class BisectingKMeansTrainBatchOp, method linkFrom:
/**
 * The bisecting k-means algorithm has nested loops: in the outer loop, cluster centers
 * are split; in the inner loop, the split centers are iteratively refined.
 * However, Flink lacks nested-loop iteration semantics, so we have to flatten the
 * nested loop in our implementation.
 */
@Override
public BisectingKMeansTrainBatchOp linkFrom(BatchOperator<?>... inputs) {
	BatchOperator<?> in = checkAndGetFirst(inputs);

	// get the values of the input parameters
	final DistanceType distanceType = getDistanceType();
	final int k = this.getK();
	final int maxIter = this.getMaxIter();
	final String vectorColName = this.getVectorCol();
	final int minDivisibleClusterSize = this.getMinDivisibleClusterSize();
	ContinuousDistance distance = distanceType.getFastDistance();

	Tuple2<DataSet<Vector>, DataSet<BaseVectorSummary>> vectorsAndStat =
		StatisticsHelper.summaryHelper(in, null, vectorColName);

	DataSet<Integer> dim = vectorsAndStat.f1.map(new MapFunction<BaseVectorSummary, Integer>() {
		private static final long serialVersionUID = 5358843841535961680L;

		@Override
		public Integer map(BaseVectorSummary value) {
			Preconditions.checkArgument(value.count() > 0, "The train dataset is empty!");
			return value.vectorSize();
		}
	});

	// tuple: sampleId, features, assignment
	DataSet<Tuple3<Long, Vector, Long>> initialAssignment = DataSetUtils.zipWithUniqueId(vectorsAndStat.f0)
		.map(new RichMapFunction<Tuple2<Long, Vector>, Tuple3<Long, Vector, Long>>() {
			private static final long serialVersionUID = -6036596630416015773L;
			private int vectorSize;

			@Override
			public void open(Configuration params) {
				vectorSize = (int) this.getRuntimeContext().getBroadcastVariable(VECTOR_SIZE).get(0);
			}

			@Override
			public Tuple3<Long, Vector, Long> map(Tuple2<Long, Vector> value) {
				if (value.f1 instanceof SparseVector) {
					((SparseVector) value.f1).setSize(vectorSize);
				}
				return Tuple3.of(value.f0, value.f1, ROOT_INDEX);
			}
		})
		.withBroadcastSet(dim, VECTOR_SIZE);

	DataSet<Tuple2<Long, ClusterSummary>> clustersSummaries =
		summary(initialAssignment.project(2, 1), dim, distanceType);

	DataSet<Tuple3<Long, ClusterSummary, IterInfo>> clustersSummariesAndIterInfo = clustersSummaries
		.map(new MapFunction<Tuple2<Long, ClusterSummary>, Tuple3<Long, ClusterSummary, IterInfo>>() {
			private static final long serialVersionUID = -3883958936263294331L;

			@Override
			public Tuple3<Long, ClusterSummary, IterInfo> map(Tuple2<Long, ClusterSummary> value) {
				return Tuple3.of(value.f0, value.f1, new IterInfo(maxIter));
			}
		})
		.withForwardedFields("f0;f1");

	IterativeDataSet<Tuple3<Long, ClusterSummary, IterInfo>> loop =
		clustersSummariesAndIterInfo.iterate(Integer.MAX_VALUE);

	DataSet<Tuple1<IterInfo>> iterInfo = loop.<Tuple1<IterInfo>>project(2).first(1);

	// Get all cluster summaries. Split clusters if at the first step of inner iterations.
	DataSet<Tuple3<Long, ClusterSummary, IterInfo>> allClusters =
		getOrSplitClusters(loop, k, minDivisibleClusterSize, getRandomSeed());

	DataSet<Long> divisibleClusterIndices = getDivisibleClusterIndices(allClusters);
	DataSet<Tuple2<Long, DenseVector>> newClusterCenters = getNewClusterCenters(allClusters);

	DataSet<Tuple3<Long, Vector, Long>> newAssignment =
		updateAssignment(initialAssignment, divisibleClusterIndices, newClusterCenters, distance, iterInfo);

	DataSet<Tuple2<Long, ClusterSummary>> newClusterSummaries =
		summary(newAssignment.project(2, 1), dim, distanceType);

	DataSet<Tuple3<Long, ClusterSummary, IterInfo>> updatedClusterSummariesWithIterInfo =
		updateClusterSummariesAndIterInfo(allClusters, newClusterSummaries);

	DataSet<Integer> stopCriterion = iterInfo.flatMap(new FlatMapFunction<Tuple1<IterInfo>, Integer>() {
		private static final long serialVersionUID = -4258243788034193744L;

		@Override
		public void flatMap(Tuple1<IterInfo> value, Collector<Integer> out) {
			if (!(value.f0.atLastInnerIterStep() && value.f0.atLastBisectionStep())) {
				out.collect(0);
			}
		}
	});

	DataSet<Tuple2<Long, ClusterSummary>> finalClusterSummaries = loop
		.closeWith(updatedClusterSummariesWithIterInfo, stopCriterion)
		.project(0, 1);

	DataSet<Row> modelRows = finalClusterSummaries
		.mapPartition(new SaveModel(distanceType, vectorColName, k))
		.withBroadcastSet(dim, VECTOR_SIZE)
		.setParallelism(1);

	this.setOutput(modelRows, new BisectingKMeansModelDataConverter().getModelSchema());
	return this;
}
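The Javadoc above is the key to reading this method: IterInfo carries the state of both loops through a single Flink iteration, and the stopCriterion DataSet becomes empty only at the last inner refinement step of the last bisection step. Below is a toy, non-Flink sketch of that flattening; all names and counters are invented for illustration, and Alink's IterInfo also tracks details (such as convergence) that this sketch ignores.

// Toy sketch: two nested loops flattened into one counter, mirroring the role of IterInfo.
public class FlattenedLoopSketch {
	public static void main(String[] args) {
		int bisectionSteps = 3; // outer loop: how many times clusters are split
		int maxInnerIter = 4;   // inner loop: refinement iterations after each split

		for (int step = 0; step < bisectionSteps * maxInnerIter; step++) {
			int innerIter = step % maxInnerIter;
			int bisection = step / maxInnerIter;

			boolean splitNow = innerIter == 0;                        // "first step of inner iterations"
			boolean atLastInnerIterStep = innerIter == maxInnerIter - 1;
			boolean atLastBisectionStep = bisection == bisectionSteps - 1;

			// linkFrom terminates the Flink iteration when both flags are true,
			// i.e. when stopCriterion collects nothing.
			boolean stop = atLastInnerIterStep && atLastBisectionStep;
			System.out.printf("step=%d split=%b stop=%b%n", step, splitNow, stop);
		}
	}
}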