Use of com.alibaba.alink.common.linalg.SparseVector in project Alink by alibaba.
Class GmmTrainBatchOp, method linkFrom.
/**
* Train the Gaussian Mixture Model (GMM) with the Expectation-Maximization (EM) algorithm.
*/
@Override
public GmmTrainBatchOp linkFrom(BatchOperator<?>... inputs) {
BatchOperator<?> in = checkAndGetFirst(inputs);
final String vectorColName = getVectorCol();
final int numClusters = getK();
final int maxIter = getMaxIter();
final double tol = getEpsilon();
// Extract the vectors from the input operator.
Tuple2<DataSet<Vector>, DataSet<BaseVectorSummary>> vectorAndSummary = StatisticsHelper.summaryHelper(in, null, vectorColName);
DataSet<Integer> featureSize = vectorAndSummary.f1.map(new MapFunction<BaseVectorSummary, Integer>() {
private static final long serialVersionUID = 8456872852742625845L;
@Override
public Integer map(BaseVectorSummary summary) throws Exception {
return summary.vectorSize();
}
});
DataSet<Vector> data = vectorAndSummary.f0.map(new RichMapFunction<Vector, Vector>() {
private static final long serialVersionUID = -845795862675993897L;
transient int featureSize;
@Override
public void open(Configuration parameters) throws Exception {
List<Integer> bc = getRuntimeContext().getBroadcastVariable("featureSize");
this.featureSize = bc.get(0);
}
@Override
public Vector map(Vector vec) throws Exception {
if (vec instanceof SparseVector) {
((SparseVector) vec).setSize(featureSize);
}
return vec;
}
}).withBroadcastSet(featureSize, "featureSize");
// Initialize the model.
DataSet<Tuple3<Integer, GmmClusterSummary, IterationStatus>> initialModel = initRandom(data, numClusters, getRandomSeed());
// Iteratively update the model with EM algorithm.
IterativeDataSet<Tuple3<Integer, GmmClusterSummary, IterationStatus>> loop = initialModel.iterate(maxIter);
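// E-step preparation: for each cluster, build a MultivariateGaussian from the current mean and the expanded covariance so densities can be evaluated on the data.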
DataSet<Tuple4<Integer, GmmClusterSummary, IterationStatus, MultivariateGaussian>> md = loop.mapPartition(new RichMapPartitionFunction<Tuple3<Integer, GmmClusterSummary, IterationStatus>, Tuple4<Integer, GmmClusterSummary, IterationStatus, MultivariateGaussian>>() {
private static final long serialVersionUID = -1937088240477952410L;
@Override
public void mapPartition(Iterable<Tuple3<Integer, GmmClusterSummary, IterationStatus>> values, Collector<Tuple4<Integer, GmmClusterSummary, IterationStatus, MultivariateGaussian>> collector) throws Exception {
for (Tuple3<Integer, GmmClusterSummary, IterationStatus> value : values) {
DenseVector means = value.f1.mean;
DenseMatrix cov = GmmModelData.expandCovarianceMatrix(value.f1.cov, means.size());
MultivariateGaussian md = new MultivariateGaussian(means, cov);
collector.collect(Tuple4.of(value.f0, value.f1, value.f2, md));
}
}
}).withForwardedFields("f0;f1;f2");
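// E-step and partial M-step: each partition accumulates responsibility-weighted sufficient statistics in a LocalAggregator; the reduce below merges the per-partition aggregators.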
DataSet<Tuple3<Integer, GmmClusterSummary, IterationStatus>> updatedModel = data.<LocalAggregator>mapPartition(new RichMapPartitionFunction<Vector, LocalAggregator>() {
private static final long serialVersionUID = 8356493076036649604L;
transient DenseVector oldWeights;
transient DenseVector[] oldMeans;
transient DenseVector[] oldCovs;
transient MultivariateGaussian[] mnd;
@Override
public void open(Configuration parameters) throws Exception {
oldWeights = new DenseVector(numClusters);
oldMeans = new DenseVector[numClusters];
oldCovs = new DenseVector[numClusters];
mnd = new MultivariateGaussian[numClusters];
}
@Override
public void mapPartition(Iterable<Vector> values, Collector<LocalAggregator> out) throws Exception {
List<Integer> bcNumFeatures = getRuntimeContext().getBroadcastVariable("featureSize");
List<Tuple4<Integer, GmmClusterSummary, IterationStatus, MultivariateGaussian>> bcOldModel = getRuntimeContext().getBroadcastVariable("oldModel");
double prevLogLikelihood = 0.;
for (Tuple4<Integer, GmmClusterSummary, IterationStatus, MultivariateGaussian> t : bcOldModel) {
int clusterId = t.f0;
GmmClusterSummary clusterInfo = t.f1;
prevLogLikelihood = t.f2.currLogLikelihood;
oldWeights.set(clusterId, clusterInfo.weight);
oldMeans[clusterId] = clusterInfo.mean;
oldCovs[clusterId] = clusterInfo.cov;
mnd[clusterId] = new MultivariateGaussian(t.f3);
// mnd[clusterId] = t.f3;
}
LocalAggregator aggregator = new LocalAggregator(numClusters, bcNumFeatures.get(0), prevLogLikelihood, oldWeights, oldMeans, oldCovs, mnd);
values.forEach(aggregator::add);
out.collect(aggregator);
}
}).withBroadcastSet(featureSize, "featureSize").withBroadcastSet(md, "oldModel").name("E-M_step").reduce(new ReduceFunction<LocalAggregator>() {
private static final long serialVersionUID = -6976429920344470952L;
@Override
public LocalAggregator reduce(LocalAggregator value1, LocalAggregator value2) throws Exception {
return value1.merge(value2);
}
}).flatMap(new RichFlatMapFunction<LocalAggregator, Tuple3<Integer, GmmClusterSummary, IterationStatus>>() {
private static final long serialVersionUID = 6599047947335456972L;
@Override
public void flatMap(LocalAggregator aggregator, Collector<Tuple3<Integer, GmmClusterSummary, IterationStatus>> out) throws Exception {
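// M-step: normalize the merged sufficient statistics into new cluster weights, means and covariances, and record the log-likelihood for the convergence check.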
for (int i = 0; i < numClusters; i++) {
double w = aggregator.updatedWeightsSum.get(i);
aggregator.updatedMeansSum[i].scaleEqual(1.0 / w);
aggregator.updatedCovsSum[i].scaleEqual(1.0 / w);
GmmClusterSummary model = new GmmClusterSummary(i, w / aggregator.totalCount, aggregator.updatedMeansSum[i], aggregator.updatedCovsSum[i]);
// Note that Cov(X,Y) is computed as E[XY] - E[X]E[Y].
int featureSize = model.mean.size();
for (int m = 0; m < featureSize; m++) {
// loop over columns
for (int n = m; n < featureSize; n++) {
int pos = GmmModelData.getElementPositionInCompactMatrix(m, n, featureSize);
model.cov.add(pos, -1.0 * model.mean.get(m) * model.mean.get(n));
}
}
IterationStatus stat = new IterationStatus();
stat.prevLogLikelihood = aggregator.prevLogLikelihood;
stat.currLogLikelihood = aggregator.newLogLikelihood;
out.collect(Tuple3.of(i, model, stat));
}
}
}).partitionCustom(new Partitioner<Integer>() {
private static final long serialVersionUID = 1006932050560340472L;
@Override
public int partition(Integer key, int numPartitions) {
return key % numPartitions;
}
}, 0);
// Check whether stop criterion is met.
DataSet<Boolean> criterion = updatedModel.first(1).flatMap(new RichFlatMapFunction<Tuple3<Integer, GmmClusterSummary, IterationStatus>, Boolean>() {
private static final long serialVersionUID = 6890280483282243057L;
@Override
public void flatMap(Tuple3<Integer, GmmClusterSummary, IterationStatus> value, Collector<Boolean> out) throws Exception {
IterationStatus stat = value.f2;
int stepNo = getIterationRuntimeContext().getSuperstepNumber();
double diffLogLikelihood = Math.abs(stat.currLogLikelihood - stat.prevLogLikelihood);
LOG.info("step {}, prevLogLikelihood {}, currLogLikelihood {}, diffLogLikelihood {}", stepNo, stat.prevLogLikelihood, stat.currLogLikelihood, diffLogLikelihood);
if (stepNo <= 1 || diffLogLikelihood > tol) {
out.collect(false);
}
}
});
DataSet<Tuple3<Integer, GmmClusterSummary, IterationStatus>> finalModel = loop.closeWith(updatedModel, criterion);
// Output the model.
DataSet<Row> modelRows = finalModel.mapPartition(new RichMapPartitionFunction<Tuple3<Integer, GmmClusterSummary, IterationStatus>, Row>() {
private static final long serialVersionUID = -8411238421923712023L;
transient int featureSize;
@Override
public void open(Configuration parameters) throws Exception {
this.featureSize = (int) (getRuntimeContext().getBroadcastVariable("featureSize").get(0));
}
@Override
public void mapPartition(Iterable<Tuple3<Integer, GmmClusterSummary, IterationStatus>> values, Collector<Row> out) throws Exception {
int numTasks = getRuntimeContext().getNumberOfParallelSubtasks();
if (numTasks > 1) {
throw new RuntimeException("parallelism is not 1 when saving model.");
}
GmmModelData model = new GmmModelData();
model.k = numClusters;
model.dim = featureSize;
model.vectorCol = vectorColName;
model.data = new ArrayList<>(numClusters);
for (Tuple3<Integer, GmmClusterSummary, IterationStatus> t : values) {
t.f1.clusterId = t.f0;
model.data.add(t.f1);
}
new GmmModelDataConverter().save(model, out);
}
}).setParallelism(1).withBroadcastSet(featureSize, "featureSize");
this.setOutput(modelRows, new GmmModelDataConverter().getModelSchema());
return this;
}
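For orientation, here is a minimal, hedged usage sketch of the operator above. It is not code from the project: the sample rows, the column name, and the package paths for MemSourceBatchOp and GmmTrainBatchOp are assumptions, and the setters simply mirror the getters read at the top of linkFrom (setVectorCol, setK, setMaxIter, setEpsilon, setRandomSeed).
import org.apache.flink.types.Row;
import com.alibaba.alink.operator.batch.BatchOperator;
import com.alibaba.alink.operator.batch.source.MemSourceBatchOp;
import com.alibaba.alink.operator.batch.clustering.GmmTrainBatchOp;
public class GmmTrainExample {
public static void main(String[] args) throws Exception {
// Toy data: one string column of space-separated values, assumed to be accepted as a vector column.
Row[] rows = new Row[] { Row.of("-0.6 -2.2"), Row.of("-1.0 -2.0"), Row.of("-0.7 -2.1"), Row.of("0.1 0.2"), Row.of("0.2 0.1"), Row.of("0.0 0.0") };
BatchOperator<?> source = new MemSourceBatchOp(rows, new String[] { "features" });
// The parameters correspond to the getters used in linkFrom above.
GmmTrainBatchOp gmm = new GmmTrainBatchOp().setVectorCol("features").setK(2).setMaxIter(100).setEpsilon(1.0e-4).setRandomSeed(2020);
gmm.linkFrom(source).print();
}
}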
Use of com.alibaba.alink.common.linalg.SparseVector in project Alink by alibaba.
Class SparseData, method loadFromRowWithContinues.
@Override
public void loadFromRowWithContinues(List<Row> rawData) {
if (rawData == null) {
return;
}
col = new int[n + 1];
nnz = 0;
if (isRanking) {
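// Ranking case: rows are first sorted and grouped by query id (field 0); query offsets are recorded while the column-compressed arrays are built from the vector in field 1.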
rawData.sort((o1, o2) -> ((Comparable) o1.getField(0)).compareTo(o2.getField(0)));
int[] rowAcc = new int[n];
int querySize = 0;
long latestQueryId = 0;
int localCnt = 0;
for (Row localRow : rawData) {
long queryId = ((Number) localRow.getField(0)).longValue();
if (querySize == 0 || latestQueryId != queryId) {
latestQueryId = queryId;
querySize++;
}
Vector vector = VectorUtil.getVector(localRow.getField(1));
if (vector instanceof SparseVector) {
SparseVector sparseVector = (SparseVector) vector;
int[] indices = sparseVector.getIndices();
for (int index : indices) {
rowAcc[index] += 1;
}
nnz += sparseVector.getValues().length;
} else {
DenseVector denseVector = (DenseVector) vector;
double[] vals = denseVector.getData();
for (int i = 0; i < vals.length; ++i) {
rowAcc[i] += 1;
}
nnz += vals.length;
}
labels[localCnt] = ((Number) localRow.getField(localRow.getArity() - 1)).doubleValue();
localCnt++;
}
for (int i = 0; i < n; i++) {
col[i + 1] = rowAcc[i] + col[i];
}
int queryIdOffsetIndex = 0;
queryIdOffset = new int[querySize + 1];
values = new IndexedValue[nnz];
orderedIndices = new Integer[nnz];
Arrays.fill(rowAcc, 0);
localCnt = 0;
for (Row localRow : rawData) {
long queryId = ((Number) localRow.getField(0)).longValue();
if (queryIdOffsetIndex == 0 || latestQueryId != queryId) {
queryIdOffset[queryIdOffsetIndex] = localCnt;
queryIdOffsetIndex++;
if (maxQuerySize < latestQueryId - queryId) {
maxQuerySize = (int) (latestQueryId - queryId);
}
latestQueryId = queryId;
}
Vector vector = VectorUtil.getVector(localRow.getField(1));
if (vector instanceof SparseVector) {
SparseVector sparseVector = (SparseVector) vector;
int[] indices = sparseVector.getIndices();
double[] values = sparseVector.getValues();
for (int j = 0; j < indices.length; ++j) {
int localCol = indices[j];
double localVal = values[j];
boolean isMissing = Preprocessing.isMissing(localVal, featureMetas[localCol], zeroAsMissing);
if (!useMissing && isMissing) {
throw new IllegalArgumentException("Find the missing value in data. " + "Maybe you could open the useMissing to deal with the missing value");
}
if (isMissing && featureMetas[j].getType().equals(FeatureMeta.FeatureType.CATEGORICAL)) {
localVal = featureMetas[localCol].getMissingIndex();
}
this.values[col[localCol] + rowAcc[localCol]] = new IndexedValue(localCnt, localVal);
rowAcc[localCol]++;
}
} else {
DenseVector denseVector = (DenseVector) vector;
double[] values = denseVector.getData();
for (int j = 0; j < values.length; ++j) {
double localVal = values[j];
boolean isMissing = Preprocessing.isMissing(localVal, featureMetas[j], zeroAsMissing);
if (!useMissing && isMissing) {
throw new IllegalArgumentException("Find the missing value in data. " + "Maybe you could open the useMissing to deal with the missing value");
}
if (isMissing && featureMetas[j].getType().equals(FeatureMeta.FeatureType.CATEGORICAL)) {
localVal = featureMetas[j].getMissingIndex();
}
this.values[col[j] + rowAcc[j]] = new IndexedValue(localCnt, localVal);
rowAcc[j]++;
}
}
localCnt++;
}
queryIdOffset[querySize] = localCnt;
} else {
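// Non-ranking case: the same two-pass construction, but the vector sits in field 0 and no query bookkeeping is needed.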
int localCnt = 0;
int[] rowAcc = new int[n];
for (Row localRow : rawData) {
Vector vector = VectorUtil.getVector(localRow.getField(0));
if (vector instanceof SparseVector) {
SparseVector sparseVector = (SparseVector) vector;
int[] indices = sparseVector.getIndices();
for (int index : indices) {
rowAcc[index] += 1;
}
nnz += sparseVector.getValues().length;
} else {
DenseVector denseVector = (DenseVector) vector;
double[] vals = denseVector.getData();
for (int i = 0; i < vals.length; ++i) {
rowAcc[i] += 1;
}
nnz += vals.length;
}
labels[localCnt] = ((Number) localRow.getField(localRow.getArity() - 1)).doubleValue();
localCnt++;
}
for (int i = 0; i < n; i++) {
col[i + 1] = rowAcc[i] + col[i];
}
values = new IndexedValue[nnz];
orderedIndices = new Integer[nnz];
Arrays.fill(rowAcc, 0);
localCnt = 0;
for (Row localRow : rawData) {
Vector vector = VectorUtil.getVector(localRow.getField(0));
if (vector instanceof SparseVector) {
SparseVector sparseVector = (SparseVector) vector;
int[] indices = sparseVector.getIndices();
double[] values = sparseVector.getValues();
for (int j = 0; j < indices.length; ++j) {
int localCol = indices[j];
double localVal = values[j];
boolean isMissing = Preprocessing.isMissing(localVal, featureMetas[localCol], zeroAsMissing);
if (!useMissing && isMissing) {
throw new IllegalArgumentException("Found missing values in the data. " + "Enable useMissing to handle missing values.");
}
if (isMissing && featureMetas[localCol].getType().equals(FeatureMeta.FeatureType.CATEGORICAL)) {
localVal = featureMetas[localCol].getMissingIndex();
}
this.values[col[localCol] + rowAcc[localCol]] = new IndexedValue(localCnt, localVal);
rowAcc[localCol]++;
}
} else {
DenseVector denseVector = (DenseVector) vector;
double[] values = denseVector.getData();
for (int j = 0; j < values.length; ++j) {
double localVal = values[j];
boolean isMissing = Preprocessing.isMissing(localVal, featureMetas[j], zeroAsMissing);
if (!useMissing && isMissing) {
throw new IllegalArgumentException("Find the missing value in data. " + "Maybe you could open the useMissing to deal with the missing value");
}
if (isMissing && featureMetas[j].getType().equals(FeatureMeta.FeatureType.CATEGORICAL)) {
localVal = featureMetas[j].getMissingIndex();
}
this.values[col[j] + rowAcc[j]] = new IndexedValue(localCnt, localVal);
rowAcc[j]++;
}
}
localCnt++;
}
}
}
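The method above makes two passes over rawData: the first pass counts non-zeros per feature column and turns the counts into the prefix-sum array col, the second pass drops each (row, value) pair into its column segment using rowAcc as a per-column write cursor. Below is a stripped-down, self-contained sketch of that pattern; the class and variable names are hypothetical and no Alink types are involved.
import java.util.Arrays;
public class ColumnCompressDemo {
public static void main(String[] args) {
// Three feature columns; each row is given as (column index, value) pairs, mimicking a SparseVector.
int n = 3;
int[][] rowIndices = { { 0, 2 }, { 1 }, { 0, 1, 2 } };
double[][] rowValues = { { 1.0, 3.0 }, { 2.0 }, { 4.0, 5.0, 6.0 } };
// Pass 1: count non-zeros per column, then prefix-sum the counts into col.
int[] col = new int[n + 1];
int[] rowAcc = new int[n];
int nnz = 0;
for (int[] indices : rowIndices) {
for (int c : indices) {
rowAcc[c]++;
}
nnz += indices.length;
}
for (int i = 0; i < n; i++) {
col[i + 1] = col[i] + rowAcc[i];
}
// Pass 2: place each (row id, value) into its column segment; rowAcc is reused as the write cursor.
int[] valueRow = new int[nnz];
double[] values = new double[nnz];
Arrays.fill(rowAcc, 0);
for (int r = 0; r < rowIndices.length; r++) {
for (int j = 0; j < rowIndices[r].length; j++) {
int c = rowIndices[r][j];
int pos = col[c] + rowAcc[c];
valueRow[pos] = r;
values[pos] = rowValues[r][j];
rowAcc[c]++;
}
}
// Column c now owns values[col[c] .. col[c + 1]); e.g. column 0 holds {1.0, 4.0} from rows 0 and 2.
for (int c = 0; c < n; c++) {
System.out.println("column " + c + ": values " + Arrays.toString(Arrays.copyOfRange(values, col[c], col[c + 1])) + ", rows " + Arrays.toString(Arrays.copyOfRange(valueRow, col[c], col[c + 1])));
}
}
}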
Use of com.alibaba.alink.common.linalg.SparseVector in project Alink by alibaba.
Class VectorSizeHintTest, method pipelineStreamTest.
@Test
public void pipelineStreamTest() throws Exception {
StreamOperator streamOperator = new VectorSizeHint().setSelectedCol("c0").setOutputCol("filter_result").setSize(8).transform((StreamOperator) getData(false));
CollectSinkStreamOp collectSinkStreamOp = new CollectSinkStreamOp().linkFrom(streamOperator);
StreamOperator.execute();
List<Row> result = collectSinkStreamOp.getAndRemoveValues();
result.sort(new RowComparator(0));
assertEquals(VectorUtil.getSparseVector(result.get(0).getField(4)), new SparseVector(8, new int[] { 1, 2, 7 }, new double[] { 2.0, 3.0, 4.3 }));
assertEquals(VectorUtil.getSparseVector(result.get(1).getField(4)), new SparseVector(8, new int[] { 1, 2, 7 }, new double[] { 2.0, 3.0, 4.3 }));
}
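For comparison, a batch-side sketch of the same stage. This is a hedged example, not taken from the test: the sample row, the string encoding of the sparse vector, and the package paths are assumptions; the parameters mirror the stream test above.
import org.apache.flink.types.Row;
import com.alibaba.alink.operator.batch.BatchOperator;
import com.alibaba.alink.operator.batch.source.MemSourceBatchOp;
import com.alibaba.alink.pipeline.dataproc.vector.VectorSizeHint;
public class VectorSizeHintBatchExample {
public static void main(String[] args) throws Exception {
// A sparse vector written as "index:value" pairs without an explicit size.
Row[] rows = new Row[] { Row.of("1:2.0 2:3.0 7:4.3") };
BatchOperator<?> data = new MemSourceBatchOp(rows, new String[] { "c0" });
// Attach the declared size 8 to the vector, as in the stream test.
new VectorSizeHint().setSelectedCol("c0").setOutputCol("filter_result").setSize(8).transform(data).print();
}
}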
Use of com.alibaba.alink.common.linalg.SparseVector in project Alink by alibaba.
Class FeatureHasherTest, method test.
@Test
public void test() throws Exception {
Row[] rows = new Row[] { Row.of(1, 1.1, true, "2", "A"), Row.of(2, 1.1, false, "2", "B"), Row.of(3, 1.1, true, "1", "B"), Row.of(4, 2.2, true, "1", "A") };
List<Row> expectedRows = Arrays.asList(Row.of(1, new SparseVector(100, new int[] { 9, 38, 45, 95 }, new double[] { 1.0, 1.1, 1.0, 1.0 })), Row.of(2, new SparseVector(100, new int[] { 9, 30, 38, 76 }, new double[] { 1.0, 1.0, 1.1, 1.0 })), Row.of(3, new SparseVector(100, new int[] { 11, 38, 76, 95 }, new double[] { 1.0, 1.1, 1.0, 1.0 })), Row.of(4, new SparseVector(100, new int[] { 11, 38, 45, 95 }, new double[] { 1.0, 2.2, 1.0, 1.0 })));
BatchOperator<?> data = new MemSourceBatchOp(rows, new String[] { "id", "double", "bool", "number", "str" });
StreamOperator<?> dataStream = new MemSourceStreamOp(rows, new String[] { "id", "double", "bool", "number", "str" });
FeatureHasher op = new FeatureHasher().setSelectedCols(new String[] { "double", "bool", "number", "str" }).setNumFeatures(100).setOutputCol("features");
List<Row> list = op.transform(data).select("id, features").collect();
assertListRowEqual(expectedRows, list, 0);
CollectSinkStreamOp resStream = op.transform(dataStream).select("id, features").link(new CollectSinkStreamOp());
StreamOperator.execute();
assertListRowEqual(expectedRows, resStream.getAndRemoveValues(), 0);
}
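The expected indices above come from hashing each input feature into a 100-dimensional space: numeric columns keep their value at the bucket of the column name, while boolean, categorical and string columns contribute 1.0 at the bucket of "name=value". The sketch below illustrates that scheme only; the hash function is a stand-in, not the one FeatureHasher actually uses, so the resulting indices will differ from the test's expectations.
import java.util.LinkedHashMap;
import java.util.Map;
import java.util.TreeMap;
public class FeatureHashSketch {
// Stand-in hash-to-bucket helper; the real operator uses its own hash function.
static int bucket(String key, int numFeatures) {
return Math.floorMod(key.hashCode(), numFeatures);
}
public static void main(String[] args) {
int numFeatures = 100;
Map<String, Object> row = new LinkedHashMap<>();
row.put("double", 1.1);
row.put("bool", true);
row.put("number", "2");
row.put("str", "A");
// Accumulate hashed index -> value, mimicking the sparse "features" column.
Map<Integer, Double> hashed = new TreeMap<>();
for (Map.Entry<String, Object> e : row.entrySet()) {
if (e.getValue() instanceof Number) {
// Numeric feature: its value lands at the bucket of the column name.
hashed.merge(bucket(e.getKey(), numFeatures), ((Number) e.getValue()).doubleValue(), Double::sum);
} else {
// Boolean/categorical feature: 1.0 lands at the bucket of "name=value".
hashed.merge(bucket(e.getKey() + "=" + e.getValue(), numFeatures), 1.0, Double::sum);
}
}
System.out.println(hashed);
}
}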
Use of com.alibaba.alink.common.linalg.SparseVector in project Alink by alibaba.
Class LibSvmSourceBatchOp, method parseLibSvmFormat.
public static Tuple2<Double, Vector> parseLibSvmFormat(String line, int startIndex) {
if (StringUtils.isNullOrWhitespaceOnly(line)) {
return Tuple2.of(null, null);
}
int firstSpacePos = line.indexOf(' ');
if (firstSpacePos < 0) {
return Tuple2.of(Double.valueOf(line), VectorUtil.getVector(""));
}
String labelStr = line.substring(0, firstSpacePos);
String featuresStr = line.substring(firstSpacePos + 1);
Vector featuresVec = VectorUtil.getVector(featuresStr);
if (featuresVec instanceof SparseVector) {
int[] indices = ((SparseVector) featuresVec).getIndices();
for (int i = 0; i < indices.length; i++) {
indices[i] = indices[i] - startIndex;
}
}
return Tuple2.of(Double.valueOf(labelStr), featuresVec);
}
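A quick, hedged illustration of the parser above. It assumes the method is reachable under the package path shown and that startIndex is the base of the indices in the input line (1 for the common 1-based LibSVM convention); everything else follows directly from the snippet.
import org.apache.flink.api.java.tuple.Tuple2;
import com.alibaba.alink.common.linalg.SparseVector;
import com.alibaba.alink.common.linalg.Vector;
import com.alibaba.alink.operator.batch.source.LibSvmSourceBatchOp;
import java.util.Arrays;
public class LibSvmParseExample {
public static void main(String[] args) {
// A typical LibSVM line: a label followed by 1-based "index:value" pairs.
String line = "1 3:0.5 7:1.2";
Tuple2<Double, Vector> parsed = LibSvmSourceBatchOp.parseLibSvmFormat(line, 1);
System.out.println("label = " + parsed.f0);
// With startIndex = 1 the indices are shifted to 0-based, i.e. 2 and 6.
System.out.println("indices = " + Arrays.toString(((SparseVector) parsed.f1).getIndices()));
}
}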