Use of de.lmu.ifi.dbs.elki.math.linearalgebra.LUDecomposition in project elki by elki-project.
From the class GaussianUniformMixture, method loglikelihoodNormal.
/**
 * Computes the loglikelihood of all normal objects (Gaussian model).
 *
 * @param objids Object IDs for 'normal' objects.
 * @param relation Database
 * @return loglikelihood for normal objects
 */
private double loglikelihoodNormal(DBIDs objids, Relation<V> relation) {
  if (objids.isEmpty()) {
    return 0;
  }
  CovarianceMatrix builder = CovarianceMatrix.make(relation, objids);
  double[] mean = builder.getMeanVector();
  double[][] covarianceMatrix = builder.destroyToSampleMatrix();
  // Test for a singular covariance matrix
  double[][] covInv = inverse(covarianceMatrix);
  double covarianceDet = new LUDecomposition(covarianceMatrix).det();
  double fakt = 1.0 / FastMath.sqrt(MathUtil.powi(MathUtil.TWOPI, RelationUtil.dimensionality(relation)) * covarianceDet);
  // For each object, compute its probability and sum the log-likelihoods
  double prob = 0;
  for (DBIDIter iter = objids.iter(); iter.valid(); iter.advance()) {
    double[] x = minusEquals(relation.get(iter).toArray(), mean);
    double mDist = transposeTimesTimes(x, covInv, x);
    prob += FastMath.log(fakt * FastMath.exp(-mDist * .5));
  }
  return prob;
}
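The inner sum evaluates log(fakt * exp(-mDist / 2)), where FastMath.exp can underflow for large Mahalanobis distances. Below is a minimal sketch of the same Gaussian log-density evaluated directly in log space; it assumes the static VMath helpers (inverse, minusEquals, transposeTimesTimes) and the FastMath/MathUtil classes used in the snippet above, and it is not the ELKI implementation.

// Sketch only: Gaussian log-density of one vector, computed in log space.
// Assumes the static VMath helpers and FastMath/MathUtil used in the snippet above.
private static double logDensity(double[] x, double[] mean, double[][] cov) {
  final int dim = mean.length;
  final double logDet = FastMath.log(new LUDecomposition(cov).det()); // log-determinant via LU
  final double[][] covInv = inverse(cov);
  final double[] centered = minusEquals(x.clone(), mean); // x - mean, in place on a copy
  final double mDist = transposeTimesTimes(centered, covInv, centered); // squared Mahalanobis distance
  // log( (2*pi)^(-d/2) * det^(-1/2) * exp(-mDist/2) )
  return -0.5 * (dim * FastMath.log(MathUtil.TWOPI) + logDet + mDist);
}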
Use of de.lmu.ifi.dbs.elki.math.linearalgebra.LUDecomposition in project elki by elki-project.
From the class GaussianModel, method run.
/**
 * Run the algorithm
 *
 * @param relation Data relation
 * @return Outlier result
 */
public OutlierResult run(Relation<V> relation) {
  DoubleMinMax mm = new DoubleMinMax();
  // Resulting scores
  WritableDoubleDataStore oscores = DataStoreUtil.makeDoubleStorage(relation.getDBIDs(), DataStoreFactory.HINT_TEMP | DataStoreFactory.HINT_HOT);
  // Compute mean and covariance matrix
  CovarianceMatrix temp = CovarianceMatrix.make(relation);
  double[] mean = temp.getMeanVector(relation).toArray();
  // debugFine(mean.toString());
  double[][] covarianceMatrix = temp.destroyToPopulationMatrix();
  // debugFine(covarianceMatrix.toString());
  double[][] covarianceTransposed = inverse(covarianceMatrix);
  // Normalization factors for Gaussian PDF
  double det = new LUDecomposition(covarianceMatrix).det();
  final double fakt = 1.0 / FastMath.sqrt(MathUtil.powi(MathUtil.TWOPI, RelationUtil.dimensionality(relation)) * det);
  // For each object, compute the Mahalanobis distance
  for (DBIDIter iditer = relation.iterDBIDs(); iditer.valid(); iditer.advance()) {
    double[] x = minusEquals(relation.get(iditer).toArray(), mean);
    // Gaussian PDF
    final double mDist = transposeTimesTimes(x, covarianceTransposed, x);
    final double prob = fakt * FastMath.exp(-mDist * .5);
    mm.put(prob);
    oscores.putDouble(iditer, prob);
  }
  final OutlierScoreMeta meta;
  if (invert) {
    double max = mm.getMax() != 0 ? mm.getMax() : 1.;
    for (DBIDIter iditer = relation.iterDBIDs(); iditer.valid(); iditer.advance()) {
      oscores.putDouble(iditer, (max - oscores.doubleValue(iditer)) / max);
    }
    meta = new BasicOutlierScoreMeta(0.0, 1.0);
  } else {
    meta = new InvertedOutlierScoreMeta(mm.getMin(), mm.getMax(), 0.0, Double.POSITIVE_INFINITY);
  }
  DoubleRelation res = new MaterializedDoubleRelation("Gaussian Model Outlier Score", "gaussian-model-outlier", oscores, relation.getDBIDs());
  return new OutlierResult(meta, res);
}
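Both snippets above invert the covariance matrix without checking whether it is singular, in which case det() is 0 and inverse() yields unusable values. Below is a hedged sketch of a guard that regularizes the diagonal before inverting, following the pattern used in the LinearDiscriminantAnalysisFilter snippet further down; the 1e-10 ridge is an arbitrary illustrative value, not one prescribed by ELKI.

// Sketch only: regularize a (near-)singular covariance matrix before inverting it.
// The 1e-10 ridge is an arbitrary choice, not a value taken from ELKI.
double det = new LUDecomposition(covarianceMatrix).det();
if (det == 0. || Double.isNaN(det) || Double.isInfinite(det)) {
  for (int i = 0; i < covarianceMatrix.length; i++) {
    covarianceMatrix[i][i] += 1e-10; // small diagonal ridge
  }
  det = new LUDecomposition(covarianceMatrix).det();
}
double[][] covInv = inverse(covarianceMatrix);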
Use of de.lmu.ifi.dbs.elki.math.linearalgebra.LUDecomposition in project elki by elki-project.
From the class LinearDiscriminantAnalysisFilter, method computeProjectionMatrix.
@Override
protected double[][] computeProjectionMatrix(List<V> vectorcolumn, List<? extends ClassLabel> classcolumn, int dim) {
  Map<ClassLabel, IntList> classes = partition(classcolumn);
  // Fix indexing of classes:
  List<ClassLabel> keys = new ArrayList<>(classes.keySet());
  // Compute centroids:
  List<Centroid> centroids = computeCentroids(dim, vectorcolumn, keys, classes);
  final double[][] sigmaB, sigmaI;
  // Between classes covariance:
  {
    CovarianceMatrix covmake = new CovarianceMatrix(dim);
    for (Centroid c : centroids) {
      covmake.put(c);
    }
    sigmaB = covmake.destroyToSampleMatrix();
  }
  {
    // (Average) within class variance:
    CovarianceMatrix covmake = new CovarianceMatrix(dim);
    int numc = keys.size();
    for (int i = 0; i < numc; i++) {
      double[] c = centroids.get(i).getArrayRef();
      // TODO: different weighting strategies? Sampling?
      for (IntIterator it = classes.get(keys.get(i)).iterator(); it.hasNext();) {
        covmake.put(minusEquals(vectorcolumn.get(it.nextInt()).toArray(), c));
      }
    }
    sigmaI = covmake.destroyToSampleMatrix();
    if (new LUDecomposition(sigmaI).det() == 0) {
      for (int i = 0; i < dim; i++) {
        sigmaI[i][i] += 1e-10;
      }
    }
  }
  double[][] sol = times(inverse(sigmaI), sigmaB);
  EigenvalueDecomposition decomp = new EigenvalueDecomposition(sol);
  SortedEigenPairs sorted = new SortedEigenPairs(decomp, false);
  return transpose(sorted.eigenVectors(tdim));
}
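Here LUDecomposition is used only for the singularity check, while the product inverse(sigmaI) * sigmaB is formed through an explicit inverse. If this LUDecomposition class also offers the Jama-style isNonsingular() and solve(double[][]) methods (an assumption, not confirmed by the snippet), the same product could be obtained by solving the linear system sigmaI * X = sigmaB directly, as in this sketch:

// Sketch only: solve sigmaI * X = sigmaB instead of forming an explicit inverse.
// Assumes Jama-style isNonsingular() and solve(double[][]) methods on LUDecomposition.
LUDecomposition lu = new LUDecomposition(sigmaI);
double[][] sol = lu.isNonsingular() //
    ? lu.solve(sigmaB) // forward/backward substitution on the LU factors
    : times(inverse(sigmaI), sigmaB); // fall back to the explicit inverse as above
EigenvalueDecomposition decomp = new EigenvalueDecomposition(sol);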