Search in sources :

Example 1 with Result

use of de.lmu.ifi.dbs.elki.result.Result in project elki by elki-project.

From the class SimplifiedHierarchyExtractionTest, the method testHDBSCANResults:

@Test
public void testHDBSCANResults() {
    Database db = makeSimpleDatabase(UNITTEST + "3clusters-and-noise-2d.csv", 330);
    // Simplified hierarchy extraction wrapped around linear-memory HDBSCAN.
    Result result = new ELKIBuilder<>(SimplifiedHierarchyExtraction.class) //
        .with(SimplifiedHierarchyExtraction.Parameterizer.MINCLUSTERSIZE_ID, 50) //
        .with(AbstractAlgorithm.ALGORITHM_ID, HDBSCANLinearMemory.class) //
        .with(HDBSCANLinearMemory.Parameterizer.MIN_PTS_ID, 20) //
        .build().run(db);
    Clustering<?> clustering = findSingleClustering(result);
    // Check clustering quality and the exact cluster size distribution.
    testFMeasure(db, clustering, 0.96941);
    testClusterSizes(clustering, new int[] { 7, 14, 54, 103, 152 });
}
Also used : Database(de.lmu.ifi.dbs.elki.database.Database) HDBSCANLinearMemory(de.lmu.ifi.dbs.elki.algorithm.clustering.hierarchical.HDBSCANLinearMemory) Result(de.lmu.ifi.dbs.elki.result.Result) AbstractClusterAlgorithmTest(de.lmu.ifi.dbs.elki.algorithm.clustering.AbstractClusterAlgorithmTest) Test(org.junit.Test)

Example 2 with Result

use of de.lmu.ifi.dbs.elki.result.Result in project elki by elki-project.

From the class SimplifiedHierarchyExtractionTest, the method testHDBSCANDegenerate:

@Test
public void testHDBSCANDegenerate() {
    Database db = makeSimpleDatabase(UNITTEST + "3clusters-and-noise-2d.csv", 330);
    Result result = new ELKIBuilder<>(SimplifiedHierarchyExtraction.class) //
        .with(SimplifiedHierarchyExtraction.Parameterizer.MINCLUSTERSIZE_ID, 1) //
        .with(AbstractAlgorithm.ALGORITHM_ID, HDBSCANLinearMemory.class) //
        .with(HDBSCANLinearMemory.Parameterizer.MIN_PTS_ID, 20) //
        .build().run(db);
    Clustering<?> clustering = findSingleClustering(result);
    // With minclustersize=1 the extraction is useless: it keeps every merge.
    testFMeasure(db, clustering, 0.0182169);
    // A binary merge tree over 330 points yields 2*330-1 nodes.
    assertEquals(2 * 330 - 1, clustering.getAllClusters().size());
}
Also used : Database(de.lmu.ifi.dbs.elki.database.Database) HDBSCANLinearMemory(de.lmu.ifi.dbs.elki.algorithm.clustering.hierarchical.HDBSCANLinearMemory) Result(de.lmu.ifi.dbs.elki.result.Result) AbstractClusterAlgorithmTest(de.lmu.ifi.dbs.elki.algorithm.clustering.AbstractClusterAlgorithmTest) Test(org.junit.Test)

Example 3 with Result

use of de.lmu.ifi.dbs.elki.result.Result in project elki by elki-project.

From the class SimplifiedHierarchyExtractionTest, the method testMiniMaxNNResults:

@Test
public void testMiniMaxNNResults() {
    Database db = makeSimpleDatabase(UNITTEST + "3clusters-and-noise-2d.csv", 330);
    // Same extraction, but on a MiniMax NN-chain hierarchical clustering.
    Result result = new ELKIBuilder<>(SimplifiedHierarchyExtraction.class) //
        .with(SimplifiedHierarchyExtraction.Parameterizer.MINCLUSTERSIZE_ID, 1) //
        .with(AbstractAlgorithm.ALGORITHM_ID, MiniMaxNNChain.class) //
        .build().run(db);
    Clustering<?> clustering = findSingleClustering(result);
    // With minclustersize=1 the extraction is useless: it keeps every merge.
    testFMeasure(db, clustering, 0.0182169);
    // A binary merge tree over 330 points yields 2*330-1 nodes.
    assertEquals(2 * 330 - 1, clustering.getAllClusters().size());
}
Also used : ELKIBuilder(de.lmu.ifi.dbs.elki.utilities.ELKIBuilder) Database(de.lmu.ifi.dbs.elki.database.Database) Result(de.lmu.ifi.dbs.elki.result.Result) AbstractClusterAlgorithmTest(de.lmu.ifi.dbs.elki.algorithm.clustering.AbstractClusterAlgorithmTest) Test(org.junit.Test)

Example 4 with Result

use of de.lmu.ifi.dbs.elki.result.Result in project elki by elki-project.

From the class RepresentativeUncertainClustering, the method run:

/**
 * This run method will do the wrapping.
 *
 * It's called from {@link AbstractAlgorithm#run(Database)} and performs the
 * call to the algorithm's particular run method as well as the storing and
 * comparison of the resulting Clusterings.
 *
 * @param database Database
 * @param relation Data relation of uncertain objects
 * @return Clustering result (the meta clustering of the sampled clusterings)
 */
public Clustering<?> run(Database database, Relation<? extends UncertainObject> relation) {
    ResultHierarchy hierarchy = database.getHierarchy();
    ArrayList<Clustering<?>> clusterings = new ArrayList<>();
    final int dim = RelationUtil.dimensionality(relation);
    DBIDs ids = relation.getDBIDs();
    // To collect samples
    Result samples = new BasicResult("Samples", "samples");
    // Step 1: Cluster sampled possible worlds:
    Random rand = random.getSingleThreadedRandom();
    FiniteProgress sampleP = LOG.isVerbose() ? new FiniteProgress("Clustering samples", numsamples, LOG) : null;
    for (int i = 0; i < numsamples; i++) {
        // Materialize one possible world: draw one sample per uncertain object.
        WritableDataStore<DoubleVector> store = DataStoreUtil.makeStorage(ids, DataStoreFactory.HINT_DB, DoubleVector.class);
        for (DBIDIter iter = ids.iter(); iter.valid(); iter.advance()) {
            store.put(iter, relation.get(iter).drawSample(rand));
        }
        clusterings.add(runClusteringAlgorithm(hierarchy, samples, ids, store, dim, "Sample " + i));
        LOG.incrementProcessed(sampleP);
    }
    LOG.ensureCompleted(sampleP);
    // Step 2: perform the meta clustering (on samples only).
    // One synthetic DBID per sampled clustering:
    DBIDRange rids = DBIDFactory.FACTORY.generateStaticDBIDRange(clusterings.size());
    WritableDataStore<Clustering<?>> datastore = DataStoreUtil.makeStorage(rids, DataStoreFactory.HINT_DB, Clustering.class);
    {
        // Pair up the synthetic DBIDs with the sampled clusterings.
        Iterator<Clustering<?>> it2 = clusterings.iterator();
        for (DBIDIter iter = rids.iter(); iter.valid(); iter.advance()) {
            datastore.put(iter, it2.next());
        }
    }
    assert (rids.size() == clusterings.size());
    // Build a relation, and a distance matrix.
    Relation<Clustering<?>> crel = new MaterializedRelation<Clustering<?>>(Clustering.TYPE, rids, "Clusterings", datastore);
    PrecomputedDistanceMatrix<Clustering<?>> mat = new PrecomputedDistanceMatrix<>(crel, rids, distance);
    mat.initialize();
    // Run the meta clustering algorithm on a proxy database of clusterings.
    ProxyDatabase d = new ProxyDatabase(rids, crel);
    d.getHierarchy().add(crel, mat);
    Clustering<?> c = metaAlgorithm.run(d);
    // Detach from database
    d.getHierarchy().remove(d, c);
    // Evaluation
    Result reps = new BasicResult("Representants", "representative");
    hierarchy.add(relation, reps);
    DistanceQuery<Clustering<?>> dq = mat.getDistanceQuery(distance);
    List<? extends Cluster<?>> cl = c.getAllClusters();
    List<DoubleObjPair<Clustering<?>>> evaluated = new ArrayList<>(cl.size());
    for (Cluster<?> clus : cl) {
        // Pick the 1-medoid of each meta cluster: the member clustering whose
        // maximum distance to the other members is smallest.
        double besttau = Double.POSITIVE_INFINITY;
        Clustering<?> bestc = null;
        for (DBIDIter it1 = clus.getIDs().iter(); it1.valid(); it1.advance()) {
            double tau = 0.;
            Clustering<?> curc = crel.get(it1);
            for (DBIDIter it2 = clus.getIDs().iter(); it2.valid(); it2.advance()) {
                if (DBIDUtil.equal(it1, it2)) {
                    continue;
                }
                double di = dq.distance(curc, it2);
                // tau = maximum distance to any other cluster member.
                tau = di > tau ? di : tau;
            }
            // Cluster member with the least maximum distance.
            if (tau < besttau) {
                besttau = tau;
                bestc = curc;
            }
        }
        if (bestc == null) {
            // E.g. degenerate empty clusters
            continue;
        }
        // Global tau: maximum distance of the representative to ANY sample.
        double gtau = 0.;
        for (DBIDIter it2 = crel.iterDBIDs(); it2.valid(); it2.advance()) {
            double di = dq.distance(bestc, it2);
            gtau = di > gtau ? di : gtau;
        }
        final double cprob = computeConfidence(clus.size(), crel.size());
        // Build an evaluation result
        hierarchy.add(bestc, new RepresentativenessEvaluation(gtau, besttau, cprob));
        evaluated.add(new DoubleObjPair<Clustering<?>>(cprob, bestc));
    }
    // Sort evaluated results by confidence:
    Collections.sort(evaluated, Collections.reverseOrder());
    for (DoubleObjPair<Clustering<?>> pair : evaluated) {
        // Attach parent relation (= sample) to the representative samples.
        for (It<Relation<?>> it = hierarchy.iterParents(pair.second).filter(Relation.class); it.valid(); it.advance()) {
            hierarchy.add(reps, it.get());
        }
    }
    // Add the random samples below the representative results only:
    if (keep) {
        hierarchy.add(relation, samples);
    } else {
        hierarchy.removeSubtree(samples);
    }
    return c;
}
Also used : ArrayList(java.util.ArrayList) Result(de.lmu.ifi.dbs.elki.result.Result) EvaluationResult(de.lmu.ifi.dbs.elki.result.EvaluationResult) BasicResult(de.lmu.ifi.dbs.elki.result.BasicResult) DBIDIter(de.lmu.ifi.dbs.elki.database.ids.DBIDIter) MaterializedRelation(de.lmu.ifi.dbs.elki.database.relation.MaterializedRelation) MaterializedRelation(de.lmu.ifi.dbs.elki.database.relation.MaterializedRelation) Relation(de.lmu.ifi.dbs.elki.database.relation.Relation) Random(java.util.Random) BasicResult(de.lmu.ifi.dbs.elki.result.BasicResult) Iterator(java.util.Iterator) ResultHierarchy(de.lmu.ifi.dbs.elki.result.ResultHierarchy) DBIDs(de.lmu.ifi.dbs.elki.database.ids.DBIDs) FiniteProgress(de.lmu.ifi.dbs.elki.logging.progress.FiniteProgress) ProxyDatabase(de.lmu.ifi.dbs.elki.database.ProxyDatabase) PrecomputedDistanceMatrix(de.lmu.ifi.dbs.elki.index.distancematrix.PrecomputedDistanceMatrix) Clustering(de.lmu.ifi.dbs.elki.data.Clustering) DoubleObjPair(de.lmu.ifi.dbs.elki.utilities.pairs.DoubleObjPair) DBIDRange(de.lmu.ifi.dbs.elki.database.ids.DBIDRange) DoubleVector(de.lmu.ifi.dbs.elki.data.DoubleVector)

Example 5 with Result

use of de.lmu.ifi.dbs.elki.result.Result in project elki by elki-project.

From the class OutputTabPanel, the method executeStep:

@Override
protected void executeStep() {
    // Trigger the prerequisite steps that still need to run.
    if (input.canRun() && !input.isComplete()) {
        input.execute();
    }
    if (evals.canRun() && !evals.isComplete()) {
        evals.execute();
    }
    // Bail out when a prerequisite did not finish successfully.
    if (!input.isComplete()) {
        throw new AbortException("Input data not available.");
    }
    if (!evals.isComplete()) {
        throw new AbortException("Evaluation failed.");
    }
    // Feed the database and its result hierarchy to the output handlers.
    final Database db = input.getInputStep().getDatabase();
    outs.runResultHandlers(db.getHierarchy(), db);
    // Remember (weakly) which evaluation result this output was based on.
    final Result evalResult = evals.getEvaluationStep().getResult();
    basedOnResult = new WeakReference<Object>(evalResult);
}
Also used : Database(de.lmu.ifi.dbs.elki.database.Database) AbortException(de.lmu.ifi.dbs.elki.utilities.exceptions.AbortException) Result(de.lmu.ifi.dbs.elki.result.Result)

Aggregations

Result (de.lmu.ifi.dbs.elki.result.Result)11 Database (de.lmu.ifi.dbs.elki.database.Database)6 AbstractClusterAlgorithmTest (de.lmu.ifi.dbs.elki.algorithm.clustering.AbstractClusterAlgorithmTest)3 DBIDIter (de.lmu.ifi.dbs.elki.database.ids.DBIDIter)3 ResultHierarchy (de.lmu.ifi.dbs.elki.result.ResultHierarchy)3 Test (org.junit.Test)3 AbstractAlgorithm (de.lmu.ifi.dbs.elki.algorithm.AbstractAlgorithm)2 Algorithm (de.lmu.ifi.dbs.elki.algorithm.Algorithm)2 HDBSCANLinearMemory (de.lmu.ifi.dbs.elki.algorithm.clustering.hierarchical.HDBSCANLinearMemory)2 Clustering (de.lmu.ifi.dbs.elki.data.Clustering)2 WritableDoubleDataStore (de.lmu.ifi.dbs.elki.database.datastore.WritableDoubleDataStore)2 DoubleRelation (de.lmu.ifi.dbs.elki.database.relation.DoubleRelation)2 MaterializedDoubleRelation (de.lmu.ifi.dbs.elki.database.relation.MaterializedDoubleRelation)2 FiniteProgress (de.lmu.ifi.dbs.elki.logging.progress.FiniteProgress)2 DoubleMinMax (de.lmu.ifi.dbs.elki.math.DoubleMinMax)2 BasicResult (de.lmu.ifi.dbs.elki.result.BasicResult)2 EvaluationResult (de.lmu.ifi.dbs.elki.result.EvaluationResult)2 BasicOutlierScoreMeta (de.lmu.ifi.dbs.elki.result.outlier.BasicOutlierScoreMeta)2 OutlierResult (de.lmu.ifi.dbs.elki.result.outlier.OutlierResult)2 OutlierScoreMeta (de.lmu.ifi.dbs.elki.result.outlier.OutlierScoreMeta)2