use of de.lmu.ifi.dbs.elki.result.Result in project elki by elki-project.
In the class SimplifiedHierarchyExtractionTest, the method testHDBSCANResults.
@Test
public void testHDBSCANResults() {
  Database database = makeSimpleDatabase(UNITTEST + "3clusters-and-noise-2d.csv", 330);
  // Extract a simplified cluster hierarchy on top of linear-memory HDBSCAN.
  Result res = new ELKIBuilder<>(SimplifiedHierarchyExtraction.class) //
      .with(SimplifiedHierarchyExtraction.Parameterizer.MINCLUSTERSIZE_ID, 50) //
      .with(AbstractAlgorithm.ALGORITHM_ID, HDBSCANLinearMemory.class) //
      .with(HDBSCANLinearMemory.Parameterizer.MIN_PTS_ID, 20) //
      .build().run(database);
  Clustering<?> clustering = findSingleClustering(res);
  // Verify clustering quality and the expected cluster size distribution.
  testFMeasure(database, clustering, 0.96941);
  testClusterSizes(clustering, new int[] { 7, 14, 54, 103, 152 });
}
use of de.lmu.ifi.dbs.elki.result.Result in project elki by elki-project.
In the class SimplifiedHierarchyExtractionTest, the method testHDBSCANDegenerate.
@Test
public void testHDBSCANDegenerate() {
  Database database = makeSimpleDatabase(UNITTEST + "3clusters-and-noise-2d.csv", 330);
  // Degenerate configuration: a minimum cluster size of 1 keeps every node.
  Result res = new ELKIBuilder<>(SimplifiedHierarchyExtraction.class) //
      .with(SimplifiedHierarchyExtraction.Parameterizer.MINCLUSTERSIZE_ID, 1) //
      .with(AbstractAlgorithm.ALGORITHM_ID, HDBSCANLinearMemory.class) //
      .with(HDBSCANLinearMemory.Parameterizer.MIN_PTS_ID, 20) //
      .build().run(database);
  Clustering<?> clustering = findSingleClustering(res);
  // minclustersize=1 is useless
  testFMeasure(database, clustering, 0.0182169);
  // A full binary merge tree over 330 points has 2*330-1 nodes.
  assertEquals(2 * 330 - 1, clustering.getAllClusters().size());
}
use of de.lmu.ifi.dbs.elki.result.Result in project elki by elki-project.
In the class SimplifiedHierarchyExtractionTest, the method testMiniMaxNNResults.
@Test
public void testMiniMaxNNResults() {
  Database database = makeSimpleDatabase(UNITTEST + "3clusters-and-noise-2d.csv", 330);
  // Same degenerate extraction, but on a MiniMax NN-chain hierarchy.
  Result res = new ELKIBuilder<>(SimplifiedHierarchyExtraction.class) //
      .with(SimplifiedHierarchyExtraction.Parameterizer.MINCLUSTERSIZE_ID, 1) //
      .with(AbstractAlgorithm.ALGORITHM_ID, MiniMaxNNChain.class) //
      .build().run(database);
  Clustering<?> clustering = findSingleClustering(res);
  // minclustersize=1 is useless
  testFMeasure(database, clustering, 0.0182169);
  // Every merge of the 330 points becomes a cluster node: 2*330-1 total.
  assertEquals(2 * 330 - 1, clustering.getAllClusters().size());
}
use of de.lmu.ifi.dbs.elki.result.Result in project elki by elki-project.
In the class RepresentativeUncertainClustering, the method run.
/**
 * This run method will do the wrapping.
 *
 * It is called from {@link AbstractAlgorithm#run(Database)} and performs the
 * call to the algorithm's particular run method as well as the storing and
 * comparison of the resulting Clusterings.
 *
 * @param database Database
 * @param relation Data relation of uncertain objects
 * @return Clustering result (the meta clustering of the sampled clusterings)
 */
public Clustering<?> run(Database database, Relation<? extends UncertainObject> relation) {
  ResultHierarchy hierarchy = database.getHierarchy();
  ArrayList<Clustering<?>> clusterings = new ArrayList<>();
  final int dim = RelationUtil.dimensionality(relation);
  DBIDs ids = relation.getDBIDs();
  // To collect samples
  Result samples = new BasicResult("Samples", "samples");
  // Step 1: Cluster sampled possible worlds:
  Random rand = random.getSingleThreadedRandom();
  FiniteProgress sampleP = LOG.isVerbose() ? new FiniteProgress("Clustering samples", numsamples, LOG) : null;
  for (int i = 0; i < numsamples; i++) {
    // Draw one concrete vector per uncertain object to form a "possible world".
    WritableDataStore<DoubleVector> store = DataStoreUtil.makeStorage(ids, DataStoreFactory.HINT_DB, DoubleVector.class);
    for (DBIDIter iter = ids.iter(); iter.valid(); iter.advance()) {
      store.put(iter, relation.get(iter).drawSample(rand));
    }
    // Cluster this sampled world; the result is attached below "samples".
    clusterings.add(runClusteringAlgorithm(hierarchy, samples, ids, store, dim, "Sample " + i));
    LOG.incrementProcessed(sampleP);
  }
  LOG.ensureCompleted(sampleP);
  // Step 2: perform the meta clustering (on samples only).
  // Each sampled clustering becomes one "object" identified by a fresh DBID.
  DBIDRange rids = DBIDFactory.FACTORY.generateStaticDBIDRange(clusterings.size());
  WritableDataStore<Clustering<?>> datastore = DataStoreUtil.makeStorage(rids, DataStoreFactory.HINT_DB, Clustering.class);
  {
    // Pair DBIDs with clusterings in order; both iterate over the same count.
    Iterator<Clustering<?>> it2 = clusterings.iterator();
    for (DBIDIter iter = rids.iter(); iter.valid(); iter.advance()) {
      datastore.put(iter, it2.next());
    }
  }
  assert (rids.size() == clusterings.size());
  // Build a relation, and a distance matrix.
  Relation<Clustering<?>> crel = new MaterializedRelation<Clustering<?>>(Clustering.TYPE, rids, "Clusterings", datastore);
  PrecomputedDistanceMatrix<Clustering<?>> mat = new PrecomputedDistanceMatrix<>(crel, rids, distance);
  mat.initialize();
  // Run the meta clustering algorithm on a temporary proxy database.
  ProxyDatabase d = new ProxyDatabase(rids, crel);
  d.getHierarchy().add(crel, mat);
  Clustering<?> c = metaAlgorithm.run(d);
  // Detach from database
  d.getHierarchy().remove(d, c);
  // Evaluation
  Result reps = new BasicResult("Representants", "representative");
  hierarchy.add(relation, reps);
  DistanceQuery<Clustering<?>> dq = mat.getDistanceQuery(distance);
  List<? extends Cluster<?>> cl = c.getAllClusters();
  List<DoubleObjPair<Clustering<?>>> evaluated = new ArrayList<>(cl.size());
  for (Cluster<?> clus : cl) {
    // Pick the medoid-like representative of each meta cluster: the member
    // whose maximum distance to any other member (besttau) is smallest.
    double besttau = Double.POSITIVE_INFINITY;
    Clustering<?> bestc = null;
    for (DBIDIter it1 = clus.getIDs().iter(); it1.valid(); it1.advance()) {
      double tau = 0.;
      Clustering<?> curc = crel.get(it1);
      for (DBIDIter it2 = clus.getIDs().iter(); it2.valid(); it2.advance()) {
        if (DBIDUtil.equal(it1, it2)) {
          continue; // Skip the distance of a clustering to itself.
        }
        double di = dq.distance(curc, it2);
        tau = di > tau ? di : tau; // Track the maximum distance within the cluster.
      }
      // Cluster member with the least maximum distance.
      if (tau < besttau) {
        besttau = tau;
        bestc = curc;
      }
    }
    if (bestc == null) {
      // E.g. degenerate empty clusters
      continue;
    }
    // Global tau: maximum distance of the representative to ALL sampled
    // clusterings, not only those in its own meta cluster.
    double gtau = 0.;
    for (DBIDIter it2 = crel.iterDBIDs(); it2.valid(); it2.advance()) {
      double di = dq.distance(bestc, it2);
      gtau = di > gtau ? di : gtau;
    }
    // Confidence based on the meta cluster's share of all samples
    // (presumably a probability estimate — see computeConfidence).
    final double cprob = computeConfidence(clus.size(), crel.size());
    // Build an evaluation result
    hierarchy.add(bestc, new RepresentativenessEvaluation(gtau, besttau, cprob));
    evaluated.add(new DoubleObjPair<Clustering<?>>(cprob, bestc));
  }
  // Sort evaluated results by confidence:
  Collections.sort(evaluated, Collections.reverseOrder());
  for (DoubleObjPair<Clustering<?>> pair : evaluated) {
    // Attach parent relation (= sample) to the representative samples.
    for (It<Relation<?>> it = hierarchy.iterParents(pair.second).filter(Relation.class); it.valid(); it.advance()) {
      hierarchy.add(reps, it.get());
    }
  }
  // Add the random samples below the representative results only:
  if (keep) {
    hierarchy.add(relation, samples);
  } else {
    hierarchy.removeSubtree(samples);
  }
  return c;
}
use of de.lmu.ifi.dbs.elki.result.Result in project elki by elki-project.
In the class OutputTabPanel, the method executeStep.
@Override
protected void executeStep() {
  // Run the prerequisite pipeline steps when they are runnable but pending.
  if (input.canRun() && !input.isComplete()) {
    input.execute();
  }
  if (evals.canRun() && !evals.isComplete()) {
    evals.execute();
  }
  // Abort unless both prerequisites finished successfully.
  if (!input.isComplete()) {
    throw new AbortException("Input data not available.");
  }
  if (!evals.isComplete()) {
    throw new AbortException("Evaluation failed.");
  }
  // Feed the database and its result hierarchy to the output handlers.
  final Database database = input.getInputStep().getDatabase();
  outs.runResultHandlers(database.getHierarchy(), database);
  // Remember (weakly) which evaluation result this output was based on.
  final Result evaluationResult = evals.getEvaluationStep().getResult();
  basedOnResult = new WeakReference<Object>(evaluationResult);
}
Aggregations