Use of de.lmu.ifi.dbs.elki.data.synthetic.bymodel.GeneratorSingleCluster in project elki by elki-project.
From the class GeneratorXMLDatabaseConnection, method processElementCluster:
/**
* Process a 'cluster' Element in the XML stream.
*
* @param gen Generator
* @param cur Current document node
*/
private void processElementCluster(GeneratorMain gen, Node cur) {
  int size = -1;
  double overweight = 1.0;
  String sizestr = ((Element) cur).getAttribute(ATTR_SIZE);
  if (sizestr != null && sizestr.length() > 0) {
    size = (int) (ParseUtil.parseIntBase10(sizestr) * sizescale);
  }
  String name = ((Element) cur).getAttribute(ATTR_NAME);
  String dcostr = ((Element) cur).getAttribute(ATTR_DENSITY);
  if (dcostr != null && dcostr.length() > 0) {
    overweight = ParseUtil.parseDouble(dcostr);
  }
  if (size < 0) {
    throw new AbortException("No valid cluster size given in specification file.");
  }
  if (name == null || name.length() == 0) {
    throw new AbortException("No cluster name given in specification file.");
  }
  // *** add new cluster object
  Random newRand = clusterRandom.getSingleThreadedRandom();
  GeneratorSingleCluster cluster = new GeneratorSingleCluster(name, size, overweight, newRand);
  // TODO: check for unknown attributes.
  XMLNodeIterator iter = new XMLNodeIterator(cur.getFirstChild());
  while (iter.hasNext()) {
    Node child = iter.next();
    if (TAG_UNIFORM.equals(child.getNodeName())) {
      processElementUniform(cluster, child);
    } else if (TAG_NORMAL.equals(child.getNodeName())) {
      processElementNormal(cluster, child);
    } else if (TAG_GAMMA.equals(child.getNodeName())) {
      processElementGamma(cluster, child);
    } else if (TAG_HALTON.equals(child.getNodeName())) {
      processElementHalton(cluster, child);
    } else if (TAG_ROTATE.equals(child.getNodeName())) {
      processElementRotate(cluster, child);
    } else if (TAG_TRANSLATE.equals(child.getNodeName())) {
      processElementTranslate(cluster, child);
    } else if (TAG_CLIP.equals(child.getNodeName())) {
      processElementClipping(cluster, child);
    } else if (child.getNodeType() == Node.ELEMENT_NODE) {
      LOG.warning("Unknown element in XML specification file: " + child.getNodeName());
    }
  }
  gen.addCluster(cluster);
}
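The snippet parses the XML attributes and then hands a fully configured GeneratorSingleCluster to the GeneratorMain. As a point of comparison, here is a minimal sketch (not part of the ELKI sources) of assembling the same kind of cluster programmatically instead of via an XML specification file; the no-argument GeneratorMain constructor, GeneratorSingleCluster#addGenerator, GeneratorMain#generate and the three-argument NormalDistribution constructor are assumptions about the generator API.
import java.util.Random;

import de.lmu.ifi.dbs.elki.data.synthetic.bymodel.GeneratorMain;
import de.lmu.ifi.dbs.elki.data.synthetic.bymodel.GeneratorSingleCluster;
import de.lmu.ifi.dbs.elki.datasource.bundle.MultipleObjectsBundle;
import de.lmu.ifi.dbs.elki.math.statistics.distribution.NormalDistribution;

public class BuildClusterSketch {
  public static MultipleObjectsBundle buildExample() {
    // Name, size, density correction ("overweight") and random source,
    // mirroring the arguments extracted from the XML attributes above.
    Random rnd = new Random(0L);
    GeneratorSingleCluster cluster = new GeneratorSingleCluster("cluster-a", 500, 1.0, rnd);
    // One distribution per dimension; addGenerator() and the three-argument
    // NormalDistribution constructor are assumed API.
    cluster.addGenerator(new NormalDistribution(0.0, 1.0, rnd));
    cluster.addGenerator(new NormalDistribution(2.0, 0.5, rnd));
    GeneratorMain gen = new GeneratorMain();
    gen.addCluster(cluster);
    return gen.generate(); // assumed to yield the MultipleObjectsBundle written out below
  }
}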
Use of de.lmu.ifi.dbs.elki.data.synthetic.bymodel.GeneratorSingleCluster in project elki by elki-project.
From the class GeneratorXMLSpec, method writeClusters:
/**
* Write the resulting clusters to an output stream.
*
* @param outStream output stream
* @param data Generated data
* @throws IOException thrown on write errors
*/
public void writeClusters(OutputStreamWriter outStream, MultipleObjectsBundle data) throws IOException {
  int modelcol = -1;
  {
    // Find model column
    for (int i = 0; i < data.metaLength(); i++) {
      if (Model.TYPE.isAssignableFromType(data.meta(i))) {
        modelcol = i;
        break;
      }
    }
  }
  if (modelcol < 0) {
    throw new AbortException("No model column found in bundle.");
  }
  ArrayList<Model> models = new ArrayList<>();
  Map<Model, IntArrayList> modelMap = new HashMap<>();
  {
    // Build a map from model to the actual objects
    for (int i = 0; i < data.dataLength(); i++) {
      Model model = (Model) data.data(i, modelcol);
      IntArrayList modelids = modelMap.get(model);
      if (modelids == null) {
        models.add(model);
        modelids = new IntArrayList();
        modelMap.put(model, modelids);
      }
      modelids.add(i);
    }
  }
  // compute global discard values
  int totalsize = 0, totaldisc = 0;
  for (Entry<Model, IntArrayList> ent : modelMap.entrySet()) {
    totalsize += ent.getValue().size();
    if (ent.getKey() instanceof GeneratorSingleCluster) {
      totaldisc += ((GeneratorSingleCluster) ent.getKey()).getDiscarded();
    }
  }
  double globdens = (double) (totalsize + totaldisc) / totalsize;
  outStream.append("########################################################").append(LINE_SEPARATOR);
  outStream.append("## Number of clusters: " + models.size()).append(LINE_SEPARATOR);
  for (Model model : models) {
    IntArrayList ids = modelMap.get(model);
    outStream.append("########################################################").append(LINE_SEPARATOR);
    outStream.append("## Size: " + ids.size()).append(LINE_SEPARATOR);
    if (model instanceof GeneratorSingleCluster) {
      GeneratorSingleCluster cursclus = (GeneratorSingleCluster) model;
      outStream.append("########################################################").append(LINE_SEPARATOR);
      outStream.append("## Cluster: ").append(cursclus.getName()).append(LINE_SEPARATOR);
      double[] cmin = cursclus.getClipmin();
      double[] cmax = cursclus.getClipmax();
      if (cmin != null && cmax != null) {
        outStream.append("## Clipping: ").append(FormatUtil.format(cmin)).append(" - ").append(FormatUtil.format(cmax)).append(LINE_SEPARATOR);
      }
      outStream.append("## Density correction factor: " + cursclus.getDensityCorrection()).append(LINE_SEPARATOR);
      outStream.append("## Generators:").append(LINE_SEPARATOR);
      for (int i = 0; i < cursclus.getDim(); i++) {
        Distribution gen = cursclus.getDistribution(i);
        outStream.append("## ").append(gen.toString()).append(LINE_SEPARATOR);
      }
      if (cursclus.getTransformation() != null && cursclus.getTransformation().getTransformation() != null) {
        outStream.append("## Affine transformation matrix:").append(LINE_SEPARATOR);
        outStream.append(FormatUtil.format(cursclus.getTransformation().getTransformation(), "## ")).append(LINE_SEPARATOR);
      }
      outStream.append("## Discards: " + cursclus.getDiscarded() + " Retries left: " + cursclus.getRetries()).append(LINE_SEPARATOR);
      double corf = /* cursclus.overweight */
          (double) (cursclus.getSize() + cursclus.getDiscarded()) / cursclus.getSize() / globdens;
      outStream.append("## Density correction factor estimation: " + corf).append(LINE_SEPARATOR);
    }
    outStream.append("########################################################").append(LINE_SEPARATOR);
    for (IntIterator iter = ids.iterator(); iter.hasNext(); ) {
      int num = iter.nextInt();
      for (int c = 0; c < data.metaLength(); c++) {
        if (c != modelcol) {
          if (c > 0) {
            outStream.append(' ');
          }
          outStream.append(data.data(num, c).toString());
        }
      }
      outStream.append(LINE_SEPARATOR);
    }
  }
}
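A hypothetical caller for the method above, shown only to illustrate how the OutputStreamWriter and the generated MultipleObjectsBundle are wired together; the GeneratorXMLSpec instance is taken as given and its package location is assumed.
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.OutputStreamWriter;
import java.nio.charset.StandardCharsets;

import de.lmu.ifi.dbs.elki.application.GeneratorXMLSpec; // package location assumed
import de.lmu.ifi.dbs.elki.datasource.bundle.MultipleObjectsBundle;

public class WriteClustersSketch {
  public static void write(GeneratorXMLSpec spec, MultipleObjectsBundle bundle) throws IOException {
    // writeClusters() expects a character stream; close it via try-with-resources.
    try (OutputStreamWriter out = new OutputStreamWriter(new FileOutputStream("clusters.txt"), StandardCharsets.UTF_8)) {
      spec.writeClusters(out, bundle);
    }
  }
}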
Use of de.lmu.ifi.dbs.elki.data.synthetic.bymodel.GeneratorSingleCluster in project elki by elki-project.
From the class TrivialGeneratedOutlier, method run:
/**
* Run the algorithm
*
* @param models Model relation
* @param vecs Vector relation
* @param labels Label relation
* @return Outlier result
*/
public OutlierResult run(Relation<Model> models, Relation<NumberVector> vecs, Relation<?> labels) {
  WritableDoubleDataStore scores = DataStoreUtil.makeDoubleStorage(models.getDBIDs(), DataStoreFactory.HINT_HOT);
  HashSet<GeneratorSingleCluster> generators = new HashSet<>();
  for (DBIDIter iditer = models.iterDBIDs(); iditer.valid(); iditer.advance()) {
    Model model = models.get(iditer);
    if (model instanceof GeneratorSingleCluster) {
      generators.add((GeneratorSingleCluster) model);
    }
  }
  if (generators.isEmpty()) {
    LOG.warning("No generator models found for dataset - all points will be considered outliers.");
  }
  for (GeneratorSingleCluster gen : generators) {
    for (int i = 0; i < gen.getDim(); i++) {
      Distribution dist = gen.getDistribution(i);
      if (!(dist instanceof NormalDistribution)) {
        throw new AbortException("TrivialGeneratedOutlier currently only supports normal distributions, got: " + dist);
      }
    }
  }
  for (DBIDIter iditer = models.iterDBIDs(); iditer.valid(); iditer.advance()) {
    double score = 1.;
    double[] v = vecs.get(iditer).toArray();
    for (GeneratorSingleCluster gen : generators) {
      double[] tv = v;
      // Transform backwards
      if (gen.getTransformation() != null) {
        tv = gen.getTransformation().applyInverse(v);
      }
      final int dim = tv.length;
      double lensq = 0.0;
      int norm = 0;
      for (int i = 0; i < dim; i++) {
        Distribution dist = gen.getDistribution(i);
        if (dist instanceof NormalDistribution) {
          NormalDistribution d = (NormalDistribution) dist;
          double delta = (tv[i] - d.getMean()) / d.getStddev();
          lensq += delta * delta;
          norm += 1;
        } else {
          throw new AbortException("TrivialGeneratedOutlier currently only supports normal distributions, got: " + dist);
        }
      }
      if (norm > 0.) {
        // The squared distances are ChiSquared distributed
        score = Math.min(score, ChiSquaredDistribution.cdf(lensq, norm));
      } else {
        score = 0.;
      }
    }
    if (expect < 1) {
      score = expect * score / (1 - score + expect);
    }
    scores.putDouble(iditer, score);
  }
  DoubleRelation scoreres = new MaterializedDoubleRelation("Model outlier scores", "model-outlier", scores, models.getDBIDs());
  OutlierScoreMeta meta = new ProbabilisticOutlierScore(0., 1.);
  return new OutlierResult(meta, scoreres);
}
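The scoring loop above can be summarized in isolation: standardize each coordinate by the generating normal distribution, sum the squared deviations, and read off the chi-squared CDF as the "outlierness" score. A minimal sketch of just that computation follows, using only ChiSquaredDistribution.cdf as invoked above; the parameter arrays are hypothetical stand-ins for the per-dimension generators of a single GeneratorSingleCluster.
import de.lmu.ifi.dbs.elki.math.statistics.distribution.ChiSquaredDistribution;

public class ModelOutlierScoreSketch {
  /**
   * @param point coordinates of the object, already transformed back into the generator's frame
   * @param means hypothetical per-dimension means of the generating normal distributions
   * @param stddevs hypothetical per-dimension standard deviations
   */
  public static double score(double[] point, double[] means, double[] stddevs) {
    double lensq = 0.0;
    for (int i = 0; i < point.length; i++) {
      double delta = (point[i] - means[i]) / stddevs[i];
      lensq += delta * delta; // squared standardized distance, as in the inner loop above
    }
    // For data drawn from the model, lensq follows a chi-squared distribution
    // with one degree of freedom per dimension.
    return ChiSquaredDistribution.cdf(lensq, point.length);
  }
}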