Example 1 with Tokenizer

Use of de.lmu.ifi.dbs.elki.utilities.io.Tokenizer in project elki by elki-project.

From class AbstractDistributionEstimatorTest, method load:

protected void load(String name) {
    data = new HashMap<>();
    try (InputStream in = new GZIPInputStream(AbstractDistributionTest.class.getResourceAsStream(name));
        TokenizedReader reader = new TokenizedReader(Pattern.compile(" "), "\"", Pattern.compile("^\\s*#.*"))) {
        Tokenizer t = reader.getTokenizer();
        DoubleArray buf = new DoubleArray();
        reader.reset(in);
        while (reader.nextLineExceptComments()) {
            // The first token of each line is the key; the remaining tokens are doubles.
            assertTrue(t.valid());
            String key = t.getStrippedSubstring();
            buf.clear();
            for (t.advance(); t.valid(); t.advance()) {
                buf.add(t.getDouble());
            }
            data.put(key, buf.toArray());
        }
    } catch (IOException e) {
        fail("Cannot load data.");
    }
}
Also used : GZIPInputStream (java.util.zip.GZIPInputStream), AbstractDistributionTest (de.lmu.ifi.dbs.elki.math.statistics.distribution.AbstractDistributionTest), InputStream (java.io.InputStream), TokenizedReader (de.lmu.ifi.dbs.elki.utilities.io.TokenizedReader), IOException (java.io.IOException), Tokenizer (de.lmu.ifi.dbs.elki.utilities.io.Tokenizer), DoubleArray (de.lmu.ifi.dbs.elki.utilities.datastructures.arraylike.DoubleArray)
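
The same idiom recurs in all of the examples on this page: construct a TokenizedReader, obtain its Tokenizer once, reset the reader on an input stream, then iterate line by line. The minimal, self-contained sketch below isolates that idiom. The TokenizedReader and Tokenizer calls mirror the ones used above; the ByteArrayInputStream input and the TokenizerSketch class are illustrative scaffolding, not part of the original test.

import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.nio.charset.StandardCharsets;
import java.util.regex.Pattern;

import de.lmu.ifi.dbs.elki.utilities.io.TokenizedReader;
import de.lmu.ifi.dbs.elki.utilities.io.Tokenizer;

public class TokenizerSketch {
    public static void main(String[] args) throws IOException {
        // One comment line (skipped by nextLineExceptComments) and one data line,
        // in the key-plus-doubles format the tests above expect.
        String text = "# a comment line\nnormal 1.0 2.5 3.25\n";
        try (InputStream in = new ByteArrayInputStream(text.getBytes(StandardCharsets.UTF_8));
            // Same constructor arguments as above: column separator, quote character, comment pattern.
            TokenizedReader reader = new TokenizedReader(Pattern.compile(" "), "\"", Pattern.compile("^\\s*#.*"))) {
            Tokenizer t = reader.getTokenizer();
            reader.reset(in);
            while (reader.nextLineExceptComments()) {
                String key = t.getStrippedSubstring(); // first token: the row label
                for (t.advance(); t.valid(); t.advance()) {
                    System.out.println(key + " -> " + t.getDouble());
                }
            }
        }
    }
}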

Example 2 with Tokenizer

Use of de.lmu.ifi.dbs.elki.utilities.io.Tokenizer in project elki by elki-project.

From class ExternalDoubleOutlierScore, method run:

/**
 * Run the algorithm.
 *
 * @param database Database to use
 * @param relation Relation to use
 * @return Result
 */
public OutlierResult run(Database database, Relation<?> relation) {
    WritableDoubleDataStore scores = DataStoreUtil.makeDoubleStorage(relation.getDBIDs(), DataStoreFactory.HINT_STATIC);
    DoubleMinMax minmax = new DoubleMinMax();
    try (InputStream in = FileUtil.tryGzipInput(new FileInputStream(file));
        TokenizedReader reader = CSVReaderFormat.DEFAULT_FORMAT.makeReader()) {
        Tokenizer tokenizer = reader.getTokenizer();
        CharSequence buf = reader.getBuffer();
        Matcher mi = idpattern.matcher(buf), ms = scorepattern.matcher(buf);
        reader.reset(in);
        while (reader.nextLineExceptComments()) {
            Integer id = null;
            double score = Double.NaN;
            // The tokenizer is already positioned on the first token by nextLineExceptComments().
            for (; tokenizer.valid(); tokenizer.advance()) {
                mi.region(tokenizer.getStart(), tokenizer.getEnd());
                ms.region(tokenizer.getStart(), tokenizer.getEnd());
                final boolean mif = mi.find();
                final boolean msf = ms.find();
                if (mif && msf) {
                    throw new AbortException("ID pattern and score pattern both match value: " + tokenizer.getSubstring());
                }
                if (mif) {
                    if (id != null) {
                        throw new AbortException("ID pattern matched twice: previous value " + id + " second value: " + tokenizer.getSubstring());
                    }
                    id = ParseUtil.parseIntBase10(buf, mi.end(), tokenizer.getEnd());
                }
                if (msf) {
                    if (!Double.isNaN(score)) {
                        throw new AbortException("Score pattern matched twice: previous value " + score + " second value: " + tokenizer.getSubstring());
                    }
                    score = ParseUtil.parseDouble(buf, ms.end(), tokenizer.getEnd());
                }
            }
            if (id != null && !Double.isNaN(score)) {
                scores.putDouble(DBIDUtil.importInteger(id), score);
                minmax.put(score);
            } else if (id == null && Double.isNaN(score)) {
                LOG.warning("Line matched neither the ID pattern nor the score pattern: " + reader.getLineNumber());
            } else {
                throw new AbortException("Line matched only ID or only SCORE patterns: " + reader.getLineNumber());
            }
        }
    } catch (IOException e) {
        throw new AbortException("Could not load outlier scores: " + e.getMessage() + " when loading " + file, e);
    }
    OutlierScoreMeta meta;
    if (inverted) {
        // Inverted scores: lower values indicate outliers.
        meta = new InvertedOutlierScoreMeta(minmax.getMin(), minmax.getMax());
    } else {
        meta = new BasicOutlierScoreMeta(minmax.getMin(), minmax.getMax());
    }
    DoubleRelation scoresult = new MaterializedDoubleRelation("External Outlier", "external-outlier", scores, relation.getDBIDs());
    OutlierResult or = new OutlierResult(meta, scoresult);
    // Apply scaling
    if (scaling instanceof OutlierScalingFunction) {
        ((OutlierScalingFunction) scaling).prepare(or);
    }
    DoubleMinMax mm = new DoubleMinMax();
    for (DBIDIter iditer = relation.iterDBIDs(); iditer.valid(); iditer.advance()) {
        double val = scoresult.doubleValue(iditer);
        val = scaling.getScaled(val);
        scores.putDouble(iditer, val);
        mm.put(val);
    }
    meta = new BasicOutlierScoreMeta(mm.getMin(), mm.getMax());
    or = new OutlierResult(meta, scoresult);
    return or;
}
Also used : WritableDoubleDataStore (de.lmu.ifi.dbs.elki.database.datastore.WritableDoubleDataStore), Matcher (java.util.regex.Matcher), FileInputStream (java.io.FileInputStream), InputStream (java.io.InputStream), OutlierScalingFunction (de.lmu.ifi.dbs.elki.utilities.scaling.outlier.OutlierScalingFunction), OutlierResult (de.lmu.ifi.dbs.elki.result.outlier.OutlierResult), InvertedOutlierScoreMeta (de.lmu.ifi.dbs.elki.result.outlier.InvertedOutlierScoreMeta), IOException (java.io.IOException), DoubleRelation (de.lmu.ifi.dbs.elki.database.relation.DoubleRelation), MaterializedDoubleRelation (de.lmu.ifi.dbs.elki.database.relation.MaterializedDoubleRelation), BasicOutlierScoreMeta (de.lmu.ifi.dbs.elki.result.outlier.BasicOutlierScoreMeta), OutlierScoreMeta (de.lmu.ifi.dbs.elki.result.outlier.OutlierScoreMeta), DBIDIter (de.lmu.ifi.dbs.elki.database.ids.DBIDIter), DoubleMinMax (de.lmu.ifi.dbs.elki.math.DoubleMinMax), TokenizedReader (de.lmu.ifi.dbs.elki.utilities.io.TokenizedReader), Tokenizer (de.lmu.ifi.dbs.elki.utilities.io.Tokenizer), AbortException (de.lmu.ifi.dbs.elki.utilities.exceptions.AbortException)
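
The per-token dispatch above hinges on Matcher.region(int, int): both matchers are created once against the reader's shared line buffer, then narrowed to the current token's boundaries before each find(). The sketch below demonstrates that regioning technique with plain java.util.regex. The concrete patterns and the hard-coded token boundaries are hypothetical stand-ins for the user-configured idpattern/scorepattern and for the Tokenizer's getStart()/getEnd().

import java.util.regex.Matcher;
import java.util.regex.Pattern;

public class RegionMatchSketch {
    public static void main(String[] args) {
        // Hypothetical patterns; in ELKI these are configurable parameters.
        Pattern idpattern = Pattern.compile("^ID=");
        Pattern scorepattern = Pattern.compile("^score=");
        CharSequence buf = "ID=42 score=0.75"; // stands in for reader.getBuffer()
        Matcher mi = idpattern.matcher(buf), ms = scorepattern.matcher(buf);
        // Token boundaries as a Tokenizer would report them: [start, end) per token.
        int[][] tokens = { { 0, 5 }, { 6, 16 } };
        for (int[] tok : tokens) {
            // Restrict both matchers to the current token, as run() does.
            mi.region(tok[0], tok[1]);
            ms.region(tok[0], tok[1]);
            if (mi.find()) {
                // Parse the remainder of the token after the matched prefix,
                // analogous to ParseUtil.parseIntBase10(buf, mi.end(), end).
                int id = Integer.parseInt(buf.subSequence(mi.end(), tok[1]).toString());
                System.out.println("id = " + id);
            } else if (ms.find()) {
                double score = Double.parseDouble(buf.subSequence(ms.end(), tok[1]).toString());
                System.out.println("score = " + score);
            }
        }
    }
}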

Example 3 with Tokenizer

Use of de.lmu.ifi.dbs.elki.utilities.io.Tokenizer in project elki by elki-project.

From class KernelDensityFittingTest, method testFitDoubleArray:

/**
 * The test will load the given data set and perform a Levenberg-Marquardt
 * fitting on a kernelized density estimation. The test evaluates the fitting
 * quality to ensure that the results remain stable and significantly better
 * than traditional estimation.
 */
@Test
public final void testFitDoubleArray() throws IOException {
    DoubleArray data = new DoubleArray();
    try (InputStream in = new GZIPInputStream(getClass().getResourceAsStream(dataset));
        TokenizedReader reader = new TokenizedReader(Pattern.compile(" "), "\"", Pattern.compile("^\\s*#.*"))) {
        Tokenizer t = reader.getTokenizer();
        reader.reset(in);
        while (reader.nextLineExceptComments() && t.valid()) {
            // Read first column only
            data.add(t.getDouble());
        }
    }
    // verify data set size.
    assertEquals("Data set size doesn't match parameters.", realsize, data.size());
    double splitval = 0.5;
    double[] fulldata = data.toArray();
    Arrays.sort(fulldata);
    // Check that the initial parameters match what we were expecting from the
    // data.
    double[] fullparams = estimateInitialParameters(fulldata);
    assertEquals("Full Mean before fitting", 0.4446105, fullparams[0], 0.0001);
    assertEquals("Full Stddev before fitting", 1.4012001, fullparams[1], 0.0001);
    // Do a fit using the full data and check the results are right.
    double[] fullfit = run(fulldata, fullparams);
    assertEquals("Full Mean after fitting", 0.64505, fullfit[0], 0.01);
    assertEquals("Full Stddev after fitting", 1.5227889, fullfit[1], 0.01);
    int splitpoint = 0;
    while (splitpoint < fulldata.length && fulldata[splitpoint] < splitval) {
        splitpoint++;
    }
    double[] halfdata = Arrays.copyOf(fulldata, splitpoint);
    // Check that the initial parameters match what we were expecting from the
    // data.
    double[] params = estimateInitialParameters(halfdata);
    assertEquals("Mean before fitting", -0.65723044, params[0], 0.0001);
    assertEquals("Stddev before fitting", 1.0112391, params[1], 0.0001);
    // Do a fit using only part of the data and check the results are right.
    double[] ps = run(halfdata, params);
    assertEquals("Mean after fitting", 0.45980, ps[0], 0.01);
    assertEquals("Stddev after fitting", 1.320427, ps[1], 0.01);
}
Also used : GZIPInputStream (java.util.zip.GZIPInputStream), InputStream (java.io.InputStream), TokenizedReader (de.lmu.ifi.dbs.elki.utilities.io.TokenizedReader), DoubleArray (de.lmu.ifi.dbs.elki.utilities.datastructures.arraylike.DoubleArray), Tokenizer (de.lmu.ifi.dbs.elki.utilities.io.Tokenizer), Test (org.junit.Test)
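
estimateInitialParameters and run are helpers defined elsewhere in the test class and are not shown here. For orientation only, a plausible initial estimate is the plain sample mean and standard deviation; the sketch below is hypothetical and not ELKI's actual implementation.

public class InitialParamsSketch {
    // Hypothetical stand-in for the test's estimateInitialParameters helper:
    // sample mean and standard deviation of the data.
    static double[] estimateInitialParameters(double[] data) {
        double sum = 0.;
        for (double v : data) {
            sum += v;
        }
        double mean = sum / data.length;
        double sq = 0.;
        for (double v : data) {
            double d = v - mean;
            sq += d * d;
        }
        // Population normalization; the real helper may differ (e.g. use robust estimates).
        return new double[] { mean, Math.sqrt(sq / data.length) };
    }

    public static void main(String[] args) {
        double[] p = estimateInitialParameters(new double[] { -0.5, 0.1, 0.4, 1.8 });
        System.out.println("mean=" + p[0] + ", stddev=" + p[1]);
    }
}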

Example 4 with Tokenizer

Use of de.lmu.ifi.dbs.elki.utilities.io.Tokenizer in project elki by elki-project.

From class AbstractDistributionTest, method load:

protected void load(String name) {
    data = new HashMap<>();
    try (InputStream in = new GZIPInputStream(getClass().getResourceAsStream(name));
        TokenizedReader reader = new TokenizedReader(Pattern.compile(" "), "\"", Pattern.compile("^\\s*#.*"))) {
        Tokenizer t = reader.getTokenizer();
        DoubleArray buf = new DoubleArray();
        reader.reset(in);
        while (reader.nextLineExceptComments()) {
            assertTrue(t.valid());
            String key = t.getStrippedSubstring();
            buf.clear();
            for (t.advance(); t.valid(); t.advance()) {
                buf.add(t.getDouble());
            }
            data.put(key, buf.toArray());
        }
    } catch (IOException e) {
        fail("Cannot load data.");
    }
}
Also used : GZIPInputStream (java.util.zip.GZIPInputStream), InputStream (java.io.InputStream), TokenizedReader (de.lmu.ifi.dbs.elki.utilities.io.TokenizedReader), IOException (java.io.IOException), Tokenizer (de.lmu.ifi.dbs.elki.utilities.io.Tokenizer), DoubleArray (de.lmu.ifi.dbs.elki.utilities.datastructures.arraylike.DoubleArray)

Example 5 with Tokenizer

Use of de.lmu.ifi.dbs.elki.utilities.io.Tokenizer in project elki by elki-project.

From class ExternalClustering, method run:

/**
 * Run the algorithm.
 *
 * @param database Database to use
 * @return Result
 */
@Override
public Clustering<? extends Model> run(Database database) {
    Clustering<? extends Model> m = null;
    try (InputStream in = FileUtil.tryGzipInput(new FileInputStream(file));
        TokenizedReader reader = CSVReaderFormat.DEFAULT_FORMAT.makeReader()) {
        Tokenizer tokenizer = reader.getTokenizer();
        reader.reset(in);
        IntArrayList assignment = new IntArrayList(database.getRelation(TypeUtil.DBID).size());
        ArrayList<String> name = new ArrayList<>();
        line: while (reader.nextLineExceptComments()) {
            // The tokenizer is already positioned on the first token by nextLineExceptComments().
            for (; tokenizer.valid(); tokenizer.advance()) {
                try {
                    assignment.add(tokenizer.getIntBase10());
                } catch (NumberFormatException e) {
                    name.add(tokenizer.getSubstring());
                }
            }
            if (LOG.isDebuggingFinest()) {
                LOG.debugFinest("Read " + assignment.size() + " assignments and " + name.size() + " labels.");
            }
            for (Relation<?> r : database.getRelations()) {
                if (r.size() == assignment.size()) {
                    attachToRelation(database, r, assignment, name);
                    assignment.clear();
                    name.clear();
                    continue line;
                }
            }
            throw new AbortException("No relation found to match with clustering of size " + assignment.size());
        }
    } catch (IOException e) {
        throw new AbortException("Could not load outlier scores: " + e.getMessage() + " when loading " + file, e);
    }
    // m is never reassigned; the clusterings are attached inside attachToRelation.
    return m;
}
Also used : FileInputStream (java.io.FileInputStream), InputStream (java.io.InputStream), ArrayList (java.util.ArrayList), IntArrayList (it.unimi.dsi.fastutil.ints.IntArrayList), IOException (java.io.IOException), Relation (de.lmu.ifi.dbs.elki.database.relation.Relation), TokenizedReader (de.lmu.ifi.dbs.elki.utilities.io.TokenizedReader), Tokenizer (de.lmu.ifi.dbs.elki.utilities.io.Tokenizer), AbortException (de.lmu.ifi.dbs.elki.utilities.exceptions.AbortException)
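
The notable detail in this example is the per-token dispatch: tokens that parse as base-10 integers become cluster assignments, and any token that fails integer parsing is kept as a cluster label. The self-contained sketch below reproduces that decision with plain JDK calls; the sample input line and the split(" ") tokenization are illustrative stand-ins for the Tokenizer.

import java.util.ArrayList;
import java.util.List;

public class AssignmentParseSketch {
    public static void main(String[] args) {
        // Hypothetical input line: one label followed by five assignments.
        String line = "noise 0 0 1 1 2";
        List<Integer> assignment = new ArrayList<>();
        List<String> name = new ArrayList<>();
        for (String tok : line.split(" ")) {
            try {
                assignment.add(Integer.parseInt(tok)); // mirrors tokenizer.getIntBase10()
            } catch (NumberFormatException e) {
                name.add(tok); // mirrors tokenizer.getSubstring()
            }
        }
        System.out.println("assignments=" + assignment + " labels=" + name);
    }
}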

Aggregations

TokenizedReader (de.lmu.ifi.dbs.elki.utilities.io.TokenizedReader): 5 uses
Tokenizer (de.lmu.ifi.dbs.elki.utilities.io.Tokenizer): 5 uses
InputStream (java.io.InputStream): 5 uses
IOException (java.io.IOException): 4 uses
DoubleArray (de.lmu.ifi.dbs.elki.utilities.datastructures.arraylike.DoubleArray): 3 uses
GZIPInputStream (java.util.zip.GZIPInputStream): 3 uses
AbortException (de.lmu.ifi.dbs.elki.utilities.exceptions.AbortException): 2 uses
FileInputStream (java.io.FileInputStream): 2 uses
WritableDoubleDataStore (de.lmu.ifi.dbs.elki.database.datastore.WritableDoubleDataStore): 1 use
DBIDIter (de.lmu.ifi.dbs.elki.database.ids.DBIDIter): 1 use
DoubleRelation (de.lmu.ifi.dbs.elki.database.relation.DoubleRelation): 1 use
MaterializedDoubleRelation (de.lmu.ifi.dbs.elki.database.relation.MaterializedDoubleRelation): 1 use
Relation (de.lmu.ifi.dbs.elki.database.relation.Relation): 1 use
DoubleMinMax (de.lmu.ifi.dbs.elki.math.DoubleMinMax): 1 use
AbstractDistributionTest (de.lmu.ifi.dbs.elki.math.statistics.distribution.AbstractDistributionTest): 1 use
BasicOutlierScoreMeta (de.lmu.ifi.dbs.elki.result.outlier.BasicOutlierScoreMeta): 1 use
InvertedOutlierScoreMeta (de.lmu.ifi.dbs.elki.result.outlier.InvertedOutlierScoreMeta): 1 use
OutlierResult (de.lmu.ifi.dbs.elki.result.outlier.OutlierResult): 1 use
OutlierScoreMeta (de.lmu.ifi.dbs.elki.result.outlier.OutlierScoreMeta): 1 use
OutlierScalingFunction (de.lmu.ifi.dbs.elki.utilities.scaling.outlier.OutlierScalingFunction): 1 use