Search in sources:

Example 1 with PeakListRowSorter

Use of net.sf.mzmine.util.PeakListRowSorter in project mzmine2 by mzmine.

The class MSMSLibrarySubmissionWindow, method setData:

/**
 * Sort rows
 *
 * @param rows
 * @param sorting
 * @param direction
 * @param isFragmentScan
 */
public void setData(PeakListRow[] rows, SortingProperty sorting, SortingDirection direction, boolean isFragmentScan) {
    Arrays.sort(rows, new PeakListRowSorter(sorting, direction));
    setData(rows, isFragmentScan);
}
Also used: PeakListRowSorter(net.sf.mzmine.util.PeakListRowSorter)
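A minimal sketch of how a caller might feed rows into this window, assuming the window can be constructed directly and shown like any Swing window; SortingProperty.MZ and SortingDirection.Ascending are the constants used elsewhere in these examples, everything else here is illustrative:

PeakListRow[] selectedRows = peakList.getRows();
// hypothetical construction; the real window may need additional setup
MSMSLibrarySubmissionWindow window = new MSMSLibrarySubmissionWindow();
// sort by ascending m/z before display; setData() applies the PeakListRowSorter internally
window.setData(selectedRows, SortingProperty.MZ, SortingDirection.Ascending, true);
window.setVisible(true);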

Example 2 with PeakListRowSorter

Use of net.sf.mzmine.util.PeakListRowSorter in project mzmine2 by mzmine.

The class IsotopePeakScannerTask, method run:

@Override
public void run() {
    if (!checkParameters())
        return;
    setStatus(TaskStatus.PROCESSING);
    totalRows = peakList.getNumberOfRows();
    double[][] diff = setUpDiffAutoCarbon();
    if (diff == null) {
        message = "ERROR: could not set up diff.";
        setStatus(TaskStatus.ERROR);
        return;
    }
    logger.info("diff.length: " + diff.length);
    logger.info("maxPatternIndex: " + maxPatternIndex);
    logger.info("maxPatternSize: " + maxPatternSize);
    // get all rows and sort by m/z
    PeakListRow[] rows = peakList.getRows();
    Arrays.sort(rows, new PeakListRowSorter(SortingProperty.MZ, SortingDirection.Ascending));
    PeakListHandler plh = new PeakListHandler();
    plh.setUp(peakList);
    resultPeakList = new SimplePeakList(peakList.getName() + suffix, peakList.getRawDataFiles());
    PeakListHandler resultMap = new PeakListHandler();
    for (int i = 0; i < totalRows; i++) {
        // i will represent the index of the row in peakList
        if (rows[i].getPeakIdentities().length > 0) {
            finishedRows++;
            continue;
        }
        message = "Row " + i + "/" + totalRows;
        // now get all peaks that lie within RT and maxIsotopeMassRange: pL[index].mz ->
        // pL[index].mz+maxMass
        ArrayList<PeakListRow> groupedPeaks = groupPeaks(rows, i, diff[maxPatternIndex][diff[maxPatternIndex].length - 1]);
        if (groupedPeaks.size() < 2) {
            finishedRows++;
            continue;
        }
        // else
        // logger.info("groupedPeaks.size > 2 in row: " + i + " size: " +
        // groupedPeaks.size());
        // this will store row indexes of all features with fitting rt and mz
        ResultBuffer[][] resultBuffer = new ResultBuffer[diff.length][];
        for (int p = 0; p < diff.length; p++) {
            // resultBuffer[p][k] will represent Isotope[k] (if numAtoms = 0);
            // [p][0] will be the isotope with the lowest mass
            resultBuffer[p] = new ResultBuffer[diff[p].length];
            for (int k = 0; k < diff[p].length; k++)
                resultBuffer[p][k] = new ResultBuffer();
        }
        boolean[] trueBuffers = new boolean[diff.length];
        Arrays.fill(trueBuffers, false);
        // go through all possible peaks
        for (int j = 0; j < groupedPeaks.size(); j++) {
            for (int p = 0; p < diff.length; p++) {
                // check for each peak if it is a possible feature for every diff[] (isotope)
                for (int k = 0; k < diff[p].length; k++) {
                    // p = pattern index for autoCarbon
                    if (mzTolerance.checkWithinTolerance(groupedPeaks.get(0).getAverageMZ() + diff[p][k], groupedPeaks.get(j).getAverageMZ())) {
                        // this will automatically add groupedPeaks[0] to the list -> isotope with
                        // lowest mass
                        // +1 result for isotope k
                        resultBuffer[p][k].addFound();
                        // row in groupedPeaks[]
                        resultBuffer[p][k].addRow(j);
                        resultBuffer[p][k].addID(groupedPeaks.get(j).getID());
                    }
                }
            }
        }
        boolean foundOne = false;
        for (int p = 0; p < diff.length; p++) {
            if (checkIfAllTrue(resultBuffer[p])) {
                // this means that for every isotope we expected to find,
                // we found one or more possible features
                foundOne = true;
                trueBuffers[p] = true;
                // logger.info("Row: " + i + " filled buffer[" + p + "]");
            }
        }
        if (!foundOne) {
            finishedRows++;
            continue;
        }
        Candidates[] candidates = new Candidates[diff.length];
        for (int p = 0; p < diff.length; p++)
            candidates[p] = new Candidates(diff[p].length, minHeight, mzTolerance, pattern[p], massListName, plh, ratingType);
        for (int p = 0; p < diff.length; p++) {
            if (!trueBuffers[p])
                continue;
            // reminder: resultBuffer.length = diff.length
            for (int k = 0; k < resultBuffer[p].length; k++) {
                for (int l = 0; l < resultBuffer[p][k].getFoundCount(); l++) {
                    // k represents index resultBuffer[k] and thereby the isotope number
                    // l represents the number of results in resultBuffer[k]
                    candidates[p].checkForBetterRating(k, groupedPeaks.get(0), groupedPeaks.get(resultBuffer[p][k].getRow(l)), minRating, checkIntensity);
                }
            }
        }
        foundOne = false;
        boolean[] trueCandidates = new boolean[diff.length];
        Arrays.fill(trueCandidates, false);
        for (int p = 0; p < diff.length; p++) {
            if (trueBuffers[p] && checkIfAllTrue(candidates[p].getCandidates())) {
                trueCandidates[p] = true;
                foundOne = true;
            // logger.info("Row: " + i + " filled candidates[" + p + "]");
            }
        }
        if (!foundOne) {
            finishedRows++;
            // jump to next i
            continue;
        }
        // find best result now, first we have to calc avg ratings if specified by user
        int bestPatternIndex = 0;
        double bestRating = 0.0;
        for (int p = 0; p < diff.length; p++) {
            if (!trueCandidates[p])
                continue;
            if (accurateAvgIntensity)
                candidates[p].calcAvgRatings();
            if (accurateAvgIntensity && candidates[p].getAvgAccAvgRating() > bestRating) {
                bestPatternIndex = p;
                bestRating = candidates[p].getAvgAccAvgRating();
            } else if (!accurateAvgIntensity && candidates[p].getSimpleAvgRating() > bestRating) {
                bestPatternIndex = p;
                bestRating = candidates[p].getSimpleAvgRating();
            }
        }
        if (!checkIfAllTrue(candidates[bestPatternIndex].getCandidates())) {
            logger.warning("We were about to add candidates with null pointers.\nThis was no valid result. Continuing.");
            continue;
        }
        // TODO: this shouldn't be needed; fix the bug that causes the crash later on.
        // This happens occasionally if the user wants to do accurate average but does not filter
        // by RT: possible isotope peaks are found although they are not detected at the same
        // time. This makes the candidates return -1.0, which will sooner or later produce a
        // null pointer. Fixing this will be done in a future update, but needs a rework of the
        // Candidates class.
        // The results you miss by skipping here would not have been valid results anyway, so this
        // is not urgent. It will be nicer though, because of cleaner code.
        // PeakListRow parent = copyPeakRow(peakList.getRow(i));
        boolean allPeaksAddable = true;
        List<PeakListRow> rowBuffer = new ArrayList<PeakListRow>();
        PeakListRow original = getRowFromCandidate(candidates, bestPatternIndex, 0, plh);
        if (original == null)
            continue;
        PeakListRow parent = copyPeakRow(original);
        // if we can assign this row multiple times we have to copy the comment,
        // because adding it to the map twice will overwrite the results
        if (resultMap.containsID(parent.getID()))
            addComment(parent, resultMap.getRowByID(parent.getID()).getComment());
        // ID is added to be able to sort by
        addComment(parent, parent.getID() + "--IS PARENT--");
        if (carbonRange != 1)
            addComment(parent, "BestPattern: " + pattern[bestPatternIndex].getDescription());
        rowBuffer.add(parent);
        DataPoint[] dp = new DataPoint[pattern[bestPatternIndex].getNumberOfDataPoints()];
        if (accurateAvgIntensity) {
            dp[0] = new SimpleDataPoint(parent.getAverageMZ(), candidates[bestPatternIndex].getAvgHeight(0));
        } else {
            dp[0] = new SimpleDataPoint(parent.getAverageMZ(), parent.getAverageHeight());
        }
        // we skip k=0 because it == groupedPeaks[0] == candidates.get(0), which we added before
        for (int k = 1; k < candidates[bestPatternIndex].size(); k++) {
            PeakListRow originalChild = getRowFromCandidate(candidates, bestPatternIndex, k, plh);
            if (originalChild == null) {
                allPeaksAddable = false;
                continue;
            }
            PeakListRow child = copyPeakRow(originalChild);
            if (accurateAvgIntensity) {
                dp[k] = new SimpleDataPoint(child.getAverageMZ(), candidates[bestPatternIndex].getAvgHeight(k));
            } else {
                dp[k] = new SimpleDataPoint(child.getAverageMZ(), child.getAverageHeight());
            }
            String average = "";
            if (accurateAvgIntensity) {
                average = " AvgRating: " + round(candidates[bestPatternIndex].getAvgRating(k), 3);
            }
            addComment(parent, "Intensity ratios: " + getIntensityRatios(pattern[bestPatternIndex], pattern[bestPatternIndex].getHighestDataPointIndex()));
            if (accurateAvgIntensity)
                addComment(parent, " Avg pattern rating: " + round(candidates[bestPatternIndex].getAvgAccAvgRating(), 3));
            else
                addComment(parent, " pattern rating: " + round(candidates[bestPatternIndex].getSimpleAvgRating(), 3));
            addComment(child, (parent.getID() + "-Parent ID" + " m/z-shift(ppm): " + round(((child.getAverageMZ() - parent.getAverageMZ()) - diff[bestPatternIndex][k]) / child.getAverageMZ() * 1E6, 2) + " I(c)/I(p): " + round(child.getAverageHeight() / plh.getRowByID(candidates[bestPatternIndex].get(pattern[bestPatternIndex].getHighestDataPointIndex()).getCandID()).getAverageHeight(), 2) + " Identity: " + pattern[bestPatternIndex].getIsotopeComposition(k) + " Rating: " + round(candidates[bestPatternIndex].get(k).getRating(), 3) + average));
            rowBuffer.add(child);
        }
        if (!allPeaksAddable)
            continue;
        IsotopePattern resultPattern = new SimpleIsotopePattern(dp, IsotopePatternStatus.DETECTED, element + " monoisotopic mass: " + parent.getAverageMZ());
        parent.getBestPeak().setIsotopePattern(resultPattern);
        for (PeakListRow row : rowBuffer) {
            row.getBestPeak().setIsotopePattern(resultPattern);
            resultMap.addRow(row);
        }
        if (isCanceled())
            return;
        finishedRows++;
    }
    ArrayList<Integer> keys = resultMap.getAllKeys();
    for (int j = 0; j < keys.size(); j++) resultPeakList.addRow(resultMap.getRowByID(keys.get(j)));
    if (resultPeakList.getNumberOfRows() > 1)
        addResultToProject();
    else
        message = "Element not found.";
    setStatus(TaskStatus.FINISHED);
}
Also used: ArrayList(java.util.ArrayList), IsotopePattern(net.sf.mzmine.datamodel.IsotopePattern), ExtendedIsotopePattern(net.sf.mzmine.datamodel.impl.ExtendedIsotopePattern), SimpleIsotopePattern(net.sf.mzmine.datamodel.impl.SimpleIsotopePattern), PeakListRowSorter(net.sf.mzmine.util.PeakListRowSorter), SimpleDataPoint(net.sf.mzmine.datamodel.impl.SimpleDataPoint), SimplePeakListRow(net.sf.mzmine.datamodel.impl.SimplePeakListRow), PeakListRow(net.sf.mzmine.datamodel.PeakListRow), DataPoint(net.sf.mzmine.datamodel.DataPoint), SimplePeakList(net.sf.mzmine.datamodel.impl.SimplePeakList)
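The loop above repeatedly calls a checkIfAllTrue helper that is not shown in this snippet. From the way it is used (a pattern survives only if every expected isotope position collected at least one candidate peak), a plausible sketch looks like the following; getFoundCount() is taken from the loop in the example, the rest is an assumption about the helper's shape:

// Sketch only: true if every expected isotope position collected at least one matching peak.
private boolean checkIfAllTrue(ResultBuffer[] buffer) {
    for (ResultBuffer b : buffer) {
        if (b.getFoundCount() == 0)
            // no peak matched this isotope mass shift, so the pattern cannot be complete
            return false;
    }
    return true;
}

The overload applied to candidates[p].getCandidates() presumably performs the analogous validity check on Candidate objects.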

Example 3 with PeakListRowSorter

Use of net.sf.mzmine.util.PeakListRowSorter in project mzmine2 by mzmine.

The class MultiRawDataLearnerTask, method run:

/**
 * @see Runnable#run()
 */
@Override
public void run() {
    setStatus(TaskStatus.PROCESSING);
    logger.info("Running learner task on " + peakList);
    // Create a new results peakList which is added at the end
    resultPeakList = new SimplePeakList(peakList + " " + suffix, peakList.getRawDataFiles());
    /**
     * - A PeakList is a list of Features (peak in retention time dimension with accurate m/z)<br>
     * ---- contains one or multiple RawDataFiles <br>
     * ---- access mean retention time, mean m/z, maximum intensity, ...<br>
     * - A RawDataFile holds a full chromatographic run with all ms scans<br>
     * ---- Each Scan and the underlying raw data can be accessed <br>
     * ---- Scans can be filtered by MS level, polarity, ...<br>
     */
    // get all rows and sort by m/z
    PeakListRow[] rows = peakList.getRows();
    Arrays.sort(rows, new PeakListRowSorter(SortingProperty.MZ, SortingDirection.Ascending));
    // number of rawFiles is 1 prior to peaklist alignment
    RawDataFile[] rawFiles = peakList.getRawDataFiles();
    boolean isAlignedPeakList = rawFiles.length > 1;
    totalRows = rows.length;
    // loop through all rows
    for (PeakListRow row : rows) {
        // loop through all raw data files
        for (RawDataFile raw : rawFiles) {
            // check for cancelled state and stop
            if (isCanceled())
                return;
            // current peak
            Feature peak = row.getPeak(raw);
            // check for peak in row for specific raw file
            if (peak != null) {
                double mz = peak.getMZ();
                double intensity = peak.getHeight();
                double rt = peak.getRT();
            // do stuff
            // ...
            }
        }
        // Update completion rate
        processedRows++;
    }
    // add to project
    addResultToProject();
    logger.info("Finished on " + peakList);
    setStatus(TaskStatus.FINISHED);
}
Also used: PeakListRow(net.sf.mzmine.datamodel.PeakListRow), RawDataFile(net.sf.mzmine.datamodel.RawDataFile), SimplePeakList(net.sf.mzmine.datamodel.impl.SimplePeakList), Feature(net.sf.mzmine.datamodel.Feature), PeakListRowSorter(net.sf.mzmine.util.PeakListRowSorter)
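PeakListRowSorter is essentially a java.util.Comparator over PeakListRow (it is passed straight to Arrays.sort). A simplified equivalent of the call above, using only the getAverageMZ() accessor already visible in these examples; the real class also supports the other SortingProperty values and the descending direction:

// roughly equivalent to Arrays.sort(rows, new PeakListRowSorter(SortingProperty.MZ, SortingDirection.Ascending))
Comparator<PeakListRow> byAscendingMz =
    (a, b) -> Double.compare(a.getAverageMZ(), b.getAverageMZ());
Arrays.sort(rows, byAscendingMz);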

Example 4 with PeakListRowSorter

Use of net.sf.mzmine.util.PeakListRowSorter in project mzmine2 by mzmine.

The class MultiMSMSWindow, method setData:

/**
 * Sort rows
 *
 * @param rows
 * @param allRaw
 * @param raw
 * @param createMS1
 * @param sorting
 * @param direction
 */
public void setData(PeakListRow[] rows, RawDataFile[] allRaw, RawDataFile raw, boolean createMS1, SortingProperty sorting, SortingDirection direction) {
    Arrays.sort(rows, new PeakListRowSorter(sorting, direction));
    setData(rows, allRaw, raw, createMS1);
}
Also used: PeakListRowSorter(net.sf.mzmine.util.PeakListRowSorter)
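A hedged usage sketch for this overload; the constructor call and the choice of raw file are assumptions for illustration, while the parameter order matches the signature above:

PeakListRow[] rows = peakList.getRows();
RawDataFile[] allRaw = peakList.getRawDataFiles();
// hypothetical construction; show MS/MS for the first raw file, with an MS1 spectrum, sorted by m/z
MultiMSMSWindow msmsWindow = new MultiMSMSWindow();
msmsWindow.setData(rows, allRaw, allRaw[0], true, SortingProperty.MZ, SortingDirection.Ascending);
msmsWindow.setVisible(true);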

Example 5 with PeakListRowSorter

Use of net.sf.mzmine.util.PeakListRowSorter in project mzmine2 by mzmine.

The class NeutralLossFilterTask, method run:

@Override
public void run() {
    setStatus(TaskStatus.PROCESSING);
    totalRows = peakList.getNumberOfRows();
    ArrayList<Double> diff = setUpDiff();
    if (diff == null || Double.compare(dMassLoss, 0.0d) == 0) {
        setErrorMessage("Could not set up neutral loss. Mass loss could not be calculated from the formula or is 0.0");
        setStatus(TaskStatus.ERROR);
        return;
    }
    if (suffix.equals("auto")) {
        if (molecule.equals(""))
            suffix = " NL: " + dMassLoss + " RTtol: " + rtTolerance.getTolerance() + "_results";
        else
            suffix = " NL (" + molecule + "): " + dMassLoss + " RTtol: " + rtTolerance.getTolerance() + "_results";
    }
    // get all rows and sort by m/z
    PeakListRow[] rows = peakList.getRows();
    Arrays.sort(rows, new PeakListRowSorter(SortingProperty.MZ, SortingDirection.Ascending));
    PeakListHandler plh = new PeakListHandler();
    plh.setUp(peakList);
    resultPeakList = new SimplePeakList(peakList.getName() + suffix, peakList.getRawDataFiles());
    PeakListHandler resultMap = new PeakListHandler();
    for (int i = 0; i < totalRows; i++) {
        // i will represent the index of the row in peakList
        if (rows[i].getPeakIdentities().length > 0) {
            finishedRows++;
            continue;
        }
        message = "Row " + i + "/" + totalRows;
        // now get all peaks that lie within RT and maxIsotopeMassRange: pL[index].mz ->
        // pL[index].mz+maxMass
        ArrayList<PeakListRow> groupedPeaks = groupPeaks(rows, i, diff.get(diff.size() - 1).doubleValue());
        if (groupedPeaks.size() < 2) {
            finishedRows++;
            continue;
        }
        // this will store row indexes of all features with fitting rt and mz
        ResultBuffer[] resultBuffer = new ResultBuffer[diff.size()];
        // resultBuffer[i] index will represent Isotope[i] (if numAtoms = 0);
        // [0] will be the isotope with the lowest mass
        for (int a = 0; a < diff.size(); a++)
            resultBuffer[a] = new ResultBuffer();
        // go through all possible peaks
        for (int j = 0; j < groupedPeaks.size(); j++) {
            // check for each peak if it is a possible feature for every diff[] (isotope)
            for (int k = 0; k < diff.size(); k++) {
                // k represents the isotope number the peak will be a candidate for
                if (mzTolerance.checkWithinTolerance(groupedPeaks.get(0).getAverageMZ() + diff.get(k), groupedPeaks.get(j).getAverageMZ())) {
                    // this will automatically add groupedPeaks[0] to the list -> isotope with
                    // lowest mass
                    // +1 result for isotope k
                    resultBuffer[k].addFound();
                    // row in groupedPeaks[]
                    resultBuffer[k].addRow(j);
                    resultBuffer[k].addID(groupedPeaks.get(j).getID());
                }
            }
        }
        // checkIfAllTrue means that for every isotope we expected to find,
        // we found one or more possible features
        if (!checkIfAllTrue(resultBuffer)) {
            finishedRows++;
            continue;
        }
        Candidates candidates = new Candidates(diff.size(), minHeight, mzTolerance, plh);
        // reminder: resultBuffer.length = diff.size()
        for (int k = 0; k < resultBuffer.length; k++) {
            for (int l = 0; l < resultBuffer[k].getFoundCount(); l++) {
                // k represents index resultBuffer[k] and thereby the isotope number
                // l represents the number of results in resultBuffer[k]
                candidates.get(k).checkForBetterRating(groupedPeaks, 0, resultBuffer[k].getRow(l), diff.get(k), minRating);
            }
        }
        if (!checkIfAllTrue(candidates.getCandidates())) {
            finishedRows++;
            // jump to next i
            continue;
        }
        String comParent = "", comChild = "";
        PeakListRow originalChild = getRowFromCandidate(candidates, 0, plh);
        if (originalChild == null) {
            finishedRows++;
            continue;
        }
        PeakListRow child = copyPeakRow(originalChild);
        if (resultMap.containsID(child.getID()))
            comChild += resultMap.getRowByID(child.getID()).getComment();
        comChild += "Parent ID: " + candidates.get(1).getCandID();
        addComment(child, comChild);
        List<PeakListRow> rowBuffer = new ArrayList<PeakListRow>();
        boolean allPeaksAddable = true;
        rowBuffer.add(child);
        // we skip k=0 because it == groupedPeaks[0], which we added before
        for (int k = 1; k < candidates.size(); k++) {
            PeakListRow originalParent = getRowFromCandidate(candidates, 1, plh);
            if (originalParent == null) {
                allPeaksAddable = false;
                continue;
            }
            PeakListRow parent = copyPeakRow(originalParent);
            if (resultMap.containsID(parent.getID()))
                comParent += resultMap.getRowByID(parent.getID()).getComment();
            comParent += ("[--IS PARENT-- child ID: " + child.getID() + " ] | ");
            addComment(parent, comParent);
            addComment(child, " m/z shift(ppm): " + round(((parent.getAverageMZ() - child.getAverageMZ()) - diff.get(1)) / parent.getAverageMZ() * 1E6, 2) + " ");
            rowBuffer.add(parent);
        }
        if (allPeaksAddable)
            for (PeakListRow row : rowBuffer) resultMap.addRow(row);
        if (isCanceled())
            return;
        finishedRows++;
    }
    ArrayList<Integer> keys = resultMap.getAllKeys();
    for (int j = 0; j < keys.size(); j++) resultPeakList.addRow(resultMap.getRowByID(keys.get(j)));
    if (resultPeakList.getNumberOfRows() > 1)
        addResultToProject();
    else
        message = "Element not found.";
    setStatus(TaskStatus.FINISHED);
}
Also used: Candidates(net.sf.mzmine.modules.peaklistmethods.isotopes.isotopepeakscanner.Candidates), ResultBuffer(net.sf.mzmine.modules.peaklistmethods.isotopes.isotopepeakscanner.ResultBuffer), PeakListHandler(net.sf.mzmine.modules.peaklistmethods.isotopes.isotopepeakscanner.PeakListHandler), ArrayList(java.util.ArrayList), PeakListRowSorter(net.sf.mzmine.util.PeakListRowSorter), SimplePeakListRow(net.sf.mzmine.datamodel.impl.SimplePeakListRow), PeakListRow(net.sf.mzmine.datamodel.PeakListRow), SimplePeakList(net.sf.mzmine.datamodel.impl.SimplePeakList)
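Both scanner tasks also rely on a groupPeaks(rows, index, maxMass) helper that is not reproduced here. The inline comments describe its contract: collect the rows whose m/z lies between the anchor row's m/z and m/z + maxMass and whose retention time is within tolerance. A rough sketch under those assumptions; getAverageRT() and rtTolerance.checkWithinTolerance() are assumed to exist analogously to the m/z accessors used above, and the real helper may widen the window by the m/z tolerance:

// Sketch of the grouping step: anchor row rows[index], m/z window [mz, mz + maxMass], matching RT.
private ArrayList<PeakListRow> groupPeaks(PeakListRow[] rows, int index, double maxMass) {
    ArrayList<PeakListRow> group = new ArrayList<PeakListRow>();
    double anchorMz = rows[index].getAverageMZ();
    double anchorRt = rows[index].getAverageRT();
    for (int j = index; j < rows.length; j++) {
        // rows are sorted by ascending m/z, so we can stop once we leave the mass window
        if (rows[j].getAverageMZ() > anchorMz + maxMass)
            break;
        if (rtTolerance.checkWithinTolerance(anchorRt, rows[j].getAverageRT()))
            group.add(rows[j]);  // rows[index] itself is added first
    }
    return group;
}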

Aggregations

Types used together with PeakListRowSorter across the indexed examples (usage counts):

PeakListRowSorter (net.sf.mzmine.util.PeakListRowSorter): 18
PeakListRow (net.sf.mzmine.datamodel.PeakListRow): 16
SimplePeakList (net.sf.mzmine.datamodel.impl.SimplePeakList): 10
PeakList (net.sf.mzmine.datamodel.PeakList): 6
SimplePeakListRow (net.sf.mzmine.datamodel.impl.SimplePeakListRow): 5
Feature (net.sf.mzmine.datamodel.Feature): 3
RawDataFile (net.sf.mzmine.datamodel.RawDataFile): 3
SimplePeakListAppliedMethod (net.sf.mzmine.datamodel.impl.SimplePeakListAppliedMethod): 3
ArrayList (java.util.ArrayList): 2
Vector (java.util.Vector): 2
DataPoint (net.sf.mzmine.datamodel.DataPoint): 2
SimpleFeature (net.sf.mzmine.datamodel.impl.SimpleFeature): 2
Desktop (net.sf.mzmine.desktop.Desktop): 2
HeadLessDesktop (net.sf.mzmine.desktop.impl.HeadLessDesktop): 2
IsotopePattern (net.sf.mzmine.datamodel.IsotopePattern): 1
PeakListAppliedMethod (net.sf.mzmine.datamodel.PeakList.PeakListAppliedMethod): 1
ExtendedIsotopePattern (net.sf.mzmine.datamodel.impl.ExtendedIsotopePattern): 1
SimpleDataPoint (net.sf.mzmine.datamodel.impl.SimpleDataPoint): 1
SimpleIsotopePattern (net.sf.mzmine.datamodel.impl.SimpleIsotopePattern): 1
Candidates (net.sf.mzmine.modules.peaklistmethods.isotopes.isotopepeakscanner.Candidates): 1