Search in sources :

Example 1 with SimplePeakList

use of net.sf.mzmine.datamodel.impl.SimplePeakList in project mzmine2 by mzmine.

the class FragmentSearchTask method run.

/**
 * @see java.lang.Runnable#run()
 */
public void run() {
    setStatus(TaskStatus.PROCESSING);
    logger.info("Starting fragments search in " + peakList);
    PeakListRow[] rows = peakList.getRows();
    totalRows = rows.length;
    // Start with the highest peaks
    Arrays.sort(rows, new PeakListRowSorter(SortingProperty.Height, SortingDirection.Descending));
    // Compare each two rows against each other
    for (int i = 0; i < totalRows; i++) {
        for (int j = i + 1; j < rows.length; j++) {
            // Task canceled?
            if (isCanceled())
                return;
            // smaller one may be a fragment
            if (rows[i].getAverageMZ() > rows[j].getAverageMZ()) {
                if (checkFragment(rows[i], rows[j]))
                    addFragmentInfo(rows[i], rows[j]);
            } else {
                if (checkFragment(rows[j], rows[i]))
                    addFragmentInfo(rows[j], rows[i]);
            }
        }
        finishedRows++;
    }
    // Add task description to peakList
    ((SimplePeakList) peakList).addDescriptionOfAppliedTask(new SimplePeakListAppliedMethod("Identification of fragments", parameters));
    // Repaint the window to reflect the change in the feature list
    Desktop desktop = MZmineCore.getDesktop();
    if (!(desktop instanceof HeadLessDesktop))
        desktop.getMainWindow().repaint();
    setStatus(TaskStatus.FINISHED);
    logger.info("Finished fragments search in " + peakList);
}
Also used: PeakListRow(net.sf.mzmine.datamodel.PeakListRow) HeadLessDesktop(net.sf.mzmine.desktop.impl.HeadLessDesktop) Desktop(net.sf.mzmine.desktop.Desktop) SimplePeakList(net.sf.mzmine.datamodel.impl.SimplePeakList) SimplePeakListAppliedMethod(net.sf.mzmine.datamodel.impl.SimplePeakListAppliedMethod) PeakListRowSorter(net.sf.mzmine.util.PeakListRowSorter) DataPoint(net.sf.mzmine.datamodel.DataPoint)
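
The part of this task that touches SimplePeakList is the bookkeeping at the end: the peak list is cast to SimplePeakList and an applied-method entry describing the task is recorded. A minimal sketch of that idiom, using only the calls visible above (the helper class and method name are our own, and the ParameterSet import path is an assumption):

import net.sf.mzmine.datamodel.PeakList;
import net.sf.mzmine.datamodel.impl.SimplePeakList;
import net.sf.mzmine.datamodel.impl.SimplePeakListAppliedMethod;
import net.sf.mzmine.parameters.ParameterSet;

// Hypothetical helper illustrating the cast-and-record step at the end of run().
final class AppliedTaskRecorder {

    static void recordAppliedTask(PeakList peakList, String description, ParameterSet parameters) {
        // Cast to the concrete SimplePeakList, as the example above does,
        // and append a SimplePeakListAppliedMethod describing what was done.
        ((SimplePeakList) peakList)
                .addDescriptionOfAppliedTask(new SimplePeakListAppliedMethod(description, parameters));
    }
}

Calling recordAppliedTask(peakList, "Identification of fragments", parameters) mirrors the last bookkeeping step of FragmentSearchTask before it sets TaskStatus.FINISHED.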

Example 2 with SimplePeakList

use of net.sf.mzmine.datamodel.impl.SimplePeakList in project mzmine2 by mzmine.

the class Ms2SearchTask method run.

/**
 * @see java.lang.Runnable#run()
 */
public void run() {
    setStatus(TaskStatus.PROCESSING);
    logger.info("Starting MS2 similarity search between " + peakList1 + " and " + peakList2 + " with mz tolerance:" + mzTolerance.getPpmTolerance());
    Ms2SearchResult searchResult;
    PeakListRow[] rows1 = peakList1.getRows();
    PeakListRow[] rows2 = peakList2.getRows();
    int rows1Length = rows1.length;
    int rows2Length = rows2.length;
    totalRows = rows1Length;
    for (int i = 0; i < rows1Length; i++) {
        for (int j = 0; j < rows2Length; j++) {
            Feature featureA = rows1[i].getBestPeak();
            Feature featureB = rows2[j].getBestPeak();
            // Complication: the "best" peak may not have the "best" fragmentation
            Scan scanA = rows1[i].getBestFragmentation();
            Scan scanB = rows2[j].getBestFragmentation();
            searchResult = simpleMS2similarity(scanA, scanB, intensityThreshold, mzTolerance, massListName);
            // Report the final score to the peaklist identity
            if (searchResult != null && searchResult.getScore() > scoreThreshold && searchResult.getNumIonsMatched() >= minimumIonsMatched)
                this.addMS2Identity(rows1[i], featureA, featureB, searchResult);
            if (isCanceled())
                return;
        }
        // Update progress bar
        finishedRows++;
    }
    // Add task description to peakList
    ((SimplePeakList) peakList1).addDescriptionOfAppliedTask(new SimplePeakListAppliedMethod("Identification of similar MS2s", parameters));
    // Repaint the window to reflect the change in the feature list
    Desktop desktop = MZmineCore.getDesktop();
    if (!(desktop instanceof HeadLessDesktop))
        desktop.getMainWindow().repaint();
    setStatus(TaskStatus.FINISHED);
    logger.info("Finished MS2 similarity search for " + peakList1 + "against" + peakList2);
}
Also used: PeakListRow(net.sf.mzmine.datamodel.PeakListRow) HeadLessDesktop(net.sf.mzmine.desktop.impl.HeadLessDesktop) Desktop(net.sf.mzmine.desktop.Desktop) Scan(net.sf.mzmine.datamodel.Scan) SimplePeakList(net.sf.mzmine.datamodel.impl.SimplePeakList) SimplePeakListAppliedMethod(net.sf.mzmine.datamodel.impl.SimplePeakListAppliedMethod) Feature(net.sf.mzmine.datamodel.Feature) DataPoint(net.sf.mzmine.datamodel.DataPoint)
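
The scoring itself happens in simpleMS2similarity(...), which is not shown here. As a rough, hypothetical sketch of the matched-ion counting such a method might perform (absolute m/z tolerance and an intensity cutoff assumed; the real method works through the project's MZTolerance and mass-list machinery and also produces a score):

import net.sf.mzmine.datamodel.DataPoint;

// Hypothetical sketch only: counts fragment ions of spectrum A that have a partner
// in spectrum B within an absolute m/z tolerance, ignoring ions below a threshold.
final class Ms2MatchSketch {

    static int countMatchedIons(DataPoint[] spectrumA, DataPoint[] spectrumB,
            double mzToleranceAbs, double intensityThreshold) {
        int matched = 0;
        for (DataPoint a : spectrumA) {
            if (a.getIntensity() < intensityThreshold)
                continue;
            for (DataPoint b : spectrumB) {
                if (b.getIntensity() >= intensityThreshold
                        && Math.abs(a.getMZ() - b.getMZ()) <= mzToleranceAbs) {
                    matched++;
                    break; // count each ion of A at most once
                }
            }
        }
        return matched;
    }
}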

Example 3 with SimplePeakList

use of net.sf.mzmine.datamodel.impl.SimplePeakList in project mzmine2 by mzmine.

the class ADAP3DecompositionV1_5Task method decomposePeaks.

private PeakList decomposePeaks(PeakList peakList) throws CloneNotSupportedException, IOException {
    RawDataFile dataFile = peakList.getRawDataFile(0);
    // Create new feature list.
    final PeakList resolvedPeakList = new SimplePeakList(peakList + " " + parameters.getParameter(ADAP3DecompositionV1_5Parameters.SUFFIX).getValue(), dataFile);
    // Load previous applied methods.
    for (final PeakList.PeakListAppliedMethod method : peakList.getAppliedMethods()) {
        resolvedPeakList.addDescriptionOfAppliedTask(method);
    }
    // Add task description to feature list.
    resolvedPeakList.addDescriptionOfAppliedTask(new SimplePeakListAppliedMethod("Peak deconvolution by ADAP-3", parameters));
    // Collect peak information
    List<Peak> peaks = getPeaks(peakList, this.parameters.getParameter(ADAP3DecompositionV1_5Parameters.EDGE_TO_HEIGHT_RATIO).getValue(), this.parameters.getParameter(ADAP3DecompositionV1_5Parameters.DELTA_TO_HEIGHT_RATIO).getValue());
    // Find components (a.k.a. clusters of peaks with fragmentation spectra)
    List<Component> components = getComponents(peaks);
    // Create a PeakListRow for each component
    List<PeakListRow> newPeakListRows = new ArrayList<>();
    int rowID = 0;
    for (final Component component : components) {
        if (component.getSpectrum().isEmpty())
            continue;
        PeakListRow row = new SimplePeakListRow(++rowID);
        // Add the reference peak
        PeakListRow refPeakRow = originalPeakList.getRow(component.getBestPeak().getInfo().peakID);
        Feature refPeak = new SimpleFeature(refPeakRow.getBestPeak());
        // Add spectrum
        List<DataPoint> dataPoints = new ArrayList<>();
        for (Map.Entry<Double, Double> entry : component.getSpectrum().entrySet()) {
            dataPoints.add(new SimpleDataPoint(entry.getKey(), entry.getValue()));
        }
        refPeak.setIsotopePattern(new SimpleIsotopePattern(dataPoints.toArray(new DataPoint[dataPoints.size()]), IsotopePattern.IsotopePatternStatus.PREDICTED, "Spectrum"));
        row.addPeak(dataFile, refPeak);
        // Add PeakInformation
        if (refPeakRow.getPeakInformation() != null) {
            SimplePeakInformation information = new SimplePeakInformation(new HashMap<>(refPeakRow.getPeakInformation().getAllProperties()));
            row.setPeakInformation(information);
        }
        // Set row properties
        row.setAverageMZ(refPeakRow.getAverageMZ());
        row.setAverageRT(refPeakRow.getAverageRT());
        // resolvedPeakList.addRow(row);
        newPeakListRows.add(row);
    }
    // ------------------------------------
    // Sort new peak rows by retention time
    // ------------------------------------
    Collections.sort(newPeakListRows, new Comparator<PeakListRow>() {

        @Override
        public int compare(PeakListRow row1, PeakListRow row2) {
            double retTime1 = row1.getAverageRT();
            double retTime2 = row2.getAverageRT();
            return Double.compare(retTime1, retTime2);
        }
    });
    for (PeakListRow row : newPeakListRows) resolvedPeakList.addRow(row);
    return resolvedPeakList;
}
Also used: ArrayList(java.util.ArrayList) SimplePeakListAppliedMethod(net.sf.mzmine.datamodel.impl.SimplePeakListAppliedMethod) SimplePeakListRow(net.sf.mzmine.datamodel.impl.SimplePeakListRow) Feature(net.sf.mzmine.datamodel.Feature) SimpleFeature(net.sf.mzmine.datamodel.impl.SimpleFeature) SimpleDataPoint(net.sf.mzmine.datamodel.impl.SimpleDataPoint) PeakListRow(net.sf.mzmine.datamodel.PeakListRow) DataPoint(net.sf.mzmine.datamodel.DataPoint) Peak(dulab.adap.datamodel.Peak) SimplePeakList(net.sf.mzmine.datamodel.impl.SimplePeakList) SimpleIsotopePattern(net.sf.mzmine.datamodel.impl.SimpleIsotopePattern) Component(dulab.adap.datamodel.Component) SimplePeakInformation(net.sf.mzmine.datamodel.impl.SimplePeakInformation) RawDataFile(net.sf.mzmine.datamodel.RawDataFile) PeakList(net.sf.mzmine.datamodel.PeakList) HashMap(java.util.HashMap) Map(java.util.Map) NavigableMap(java.util.NavigableMap) TreeMap(java.util.TreeMap)
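
The first half of decomposePeaks() is a recurring pattern in these examples: derive a new SimplePeakList from a source list, copy the source's processing history, then append the current task. A sketch of that pattern in isolation (class and method names are our own; the name is built with getName() here instead of the list's toString(), and the ParameterSet import path is an assumption):

import net.sf.mzmine.datamodel.PeakList;
import net.sf.mzmine.datamodel.RawDataFile;
import net.sf.mzmine.datamodel.impl.SimplePeakList;
import net.sf.mzmine.datamodel.impl.SimplePeakListAppliedMethod;
import net.sf.mzmine.parameters.ParameterSet;

// Sketch of the "derive a new feature list" idiom from decomposePeaks().
final class DerivedPeakListSketch {

    static PeakList deriveFrom(PeakList source, String suffix, String taskDescription,
            ParameterSet parameters) {
        RawDataFile dataFile = source.getRawDataFile(0);
        PeakList derived = new SimplePeakList(source.getName() + " " + suffix, dataFile);
        // Carry over the processing history of the source list.
        for (PeakList.PeakListAppliedMethod method : source.getAppliedMethods()) {
            derived.addDescriptionOfAppliedTask(method);
        }
        // Record this task as the most recent applied method.
        derived.addDescriptionOfAppliedTask(new SimplePeakListAppliedMethod(taskDescription, parameters));
        return derived;
    }
}

On Java 8 and later, the retention-time sort at the end of the example could also be written as newPeakListRows.sort(Comparator.comparingDouble(PeakListRow::getAverageRT)).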

Example 4 with SimplePeakList

use of net.sf.mzmine.datamodel.impl.SimplePeakList in project mzmine2 by mzmine.

the class IsotopePeakScannerTask method run.

@Override
public void run() {
    if (!checkParameters())
        return;
    setStatus(TaskStatus.PROCESSING);
    totalRows = peakList.getNumberOfRows();
    double[][] diff = setUpDiffAutoCarbon();
    if (diff == null) {
        message = "ERROR: could not set up diff.";
        setStatus(TaskStatus.ERROR);
        return;
    }
    logger.info("diff.length: " + diff.length);
    logger.info("maxPatternIndex: " + maxPatternIndex);
    logger.info("maxPatternSize: " + maxPatternSize);
    // get all rows and sort by m/z
    PeakListRow[] rows = peakList.getRows();
    Arrays.sort(rows, new PeakListRowSorter(SortingProperty.MZ, SortingDirection.Ascending));
    PeakListHandler plh = new PeakListHandler();
    plh.setUp(peakList);
    resultPeakList = new SimplePeakList(peakList.getName() + suffix, peakList.getRawDataFiles());
    PeakListHandler resultMap = new PeakListHandler();
    for (int i = 0; i < totalRows; i++) {
        // i will represent the index of the row in peakList
        if (rows[i].getPeakIdentities().length > 0) {
            finishedRows++;
            continue;
        }
        message = "Row " + i + "/" + totalRows;
        // now get all peaks that lie within RT and maxIsotopeMassRange: pL[index].mz ->
        // pL[index].mz+maxMass
        ArrayList<PeakListRow> groupedPeaks = groupPeaks(rows, i, diff[maxPatternIndex][diff[maxPatternIndex].length - 1]);
        if (groupedPeaks.size() < 2) {
            finishedRows++;
            continue;
        }
        // else
        // logger.info("groupedPeaks.size > 2 in row: " + i + " size: " +
        // groupedPeaks.size());
        // this will store row indexes of all features with fitting rt and mz
        ResultBuffer[][] resultBuffer = new ResultBuffer[diff.length][];
        for (int p = 0; p < diff.length; p++) {
            // resultBuffer[i] index will represent Isotope[i] (if numAtoms = 0)
            resultBuffer[p] = new ResultBuffer[diff[p].length];
            // [p][0] will be the isotope with lowest mass
            for (int k = 0; k < diff[p].length; k++)
                resultBuffer[p][k] = new ResultBuffer();
        }
        boolean[] trueBuffers = new boolean[diff.length];
        Arrays.fill(trueBuffers, false);
        // go through all possible peaks
        for (int j = 0; j < groupedPeaks.size(); j++) {
            for (int p = 0; p < diff.length; p++) {
                // check for each peak if it is a possible feature for every diff[] (isotope)
                for (int k = 0; k < diff[p].length; k++) {
                    // p = pattern index for autoCarbon
                    if (mzTolerance.checkWithinTolerance(groupedPeaks.get(0).getAverageMZ() + diff[p][k], groupedPeaks.get(j).getAverageMZ())) {
                        // this will automatically add groupedPeaks[0] to the list -> isotope with
                        // lowest mass
                        // +1 result for isotope k
                        resultBuffer[p][k].addFound();
                        // row in groupedPeaks[]
                        resultBuffer[p][k].addRow(j);
                        resultBuffer[p][k].addID(groupedPeaks.get(j).getID());
                    }
                }
            }
        }
        boolean foundOne = false;
        for (int p = 0; p < diff.length; p++) {
            if (checkIfAllTrue(resultBuffer[p])) {
                // this means that for every isotope we expected to find,
                // we found one or more possible features
                foundOne = true;
                trueBuffers[p] = true;
                // logger.info("Row: " + i + " filled buffer[" + p + "]");
            }
        }
        if (!foundOne) {
            finishedRows++;
            continue;
        }
        Candidates[] candidates = new Candidates[diff.length];
        for (int p = 0; p < diff.length; p++) candidates[p] = new Candidates(diff[p].length, minHeight, mzTolerance, pattern[p], massListName, plh, ratingType);
        for (int p = 0; p < diff.length; p++) {
            if (!trueBuffers[p])
                continue;
            // reminder: resultBuffer.length = diff.length
            for (int k = 0; k < resultBuffer[p].length; k++) {
                for (int l = 0; l < resultBuffer[p][k].getFoundCount(); l++) {
                    // k represents index resultBuffer[k] and thereby the isotope number
                    // l represents the number of results in resultBuffer[k]
                    candidates[p].checkForBetterRating(k, groupedPeaks.get(0), groupedPeaks.get(resultBuffer[p][k].getRow(l)), minRating, checkIntensity);
                }
            }
        }
        foundOne = false;
        boolean[] trueCandidates = new boolean[diff.length];
        Arrays.fill(trueCandidates, false);
        for (int p = 0; p < diff.length; p++) {
            if (trueBuffers[p] && checkIfAllTrue(candidates[p].getCandidates())) {
                trueCandidates[p] = true;
                foundOne = true;
            // logger.info("Row: " + i + " filled candidates[" + p + "]");
            }
        }
        if (!foundOne) {
            finishedRows++;
            // jump to next i
            continue;
        }
        // find best result now, first we have to calc avg ratings if specified by user
        int bestPatternIndex = 0;
        double bestRating = 0.0;
        for (int p = 0; p < diff.length; p++) {
            if (!trueCandidates[p])
                continue;
            if (accurateAvgIntensity)
                candidates[p].calcAvgRatings();
            if (accurateAvgIntensity && candidates[p].getAvgAccAvgRating() > bestRating) {
                bestPatternIndex = p;
                bestRating = candidates[p].getAvgAccAvgRating();
            } else if (!accurateAvgIntensity && candidates[p].getSimpleAvgRating() > bestRating) {
                bestPatternIndex = p;
                bestRating = candidates[p].getSimpleAvgRating();
            }
        }
        if (!checkIfAllTrue(candidates[bestPatternIndex].getCandidates())) {
            logger.warning("We were about to add candidates with null pointers.\nThis was no valid result. Continueing.");
            continue;
        }
        // TODO: this shouldn't be needed; fix the bug that causes the crash later on.
        // This happens occasionally if the user wants accurate averaging but does not filter
        // by RT: possible isotope peaks are then found although they are not detected at the same
        // time, which makes the candidates return -1.0 and sooner or later leads to a
        // null pointer. Fixing this will be done in a future update, but needs a rework of the
        // Candidates class.
        // The results you miss by skipping here would not have been valid results anyway, so this
        // is not urgent, but the fix will make for cleaner code.
        // PeakListRow parent = copyPeakRow(peakList.getRow(i));
        boolean allPeaksAddable = true;
        List<PeakListRow> rowBuffer = new ArrayList<PeakListRow>();
        PeakListRow original = getRowFromCandidate(candidates, bestPatternIndex, 0, plh);
        if (original == null)
            continue;
        PeakListRow parent = copyPeakRow(original);
        // if we can assign this row multiple times we have to copy the comment,
        // because adding it to the map twice will overwrite the results
        if (resultMap.containsID(parent.getID()))
            addComment(parent, resultMap.getRowByID(parent.getID()).getComment());
        // the ID is added so results can later be sorted by it
        addComment(parent, parent.getID() + "--IS PARENT--");
        if (carbonRange != 1)
            addComment(parent, "BestPattern: " + pattern[bestPatternIndex].getDescription());
        rowBuffer.add(parent);
        DataPoint[] dp = new DataPoint[pattern[bestPatternIndex].getNumberOfDataPoints()];
        if (accurateAvgIntensity) {
            dp[0] = new SimpleDataPoint(parent.getAverageMZ(), candidates[bestPatternIndex].getAvgHeight(0));
        } else {
            dp[0] = new SimpleDataPoint(parent.getAverageMZ(), parent.getAverageHeight());
        }
        // we skip k = 0 because it equals groupedPeaks[0] / candidates.get(0), which we added before
        for (int k = 1; k < candidates[bestPatternIndex].size(); k++) {
            PeakListRow originalChild = getRowFromCandidate(candidates, bestPatternIndex, k, plh);
            if (originalChild == null) {
                allPeaksAddable = false;
                continue;
            }
            PeakListRow child = copyPeakRow(originalChild);
            if (accurateAvgIntensity) {
                dp[k] = new SimpleDataPoint(child.getAverageMZ(), candidates[bestPatternIndex].getAvgHeight(k));
            } else {
                dp[k] = new SimpleDataPoint(child.getAverageMZ(), child.getAverageHeight());
            }
            String average = "";
            if (accurateAvgIntensity) {
                average = " AvgRating: " + round(candidates[bestPatternIndex].getAvgRating(k), 3);
            }
            addComment(parent, "Intensity ratios: " + getIntensityRatios(pattern[bestPatternIndex], pattern[bestPatternIndex].getHighestDataPointIndex()));
            if (accurateAvgIntensity)
                addComment(parent, " Avg pattern rating: " + round(candidates[bestPatternIndex].getAvgAccAvgRating(), 3));
            else
                addComment(parent, " pattern rating: " + round(candidates[bestPatternIndex].getSimpleAvgRating(), 3));
            addComment(child, (parent.getID() + "-Parent ID" + " m/z-shift(ppm): " + round(((child.getAverageMZ() - parent.getAverageMZ()) - diff[bestPatternIndex][k]) / child.getAverageMZ() * 1E6, 2) + " I(c)/I(p): " + round(child.getAverageHeight() / plh.getRowByID(candidates[bestPatternIndex].get(pattern[bestPatternIndex].getHighestDataPointIndex()).getCandID()).getAverageHeight(), 2) + " Identity: " + pattern[bestPatternIndex].getIsotopeComposition(k) + " Rating: " + round(candidates[bestPatternIndex].get(k).getRating(), 3) + average));
            rowBuffer.add(child);
        }
        if (!allPeaksAddable)
            continue;
        IsotopePattern resultPattern = new SimpleIsotopePattern(dp, IsotopePatternStatus.DETECTED, element + " monoisotopic mass: " + parent.getAverageMZ());
        parent.getBestPeak().setIsotopePattern(resultPattern);
        for (PeakListRow row : rowBuffer) {
            row.getBestPeak().setIsotopePattern(resultPattern);
            resultMap.addRow(row);
        }
        if (isCanceled())
            return;
        finishedRows++;
    }
    ArrayList<Integer> keys = resultMap.getAllKeys();
    for (int j = 0; j < keys.size(); j++) resultPeakList.addRow(resultMap.getRowByID(keys.get(j)));
    if (resultPeakList.getNumberOfRows() > 1)
        addResultToProject();
    else
        message = "Element not found.";
    setStatus(TaskStatus.FINISHED);
}
Also used: ArrayList(java.util.ArrayList) IsotopePattern(net.sf.mzmine.datamodel.IsotopePattern) ExtendedIsotopePattern(net.sf.mzmine.datamodel.impl.ExtendedIsotopePattern) SimpleIsotopePattern(net.sf.mzmine.datamodel.impl.SimpleIsotopePattern) PeakListRowSorter(net.sf.mzmine.util.PeakListRowSorter) SimpleDataPoint(net.sf.mzmine.datamodel.impl.SimpleDataPoint) SimplePeakListRow(net.sf.mzmine.datamodel.impl.SimplePeakListRow) PeakListRow(net.sf.mzmine.datamodel.PeakListRow) DataPoint(net.sf.mzmine.datamodel.DataPoint) SimplePeakList(net.sf.mzmine.datamodel.impl.SimplePeakList)
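
Once candidates survive all the filters, the task funnels result rows into a PeakListHandler keyed by row ID and only afterwards fills the result SimplePeakList, so a row is added once even when several patterns touch it. A stripped-down sketch of that collect-then-flush step (a plain TreeMap stands in for the module's PeakListHandler; class and method names are our own):

import java.util.Map;
import java.util.TreeMap;

import net.sf.mzmine.datamodel.PeakListRow;
import net.sf.mzmine.datamodel.RawDataFile;
import net.sf.mzmine.datamodel.impl.SimplePeakList;

// Sketch of the collect-then-add pattern at the end of run().
final class ResultCollectorSketch {

    private final Map<Integer, PeakListRow> resultsById = new TreeMap<>();

    void addRow(PeakListRow row) {
        // Later additions with the same ID overwrite earlier copies, which is
        // why the task above merges comments before re-adding a parent row.
        resultsById.put(row.getID(), row);
    }

    SimplePeakList toPeakList(String name, RawDataFile... dataFiles) {
        SimplePeakList result = new SimplePeakList(name, dataFiles);
        for (PeakListRow row : resultsById.values()) {
            result.addRow(row);
        }
        return result;
    }
}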

Example 5 with SimplePeakList

use of net.sf.mzmine.datamodel.impl.SimplePeakList in project mzmine2 by mzmine.

the class IsotopeGrouperTask method run.

/**
 * @see Runnable#run()
 */
public void run() {
    setStatus(TaskStatus.PROCESSING);
    logger.info("Running isotopic peak grouper on " + peakList);
    // We assume source peakList contains one datafile
    RawDataFile dataFile = peakList.getRawDataFile(0);
    // Create a new deisotoped peakList
    deisotopedPeakList = new SimplePeakList(peakList + " " + suffix, peakList.getRawDataFiles());
    // Collect all selected charge states
    int[] charges = new int[maximumCharge];
    for (int i = 0; i < maximumCharge; i++) charges[i] = i + 1;
    // Sort peaks by descending height
    Feature[] sortedPeaks = peakList.getPeaks(dataFile);
    Arrays.sort(sortedPeaks, new PeakSorter(SortingProperty.Height, SortingDirection.Descending));
    // Loop through all peaks
    totalPeaks = sortedPeaks.length;
    for (int ind = 0; ind < totalPeaks; ind++) {
        if (isCanceled())
            return;
        Feature aPeak = sortedPeaks[ind];
        // Check if peak was already deleted
        if (aPeak == null) {
            processedPeaks++;
            continue;
        }
        // Check which charge state fits best around this peak
        int bestFitCharge = 0;
        int bestFitScore = -1;
        Vector<Feature> bestFitPeaks = null;
        for (int charge : charges) {
            Vector<Feature> fittedPeaks = new Vector<Feature>();
            fittedPeaks.add(aPeak);
            fitPattern(fittedPeaks, aPeak, charge, sortedPeaks);
            int score = fittedPeaks.size();
            if ((score > bestFitScore) || ((score == bestFitScore) && (bestFitCharge > charge))) {
                bestFitScore = score;
                bestFitCharge = charge;
                bestFitPeaks = fittedPeaks;
            }
        }
        PeakListRow oldRow = peakList.getPeakRow(aPeak);
        assert bestFitPeaks != null;
        // If only the peak itself was fitted (no other isotopes), keep the original row in the feature list
        if (bestFitPeaks.size() == 1) {
            deisotopedPeakList.addRow(oldRow);
            processedPeaks++;
            continue;
        }
        // Convert the peak pattern to array
        Feature[] originalPeaks = bestFitPeaks.toArray(new Feature[0]);
        // Create a new SimpleIsotopePattern
        DataPoint[] isotopes = new DataPoint[bestFitPeaks.size()];
        for (int i = 0; i < isotopes.length; i++) {
            Feature p = originalPeaks[i];
            isotopes[i] = new SimpleDataPoint(p.getMZ(), p.getHeight());
        }
        SimpleIsotopePattern newPattern = new SimpleIsotopePattern(isotopes, IsotopePatternStatus.DETECTED, aPeak.toString());
        // Depending on the user's choice, keep either the most intense or the lowest m/z peak
        if (chooseMostIntense) {
            Arrays.sort(originalPeaks, new PeakSorter(SortingProperty.Height, SortingDirection.Descending));
        } else {
            Arrays.sort(originalPeaks, new PeakSorter(SortingProperty.MZ, SortingDirection.Ascending));
        }
        Feature newPeak = new SimpleFeature(originalPeaks[0]);
        newPeak.setIsotopePattern(newPattern);
        newPeak.setCharge(bestFitCharge);
        // Keep old ID
        int oldID = oldRow.getID();
        SimplePeakListRow newRow = new SimplePeakListRow(oldID);
        PeakUtils.copyPeakListRowProperties(oldRow, newRow);
        newRow.addPeak(dataFile, newPeak);
        deisotopedPeakList.addRow(newRow);
        // Remove all peaks already assigned to isotope pattern
        for (int i = 0; i < sortedPeaks.length; i++) {
            if (bestFitPeaks.contains(sortedPeaks[i]))
                sortedPeaks[i] = null;
        }
        // Update completion rate
        processedPeaks++;
    }
    // Add new peakList to the project
    project.addPeakList(deisotopedPeakList);
    // Load previous applied methods
    for (PeakListAppliedMethod proc : peakList.getAppliedMethods()) {
        deisotopedPeakList.addDescriptionOfAppliedTask(proc);
    }
    // Add task description to peakList
    deisotopedPeakList.addDescriptionOfAppliedTask(new SimplePeakListAppliedMethod("Isotopic peaks grouper", parameters));
    // Remove the original peakList if requested
    if (removeOriginal)
        project.removePeakList(peakList);
    logger.info("Finished isotopic peak grouper on " + peakList);
    setStatus(TaskStatus.FINISHED);
}
Also used: SimplePeakListAppliedMethod(net.sf.mzmine.datamodel.impl.SimplePeakListAppliedMethod) PeakListAppliedMethod(net.sf.mzmine.datamodel.PeakList.PeakListAppliedMethod) Feature(net.sf.mzmine.datamodel.Feature) SimpleFeature(net.sf.mzmine.datamodel.impl.SimpleFeature) SimplePeakListRow(net.sf.mzmine.datamodel.impl.SimplePeakListRow) DataPoint(net.sf.mzmine.datamodel.DataPoint) SimpleDataPoint(net.sf.mzmine.datamodel.impl.SimpleDataPoint) PeakListRow(net.sf.mzmine.datamodel.PeakListRow) RawDataFile(net.sf.mzmine.datamodel.RawDataFile) PeakSorter(net.sf.mzmine.util.PeakSorter) SimplePeakList(net.sf.mzmine.datamodel.impl.SimplePeakList) SimpleIsotopePattern(net.sf.mzmine.datamodel.impl.SimpleIsotopePattern) Vector(java.util.Vector)
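
Examples 4 and 5 share one more step worth isolating: turning a set of grouped peaks into a SimpleIsotopePattern and attaching it to the representative feature. A sketch of just that step, assuming a free-standing helper (the class and method names are our own):

import net.sf.mzmine.datamodel.DataPoint;
import net.sf.mzmine.datamodel.Feature;
import net.sf.mzmine.datamodel.IsotopePattern.IsotopePatternStatus;
import net.sf.mzmine.datamodel.impl.SimpleDataPoint;
import net.sf.mzmine.datamodel.impl.SimpleFeature;
import net.sf.mzmine.datamodel.impl.SimpleIsotopePattern;

// Sketch of the attach-an-isotope-pattern step shared by Examples 4 and 5.
final class IsotopePatternSketch {

    static Feature withDetectedPattern(Feature representative, Feature[] groupedPeaks, String description) {
        // One DataPoint per grouped peak, taken at the peak's m/z and height.
        DataPoint[] isotopes = new DataPoint[groupedPeaks.length];
        for (int i = 0; i < groupedPeaks.length; i++) {
            isotopes[i] = new SimpleDataPoint(groupedPeaks[i].getMZ(), groupedPeaks[i].getHeight());
        }
        // Copy the representative feature and set the detected pattern on it,
        // as IsotopeGrouperTask does before building the new SimplePeakListRow.
        Feature copy = new SimpleFeature(representative);
        copy.setIsotopePattern(new SimpleIsotopePattern(isotopes, IsotopePatternStatus.DETECTED, description));
        return copy;
    }
}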

Aggregations

SimplePeakList (net.sf.mzmine.datamodel.impl.SimplePeakList) 47
PeakListRow (net.sf.mzmine.datamodel.PeakListRow) 39
SimplePeakListAppliedMethod (net.sf.mzmine.datamodel.impl.SimplePeakListAppliedMethod) 29
SimplePeakListRow (net.sf.mzmine.datamodel.impl.SimplePeakListRow) 29
Feature (net.sf.mzmine.datamodel.Feature) 25
RawDataFile (net.sf.mzmine.datamodel.RawDataFile) 25
DataPoint (net.sf.mzmine.datamodel.DataPoint) 19
PeakList (net.sf.mzmine.datamodel.PeakList) 18
PeakListAppliedMethod (net.sf.mzmine.datamodel.PeakList.PeakListAppliedMethod) 14
SimpleFeature (net.sf.mzmine.datamodel.impl.SimpleFeature) 14
ArrayList (java.util.ArrayList) 11
SimpleDataPoint (net.sf.mzmine.datamodel.impl.SimpleDataPoint) 10
PeakListRowSorter (net.sf.mzmine.util.PeakListRowSorter) 10
Scan (net.sf.mzmine.datamodel.Scan) 8
Vector (java.util.Vector) 7
PeakIdentity (net.sf.mzmine.datamodel.PeakIdentity) 7
IsotopePattern (net.sf.mzmine.datamodel.IsotopePattern) 5
Desktop (net.sf.mzmine.desktop.Desktop) 5
HeadLessDesktop (net.sf.mzmine.desktop.impl.HeadLessDesktop) 5
SimpleIsotopePattern (net.sf.mzmine.datamodel.impl.SimpleIsotopePattern) 4