Use of net.sf.mzmine.modules.peaklistmethods.isotopes.isotopepeakscanner.Candidates in project mzmine2 by mzmine.
The class NeutralLossFilterTask, method run().
@Override
public void run() {
setStatus(TaskStatus.PROCESSING);
totalRows = peakList.getNumberOfRows();
ArrayList<Double> diff = setUpDiff();
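// added note: diff holds the m/z offsets searched for below; its last entry is passed to groupPeaks() as the maximum mass range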
if (diff == null || Double.compare(dMassLoss, 0.0d) == 0) {
setErrorMessage("Could not set up neutral loss. Mass loss could not be calculated from the formula or is 0.0");
setStatus(TaskStatus.ERROR);
return;
}
if (suffix.equals("auto")) {
if (molecule.equals(""))
suffix = " NL: " + dMassLoss + " RTtol: " + rtTolerance.getTolerance() + "_results";
else
suffix = " NL (" + molecule + "): " + dMassLoss + " RTtol: " + rtTolerance.getTolerance() + "_results";
}
// get all rows and sort by m/z
PeakListRow[] rows = peakList.getRows();
Arrays.sort(rows, new PeakListRowSorter(SortingProperty.MZ, SortingDirection.Ascending));
PeakListHandler plh = new PeakListHandler();
plh.setUp(peakList);
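// added note: plh wraps the peak list as an ID -> row map, so candidate IDs can later be resolved back to rows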
resultPeakList = new SimplePeakList(peakList.getName() + suffix, peakList.getRawDataFiles());
PeakListHandler resultMap = new PeakListHandler();
for (int i = 0; i < totalRows; i++) {
// i will represent the index of the row in peakList
if (rows[i].getPeakIdentities().length > 0) {
finishedRows++;
continue;
}
message = "Row " + i + "/" + totalRows;
// now get all peaks that lie within RT and maxIsotopeMassRange: pL[index].mz ->
// pL[index].mz+maxMass
ArrayList<PeakListRow> groupedPeaks = groupPeaks(rows, i, diff.get(diff.size() - 1).doubleValue());
if (groupedPeaks.size() < 2) {
finishedRows++;
continue;
}
// this will store row indexes and m/z
ResultBuffer[] resultBuffer = new ResultBuffer[diff.size()];
// resultBuffer[i] index will represent Isotope[i] (if numAtoms = 0)
// [0] will be the isotope with lowest mass
for (int a = 0; a < diff.size(); a++)
resultBuffer[a] = new ResultBuffer();
// go through all possible peaks
for (int j = 0; j < groupedPeaks.size(); j++) {
// check for each peak if it is a possible feature for every diff[] (isotope)
for (int k = 0; k < diff.size(); k++) {
// k represents the isotope number the peak will be a candidate for
if (mzTolerance.checkWithinTolerance(groupedPeaks.get(0).getAverageMZ() + diff.get(k), groupedPeaks.get(j).getAverageMZ())) {
// this will automatically add groupedPeaks[0] to the list -> isotope with
// lowest mass
// +1 result for isotope k
resultBuffer[k].addFound();
// row in groupedPeaks[]
resultBuffer[k].addRow(j);
resultBuffer[k].addID(groupedPeaks.get(j).getID());
}
}
}
// this means that for every isotope we expected to find, we found one or more possible features
if (!checkIfAllTrue(resultBuffer)) {
finishedRows++;
continue;
}
Candidates candidates = new Candidates(diff.size(), minHeight, mzTolerance, plh);
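// added note: candidates keeps, for each entry in diff, the best-rated candidate peak found in the loop below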
// reminder: resultBuffer.length = diff.size()
for (int k = 0; k < resultBuffer.length; k++) {
for (int l = 0; l < resultBuffer[k].getFoundCount(); l++) {
// k represents index resultBuffer[k] and thereby the isotope number
// l represents the number of results in resultBuffer[k]
candidates.get(k).checkForBetterRating(groupedPeaks, 0, resultBuffer[k].getRow(l), diff.get(k), minRating);
}
}
if (!checkIfAllTrue(candidates.getCandidates())) {
finishedRows++;
// jump to next i
continue;
}
String comParent = "", comChild = "";
PeakListRow originalChild = getRowFromCandidate(candidates, 0, plh);
if (originalChild == null) {
finishedRows++;
continue;
}
PeakListRow child = copyPeakRow(originalChild);
if (resultMap.containsID(child.getID()))
comChild += resultMap.getRowByID(child.getID()).getComment();
comChild += "Parent ID: " + candidates.get(1).getCandID();
addComment(child, comChild);
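// added note: rowBuffer collects the child and its parent rows; they are only added to the results if every parent row could be resolved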
List<PeakListRow> rowBuffer = new ArrayList<PeakListRow>();
boolean allPeaksAddable = true;
rowBuffer.add(child);
// we skip k=0 because == groupedPeaks[0] which we added before
for (int k = 1; k < candidates.size(); k++) {
PeakListRow originalParent = getRowFromCandidate(candidates, 1, plh);
if (originalParent == null) {
allPeaksAddable = false;
continue;
}
PeakListRow parent = copyPeakRow(originalParent);
if (resultMap.containsID(parent.getID()))
comParent += resultMap.getRowByID(parent.getID()).getComment();
comParent += ("[--IS PARENT-- child ID: " + child.getID() + " ] | ");
addComment(parent, comParent);
addComment(child, " m/z shift(ppm): " + round(((parent.getAverageMZ() - child.getAverageMZ()) - diff.get(1)) / parent.getAverageMZ() * 1E6, 2) + " ");
rowBuffer.add(parent);
}
if (allPeaksAddable)
for (PeakListRow row : rowBuffer) resultMap.addRow(row);
if (isCanceled())
return;
finishedRows++;
}
ArrayList<Integer> keys = resultMap.getAllKeys();
for (int j = 0; j < keys.size(); j++) resultPeakList.addRow(resultMap.getRowByID(keys.get(j)));
if (resultPeakList.getNumberOfRows() > 1)
addResultToProject();
else
message = "Element not found.";
setStatus(TaskStatus.FINISHED);
}
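For reference, the Candidates usage in the method above can be distilled into a small helper. This is a minimal sketch, not part of the mzmine2 source: the method name rateCandidates is hypothetical, it is assumed to live inside NeutralLossFilterTask so that the fields minHeight, mzTolerance, minRating, plh and the helper checkIfAllTrue(...) are available as used in run() above, and only Candidates calls that actually appear above are used.
// Minimal sketch (hypothetical helper, not from the mzmine2 source).
private Candidates rateCandidates(ArrayList<PeakListRow> groupedPeaks, ArrayList<Double> diff,
    ResultBuffer[] resultBuffer) {
  // one Candidate slot per expected mass difference in diff
  Candidates candidates = new Candidates(diff.size(), minHeight, mzTolerance, plh);
  for (int k = 0; k < resultBuffer.length; k++) {
    for (int l = 0; l < resultBuffer[k].getFoundCount(); l++) {
      // keep only the best-rated peak for diff[k]; index 0 is the reference (lowest-mass) peak
      candidates.get(k).checkForBetterRating(groupedPeaks, 0, resultBuffer[k].getRow(l),
          diff.get(k), minRating);
    }
  }
  // return the rated candidates only if every expected mass difference found one
  return checkIfAllTrue(candidates.getCandidates()) ? candidates : null;
}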