Use of net.sf.mzmine.datamodel.PeakListRow in project mzmine2 by mzmine.
The class CustomDBSearchTask, method processOneLine.
private void processOneLine(String[] values) {
int numOfColumns = Math.min(fieldOrder.length, values.length);
String lineID = null, lineName = null, lineFormula = null;
double lineMZ = 0, lineRT = 0;
for (int i = 0; i < numOfColumns; i++) {
if (fieldOrder[i] == FieldItem.FIELD_ID)
lineID = values[i];
if (fieldOrder[i] == FieldItem.FIELD_NAME)
lineName = values[i];
if (fieldOrder[i] == FieldItem.FIELD_FORMULA)
lineFormula = values[i];
if (fieldOrder[i] == FieldItem.FIELD_MZ)
lineMZ = Double.parseDouble(values[i]);
if (fieldOrder[i] == FieldItem.FIELD_RT)
lineRT = Double.parseDouble(values[i]);
}
SimplePeakIdentity newIdentity = new SimplePeakIdentity(lineName, lineFormula, dataBaseFile.getName(), lineID, null);
for (PeakListRow peakRow : peakList.getRows()) {
Range<Double> mzRange = mzTolerance.getToleranceRange(peakRow.getAverageMZ());
Range<Double> rtRange = rtTolerance.getToleranceRange(peakRow.getAverageRT());
boolean mzMatches = (lineMZ == 0d) || mzRange.contains(lineMZ);
boolean rtMatches = (lineRT == 0d) || rtRange.contains(lineRT);
if (mzMatches && rtMatches) {
logger.finest("Found compound " + lineName + " (m/z " + lineMZ + ", RT " + lineRT + ")");
// add new identity to the row
peakRow.addPeakIdentity(newIdentity, false);
// Notify the GUI about the change in the project
MZmineCore.getProjectManager().getCurrentProject().notifyObjectChanged(peakRow, false);
}
}
}
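The lookup above hinges on building a symmetric tolerance window around each row's average m/z and RT and testing containment. Below is a minimal, self-contained sketch of that containment test using Guava's Range; the tolerance constants and the toleranceRange helper are illustrative assumptions, not the mzmine2 MZTolerance/RTTolerance API.
import com.google.common.collect.Range;

public class ToleranceMatchSketch {
    // Hypothetical absolute tolerances, chosen only for illustration.
    static final double MZ_TOL = 0.005; // m/z units
    static final double RT_TOL = 0.2;   // minutes

    // Builds the closed interval [center - tol, center + tol], mirroring what a
    // getToleranceRange(...) call conceptually returns.
    static Range<Double> toleranceRange(double center, double tol) {
        return Range.closed(center - tol, center + tol);
    }

    public static void main(String[] args) {
        double rowMZ = 301.1412, rowRT = 5.43;   // averages of a feature-list row
        double lineMZ = 301.1420, lineRT = 5.50; // values parsed from a database line
        // A zero value means the field was absent, so that dimension always matches.
        boolean mzMatches = (lineMZ == 0d) || toleranceRange(rowMZ, MZ_TOL).contains(lineMZ);
        boolean rtMatches = (lineRT == 0d) || toleranceRange(rowRT, RT_TOL).contains(lineRT);
        System.out.println("match = " + (mzMatches && rtMatches)); // match = true
    }
}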
Use of net.sf.mzmine.datamodel.PeakListRow in project mzmine2 by mzmine.
The class SameRangeTask, method run.
public void run() {
logger.info("Started gap-filling " + peakList);
setStatus(TaskStatus.PROCESSING);
// Get total number of rows
totalRows = peakList.getNumberOfRows();
// Get feature list columns
RawDataFile[] columns = peakList.getRawDataFiles();
// Create new feature list
processedPeakList = new SimplePeakList(peakList + " " + suffix, columns);
// Create a stream to process the data in parallel
processedRowsAtomic = new AtomicInteger(0);
List<PeakListRow> outputList = Collections.synchronizedList(new ArrayList<>());
peakList.parallelStream().forEach(sourceRow -> {
// Canceled?
if (isCanceled())
return;
PeakListRow newRow = new SimplePeakListRow(sourceRow.getID());
// Copy comment
newRow.setComment(sourceRow.getComment());
// Copy identities
for (PeakIdentity ident : sourceRow.getPeakIdentities()) newRow.addPeakIdentity(ident, false);
if (sourceRow.getPreferredPeakIdentity() != null)
newRow.setPreferredPeakIdentity(sourceRow.getPreferredPeakIdentity());
// Copy each peak and fill gaps
for (RawDataFile column : columns) {
// Canceled?
if (isCanceled())
return;
// Get current peak
Feature currentPeak = sourceRow.getPeak(column);
// If there is a gap, try to fill it
if (currentPeak == null)
currentPeak = fillGap(sourceRow, column);
// If a peak was found or created, add it
if (currentPeak != null)
newRow.addPeak(column, currentPeak);
}
outputList.add(newRow);
processedRowsAtomic.getAndAdd(1);
});
outputList.stream().forEach(newRow -> {
processedPeakList.addRow((PeakListRow) newRow);
});
// Canceled?
if (isCanceled())
return;
// Append processed feature list to the project
project.addPeakList(processedPeakList);
// Add quality parameters to peaks
QualityParameters.calculateQualityParameters(processedPeakList);
// Add task description to peakList
processedPeakList.addDescriptionOfAppliedTask(new SimplePeakListAppliedMethod("Gap filling using RT and m/z range", parameters));
// Remove the original peaklist if requested
if (removeOriginal)
project.removePeakList(peakList);
setStatus(TaskStatus.FINISHED);
logger.info("Finished gap-filling " + peakList);
}
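The run above processes rows with parallelStream(), buffering results in a synchronizedList and counting progress with an AtomicInteger, then hands the buffer to the new feature list on a single thread. The same pattern in isolation (all names below are illustrative; the per-row work is a stand-in for gap filling):
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.concurrent.atomic.AtomicInteger;

public class ParallelRowSketch {
    public static void main(String[] args) {
        List<String> rows = List.of("row1", "row2", "row3", "row4");
        // Thread-safe buffer: worker threads append concurrently.
        List<String> output = Collections.synchronizedList(new ArrayList<>());
        AtomicInteger processed = new AtomicInteger(0);
        rows.parallelStream().forEach(row -> {
            output.add(row.toUpperCase()); // stand-in for per-row gap filling
            processed.getAndAdd(1);        // progress counter for a getFinishedPercentage()-style report
        });
        // Sequential hand-off, since the final destination is not assumed thread-safe;
        // note the buffer's order is nondeterministic after a parallel run.
        output.forEach(System.out::println);
    }
}
Staging into a synchronized buffer and adding rows sequentially afterwards is what lets the original code keep SimplePeakList.addRow off the worker threads entirely.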
Use of net.sf.mzmine.datamodel.PeakListRow in project mzmine2 by mzmine.
The class NeutralLossFilterTask, method run.
@Override
public void run() {
setStatus(TaskStatus.PROCESSING);
totalRows = peakList.getNumberOfRows();
ArrayList<Double> diff = setUpDiff();
if (diff == null || Double.compare(dMassLoss, 0.0d) == 0) {
setErrorMessage("Could not set up neutral loss. Mass loss could not be calculated from the formula or is 0.0");
setStatus(TaskStatus.ERROR);
return;
}
if (suffix.equals("auto")) {
if (molecule.equals(""))
suffix = " NL: " + dMassLoss + " RTtol: " + rtTolerance.getTolerance() + "_results";
else
suffix = " NL (" + molecule + "): " + dMassLoss + " RTtol: " + rtTolerance.getTolerance() + "_results";
}
// get all rows and sort by m/z
PeakListRow[] rows = peakList.getRows();
Arrays.sort(rows, new PeakListRowSorter(SortingProperty.MZ, SortingDirection.Ascending));
PeakListHandler plh = new PeakListHandler();
plh.setUp(peakList);
resultPeakList = new SimplePeakList(peakList.getName() + suffix, peakList.getRawDataFiles());
PeakListHandler resultMap = new PeakListHandler();
for (int i = 0; i < totalRows; i++) {
// i will represent the index of the row in peakList
if (rows[i].getPeakIdentities().length > 0) {
finishedRows++;
continue;
}
message = "Row " + i + "/" + totalRows;
// now get all peaks that lie within RT and maxIsotopeMassRange: pL[index].mz ->
// pL[index].mz+maxMass
ArrayList<PeakListRow> groupedPeaks = groupPeaks(rows, i, diff.get(diff.size() - 1).doubleValue());
if (groupedPeaks.size() < 2) {
finishedRows++;
continue;
}
// this will store row indexes and m/z values of the features found
ResultBuffer[] resultBuffer = new ResultBuffer[diff.size()];
// resultBuffer[a] will represent isotope a (if numAtoms = 0);
// [0] will be the isotope with the lowest mass
for (int a = 0; a < diff.size(); a++)
resultBuffer[a] = new ResultBuffer();
// go through all possible peaks
for (int j = 0; j < groupedPeaks.size(); j++) {
// check, for each peak, whether it is a possible feature for every diff[k] (isotope)
for (int k = 0; k < diff.size(); k++) {
// k represents the isotope number the peak will be a candidate for
if (mzTolerance.checkWithinTolerance(groupedPeaks.get(0).getAverageMZ() + diff.get(k), groupedPeaks.get(j).getAverageMZ())) {
// this will automatically add groupedPeaks[0] to the list -> isotope with
// lowest mass
// +1 result for isotope k
resultBuffer[k].addFound();
// row in groupedPeaks[]
resultBuffer[k].addRow(j);
resultBuffer[k].addID(groupedPeaks.get(j).getID());
}
}
}
// checkIfAllTrue passes only if, for every isotope we expected to find,
// one or more possible features were found
if (!checkIfAllTrue(resultBuffer)) {
finishedRows++;
continue;
}
Candidates candidates = new Candidates(diff.size(), minHeight, mzTolerance, plh);
// reminder: resultBuffer.length == diff.size()
for (int k = 0; k < resultBuffer.length; k++) {
for (int l = 0; l < resultBuffer[k].getFoundCount(); l++) {
// k is the index into resultBuffer and thereby the isotope number;
// l iterates over the results stored in resultBuffer[k]
candidates.get(k).checkForBetterRating(groupedPeaks, 0, resultBuffer[k].getRow(l), diff.get(k), minRating);
}
}
if (!checkIfAllTrue(candidates.getCandidates())) {
finishedRows++;
// jump to next i
continue;
}
String comParent = "", comChild = "";
PeakListRow originalChild = getRowFromCandidate(candidates, 0, plh);
if (originalChild == null) {
finishedRows++;
continue;
}
PeakListRow child = copyPeakRow(originalChild);
if (resultMap.containsID(child.getID()))
comChild += resultMap.getRowByID(child.getID()).getComment();
comChild += "Parent ID: " + candidates.get(1).getCandID();
addComment(child, comChild);
List<PeakListRow> rowBuffer = new ArrayList<PeakListRow>();
boolean allPeaksAddable = true;
rowBuffer.add(child);
// we skip k = 0 because it equals groupedPeaks[0], which we added before
for (int k = 1; k < candidates.size(); k++) {
PeakListRow originalParent = getRowFromCandidate(candidates, 1, plh);
if (originalParent == null) {
allPeaksAddable = false;
continue;
}
PeakListRow parent = copyPeakRow(originalParent);
if (resultMap.containsID(parent.getID()))
comParent += resultMap.getRowByID(parent.getID()).getComment();
comParent += ("[--IS PARENT-- child ID: " + child.getID() + " ] | ");
addComment(parent, comParent);
addComment(child, " m/z shift(ppm): " + round(((parent.getAverageMZ() - child.getAverageMZ()) - diff.get(1)) / parent.getAverageMZ() * 1E6, 2) + " ");
rowBuffer.add(parent);
}
if (allPeaksAddable)
for (PeakListRow row : rowBuffer) resultMap.addRow(row);
if (isCanceled())
return;
finishedRows++;
}
ArrayList<Integer> keys = resultMap.getAllKeys();
for (int j = 0; j < keys.size(); j++) resultPeakList.addRow(resultMap.getRowByID(keys.get(j)));
if (resultPeakList.getNumberOfRows() > 1)
addResultToProject();
else
message = "Element not found.";
setStatus(TaskStatus.FINISHED);
}
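checkIfAllTrue is not shown in this excerpt; per the comments above, the filter proceeds only when every expected mass-difference slot collected at least one candidate. A plausible reconstruction of the ResultBuffer[] overload, assuming only the getFoundCount() accessor already used above (a guess, not the actual mzmine2 source; the Candidate[] overload presumably mirrors it):
// Hypothetical reconstruction: true only if every buffer recorded at least
// one possible feature for its isotope/mass-difference slot.
private boolean checkIfAllTrue(ResultBuffer[] buffers) {
    for (ResultBuffer b : buffers) {
        if (b.getFoundCount() == 0)
            return false;
    }
    return true;
}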
Use of net.sf.mzmine.datamodel.PeakListRow in project mzmine2 by mzmine.
The class NeutralLossFilterTask, method groupPeaks.
/**
* @param pL feature list rows, sorted by ascending m/z
* @param parentIndex index of the possible parent peak
* @param maxDiff maximum m/z difference to scan above the parent peak
* @return an ArrayList<PeakListRow> of all peaks within the range pL[parentIndex].mz ->
*         pL[parentIndex].mz + maxDiff
*/
private ArrayList<PeakListRow> groupPeaks(PeakListRow[] pL, int parentIndex, double maxDiff) {
ArrayList<PeakListRow> buf = new ArrayList<PeakListRow>();
// this means the result will contain row(parentIndex) itself
buf.add(pL[parentIndex]);
double mz = pL[parentIndex].getAverageMZ();
double rt = pL[parentIndex].getAverageRT();
// starting at parentIndex + 1, so the parent peak itself is not added again
for (int i = parentIndex + 1; i < pL.length; i++) {
PeakListRow r = pL[i];
if (r.getAverageHeight() < minHeight)
continue;
if (!rtTolerance.checkWithinTolerance(rt, r.getAverageRT()) && checkRT)
continue;
if (pL[i].getAverageMZ() > mz && pL[i].getAverageMZ() <= (mz + maxDiff + mzTolerance.getMzTolerance())) {
buf.add(pL[i]);
}
// since pL is sorted by ascending mass, we can stop now
if (pL[i].getAverageMZ() > (mz + maxDiff))
return buf;
}
return buf;
}
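Because pL is sorted by ascending m/z, groupPeaks only scans forward from the parent until the first row whose m/z exceeds mz + maxDiff, so each call touches just the candidate window rather than the whole list. The same early-exit windowing on a plain sorted array (a minimal sketch, independent of the mzmine2 types):
import java.util.ArrayList;
import java.util.List;

public class SortedWindowSketch {
    // Returns all values v with anchor < v <= anchor + maxDiff,
    // stopping early because the input is sorted ascending.
    static List<Double> window(double[] sortedMz, int anchorIndex, double maxDiff) {
        double anchor = sortedMz[anchorIndex];
        List<Double> out = new ArrayList<>();
        for (int i = anchorIndex + 1; i < sortedMz.length; i++) {
            if (sortedMz[i] > anchor + maxDiff)
                break; // sorted input: nothing further can match
            if (sortedMz[i] > anchor)
                out.add(sortedMz[i]);
        }
        return out;
    }

    public static void main(String[] args) {
        double[] mz = {100.0, 118.01, 136.02, 300.5};
        System.out.println(window(mz, 0, 40.0)); // prints [118.01, 136.02]
    }
}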
Use of net.sf.mzmine.datamodel.PeakListRow in project mzmine2 by mzmine.
The class PeakFilterTask, method filterPeakList.
/**
* Filter the feature list.
*
* @param peakList feature list to filter.
* @return a new feature list with entries of the original feature list that pass the filtering.
*/
private PeakList filterPeakList(final PeakList peakList) {
// Make a copy of the peakList
final PeakList newPeakList = new SimplePeakList(peakList.getName() + ' ' + parameters.getParameter(RowsFilterParameters.SUFFIX).getValue(), peakList.getRawDataFiles());
// Get parameters - which filters are active
final boolean filterByDuration = parameters.getParameter(PeakFilterParameters.PEAK_DURATION).getValue();
final boolean filterByArea = parameters.getParameter(PeakFilterParameters.PEAK_AREA).getValue();
final boolean filterByHeight = parameters.getParameter(PeakFilterParameters.PEAK_HEIGHT).getValue();
final boolean filterByDatapoints = parameters.getParameter(PeakFilterParameters.PEAK_DATAPOINTS).getValue();
final boolean filterByFWHM = parameters.getParameter(PeakFilterParameters.PEAK_FWHM).getValue();
final boolean filterByTailingFactor = parameters.getParameter(PeakFilterParameters.PEAK_TAILINGFACTOR).getValue();
final boolean filterByAsymmetryFactor = parameters.getParameter(PeakFilterParameters.PEAK_ASYMMETRYFACTOR).getValue();
final boolean filterByMS2 = parameters.getParameter(PeakFilterParameters.MS2_Filter).getValue();
// Loop through all rows in feature list
final PeakListRow[] rows = peakList.getRows();
totalRows = rows.length;
for (processedRows = 0; !isCanceled() && processedRows < totalRows; processedRows++) {
final PeakListRow row = rows[processedRows];
final RawDataFile[] rawdatafiles = row.getRawDataFiles();
int totalRawDataFiles = rawdatafiles.length;
boolean[] keepPeak = new boolean[totalRawDataFiles];
for (int i = 0; i < totalRawDataFiles; i++) {
// Peak values
keepPeak[i] = true;
final Feature peak = row.getPeak(rawdatafiles[i]);
final double peakDuration = peak.getRawDataPointsRTRange().upperEndpoint() - peak.getRawDataPointsRTRange().lowerEndpoint();
final double peakArea = peak.getArea();
final double peakHeight = peak.getHeight();
final int peakDatapoints = peak.getScanNumbers().length;
final int msmsScanNumber = peak.getMostIntenseFragmentScanNumber();
Double peakFWHM = peak.getFWHM();
Double peakTailingFactor = peak.getTailingFactor();
Double peakAsymmetryFactor = peak.getAsymmetryFactor();
if (peakFWHM == null) {
peakFWHM = -1.0;
}
if (peakTailingFactor == null) {
peakTailingFactor = -1.0;
}
if (peakAsymmetryFactor == null) {
peakAsymmetryFactor = -1.0;
}
// Check Duration
if (filterByDuration) {
final Range<Double> durationRange = parameters.getParameter(PeakFilterParameters.PEAK_DURATION).getEmbeddedParameter().getValue();
if (!durationRange.contains(peakDuration)) {
// Mark peak to be removed
keepPeak[i] = false;
}
}
// Check Area
if (filterByArea) {
final Range<Double> areaRange = parameters.getParameter(PeakFilterParameters.PEAK_AREA).getEmbeddedParameter().getValue();
if (!areaRange.contains(peakArea)) {
// Mark peak to be removed
keepPeak[i] = false;
}
}
// Check Height
if (filterByHeight) {
final Range<Double> heightRange = parameters.getParameter(PeakFilterParameters.PEAK_HEIGHT).getEmbeddedParameter().getValue();
if (!heightRange.contains(peakHeight)) {
// Mark peak to be removed
keepPeak[i] = false;
}
}
// Check # Data Points
if (filterByDatapoints) {
final Range<Integer> datapointsRange = parameters.getParameter(PeakFilterParameters.PEAK_DATAPOINTS).getEmbeddedParameter().getValue();
if (!datapointsRange.contains(peakDatapoints)) {
// Mark peak to be removed
keepPeak[i] = false;
}
}
// Check FWHM
if (filterByFWHM) {
final Range<Double> fwhmRange = parameters.getParameter(PeakFilterParameters.PEAK_FWHM).getEmbeddedParameter().getValue();
if (!fwhmRange.contains(peakFWHM)) {
// Mark peak to be removed
keepPeak[i] = false;
}
}
// Check Tailing Factor
if (filterByTailingFactor) {
final Range<Double> tailingRange = parameters.getParameter(PeakFilterParameters.PEAK_TAILINGFACTOR).getEmbeddedParameter().getValue();
if (!tailingRange.contains(peakTailingFactor)) {
// Mark peak to be removed
keepPeak[i] = false;
}
}
// Check Asymmetry Factor
if (filterByAsymmetryFactor) {
final Range<Double> asymmetryRange = parameters.getParameter(PeakFilterParameters.PEAK_ASYMMETRYFACTOR).getEmbeddedParameter().getValue();
if (!asymmetryRange.contains(peakAsymmetryFactor)) {
// Mark peak to be removed
keepPeak[i] = false;
}
}
// Check MS/MS filter
if (filterByMS2) {
if (msmsScanNumber < 1)
keepPeak[i] = false;
}
}
// empty row?
boolean isEmpty = Booleans.asList(keepPeak).stream().allMatch(keep -> !keep);
if (!isEmpty)
newPeakList.addRow(copyPeakRow(row, keepPeak));
}
return newPeakList;
}
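The row-level decision reduces to a boolean mask: each raw data file's peak flips its keepPeak entry, and the row survives unless every entry is false; copyPeakRow(row, keepPeak), not shown here, presumably copies only the peaks whose mask entry remains true. The mask idiom in isolation (a generic sketch, not the mzmine2 helper):
import java.util.ArrayList;
import java.util.List;

public class MaskFilterSketch {
    // Keeps values[i] only where keep[i] is true; mirrors how the task
    // copies a row while dropping the peaks that failed a filter.
    static List<Double> applyMask(double[] values, boolean[] keep) {
        List<Double> kept = new ArrayList<>();
        for (int i = 0; i < values.length; i++) {
            if (keep[i])
                kept.add(values[i]);
        }
        return kept;
    }

    public static void main(String[] args) {
        double[] areas = {1.2e5, 3.4e4, 9.9e6};
        boolean[] keep = {true, false, true}; // e.g. results of the area/height checks
        System.out.println(applyMask(areas, keep)); // prints [120000.0, 9900000.0]
    }
}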