use of net.sf.mzmine.datamodel.IsotopePattern in project mzmine2 by mzmine.
the class SimplePeakListRow method getBestIsotopePattern.
/**
* Returns the isotope pattern of the highest peak in this row
*/
@Override
public IsotopePattern getBestIsotopePattern() {
Feature[] peaks = getPeaks();
Arrays.sort(peaks, new PeakSorter(SortingProperty.Height, SortingDirection.Descending));
for (Feature peak : peaks) {
IsotopePattern ip = peak.getIsotopePattern();
if (ip != null)
return ip;
}
return null;
}
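A minimal usage sketch (a hypothetical caller, not part of mzmine2): because the features are sorted by descending height, a non-null result is the pattern of the most intense feature that carries one.
// Hypothetical helper, assuming the types used elsewhere on this page
// (net.sf.mzmine.datamodel.DataPoint, IsotopePattern, PeakListRow):
static void printBestPattern(PeakListRow row) {
    IsotopePattern ip = row.getBestIsotopePattern();
    if (ip == null)
        return; // no feature in this row carries a detected pattern
    for (DataPoint p : ip.getDataPoints())
        System.out.println("m/z " + p.getMZ() + "  intensity " + p.getIntensity());
}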
use of net.sf.mzmine.datamodel.IsotopePattern in project mzmine2 by mzmine.
the class IsotopePeakScannerTask method run.
@Override
public void run() {
if (!checkParameters())
return;
setStatus(TaskStatus.PROCESSING);
totalRows = peakList.getNumberOfRows();
double[][] diff = setUpDiffAutoCarbon();
if (diff == null) {
message = "ERROR: could not set up diff.";
setStatus(TaskStatus.ERROR);
return;
}
logger.info("diff.length: " + diff.length);
logger.info("maxPatternIndex: " + maxPatternIndex);
logger.info("maxPatternSize: " + maxPatternSize);
// get all rows and sort by m/z
PeakListRow[] rows = peakList.getRows();
Arrays.sort(rows, new PeakListRowSorter(SortingProperty.MZ, SortingDirection.Ascending));
PeakListHandler plh = new PeakListHandler();
plh.setUp(peakList);
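// PeakListHandler appears to wrap the peak list in an ID -> row map; the
// containsID()/getRowByID() lookups further down rely on it.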
resultPeakList = new SimplePeakList(peakList.getName() + suffix, peakList.getRawDataFiles());
PeakListHandler resultMap = new PeakListHandler();
for (int i = 0; i < totalRows; i++) {
// i will represent the index of the row in peakList
if (rows[i].getPeakIdentities().length > 0) {
finishedRows++;
continue;
}
message = "Row " + i + "/" + totalRows;
// now get all peaks that lie within RT and maxIsotopeMassRange: pL[index].mz ->
// pL[index].mz+maxMass
ArrayList<PeakListRow> groupedPeaks = groupPeaks(rows, i, diff[maxPatternIndex][diff[maxPatternIndex].length - 1]);
if (groupedPeaks.size() < 2) {
finishedRows++;
continue;
}
// else
// logger.info("groupedPeaks.size > 2 in row: " + i + " size: " +
// groupedPeaks.size());
// this will store row indexes of all features with fitting rt and mz
ResultBuffer[][] resultBuffer = new ResultBuffer[diff.length][];
for (int p = 0; p < diff.length; p++) {
// resultBuffer[i] index will represent Isotope[i] (if numAtoms = 0)
resultBuffer[p] = new ResultBuffer[diff[p].length];
for (int k = 0; k < diff[p].length; k++) // [p][0] will be the isotope with lowest mass
resultBuffer[p][k] = new ResultBuffer();
}
boolean[] trueBuffers = new boolean[diff.length];
Arrays.fill(trueBuffers, false);
for (int j = 0; j < groupedPeaks.size(); j++) { // go through all possible peaks
for (int p = 0; p < diff.length; p++) {
for (int k = 0; k < diff[p].length; k++) { // check for each peak if it is a possible feature for every diff[] (isotope)
// p = pattern index for autoCarbon
if (mzTolerance.checkWithinTolerance(groupedPeaks.get(0).getAverageMZ() + diff[p][k], groupedPeaks.get(j).getAverageMZ())) {
// this will automatically add groupedPeaks[0] to the list -> isotope with
// lowest mass
// +1 result for isotope k
resultBuffer[p][k].addFound();
// row in groupedPeaks[]
resultBuffer[p][k].addRow(j);
resultBuffer[p][k].addID(groupedPeaks.get(j).getID());
}
}
}
}
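// at this point resultBuffer[p][k] lists every grouped peak whose m/z matched
// isotope k of pattern p within tolerance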
boolean foundOne = false;
for (int p = 0; p < diff.length; p++) if (checkIfAllTrue(resultBuffer[p])) {
// this means that for every isotope we expected to find, we found one or more possible features
foundOne = true;
trueBuffers[p] = true;
// logger.info("Row: " + i + " filled buffer[" + p +"]");
}
if (!foundOne) {
finishedRows++;
continue;
}
Candidates[] candidates = new Candidates[diff.length];
for (int p = 0; p < diff.length; p++) candidates[p] = new Candidates(diff[p].length, minHeight, mzTolerance, pattern[p], massListName, plh, ratingType);
for (int p = 0; p < diff.length; p++) {
if (!trueBuffers[p])
continue;
for (int k = 0; k < resultBuffer[p].length; k++) { // reminder: resultBuffer.length = diff.length
for (int l = 0; l < resultBuffer[p][k].getFoundCount(); l++) {
// k represents index resultBuffer[k] and thereby the isotope number
// l represents the number of results in resultBuffer[k]
candidates[p].checkForBetterRating(k, groupedPeaks.get(0), groupedPeaks.get(resultBuffer[p][k].getRow(l)), minRating, checkIntensity);
}
}
}
foundOne = false;
boolean[] trueCandidates = new boolean[diff.length];
Arrays.fill(trueCandidates, false);
for (int p = 0; p < diff.length; p++) {
if (trueBuffers[p] && checkIfAllTrue(candidates[p].getCandidates())) {
trueCandidates[p] = true;
foundOne = true;
// logger.info("Row: " + i + " filled candidates[" + p + "]");
}
}
if (!foundOne) {
finishedRows++;
// jump to next i
continue;
}
// find best result now, first we have to calc avg ratings if specified by user
int bestPatternIndex = 0;
double bestRating = 0.0;
for (int p = 0; p < diff.length; p++) {
if (!trueCandidates[p])
continue;
if (accurateAvgIntensity)
candidates[p].calcAvgRatings();
if (accurateAvgIntensity && candidates[p].getAvgAccAvgRating() > bestRating) {
bestPatternIndex = p;
bestRating = candidates[p].getAvgAccAvgRating();
} else if (!accurateAvgIntensity && candidates[p].getSimpleAvgRating() > bestRating) {
bestPatternIndex = p;
bestRating = candidates[p].getSimpleAvgRating();
}
}
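// note: if every surviving pattern rated 0.0 or lower, bestPatternIndex is still 0
// and may point at a pattern that never passed the candidate check; the guard below
// catches that case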
if (!checkIfAllTrue(candidates[bestPatternIndex].getCandidates())) {
logger.warning("We were about to add candidates with null pointers.\nThis was no valid result. Continuing.");
continue;
}
// TODO: this shouldn't be needed; fix the bug that causes the crash later on.
// This happens occasionally when the user requests accurate averages but does not
// filter by RT: possible isotope peaks are then found even though they are not
// detected at the same time, so the candidates return -1.0, which sooner or later
// produces a null pointer. Fixing this needs a rework of the Candidates class and
// is left for a future update. The results missed by skipping here would not have
// been valid anyway, so this is not urgent; it will just make the code cleaner.
// PeakListRow parent = copyPeakRow(peakList.getRow(i));
boolean allPeaksAddable = true;
List<PeakListRow> rowBuffer = new ArrayList<PeakListRow>();
PeakListRow original = getRowFromCandidate(candidates, bestPatternIndex, 0, plh);
if (original == null)
continue;
PeakListRow parent = copyPeakRow(original);
// if we can assign this row multiple times we have to copy the comment, because
// adding it to the map twice would overwrite the results
if (resultMap.containsID(parent.getID()))
addComment(parent, resultMap.getRowByID(parent.getID()).getComment());
// the ID is added to the comment to be able to sort by it
addComment(parent, parent.getID() + "--IS PARENT--");
if (carbonRange != 1)
addComment(parent, "BestPattern: " + pattern[bestPatternIndex].getDescription());
rowBuffer.add(parent);
DataPoint[] dp = new DataPoint[pattern[bestPatternIndex].getNumberOfDataPoints()];
if (accurateAvgIntensity) {
dp[0] = new SimpleDataPoint(parent.getAverageMZ(), candidates[bestPatternIndex].getAvgHeight(0));
} else {
dp[0] = new SimpleDataPoint(parent.getAverageMZ(), parent.getAverageHeight());
}
// we skip k=0 because it == groupedPeaks[0] == candidates.get(0), which we added before
for (int k = 1; k < candidates[bestPatternIndex].size(); k++) {
PeakListRow originalChild = getRowFromCandidate(candidates, bestPatternIndex, k, plh);
if (originalChild == null) {
allPeaksAddable = false;
continue;
}
PeakListRow child = copyPeakRow(originalChild);
if (accurateAvgIntensity) {
dp[k] = new SimpleDataPoint(child.getAverageMZ(), candidates[bestPatternIndex].getAvgHeight(k));
} else {
dp[k] = new SimpleDataPoint(child.getAverageMZ(), child.getAverageHeight());
}
String average = "";
if (accurateAvgIntensity) {
average = " AvgRating: " + round(candidates[bestPatternIndex].getAvgRating(k), 3);
}
addComment(parent, "Intensity ratios: " + getIntensityRatios(pattern[bestPatternIndex], pattern[bestPatternIndex].getHighestDataPointIndex()));
if (accurateAvgIntensity)
addComment(parent, " Avg pattern rating: " + round(candidates[bestPatternIndex].getAvgAccAvgRating(), 3));
else
addComment(parent, " pattern rating: " + round(candidates[bestPatternIndex].getSimpleAvgRating(), 3));
addComment(child, (parent.getID() + "-Parent ID" + " m/z-shift(ppm): "
+ round(((child.getAverageMZ() - parent.getAverageMZ()) - diff[bestPatternIndex][k]) / child.getAverageMZ() * 1E6, 2)
+ " I(c)/I(p): " + round(child.getAverageHeight() / plh.getRowByID(candidates[bestPatternIndex].get(pattern[bestPatternIndex].getHighestDataPointIndex()).getCandID()).getAverageHeight(), 2)
+ " Identity: " + pattern[bestPatternIndex].getIsotopeComposition(k)
+ " Rating: " + round(candidates[bestPatternIndex].get(k).getRating(), 3) + average));
rowBuffer.add(child);
}
if (!allPeaksAddable)
continue;
IsotopePattern resultPattern = new SimpleIsotopePattern(dp, IsotopePatternStatus.DETECTED, element + " monoisotopic mass: " + parent.getAverageMZ());
parent.getBestPeak().setIsotopePattern(resultPattern);
for (PeakListRow row : rowBuffer) {
row.getBestPeak().setIsotopePattern(resultPattern);
resultMap.addRow(row);
}
if (isCanceled())
return;
finishedRows++;
}
ArrayList<Integer> keys = resultMap.getAllKeys();
for (int j = 0; j < keys.size(); j++) resultPeakList.addRow(resultMap.getRowByID(keys.get(j)));
if (resultPeakList.getNumberOfRows() > 1)
addResultToProject();
else
message = "Element not found.";
setStatus(TaskStatus.FINISHED);
}
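The ResultBuffer and checkIfAllTrue helpers used above are not shown on this page. A plausible minimal reconstruction, inferred purely from the calls in run() (the real mzmine2 classes may differ in detail):
import java.util.ArrayList;
import java.util.List;

// Inferred from usage: one buffer per expected isotope peak, accumulating every
// grouped peak that matched it within tolerance.
class ResultBuffer {
    private int found = 0;                                // number of matches (addFound)
    private final List<Integer> rows = new ArrayList<>(); // indexes into groupedPeaks (addRow)
    private final List<Integer> ids = new ArrayList<>();  // peak-list row IDs (addID)

    void addFound() { found++; }
    void addRow(int row) { rows.add(row); }
    void addID(int id) { ids.add(id); }
    int getFoundCount() { return found; }
    int getRow(int i) { return rows.get(i); }
    int getID(int i) { return ids.get(i); }
}

class IsotopeScanHelpers {
    // checkIfAllTrue, as used above, plausibly requires at least one match for
    // every expected isotope peak before a pattern is considered further.
    static boolean checkIfAllTrue(ResultBuffer[] buffers) {
        for (ResultBuffer b : buffers)
            if (b.getFoundCount() == 0)
                return false;
        return true;
    }
}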
use of net.sf.mzmine.datamodel.IsotopePattern in project mzmine2 by mzmine.
the class SQLExportTask method exportPeakListRow.
private void exportPeakListRow(PeakListRow row) throws SQLException {
// Cancel?
if (isCanceled()) {
return;
}
// Value for looping through raw data files
boolean loopDataFiles = false;
StringBuilder sql = new StringBuilder();
sql.append("INSERT INTO ");
sql.append(tableName);
sql.append(" (");
for (int i = 0; i < exportColumns.getRowCount(); i++) {
sql.append(exportColumns.getValueAt(i, 0));
if (i < exportColumns.getRowCount() - 1)
sql.append(",");
}
sql.append(" ) VALUES (");
for (int i = 0; i < exportColumns.getRowCount(); i++) {
sql.append("?");
if (i < exportColumns.getRowCount() - 1)
sql.append(",");
}
sql.append(")");
PreparedStatement statement = dbConnection.prepareStatement(sql.toString());
if (row == null) {
for (int i = 0; i < exportColumns.getRowCount(); i++) {
SQLExportDataType dataType = (SQLExportDataType) exportColumns.getValueAt(i, 1);
String dataValue = (String) exportColumns.getValueAt(i, 2);
switch(dataType) {
case CONSTANT:
statement.setString(i + 1, dataValue);
break;
case RAWFILE:
RawDataFile[] rawdatafiles = peakList.getRawDataFiles();
statement.setString(i + 1, rawdatafiles[0].getName());
break;
default:
statement.setString(i + 1, null);
break;
}
}
statement.executeUpdate();
} else {
for (RawDataFile rawDataFile : row.getRawDataFiles()) {
Feature peak = row.getPeak(rawDataFile);
for (int i = 0; i < exportColumns.getRowCount(); i++) {
SQLExportDataType dataType = (SQLExportDataType) exportColumns.getValueAt(i, 1);
String dataValue = (String) exportColumns.getValueAt(i, 2);
switch(dataType) {
case CONSTANT:
statement.setString(i + 1, dataValue);
break;
case MZ:
statement.setDouble(i + 1, row.getAverageMZ());
break;
case RT:
statement.setDouble(i + 1, row.getAverageRT());
break;
case ID:
statement.setInt(i + 1, row.getID());
break;
case PEAKCHARGE:
statement.setDouble(i + 1, peak.getCharge());
loopDataFiles = true;
break;
case PEAKDURATION:
statement.setDouble(i + 1, RangeUtils.rangeLength(peak.getRawDataPointsRTRange()));
loopDataFiles = true;
break;
case PEAKSTATUS:
statement.setString(i + 1, peak.getFeatureStatus().name());
loopDataFiles = true;
break;
case PEAKMZ:
statement.setDouble(i + 1, peak.getMZ());
loopDataFiles = true;
break;
case PEAKRT:
statement.setDouble(i + 1, peak.getRT());
loopDataFiles = true;
break;
case PEAKRT_START:
statement.setDouble(i + 1, peak.getRawDataPointsRTRange().lowerEndpoint());
loopDataFiles = true;
break;
case PEAKRT_END:
statement.setDouble(i + 1, peak.getRawDataPointsRTRange().upperEndpoint());
loopDataFiles = true;
break;
case PEAKHEIGHT:
statement.setDouble(i + 1, peak.getHeight());
loopDataFiles = true;
break;
case PEAKAREA:
statement.setDouble(i + 1, peak.getArea());
loopDataFiles = true;
break;
case DATAPOINTS:
statement.setDouble(i + 1, peak.getScanNumbers().length);
loopDataFiles = true;
break;
case FWHM:
statement.setDouble(i + 1, peak.getFWHM());
loopDataFiles = true;
break;
case TAILINGFACTOR:
statement.setDouble(i + 1, peak.getTailingFactor());
loopDataFiles = true;
break;
case ASYMMETRYFACTOR:
statement.setDouble(i + 1, peak.getAsymmetryFactor());
loopDataFiles = true;
break;
case RAWFILE:
statement.setString(i + 1, rawDataFile.getName());
loopDataFiles = true;
break;
case HEIGHT:
statement.setDouble(i + 1, row.getAverageHeight());
break;
case AREA:
statement.setDouble(i + 1, row.getAverageArea());
break;
case COMMENT:
statement.setString(i + 1, row.getComment());
break;
case IDENTITY:
PeakIdentity id = row.getPreferredPeakIdentity();
if (id != null) {
statement.setString(i + 1, id.getName());
} else {
statement.setNull(i + 1, Types.VARCHAR);
}
break;
case ISOTOPEPATTERN:
IsotopePattern isotopes = row.getBestIsotopePattern();
if (isotopes == null) {
statement.setNull(i + 1, Types.BLOB);
break;
}
DataPoint[] dataPoints = isotopes.getDataPoints();
byte[] bytes = ScanUtils.encodeDataPointsToBytes(dataPoints);
ByteArrayInputStream is = new ByteArrayInputStream(bytes);
statement.setBlob(i + 1, is);
break;
case MSMS:
int msmsScanNum = row.getBestPeak().getMostIntenseFragmentScanNumber();
// Check if there is any MS/MS scan
if (msmsScanNum <= 0) {
statement.setNull(i + 1, Types.BLOB);
break;
}
RawDataFile dataFile = row.getBestPeak().getDataFile();
Scan msmsScan = dataFile.getScan(msmsScanNum);
MassList msmsMassList = msmsScan.getMassList(dataValue);
// Check if there is a masslist for the scan
if (msmsMassList == null) {
statement.setNull(i + 1, Types.BLOB);
break;
}
dataPoints = msmsMassList.getDataPoints();
bytes = ScanUtils.encodeDataPointsToBytes(dataPoints);
is = new ByteArrayInputStream(bytes);
statement.setBlob(i + 1, is);
break;
default:
break;
}
}
statement.executeUpdate();
// if no data-file-specific columns were exported, don't loop through the remaining
// data files in the feature list
if (!loopDataFiles) {
break;
}
}
}
}
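For the ISOTOPEPATTERN and MSMS columns the data points are serialized with ScanUtils.encodeDataPointsToBytes() and stored as a BLOB. A read-back sketch; the table and column names are illustrative, and it assumes mzmine2's ScanUtils provides the matching decoder decodeDataPointsFromBytes():
import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.SQLException;
import net.sf.mzmine.datamodel.DataPoint;
import net.sf.mzmine.util.ScanUtils;

class IsotopePatternReader {
    // Fetches and decodes a stored isotope pattern; returns null if the row is
    // missing or the pattern column was NULL.
    static DataPoint[] readPattern(Connection conn, int rowId) throws SQLException {
        String sql = "SELECT isotopepattern FROM peaks WHERE id = ?"; // illustrative names
        try (PreparedStatement ps = conn.prepareStatement(sql)) {
            ps.setInt(1, rowId);
            try (ResultSet rs = ps.executeQuery()) {
                if (!rs.next())
                    return null;
                byte[] bytes = rs.getBytes(1);
                return bytes == null ? null : ScanUtils.decodeDataPointsFromBytes(bytes);
            }
        }
    }
}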
use of net.sf.mzmine.datamodel.IsotopePattern in project mzmine2 by mzmine.
the class RTScore method calculateScore.
public double calculateScore(AlignmentPath path, PeakListRow peak, ParameterSet parameters) {
try {
rtTolerance = parameters.getParameter(PathAlignerParameters.RTTolerance).getValue();
mzTolerance = parameters.getParameter(PathAlignerParameters.MZTolerance).getValue();
Range<Double> rtRange = rtTolerance.getToleranceRange(path.getRT());
Range<Double> mzRange = mzTolerance.getToleranceRange(path.getMZ());
if (!rtRange.contains(peak.getAverageRT()) || !mzRange.contains(peak.getAverageMZ())) {
return WORST_SCORE;
}
double mzDiff = Math.abs(path.getMZ() - peak.getAverageMZ());
double rtDiff = Math.abs(path.getRT() - peak.getAverageRT());
double score = ((mzDiff / (RangeUtils.rangeLength(mzRange) / 2.0))) + ((rtDiff / (RangeUtils.rangeLength(rtRange) / 2.0)));
if (parameters.getParameter(PathAlignerParameters.SameChargeRequired).getValue()) {
if (!PeakUtils.compareChargeState(path.convertToAlignmentRow(0), peak)) {
return WORST_SCORE;
}
}
if (parameters.getParameter(PathAlignerParameters.SameIDRequired).getValue()) {
if (!PeakUtils.compareIdentities(path.convertToAlignmentRow(0), peak)) {
return WORST_SCORE;
}
}
if (parameters.getParameter(PathAlignerParameters.compareIsotopePattern).getValue()) {
IsotopePattern ip1 = path.convertToAlignmentRow(0).getBestIsotopePattern();
IsotopePattern ip2 = peak.getBestIsotopePattern();
if ((ip1 != null) && (ip2 != null)) {
ParameterSet isotopeParams = parameters.getParameter(PathAlignerParameters.compareIsotopePattern).getEmbeddedParameters();
if (!IsotopePatternScoreCalculator.checkMatch(ip1, ip2, isotopeParams)) {
return WORST_SCORE;
}
}
}
return score;
} catch (NullPointerException e) {
e.printStackTrace();
return WORST_SCORE;
}
}
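A worked example of the score above: each absolute difference is normalized to half its tolerance window, so identical m/z and RT give 0 and a peak sitting on both window edges gives 2. The numbers here are illustrative; lower values mean a closer match, out-of-window rows having already been rejected with WORST_SCORE.
class RTScoreExample {
    public static void main(String[] args) {
        double mzDiff = 0.002, mzWindow = 0.02; // full m/z tolerance window width
        double rtDiff = 0.05, rtWindow = 0.5;   // full RT tolerance window width, minutes
        double score = mzDiff / (mzWindow / 2.0) + rtDiff / (rtWindow / 2.0);
        System.out.println(score);              // 0.2 + 0.2 = 0.4
    }
}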
use of net.sf.mzmine.datamodel.IsotopePattern in project mzmine2 by mzmine.
the class DPPIsotopeGrouperTask method compressIsotopeDataSets.
/**
* This method generates a single IsotopesDataSet from all detected isotope patterns in the
* results.
*
* @param dataPoints the processed data points to scan for isotope pattern results
* @return a single data set combining all detected patterns, or null if none were found
*/
private IsotopesDataSet compressIsotopeDataSets(ProcessedDataPoint[] dataPoints) {
List<IsotopePattern> list = new ArrayList<>();
for (ProcessedDataPoint dp : dataPoints) {
if (dp.resultTypeExists(ResultType.ISOTOPEPATTERN)) {
list.add(((DPPIsotopePatternResult) dp.getFirstResultByType(ResultType.ISOTOPEPATTERN)).getValue());
}
}
if (list.isEmpty())
return null;
List<DataPoint> dpList = new ArrayList<>();
for (IsotopePattern pattern : list) {
for (DataPoint dp : pattern.getDataPoints()) dpList.add(dp);
}
if (dpList.isEmpty())
return null;
IsotopePattern full = new SimpleIsotopePattern(dpList.toArray(new DataPoint[0]), IsotopePatternStatus.DETECTED, "Isotope patterns");
return new IsotopesDataSet(full);
}
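The two accumulation loops above flatten all detected patterns into one data-point list. The same step expressed with streams, as a stylistic sketch rather than the mzmine2 implementation:
import java.util.Arrays;
import java.util.List;
import java.util.stream.Collectors;
import net.sf.mzmine.datamodel.DataPoint;
import net.sf.mzmine.datamodel.IsotopePattern;

class PatternFlattener {
    // Collects the data points of every pattern into a single flat list.
    static List<DataPoint> flatten(List<IsotopePattern> patterns) {
        return patterns.stream()
                .flatMap(p -> Arrays.stream(p.getDataPoints()))
                .collect(Collectors.toList());
    }
}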