use of net.sf.mzmine.datamodel.Feature in project mzmine2 by mzmine.
the class ADAP3DecompositionV1_5Task method decomposePeaks.
private PeakList decomposePeaks(PeakList peakList) throws CloneNotSupportedException, IOException {
RawDataFile dataFile = peakList.getRawDataFile(0);
// Create new feature list.
final PeakList resolvedPeakList = new SimplePeakList(peakList + " " + parameters.getParameter(ADAP3DecompositionV1_5Parameters.SUFFIX).getValue(), dataFile);
// Copy previously applied methods.
for (final PeakList.PeakListAppliedMethod method : peakList.getAppliedMethods()) {
resolvedPeakList.addDescriptionOfAppliedTask(method);
}
// Add task description to feature list.
resolvedPeakList.addDescriptionOfAppliedTask(new SimplePeakListAppliedMethod("Peak deconvolution by ADAP-3", parameters));
// Collect peak information
List<Peak> peaks = getPeaks(peakList, this.parameters.getParameter(ADAP3DecompositionV1_5Parameters.EDGE_TO_HEIGHT_RATIO).getValue(), this.parameters.getParameter(ADAP3DecompositionV1_5Parameters.DELTA_TO_HEIGHT_RATIO).getValue());
// Find components (a.k.a. clusters of peaks with fragmentation spectra)
List<Component> components = getComponents(peaks);
// Create a PeakListRow for each component
List<PeakListRow> newPeakListRows = new ArrayList<>();
int rowID = 0;
for (final Component component : components) {
if (component.getSpectrum().isEmpty())
continue;
PeakListRow row = new SimplePeakListRow(++rowID);
// Add the reference peak
PeakListRow refPeakRow = originalPeakList.getRow(component.getBestPeak().getInfo().peakID);
Feature refPeak = new SimpleFeature(refPeakRow.getBestPeak());
// Add spectrum
List<DataPoint> dataPoints = new ArrayList<>();
for (Map.Entry<Double, Double> entry : component.getSpectrum().entrySet()) {
dataPoints.add(new SimpleDataPoint(entry.getKey(), entry.getValue()));
}
refPeak.setIsotopePattern(new SimpleIsotopePattern(dataPoints.toArray(new DataPoint[dataPoints.size()]), IsotopePattern.IsotopePatternStatus.PREDICTED, "Spectrum"));
row.addPeak(dataFile, refPeak);
// Add PeakInformation
if (refPeakRow.getPeakInformation() != null) {
SimplePeakInformation information = new SimplePeakInformation(new HashMap<>(refPeakRow.getPeakInformation().getAllProperties()));
row.setPeakInformation(information);
}
// Set row properties
row.setAverageMZ(refPeakRow.getAverageMZ());
row.setAverageRT(refPeakRow.getAverageRT());
// resolvedPeakList.addRow(row);
newPeakListRows.add(row);
}
// ------------------------------------
// Sort new peak rows by retention time
// ------------------------------------
Collections.sort(newPeakListRows, new Comparator<PeakListRow>() {
@Override
public int compare(PeakListRow row1, PeakListRow row2) {
double retTime1 = row1.getAverageRT();
double retTime2 = row2.getAverageRT();
return Double.compare(retTime1, retTime2);
}
});
for (PeakListRow row : newPeakListRows) resolvedPeakList.addRow(row);
return resolvedPeakList;
}
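The retention-time sort above uses an anonymous Comparator; on Java 8+ the same ordering can be expressed in one line. This is only an equivalent alternative, not the project's code:
newPeakListRows.sort(Comparator.comparingDouble(PeakListRow::getAverageRT));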
use of net.sf.mzmine.datamodel.Feature in project mzmine2 by mzmine.
the class IsotopePeakScannerTask method copyPeakRow.
/**
* Create a copy of a feature list row.
*
* @param row the row to copy.
* @return the newly created copy.
*/
private static PeakListRow copyPeakRow(final PeakListRow row) {
// Copy the feature list row.
final PeakListRow newRow = new SimplePeakListRow(row.getID());
PeakUtils.copyPeakListRowProperties(row, newRow);
// Copy the peaks.
for (final Feature peak : row.getPeaks()) {
final Feature newPeak = new SimpleFeature(peak);
PeakUtils.copyPeakProperties(peak, newPeak);
newRow.addPeak(peak.getDataFile(), newPeak);
}
return newRow;
}
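A typical call site deep-copies every row of a feature list before modifying it. In the sketch below, resultPeakList is a hypothetical SimplePeakList created by the caller; only copyPeakRow itself comes from the snippet above:
for (PeakListRow row : peakList.getRows()) {
    // clone the row together with all of its features
    resultPeakList.addRow(copyPeakRow(row));
}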
use of net.sf.mzmine.datamodel.Feature in project mzmine2 by mzmine.
the class SQLExportTask method exportPeakListRow.
private void exportPeakListRow(PeakListRow row) throws SQLException {
// Cancel?
if (isCanceled()) {
return;
}
// Set to true as soon as any per-peak (per raw data file) column is exported
boolean loopDataFiles = false;
StringBuilder sql = new StringBuilder();
sql.append("INSERT INTO ");
sql.append(tableName);
sql.append(" (");
for (int i = 0; i < exportColumns.getRowCount(); i++) {
sql.append(exportColumns.getValueAt(i, 0));
if (i < exportColumns.getRowCount() - 1)
sql.append(",");
}
sql.append(" ) VALUES (");
for (int i = 0; i < exportColumns.getRowCount(); i++) {
sql.append("?");
if (i < exportColumns.getRowCount() - 1)
sql.append(",");
}
sql.append(")");
PreparedStatement statement = dbConnection.prepareStatement(sql.toString());
if (row == null) {
for (int i = 0; i < exportColumns.getRowCount(); i++) {
SQLExportDataType dataType = (SQLExportDataType) exportColumns.getValueAt(i, 1);
String dataValue = (String) exportColumns.getValueAt(i, 2);
switch(dataType) {
case CONSTANT:
statement.setString(i + 1, dataValue);
break;
case RAWFILE:
RawDataFile[] rawdatafiles = peakList.getRawDataFiles();
statement.setString(i + 1, rawdatafiles[0].getName());
break;
default:
statement.setString(i + 1, null);
break;
}
}
statement.executeUpdate();
} else {
for (RawDataFile rawDataFile : row.getRawDataFiles()) {
Feature peak = row.getPeak(rawDataFile);
for (int i = 0; i < exportColumns.getRowCount(); i++) {
SQLExportDataType dataType = (SQLExportDataType) exportColumns.getValueAt(i, 1);
String dataValue = (String) exportColumns.getValueAt(i, 2);
switch(dataType) {
case CONSTANT:
statement.setString(i + 1, dataValue);
break;
case MZ:
statement.setDouble(i + 1, row.getAverageMZ());
break;
case RT:
statement.setDouble(i + 1, row.getAverageRT());
break;
case ID:
statement.setInt(i + 1, row.getID());
break;
case PEAKCHARGE:
statement.setDouble(i + 1, peak.getCharge());
loopDataFiles = true;
break;
case PEAKDURATION:
statement.setDouble(i + 1, RangeUtils.rangeLength(peak.getRawDataPointsRTRange()));
loopDataFiles = true;
break;
case PEAKSTATUS:
statement.setString(i + 1, peak.getFeatureStatus().name());
loopDataFiles = true;
break;
case PEAKMZ:
statement.setDouble(i + 1, peak.getMZ());
loopDataFiles = true;
break;
case PEAKRT:
statement.setDouble(i + 1, peak.getRT());
loopDataFiles = true;
break;
case PEAKRT_START:
statement.setDouble(i + 1, peak.getRawDataPointsRTRange().lowerEndpoint());
loopDataFiles = true;
break;
case PEAKRT_END:
statement.setDouble(i + 1, peak.getRawDataPointsRTRange().upperEndpoint());
loopDataFiles = true;
break;
case PEAKHEIGHT:
statement.setDouble(i + 1, peak.getHeight());
loopDataFiles = true;
break;
case PEAKAREA:
statement.setDouble(i + 1, peak.getArea());
loopDataFiles = true;
break;
case DATAPOINTS:
statement.setDouble(i + 1, peak.getScanNumbers().length);
loopDataFiles = true;
break;
case FWHM:
statement.setDouble(i + 1, peak.getFWHM());
loopDataFiles = true;
break;
case TAILINGFACTOR:
statement.setDouble(i + 1, peak.getTailingFactor());
loopDataFiles = true;
break;
case ASYMMETRYFACTOR:
statement.setDouble(i + 1, peak.getAsymmetryFactor());
loopDataFiles = true;
break;
case RAWFILE:
statement.setString(i + 1, rawDataFile.getName());
loopDataFiles = true;
break;
case HEIGHT:
statement.setDouble(i + 1, row.getAverageHeight());
break;
case AREA:
statement.setDouble(i + 1, row.getAverageArea());
break;
case COMMENT:
statement.setString(i + 1, row.getComment());
break;
case IDENTITY:
PeakIdentity id = row.getPreferredPeakIdentity();
if (id != null) {
statement.setString(i + 1, id.getName());
} else {
statement.setNull(i + 1, Types.VARCHAR);
}
break;
case ISOTOPEPATTERN:
IsotopePattern isotopes = row.getBestIsotopePattern();
if (isotopes == null) {
statement.setNull(i + 1, Types.BLOB);
break;
}
DataPoint[] dataPoints = isotopes.getDataPoints();
byte[] bytes = ScanUtils.encodeDataPointsToBytes(dataPoints);
ByteArrayInputStream is = new ByteArrayInputStream(bytes);
statement.setBlob(i + 1, is);
break;
case MSMS:
int msmsScanNum = row.getBestPeak().getMostIntenseFragmentScanNumber();
// Check if there is any MS/MS scan
if (msmsScanNum <= 0) {
statement.setNull(i + 1, Types.BLOB);
break;
}
RawDataFile dataFile = row.getBestPeak().getDataFile();
Scan msmsScan = dataFile.getScan(msmsScanNum);
MassList msmsMassList = msmsScan.getMassList(dataValue);
// Check if there is a masslist for the scan
if (msmsMassList == null) {
statement.setNull(i + 1, Types.BLOB);
break;
}
dataPoints = msmsMassList.getDataPoints();
bytes = ScanUtils.encodeDataPointsToBytes(dataPoints);
is = new ByteArrayInputStream(bytes);
statement.setBlob(i + 1, is);
break;
default:
break;
}
}
statement.executeUpdate();
// If no per-peak column was exported, do not loop over the remaining raw
// data files in the feature list
if (!loopDataFiles) {
break;
}
}
}
}
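A driver loop consistent with this method would open the JDBC connection, export each row, and close the connection afterwards. The sketch below is an assumption about the surrounding task code, not the project's exact implementation; connectionString, dbUsername and dbPassword are hypothetical fields:
dbConnection = DriverManager.getConnection(connectionString, dbUsername, dbPassword);
try {
    for (PeakListRow row : peakList.getRows()) {
        exportPeakListRow(row);
    }
} finally {
    dbConnection.close();
}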
use of net.sf.mzmine.datamodel.Feature in project mzmine2 by mzmine.
the class IsotopeGrouperTask method fitHalfPattern.
/**
* Helper method for fitPattern. Fits only one half of the pattern.
*
* @param p Pattern is fitted around this peak
* @param charge Charge state of the fitted pattern
* @param direction Defines which half to fit: -1=fit to peaks before start M/Z, +1=fit to peaks
* after start M/Z
* @param fittedPeaks All matching peaks will be added to this set
*/
private void fitHalfPattern(Feature p, int charge, int direction, Vector<Feature> fittedPeaks, Feature[] sortedPeaks) {
// Use M/Z and RT of the strongest peak of the pattern (peak 'p')
double mainMZ = p.getMZ();
double mainRT = p.getRT();
// Variable n is the index of the peak we are currently searching for. 1=first
// peak before/after start peak, 2=peak before/after previous, 3=...
boolean followingPeakFound;
int n = 1;
do {
// Assume we don't find a match for the n:th peak in the pattern (which
// will end the loop)
followingPeakFound = false;
// Loop through all peaks, and collect candidates for the n:th peak
// in the pattern
Vector<Feature> goodCandidates = new Vector<Feature>();
for (int ind = 0; ind < sortedPeaks.length; ind++) {
Feature candidatePeak = sortedPeaks[ind];
if (candidatePeak == null)
continue;
// Get properties of the candidate peak
double candidatePeakMZ = candidatePeak.getMZ();
double candidatePeakRT = candidatePeak.getRT();
// Does this peak fill all requirements of a candidate?
// - within tolerances from the expected location (M/Z and RT)
// - not already a fitted peak (only necessary to avoid
// conflicts when parameters are set too wide)
double isotopeMZ = candidatePeakMZ - isotopeDistance * direction * n / (double) charge;
if (mzTolerance.checkWithinTolerance(isotopeMZ, mainMZ) && rtTolerance.checkWithinTolerance(candidatePeakRT, mainRT) && (!fittedPeaks.contains(candidatePeak))) {
goodCandidates.add(candidatePeak);
}
}
// Add all good candidates to the isotope pattern (note: in MZmine
// 2.3 and older, only the highest candidate was added)
if (!goodCandidates.isEmpty()) {
fittedPeaks.addAll(goodCandidates);
// n:th peak was found, so let's move on to n+1
n++;
followingPeakFound = true;
}
} while (followingPeakFound);
}
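fitHalfPattern is driven by fitPattern, which is not part of this excerpt. Under the assumption that fitPattern simply fits both halves of the pattern around the main peak (the signature is inferred from the call in run() below), its body would look roughly like this; the isotopeDistance field used above is typically the 13C-12C spacing of about 1.0033 Da, so for charge 2 candidates are searched at multiples of roughly 0.5017 m/z:
private void fitPattern(Vector<Feature> fittedPeaks, Feature p, int charge, Feature[] sortedPeaks) {
    if (charge == 0)
        return;
    // Fit peaks on both sides of the main peak
    fitHalfPattern(p, charge, -1, fittedPeaks, sortedPeaks);
    fitHalfPattern(p, charge, 1, fittedPeaks, sortedPeaks);
}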
use of net.sf.mzmine.datamodel.Feature in project mzmine2 by mzmine.
the class IsotopeGrouperTask method run.
/**
* @see Runnable#run()
*/
public void run() {
setStatus(TaskStatus.PROCESSING);
logger.info("Running isotopic peak grouper on " + peakList);
// We assume source peakList contains one datafile
RawDataFile dataFile = peakList.getRawDataFile(0);
// Create a new deisotoped peakList
deisotopedPeakList = new SimplePeakList(peakList + " " + suffix, peakList.getRawDataFiles());
// Collect all selected charge states
int[] charges = new int[maximumCharge];
for (int i = 0; i < maximumCharge; i++) charges[i] = i + 1;
// Sort peaks by descending height
Feature[] sortedPeaks = peakList.getPeaks(dataFile);
Arrays.sort(sortedPeaks, new PeakSorter(SortingProperty.Height, SortingDirection.Descending));
// Loop through all peaks
totalPeaks = sortedPeaks.length;
for (int ind = 0; ind < totalPeaks; ind++) {
if (isCanceled())
return;
Feature aPeak = sortedPeaks[ind];
// Check if peak was already deleted
if (aPeak == null) {
processedPeaks++;
continue;
}
// Check which charge state fits best around this peak
int bestFitCharge = 0;
int bestFitScore = -1;
Vector<Feature> bestFitPeaks = null;
for (int charge : charges) {
Vector<Feature> fittedPeaks = new Vector<Feature>();
fittedPeaks.add(aPeak);
fitPattern(fittedPeaks, aPeak, charge, sortedPeaks);
int score = fittedPeaks.size();
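// A higher score wins; on equal scores the lower charge state is preferred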
if ((score > bestFitScore) || ((score == bestFitScore) && (bestFitCharge > charge))) {
bestFitScore = score;
bestFitCharge = charge;
bestFitPeaks = fittedPeaks;
}
}
PeakListRow oldRow = peakList.getPeakRow(aPeak);
assert bestFitPeaks != null;
// If only one isotope (the main peak itself) was found, skip this pattern
// and keep the original peak row in the feature list.
if (bestFitPeaks.size() == 1) {
deisotopedPeakList.addRow(oldRow);
processedPeaks++;
continue;
}
// Convert the peak pattern to array
Feature[] originalPeaks = bestFitPeaks.toArray(new Feature[0]);
// Create a new SimpleIsotopePattern
DataPoint[] isotopes = new DataPoint[bestFitPeaks.size()];
for (int i = 0; i < isotopes.length; i++) {
Feature p = originalPeaks[i];
isotopes[i] = new SimpleDataPoint(p.getMZ(), p.getHeight());
}
SimpleIsotopePattern newPattern = new SimpleIsotopePattern(isotopes, IsotopePatternStatus.DETECTED, aPeak.toString());
// Depending on the user's preference, keep either the most intense peak or
// the lowest-m/z peak as the representative peak
if (chooseMostIntense) {
Arrays.sort(originalPeaks, new PeakSorter(SortingProperty.Height, SortingDirection.Descending));
} else {
Arrays.sort(originalPeaks, new PeakSorter(SortingProperty.MZ, SortingDirection.Ascending));
}
Feature newPeak = new SimpleFeature(originalPeaks[0]);
newPeak.setIsotopePattern(newPattern);
newPeak.setCharge(bestFitCharge);
// Keep old ID
int oldID = oldRow.getID();
SimplePeakListRow newRow = new SimplePeakListRow(oldID);
PeakUtils.copyPeakListRowProperties(oldRow, newRow);
newRow.addPeak(dataFile, newPeak);
deisotopedPeakList.addRow(newRow);
// Remove all peaks already assigned to isotope pattern
for (int i = 0; i < sortedPeaks.length; i++) {
if (bestFitPeaks.contains(sortedPeaks[i]))
sortedPeaks[i] = null;
}
// Update completion rate
processedPeaks++;
}
// Add new peakList to the project
project.addPeakList(deisotopedPeakList);
// Copy previously applied methods
for (PeakListAppliedMethod proc : peakList.getAppliedMethods()) {
deisotopedPeakList.addDescriptionOfAppliedTask(proc);
}
// Add task description to peakList
deisotopedPeakList.addDescriptionOfAppliedTask(new SimplePeakListAppliedMethod("Isotopic peaks grouper", parameters));
// Remove the original peakList if requested
if (removeOriginal)
project.removePeakList(peakList);
logger.info("Finished isotopic peak grouper on " + peakList);
setStatus(TaskStatus.FINISHED);
}