use of net.sf.mzmine.datamodel.PeakListRow in project mzmine2 by mzmine.
the class CameraSearchTask method groupPeaksByIsotope.
/**
* Uses the Isotope property of PeakIdentity to group isotopic peaks and build a spectrum for each group
*
* @param peakList the input PeakList
* @return a new PeakList with one row per isotope group (plus singletons, if enabled)
*/
private PeakList groupPeaksByIsotope(PeakList peakList) {
// Create new feature list.
final PeakList combinedPeakList = new SimplePeakList(peakList + " " + parameters.getParameter(CameraSearchParameters.SUFFIX).getValue(), peakList.getRawDataFiles());
// Load previous applied methods.
for (final PeakList.PeakListAppliedMethod method : peakList.getAppliedMethods()) {
combinedPeakList.addDescriptionOfAppliedTask(method);
}
// Add task description to feature list.
combinedPeakList.addDescriptionOfAppliedTask(new SimplePeakListAppliedMethod("Bioconductor CAMERA", parameters));
// ------------------------------------------------
// Find unique isotopes belonging to the same group
// ------------------------------------------------
Set<String> isotopeGroups = new HashSet<>();
for (PeakListRow row : peakList.getRows()) {
PeakIdentity identity = row.getPreferredPeakIdentity();
if (identity == null)
continue;
String isotope = identity.getPropertyValue("Isotope");
if (isotope == null)
continue;
int closingBracket = isotope.indexOf("]");
// Guard against malformed annotations: no "]" or an empty group id.
if (closingBracket <= 1)
continue;
String isotopeGroup = isotope.substring(1, closingBracket);
isotopeGroups.add(isotopeGroup);
}
List<PeakListRow> groupRows = new ArrayList<>();
Set<String> groupNames = new HashSet<>();
Map<Double, Double> spectrum = new HashMap<>();
List<PeakListRow> newPeakListRows = new ArrayList<>();
for (String isotopeGroup : isotopeGroups) {
// -----------------------------------------
// Find all peaks belonging to isotopeGroups
// -----------------------------------------
groupRows.clear();
groupNames.clear();
spectrum.clear();
int minLength = Integer.MAX_VALUE;
PeakListRow groupRow = null;
for (PeakListRow row : peakList.getRows()) {
PeakIdentity identity = row.getPreferredPeakIdentity();
if (identity == null)
continue;
String isotope = identity.getPropertyValue("Isotope");
if (isotope == null)
continue;
int closingBracket = isotope.indexOf("]");
if (closingBracket <= 1)
continue;
String isoGroup = isotope.substring(1, closingBracket);
if (isoGroup.equals(isotopeGroup)) {
groupRows.add(row);
groupNames.add(identity.getName());
spectrum.put(row.getAverageMZ(), row.getAverageHeight());
// Compare the full annotation length: the monoisotopic peak carries the
// shortest isotope label. (Comparing isoGroup.length() would be pointless,
// since the group id is identical for every row in this group.)
if (isotope.length() < minLength) {
minLength = isotope.length();
groupRow = row;
}
}
}
// Skip groups whose rows have different identity names (they belong to different pcgroups)
if (groupRow == null || groupNames.size() != 1)
continue;
PeakIdentity identity = groupRow.getPreferredPeakIdentity();
if (identity == null)
continue;
DataPoint[] dataPoints = new DataPoint[spectrum.size()];
int count = 0;
for (Entry<Double, Double> e : spectrum.entrySet()) dataPoints[count++] = new SimpleDataPoint(e.getKey(), e.getValue());
IsotopePattern pattern = new SimpleIsotopePattern(dataPoints, IsotopePatternStatus.PREDICTED, "Spectrum");
groupRow.getBestPeak().setIsotopePattern(pattern);
// Collect the row; it is added to combinedPeakList after sorting, below.
newPeakListRows.add(groupRow);
}
if (includeSingletons) {
for (PeakListRow row : peakList.getRows()) {
PeakIdentity identity = row.getPreferredPeakIdentity();
if (identity == null)
continue;
String isotope = identity.getPropertyValue("Isotope");
if (isotope == null || isotope.length() == 0) {
DataPoint[] dataPoints = new DataPoint[1];
dataPoints[0] = new SimpleDataPoint(row.getAverageMZ(), row.getAverageHeight());
IsotopePattern pattern = new SimpleIsotopePattern(dataPoints, IsotopePatternStatus.PREDICTED, "Spectrum");
row.getBestPeak().setIsotopePattern(pattern);
newPeakListRows.add(row);
}
}
}
// ------------------------------------
// Sort new peak rows by retention time
// ------------------------------------
Collections.sort(newPeakListRows, Comparator.comparingDouble(PeakListRow::getAverageRT));
for (PeakListRow row : newPeakListRows) combinedPeakList.addRow(row);
return combinedPeakList;
}
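For reference, the Isotope property parsed above is a CAMERA annotation. Based on the substring(1, indexOf("]")) call, the assumed format is "[<group>][M+n]...", with the group id between the first bracket pair; this format is inferred from the code, not from CAMERA documentation. A minimal sketch of the parsing:
// Hypothetical CAMERA annotation value, used only to illustrate the parsing.
String isotope = "[15][M+1]+";
int closingBracket = isotope.indexOf("]");
if (closingBracket > 1) {
String isotopeGroup = isotope.substring(1, closingBracket);
System.out.println(isotopeGroup); // prints "15"
}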
use of net.sf.mzmine.datamodel.PeakListRow in project mzmine2 by mzmine.
the class AdductSearchTask method searchAdducts.
/**
* Search the peak list for adducts.
*/
private void searchAdducts() {
// Get rows.
final PeakListRow[] rows = peakList.getRows();
totalRows = rows.length;
// Start with the highest peaks.
Arrays.sort(rows, new PeakListRowSorter(SortingProperty.Height, SortingDirection.Descending));
// Compare every ordered pair of distinct rows (both orders are checked).
for (int i = 0; !isCanceled() && i < totalRows; i++) {
for (int j = 0; !isCanceled() && j < totalRows; j++) {
if (i == j)
continue;
findAdducts(rows[i], rows[j]);
}
finishedRows++;
}
}
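If findAdducts were symmetric in its two arguments, each unordered pair would only need one visit. A hedged sketch of that variant; the excerpt does not confirm symmetry, so this is an optimization idea, not the module's actual behavior:
// Sketch only: visits each unordered pair once, halving the comparisons.
// Correct only if findAdducts(a, b) and findAdducts(b, a) are equivalent.
for (int i = 0; !isCanceled() && i < totalRows; i++) {
for (int j = i + 1; !isCanceled() && j < totalRows; j++) {
findAdducts(rows[i], rows[j]);
}
finishedRows++;
}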
use of net.sf.mzmine.datamodel.PeakListRow in project mzmine2 by mzmine.
the class TargetedPeakDetectionModuleTask method run.
public void run() {
setStatus(TaskStatus.PROCESSING);
// Calculate the total number of MS level 1 scans in the data file
totalScans = dataFile.getNumOfScans(1);
// Create new feature list
processedPeakList = new SimplePeakList(dataFile.getName() + " " + suffix, dataFile);
List<PeakInformation> peaks = this.readFile();
if (peaks == null || peaks.isEmpty()) {
setStatus(TaskStatus.ERROR);
setErrorMessage("Could not read file or the file is empty ");
return;
}
// Fill new feature list with empty rows
for (int row = 0; row < peaks.size(); row++) {
PeakListRow newRow = new SimplePeakListRow(ID++);
processedPeakList.addRow(newRow);
}
// Canceled?
if (isCanceled()) {
return;
}
List<Gap> gaps = new ArrayList<Gap>();
// Create a gap for each target compound
for (int row = 0; row < peaks.size(); row++) {
PeakListRow newRow = processedPeakList.getRow(row);
// Create a new gap
Range<Double> mzRange = mzTolerance.getToleranceRange(peaks.get(row).getMZ());
Range<Double> rtRange = rtTolerance.getToleranceRange(peaks.get(row).getRT());
newRow.addPeakIdentity(new SimplePeakIdentity(peaks.get(row).getName()), true);
Gap newGap = new Gap(newRow, dataFile, mzRange, rtRange, intTolerance, noiseLevel);
gaps.add(newGap);
}
// If no gaps were created, count every scan as already processed
if (gaps.isEmpty()) {
processedScans += dataFile.getNumOfScans();
}
// Get all scans of this data file
int[] scanNumbers = dataFile.getScanNumbers(msLevel);
if (scanNumbers == null) {
logger.log(Level.WARNING, "Could not read file with the MS level of " + msLevel);
setStatus(TaskStatus.ERROR);
return;
}
// Process each scan
for (int scanNumber : scanNumbers) {
// Canceled?
if (isCanceled()) {
return;
}
// Get the scan
Scan scan = dataFile.getScan(scanNumber);
// Feed this scan to all gaps
for (Gap gap : gaps) {
gap.offerNextScan(scan);
}
processedScans++;
}
// Finalize gaps
for (Gap gap : gaps) {
gap.noMoreOffers();
}
// Append processed feature list to the project
project.addPeakList(processedPeakList);
// Add quality parameters to peaks
QualityParameters.calculateQualityParameters(processedPeakList);
// Add task description to peakList
processedPeakList.addDescriptionOfAppliedTask(new SimplePeakListAppliedMethod("Targeted feature detection", parameters));
logger.log(Level.INFO, "Finished targeted feature detection on {0}", this.dataFile);
setStatus(TaskStatus.FINISHED);
}
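The Gap objects above follow a streaming contract: each scan is offered exactly once, in retention-time order, and noMoreOffers() finalizes the feature. A minimal self-contained sketch of that contract; GapSketch is a hypothetical simplification, not mzmine2's Gap class:
// Hypothetical simplification of the offer/finalize contract used by Gap.
class GapSketch {
private final double rtMin, rtMax;
private double maxIntensity = 0;
GapSketch(double rtMin, double rtMax) {
this.rtMin = rtMin;
this.rtMax = rtMax;
}
// Called once per scan, in retention-time order.
void offerNextScan(double rt, double intensity) {
if (rt < rtMin || rt > rtMax)
return; // scan lies outside the target RT window
maxIntensity = Math.max(maxIntensity, intensity);
}
// Called after the last scan; returns the accumulated peak height.
double noMoreOffers() {
return maxIntensity;
}
}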
use of net.sf.mzmine.datamodel.PeakListRow in project mzmine2 by mzmine.
the class CSVExportTask method exportPeakList.
private void exportPeakList(PeakList peakList, FileWriter writer, File fileName) {
NumberFormat mzForm = MZmineCore.getConfiguration().getMZFormat();
RawDataFile[] rawDataFiles = peakList.getRawDataFiles();
// Buffer for writing
StringBuilder line = new StringBuilder();
// Write column headers
// Common elements
int length = commonElements.length;
String name;
for (int i = 0; i < length; i++) {
name = commonElements[i].toString();
name = name.replace("Export ", "");
name = escapeStringForCSV(name);
line.append(name + fieldSeparator);
}
// Peak information: collect every property key across rows so the header is complete
Set<String> peakInformationFields = new HashSet<>();
for (PeakListRow row : peakList.getRows()) {
if (!filter.filter(row))
continue;
if (row.getPeakInformation() != null) {
for (String key : row.getPeakInformation().getAllProperties().keySet()) {
peakInformationFields.add(key);
}
}
}
if (exportAllPeakInfo)
for (String field : peakInformationFields) line.append(field + fieldSeparator);
// Data file elements
length = dataFileElements.length;
for (int df = 0; df < peakList.getNumberOfRawDataFiles(); df++) {
for (int i = 0; i < length; i++) {
name = rawDataFiles[df].getName();
name = name + " " + dataFileElements[i].toString();
name = escapeStringForCSV(name);
line.append(name + fieldSeparator);
}
}
line.append("\n");
try {
writer.write(line.toString());
} catch (Exception e) {
setStatus(TaskStatus.ERROR);
setErrorMessage("Could not write to file " + fileName);
return;
}
// Write data rows
for (PeakListRow peakListRow : peakList.getRows()) {
if (!filter.filter(peakListRow)) {
processedRows++;
continue;
}
// Cancel?
if (isCanceled()) {
return;
}
// Reset the buffer
line.setLength(0);
// Common elements
length = commonElements.length;
for (int i = 0; i < length; i++) {
switch(commonElements[i]) {
case ROW_ID:
line.append(peakListRow.getID() + fieldSeparator);
break;
case ROW_MZ:
line.append(peakListRow.getAverageMZ() + fieldSeparator);
break;
case ROW_RT:
line.append(peakListRow.getAverageRT() + fieldSeparator);
break;
case ROW_IDENTITY:
// Identity elements
PeakIdentity peakId = peakListRow.getPreferredPeakIdentity();
if (peakId == null) {
line.append(fieldSeparator);
break;
}
String propertyValue = peakId.toString();
propertyValue = escapeStringForCSV(propertyValue);
line.append(propertyValue + fieldSeparator);
break;
case ROW_IDENTITY_ALL:
// Identity elements
PeakIdentity[] peakIdentities = peakListRow.getPeakIdentities();
propertyValue = "";
for (int x = 0; x < peakIdentities.length; x++) {
if (x > 0)
propertyValue += idSeparator;
propertyValue += peakIdentities[x].toString();
}
propertyValue = escapeStringForCSV(propertyValue);
line.append(propertyValue + fieldSeparator);
break;
case ROW_IDENTITY_DETAILS:
peakId = peakListRow.getPreferredPeakIdentity();
if (peakId == null) {
line.append(fieldSeparator);
break;
}
propertyValue = peakId.getDescription();
if (propertyValue != null)
propertyValue = propertyValue.replaceAll("\\n", ";");
propertyValue = escapeStringForCSV(propertyValue);
line.append(propertyValue + fieldSeparator);
break;
case ROW_COMMENT:
String comment = escapeStringForCSV(peakListRow.getComment());
line.append(comment + fieldSeparator);
break;
case ROW_PEAK_NUMBER:
int numDetected = 0;
for (Feature p : peakListRow.getPeaks()) {
if (p.getFeatureStatus() == FeatureStatus.DETECTED) {
numDetected++;
}
}
line.append(numDetected + fieldSeparator);
break;
}
}
// Peak information
if (exportAllPeakInfo) {
// Append a value (or an empty field) for every collected key, even when
// the row has no peak information, so the CSV columns stay aligned.
Map<String, String> allPropertiesMap = peakListRow.getPeakInformation() != null ? peakListRow.getPeakInformation().getAllProperties() : Collections.<String, String>emptyMap();
for (String key : peakInformationFields) {
String value = allPropertiesMap.get(key);
if (value == null)
value = "";
line.append(value + fieldSeparator);
}
}
// Data file elements
length = dataFileElements.length;
for (RawDataFile dataFile : rawDataFiles) {
for (int i = 0; i < length; i++) {
Feature peak = peakListRow.getPeak(dataFile);
if (peak != null) {
switch(dataFileElements[i]) {
case PEAK_STATUS:
line.append(peak.getFeatureStatus() + fieldSeparator);
break;
case PEAK_NAME:
line.append(PeakUtils.peakToString(peak) + fieldSeparator);
break;
case PEAK_MZ:
line.append(peak.getMZ() + fieldSeparator);
break;
case PEAK_RT:
line.append(peak.getRT() + fieldSeparator);
break;
case PEAK_RT_START:
line.append(peak.getRawDataPointsRTRange().lowerEndpoint() + fieldSeparator);
break;
case PEAK_RT_END:
line.append(peak.getRawDataPointsRTRange().upperEndpoint() + fieldSeparator);
break;
case PEAK_DURATION:
line.append(RangeUtils.rangeLength(peak.getRawDataPointsRTRange()) + fieldSeparator);
break;
case PEAK_HEIGHT:
line.append(peak.getHeight() + fieldSeparator);
break;
case PEAK_AREA:
line.append(peak.getArea() + fieldSeparator);
break;
case PEAK_CHARGE:
line.append(peak.getCharge() + fieldSeparator);
break;
case PEAK_DATAPOINTS:
line.append(peak.getScanNumbers().length + fieldSeparator);
break;
case PEAK_FWHM:
line.append(peak.getFWHM() + fieldSeparator);
break;
case PEAK_TAILINGFACTOR:
line.append(peak.getTailingFactor() + fieldSeparator);
break;
case PEAK_ASYMMETRYFACTOR:
line.append(peak.getAsymmetryFactor() + fieldSeparator);
break;
case PEAK_MZMIN:
line.append(peak.getRawDataPointsMZRange().lowerEndpoint() + fieldSeparator);
break;
case PEAK_MZMAX:
line.append(peak.getRawDataPointsMZRange().upperEndpoint() + fieldSeparator);
break;
}
} else {
switch(dataFileElements[i]) {
case PEAK_STATUS:
line.append(FeatureStatus.UNKNOWN + fieldSeparator);
break;
default:
line.append("0" + fieldSeparator);
break;
}
}
}
}
line.append("\n");
try {
writer.write(line.toString());
} catch (Exception e) {
setStatus(TaskStatus.ERROR);
setErrorMessage("Could not write to file " + fileName);
return;
}
processedRows++;
}
}
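escapeStringForCSV is a private helper of CSVExportTask that this excerpt does not show. A hedged sketch of what such a helper typically does, reusing the fieldSeparator field from the code above (assumed here to be a String, as its use suggests); the actual implementation may differ:
// Sketch only: standard CSV escaping. Double embedded quotes, then quote
// any field containing the separator, a quote, or a newline.
// fieldSeparator is assumed to be a String field, as suggested by its use above.
private String escapeStringForCSV(final String input) {
if (input == null)
return "";
final String escaped = input.replace("\"", "\"\"");
if (escaped.contains(fieldSeparator) || escaped.contains("\"") || escaped.contains("\n"))
return "\"" + escaped + "\"";
return escaped;
}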
use of net.sf.mzmine.datamodel.PeakListRow in project mzmine2 by mzmine.
the class GnpsFbmnExportTask method copyPeakRow.
/**
* Create a copy of a feature list row.
*/
private static PeakListRow copyPeakRow(final PeakListRow row) {
// Copy the feature list row.
final PeakListRow newRow = new SimplePeakListRow(row.getID());
PeakUtils.copyPeakListRowProperties(row, newRow);
// Copy the peaks.
for (final Feature peak : row.getPeaks()) {
final Feature newPeak = new SimpleFeature(peak);
PeakUtils.copyPeakProperties(peak, newPeak);
newRow.addPeak(peak.getDataFile(), newPeak);
}
return newRow;
}
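A typical usage sketch, with the surrounding variables assumed: copying every row before export so later modifications leave the source feature list untouched:
// Hypothetical usage: build an independent list of row copies.
List<PeakListRow> copies = new ArrayList<>();
for (PeakListRow row : peakList.getRows()) {
copies.add(copyPeakRow(row));
}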