use of net.sf.mzmine.datamodel.impl.SimplePeakIdentity in project mzmine2 by mzmine.
the class FormulaPredictionPeakListTask method run.
/**
* @see java.lang.Runnable#run()
*/
@Override
public void run() {
setStatus(TaskStatus.PROCESSING);
totalRows = peakList.getNumberOfRows();
for (PeakListRow row : peakList.getRows()) {
if (row.getPeakIdentities().length > 0) {
continue;
}
this.searchedMass = (row.getAverageMZ() - ionType.getAddedMass()) * charge;
message = "Formula prediction for " + MZmineCore.getConfiguration().getMZFormat().format(searchedMass);
massRange = mzTolerance.getToleranceRange(searchedMass);
IChemObjectBuilder builder = SilentChemObjectBuilder.getInstance();
generator = new MolecularFormulaGenerator(builder, massRange.lowerEndpoint(), massRange.upperEndpoint(), elementCounts);
IMolecularFormula cdkFormula;
// Map from relative mass deviation (ppm) to formula string, used for sorting below
Map<Double, String> possibleFormulas = new TreeMap<>();
while ((cdkFormula = generator.getNextFormula()) != null) {
if (isCanceled())
return;
// Mass is ok, so test other constraints
if (checkConstraints(cdkFormula, row)) {
String formula = MolecularFormulaManipulator.getString(cdkFormula);
// Calculate relative mass deviation in ppm
double relMassDev = ((searchedMass - (FormulaUtils.calculateExactMass(formula))) / searchedMass) * 1000000;
// write to map
possibleFormulas.put(relMassDev, formula);
}
}
if (isCanceled())
return;
// Re-sort the candidate formulas by absolute relative mass deviation
Map<Double, String> possibleFormulasSorted = new TreeMap<>((Comparator<Double>) (o1, o2) -> Double.compare(Math.abs(o1), Math.abs(o2)));
possibleFormulasSorted.putAll(possibleFormulas);
// Add the top formula entries as identities to the row
int ctr = 0;
for (Map.Entry<Double, String> entry : possibleFormulasSorted.entrySet()) {
if (ctr < maxBestFormulasPerPeak) {
SimplePeakIdentity newIdentity = new SimplePeakIdentity(entry.getValue(), entry.getValue(), this.getClass().getName(), null, null);
row.addPeakIdentity(newIdentity, false);
ctr++;
}
}
if (isCanceled())
return;
finishedRows++;
}
if (isCanceled())
return;
logger.finest("Finished formula search for all the peaks");
setStatus(TaskStatus.FINISHED);
}
use of net.sf.mzmine.datamodel.impl.SimplePeakIdentity in project mzmine2 by mzmine.
the class LipidSearchTask method searchModifications.
private void searchModifications(PeakListRow rows, double lipidIonMass, LipidIdentity lipid, double[] lipidModificationMasses, Range<Double> mzTolModification) {
for (int j = 0; j < lipidModificationMasses.length; j++) {
if (mzTolModification.contains(lipidIonMass + (lipidModificationMasses[j]))) {
// Calculate relative mass deviation
double relMassDev = ((lipidIonMass + (lipidModificationMasses[j]) - rows.getAverageMZ()) / (lipidIonMass + lipidModificationMasses[j])) * 1000000;
// Add row identity
rows.addPeakIdentity(new SimplePeakIdentity(lipid + " " + lipidModification[j]), false);
rows.setComment("Ionization: " + ionizationType.getAdduct() + " " + lipidModification[j] + ", Δ " + NumberFormat.getInstance().format(relMassDev) + " ppm");
logger.info("Found modified lipid: " + lipid.getName() + " " + lipidModification[j] + ", Δ " + NumberFormat.getInstance().format(relMassDev) + " ppm");
// Notify the GUI about the change in the project
MZmineCore.getProjectManager().getCurrentProject().notifyObjectChanged(rows, false);
}
}
}
use of net.sf.mzmine.datamodel.impl.SimplePeakIdentity in project mzmine2 by mzmine.
the class CustomDBSearchTask method processOneLine.
private void processOneLine(String[] values) {
int numOfColumns = Math.min(fieldOrder.length, values.length);
String lineID = null, lineName = null, lineFormula = null;
double lineMZ = 0, lineRT = 0;
for (int i = 0; i < numOfColumns; i++) {
if (fieldOrder[i] == FieldItem.FIELD_ID)
lineID = values[i];
if (fieldOrder[i] == FieldItem.FIELD_NAME)
lineName = values[i];
if (fieldOrder[i] == FieldItem.FIELD_FORMULA)
lineFormula = values[i];
if (fieldOrder[i] == FieldItem.FIELD_MZ)
lineMZ = Double.parseDouble(values[i]);
if (fieldOrder[i] == FieldItem.FIELD_RT)
lineRT = Double.parseDouble(values[i]);
}
SimplePeakIdentity newIdentity = new SimplePeakIdentity(lineName, lineFormula, dataBaseFile.getName(), lineID, null);
for (PeakListRow peakRow : peakList.getRows()) {
Range<Double> mzRange = mzTolerance.getToleranceRange(peakRow.getAverageMZ());
Range<Double> rtRange = rtTolerance.getToleranceRange(peakRow.getAverageRT());
boolean mzMatches = (lineMZ == 0d) || mzRange.contains(lineMZ);
boolean rtMatches = (lineRT == 0d) || rtRange.contains(lineRT);
if (mzMatches && rtMatches) {
logger.finest("Found compound " + lineName + " (m/z " + lineMZ + ", RT " + lineRT + ")");
// add new identity to the row
peakRow.addPeakIdentity(newIdentity, false);
// Notify the GUI about the change in the project
MZmineCore.getProjectManager().getCurrentProject().notifyObjectChanged(peakRow, false);
}
}
}
use of net.sf.mzmine.datamodel.impl.SimplePeakIdentity in project mzmine2 by mzmine.
the class CameraSearchTask method addPseudoSpectraIdentities.
/**
* Add pseudo-spectra identities.
*
* @param peaks peaks to annotate with identities.
* @param spectra the pseudo-spectrum IDs, one per peak.
* @param isotopes the isotope annotations, one per peak (may be null).
* @param adducts the adduct annotations, one per peak (may be null).
*/
private void addPseudoSpectraIdentities(final Feature[] peaks, final int[] spectra, final String[] isotopes, final String[] adducts) {
// Add identities for each peak.
int peakIndex = 0;
for (final Feature peak : peaks) {
// Create pseudo-spectrum identity
final SimplePeakIdentity identity = new SimplePeakIdentity("Pseudo-spectrum #" + String.format("%03d", spectra[peakIndex]));
identity.setPropertyValue(PeakIdentity.PROPERTY_METHOD, "Bioconductor CAMERA");
// Add isotope info, if any.
if (isotopes != null) {
final String isotope = isotopes[peakIndex].trim();
if (isotope.length() > 0) {
// Parse the isotope pattern.
final Matcher matcher = ISOTOPE_PATTERN.matcher(isotope);
if (matcher.matches()) {
// identity.setPropertyValue("Isotope", matcher.group(1));
identity.setPropertyValue("Isotope", isotope);
} else {
LOG.warning("Irregular isotope value: " + isotope);
}
}
}
if (adducts != null) {
final String adduct = adducts[peakIndex].trim();
if (adduct.length() > 0)
identity.setPropertyValue("Adduct", adduct);
}
// Add identity to peak's row.
PeakListRow row = peakList.getPeakRow(peak);
// Remove any previous identities before adding the pseudo-spectrum identity
for (PeakIdentity peakIdentity : row.getPeakIdentities()) row.removePeakIdentity(peakIdentity);
row.addPeakIdentity(identity, true);
peakIndex++;
}
}
use of net.sf.mzmine.datamodel.impl.SimplePeakIdentity in project mzmine2 by mzmine.
the class TargetedPeakDetectionModuleTask method run.
public void run() {
setStatus(TaskStatus.PROCESSING);
// Get the total number of MS level 1 scans in the data file
totalScans = dataFile.getNumOfScans(1);
// Create new feature list
processedPeakList = new SimplePeakList(dataFile.getName() + " " + suffix, dataFile);
List<PeakInformation> peaks = this.readFile();
if (peaks == null || peaks.isEmpty()) {
setStatus(TaskStatus.ERROR);
setErrorMessage("Could not read file or the file is empty");
return;
}
// Fill new feature list with empty rows
for (int row = 0; row < peaks.size(); row++) {
PeakListRow newRow = new SimplePeakListRow(ID++);
processedPeakList.addRow(newRow);
}
// Canceled?
if (isCanceled()) {
return;
}
List<Gap> gaps = new ArrayList<Gap>();
// Create a gap (target m/z and RT window) for each compound in the list
for (int row = 0; row < peaks.size(); row++) {
PeakListRow newRow = processedPeakList.getRow(row);
// Create a new gap
Range<Double> mzRange = mzTolerance.getToleranceRange(peaks.get(row).getMZ());
Range<Double> rtRange = rtTolerance.getToleranceRange(peaks.get(row).getRT());
newRow.addPeakIdentity(new SimplePeakIdentity(peaks.get(row).getName()), true);
Gap newGap = new Gap(newRow, dataFile, mzRange, rtRange, intTolerance, noiseLevel);
gaps.add(newGap);
}
// Stop processing this file if there are no gaps
if (gaps.isEmpty()) {
processedScans += dataFile.getNumOfScans();
}
// Get all scans of this data file
int[] scanNumbers = dataFile.getScanNumbers(msLevel);
if (scanNumbers == null) {
logger.log(Level.WARNING, "Could not find any scans at MS level " + msLevel + " in the data file");
setStatus(TaskStatus.ERROR);
return;
}
// Process each scan
for (int scanNumber : scanNumbers) {
// Canceled?
if (isCanceled()) {
return;
}
// Get the scan
Scan scan = dataFile.getScan(scanNumber);
// Feed this scan to all gaps
for (Gap gap : gaps) {
gap.offerNextScan(scan);
}
processedScans++;
}
// Finalize gaps
for (Gap gap : gaps) {
gap.noMoreOffers();
}
// Append processed feature list to the project
project.addPeakList(processedPeakList);
// Add quality parameters to peaks
QualityParameters.calculateQualityParameters(processedPeakList);
// Add task description to peakList
processedPeakList.addDescriptionOfAppliedTask(new SimplePeakListAppliedMethod("Targeted feature detection ", parameters));
logger.log(Level.INFO, "Finished targeted feature detection on {0}", this.dataFile);
setStatus(TaskStatus.FINISHED);
}