use of net.sf.mzmine.datamodel.PeakListRow in project mzmine2 by mzmine.
the class NistMsSearchTask method findPeakRowGroup.
/**
 * Finds the group of peak rows contemporaneous with this task's row.
 *
 * @return the set containing the row itself plus every non-identical row whose average
 *         retention time is within tolerance (and, if same-ID matching is enabled, which
 *         shares an identity with it).
 */
private Set<PeakListRow> findPeakRowGroup() {

    // A row is always contemporaneous with itself.
    final Set<PeakListRow> group = new HashSet<PeakListRow>(INITIAL_NEIGHBOURHOOD_SIZE);
    group.add(peakListRow);

    // Scan every other row in the feature list for contemporaneity.
    final double referenceRT = peakListRow.getAverageRT();
    for (final PeakListRow candidate : peakList.getRows()) {

        if (peakListRow.equals(candidate)) {
            continue; // Skip the row itself - already added above.
        }

        // Within RT tolerance, and (when required) sharing an identity?
        if (rtTolerance.checkWithinTolerance(referenceRT, candidate.getAverageRT())
            && (!sameIds || checkSameIds(peakListRow, candidate))) {
            group.add(candidate);
        }
    }
    return group;
}
use of net.sf.mzmine.datamodel.PeakListRow in project mzmine2 by mzmine.
the class NistMsSearchTask method writeSpectraFile.
/**
* Writes a search spectrum file for the given row and its neighbours.
*
* @param peakRow the row.
* @param neighbourRows its neighbouring rows.
* @return the file.
* @throws IOException if an i/o problem occurs.
*/
private File writeSpectraFile(final PeakListRow peakRow, final Collection<PeakListRow> neighbourRows) throws IOException {

    final File spectraFile = File.createTempFile(SPECTRA_FILE_PREFIX, SPECTRA_FILE_SUFFIX);
    spectraFile.deleteOnExit();

    // try-with-resources: the writer is closed on every path, including when
    // the BufferedWriter constructor or any write throws (the old try/finally
    // leaked the FileWriter if BufferedWriter construction failed).
    try (BufferedWriter writer = new BufferedWriter(new FileWriter(spectraFile))) {

        LOG.finest("Writing spectra to file " + spectraFile);

        // Header: spectrum name (identity appended when known, truncated to the
        // maximum length accepted by NIST), then the number of peaks.
        final PeakIdentity identity = peakRow.getPreferredPeakIdentity();
        final String name = SPECTRUM_NAME_PREFIX + peakRow.getID()
            + (identity == null ? "" : " (" + identity + ')') + " of " + peakList.getName();
        writer.write("Name: " + name.substring(0, Math.min(SPECTRUM_NAME_MAX_LENGTH, name.length())));
        writer.newLine();
        writer.write("Num Peaks: " + neighbourRows.size());
        writer.newLine();

        // One tab-separated line per neighbour: mass and intensity of its best peak.
        for (final PeakListRow row : neighbourRows) {
            final Feature peak = row.getBestPeak();
            final int charge = peak.getCharge();
            // Remove the ionization adduct mass and scale by charge
            // (charge 0 is treated as singly charged).
            final double mass = (peak.getMZ() - ionType.getAddedMass()) * (charge == 0 ? 1.0 : (double) charge);
            writer.write(mass + "\t" + peak.getHeight());
            writer.newLine();
        }
    }
    return spectraFile;
}
use of net.sf.mzmine.datamodel.PeakListRow in project mzmine2 by mzmine.
the class PeakListIdentificationTask method run.
@Override
public void run() {
if (!isCanceled()) {
try {
setStatus(TaskStatus.PROCESSING);
// Create database gateway.
gateway = db.getModule().getGatewayClass().newInstance();
// Identify the feature list rows starting from the biggest peaks.
final PeakListRow[] rows = peakList.getRows();
Arrays.sort(rows, new PeakListRowSorter(SortingProperty.Area, SortingDirection.Descending));
// Initialize counters.
numItems = rows.length;
// Process rows.
for (finishedItems = 0; !isCanceled() && finishedItems < numItems; finishedItems++) {
// Retrieve results for each row.
retrieveIdentification(rows[finishedItems]);
}
if (!isCanceled()) {
setStatus(TaskStatus.FINISHED);
}
} catch (Throwable t) {
final String msg = "Could not search " + db;
LOG.log(Level.WARNING, msg, t);
setStatus(TaskStatus.ERROR);
setErrorMessage(msg + ": " + ExceptionUtils.exceptionToString(t));
}
}
}
use of net.sf.mzmine.datamodel.PeakListRow in project mzmine2 by mzmine.
the class NistMsSearchTask method trimNeighbours.
/**
* Trims the row neighbourhoods to the specified size.
*
* @param neighbourhoods map from each feature list row to its neighbours.
*/
private void trimNeighbours(final Map<PeakListRow, Set<PeakListRow>> neighbourhoods) {

    if (maxPeaks <= 0) {
        return; // No limit configured - nothing to trim.
    }

    // Process each row's neighbour set independently.
    for (final Entry<PeakListRow, Set<PeakListRow>> entry : neighbourhoods.entrySet()) {

        final PeakListRow centre = entry.getKey();
        final Set<PeakListRow> hood = entry.getValue();

        if (hood.size() <= maxPeaks) {
            continue; // Already within the limit.
        }

        // Rank the other rows: smallest RT difference first, then highest
        // average intensity on ties. The centre row is excluded from ranking
        // because it is always kept.
        final double centreRT = centre.getAverageRT();
        final List<PeakListRow> ranked = new ArrayList<PeakListRow>(hood);
        ranked.remove(centre);
        Collections.sort(ranked, new Comparator<PeakListRow>() {
            @Override
            public int compare(final PeakListRow a, final PeakListRow b) {
                final int byRT = Double.compare(Math.abs(centreRT - a.getAverageRT()),
                    Math.abs(centreRT - b.getAverageRT()));
                if (byRT != 0) {
                    return byRT;
                }
                // Tie-break on average height, descending.
                return Double.compare(b.getAverageHeight(), a.getAverageHeight());
            }
        });

        // Keep the centre row plus the best (maxPeaks - 1) neighbours.
        hood.clear();
        hood.add(centre);
        hood.addAll(ranked.subList(0, maxPeaks - 1));
    }
}
use of net.sf.mzmine.datamodel.PeakListRow in project mzmine2 by mzmine.
the class NistMsSearchTask method nistSearch.
/**
* Run the NIST search.
*
* @throws IOException if there are i/o problems.
*/
private void nistSearch() throws IOException {

    // Waiting to get the SEMAPHORE: only one instance of NIST MS Search can
    // run at a time.
    setStatus(TaskStatus.WAITING);
    synchronized (SEMAPHORE) {

        File locatorFile2 = null;
        try {
            if (!isCanceled()) {
                setStatus(TaskStatus.PROCESSING);

                // Configure locator files: the primary locator file names the
                // secondary one used to hand the spectra file to MS Search.
                final File locatorFile1 = new File(nistMsSearchDir, PRIMARY_LOCATOR_FILE_NAME);
                locatorFile2 = getSecondLocatorFile(locatorFile1);
                if (locatorFile2 == null) {
                    throw new IOException("Primary locator file " + locatorFile1 + " doesn't contain the name of a valid file.");
                }

                // Is MS Search already running? The secondary locator file is
                // deleted in the finally block below on completion, so its
                // presence indicates another search is still in progress.
                if (locatorFile2.exists()) {
                    throw new IllegalStateException("NIST MS Search appears to be busy - please wait until it finishes its current task and then try again. Alternatively, try manually deleting the file " + locatorFile2);
                }
            }

            // Single or multiple row search?
            final PeakListRow[] peakListRows;
            final Map<PeakListRow, Set<PeakListRow>> rowHoods;
            if (peakListRow == null) {
                // Whole feature list: group all rows into neighbourhoods.
                peakListRows = peakList.getRows();
                rowHoods = groupPeakRows();
            } else {
                // Single row: compute its neighbourhood directly.
                peakListRows = new PeakListRow[] { peakListRow };
                rowHoods = new HashMap<PeakListRow, Set<PeakListRow>>(1);
                rowHoods.put(peakListRow, findPeakRowGroup());
            }

            // Reduce neighbourhoods to maximum number of peaks.
            trimNeighbours(rowHoods);

            // Store search results for each neighbourhood - to avoid repeat
            // searches: rows sharing the same neighbourhood set share results.
            final int numRows = peakListRows.length;
            final Map<Set<PeakListRow>, List<PeakIdentity>> rowIdentities = new HashMap<Set<PeakListRow>, List<PeakIdentity>>(numRows);

            // Search command string.
            final String command = nistMsSearchExe.getAbsolutePath() + ' ' + COMMAND_LINE_ARGS;

            // Perform searches for each feature list row.
            progress = 0;
            progressMax = numRows;
            for (final PeakListRow row : peakListRows) {

                // Get the row's neighbours.
                final Set<PeakListRow> neighbours = rowHoods.get(row);

                // Has this neighbourhood's search been run already?
                if (!rowIdentities.containsKey(neighbours)) {
                    if (!isCanceled()) {

                        // Write spectra file for the row and its neighbours.
                        final File spectraFile = writeSpectraFile(row, neighbours);

                        // Point the secondary locator file at the spectra file.
                        writeSecondaryLocatorFile(locatorFile2, spectraFile);

                        // Run the external NIST MS Search executable.
                        runNistMsSearch(command);

                        // Read the search results file and store the results.
                        rowIdentities.put(neighbours, readSearchResults(row));
                    }
                }

                // Get the search results (null if cancelled before searching).
                final List<PeakIdentity> identities = rowIdentities.get(neighbours);
                if (identities != null) {

                    // Add (copy of) identities to peak row; the copy keeps the
                    // cached list independent of per-row identity objects.
                    int maxMatchFactor = -1;
                    for (final PeakIdentity identity : identities) {

                        // Copy the identity.
                        final PeakIdentity id = new SimplePeakIdentity((Hashtable<String, String>) identity.getAllProperties());

                        // Best match factor so far? Then mark it preferred.
                        final boolean isPreferred;
                        final int matchFactor = Integer.parseInt(id.getPropertyValue(MATCH_FACTOR_PROPERTY));
                        if (matchFactor > maxMatchFactor) {
                            maxMatchFactor = matchFactor;
                            isPreferred = true;
                        } else {
                            isPreferred = false;
                        }

                        // Add peak identity.
                        row.addPeakIdentity(id, isPreferred);
                    }

                    // Notify the GUI about the change in the project.
                    MZmineCore.getProjectManager().getCurrentProject().notifyObjectChanged(row, false);
                }
                progress++;
            }
        } finally {

            // Clean up: delete the secondary locator file so later searches
            // don't see it and conclude MS Search is still busy.
            if (locatorFile2 != null) {
                locatorFile2.delete();
            }
        }
    }
}
Aggregations