Use of org.baderlab.csplugins.enrichmentmap.model.SetOfEnrichmentResults in project EnrichmentMapApp by BaderLab: class LegacySessionLoader, method loadSession.
/**
 * Restore Enrichment Maps.
 *
 * @param session the session being loaded; its app file list contains the EnrichmentMap files
 */
@SuppressWarnings("unchecked")
public void loadSession(CySession session) {
Map<Long, EnrichmentMapParameters> paramsMap = new HashMap<>();
Map<Long, EnrichmentMap> enrichmentMapMap = new HashMap<>();
List<File> fileList = session.getAppFileListMap().get(CyActivator.APP_NAME);
try {
//go through the .props files first to create the objects that the other files can then be added to.
for (File prop_file : fileList) {
if (prop_file.getName().contains(".props")) {
InputStream reader = streamUtil.getInputStream(prop_file.getAbsolutePath());
String fullText = new Scanner(reader, "UTF-8").useDelimiter("\\A").next();
//Given the file with all the parameters, create a new parameters object
EnrichmentMapParameters params = enrichmentMapParametersFactory.create(fullText);
EnrichmentMap em = new EnrichmentMap(params.getCreationParameters(), serviceRegistrar);
//get the network name
String param_name = em.getName();
//TODO:distinguish between GSEA and EM saved sessions
String props_name = (prop_file.getName().split("\\."))[0];
String networkName = param_name;
//related to bug ticket #49
if (!props_name.equalsIgnoreCase(param_name))
networkName = props_name;
//after associating the properties with the network,
//initialize each DataSet that we have files for
HashMap<String, DataSetFiles> files = params.getFiles();
for (Iterator<String> j = params.getFiles().keySet().iterator(); j.hasNext(); ) {
String current_dataset = j.next();
Method method = EnrichmentMapParameters.stringToMethod(params.getMethod());
em.createDataSet(current_dataset, method, files.get(current_dataset));
}
CyNetwork network = getNetworkByName(networkName);
Long suid = network.getSUID();
em.setNetworkID(suid);
paramsMap.put(suid, params);
enrichmentMapMap.put(suid, em);
}
}
// go through the rest of the files
for (File propFile : fileList) {
FileNameParts parts = ParseFileName(propFile);
if (parts == null || propFile.getName().contains(".props"))
continue;
CyNetwork net = getNetworkByName(parts.name);
EnrichmentMap em = net == null ? null : enrichmentMapMap.get(net.getSUID());
EnrichmentMapParameters params = paramsMap.get(net.getSUID());
Method method = EnrichmentMapParameters.stringToMethod(params.getMethod());
if (em == null) {
System.out.println("network for file " + propFile.getName() + " does not exist.");
} else if ((!propFile.getName().contains(".props")) && (!propFile.getName().contains(".expression1.txt")) && (!propFile.getName().contains(".expression2.txt"))) {
HashMap<String, String> props = params.getProps();
//if this is a dataset-specific file, make sure there is a dataset object for it
if (!(parts.dataset == null) && em.getDataSet(parts.dataset) == null && !parts.dataset.equalsIgnoreCase("signature"))
em.createDataSet(parts.dataset, method, params.getFiles().get(parts.dataset));
if (parts.type == null)
System.out.println("Sorry, unable to determine the type of the file: " + propFile.getName());
//read the file
InputStream reader = streamUtil.getInputStream(propFile.getAbsolutePath());
String fullText = new Scanner(reader, "UTF-8").useDelimiter("\\A").next();
//if the file is empty then skip it
if (fullText == null || fullText.equalsIgnoreCase(""))
continue;
if (propFile.getName().contains(".gmt")) {
HashMap<String, GeneSet> gsMap = (HashMap<String, GeneSet>) params.repopulateHashmap(fullText, 1);
if (propFile.getName().contains(".signature.gmt")) {
// TODO Find a better way to serialize EMSignatureDataSet
String sdsName = propFile.getName().replace(".signature.gmt", "");
sdsName = NamingUtil.getUniqueName(sdsName, em.getSignatureDataSets().keySet());
EMSignatureDataSet sigDataSet = new EMSignatureDataSet(sdsName);
em.addSignatureDataSet(sigDataSet);
SetOfGeneSets sigGeneSets = sigDataSet.getGeneSetsOfInterest();
gsMap.forEach((k, v) -> sigGeneSets.addGeneSet(k, v));
} else if (propFile.getName().contains(".set2.gmt")) {
// account for legacy session files
if (em.getAllGeneSets().containsKey(LegacySupport.DATASET2)) {
SetOfGeneSets gs = new SetOfGeneSets(LegacySupport.DATASET2, props);
gs.setGeneSets(gsMap);
}
} else {
SetOfGeneSets gs = new SetOfGeneSets(parts.dataset, props);
gs.setGeneSets(gsMap);
em.getDataSets().get(parts.dataset).setSetOfGeneSets(gs);
}
}
if (propFile.getName().contains(".genes.txt")) {
HashMap<String, Integer> genes = params.repopulateHashmap(fullText, 2);
genes.forEach(em::addGene);
//ticket #188 - unable to open session files that have empty enrichment maps.
if (genes != null && !genes.isEmpty())
// Ticket #107 : restore also gene count (needed to determine the next free hash in case we do PostAnalysis with a restored session)
em.setNumberOfGenes(Math.max(em.getNumberOfGenes(), Collections.max(genes.values()) + 1));
}
if (propFile.getName().contains(".hashkey2genes.txt")) {
HashMap<Integer, String> hashkey2gene = params.repopulateHashmap(fullText, 5);
//ticket #188 - unable to open session files that have empty enrichment maps.
if (hashkey2gene != null && !hashkey2gene.isEmpty())
// Ticket #107 : restore also gene count (needed to determine the next free hash in case we do PostAnalysis with a restored session)
em.setNumberOfGenes(Math.max(em.getNumberOfGenes(), Collections.max(hashkey2gene.keySet()) + 1));
}
if ((parts.type != null && (parts.type.equalsIgnoreCase("ENR") || (parts.type.equalsIgnoreCase("SubENR")))) || propFile.getName().contains(".ENR1.txt") || propFile.getName().contains(".SubENR1.txt")) {
SetOfEnrichmentResults enrichments;
int temp = 1;
//check to see if this dataset has enrichment results already
if (parts.dataset != null && em.getDataSet(parts.dataset).getEnrichments() != null) {
enrichments = em.getDataSet(parts.dataset).getEnrichments();
} else if (parts.dataset == null) {
enrichments = em.getDataSet(LegacySupport.DATASET1).getEnrichments();
/*enrichments = new SetOfEnrichmentResults(EnrichmentMap.DATASET1,props);
em.getDataset(EnrichmentMap.DATASET1).setEnrichments(enrichments);*/
} else {
enrichments = new SetOfEnrichmentResults(parts.dataset, props);
em.getDataSet(parts.dataset).setEnrichments(enrichments);
}
if (parts.type.equalsIgnoreCase("ENR") || propFile.getName().contains(".ENR1.txt")) {
if (params.getMethod().equalsIgnoreCase(EnrichmentMapParameters.method_GSEA))
enrichments.setEnrichments(params.repopulateHashmap(fullText, 3));
else
enrichments.setEnrichments(params.repopulateHashmap(fullText, 4));
}
}
//it would only happen for sessions saved with version 0.8
if (propFile.getName().contains(".RANKS1.txt") || propFile.getName().contains(".RANKS1Genes.txt")) {
Ranking new_ranking;
//Check to see if there is already GSEARanking
if (em.getDataSet(LegacySupport.DATASET1).getExpressionSets().getAllRanksNames().contains(Ranking.GSEARanking)) {
new_ranking = em.getDataSet(LegacySupport.DATASET1).getExpressionSets().getRanksByName(Ranking.GSEARanking);
} else {
new_ranking = new Ranking();
em.getDataSet(LegacySupport.DATASET1).getExpressionSets().addRanks(Ranking.GSEARanking, new_ranking);
}
if (propFile.getName().contains(".RANKS1.txt")) {
Map<Integer, Rank> ranks = (Map<Integer, Rank>) params.repopulateHashmap(fullText, 7);
ranks.forEach(new_ranking::addRank);
}
// if(prop_file.getName().contains(".RANKS1Genes.txt"))
// new_ranking.setRank2gene(em.getParams().repopulateHashmap(fullText,7));
// if(prop_file.getName().contains(".RANKS1.txt"))
// new_ranking.setRanking(em.getParams().repopulateHashmap(fullText,6));
}
if (propFile.getName().contains(".RANKS.txt")) {
if (parts.ranks_name == null) {
//we need to get the name of this set of rankings
// network_name.ranking_name.ranks.txt --> split by "." and get 2
String[] file_name_tokens = (propFile.getName()).split("\\.");
if ((file_name_tokens.length == 4) && (file_name_tokens[1].equals("Dataset 1 Ranking") || file_name_tokens[1].equals("Dataset 2 Ranking")) || (propFile.getName().contains(Ranking.GSEARanking)))
parts.ranks_name = Ranking.GSEARanking;
else //this is an extra rank file for backwards compatibility. Ignore it.
if ((file_name_tokens.length == 4) && (file_name_tokens[1].equals("Dataset 1") || file_name_tokens[1].equals("Dataset 2")) && file_name_tokens[2].equals("RANKS"))
continue;
else
//file name is not structured properly --> default to file name
parts.ranks_name = propFile.getName();
}
Ranking new_ranking = new Ranking();
Map<Integer, Rank> ranks = (Map<Integer, Rank>) params.repopulateHashmap(fullText, 6);
ranks.forEach(new_ranking::addRank);
if (parts.dataset != null)
em.getDataSet(parts.dataset).getExpressionSets().addRanks(parts.ranks_name, new_ranking);
else
em.getDataSet(LegacySupport.DATASET1).getExpressionSets().addRanks(parts.ranks_name, new_ranking);
}
//Deal with legacy issues
if (params.isTwoDatasets()) {
//make sure there is a Dataset2
if (!em.getDataSets().containsKey(LegacySupport.DATASET2))
em.createDataSet(LegacySupport.DATASET2, method, new DataSetFiles());
if (propFile.getName().contains(".ENR2.txt") || propFile.getName().contains(".SubENR2.txt")) {
SetOfEnrichmentResults enrichments;
//check to see if this dataset has enrichment results already
if (em.getDataSet(LegacySupport.DATASET2).getEnrichments() != null) {
enrichments = em.getDataSet(LegacySupport.DATASET2).getEnrichments();
} else {
enrichments = new SetOfEnrichmentResults(LegacySupport.DATASET2, props);
em.getDataSet(LegacySupport.DATASET2).setEnrichments(enrichments);
}
if (propFile.getName().contains(".ENR2.txt")) {
if (params.getMethod().equalsIgnoreCase(EnrichmentMapParameters.method_GSEA))
enrichments.setEnrichments(params.repopulateHashmap(fullText, 3));
else
enrichments.setEnrichments(params.repopulateHashmap(fullText, 4));
}
}
//it would only happen for sessions saved with version 0.8
if (propFile.getName().contains(".RANKS2.txt") || propFile.getName().contains(".RANKS2Genes.txt")) {
Ranking new_ranking;
// Check to see if there is already GSEARanking
if (em.getDataSet(LegacySupport.DATASET2).getExpressionSets().getAllRanksNames().contains(Ranking.GSEARanking)) {
new_ranking = em.getDataSet(LegacySupport.DATASET2).getExpressionSets().getRanksByName(Ranking.GSEARanking);
} else {
new_ranking = new Ranking();
em.getDataSet(LegacySupport.DATASET2).getExpressionSets().addRanks(Ranking.GSEARanking, new_ranking);
}
if (propFile.getName().contains(".RANKS2.txt")) {
Map<Integer, Rank> ranks = (Map<Integer, Rank>) params.repopulateHashmap(fullText, 6);
ranks.forEach(new_ranking::addRank);
}
}
}
}
}
//restore the expression files using info from the parameters
for (int i = 0; i < fileList.size(); i++) {
File prop_file = fileList.get(i);
FileNameParts parts_exp = ParseFileName(prop_file);
//unrecognized file
if ((parts_exp == null) || (parts_exp.name == null))
continue;
CyNetwork net = getNetworkByName(parts_exp.name);
EnrichmentMap map = net == null ? null : enrichmentMapMap.get(net.getSUID());
EnrichmentMapParameters params = paramsMap.get(net.getSUID());
Map<String, String> props = params.getProps();
if (parts_exp.type != null && parts_exp.type.equalsIgnoreCase("expression")) {
if (map.getDataSets().containsKey(parts_exp.dataset)) {
EMDataSet ds = map.getDataSet(parts_exp.dataset);
ds.getDataSetFiles().setExpressionFileName(prop_file.getAbsolutePath());
ds.getExpressionSets().setFilename(prop_file.getAbsolutePath());
ExpressionFileReaderTask expressionFile1 = new ExpressionFileReaderTask(ds);
GeneExpressionMatrix matrix = expressionFile1.parse();
matrix.restoreProps(parts_exp.dataset, props);
}
}
//Deal with legacy session files.
if (prop_file.getName().contains("expression1.txt")) {
EMDataSet ds1 = map.getDataSet(LegacySupport.DATASET1);
ds1.getDataSetFiles().setExpressionFileName(prop_file.getAbsolutePath());
ds1.getExpressionSets().setFilename(prop_file.getAbsolutePath());
ExpressionFileReaderTask expressionFile1 = new ExpressionFileReaderTask(ds1);
expressionFile1.parse();
}
if (prop_file.getName().contains("expression2.txt")) {
EMDataSet ds2 = map.getDataSet(LegacySupport.DATASET2);
ds2.getDataSetFiles().setExpressionFileName(prop_file.getAbsolutePath());
ds2.getExpressionSets().setFilename(prop_file.getAbsolutePath());
ExpressionFileReaderTask expressionFile2 = new ExpressionFileReaderTask(ds2);
expressionFile2.parse();
//check whether we are dealing with two distinct expression files.
if (map.getDataSet(LegacySupport.DATASET2) != null && map.getDataSet(LegacySupport.DATASET2).getGeneSetsOfInterest() != null && !map.getDataSet(LegacySupport.DATASET2).getGeneSetsOfInterest().getGeneSets().isEmpty()) {
map.setDistinctExpressionSets(true);
map.getDataSet(LegacySupport.DATASET1).setDataSetGenes(new HashSet<Integer>((Set<Integer>) map.getDataSet(LegacySupport.DATASET1).getExpressionSets().getGeneIds()));
map.getDataSet(LegacySupport.DATASET2).setDataSetGenes(new HashSet<Integer>((Set<Integer>) map.getDataSet(LegacySupport.DATASET2).getExpressionSets().getGeneIds()));
}
}
}
//iterate over the networks
for (Iterator<Long> j = enrichmentMapMap.keySet().iterator(); j.hasNext(); ) {
Long id = j.next();
EnrichmentMap map = enrichmentMapMap.get(id);
//only initialize objects if there is a map for this network
if (map != null) {
if (map.getDataSets().size() > 1) {
Set<Integer> dataset1_genes = map.getDataSets().get(LegacySupport.DATASET1).getDataSetGenes();
Set<Integer> dataset2_genes = map.getDataSets().get(LegacySupport.DATASET2).getDataSetGenes();
if (!dataset1_genes.equals(dataset2_genes))
map.setDistinctExpressionSets(true);
}
//initialize the Genesets (makes sure the leading edge is set correctly)
//Initialize the set of genesets and GSEA results that we want to compute over
InitializeGenesetsOfInterestTask genesets_init = new InitializeGenesetsOfInterestTask(map);
// MKTODO really?
genesets_init.setThrowIfMissing(false);
genesets_init.initializeSets(null);
// //for each map compute the similarity matrix, (easier than storing it) compute the geneset similarities
// ComputeSimilarityTask similarities = new ComputeSimilarityTask(map, ComputeSimilarityTask.ENRICHMENT);
// Map<String, GenesetSimilarity> similarity_results = similarities.computeGenesetSimilarities(null);
// map.setGenesetSimilarity(similarity_results);
//
// // also compute geneset similarities between Enrichment- and Signature Genesets (if any)
// if (! map.getSignatureGenesets().isEmpty()){
// ComputeSimilarityTask sigSimilarities = new ComputeSimilarityTask(map, ComputeSimilarityTask.SIGNATURE);
// Map<String, GenesetSimilarity> sig_similarity_results = sigSimilarities.computeGenesetSimilarities(null);
// map.getGenesetSimilarity().putAll(sig_similarity_results);
// }
}
//end of if(map != null)
}
for (Iterator<Long> j = enrichmentMapMap.keySet().iterator(); j.hasNext(); ) {
Long id = j.next();
CyNetwork currentNetwork = cyNetworkManager.getNetwork(id);
EnrichmentMap map = enrichmentMapMap.get(id);
map.setLegacy(true);
emManager.registerEnrichmentMap(map);
if (!j.hasNext()) {
//set the last network to be the one viewed and initialize the parameters panel
cyApplicationManager.setCurrentNetwork(currentNetwork);
}
}
} catch (Exception ee) {
ee.printStackTrace();
}
}
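The SetOfEnrichmentResults handling buried in the loop above follows one pattern: look up the per-dataset results container, lazily create it when the legacy files did not provide one, and bulk-load it from the serialized text. A minimal sketch of that pattern, assuming an EnrichmentMap and EnrichmentMapParameters already restored as in loadSession; the helper method and its name are illustrative, not part of the project:
@SuppressWarnings("unchecked")
private void restoreEnrichments(EnrichmentMap em, EnrichmentMapParameters params,
        HashMap<String, String> props, String datasetName, String fullText) {
    EMDataSet ds = em.getDataSet(datasetName);
    SetOfEnrichmentResults enrichments = ds.getEnrichments();
    if (enrichments == null) {
        // lazily create the container, keyed by the dataset name and the legacy props
        enrichments = new SetOfEnrichmentResults(datasetName, props);
        ds.setEnrichments(enrichments);
    }
    // GSEA results and generic results are serialized with different layouts (modes 3 vs 4)
    if (params.getMethod().equalsIgnoreCase(EnrichmentMapParameters.method_GSEA))
        enrichments.setEnrichments(params.repopulateHashmap(fullText, 3));
    else
        enrichments.setEnrichments(params.repopulateHashmap(fullText, 4));
}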
Use of org.baderlab.csplugins.enrichmentmap.model.SetOfEnrichmentResults in project EnrichmentMapApp by BaderLab: class ParseGenericEnrichmentResults, method parseLines.
@Override
public void parseLines(List<String> lines, EMDataSet dataset, TaskMonitor taskMonitor) {
if (taskMonitor == null)
taskMonitor = new NullTaskMonitor();
taskMonitor.setTitle("Parsing Generic Result file");
//Get the current genesets so we can check that all the results are in the geneset list
//and put the size of the genesets into the visual style
Map<String, GeneSet> genesets = dataset.getSetOfGeneSets().getGeneSets();
int currentProgress = 0;
int maxValue = lines.size();
taskMonitor.setStatusMessage("Parsing Generic Results file - " + maxValue + " rows");
boolean FDR = false;
//skip the first line which just has the field names (start i=1)
//check to see how many columns the data has
String line = lines.get(0);
String[] tokens = line.split("\t");
int length = tokens.length;
EnrichmentMap map = dataset.getMap();
SetOfEnrichmentResults enrichments = dataset.getEnrichments();
Map<String, EnrichmentResult> results = enrichments.getEnrichments();
String upPhenotype = enrichments.getPhenotype1();
String downPhenotype = enrichments.getPhenotype2();
//check to see if there are genesets.
//if there are no genesets then populate the genesets from the generic file
//can only do this if the 6th column has a list of genes for that geneset.
boolean populate_gs = false;
if (genesets == null || genesets.isEmpty())
populate_gs = true;
else
//as this is the default for g:Profiler, use the Description in the visual style instead of the formatted name,
//but only if there is a gmt supplied. If using just the generic output file there is no field for description
dataset.getMap().getParams().setEMgmt(true);
for (int i = 1; i < lines.size(); i++) {
line = lines.get(i);
tokens = line.split("\t");
//update the length each time because some line might have missing values
length = tokens.length;
double pvalue = 1.0;
double FDRqvalue = 1.0;
GenericResult result;
int gs_size = 0;
double NES = 1.0;
//The first column of the file is the name of the geneset
final String name = tokens[0].toUpperCase().trim();
final String description = tokens[1].toUpperCase();
if (genesets.containsKey(name)) {
gs_size = genesets.get(name).getGenes().size();
}
//The third column is the nominal p-value
if (tokens[2] == null || tokens[2].equalsIgnoreCase("")) {
//do nothing
} else {
pvalue = Double.parseDouble(tokens[2]);
}
if (length > 3) {
//the fourth column is the FDR q-value
if (tokens[3] == null || tokens[3].equalsIgnoreCase("")) {
//do nothing
} else {
FDRqvalue = Double.parseDouble(tokens[3]);
FDR = true;
}
//the fifth column is the phenotype; if it is a number, the only important part is the sign
if (length > 4) {
if (tokens[4] == null || tokens[4].equalsIgnoreCase("")) {
} else {
//check to see if the string matches the specified phenotypes
if (tokens[4].equalsIgnoreCase(upPhenotype))
NES = 1.0;
else if (tokens[4].equalsIgnoreCase(downPhenotype))
NES = -1.0;
else //try and see if the user has specified the phenotype as a number
{
try {
NES = Double.parseDouble(tokens[4]);
} catch (NumberFormatException nfe) {
throw new IllegalThreadStateException(tokens[4] + " is not a valid phenotype. Phenotype specified in generic enrichment results file must have the same phenotype as specified in advanced options or must be a positive or negative number.");
}
}
}
//the sixth column (if present) lists the genes in the geneset
if (length > 5 && populate_gs) {
//get all the genes in the field
String[] gene_tokens = tokens[5].split(",");
ImmutableSet.Builder<Integer> builder = ImmutableSet.builder();
//All subsequent fields in the list are the genes associated with this geneset.
for (String token : gene_tokens) {
String gene = token.trim().toUpperCase();
//if it is already in the hash then get its associated key and put it into the set of genes
if (map.containsGene(gene)) {
builder.add(map.getHashFromGene(gene));
} else if (!gene.isEmpty()) {
Integer hash = map.addGene(gene).get();
builder.add(hash);
}
}
GeneSet gs = new GeneSet(name, description, builder.build());
gs_size = gs.getGenes().size();
//put the new or filtered geneset back into the set.
genesets.put(name, gs);
}
//end of tokens>5
result = new GenericResult(name, description, pvalue, gs_size, FDRqvalue, NES);
} else
//end of tokens>4
result = new GenericResult(name, description, pvalue, gs_size, FDRqvalue);
} else {
result = new GenericResult(name, description, pvalue, gs_size);
}
// Calculate Percentage. This must be a value between 0..100.
int percentComplete = (int) (((double) currentProgress / maxValue) * 100);
taskMonitor.setProgress(percentComplete);
currentProgress++;
//check to see if the gene set has already been entered in the results
//it is possible that one geneset will be in both phenotypes.
//if it already exists then we want to make sure the one retained is the result with the
//lower p-value.
//ticket #149
GenericResult temp = (GenericResult) results.get(name);
if (temp == null)
results.put(name, result);
else {
if (result.getPvalue() < temp.getPvalue())
results.put(name, result);
}
}
if (FDR)
dataset.getMap().getParams().setFDR(FDR);
}
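For reference, the parser expects tab-separated rows: geneset name, description, nominal p-value, then optionally FDR q-value, phenotype (or a signed number), and a comma-separated gene list. A sketch of what one hypothetical row would produce, assuming the up/down phenotypes were configured as "ES12"/"NT12" (the row contents below are made up for illustration):
// Hypothetical input row:
// "DNA REPAIR%GO%GO:0006281\tDNA repair\t0.0004\t0.012\tES12\tBRCA1,BRCA2,RAD51"
GenericResult result = new GenericResult(
        "DNA REPAIR%GO%GO:0006281", // column 1: geneset name (upper-cased, trimmed)
        "DNA REPAIR",               // column 2: description (upper-cased)
        0.0004,                     // column 3: nominal p-value
        3,                          // geneset size (from the GMT, or from the column-6 gene list)
        0.012,                      // column 4: FDR q-value (also sets the FDR flag)
        1.0);                       // column 5: "ES12" matches the up phenotype, so NES = +1.0
If the same geneset name appears again (for example, once per phenotype), only the result with the lower p-value is kept in the SetOfEnrichmentResults map.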
Use of org.baderlab.csplugins.enrichmentmap.model.SetOfEnrichmentResults in project EnrichmentMapApp by BaderLab: class LegacySessionLoadTest, method test_1_LoadedLegacyData.
@Test
@SessionFile("em_session_2.2.cys")
public void test_1_LoadedLegacyData() throws Exception {
EnrichmentMap map = getEnrichmentMap();
assertEquals("EM1_Enrichment Map", map.getName());
CyNetwork network = networkManager.getNetwork(map.getNetworkID());
assertNotNull(network);
assertEquals(1, map.getDataSetCount());
assertEquals(14067, map.getNumberOfGenes());
assertEquals(14067, map.getAllGenes().size());
// Number of edges: 3339 - that's how many geneset similarity objects there should be!!!
CyTable edgeTable = network.getDefaultEdgeTable();
assertEquals(3339, edgeTable.getRowCount());
EMCreationParameters params = map.getParams();
String prefix = params.getAttributePrefix();
assertEquals("EM1_", prefix);
assertEquals(0.5, params.getCombinedConstant(), 0.0);
assertFalse(params.isEMgmt());
assertEquals("Geneset_Overlap", params.getEnrichmentEdgeType());
assertTrue(params.isFDR());
assertEquals(GreatFilter.HYPER, params.getGreatFilter());
assertEquals(0.005, params.getPvalue(), 0.0);
assertEquals(1.0, params.getPvalueMin(), 0.0);
assertEquals(0.1, params.getQvalue(), 0.0);
assertEquals(1.0, params.getQvalueMin(), 0.0);
assertEquals(0.5, params.getSimilarityCutoff(), 0.0);
assertEquals(SimilarityMetric.OVERLAP, params.getSimilarityMetric());
// assertFalse(params.isDistinctExpressionSets());
String geneset1 = "RESOLUTION OF SISTER CHROMATID COHESION%REACTOME%REACT_150425.2";
String geneset2 = "CHROMOSOME, CENTROMERIC REGION%GO%GO:0000775";
Collection<CyRow> rows = edgeTable.getMatchingRows(CyNetwork.NAME, geneset1 + " (Geneset_Overlap) " + geneset2);
assertEquals(1, rows.size());
CyRow row = rows.iterator().next();
assertEquals("Geneset_Overlap", row.get(CyEdge.INTERACTION, String.class));
assertEquals(0.6097560975609756, EMStyleBuilder.Columns.EDGE_SIMILARITY_COEFF.get(row, prefix), 0.0);
EMDataSet dataset = map.getDataSet("Dataset 1");
assertNotNull(dataset);
assertSame(map, dataset.getMap());
assertEquals(Method.GSEA, dataset.getMethod());
assertEquals(12653, dataset.getDataSetGenes().size());
assertEquals(389, dataset.getGeneSetsOfInterest().getGeneSets().size());
// assertEquals(17259, dataset.getSetofgenesets().getGenesets().size()); // MKTODO why? what is this used for
assertEndsWith(dataset.getSetOfGeneSets().getFilename(), "Human_GO_AllPathways_no_GO_iea_April_15_2013_symbol.gmt");
for (long suid : dataset.getNodeSuids()) {
assertNotNull(network.getNode(suid));
}
GeneSet geneset = dataset.getGeneSetsOfInterest().getGeneSets().get("NCRNA PROCESSING%GO%GO:0034470");
assertEquals(88, geneset.getGenes().size());
assertEquals("NCRNA PROCESSING%GO%GO:0034470", geneset.getName());
assertEquals("ncRNA processing", geneset.getDescription());
assertEquals(Optional.of("GO"), geneset.getSource());
SetOfEnrichmentResults enrichments = dataset.getEnrichments();
assertEquals(4756, enrichments.getEnrichments().size());
assertEndsWith(enrichments.getFilename1(), "gsea_report_for_ES12_1473194913081.xls");
assertEndsWith(enrichments.getFilename2(), "gsea_report_for_NT12_1473194913081.xls");
assertEquals("ES12", enrichments.getPhenotype1());
assertEquals("NT12", enrichments.getPhenotype2());
EnrichmentResult result = enrichments.getEnrichments().get("RIBONUCLEOSIDE TRIPHOSPHATE BIOSYNTHETIC PROCESS%GO%GO:0009201");
assertTrue(result instanceof GSEAResult);
GSEAResult gseaResult = (GSEAResult) result;
assertEquals("RIBONUCLEOSIDE TRIPHOSPHATE BIOSYNTHETIC PROCESS%GO%GO:0009201", gseaResult.getName());
assertEquals(0.42844063, gseaResult.getES(), 0.0);
assertEquals(0.45225498, gseaResult.getFdrqvalue(), 0.0);
assertEquals(1.0, gseaResult.getFwerqvalue(), 0.0);
assertEquals(23, gseaResult.getGsSize());
assertEquals(1.1938541, gseaResult.getNES(), 0.0);
assertEquals(0.2457786, gseaResult.getPvalue(), 0.0);
assertEquals(4689, gseaResult.getRankAtMax());
assertEquals(Optional.of("GO"), gseaResult.getSource());
GeneExpressionMatrix expressions = dataset.getExpressionSets();
assertEquals(20326, expressions.getExpressionUniverse());
assertEquals(3.686190609, expressions.getClosesttoZero(), 0.0);
// assertEndsWith(expressions.getFilename(), "MCF7_ExprMx_v2_names.gct");
assertEquals(15380.42388, expressions.getMaxExpression(), 0.0);
assertEquals(3.686190609, expressions.getMinExpression(), 0.0);
assertEquals(20, expressions.getNumConditions());
assertEquals(12653, expressions.getExpressionMatrix().size());
assertEquals(12653, expressions.getExpressionMatrix_rowNormalized().size());
GeneExpression expression = expressions.getExpressionMatrix().get(0);
assertEquals("MOCOS", expression.getName());
assertEquals("MOCOS (molybdenum cofactor sulfurase)", expression.getDescription());
assertEquals(18, expression.getExpression().length);
Ranking ranking = expressions.getRanks().get("GSEARanking");
assertEquals(12653, ranking.getAllRanks().size());
assertEquals(12653, ranking.getRanking().size());
Rank rank = ranking.getRanking().get(0);
assertEquals("MOCOS", rank.getName());
assertEquals(1238, rank.getRank().intValue());
assertEquals(0.54488367, rank.getScore(), 0.0);
DataSetFiles files = dataset.getDataSetFiles();
assertEndsWith(files.getClassFile(), "ES_NT.cls");
assertEndsWith(files.getEnrichmentFileName1(), "gsea_report_for_ES12_1473194913081.xls");
assertEndsWith(files.getEnrichmentFileName2(), "gsea_report_for_NT12_1473194913081.xls");
// assertEndsWith(files.getExpressionFileName(), "MCF7_ExprMx_v2_names.gct");
assertEndsWith(files.getGMTFileName(), "Human_GO_AllPathways_no_GO_iea_April_15_2013_symbol.gmt");
assertEndsWith(files.getGseaHtmlReportFile(), "estrogen_treatment_12hr_gsea_enrichment_results.Gsea.1473194913081/index.html");
assertEndsWith(files.getRankedFile(), "ranked_gene_list_ES12_versus_NT12_1473194913081.xls");
assertEquals("ES12", files.getPhenotype1());
assertEquals("NT12", files.getPhenotype2());
}
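Note that assertEndsWith is not a standard JUnit assertion; it is a small helper in the test code that compares only the suffix of a path, since the session stores machine-specific absolute paths. A plausible sketch of such a helper, offered as an assumption rather than the project's actual implementation:
private static void assertEndsWith(String actual, String expectedSuffix) {
    // only the suffix is stable across machines; the prefix depends on where the session was saved
    assertNotNull(actual);
    assertTrue("expected <" + actual + "> to end with <" + expectedSuffix + ">", actual.endsWith(expectedSuffix));
}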
Use of org.baderlab.csplugins.enrichmentmap.model.SetOfEnrichmentResults in project EnrichmentMapApp by BaderLab: class CreateGMTEnrichmentMapTask, method buildEnrichmentMap.
public void buildEnrichmentMap() {
dataset.setMethod(Method.Generic);
// in this case all the genesets are of interest
dataset.setGeneSetsOfInterest(dataset.getSetOfGeneSets());
Map<String, GeneSet> currentSets = dataset.getSetOfGeneSets().getGeneSets();
// create a new Set of Enrichment Results
SetOfEnrichmentResults setOfEnrichments = new SetOfEnrichmentResults();
Map<String, EnrichmentResult> currentEnrichments = setOfEnrichments.getEnrichments();
// we also need to put all genesets into the enrichment results
for (Iterator<String> i = currentSets.keySet().iterator(); i.hasNext(); ) {
String geneset1Name = i.next();
GeneSet gs = currentSets.get(geneset1Name);
GenericResult tempResult = new GenericResult(gs.getName(), gs.getDescription(), 0.01, gs.getGenes().size());
currentEnrichments.put(gs.getName(), tempResult);
}
dataset.setEnrichments(setOfEnrichments);
}
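After buildEnrichmentMap runs, every geneset from the GMT is mirrored by a placeholder GenericResult (fixed p-value 0.01, no FDR) in the dataset's SetOfEnrichmentResults, so downstream code that expects enrichment results can treat a GMT-only analysis uniformly. A minimal sketch of what a caller could check afterwards; the variable names are illustrative:
// every GMT geneset has a corresponding placeholder enrichment entry
SetOfEnrichmentResults enrichments = dataset.getEnrichments();
for (String gsName : dataset.getSetOfGeneSets().getGeneSets().keySet()) {
    GenericResult r = (GenericResult) enrichments.getEnrichments().get(gsName);
    assert r != null && r.getPvalue() == 0.01;
}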