Example use of beast.evolution.alignment.FilteredAlignment in project beast2 by CompEvol:
the method initAndValidate of the class ThreadedTreeLikelihood.
@Override
public void initAndValidate() {
threadCount = BeastMCMC.m_nThreads;
if (maxNrOfThreadsInput.get() > 0) {
threadCount = Math.min(maxNrOfThreadsInput.get(), BeastMCMC.m_nThreads);
}
String instanceCount = System.getProperty("beast.instance.count");
if (instanceCount != null && instanceCount.length() > 0) {
threadCount = Integer.parseInt(instanceCount);
}
logPByThread = new double[threadCount];
// sanity check: alignment should have same #taxa as tree
if (dataInput.get().getTaxonCount() != treeInput.get().getLeafNodeCount()) {
throw new IllegalArgumentException("The number of nodes in the tree does not match the number of sequences");
}
treelikelihood = new TreeLikelihood[threadCount];
if (dataInput.get().isAscertained) {
Log.warning.println("Note, can only use single thread per alignment because the alignment is ascertained");
threadCount = 1;
}
if (threadCount <= 1) {
treelikelihood[0] = new TreeLikelihood();
treelikelihood[0].setID(getID() + "0");
treelikelihood[0].initByName("data", dataInput.get(), "tree", treeInput.get(), "siteModel", siteModelInput.get(), "branchRateModel", branchRateModelInput.get(), "useAmbiguities", useAmbiguitiesInput.get(), "scaling", scalingInput.get() + "");
treelikelihood[0].getOutputs().add(this);
likelihoodsInput.get().add(treelikelihood[0]);
} else {
pool = Executors.newFixedThreadPool(threadCount);
calcPatternPoints(dataInput.get().getSiteCount());
for (int i = 0; i < threadCount; i++) {
Alignment data = dataInput.get();
String filterSpec = (patternPoints[i] + 1) + "-" + (patternPoints[i + 1]);
if (data.isAscertained) {
filterSpec += data.excludefromInput.get() + "-" + data.excludetoInput.get() + "," + filterSpec;
}
treelikelihood[i] = new TreeLikelihood();
treelikelihood[i].setID(getID() + i);
treelikelihood[i].getOutputs().add(this);
likelihoodsInput.get().add(treelikelihood[i]);
FilteredAlignment filter = new FilteredAlignment();
if (i == 0 && dataInput.get() instanceof FilteredAlignment && ((FilteredAlignment) dataInput.get()).constantSiteWeightsInput.get() != null) {
filter.initByName("data", dataInput.get(), /*, "userDataType", m_data.get().getDataType()*/
"filter", filterSpec, "constantSiteWeights", ((FilteredAlignment) dataInput.get()).constantSiteWeightsInput.get());
} else {
filter.initByName("data", dataInput.get(), /*, "userDataType", m_data.get().getDataType()*/
"filter", filterSpec);
}
treelikelihood[i].initByName("data", filter, "tree", treeInput.get(), "siteModel", duplicate((BEASTInterface) siteModelInput.get(), i), "branchRateModel", duplicate(branchRateModelInput.get(), i), "useAmbiguities", useAmbiguitiesInput.get(), "scaling", scalingInput.get() + "");
likelihoodCallers.add(new TreeLikelihoodCaller(treelikelihood[i], i));
}
}
}
Example use of beast.evolution.alignment.FilteredAlignment in project Babel by rbouckaert:
the method parseAssumptionsBlock of the class NexusParser.
// parseDataBlock
/**
* parse assumptions block
* begin assumptions;
* charset firsthalf = 1-449;
* charset secondhalf = 450-898;
* charset third = 1-457\3 662-896\3;
* end;
*
* begin assumptions;
* wtset MySoapWeights (VECTOR) = 13 13 13 50 50 88 8
* end;
*/
void parseAssumptionsBlock(final BufferedReader fin) throws Exception {
if (true) {
return;
}
String str;
do {
str = nextLine(fin);
if (str.toLowerCase().matches("\\s*charset\\s.*")) {
// remove text in brackets (as TreeBase files are wont to contain)
str = str.replaceAll("\\(.*\\)", "");
// clean up spaces
str = str.replaceAll("^\\s+", "");
str = str.replaceAll("\\s*-\\s*", "-");
str = str.replaceAll("\\s*\\\\\\s*", "\\\\");
str = str.replaceAll("\\s*;", "");
final String[] strs = str.trim().split("\\s+");
final String id = strs[1];
String rangeString = "";
for (int i = 3; i < strs.length; i++) {
rangeString += strs[i] + " ";
}
rangeString = rangeString.trim().replace(' ', ',');
final FilteredAlignment alignment = new FilteredAlignment();
alignment.setID(id);
alignment.alignmentInput.setValue(m_alignment, alignment);
alignment.filterInput.setValue(rangeString, alignment);
alignment.initAndValidate();
filteredAlignments.add(alignment);
} else if (str.toLowerCase().matches("\\s*wtset\\s.*")) {
String[] strs = str.split("=");
if (strs.length > 1) {
str = strs[strs.length - 1].trim();
strs = str.split("\\s+");
int[] weights = new int[strs.length];
for (int i = 0; i < strs.length; i++) {
weights[i] = Integer.parseInt(strs[i]);
}
if (m_alignment != null) {
if (weights.length != m_alignment.getSiteCount()) {
throw new RuntimeException("Number of weights (" + weights.length + ") " + "does not match number of sites in alignment(" + m_alignment.getSiteCount() + ")");
}
StringBuilder weightStr = new StringBuilder();
for (String str2 : strs) {
weightStr.append(str2);
weightStr.append(',');
}
weightStr.delete(weightStr.length() - 1, weightStr.length());
m_alignment.siteWeightsInput.setValue(weightStr.toString(), m_alignment);
m_alignment.initAndValidate();
} else {
Log.warning.println("WTSET was specified before alignment. WTSET is ignored.");
}
}
} else if (str.toLowerCase().matches("^\\s*taxset\\s.*")) {
// define taxon set, e.g.
// begin set;
// taxset germanic = english german dutch;
// taxset romance = italian spanish rumenian;
// end;
String[] strs = str.split("=");
if (strs.length > 1) {
String str0 = strs[0].trim();
String[] strs2 = str0.split("\\s+");
if (strs2.length != 2) {
throw new RuntimeException("expected 'taxset <name> = ...;' but did not get two words before the = sign: " + str);
}
String taxonSetName = strs2[1];
str0 = strs[strs.length - 1].trim();
if (!str0.endsWith(";")) {
throw new RuntimeException("expected 'taxset <name> = ...;' semi-colin is missing: " + str);
}
str0 = str0.replaceAll(";", "");
String[] taxonNames = str0.split("\\s+");
TaxonSet taxonset = new TaxonSet();
for (String taxon : taxonNames) {
taxonset.taxonsetInput.get().add(new Taxon(taxon));
}
taxonset.setID(taxonSetName);
taxonsets.add(taxonset);
}
} else if (str.toLowerCase().matches("^\\s*calibrate\\s.*")) {
// define calibration represented by an MRCAPRior,
// taxon sets need to be specified earlier, but can also be a single taxon
// e.g.
// begin mrbayes;
// calibrate germanic = normal(1000,50)
// calibrate hittite = normal(3450,100)
// calibrate english = fixed(0)
// end;
String[] strs = str.split("=");
if (strs.length > 1) {
String str0 = strs[0].trim();
String[] strs2 = str0.split("\\s+");
if (strs2.length != 2) {
throw new RuntimeException("expected 'calibrate <name> = ...' but did not get two words before the = sign: " + str);
}
// first, get the taxon
String taxonSetName = strs2[1].replaceAll("'\"", "");
Taxon taxonset = null;
for (Taxon t : taxonsets) {
if (t.getID().equals(taxonSetName)) {
taxonset = t;
}
}
if (taxonset == null) {
for (String t : taxa) {
if (t.equals(taxonSetName)) {
taxonset = new Taxon(t);
}
}
}
if (taxonset == null) {
throw new RuntimeException("Could not find taxon/taxonset " + taxonSetName + " in calibration: " + str);
}
// next get the calibration
str0 = strs[strs.length - 1].trim();
String[] strs3 = str0.split("[\\(,\\)]");
ParametricDistribution distr = null;
switch(strs3[0]) {
case "normal":
distr = new Normal();
distr.initByName("mean", strs3[1], "sigma", strs3[2]);
distr.setID("Normal.0");
break;
case "uniform":
distr = new Uniform();
distr.initByName("lower", strs3[1], "upper", strs3[2]);
distr.setID("Uniform.0");
break;
case "fixed":
// uniform with lower == upper
distr = new Uniform();
distr.initByName("lower", strs3[1], "upper", strs3[1]);
distr.setID("Uniform.0");
break;
case "offsetlognormal":
distr = new LogNormalDistributionModel();
distr.initByName("offset", strs3[1], "M", strs3[2], "S", strs3[3], "meanInRealSpace", true);
distr.setID("LogNormal.0");
break;
case "lognormal":
distr = new LogNormalDistributionModel();
distr.initByName("M", strs3[1], "S", strs3[2], "meanInRealSpace", true);
distr.setID("LogNormal.0");
break;
case "offsetexponential":
distr = new Exponential();
distr.initByName("offset", strs3[1], "mean", strs3[2]);
distr.setID("Exponential.0");
break;
case "gamma":
distr = new Gamma();
distr.initByName("alpha", strs3[1], "beta", strs3[2]);
distr.setID("Gamma.0");
break;
case "offsetgamma":
distr = new Gamma();
distr.initByName("offset", strs3[1], "alpha", strs3[2], "beta", strs3[3]);
distr.setID("Gamma.0");
break;
default:
throw new RuntimeException("Unknwon distribution " + strs3[0] + "in calibration: " + str);
}
MRCAPrior prior = new MRCAPrior();
prior.isMonophyleticInput.setValue(true, prior);
prior.distInput.setValue(distr, prior);
prior.taxonsetInput.setValue(taxonset, prior);
prior.setID(taxonset.getID() + ".prior");
// should set Tree before initialising, but we do not know the tree yet...
calibrations.add(prior);
}
}
} while (!str.toLowerCase().contains("end;"));
}
Example use of beast.evolution.alignment.FilteredAlignment in project feast by tgvaughan:
the method main of the class SequenceExtractor.
public static void main(String[] args) {
if (args.length < 2) {
System.out.println("Usage: SequenceExtractor " + "beast_input_file.xml output_file.nexus");
System.exit(0);
}
try {
XMLParser parser = new XMLParser();
Runnable runnable = parser.parseFile(new File(args[0]));
Set<Alignment> alignments = findAlignments(runnable);
if (alignments.isEmpty()) {
System.out.println("No alignments found!");
System.exit(1);
}
if (alignments.size() > 1) {
String prefix;
int extIndex = args[0].lastIndexOf('.');
if (extIndex < 0) {
// No extension
prefix = args[0];
} else {
prefix = args[0].substring(0, extIndex);
}
for (Alignment alignment : alignments) {
if (alignment instanceof FilteredAlignment)
continue;
NexusWriter.write(alignment, null, new PrintStream(prefix + "_" + alignment.getID() + ".nexus"));
}
} else {
NexusWriter.write((Alignment) alignments.toArray()[0], null, new PrintStream(args[1]));
}
} catch (FileNotFoundException ex) {
System.out.println("Could not find file '" + args[0] + "'.");
System.exit(1);
} catch (IllegalArgumentException | IllegalAccessException ex) {
Logger.getLogger(SequenceExtractor.class.getName()).log(Level.SEVERE, null, ex);
} catch (Exception ex) {
Logger.getLogger(SequenceExtractor.class.getName()).log(Level.SEVERE, null, ex);
}
}
Aggregations