Usage of dr.util.Attribute in project beast-mcmc (beast-dev):
class TreePriorGenerator, method writeTreePriorModel.
// void writeTreePrior(PartitionTreePrior prior, PartitionTreeModel model, XMLWriter writer) { // for species, partitionName.treeModel
// setModelPrefix(prior.getPrefix()); // only has prefix, if (options.getPartitionTreePriors().size() > 1)
//
// writePriorLikelihood(prior, model, writer);
// }
/**
 * Write a tree prior (coalescent or speciational) model as XML.
 * <p>
 * Depending on {@code prior.getNodeHeightPrior()} this emits one of several
 * demographic/speciation model elements (constant, exponential, logistic,
 * expansion, Yule, birth-death variants, or the *BEAST species priors).
 * As a side effect it sets this generator's model prefix from the prior.
 * If the chosen prior cannot itself be used to simulate a starting tree,
 * an extra constant-population "initialDemo" model is appended.
 *
 * @param prior the partition tree prior
 * @param writer the writer
 */
void writeTreePriorModel(PartitionTreePrior prior, XMLWriter writer) {
// only has prefix, if (options.getPartitionTreePriors().size() > 1)
setModelPrefix(prior.getPrefix());
// When non-null, names the population-size parameter that is reused to
// seed the "initialDemo" starting-tree model written after the switch.
String initialPopSize = null;
TreePriorType nodeHeightPrior = prior.getNodeHeightPrior();
// NOTE(review): some cases below use this local, others re-read
// options.units directly — equivalent, but inconsistent.
Units.Type units = options.units;
TreePriorParameterizationType parameterization = prior.getParameterization();
// Emit the demographic / speciation model matching the chosen prior type.
switch(nodeHeightPrior) {
case CONSTANT:
writer.writeComment("A prior assumption that the population size has remained constant", "throughout the time spanned by the genealogy.");
writer.writeOpenTag(ConstantPopulationModelParser.CONSTANT_POPULATION_MODEL, new Attribute[] { new Attribute.Default<String>(XMLParser.ID, modelPrefix + "constant"), new Attribute.Default<String>("units", Units.Utils.getDefaultUnitName(options.units)) });
writer.writeOpenTag(ConstantPopulationModelParser.POPULATION_SIZE);
writeParameter("constant.popSize", prior, writer);
writer.writeCloseTag(ConstantPopulationModelParser.POPULATION_SIZE);
writer.writeCloseTag(ConstantPopulationModelParser.CONSTANT_POPULATION_MODEL);
break;
case EXPONENTIAL:
// generate an exponential prior tree
writer.writeComment("A prior assumption that the population size has grown exponentially", "throughout the time spanned by the genealogy.");
writer.writeOpenTag(ExponentialGrowthModelParser.EXPONENTIAL_GROWTH_MODEL, new Attribute[] { new Attribute.Default<String>(XMLParser.ID, modelPrefix + "exponential"), new Attribute.Default<String>("units", Units.Utils.getDefaultUnitName(options.units)) });
// write pop size socket
writer.writeOpenTag(ExponentialGrowthModelParser.POPULATION_SIZE);
writeParameter("exponential.popSize", prior, writer);
writer.writeCloseTag(ExponentialGrowthModelParser.POPULATION_SIZE);
// Growth can be parameterized either as a rate or as a doubling time.
if (parameterization == TreePriorParameterizationType.GROWTH_RATE) {
// write growth rate socket
writer.writeOpenTag(ExponentialGrowthModelParser.GROWTH_RATE);
writeParameter("exponential.growthRate", prior, writer);
writer.writeCloseTag(ExponentialGrowthModelParser.GROWTH_RATE);
} else {
// write doubling time socket
writer.writeOpenTag(ExponentialGrowthModelParser.DOUBLING_TIME);
writeParameter("exponential.doublingTime", prior, writer);
writer.writeCloseTag(ExponentialGrowthModelParser.DOUBLING_TIME);
}
writer.writeCloseTag(ExponentialGrowthModelParser.EXPONENTIAL_GROWTH_MODEL);
break;
case LOGISTIC:
// generate a logistic growth prior tree
writer.writeComment("A prior assumption that the population size has grown logistically", "throughout the time spanned by the genealogy.");
writer.writeOpenTag(LogisticGrowthModelParser.LOGISTIC_GROWTH_MODEL, new Attribute[] { new Attribute.Default<String>(XMLParser.ID, modelPrefix + "logistic"), new Attribute.Default<String>("units", Units.Utils.getDefaultUnitName(options.units)) });
// write pop size socket
writer.writeOpenTag(LogisticGrowthModelParser.POPULATION_SIZE);
writeParameter("logistic.popSize", prior, writer);
writer.writeCloseTag(LogisticGrowthModelParser.POPULATION_SIZE);
if (parameterization == TreePriorParameterizationType.GROWTH_RATE) {
// write growth rate socket
writer.writeOpenTag(LogisticGrowthModelParser.GROWTH_RATE);
writeParameter("logistic.growthRate", prior, writer);
writer.writeCloseTag(LogisticGrowthModelParser.GROWTH_RATE);
} else {
// write doubling time socket
writer.writeOpenTag(LogisticGrowthModelParser.DOUBLING_TIME);
writeParameter("logistic.doublingTime", prior, writer);
writer.writeCloseTag(LogisticGrowthModelParser.DOUBLING_TIME);
}
// write logistic t50 socket
writer.writeOpenTag(LogisticGrowthModelParser.TIME_50);
// if (options.clockModelOptions.getRateOptionClockModel() == FixRateType.FIX_MEAN
// || options.clockModelOptions.getRateOptionClockModel() == FixRateType.RELATIVE_TO) {
// writer.writeComment("No calibration");
// writer.writeComment("logistic.t50 initial always has to < treeRootHeight initial");
// dr.app.beauti.options.Parameter priorPara = prior.getParameter("logistic.t50");
//
// double initRootHeight;
// if (options.isShareSameTreePrior()) {
// initRootHeight = priorPara.initial;
// for (PartitionTreeModel tree : options.getPartitionTreeModels()) {
// double tmpRootHeight = tree.getParameter("treeModel.rootHeight").initial;
// if (initRootHeight > tmpRootHeight) { // take min
// initRootHeight = tmpRootHeight;
// }
// }
// } else {
// initRootHeight = prior.getTreeModel().getParameter("treeModel.rootHeight").initial;
// }
// // logistic.t50 initial always has to < treeRootHeight initial
// if (priorPara.initial >= initRootHeight) {
// priorPara.initial = initRootHeight / 2; // tree prior.initial has to < treeRootHeight.initial
// }
// } else {
// writer.writeComment("Has calibration");
//
// throw new IllegalArgumentException("This function is not available in this release !");
// }
writeParameter("logistic.t50", prior, writer);
writer.writeCloseTag(LogisticGrowthModelParser.TIME_50);
writer.writeCloseTag(LogisticGrowthModelParser.LOGISTIC_GROWTH_MODEL);
// Reuse this parameter to seed the initial-tree simulator below.
initialPopSize = "logistic.popSize";
break;
case EXPANSION:
// generate an expansion prior tree
writer.writeComment("A prior assumption that the population size has grown exponentially", "from some ancestral population size in the past.");
writer.writeOpenTag(ExpansionModelParser.EXPANSION_MODEL, new Attribute[] { new Attribute.Default<String>(XMLParser.ID, modelPrefix + "expansion"), new Attribute.Default<String>("units", Units.Utils.getDefaultUnitName(options.units)) });
// write pop size socket
writeParameter(ExpansionModelParser.POPULATION_SIZE, "expansion.popSize", prior, writer);
if (parameterization == TreePriorParameterizationType.GROWTH_RATE) {
// write growth rate socket
writeParameter(ExpansionModelParser.GROWTH_RATE, "expansion.growthRate", prior, writer);
} else {
// write doubling time socket
writeParameter(ExpansionModelParser.DOUBLING_TIME, "expansion.doublingTime", prior, writer);
}
// write ancestral proportion socket
writeParameter(ExpansionModelParser.ANCESTRAL_POPULATION_PROPORTION, "expansion.ancestralProportion", prior, writer);
writer.writeCloseTag(ExpansionModelParser.EXPANSION_MODEL);
// Reuse this parameter to seed the initial-tree simulator below.
initialPopSize = "expansion.popSize";
break;
case YULE:
case YULE_CALIBRATION:
// Plain and calibrated Yule share the same model element; only the
// citation comment differs.
if (nodeHeightPrior == TreePriorType.YULE_CALIBRATION) {
writer.writeComment("Calibrated Yule: Heled J, Drummond AJ (2011), Syst Biol, doi: " + "10.1093/sysbio/syr087");
} else {
writer.writeComment("A prior on the distribution node heights defined given", "a Yule speciation process (a pure birth process).");
}
writer.writeOpenTag(YuleModelParser.YULE_MODEL, new Attribute[] { new Attribute.Default<String>(XMLParser.ID, modelPrefix + YuleModelParser.YULE), new Attribute.Default<String>("units", Units.Utils.getDefaultUnitName(units)) });
writeParameter(YuleModelParser.BIRTH_RATE, "yule.birthRate", prior, writer);
writer.writeCloseTag(YuleModelParser.YULE_MODEL);
break;
case BIRTH_DEATH:
case BIRTH_DEATH_INCOMPLETE_SAMPLING:
writer.writeComment("A prior on the distribution node heights defined given");
writer.writeComment(nodeHeightPrior == TreePriorType.BIRTH_DEATH_INCOMPLETE_SAMPLING ? BirthDeathModelParser.getCitationRHO() : BirthDeathModelParser.getCitation());
writer.writeOpenTag(BirthDeathModelParser.BIRTH_DEATH_MODEL, new Attribute[] { new Attribute.Default<String>(XMLParser.ID, modelPrefix + BirthDeathModelParser.BIRTH_DEATH), new Attribute.Default<String>("units", Units.Utils.getDefaultUnitName(units)) });
writeParameter(BirthDeathModelParser.BIRTHDIFF_RATE, BirthDeathModelParser.MEAN_GROWTH_RATE_PARAM_NAME, prior, writer);
writeParameter(BirthDeathModelParser.RELATIVE_DEATH_RATE, BirthDeathModelParser.RELATIVE_DEATH_RATE_PARAM_NAME, prior, writer);
// The sampling-probability parameter only exists for incomplete sampling.
if (nodeHeightPrior == TreePriorType.BIRTH_DEATH_INCOMPLETE_SAMPLING) {
writeParameter(BirthDeathModelParser.SAMPLE_PROB, BirthDeathModelParser.BIRTH_DEATH + "." + BirthDeathModelParser.SAMPLE_PROB, prior, writer);
}
writer.writeCloseTag(BirthDeathModelParser.BIRTH_DEATH_MODEL);
break;
case BIRTH_DEATH_SERIAL_SAMPLING:
writer.writeComment(BirthDeathSerialSamplingModelParser.getCitationPsiOrg());
writer.writeOpenTag(BirthDeathSerialSamplingModelParser.BIRTH_DEATH_SERIAL_MODEL, new Attribute[] { new Attribute.Default<String>(XMLParser.ID, modelPrefix + BirthDeathSerialSamplingModelParser.BDSS), new Attribute.Default<String>("units", Units.Utils.getDefaultUnitName(units)), new Attribute.Default<Boolean>(BirthDeathSerialSamplingModelParser.HAS_FINAL_SAMPLE, false) });
writeParameter(BirthDeathSerialSamplingModelParser.LAMBDA, BirthDeathSerialSamplingModelParser.BDSS + "." + BirthDeathSerialSamplingModelParser.LAMBDA, prior, writer);
writeParameter(BirthDeathSerialSamplingModelParser.RELATIVE_MU, BirthDeathSerialSamplingModelParser.BDSS + "." + BirthDeathSerialSamplingModelParser.RELATIVE_MU, prior, writer);
// writeParameter(BirthDeathSerialSamplingModelParser.SAMPLE_PROBABILITY,
// BirthDeathSerialSamplingModelParser.BDSS + "." + BirthDeathSerialSamplingModelParser.SAMPLE_PROBABILITY, prior, writer);
writeParameter(BirthDeathSerialSamplingModelParser.PSI, BirthDeathSerialSamplingModelParser.BDSS + "." + BirthDeathSerialSamplingModelParser.PSI, prior, writer);
writeParameter(BirthDeathSerialSamplingModelParser.ORIGIN, BirthDeathSerialSamplingModelParser.BDSS + "." + BirthDeathSerialSamplingModelParser.ORIGIN, prior, writer);
writer.writeCloseTag(BirthDeathSerialSamplingModelParser.BIRTH_DEATH_SERIAL_MODEL);
break;
case BIRTH_DEATH_BASIC_REPRODUCTIVE_NUMBER:
writer.writeComment(BirthDeathSerialSamplingModelParser.getCitationRT());
writer.writeOpenTag(BirthDeathEpidemiologyModelParser.BIRTH_DEATH_EPIDEMIOLOGY, new Attribute[] { new Attribute.Default<String>(XMLParser.ID, modelPrefix + BirthDeathEpidemiologyModelParser.BIRTH_DEATH_EPIDEMIOLOGY), new Attribute.Default<String>("units", Units.Utils.getDefaultUnitName(units)) });
writeParameter(BirthDeathEpidemiologyModelParser.R0, BirthDeathEpidemiologyModelParser.R0, prior, writer);
writeParameter(BirthDeathEpidemiologyModelParser.RECOVERY_RATE, BirthDeathEpidemiologyModelParser.RECOVERY_RATE, prior, writer);
writeParameter(BirthDeathEpidemiologyModelParser.SAMPLING_PROBABILITY, BirthDeathEpidemiologyModelParser.SAMPLING_PROBABILITY, prior, writer);
writeParameter(BirthDeathEpidemiologyModelParser.ORIGIN, BirthDeathEpidemiologyModelParser.ORIGIN, prior, writer);
writer.writeCloseTag(BirthDeathEpidemiologyModelParser.BIRTH_DEATH_EPIDEMIOLOGY);
break;
case SPECIES_BIRTH_DEATH:
case SPECIES_YULE:
case SPECIES_YULE_CALIBRATION:
// *BEAST species priors: per-partition gene trees get a constant-size
// coalescent; the species-level prior itself is written elsewhere.
writer.writeComment("A prior assumption that the population size has remained constant");
writer.writeComment("throughout the time spanned by the genealogy.");
if (nodeHeightPrior == TreePriorType.SPECIES_YULE_CALIBRATION)
writer.writeComment("Calibrated Yule: Heled J, Drummond AJ (2011), Syst Biol, doi: " + "10.1093/sysbio/syr087");
writer.writeOpenTag(ConstantPopulationModelParser.CONSTANT_POPULATION_MODEL, new Attribute[] { new Attribute.Default<String>(XMLParser.ID, modelPrefix + "constant"), new Attribute.Default<String>("units", Units.Utils.getDefaultUnitName(options.units)) });
// initial value for pop mean is the same as what used to be the value for the population size
Parameter para = options.starBEASTOptions.getParameter(TraitData.TRAIT_SPECIES + "." + options.starBEASTOptions.POP_MEAN);
prior.getParameter("constant.popSize").setInitial(para.getInitial());
writer.writeOpenTag(ConstantPopulationModelParser.POPULATION_SIZE);
writeParameter("constant.popSize", prior, writer);
writer.writeCloseTag(ConstantPopulationModelParser.POPULATION_SIZE);
writer.writeCloseTag(ConstantPopulationModelParser.CONSTANT_POPULATION_MODEL);
break;
}
if ((!options.useStarBEAST) && nodeHeightPrior != TreePriorType.CONSTANT && nodeHeightPrior != TreePriorType.EXPONENTIAL) {
// If the node height prior is not one of these two then we need to simulate a
// random starting tree under a constant size coalescent.
writer.writeComment("This is a simple constant population size coalescent model", "that is used to generate an initial tree for the chain.");
writer.writeOpenTag(ConstantPopulationModelParser.CONSTANT_POPULATION_MODEL, new Attribute[] { new Attribute.Default<String>(XMLParser.ID, modelPrefix + "initialDemo"), new Attribute.Default<String>("units", Units.Utils.getDefaultUnitName(units)) });
writer.writeOpenTag(ConstantPopulationModelParser.POPULATION_SIZE);
// Reuse the prior's own population size when one was recorded above
// (logistic/expansion); otherwise write a fresh parameter with a
// fixed initial value of 100.
if (initialPopSize != null) {
writer.writeIDref(ParameterParser.PARAMETER, modelPrefix + initialPopSize);
} else {
writeParameter(modelPrefix + "initialDemo.popSize", 1, 100.0, Double.NaN, Double.NaN, writer);
}
writer.writeCloseTag(ConstantPopulationModelParser.POPULATION_SIZE);
writer.writeCloseTag(ConstantPopulationModelParser.CONSTANT_POPULATION_MODEL);
}
// if (nodeHeightPrior == TreePriorType.BIRTH_DEATH_BASIC_REPRODUCTIVE_NUMBER) {
// writer.writeComment("R0 = b/(b*d+s*r)");
// writer.writeOpenTag(RPNcalculatorStatisticParser.RPN_STATISTIC,
// new Attribute[]{
// new Attribute.Default<String>(XMLParser.ID, modelPrefix + "R0")
// });
//
// writer.writeOpenTag(RPNcalculatorStatisticParser.VARIABLE,
// new Attribute[]{
// new Attribute.Default<String>(Statistic.NAME, modelPrefix + "b")
// });
// writeParameterRef(modelPrefix + BirthDeathSerialSamplingModelParser.BDSS + "." + BirthDeathSerialSamplingModelParser.LAMBDA, writer);
// writer.writeCloseTag(RPNcalculatorStatisticParser.VARIABLE);
//
// writer.writeOpenTag(RPNcalculatorStatisticParser.VARIABLE,
// new Attribute[]{
// new Attribute.Default<String>(Statistic.NAME, modelPrefix + "d")
// });
// writeParameterRef(modelPrefix + BirthDeathSerialSamplingModelParser.BDSS + "." + BirthDeathSerialSamplingModelParser.RELATIVE_MU, writer);
// writer.writeCloseTag(RPNcalculatorStatisticParser.VARIABLE);
//
// writer.writeOpenTag(RPNcalculatorStatisticParser.VARIABLE,
// new Attribute[]{
// new Attribute.Default<String>(Statistic.NAME, modelPrefix + "s")
// });
// writeParameterRef(modelPrefix + BirthDeathSerialSamplingModelParser.BDSS + "." + BirthDeathSerialSamplingModelParser.PSI, writer);
// writer.writeCloseTag(RPNcalculatorStatisticParser.VARIABLE);
//
// writer.writeOpenTag(RPNcalculatorStatisticParser.VARIABLE,
// new Attribute[]{
// new Attribute.Default<String>(Statistic.NAME, modelPrefix + "r")
// });
// writeParameterRef(modelPrefix + BirthDeathSerialSamplingModelParser.BDSS + "." + BirthDeathSerialSamplingModelParser.R, writer);
// writer.writeCloseTag(RPNcalculatorStatisticParser.VARIABLE);
//
// writer.writeOpenTag(RPNcalculatorStatisticParser.EXPRESSION,
// new Attribute[]{
// new Attribute.Default<String>(Statistic.NAME, modelPrefix + "R0")
// });
// writer.writeText(modelPrefix + "b " + modelPrefix + "b " + modelPrefix + "d " + "* " + modelPrefix + "s " + modelPrefix + "r " + "* + /");
// writer.writeCloseTag(RPNcalculatorStatisticParser.EXPRESSION);
//
// writer.writeCloseTag(RPNcalculatorStatisticParser.RPN_STATISTIC);
// }
}
Usage of dr.util.Attribute in project beast-mcmc (beast-dev):
class InitialTreeGenerator, method writeSubTree.
/**
 * Recursively writes a coalescent-simulator subtree element for the given taxon set,
 * nesting child simulators for every constrained (monophyletic or height-bounded)
 * taxon set that is a proper subset of this one and belongs to the same tree model.
 *
 * @param treeId id attribute for the generated element, or null for nested (recursive) calls
 * @param taxaId id of the taxa element to reference, or null to use {@code taxa.getId()}
 * @param taxa   the taxon set this subtree spans
 * @param model  the partition tree model the subtree belongs to
 * @param writer the XML writer
 */
private void writeSubTree(String treeId, String taxaId, Taxa taxa, PartitionTreeModel model, XMLWriter writer) {
    // User-specified height constraint for this taxon set; NaN means "none".
    Double heightConstraint = options.taxonSetsHeights.get(taxa);
    double height = (heightConstraint == null) ? Double.NaN : heightConstraint;

    // Build the element attributes: optional id, optional height bound.
    Attribute[] attributes = new Attribute[] {};
    if (treeId != null) {
        if (Double.isNaN(height)) {
            attributes = new Attribute[] { new Attribute.Default<String>(XMLParser.ID, treeId) };
        } else {
            attributes = new Attribute[] { new Attribute.Default<String>(XMLParser.ID, treeId), new Attribute.Default<String>(CoalescentSimulatorParser.HEIGHT, "" + height) };
        }
    } else if (!Double.isNaN(height)) {
        attributes = new Attribute[] { new Attribute.Default<String>(CoalescentSimulatorParser.HEIGHT, "" + height) };
    }

    // construct a subtree
    writer.writeOpenTag(CoalescentSimulatorParser.COALESCENT_SIMULATOR, attributes);

    // Collect constrained taxon sets on the same tree model that are proper
    // subsets of this set (identity comparison deliberately excludes taxa itself).
    List<Taxa> subsets = new ArrayList<Taxa>();
    for (Taxa candidate : options.taxonSets) {
        boolean sameTree = model.equals(options.taxonSetsTreeModel.get(candidate));
        // Boolean.TRUE.equals guards against a missing map entry; the original
        // direct unboxing would throw a NullPointerException in that case.
        boolean isMono = Boolean.TRUE.equals(options.taxonSetsMono.get(candidate));
        boolean hasHeight = options.taxonSetsHeights.get(candidate) != null;
        boolean isSubset = taxa.containsAll(candidate);
        if (sameTree && (isMono || hasHeight) && candidate != taxa && isSubset) {
            subsets.add(candidate);
        }
    }

    // Keep only maximal subsets: any set contained in another collected set is
    // dropped here, because the recursive call for its container will emit it.
    List<Taxa> toRemove = new ArrayList<Taxa>();
    for (Taxa inner : subsets) {
        for (Taxa outer : subsets) {
            if (!outer.equals(inner) && outer.containsAll(inner)) {
                toRemove.add(inner);
                break;
            }
        }
    }
    subsets.removeAll(toRemove);

    // Recurse into each maximal subset (nested simulators carry no id).
    for (Taxa subset : subsets) {
        writeSubTree(null, null, subset, model, writer);
    }

    writer.writeIDref(TaxaParser.TAXA, taxaId == null ? taxa.getId() : taxaId);
    writeInitialDemoModelRef(model, writer);
    writer.writeCloseTag(CoalescentSimulatorParser.COALESCENT_SIMULATOR);
}
Usage of dr.util.Attribute in project beast-mcmc (beast-dev):
class LogGenerator, method writeLogToFile.
/**
 * Writes the file-logger block of the BEAST XML: a &lt;log&gt; element that
 * records the posterior/prior/likelihood, *BEAST species-tree statistics,
 * tree root heights, tMRCA statistics, per-partition model parameters and
 * coalescent likelihoods to the trace log file.
 * <p>
 * Side effect: if {@code options.logFileName} is unset it is defaulted to
 * {@code fileNameStem + ".log"}.
 *
 * @param writer XMLWriter
 * @param treePriorGenerator TreePriorGenerator used for prior parameter/likelihood references
 * @param clockModelGenerator ClockModelGenerator used for clock parameter/likelihood references
 * @param substitutionModelGenerator SubstitutionModelGenerator used for substitution model references
 * @param treeLikelihoodGenerator TreeLikelihoodGenerator used for tree likelihood references
 */
public void writeLogToFile(XMLWriter writer, TreePriorGenerator treePriorGenerator, ClockModelGenerator clockModelGenerator, SubstitutionModelGenerator substitutionModelGenerator, TreeLikelihoodGenerator treeLikelihoodGenerator) {
writer.writeComment("write log to file");
// Default the log file name from the file-name stem if the user did not set one.
if (options.logFileName == null) {
options.logFileName = options.fileNameStem + ".log";
}
writer.writeOpenTag(LoggerParser.LOG, new Attribute[] { new Attribute.Default<String>(XMLParser.ID, "fileLog"), new Attribute.Default<String>(LoggerParser.LOG_EVERY, options.logEvery + ""), new Attribute.Default<String>(LoggerParser.FILE_NAME, options.logFileName), new Attribute.Default<Boolean>(LoggerParser.ALLOW_OVERWRITE_LOG, options.allowOverwriteLog) });
// Posterior and likelihood only exist when there is data; prior is always logged.
if (options.hasData()) {
writer.writeIDref(CompoundLikelihoodParser.POSTERIOR, "posterior");
}
writer.writeIDref(CompoundLikelihoodParser.PRIOR, "prior");
if (options.hasData()) {
writer.writeIDref(CompoundLikelihoodParser.LIKELIHOOD, "likelihood");
}
if (options.useStarBEAST) {
// species
// coalescent prior
writer.writeIDref(MultiSpeciesCoalescentParser.SPECIES_COALESCENT, TraitData.TRAIT_SPECIES + "." + COALESCENT);
// prior on population sizes
// if (options.speciesTreePrior == TreePriorType.SPECIES_YULE) {
writer.writeIDref(MixedDistributionLikelihoodParser.DISTRIBUTION_LIKELIHOOD, SPOPS);
// } else {
// writer.writeIDref(SpeciesTreeBMPrior.STPRIOR, STP);
// }
// prior on species tree
writer.writeIDref(SpeciationLikelihoodParser.SPECIATION_LIKELIHOOD, SPECIATION_LIKE);
writer.writeIDref(ParameterParser.PARAMETER, TraitData.TRAIT_SPECIES + "." + options.starBEASTOptions.POP_MEAN);
writer.writeIDref(ParameterParser.PARAMETER, SpeciesTreeModelParser.SPECIES_TREE + "." + SPLIT_POPS);
// Species-tree prior parameters depend on which of the three *BEAST priors is in use.
if (options.getPartitionTreePriors().get(0).getNodeHeightPrior() == TreePriorType.SPECIES_BIRTH_DEATH) {
writer.writeIDref(ParameterParser.PARAMETER, TraitData.TRAIT_SPECIES + "." + BirthDeathModelParser.MEAN_GROWTH_RATE_PARAM_NAME);
writer.writeIDref(ParameterParser.PARAMETER, TraitData.TRAIT_SPECIES + "." + BirthDeathModelParser.RELATIVE_DEATH_RATE_PARAM_NAME);
} else if (options.getPartitionTreePriors().get(0).getNodeHeightPrior() == TreePriorType.SPECIES_YULE || options.getPartitionTreePriors().get(0).getNodeHeightPrior() == TreePriorType.SPECIES_YULE_CALIBRATION) {
writer.writeIDref(ParameterParser.PARAMETER, TraitData.TRAIT_SPECIES + "." + YuleModelParser.YULE + "." + YuleModelParser.BIRTH_RATE);
} else {
throw new IllegalArgumentException("Get wrong species tree prior using *BEAST : " + options.getPartitionTreePriors().get(0).getNodeHeightPrior().toString());
}
//Species Tree: tmrcaStatistic
writer.writeIDref(TMRCAStatisticParser.TMRCA_STATISTIC, SpeciesTreeModelParser.SPECIES_TREE + "." + TreeModelParser.ROOT_HEIGHT);
}
// Log the root height of every partition tree.
for (PartitionTreeModel model : options.getPartitionTreeModels()) {
writer.writeIDref(ParameterParser.PARAMETER, model.getPrefix() + TreeModel.TREE_MODEL + "." + TreeModelParser.ROOT_HEIGHT);
}
// @todo check for redundancy with rootHeight - if no tip dates or given as heights (time before present)
for (PartitionTreeModel model : options.getPartitionTreeModels()) {
if (model.hasTipCalibrations()) {
writer.writeIDref(TMRCAStatisticParser.TMRCA_STATISTIC, model.getPrefix() + TreeModel.TREE_MODEL + ".rootAge");
}
}
if (options.useStarBEAST) {
for (Taxa taxa : options.speciesSets) {
// make tmrca(tree.name) easy to read in log for Tracer
writer.writeIDref(TMRCAStatisticParser.TMRCA_STATISTIC, "tmrca(" + taxa.getId() + ")");
}
} else {
for (Taxa taxa : options.taxonSets) {
// make tmrca(tree.name) easy to read in log for Tracer
PartitionTreeModel treeModel = options.taxonSetsTreeModel.get(taxa);
writer.writeIDref(TMRCAStatisticParser.TMRCA_STATISTIC, "tmrca(" + treeModel.getPrefix() + taxa.getId() + ")");
}
}
// Per-partition parameter logs: tree priors, substitution models, clock models.
for (PartitionTreePrior prior : options.getPartitionTreePriors()) {
treePriorGenerator.writeParameterLog(prior, writer);
}
for (PartitionSubstitutionModel model : options.getPartitionSubstitutionModels()) {
substitutionModelGenerator.writeLog(model, writer);
}
for (PartitionClockModel model : options.getPartitionClockModels()) {
clockModelGenerator.writeLog(model, writer);
}
for (PartitionClockModel model : options.getPartitionClockModels()) {
clockModelGenerator.writeLogStatistic(model, writer);
}
generateInsertionPoint(ComponentGenerator.InsertionPoint.IN_FILE_LOG_PARAMETERS, writer);
treeLikelihoodGenerator.writeTreeLikelihoodReferences(writer);
clockModelGenerator.writeClockLikelihoodReferences(writer);
generateInsertionPoint(ComponentGenerator.InsertionPoint.IN_FILE_LOG_LIKELIHOODS, writer);
// coalescentLikelihood
for (PartitionTreeModel model : options.getPartitionTreeModels()) {
PartitionTreePrior prior = model.getPartitionTreePrior();
treePriorGenerator.writePriorLikelihoodReferenceLog(prior, model, writer);
writer.writeText("");
}
// Skyline/skygrid priors keep a single shared likelihood; reference it once per prior.
for (PartitionTreePrior prior : options.getPartitionTreePriors()) {
if (prior.getNodeHeightPrior() == TreePriorType.EXTENDED_SKYLINE) {
// only 1 coalescent
writer.writeIDref(CoalescentLikelihoodParser.COALESCENT_LIKELIHOOD, prior.getPrefix() + COALESCENT);
} else if (prior.getNodeHeightPrior() == TreePriorType.SKYGRID) {
writer.writeIDref(GMRFSkyrideLikelihoodParser.SKYGRID_LIKELIHOOD, prior.getPrefix() + "skygrid");
}
}
writer.writeCloseTag(LoggerParser.LOG);
generateInsertionPoint(ComponentGenerator.InsertionPoint.AFTER_FILE_LOG, writer);
}
Usage of dr.util.Attribute in project beast-mcmc (beast-dev):
class PatternListGenerator, method writePatternList (public overload).
/**
 * Writes the pattern list(s) for a data partition, splitting by codon position
 * when the partition's substitution model uses codon heterogeneity ("112" merges
 * positions 1+2 and writes position 3 separately; "123" writes one list per position).
 *
 * @param partition the partition data to write the pattern lists for
 * @param writer the writer
 */
public void writePatternList(PartitionData partition, XMLWriter writer) {
writer.writeText("");
AncestralStatesComponentOptions ancestralStatesOptions = (AncestralStatesComponentOptions) options.getComponentOptions(AncestralStatesComponentOptions.class);
SequenceErrorModelComponentOptions sequenceErrorOptions = (SequenceErrorModelComponentOptions) options.getComponentOptions(SequenceErrorModelComponentOptions.class);
PartitionSubstitutionModel model = partition.getPartitionSubstitutionModel();
String codonHeteroPattern = model.getCodonHeteroPattern();
int partitionCount = model.getCodonPartitionCount();
// True when NEITHER ancestral-state reconstruction NOR a sequence error model
// is in use for this partition — only then can patterns be compressed.
// (Renamed from the misleading "isAncestralStatesModel", which asserted the
// opposite of what the expression computes.)
boolean noStatesReconstruction = (!ancestralStatesOptions.usingAncestralStates(partition) && !sequenceErrorOptions.usingSequenceErrorModel(partition));
boolean isCovarionModel = model.getDataType().getType() == DataType.COVARION && model.getBinarySubstitutionModel() == BinaryModelType.BIN_COVARION;
boolean unique = noStatesReconstruction || isCovarionModel;
boolean strip = noStatesReconstruction || isCovarionModel;
if (model.getDataType().getType() == DataType.NUCLEOTIDES && codonHeteroPattern != null && partitionCount > 1) {
if (codonHeteroPattern.equals("112")) {
// Positions 1 and 2 share a substitution model: merge their patterns.
writer.writeComment("The " + (unique ? "unique " : "") + "patterns for codon positions 1 & 2");
writer.writeOpenTag(MergePatternsParser.MERGE_PATTERNS, new Attribute[] { new Attribute.Default<String>(XMLParser.ID, partition.getPrefix() + model.getPrefixCodon(1) + SitePatternsParser.PATTERNS) });
writePatternList(partition, 0, 3, null, unique, strip, writer);
writePatternList(partition, 1, 3, null, unique, strip, writer);
writer.writeCloseTag(MergePatternsParser.MERGE_PATTERNS);
writer.writeComment("The " + (unique ? "unique " : "") + "patterns for codon position 3");
writePatternList(partition, 2, 3, model.getPrefixCodon(2), unique, strip, writer);
} else {
// pattern is 123: one independent pattern list per codon position
for (int i = 1; i <= 3; i++) {
writer.writeComment("The " + (unique ? "unique " : "") + "patterns for codon position " + i);
writePatternList(partition, i - 1, 3, model.getPrefixCodon(i), unique, strip, writer);
}
}
// END: pattern is 123
} else {
// No codon partitioning: a single pattern list covering the whole partition.
writePatternList(partition, 0, 1, "", unique, strip, writer);
}
}
Usage of dr.util.Attribute in project beast-mcmc (beast-dev):
class PatternListGenerator, method writePatternList (private overload).
/**
 * Write a single pattern list element for one (codon sub-)partition.
 *
 * @param partition the partition to write a pattern list for
 * @param offset offset by (0-based codon position within the reading frame)
 * @param every skip every (3 for codon positions, 1 otherwise; must not combine with a partition-level "every" &gt; 1)
 * @param codonPrefix id prefix for the codon sub-partition; null suppresses the id
 *                    (used for the 1+2 halves of a "112" merge, which get their id
 *                    from the enclosing mergePatterns element instead)
 * @param unique whether patterns may be compressed to unique site patterns
 * @param strip whether constant/ambiguous sites are stripped
 * @param writer the writer
 * @throws IllegalArgumentException if both the partition's own "every" and the
 *                                  codon "every" are greater than 1
 */
private void writePatternList(final PartitionData partition, int offset, int every, final String codonPrefix, final boolean unique, final boolean strip, final XMLWriter writer) {
Alignment alignment = partition.getAlignment();
int from = partition.getFromSite();
int to = partition.getToSite();
int partEvery = partition.getEvery();
// Cannot combine a partition-level sampling interval with codon striding.
if (partEvery > 1 && every > 1)
throw new IllegalArgumentException();
if (from < 1)
from = 1;
every = Math.max(partEvery, every);
from += offset;
// this object is created solely to calculate the number of patterns in the alignment
// NOTE(review): SitePatterns is passed (..., every, strip, unique) here while the
// booleans were declared (unique, strip) — assumed to match the constructor's
// parameter order; confirm against the SitePatterns API.
SitePatterns patterns = new SitePatterns(alignment, null, from - 1, to - 1, every, strip, unique);
writer.writeComment("The " + (unique ? "unique " : "") + "patterns from " + from + " to " + (to > 0 ? to : "end") + ((every > 1) ? " every " + every : ""), "npatterns=" + patterns.getPatternCount());
List<Attribute> attributes = new ArrayList<Attribute>();
// no 11 of 112 codon, which uses mergePatterns id instead
if (codonPrefix != null) {
attributes.add(new Attribute.Default<String>(XMLParser.ID, partition.getPrefix() + codonPrefix + SitePatternsParser.PATTERNS));
}
attributes.add(new Attribute.Default<String>("from", "" + from));
// to <= 0 means "to the end of the alignment"; omit the attribute in that case.
if (to >= 0)
attributes.add(new Attribute.Default<String>("to", "" + to));
if (every > 1) {
attributes.add(new Attribute.Default<String>("every", "" + every));
}
// The parser's UNIQUE attribute defaults to true, so only write it when disabling.
if (!unique) {
attributes.add(new Attribute.Default<Boolean>(SitePatternsParser.UNIQUE, false));
}
if (strip) {
// default true
// NOTE(review): writes STRIP=false when the local strip flag is TRUE — looks
// inverted; presumably intentional because stripping was handled elsewhere,
// but confirm against SitePatternsParser's default and intended behavior.
attributes.add(new Attribute.Default<Boolean>(SitePatternsParser.STRIP, false));
}
// generate <patterns>
writer.writeOpenTag(SitePatternsParser.PATTERNS, attributes);
writer.writeIDref(AlignmentParser.ALIGNMENT, alignment.getId());
writer.writeCloseTag(SitePatternsParser.PATTERNS);
}
Aggregations