Usage of dr.inference.markovjumps.StateHistory in project beast-mcmc (beast-dev): class StateHistoryTest, method testFreqDistribution.
/**
 * Checks that unconditioned simulation reaches the stationary distribution:
 * after a long duration (~10 expected substitutions), the empirical ending-state
 * frequencies should match {@code freqModel.getFrequencies()} within 1E-3.
 */
public void testFreqDistribution() {
    System.out.println("Start of FreqDistribution test");
    final int startingState = 0;
    // 10 expected substitutions is effectively "infinite" time for mixing.
    final double duration = 10;
    double[] observedFreq = new double[stateCount];
    for (int rep = 0; rep < N; rep++) {
        StateHistory history = StateHistory.simulateUnconditionalOnEndingState(0.0, startingState, duration, lambda, stateCount);
        observedFreq[history.getEndingState()] += 1.0;
    }
    // Normalize raw counts into empirical frequencies.
    for (int state = 0; state < stateCount; state++) {
        observedFreq[state] /= N;
    }
    System.out.println("freq = " + new Vector(observedFreq));
    assertEquals(observedFreq, freqModel.getFrequencies(), 1E-3);
    System.out.println("End of FreqDistribution test\n");
}
Usage of dr.inference.markovjumps.StateHistory in project beast-mcmc (beast-dev): class StateHistoryTest, method testCounts.
/**
 * Compares Monte Carlo estimates of expected per-pair jump counts against
 * the analytic expectations computed via the registration-matrix machinery
 * ({@code MarkovJumpsCore} / {@code markovjumps}), within 1E-3.
 */
public void testCounts() {
    // Fixed typo: message previously read "State of Counts test", breaking the
    // Start/End convention used by the other tests in this class.
    System.out.println("Start of Counts test");
    int startingState = 2;
    double duration = 0.5;
    int[] counts = new int[stateCount * stateCount];
    double[] expectedCounts = new double[stateCount * stateCount];
    // Accumulate observed from->to jump counts over N unconditioned simulations.
    for (int i = 0; i < N; i++) {
        StateHistory simultant = StateHistory.simulateUnconditionalOnEndingState(0.0, startingState, duration, lambda, stateCount);
        simultant.accumulateSufficientStatistics(counts, null);
    }
    // Average into per-simulation expected counts.
    for (int i = 0; i < stateCount * stateCount; i++) {
        expectedCounts[i] = (double) counts[i] / (double) N;
    }
    double[] r = new double[stateCount * stateCount];
    double[] joint = new double[stateCount * stateCount];
    double[] analytic = new double[stateCount * stateCount];
    for (int from = 0; from < stateCount; from++) {
        for (int to = 0; to < stateCount; to++) {
            double marginal = 0;
            if (from != to) {
                // Register only the (from -> to) transition, then read off the
                // expected count along the row for our fixed starting state.
                MarkovJumpsCore.fillRegistrationMatrix(r, from, to, stateCount);
                markovjumps.setRegistration(r);
                markovjumps.computeJointStatMarkovJumps(duration, joint);
                for (int j = 0; j < stateCount; j++) {
                    // Marginalize out ending state
                    marginal += joint[startingState * stateCount + j];
                }
            }
            analytic[from * stateCount + to] = marginal;
        }
    }
    System.out.println("unconditional expected counts = " + new Vector(expectedCounts));
    System.out.println("analytic counts = " + new Vector(analytic));
    assertEquals(expectedCounts, analytic, 1E-3);
    System.out.println("End of Counts test\n");
}
Usage of dr.inference.markovjumps.StateHistory in project beast-mcmc (beast-dev): class StateHistoryTest, method testGetLogLikelihood.
/**
 * Validates {@code StateHistory.getLogLikelihood} by checking that the Monte
 * Carlo average of per-history likelihoods, under conditional (uniformized)
 * simulation, recovers the analytic transition probability. Also exercises
 * importance-sampling-style estimators under unconditioned simulation.
 */
public void testGetLogLikelihood() {
    System.out.println("Start of getLogLikelihood test");
    int startingState = 1;
    int endingState = 1;
    double duration = 0.5;
    int iterations = 5000000;
    double[] probs = new double[16];
    baseModel.getTransitionProbabilities(duration, probs);
    double trueProb = probs[startingState * 4 + endingState];
    // Fixed typo: previously printed "Tru prob".
    System.out.println("True prob = " + trueProb);
    UniformizedSubstitutionModel uSM = new UniformizedSubstitutionModel(baseModel);
    uSM.setSaveCompleteHistory(true);
    double logProb = Double.NEGATIVE_INFINITY;
    double prob = 0.0;
    double condProb = 0.0;
    for (int i = 0; i < iterations; ++i) {
        uSM.computeCondStatMarkovJumps(startingState, endingState, duration, trueProb);
        StateHistory history = uSM.getStateHistory();
        // Each conditional history must span the full branch and honor both endpoints.
        assertEquals(history.getEndingTime() - history.getStartingTime(), duration, 10E-3);
        assertEquals(history.getStartingState(), startingState);
        assertEquals(history.getEndingState(), endingState);
        double logLikelihood = history.getLogLikelihood(lambda, 4);
        prob += Math.exp(logLikelihood);
        logProb = LogTricks.logSum(logProb, logLikelihood);
        // Harmonic-mean style accumulator (inverse-likelihood estimator).
        condProb += Math.exp(-logLikelihood);
    }
    logProb = Math.exp(logProb - Math.log(iterations));
    prob /= iterations;
    condProb /= iterations;
    System.out.println("Sim prob = " + prob);
    System.out.println("Inv prob = " + (1.0 / condProb));
    System.out.println();
    System.out.println();
    // Try using unconditioned simulation
    double marginalProb = 0.0;
    double mcProb = 0.0;
    double invMcProb = 0.0;
    int totalTries = 0;
    int i = 0;
    while (i < iterations) {
        startingState = MathUtils.randomChoicePDF(freqModel.getFrequencies());
        StateHistory history = StateHistory.simulateUnconditionalOnEndingState(0, startingState, duration, lambda, 4);
        // Accept every draw; a previous experiment gated on
        // history.getNumberOfJumps() == randomChoice1 (garbled `if (true)` removed).
        marginalProb += 1.0;
        i++;
        double logLike = history.getLogLikelihood(lambda, 4);
        mcProb += Math.exp(logLike);
        invMcProb += Math.exp(-logLike);
        totalTries++;
    }
    marginalProb /= totalTries;
    mcProb /= iterations;
    invMcProb /= iterations;
    System.out.println("Sim uncd = " + marginalProb);
    System.out.println("mc prob = " + mcProb);
    System.out.println("m2 prob = " + (1.0 / invMcProb));
    // Bug fix: the original assertEquals(prob, trueProb) autoboxes both doubles
    // and compares via Double.equals (JUnit 3 has no delta-free double overload),
    // which a stochastic estimate essentially never satisfies. Compare with a
    // tolerance appropriate for 5e6 Monte Carlo samples instead.
    assertEquals(prob, trueProb, 1E-2);
}
Usage of dr.inference.markovjumps.StateHistory in project beast-mcmc (beast-dev): class UniformizedSubstitutionModel, method computeCondStatMarkovJumps.
/**
 * Returns the Monte Carlo average, over {@code numSimulants} endpoint-conditioned
 * state histories, of the per-history statistic computed by
 * {@code getProcessForSimulant}.
 *
 * @param startingState         state at the start of the interval
 * @param endingState           state the history is conditioned to end in
 * @param time                  length of the interval
 * @param transitionProbability P(endingState | startingState, time), used by uniformization
 * @return averaged statistic, or {@code Double.NaN} when uniformization fails and
 *         {@code RETURN_NAN} is set
 */
public double computeCondStatMarkovJumps(int startingState, int endingState, double time, double transitionProbability) {
    // Lazily (re)build the subordinated process when the rate matrix changed.
    if (updateSubordinator) {
        constructSubordinator();
    }
    double total = 0;
    for (int i = 0; i < numSimulants; i++) {
        StateHistory history = null;
        try {
            history = UniformizedStateHistory.simulateConditionalOnEndingState(0.0, startingState, time, endingState, transitionProbability, stateCount, subordinator);
        } catch (SubordinatedProcess.Exception e) {
            if (RETURN_NAN) {
                if (reportWarning) {
                    Logger.getLogger("dr.app.beagle").info("Unable to compute a robust count; this is most likely due to poor starting values.");
                }
                // Warn only once per instance.
                reportWarning = false;
                return Double.NaN;
            }
            // Error in uniformization; try rejection sampling
            System.err.println("Attempting rejection sampling after uniformization failure");
            substModel.getInfinitesimalMatrix(tmp);
            int attempts = 0;
            boolean success = false;
            // Repeatedly simulate unconditioned histories, keeping the first one
            // that happens to end in the required state; bail out after
            // maxRejectionAttempts to avoid an unbounded loop.
            while (!success) {
                if (attempts >= maxRejectionAttempts) {
                    throw new RuntimeException("Rejection sampling failure, after uniformization failure");
                }
                history = StateHistory.simulateUnconditionalOnEndingState(0.0, startingState, time, tmp, stateCount);
                if (history.getEndingState() == endingState) {
                    success = true;
                }
                attempts++;
            }
        }
        total += getProcessForSimulant(history);
        if (saveCompleteHistory) {
            // Saving a single complete history is only well-defined when there is
            // exactly one simulant; otherwise later iterations would overwrite it.
            if (numSimulants == 1) {
                completeHistory = history;
            } else {
                throw new RuntimeException("Use single simulant when saving complete histories");
            }
        }
    }
    return total / (double) numSimulants;
}
Usage of dr.inference.markovjumps.StateHistory in project beast-mcmc (beast-dev): class CodonPartitionedRobustCounting, method computeExpectedCountsForBranch.
/**
 * Computes, for each codon site, the expected number of labeled substitutions
 * along the branch above {@code child}, using either analytic conditional means
 * or uniformized simulation (per {@code useUniformization}). When complete
 * histories are being saved, also records a per-site string description of the
 * (filtered) jump history for this node.
 *
 * @param child node at the bottom of the branch
 * @return array of length {@code numCodons} with the expected count per site
 */
private double[] computeExpectedCountsForBranch(NodeRef child) {
    // Get child node reconstructed sequence
    final int[] childSeq0 = partition[0].getStatesForNode(tree, child);
    final int[] childSeq1 = partition[1].getStatesForNode(tree, child);
    final int[] childSeq2 = partition[2].getStatesForNode(tree, child);
    // Get parent node reconstructed sequence
    final NodeRef parent = tree.getParent(child);
    final int[] parentSeq0 = partition[0].getStatesForNode(tree, parent);
    final int[] parentSeq1 = partition[1].getStatesForNode(tree, parent);
    final int[] parentSeq2 = partition[2].getStatesForNode(tree, parent);
    // Branch "time" = rate * length, the evolutionary distance for this branch.
    double branchRateTime = branchRateModel.getBranchRate(tree, child) * tree.getBranchLength(child);
    double[] count = new double[numCodons];
    if (!useUniformization) {
        // Analytic route: condMeanMatrix holds conditional mean counts per state pair.
        markovJumps.computeCondStatMarkovJumps(branchRateTime, condMeanMatrix);
    } else {
        // Fill condMeanMatrix with transition probabilities
        markovJumps.getSubstitutionModel().getTransitionProbabilities(branchRateTime, condMeanMatrix);
    }
    for (int i = 0; i < numCodons; i++) {
        // Construct this child and parent codon
        final int childState = getCanonicalState(childSeq0[i], childSeq1[i], childSeq2[i]);
        final int parentState = getCanonicalState(parentSeq0[i], parentSeq1[i], parentSeq2[i]);
        // final int vChildState = getVladimirState(childSeq0[i], childSeq1[i], childSeq2[i]);
        // final int vParentState = getVladimirState(parentSeq0[i], parentSeq1[i], parentSeq2[i]);
        final double codonCount;
        if (!useUniformization) {
            // 64 = codon state-space stride; row = parent state, column = child state.
            codonCount = condMeanMatrix[parentState * 64 + childState];
        } else {
            // Simulation route: condMeanMatrix here holds transition probabilities
            // (see above), passed as the conditioning probability.
            codonCount = ((UniformizedSubstitutionModel) markovJumps).computeCondStatMarkovJumps(parentState, childState, branchRateTime, condMeanMatrix[parentState * 64 + childState]);
        }
        if (useUniformization && saveCompleteHistory) {
            UniformizedSubstitutionModel usModel = (UniformizedSubstitutionModel) markovJumps;
            // Lazily allocate the per-node, per-site history store.
            if (completeHistoryPerNode == null) {
                completeHistoryPerNode = new String[tree.getNodeCount()][numCodons];
            }
            StateHistory history = usModel.getStateHistory();
            // Only report syn or nonsyn changes
            double[] register = usModel.getRegistration();
            history = history.filterChanges(register);
            int historyCount = history.getNumberOfJumps();
            if (historyCount > 0) {
                // Map simulation times onto absolute node heights for reporting.
                double parentTime = tree.getNodeHeight(tree.getParent(child));
                double childTime = tree.getNodeHeight(child);
                history.rescaleTimesOfEvents(parentTime, childTime);
                int n = history.getNumberOfJumps();
                // MAS may have broken the next line
                String hstring = history.toStringChanges(i + 1, usModel.dataType, false);
                if (DEBUG) {
                    System.err.println("site " + (i + 1) + " : " + history.getNumberOfJumps() + " : " + history.toStringChanges(i + 1, usModel.dataType) + " " + codonLabeling.getText());
                }
                completeHistoryPerNode[child.getNumber()][i] = hstring;
            } else {
                // No labeled jumps at this site on this branch.
                completeHistoryPerNode[child.getNumber()][i] = null;
            }
        }
        count[i] = codonCount;
    }
    return count;
}
Aggregations