Use of dr.math.matrixAlgebra.ReadableVector in project beast-mcmc by beast-dev.
Class MultivariateConditionalOnTipsRealizedDelegate, method simulateTraitForExternalNode.
private void simulateTraitForExternalNode(final int nodeIndex,
                                          final int traitIndex,
                                          final int offsetSample,
                                          final int offsetParent,
                                          final int offsetPartial,
                                          final double branchPrecision) {

    final DenseMatrix64F P0 = wrap(partialNodeBuffer, offsetPartial + dimTrait, dimTrait, dimTrait);
    final int missingCount = countFiniteDiagonals(P0);

    if (missingCount == 0) { // Completely observed
        System.arraycopy(partialNodeBuffer, offsetPartial, sample, offsetSample, dimTrait);
    } else {

        final int zeroCount = countZeroDiagonals(P0);
        if (zeroCount == dimTrait) { // All missing completely at random
            // TODO: This is N(X_pa(j), l_j V_root). Why not N(X_pa(j), V_branch) ?
            final double sqrtScale = Math.sqrt(1.0 / branchPrecision);

            if (NEW_TIP_WITH_NO_DATA) {
                // final ReadableVector parentSample = new WrappedVector.Raw(sample, offsetParent, dimTrait);
                final ReadableVector M;
                M = getMeanBranch(offsetParent);
                // if (hasNoDrift) {
                //     M = parentSample;
                // } else {
                //     M = getMeanWithDrift(parentSample,
                //             new WrappedVector.Raw(displacementBuffer, 0, dimTrait));
                // }

                MultivariateNormalDistribution.nextMultivariateNormalCholesky(
                        M, // input mean
                        new WrappedMatrix.ArrayOfArray(cholesky), sqrtScale, // input variance
                        new WrappedVector.Raw(sample, offsetSample, dimTrait), // output sample
                        tmpEpsilon);
            } else {
                // TODO Drift?
                assert (!likelihoodDelegate.getDiffusionProcessDelegate().hasDrift());

                MultivariateNormalDistribution.nextMultivariateNormalCholesky(
                        sample, offsetParent, // input mean
                        cholesky, sqrtScale, // input variance
                        sample, offsetSample, // output sample
                        tmpEpsilon);
            }
        } else {

            if (missingCount == dimTrait) { // All missing, but not completely at random
                simulateTraitForInternalNode(offsetSample, offsetParent, offsetPartial, branchPrecision);
            } else { // Partially observed

                // copy observed values
                System.arraycopy(partialNodeBuffer, offsetPartial, sample, offsetSample, dimTrait);

                final PartiallyMissingInformation.HashedIntArray indices =
                        missingInformation.getMissingIndices(nodeIndex, traitIndex);
                final int[] observed = indices.getComplement();
                final int[] missing = indices.getArray();

                final DenseMatrix64F V1 = getVarianceBranch(branchPrecision);
                // final DenseMatrix64F V1 = new DenseMatrix64F(dimTrait, dimTrait);
                // CommonOps.scale(1.0 / branchPrecision, Vd, V1);

                // TODO Cache (via delegated function)
                ConditionalVarianceAndTransform2 transform =
                        new ConditionalVarianceAndTransform2(V1, missing, observed);

                final DenseMatrix64F cP0 = new DenseMatrix64F(missing.length, missing.length);
                gatherRowsAndColumns(P0, cP0, missing, missing);

                final WrappedVector cM2 = transform.getConditionalMean(
                        partialNodeBuffer, offsetPartial, // Tip value
                        sample, offsetParent);            // Parent value

                final DenseMatrix64F cP1 = transform.getConditionalPrecision();

                final DenseMatrix64F cP2 = new DenseMatrix64F(missing.length, missing.length);
                final DenseMatrix64F cV2 = new DenseMatrix64F(missing.length, missing.length);

                // TODO: Shouldn't P0 = 0 always in this situation ?
                CommonOps.add(cP0, cP1, cP2);
                safeInvert2(cP2, cV2, false);

                if (NEW_CHOLESKY) {
                    DenseMatrix64F cC2 = getCholeskyOfVariance(cV2, missing.length);

                    MultivariateNormalDistribution.nextMultivariateNormalCholesky(
                            cM2, // input mean
                            new WrappedMatrix.WrappedDenseMatrix(cC2), 1.0, // input variance
                            new WrappedVector.Indexed(sample, offsetSample, missing, missing.length), // output sample
                            tmpEpsilon);
                } else {
                    double[][] cC2 = getCholeskyOfVariance(cV2.getData(), missing.length);

                    MultivariateNormalDistribution.nextMultivariateNormalCholesky(
                            cM2, // input mean
                            new WrappedMatrix.ArrayOfArray(cC2), 1.0, // input variance
                            new WrappedVector.Indexed(sample, offsetSample, missing, missing.length), // output sample
                            tmpEpsilon);
                }

                if (DEBUG) {
                    final WrappedVector M0 = new WrappedVector.Raw(partialNodeBuffer, offsetPartial, dimTrait);

                    final WrappedVector M1 = new WrappedVector.Raw(sample, offsetParent, dimTrait);
                    final DenseMatrix64F P1 = new DenseMatrix64F(dimTrait, dimTrait);
                    CommonOps.scale(branchPrecision, Pd, P1);

                    final WrappedVector newSample = new WrappedVector.Raw(sample, offsetSample, dimTrait);

                    System.err.println("sT F E N");
                    System.err.println("M0: " + M0);
                    System.err.println("P0: " + P0);
                    System.err.println("");
                    System.err.println("M1: " + M1);
                    System.err.println("P1: " + P1);
                    System.err.println("");
                    System.err.println("cP0: " + cP0);
                    System.err.println("cM2: " + cM2);
                    System.err.println("cP1: " + cP1);
                    System.err.println("cP2: " + cP2);
                    System.err.println("cV2: " + cV2);
                    // System.err.println("cC2: " + new Matrix(cC2));
                    System.err.println("SS: " + newSample);
                    System.err.println("");
                }
            }
        }
    }
}
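The "Partially observed" branch above relies on standard Gaussian conditioning: the missing tip dimensions are drawn from the law of x_m given the observed x_o. In beast-mcmc this is handled by ConditionalVarianceAndTransform2, which also returns a conditional precision and works with the wrapped buffer types. As a point of reference only, the sketch below spells out the textbook identity E[x_m | x_o] = mu_m + V_mo V_oo^{-1} (x_o - mu_o) with plain EJML calls; the class name GaussianConditioning and the gather helper are hypothetical and not part of beast-mcmc.

import org.ejml.data.DenseMatrix64F;
import org.ejml.ops.CommonOps;

// Illustrative sketch only; not BEAST's ConditionalVarianceAndTransform2.
final class GaussianConditioning {

    // Extracts V[rows, cols] for arbitrary index sets (hypothetical helper,
    // analogous in spirit to gatherRowsAndColumns above).
    static DenseMatrix64F gather(DenseMatrix64F v, int[] rows, int[] cols) {
        DenseMatrix64F out = new DenseMatrix64F(rows.length, cols.length);
        for (int i = 0; i < rows.length; i++) {
            for (int j = 0; j < cols.length; j++) {
                out.set(i, j, v.get(rows[i], cols[j]));
            }
        }
        return out;
    }

    // Conditional mean of the missing block: mu_m + V_mo V_oo^{-1} (x_o - mu_o).
    static DenseMatrix64F conditionalMean(DenseMatrix64F v, double[] mu, double[] x,
                                          int[] missing, int[] observed) {
        DenseMatrix64F vMO = gather(v, missing, observed);
        DenseMatrix64F vOO = gather(v, observed, observed);
        DenseMatrix64F vOOInv = new DenseMatrix64F(observed.length, observed.length);
        CommonOps.invert(vOO, vOOInv); // assumes the observed block is non-singular

        DenseMatrix64F delta = new DenseMatrix64F(observed.length, 1);
        for (int j = 0; j < observed.length; j++) {
            delta.set(j, 0, x[observed[j]] - mu[observed[j]]);
        }

        DenseMatrix64F weight = new DenseMatrix64F(missing.length, observed.length);
        CommonOps.mult(vMO, vOOInv, weight); // V_mo V_oo^{-1}

        DenseMatrix64F mean = new DenseMatrix64F(missing.length, 1);
        CommonOps.mult(weight, delta, mean); // V_mo V_oo^{-1} (x_o - mu_o)
        for (int i = 0; i < missing.length; i++) {
            mean.set(i, 0, mean.get(i, 0) + mu[missing[i]]); // + mu_m
        }
        return mean;
    }
}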
Use of dr.math.matrixAlgebra.ReadableVector in project beast-mcmc by beast-dev.
Class ReversibleZigZagOperator, method drawInitialMomentum.
@Override
final WrappedVector drawInitialMomentum() {

    ReadableVector mass = preconditioning.mass;
    double[] momentum = new double[mass.getDim()];

    for (int i = 0, len = momentum.length; i < len; i++) {
        int sign = (MathUtils.nextDouble() > 0.5) ? 1 : -1;
        momentum[i] = sign * MathUtils.nextExponential(1) * Math.sqrt(mass.get(i));
    }

    if (mask != null) {
        applyMask(momentum);
    }

    return new WrappedVector.Raw(momentum);
}
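Stripped of the BEAST utility classes, the draw above gives each coordinate an independent Laplace-distributed momentum with scale sqrt(m_i): a random sign times an Exponential(1) variate times the square root of the mass. A minimal, self-contained sketch using java.util.Random in place of MathUtils; the class and method names are hypothetical.

import java.util.Random;

// Illustrative sketch only; not part of beast-mcmc.
final class LaplaceMomentum {

    static double[] draw(double[] mass, Random rng) {
        double[] momentum = new double[mass.length];
        for (int i = 0; i < momentum.length; i++) {
            int sign = (rng.nextDouble() > 0.5) ? 1 : -1;
            double exponential = -Math.log(1.0 - rng.nextDouble()); // Exponential(1) via inversion
            momentum[i] = sign * exponential * Math.sqrt(mass[i]);
        }
        return momentum;
    }
}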
Use of dr.math.matrixAlgebra.ReadableVector in project beast-mcmc by beast-dev.
Class MultivariateConditionalOnTipsRealizedDelegate, method simulateTraitForInternalNode.
// private ReadableVector getMeanWithDrift(final ReadableVector mean,
//                                         final ReadableVector drift) {
//     return new ReadableVector.Sum(mean, drift);
// }

// private ReadableVector getMeanWithDrift(double[] mean, int offsetMean, double[] drift, int dim) {
//     for (int i = 0; i < dim; ++i) {
//         tmpDrift[i] = mean[offsetMean + i] + drift[i];
//     }
//     return new WrappedVector.Raw(tmpDrift, 0, dimTrait);
// }

private void simulateTraitForInternalNode(final int offsetSample,
                                          final int offsetParent,
                                          final int offsetPartial,
                                          final double branchPrecision) {

    if (!Double.isInfinite(branchPrecision)) {
        // Here we simulate X_j | X_pa(j), Y

        final WrappedVector M0 = new WrappedVector.Raw(partialNodeBuffer, offsetPartial, dimTrait);
        final DenseMatrix64F P0 = wrap(partialNodeBuffer, offsetPartial + dimTrait, dimTrait, dimTrait);

        // final ReadableVector parentSample = new WrappedVector.Raw(sample, offsetParent, dimTrait);

        final ReadableVector M1;
        final DenseMatrix64F P1;

        M1 = getMeanBranch(offsetParent);
        P1 = getPrecisionBranch(branchPrecision);

        // if (hasNoDrift) {
        //     M1 = parentSample; // new WrappedVector.Raw(sample, offsetParent, dimTrait);
        //     P1 = new DenseMatrix64F(dimTrait, dimTrait);
        //     CommonOps.scale(branchPrecision, Pd, P1);
        // } else {
        //     M1 = getMeanWithDrift(parentSample,
        //             new WrappedVector.Raw(displacementBuffer, 0, dimTrait)); // getMeanWithDrift(sample, offsetParent, displacementBuffer, dimTrait);
        //     P1 = DenseMatrix64F.wrap(dimTrait, dimTrait, precisionBuffer);
        // }

        final WrappedVector M2 = new WrappedVector.Raw(tmpMean, 0, dimTrait);
        final DenseMatrix64F P2 = new DenseMatrix64F(dimTrait, dimTrait);
        final DenseMatrix64F V2 = new DenseMatrix64F(dimTrait, dimTrait);

        CommonOps.add(P0, P1, P2);
        safeInvert2(P2, V2, false);
        weightedAverage(M0, P0, M1, P1, M2, V2, dimTrait);

        final WrappedMatrix C2;
        if (NEW_CHOLESKY) {
            DenseMatrix64F tC2 = getCholeskyOfVariance(V2, dimTrait);
            C2 = new WrappedMatrix.WrappedDenseMatrix(tC2);

            MultivariateNormalDistribution.nextMultivariateNormalCholesky(
                    M2, // input mean
                    C2, 1.0, // input variance
                    new WrappedVector.Raw(sample, offsetSample, dimTrait), tmpEpsilon);
        } else {
            double[][] tC2 = getCholeskyOfVariance(V2.getData(), dimTrait);
            C2 = new WrappedMatrix.ArrayOfArray(tC2);

            MultivariateNormalDistribution.nextMultivariateNormalCholesky(
                    M2, // input mean
                    C2, 1.0, // input variance
                    new WrappedVector.Raw(sample, offsetSample, dimTrait), tmpEpsilon);
        }

        if (DEBUG) {
            System.err.println("sT F I N");
            System.err.println("M0: " + M0);
            System.err.println("P0: " + P0);
            System.err.println("M1: " + M1);
            System.err.println("P1: " + P1);
            System.err.println("M2: " + M2);
            System.err.println("V2: " + V2);
            System.err.println("C2: " + C2);
            System.err.println("SS: " + new WrappedVector.Raw(sample, offsetSample, dimTrait));
            System.err.println("");

            if (!check(M2)) {
                System.exit(-1);
            }
        }

    } else {

        System.arraycopy(sample, offsetParent, sample, offsetSample, dimTrait);

        if (DEBUG) {
            System.err.println("sT F I N infinite branch precision");
            System.err.println("SS: " + new WrappedVector.Raw(sample, offsetSample, dimTrait));
        }
    }
}
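The CommonOps.add / safeInvert2 / weightedAverage sequence above implements the usual precision-weighted combination of the node's partial density N(M0, P0^{-1}) with the branch contribution N(M1, P1^{-1}): V2 = (P0 + P1)^{-1} and M2 = V2 (P0 M0 + P1 M1). The sketch below restates that formula with plain EJML, assuming the summed precision is invertible (BEAST's safeInvert2 additionally guards against singular matrices); the class and method names are illustrative.

import org.ejml.data.DenseMatrix64F;
import org.ejml.ops.CommonOps;

// Illustrative sketch only; not BEAST's weightedAverage/safeInvert2 helpers.
final class PrecisionWeightedAverage {

    // Fills v2 = (P0 + P1)^{-1} and returns m2 = V2 (P0 m0 + P1 m1);
    // m0 and m1 are column vectors (dim x 1).
    static DenseMatrix64F combine(DenseMatrix64F p0, DenseMatrix64F m0,
                                  DenseMatrix64F p1, DenseMatrix64F m1,
                                  DenseMatrix64F v2) {
        final int dim = p0.numRows;

        DenseMatrix64F p2 = new DenseMatrix64F(dim, dim);
        CommonOps.add(p0, p1, p2);       // P2 = P0 + P1
        CommonOps.invert(p2, v2);        // V2 = P2^{-1}, assumes P2 is invertible

        DenseMatrix64F p0m0 = new DenseMatrix64F(dim, 1);
        DenseMatrix64F p1m1 = new DenseMatrix64F(dim, 1);
        DenseMatrix64F sum = new DenseMatrix64F(dim, 1);
        CommonOps.mult(p0, m0, p0m0);    // P0 M0
        CommonOps.mult(p1, m1, p1m1);    // P1 M1
        CommonOps.add(p0m0, p1m1, sum);  // P0 M0 + P1 M1

        DenseMatrix64F m2 = new DenseMatrix64F(dim, 1);
        CommonOps.mult(v2, sum, m2);     // M2 = V2 (P0 M0 + P1 M1)
        return m2;
    }
}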
Use of dr.math.matrixAlgebra.ReadableVector in project beast-mcmc by beast-dev.
Class BouncyParticleOperator, method drawInitialVelocity.
private WrappedVector drawInitialVelocity() {

    ReadableVector mass = preconditioning.mass;
    double[] velocity = new double[mass.getDim()];

    for (int i = 0, len = velocity.length; i < len; i++) {
        velocity[i] = MathUtils.nextGaussian() / Math.sqrt(mass.get(i));
    }

    if (mask != null) {
        applyMask(velocity);
    }

    return new WrappedVector.Raw(velocity);
}
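Equivalently, each velocity component is drawn as v_i ~ N(0, 1/m_i), so the corresponding momentum p_i = m_i v_i is N(0, m_i), the Gaussian kinetic energy with a diagonal mass matrix. A standalone sketch with java.util.Random in place of BEAST's MathUtils; the names are hypothetical.

import java.util.Random;

// Illustrative sketch only; not part of beast-mcmc.
final class GaussianVelocity {

    static double[] draw(double[] mass, Random rng) {
        double[] velocity = new double[mass.length];
        for (int i = 0; i < velocity.length; i++) {
            velocity[i] = rng.nextGaussian() / Math.sqrt(mass[i]); // v_i ~ N(0, 1/m_i)
        }
        return velocity;
    }
}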
Use of dr.math.matrixAlgebra.ReadableVector in project beast-mcmc by beast-dev.
Class BouncyParticleOperator, method updateVelocity.
private static void updateVelocity(WrappedVector velocity, WrappedVector gradient, ReadableVector mass) {

    ReadableVector gDivM = new ReadableVector.Quotient(gradient, mass);

    double vg = innerProduct(velocity, gradient);
    double ggDivM = innerProduct(gradient, gDivM);

    for (int i = 0, len = velocity.getDim(); i < len; ++i) {
        velocity.set(i, velocity.get(i) - 2 * vg / ggDivM * gDivM.get(i));
    }
}
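updateVelocity is the bounce reflection of the bouncy particle sampler: the velocity is reflected in the hyperplane orthogonal to the gradient under the mass-weighted inner product, v <- v - 2 (v.g) / (g.M^{-1}g) M^{-1}g. The same operation over plain double[] arrays with a diagonal mass, assuming a non-zero gradient; the class and method names are illustrative.

// Illustrative sketch only; mirrors updateVelocity above for a diagonal mass.
final class BounceReflection {

    static void reflect(double[] velocity, double[] gradient, double[] mass) {
        double vg = 0.0;      // v . g
        double ggDivM = 0.0;  // g . (M^{-1} g)
        for (int i = 0; i < velocity.length; i++) {
            vg += velocity[i] * gradient[i];
            ggDivM += gradient[i] * gradient[i] / mass[i];
        }
        final double scale = 2.0 * vg / ggDivM; // assumes gradient is non-zero
        for (int i = 0; i < velocity.length; i++) {
            velocity[i] -= scale * gradient[i] / mass[i]; // step along M^{-1} g
        }
    }
}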