
Example 1 with WrappedVector

Use of dr.math.matrixAlgebra.WrappedVector in project beast-mcmc by beast-dev.

From the class TreePrecisionColumnProvider, method setDataModelAndGetColumn.

private double[] setDataModelAndGetColumn(int index) {
    Parameter parameter = tipData.getParameter();
    WrappedVector savedParameter;
    if (RESET_DATA) {
        savedParameter = new WrappedVector.Raw(parameter.getParameterValues());
    }
    if (tipData instanceof ContinuousTraitDataModel) {
        setParameter(makeElementaryVector(index), tipData.getParameter());
    } else if (tipData instanceof ElementaryVectorDataModel) {
        ElementaryVectorDataModel elementaryData = (ElementaryVectorDataModel) tipData;
        int tip = index / dimTrait;
        int dim = index % dimTrait;
        elementaryData.setTipTraitDimParameters(tip, 0, dim);
    } else {
        throw new RuntimeException("Not yet implemented");
    }
    double[] column = productProvider.getProduct(parameter);
    if (RESET_DATA) {
        setParameter(savedParameter, parameter);
    }
    return column;
}
Also used: ContinuousTraitDataModel (dr.evomodel.treedatalikelihood.continuous.ContinuousTraitDataModel), Utils.setParameter (dr.math.matrixAlgebra.ReadableVector.Utils.setParameter), Parameter (dr.inference.model.Parameter), WrappedVector (dr.math.matrixAlgebra.WrappedVector), ElementaryVectorDataModel (dr.evomodel.treedatalikelihood.continuous.ElementaryVectorDataModel)
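
The pattern above probes one column of an implicitly defined matrix: the data model is pointed at an elementary vector e_index, and the product provider returns the matrix-vector product, which for A * e_index is exactly column `index` of A; a WrappedVector.Raw around a copy of the original parameter values lets the method restore the model afterwards. A minimal sketch of the column-probe idea follows; the MatrixVectorProduct interface is a hypothetical stand-in for productProvider, and only the WrappedVector.Raw usage is taken from the example.

import dr.math.matrixAlgebra.WrappedVector;

public class ColumnProbe {

    // Hypothetical stand-in for productProvider: a matrix defined only
    // through its action on a vector.
    interface MatrixVectorProduct {
        double[] multiply(double[] x); // returns A * x
    }

    // Extract column `index` of the implicit matrix A by computing A * e_index.
    static double[] getColumn(MatrixVectorProduct provider, int dim, int index) {
        double[] elementary = new double[dim]; // all zeros ...
        elementary[index] = 1.0;               // ... except a one in slot `index`
        return provider.multiply(elementary);
    }

    public static void main(String[] args) {
        // A 3x3 diagonal matrix diag(2, 3, 5) represented implicitly.
        MatrixVectorProduct diag = x -> new double[]{2 * x[0], 3 * x[1], 5 * x[2]};
        double[] column = getColumn(diag, 3, 1);
        // WrappedVector.Raw is a thin view over a double[] buffer; this should
        // print the extracted column (0.0, 3.0, 0.0) in its toString format.
        System.out.println(new WrappedVector.Raw(column));
    }
}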

Example 2 with WrappedVector

Use of dr.math.matrixAlgebra.WrappedVector in project beast-mcmc by beast-dev.

From the class IntegratedFactorAnalysisLikelihood, method computePartialAndRemainderForOneTaxon.

private void computePartialAndRemainderForOneTaxon(int taxon, DenseMatrix64F precision, DenseMatrix64F variance) {
    final int partialsOffset = dimPartial * taxon;
    // Work with mean in-place
    final WrappedVector mean = new WrappedVector.Raw(partials, partialsOffset, numFactors);
    computePrecisionForTaxon(precision, taxon, numFactors);
    fillInMeanForTaxon(mean, precision, taxon);
    if (DEBUG) {
        System.err.println("taxon " + taxon);
        System.err.println("\tprecision: " + precision);
    }
    double constant;
    double nuggetDensity = 0;
    if (observedDimensions[taxon] == 0) {
        makeCompletedUnobserved(precision, 0);
        makeCompletedUnobserved(variance, Double.POSITIVE_INFINITY);
        constant = 0.0;
    } else {
        if (DEBUG) {
            System.err.println("\tmean: " + mean);
            // System.err.println("\n");
        }
        // final double factorLogDeterminant = ci.getLogDeterminant();
        double traitLogDeterminant = getTraitLogDeterminant(taxon);
        // final double logDetChange = traitLogDeterminant - factorLogDeterminant;
        final double factorInnerProduct = computeFactorInnerProduct(mean, precision);
        final double traitInnerProduct = USE_INNER_PRODUCT_CACHE ? traitInnerProducts[taxon] : computeTraitInnerProduct(taxon);
        final double innerProductChange = traitInnerProduct - factorInnerProduct;
        if (DEBUG) {
            System.err.println("fIP: " + factorInnerProduct);
            System.err.println("tIP: " + traitInnerProduct);
            // System.err.println("fDet: " + factorLogDeterminant);
            System.err.println("tDet: " + traitLogDeterminant);
            // System.err.println("deltaDim: " + dimensionChange)
            System.err.println(" deltaIP: " + innerProductChange + "\n\n");
        }
        // constant = 0.5 * (logDetChange - innerProductChange) - LOG_SQRT_2_PI * (dimensionChange) -
        // nuggetDensity;
        constant = 0.5 * (traitLogDeterminant - innerProductChange) - LOG_SQRT_2_PI * (observedDimensions[taxon]) - nuggetDensity;
    }
    // store in precision, variance and normalization constant
    unwrap(precision, partials, partialsOffset + numFactors);
    PrecisionType.FULL.fillEffDimInPartials(partials, partialsOffset, 0, numFactors);
    if (STORE_VARIANCE) {
        safeInvert2(precision, variance, true);
        unwrap(variance, partials, partialsOffset + numFactors + numFactors * numFactors);
    }
    normalizationConstants[taxon] = constant;
}
Also used: WrappedVector (dr.math.matrixAlgebra.WrappedVector)
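
The `constant` assembled above is the remainder term of a multivariate normal log-density, log N(x; m, P^{-1}) = 0.5 * (log|P| - (x - m)' P (x - m)) - (d / 2) * log(2 * pi), evaluated over the d observed dimensions; LOG_SQRT_2_PI is log sqrt(2 * pi), so multiplying it by d yields the (d / 2) * log(2 * pi) term. A self-contained sketch of that formula, restricted to a diagonal precision so the determinant and inner product stay trivial (names here are illustrative):

public final class GaussianRemainder {

    // Mirrors the LOG_SQRT_2_PI constant referenced in the example.
    static final double LOG_SQRT_2_PI = 0.5 * Math.log(2.0 * Math.PI);

    // log N(x; mean, diag(precision)^{-1})
    static double logDensity(double[] x, double[] mean, double[] diagPrecision) {
        final int d = x.length;
        double logDet = 0.0;
        double innerProduct = 0.0;
        for (int i = 0; i < d; ++i) {
            logDet += Math.log(diagPrecision[i]); // log|P| for diagonal P
            final double delta = x[i] - mean[i];
            innerProduct += delta * diagPrecision[i] * delta; // (x - m)' P (x - m)
        }
        return 0.5 * (logDet - innerProduct) - LOG_SQRT_2_PI * d;
    }

    public static void main(String[] args) {
        // Standard normal at x = 0: expect -0.5 * log(2 * pi), about -0.9189.
        System.out.println(logDensity(new double[]{0.0}, new double[]{0.0}, new double[]{1.0}));
    }
}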

Example 3 with WrappedVector

Use of dr.math.matrixAlgebra.WrappedVector in project beast-mcmc by beast-dev.

From the class SafeMultivariateIntegrator, method partialMean.

void partialMean(int ibo, int jbo, int kbo, int ido, int jdo) {
    if (TIMING) {
        startTime("peel4");
    }
    final double[] tmp = vectorPMk;
    weightedSum(partials, ibo, matrixPip, partials, jbo, matrixPjp, dimTrait, tmp);
    final WrappedVector kPartials = new WrappedVector.Raw(partials, kbo, dimTrait);
    final WrappedVector wrapTmp = new WrappedVector.Raw(tmp, 0, dimTrait);
    safeSolve(matrixPk, wrapTmp, kPartials, false);
    if (TIMING) {
        endTime("peel4");
        startTime("peel5");
    }
}
Also used: WrappedVector (dr.math.matrixAlgebra.WrappedVector)
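
partialMean combines two Gaussian messages: weightedSum accumulates the right-hand side P_i m_i + P_j m_j into tmp, and safeSolve then solves P_k m_k = tmp, writing the new partial mean into kPartials in place. A sketch of the same update in plain EJML calls, under the assumption that P_k is invertible (beast-mcmc's safeSolve also guards the singular case, which CommonOps.solve does not):

import org.ejml.data.DenseMatrix64F;
import org.ejml.ops.CommonOps;

public final class GaussianMeanCombine {

    // Given child messages (Pi, mi) and (Pj, mj), solve
    // (Pi + Pj) mk = Pi mi + Pj mj for the combined mean mk.
    static DenseMatrix64F combineMeans(DenseMatrix64F Pi, DenseMatrix64F mi,
                                       DenseMatrix64F Pj, DenseMatrix64F mj) {
        final int dim = mi.getNumRows();

        // Right-hand side: Pi mi + Pj mj (the role of weightedSum above).
        DenseMatrix64F rhs = new DenseMatrix64F(dim, 1);
        DenseMatrix64F tmp = new DenseMatrix64F(dim, 1);
        CommonOps.mult(Pi, mi, rhs);
        CommonOps.mult(Pj, mj, tmp);
        CommonOps.addEquals(rhs, tmp);

        // Combined precision Pk = Pi + Pj, then solve Pk mk = rhs.
        DenseMatrix64F Pk = new DenseMatrix64F(dim, dim);
        CommonOps.add(Pi, Pj, Pk);
        DenseMatrix64F mk = new DenseMatrix64F(dim, 1);
        CommonOps.solve(Pk, rhs, mk); // assumes Pk is non-singular
        return mk;
    }

    public static void main(String[] args) {
        DenseMatrix64F P = CommonOps.identity(2);
        DenseMatrix64F m1 = new DenseMatrix64F(2, 1, true, 0, 0);
        DenseMatrix64F m2 = new DenseMatrix64F(2, 1, true, 2, 2);
        // Equal unit precisions: the combined mean is the average, (1, 1).
        System.out.println(combineMeans(P, m1, P, m2));
    }
}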

Example 4 with WrappedVector

Use of dr.math.matrixAlgebra.WrappedVector in project beast-mcmc by beast-dev.

From the class MultivariateConditionalOnTipsRealizedDelegate, method simulateTraitForExternalNode.

private void simulateTraitForExternalNode(final int nodeIndex, final int traitIndex, final int offsetSample, final int offsetParent, final int offsetPartial, final double branchPrecision) {
    final DenseMatrix64F P0 = wrap(partialNodeBuffer, offsetPartial + dimTrait, dimTrait, dimTrait);
    final int missingCount = countFiniteDiagonals(P0);
    if (missingCount == 0) {
        // Completely observed
        System.arraycopy(partialNodeBuffer, offsetPartial, sample, offsetSample, dimTrait);
    } else {
        final int zeroCount = countZeroDiagonals(P0);
        if (zeroCount == dimTrait) {
            // All missing completely at random
            // TODO: This is N(X_pa(j), l_j V_root). Why not N(X_pa(j), V_branch) ?
            final double sqrtScale = Math.sqrt(1.0 / branchPrecision);
            if (NEW_TIP_WITH_NO_DATA) {
                // final ReadableVector parentSample = new WrappedVector.Raw(sample, offsetParent, dimTrait);
                final ReadableVector M;
                M = getMeanBranch(offsetParent);
                // if (hasNoDrift) {
                // M = parentSample;
                // } else {
                // M = getMeanWithDrift(parentSample,
                // new WrappedVector.Raw(displacementBuffer, 0, dimTrait));
                // }
                MultivariateNormalDistribution.nextMultivariateNormalCholesky(
                        M, // input mean
                        new WrappedMatrix.ArrayOfArray(cholesky), // input variance
                        sqrtScale,
                        new WrappedVector.Raw(sample, offsetSample, dimTrait), // output sample
                        tmpEpsilon);
            } else {
                // TODO Drift?
                assert (!likelihoodDelegate.getDiffusionProcessDelegate().hasDrift());
                MultivariateNormalDistribution.nextMultivariateNormalCholesky(
                        sample, offsetParent, // input mean
                        cholesky, // input variance
                        sqrtScale,
                        sample, offsetSample, // output sample
                        tmpEpsilon);
            }
        } else {
            if (missingCount == dimTrait) {
                // All missing, but not completely at random
                simulateTraitForInternalNode(offsetSample, offsetParent, offsetPartial, branchPrecision);
            } else {
                // Partially observed
                // copy observed values
                System.arraycopy(partialNodeBuffer, offsetPartial, sample, offsetSample, dimTrait);
                final PartiallyMissingInformation.HashedIntArray indices = missingInformation.getMissingIndices(nodeIndex, traitIndex);
                final int[] observed = indices.getComplement();
                final int[] missing = indices.getArray();
                final DenseMatrix64F V1 = getVarianceBranch(branchPrecision);
                // final DenseMatrix64F V1 = new DenseMatrix64F(dimTrait, dimTrait);
                // CommonOps.scale(1.0 / branchPrecision, Vd, V1);
                ConditionalVarianceAndTransform2 transform = new ConditionalVarianceAndTransform2(V1, missing, observed);
                // TODO Cache (via delegated function)
                final DenseMatrix64F cP0 = new DenseMatrix64F(missing.length, missing.length);
                gatherRowsAndColumns(P0, cP0, missing, missing);
                final WrappedVector cM2 = transform.getConditionalMean(
                        partialNodeBuffer, offsetPartial, // tip value
                        sample, offsetParent); // parent value
                final DenseMatrix64F cP1 = transform.getConditionalPrecision();
                final DenseMatrix64F cP2 = new DenseMatrix64F(missing.length, missing.length);
                final DenseMatrix64F cV2 = new DenseMatrix64F(missing.length, missing.length);
                // TODO: Shouldn't P0 = 0 always in this situation ?
                CommonOps.add(cP0, cP1, cP2);
                safeInvert2(cP2, cV2, false);
                if (NEW_CHOLESKY) {
                    DenseMatrix64F cC2 = getCholeskyOfVariance(cV2, missing.length);
                    MultivariateNormalDistribution.nextMultivariateNormalCholesky(
                            cM2, // input mean
                            new WrappedMatrix.WrappedDenseMatrix(cC2), // input variance
                            1.0,
                            new WrappedVector.Indexed(sample, offsetSample, missing, missing.length), // output sample
                            tmpEpsilon);
                } else {
                    double[][] cC2 = getCholeskyOfVariance(cV2.getData(), missing.length);
                    MultivariateNormalDistribution.nextMultivariateNormalCholesky(
                            cM2, // input mean
                            new WrappedMatrix.ArrayOfArray(cC2), // input variance
                            1.0,
                            new WrappedVector.Indexed(sample, offsetSample, missing, missing.length), // output sample
                            tmpEpsilon);
                }
                if (DEBUG) {
                    final WrappedVector M0 = new WrappedVector.Raw(partialNodeBuffer, offsetPartial, dimTrait);
                    final WrappedVector M1 = new WrappedVector.Raw(sample, offsetParent, dimTrait);
                    final DenseMatrix64F P1 = new DenseMatrix64F(dimTrait, dimTrait);
                    CommonOps.scale(branchPrecision, Pd, P1);
                    final WrappedVector newSample = new WrappedVector.Raw(sample, offsetSample, dimTrait);
                    System.err.println("sT F E N");
                    System.err.println("M0: " + M0);
                    System.err.println("P0: " + P0);
                    System.err.println("");
                    System.err.println("M1: " + M1);
                    System.err.println("P1: " + P1);
                    System.err.println("");
                    System.err.println("cP0: " + cP0);
                    System.err.println("cM2: " + cM2);
                    System.err.println("cP1: " + cP1);
                    System.err.println("cP2: " + cP2);
                    System.err.println("cV2: " + cV2);
                    // System.err.println("cC2: " + new Matrix(cC2));
                    System.err.println("SS: " + newSample);
                    System.err.println("");
                }
            }
        }
    }
}
Also used: ReadableVector (dr.math.matrixAlgebra.ReadableVector), WrappedVector (dr.math.matrixAlgebra.WrappedVector), DenseMatrix64F (org.ejml.data.DenseMatrix64F), WrappedMatrix (dr.math.matrixAlgebra.WrappedMatrix)
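
In the partially observed branch above, the sampler writes its draw through a WrappedVector.Indexed view, so only the missing coordinates of the sample buffer are overwritten while the observed ones keep the values copied in by System.arraycopy. A small sketch of that scatter pattern with plain arrays, assuming WrappedVector.Indexed(buffer, offset, indices, dim) maps element i to buffer[offset + indices[i]], consistent with its use here:

public final class IndexedScatter {

    // Write a short vector of simulated values into only the missing
    // slots of a larger sample buffer, leaving observed entries untouched.
    static void scatter(double[] draws, double[] sample, int offset, int[] missing) {
        for (int i = 0; i < missing.length; ++i) {
            sample[offset + missing[i]] = draws[i];
        }
    }

    public static void main(String[] args) {
        double[] sample = {1.0, -99.0, 3.0, -99.0}; // -99 marks missing entries
        scatter(new double[]{2.0, 4.0}, sample, 0, new int[]{1, 3});
        System.out.println(java.util.Arrays.toString(sample)); // [1.0, 2.0, 3.0, 4.0]
    }
}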

Example 5 with WrappedVector

Use of dr.math.matrixAlgebra.WrappedVector in project beast-mcmc by beast-dev.

From the class EllipticalSliceOperator, method rotateNd.

// private static void rotate2d(double[] x) {
// final double theta = -Math.atan2(x[1], x[0]); // TODO Compute norm and avoid transcendentals
// final double sin = Math.sin(theta);
// final double cos = Math.cos(theta);
// 
// int k = 0;
// for (int i = 0; i < x.length / 2; ++i) {
// double newX = x[k + 0] * cos - x[k + 1] * sin;
// double newY = x[k + 1] * cos + x[k + 0] * sin;
// x[k + 0] = newX;
// x[k + 1] = newY;
// k += 2;
// }
// }
private static void rotateNd(double[] x, int dim) {
    // Get first `dim` locations
    DenseMatrix64F matrix = new DenseMatrix64F(dim, dim);
    for (int row = 0; row < dim; ++row) {
        for (int col = 0; col < dim; ++col) {
            matrix.set(row, col, x[col * dim + row]);
        }
    }
    // Do a QR decomposition
    QRDecomposition<DenseMatrix64F> qr = DecompositionFactory.qr(dim, dim);
    qr.decompose(matrix);
    DenseMatrix64F qm = qr.getQ(null, true);
    DenseMatrix64F rm = qr.getR(null, true);
    // Reflection invariance
    if (rm.get(0, 0) < 0) {
        CommonOps.scale(-1, rm);
        CommonOps.scale(-1, qm);
    }
    // Compute Q^{-1}
    DenseMatrix64F qInv = new DenseMatrix64F(dim, dim);
    CommonOps.transpose(qm, qInv);
    // Apply to each location
    for (int location = 0; location < x.length / dim; ++location) {
        WrappedVector locationVector = new WrappedVector.Raw(x, location * dim, dim);
        MissingOps.matrixVectorMultiple(qInv, locationVector, locationVector, dim);
    }
}
Also used: WrappedVector (dr.math.matrixAlgebra.WrappedVector), DenseMatrix64F (org.ejml.data.DenseMatrix64F)
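
rotateNd removes rotational and reflective invariance from the sampled locations: it stacks the first dim locations into a square matrix, QR-decomposes it, flips the signs of Q and R together when r_00 < 0 so a consistent orientation is chosen, and then applies Q^{-1} = Q^T to every location. The sketch below illustrates the sign convention with a stricter per-diagonal variant of that flip and checks that Q stays orthogonal; the import paths follow the older EJML 0.2x API used by the example (they moved in later EJML releases), and the matrix values are arbitrary.

import org.ejml.data.DenseMatrix64F;
import org.ejml.factory.DecompositionFactory;
import org.ejml.factory.QRDecomposition;
import org.ejml.ops.CommonOps;

public final class CanonicalQR {

    public static void main(String[] args) {
        int dim = 3;
        DenseMatrix64F a = new DenseMatrix64F(dim, dim, true,
                2, -1, 0,
                -1, 2, -1,
                0, -1, 2);

        QRDecomposition<DenseMatrix64F> qr = DecompositionFactory.qr(dim, dim);
        qr.decompose(a);
        DenseMatrix64F q = qr.getQ(null, true);
        DenseMatrix64F r = qr.getR(null, true);

        // QR is unique only up to the signs of R's diagonal: flipping column i
        // of Q and row i of R together leaves Q * R unchanged. Fix every
        // diagonal entry of R to be non-negative to pin down one representative.
        for (int i = 0; i < dim; ++i) {
            if (r.get(i, i) < 0) {
                for (int j = 0; j < dim; ++j) {
                    q.set(j, i, -q.get(j, i));
                    r.set(i, j, -r.get(i, j));
                }
            }
        }

        // Q^T Q should print as the identity regardless of the sign fix.
        DenseMatrix64F shouldBeIdentity = new DenseMatrix64F(dim, dim);
        CommonOps.multTransA(q, q, shouldBeIdentity);
        System.out.println(shouldBeIdentity);
    }
}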

Aggregations

WrappedVector (dr.math.matrixAlgebra.WrappedVector): 19 usages
DenseMatrix64F (org.ejml.data.DenseMatrix64F): 4 usages
ReadableVector (dr.math.matrixAlgebra.ReadableVector): 2 usages
WrappedMatrix (dr.math.matrixAlgebra.WrappedMatrix): 2 usages
ContinuousTraitDataModel (dr.evomodel.treedatalikelihood.continuous.ContinuousTraitDataModel): 1 usage
ElementaryVectorDataModel (dr.evomodel.treedatalikelihood.continuous.ElementaryVectorDataModel): 1 usage
Parameter (dr.inference.model.Parameter): 1 usage
Utils.setParameter (dr.math.matrixAlgebra.ReadableVector.Utils.setParameter): 1 usage