Search in sources :

Example 16 with Matrix

use of org.apache.ignite.ml.math.primitives.matrix.Matrix in project ignite by apache.

The class CovarianceMatricesAggregator, method add.

/**
 * Accumulates one sample's weighted covariance contribution.
 *
 * @param x Feature vector (xi).
 * @param pcxi P(c|xi) for GMM component "c" and vector xi.
 */
void add(Vector x, double pcxi) {
    // Center the sample around the component mean and lift it to a column matrix.
    Matrix centered = x.minus(mean).toMatrix(false);
    // Outer product scaled by the posterior probability of this component.
    Matrix contribution = centered.times(centered.transpose()).times(pcxi);
    if (weightedSum == null)
        weightedSum = contribution;
    else
        weightedSum = weightedSum.plus(contribution);
    rowCnt += 1;
}
Also used : Matrix(org.apache.ignite.ml.math.primitives.matrix.Matrix)

Example 17 with Matrix

use of org.apache.ignite.ml.math.primitives.matrix.Matrix in project ignite by apache.

The class TestUtils, method assertEquals.

/**
 * Verifies that two matrices are close (1-norm).
 *
 * @param msg The identifying message for the assertion error.
 * @param exp Expected matrix.
 * @param actual Actual matrix.
 * @param tolerance Comparison tolerance value.
 */
public static void assertEquals(String msg, Matrix exp, Matrix actual, double tolerance) {
    // Fail fast with a clear message instead of an opaque NPE when either matrix is null.
    Assert.assertNotNull(msg + "\nExpected should not be null", exp);
    Assert.assertNotNull(msg + "\nObserved should not be null", actual);
    if (exp.columnSize() != actual.columnSize() || exp.rowSize() != actual.rowSize()) {
        String msgBuff = msg + "\nObserved has incorrect dimensions." + "\nobserved is " + actual.rowSize() + " x " + actual.columnSize() + "\nexpected " + exp.rowSize() + " x " + exp.columnSize();
        Assert.fail(msgBuff);
    }
    Matrix delta = exp.minus(actual);
    // Closeness in the 1-norm sense: the maximum absolute row sum of the
    // difference must stay strictly below the tolerance.
    if (maximumAbsoluteRowSum(delta) >= tolerance) {
        String msgBuff = msg + "\nExpected: " + exp + "\nObserved: " + actual + "\nexpected - observed: " + delta;
        Assert.fail(msgBuff);
    }
}
Also used : Matrix(org.apache.ignite.ml.math.primitives.matrix.Matrix)

Example 18 with Matrix

use of org.apache.ignite.ml.math.primitives.matrix.Matrix in project ignite by apache.

The class MultilayerPerceptron, method readFromVector.

/**
 * Read matrix with given dimensions from vector starting with offset and return new offset position
 * which is last matrix entry position + 1.
 *
 * @param v Vector to read from.
 * @param rows Count of rows of matrix to read.
 * @param cols Count of columns of matrix to read.
 * @param off Start read position.
 * @return New offset position which is last matrix entry position + 1.
 */
private IgniteBiTuple<Integer, Matrix> readFromVector(Vector v, int rows, int cols, int off) {
    Matrix res = new DenseMatrix(rows, cols);
    int pos = off;
    // Fill row-major: vector entries map to matrix cells left-to-right, top-to-bottom.
    for (int r = 0; r < rows; r++) {
        for (int c = 0; c < cols; c++) {
            res.setX(r, c, v.getX(pos));
            pos++;
        }
    }
    return new IgniteBiTuple<>(pos, res);
}
Also used : Matrix(org.apache.ignite.ml.math.primitives.matrix.Matrix) DenseMatrix(org.apache.ignite.ml.math.primitives.matrix.impl.DenseMatrix) IgniteBiTuple(org.apache.ignite.lang.IgniteBiTuple) DenseMatrix(org.apache.ignite.ml.math.primitives.matrix.impl.DenseMatrix)

Example 19 with Matrix

use of org.apache.ignite.ml.math.primitives.matrix.Matrix in project ignite by apache.

The class MultilayerPerceptron, method differentiateByParameters.

/**
 * {@inheritDoc}
 */
@Override
public Vector differentiateByParameters(IgniteFunction<Vector, IgniteDifferentiableVectorToDoubleFunction> loss, Matrix inputsBatch, Matrix truthBatch) {
    // Backpropagation algorithm is used here.
    // Each column of inputsBatch/truthBatch is one sample (batch size = column count).
    int batchSize = inputsBatch.columnSize();
    // Gradients are averaged over the batch via this factor.
    double invBatchSize = 1 / (double) batchSize;
    int lastLayer = layersCount() - 1;
    // Forward pass: cache linear outputs and activations of every layer.
    MLPState mlpState = computeState(inputsBatch);
    // dz carries dLoss/d(linear output) for the layer currently being processed;
    // it is propagated backwards from iteration to iteration.
    Matrix dz = null;
    List<MLPLayer> layersParameters = new LinkedList<>();
    // Walk layers from the output back to the first hidden layer (layer 0 is the input).
    for (int layer = lastLayer; layer > 0; layer--) {
        // copy() because differentiateNonlinearity may work in place — keep cached state intact.
        Matrix z = mlpState.linearOutput(layer).copy();
        Matrix dSigmaDz = differentiateNonlinearity(z, architecture().transformationLayerArchitecture(layer).activationFunction());
        if (layer == lastLayer) {
            // Output layer: seed dz from the derivative of the loss w.r.t. the activations.
            Matrix sigma = mlpState.activatorsOutput(lastLayer).copy();
            Matrix dLossDSigma = differentiateLoss(truthBatch, sigma, loss);
            dz = elementWiseTimes(dLossDSigma, dSigmaDz);
        } else {
            // Hidden layer: pull the error back through the next layer's weights,
            // then apply this layer's activation derivative (chain rule).
            dz = weights(layer + 1).transpose().times(dz);
            dz = elementWiseTimes(dz, dSigmaDz);
        }
        // Activations of the previous layer feed this layer's weight gradient.
        Matrix a = mlpState.activatorsOutput(layer - 1);
        Matrix dw = dz.times(a.transpose()).times(invBatchSize);
        Vector db = null;
        if (hasBiases(layer))
            // Bias gradient: sum dz over the batch (rows), averaged by invBatchSize.
            db = dz.foldRows(Vector::sum).times(invBatchSize);
        // Because we go from last layer, add each layer to the beginning.
        layersParameters.add(0, new MLPLayer(dw, db));
    }
    // Flatten per-layer gradients into a single parameter-gradient vector.
    return paramsAsVector(layersParameters);
}
Also used : Matrix(org.apache.ignite.ml.math.primitives.matrix.Matrix) DenseMatrix(org.apache.ignite.ml.math.primitives.matrix.impl.DenseMatrix) Vector(org.apache.ignite.ml.math.primitives.vector.Vector) DenseVector(org.apache.ignite.ml.math.primitives.vector.impl.DenseVector) LinkedList(java.util.LinkedList)

Example 20 with Matrix

use of org.apache.ignite.ml.math.primitives.matrix.Matrix in project ignite by apache.

The class MultilayerPerceptron, method differentiateLoss.

/**
 * Differentiate loss.
 *
 * @param groundTruth Ground truth values.
 * @param lastLayerOutput Last layer output.
 * @param loss Loss function.
 * @return Gradients matrix.
 */
private Matrix differentiateLoss(Matrix groundTruth, Matrix lastLayerOutput, IgniteFunction<Vector, IgniteDifferentiableVectorToDoubleFunction> loss) {
    int rows = groundTruth.rowSize();
    int cols = groundTruth.columnSize();
    Matrix res = groundTruth.like(rows, cols);
    // TODO: IGNITE-7155 Couldn't use views here because
    // copy on views doesn't do actual copy and all changes are propagated to original.
    for (int c = 0; c < cols; c++) {
        Vector truthCol = groundTruth.getCol(c);
        Vector predictedCol = lastLayerOutput.getCol(c);
        res.assignColumn(c, loss.apply(truthCol).differential(predictedCol));
    }
    return res;
}
Also used : Matrix(org.apache.ignite.ml.math.primitives.matrix.Matrix) DenseMatrix(org.apache.ignite.ml.math.primitives.matrix.impl.DenseMatrix) Vector(org.apache.ignite.ml.math.primitives.vector.Vector) DenseVector(org.apache.ignite.ml.math.primitives.vector.impl.DenseVector)

Aggregations

Matrix (org.apache.ignite.ml.math.primitives.matrix.Matrix)31 DenseMatrix (org.apache.ignite.ml.math.primitives.matrix.impl.DenseMatrix)24 Test (org.junit.Test)12 Vector (org.apache.ignite.ml.math.primitives.vector.Vector)8 MLPArchitecture (org.apache.ignite.ml.nn.architecture.MLPArchitecture)8 DenseVector (org.apache.ignite.ml.math.primitives.vector.impl.DenseVector)7 SparseMatrix (org.apache.ignite.ml.math.primitives.matrix.impl.SparseMatrix)6 VectorizedViewMatrix (org.apache.ignite.ml.math.primitives.vector.impl.VectorizedViewMatrix)5 RendezvousAffinityFunction (org.apache.ignite.cache.affinity.rendezvous.RendezvousAffinityFunction)2 CacheConfiguration (org.apache.ignite.configuration.CacheConfiguration)2 ViewMatrix (org.apache.ignite.ml.math.primitives.matrix.impl.ViewMatrix)2 DelegatingVector (org.apache.ignite.ml.math.primitives.vector.impl.DelegatingVector)2 SparseVector (org.apache.ignite.ml.math.primitives.vector.impl.SparseVector)2 MLPTrainer (org.apache.ignite.ml.nn.MLPTrainer)2 SimpleGDParameterUpdate (org.apache.ignite.ml.optimization.updatecalculators.SimpleGDParameterUpdate)2 SimpleGDUpdateCalculator (org.apache.ignite.ml.optimization.updatecalculators.SimpleGDUpdateCalculator)2 LabeledVector (org.apache.ignite.ml.structures.LabeledVector)2 ArrayList (java.util.ArrayList)1 LinkedList (java.util.LinkedList)1 Random (java.util.Random)1