Usage of org.apache.ignite.ml.math.primitives.matrix.Matrix in the Apache Ignite project: class CovarianceMatricesAggregator, method add.
/**
 * Accumulates one sample's weighted covariance contribution.
 *
 * @param x Feature vector (xi).
 * @param pcxi P(c|xi) for GMM component "c" and vector xi.
 */
void add(Vector x, double pcxi) {
    // Column vector of deviations from the component mean.
    Matrix delta = x.minus(mean).toMatrix(false);

    // Outer product (delta * delta^T) scaled by the posterior probability.
    Matrix contribution = delta.times(delta.transpose()).times(pcxi);

    if (weightedSum == null)
        weightedSum = contribution;
    else
        weightedSum = weightedSum.plus(contribution);

    rowCnt++;
}
Usage of org.apache.ignite.ml.math.primitives.matrix.Matrix in the Apache Ignite project: class TestUtils, method assertEquals.
/**
 * Verifies that two matrices are close (1-norm).
 *
 * @param msg The identifying message for the assertion error.
 * @param exp Expected matrix.
 * @param actual Actual matrix.
 * @param tolerance Comparison tolerance value.
 */
public static void assertEquals(String msg, Matrix exp, Matrix actual, double tolerance) {
    Assert.assertNotNull(msg + "\nObserved should not be null", actual);

    // Dimension mismatch is reported before any element-wise comparison.
    boolean sameDims = exp.columnSize() == actual.columnSize() && exp.rowSize() == actual.rowSize();

    if (!sameDims)
        Assert.fail(msg + "\nObserved has incorrect dimensions." + "\nobserved is " + actual.rowSize()
            + " x " + actual.columnSize() + "\nexpected " + exp.rowSize() + " x " + exp.columnSize());

    // Closeness is measured as the maximum absolute row sum (1-norm) of the difference.
    Matrix delta = exp.minus(actual);

    if (maximumAbsoluteRowSum(delta) >= tolerance)
        Assert.fail(msg + "\nExpected: " + exp + "\nObserved: " + actual + "\nexpected - observed: " + delta);
}
Usage of org.apache.ignite.ml.math.primitives.matrix.Matrix in the Apache Ignite project: class MultilayerPerceptron, method readFromVector.
/**
 * Read matrix with given dimensions from vector starting with offset and return new offset position
 * which is last matrix entry position + 1.
 *
 * @param v Vector to read from.
 * @param rows Count of rows of matrix to read.
 * @param cols Count of columns of matrix to read.
 * @param off Start read position.
 * @return New offset position which is last matrix entry position + 1.
 */
private IgniteBiTuple<Integer, Matrix> readFromVector(Vector v, int rows, int cols, int off) {
    Matrix mtx = new DenseMatrix(rows, cols);

    // Entries are stored in row-major order; consume them sequentially.
    int pos = off;

    for (int r = 0; r < rows; r++)
        for (int c = 0; c < cols; c++)
            mtx.setX(r, c, v.getX(pos++));

    return new IgniteBiTuple<>(pos, mtx);
}
Usage of org.apache.ignite.ml.math.primitives.matrix.Matrix in the Apache Ignite project: class MultilayerPerceptron, method differentiateByParameters.
/**
 * {@inheritDoc}
 */
@Override
public Vector differentiateByParameters(IgniteFunction<Vector, IgniteDifferentiableVectorToDoubleFunction> loss, Matrix inputsBatch, Matrix truthBatch) {
// Backpropagation algorithm is used here: gradients are computed layer by layer,
// from the output layer back toward the input.
// Inputs are stored column-wise, so the batch size is the column count.
int batchSize = inputsBatch.columnSize();
// Gradients of each sample are averaged over the batch.
double invBatchSize = 1 / (double) batchSize;
int lastLayer = layersCount() - 1;
// Forward pass: cache linear outputs (z) and activations (sigma) for every layer.
MLPState mlpState = computeState(inputsBatch);
// dz holds dLoss/dz for the current layer; seeded at the output layer below.
Matrix dz = null;
List<MLPLayer> layersParameters = new LinkedList<>();
for (int layer = lastLayer; layer > 0; layer--) {
Matrix z = mlpState.linearOutput(layer).copy();
// Derivative of the activation function evaluated at the pre-activation values.
Matrix dSigmaDz = differentiateNonlinearity(z, architecture().transformationLayerArchitecture(layer).activationFunction());
if (layer == lastLayer) {
// Output layer: dz = dLoss/dSigma (elementwise) dSigma/dz.
Matrix sigma = mlpState.activatorsOutput(lastLayer).copy();
Matrix dLossDSigma = differentiateLoss(truthBatch, sigma, loss);
dz = elementWiseTimes(dLossDSigma, dSigmaDz);
} else {
// Hidden layer: propagate dz backward through the next layer's weights.
dz = weights(layer + 1).transpose().times(dz);
dz = elementWiseTimes(dz, dSigmaDz);
}
// Activations of the previous layer feed this layer's weight gradient.
Matrix a = mlpState.activatorsOutput(layer - 1);
// Weight gradient: dLoss/dW = dz * a^T, averaged over the batch.
Matrix dw = dz.times(a.transpose()).times(invBatchSize);
Vector db = null;
if (hasBiases(layer))
// Bias gradient: sum dz over the batch dimension, then average.
db = dz.foldRows(Vector::sum).times(invBatchSize);
// Because we go from last layer, add each layer to the beginning.
layersParameters.add(0, new MLPLayer(dw, db));
}
// Flatten the per-layer gradients into a single parameter-space vector.
return paramsAsVector(layersParameters);
}
Usage of org.apache.ignite.ml.math.primitives.matrix.Matrix in the Apache Ignite project: class MultilayerPerceptron, method differentiateLoss.
/**
 * Differentiate loss.
 *
 * @param groundTruth Ground truth values.
 * @param lastLayerOutput Last layer output.
 * @param loss Loss function.
 * @return Gradients matrix.
 */
private Matrix differentiateLoss(Matrix groundTruth, Matrix lastLayerOutput, IgniteFunction<Vector, IgniteDifferentiableVectorToDoubleFunction> loss) {
    int rows = groundTruth.rowSize();
    int cols = groundTruth.columnSize();

    Matrix res = groundTruth.like(rows, cols);

    // TODO: IGNITE-7155 Couldn't use views here because
    // copy on views doesn't do actual copy and all changes are propagated to original.
    for (int col = 0; col < cols; col++) {
        // Differentiate the per-sample loss at the predicted column, one sample (column) at a time.
        Vector gradCol = loss.apply(groundTruth.getCol(col)).differential(lastLayerOutput.getCol(col));

        res.assignColumn(col, gradCol);
    }

    return res;
}
Aggregations