Usage of org.apache.ignite.ml.nn.architecture.TransformationLayerArchitecture in the Apache Ignite project.
From the class MultilayerPerceptron, method initLayers.
/**
 * Initializes parameters (weights and, where configured, biases) of every
 * transformation layer using the supplied initializer.
 *
 * @param initializer Parameters initializer.
 */
private void initLayers(MLPInitializer initializer) {
    // Width of the layer feeding the current one; starts as the network input size.
    int inputsCnt = architecture.inputSize();

    // Layer 0 is the input layer, so transformation layers are indexed from 1.
    for (int layerIdx = 1; layerIdx < architecture.layersCount(); layerIdx++) {
        TransformationLayerArchitecture cfg = architecture.transformationLayerArchitecture(layerIdx);
        int curNeuronsCnt = cfg.neuronsCount();

        // Weight matrix maps previous-layer outputs (columns) to this layer's neurons (rows).
        DenseLocalOnHeapMatrix w = new DenseLocalOnHeapMatrix(curNeuronsCnt, inputsCnt);
        initializer.initWeights(w);

        // A bias vector is allocated only for layers configured with a bias term.
        DenseLocalOnHeapVector b = null;

        if (cfg.hasBias()) {
            b = new DenseLocalOnHeapVector(curNeuronsCnt);
            initializer.initBiases(b);
        }

        layers.add(new MLPLayer(w, b));

        // This layer's width becomes the input width of the next layer.
        inputsCnt = curNeuronsCnt;
    }
}
Usage of org.apache.ignite.ml.nn.architecture.TransformationLayerArchitecture in the Apache Ignite project.
From the class MultilayerPerceptron, method forwardPass.
/**
 * Perform forward pass and if needed write state of outputs of each layer.
 *
 * @param val Value to perform computation on.
 * @param state State object to write state into.
 * @param writeState Flag indicating need to write state.
 * @return Output of the last layer (result of the full forward pass).
 */
public Matrix forwardPass(Matrix val, MLPState state, boolean writeState) {
Matrix res = val;
// If this perceptron is stacked on top of another network, run the lower network first
// and use its output as our input.
if (below != null)
res = below.forwardPass(val, state, writeState);
// Layer 0 is the input layer; transformation layers are indexed from 1.
// layers.get(i - 1) holds the parameters of transformation layer i.
for (int i = 1; i < architecture.layersCount(); i++) {
MLPLayer curLayer = layers.get(i - 1);
// Linear part: multiply by this layer's weight matrix.
res = curLayer.weights.times(res);
TransformationLayerArchitecture layerCfg = this.architecture.transformationLayerArchitecture(i);
if (layerCfg.hasBias()) {
// Replicate the bias vector across res.columnSize() columns so it can be
// added to every input column at once.
ReplicatedVectorMatrix biasesMatrix = new ReplicatedVectorMatrix(biases(i), res.columnSize(), true);
res = res.plus(biasesMatrix);
}
// NOTE(review): state is dereferenced unconditionally here, even when
// writeState is false — callers apparently must always pass a non-null state;
// confirm against call sites.
state.linearOutput.add(res);
// If we do not write state, we can overwrite result.
// NOTE(review): the copy-before-map suggests Matrix.map mutates in place;
// copying protects the reference just stored in state.linearOutput from being
// clobbered by the activation below — TODO confirm map's in-place semantics.
if (writeState)
res = res.copy();
// Non-linear part: apply this layer's activation function element-wise.
res = res.map(layerCfg.activationFunction());
state.activatorsOutput.add(res);
}
return res;
}
Aggregations