Use of org.apache.ignite.ml.math.impls.vector.DenseLocalOnHeapVector in the Apache Ignite project: class IgniteColumnDecisionTreeGiniBenchmark, method test.
/**
* {@inheritDoc}
*/
/** {@inheritDoc} */
@Override
public boolean test(Map<Object, Object> ctx) throws Exception {
    // SparseDistributedMatrix work must run inside an IgniteThread because an
    // Ignite cache is created internally.
    IgniteThread benchThread = new IgniteThread(
        ignite.configuration().getIgniteInstanceName(),
        this.getClass().getSimpleName(),
        () -> {
            // IMPL NOTE originally taken from ColumnDecisionTreeTrainerTest#testCacheMixedGini
            int ptsCnt = 1 << 10;
            int featCnt = 2;

            Map<Integer, Integer> catsInfo = new HashMap<>();
            catsInfo.put(1, 3);

            // Feature 0 is continuous, feature 1 is categorical with 3 categories.
            SplitDataGenerator<DenseLocalOnHeapVector> gen =
                new SplitDataGenerator<>(featCnt, catsInfo, () -> new DenseLocalOnHeapVector(featCnt + 1))
                    .split(0, 1, new int[] {0, 2})
                    .split(1, 0, -10.0);

            gen.testByGen(ptsCnt, ContinuousSplitCalculators.GINI.apply(ignite),
                RegionCalculators.GINI, RegionCalculators.MEAN, ignite);
        });

    benchThread.start();
    benchThread.join();

    return true;
}
Use of org.apache.ignite.ml.math.impls.vector.DenseLocalOnHeapVector in the Apache Ignite project: class IgniteColumnDecisionTreeVarianceBenchmark, method test.
/**
* {@inheritDoc}
*/
/** {@inheritDoc} */
@Override
public boolean test(Map<Object, Object> ctx) throws Exception {
    // SparseDistributedMatrix work must run inside an IgniteThread because an
    // Ignite cache is created internally.
    IgniteThread benchThread = new IgniteThread(
        ignite.configuration().getIgniteInstanceName(),
        this.getClass().getSimpleName(),
        () -> {
            // IMPL NOTE originally taken from ColumnDecisionTreeTrainerTest#testCacheMixed
            int ptsCnt = 1 << 10;
            int featCnt = 2;

            Map<Integer, Integer> catsInfo = new HashMap<>();
            catsInfo.put(1, 3);

            // Feature 0 is continuous, feature 1 is categorical with 3 categories.
            SplitDataGenerator<DenseLocalOnHeapVector> gen =
                new SplitDataGenerator<>(featCnt, catsInfo, () -> new DenseLocalOnHeapVector(featCnt + 1))
                    .split(0, 1, new int[] {0, 2})
                    .split(1, 0, -10.0);

            gen.testByGen(ptsCnt, ContinuousSplitCalculators.VARIANCE,
                RegionCalculators.VARIANCE, RegionCalculators.MEAN, ignite);
        });

    benchThread.start();
    benchThread.join();

    return true;
}
Use of org.apache.ignite.ml.math.impls.vector.DenseLocalOnHeapVector in the Apache Ignite project: class AbstractMatrix, method getRow.
/**
* {@inheritDoc}
*/
/** {@inheritDoc} */
@Override
public Vector getRow(int row) {
    checkRowIndex(row);

    int cols = columnSize();

    // Copy the requested row element by element into a fresh dense vector.
    Vector rowVec = new DenseLocalOnHeapVector(cols);

    for (int col = 0; col < cols; col++)
        rowVec.setX(col, getX(row, col));

    return rowVec;
}
Use of org.apache.ignite.ml.math.impls.vector.DenseLocalOnHeapVector in the Apache Ignite project: class MultilayerPerceptron, method paramsAsVector.
/**
* Flatten this MLP parameters as vector.
*
* @param layersParams List of layers parameters.
* @return This MLP parameters as vector.
*/
/**
 * Flatten this MLP parameters into a single vector.
 *
 * @param layersParams List of layers parameters.
 * @return This MLP parameters as vector.
 */
protected Vector paramsAsVector(List<MLPLayer> layersParams) {
    Vector params = new DenseLocalOnHeapVector(architecture().parametersCount());

    int offset = 0;

    for (MLPLayer layer : layersParams) {
        // Pack weights first, then the optional biases, contiguously per layer.
        offset = writeToVector(params, layer.weights, offset);

        if (layer.biases != null)
            offset = writeToVector(params, layer.biases, offset);
    }

    return params;
}
Use of org.apache.ignite.ml.math.impls.vector.DenseLocalOnHeapVector in the Apache Ignite project: class MultilayerPerceptron, method initLayers.
/**
* Init layers parameters with initializer.
*
* @param initializer Parameters initializer.
*/
/**
 * Init layers parameters with initializer.
 *
 * @param initializer Parameters initializer.
 */
private void initLayers(MLPInitializer initializer) {
    // Layer 0 is the input layer; transformation layers start at index 1.
    int prevSize = architecture.inputSize();

    for (int layerIdx = 1; layerIdx < architecture.layersCount(); layerIdx++) {
        TransformationLayerArchitecture layerCfg = architecture.transformationLayerArchitecture(layerIdx);
        int neuronsCnt = layerCfg.neuronsCount();

        // Weight matrix maps the previous layer's outputs to this layer's neurons.
        DenseLocalOnHeapMatrix weights = new DenseLocalOnHeapMatrix(neuronsCnt, prevSize);
        initializer.initWeights(weights);

        DenseLocalOnHeapVector biases = null;

        if (layerCfg.hasBias()) {
            biases = new DenseLocalOnHeapVector(neuronsCnt);
            initializer.initBiases(biases);
        }

        layers.add(new MLPLayer(weights, biases));

        prevSize = neuronsCnt;
    }
}
Aggregations