Use of hex.NeuralNet in project h2o-2 by h2oai.
From class NeuralNetMnistPretrain, method build:
@Override
protected Layer[] build(Vec[] data, Vec labels, VecsInput inputStats, VecSoftmax outputStats) {
Layer[] ls = new Layer[4];
ls[0] = new VecsInput(data, inputStats);
// ls[1] = new Layer.RectifierDropout(1024);
// ls[2] = new Layer.RectifierDropout(1024);
ls[1] = new Layer.Tanh(50);
ls[2] = new Layer.Tanh(50);
ls[3] = new VecSoftmax(labels, outputStats);
// Parameters for MNIST run
NeuralNet p = new NeuralNet();
//only used for NN run after pretraining
p.rate = 0.01;
p.activation = NeuralNet.Activation.Tanh;
p.loss = NeuralNet.Loss.CrossEntropy;
// p.rate_annealing = 1e-6f;
// p.max_w2 = 15;
// p.momentum_start = 0.5f;
// p.momentum_ramp = 60000 * 300;
// p.momentum_stable = 0.99f;
// p.l1 = .00001f;
// p.l2 = .00f;
p.initial_weight_distribution = NeuralNet.InitialWeightDistribution.UniformAdaptive;
for (int i = 0; i < ls.length; i++) {
ls[i].init(ls, i, p);
}
return ls;
}
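A minimal driver sketch for the method above, assuming the Trainer.Direct API that appears in the compare() test further down this page; the data and labels vectors and the epoch count are placeholders, and null is passed for the stats layers just as the compare() test does for its input and softmax layers:
// Hypothetical usage of build(): construct the wired-up layers, then train.
// Trainer.Direct is taken from the compare() test below; everything else
// here is illustrative, not part of the h2o-2 source.
Layer[] ls = build(data, labels, null, null);
Trainer trainer = new Trainer.Direct(ls, 10 /* epochs, placeholder */, null);
trainer.start();
trainer.join();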
Use of hex.NeuralNet in project h2o-2 by h2oai.
From class NeuralNetMnist, method build:
protected Layer[] build(Vec[] data, Vec labels, VecsInput inputStats, VecSoftmax outputStats) {
//same parameters as in test_NN_mnist.py
Layer[] ls = new Layer[5];
ls[0] = new VecsInput(data, inputStats);
ls[1] = new Layer.RectifierDropout(117);
ls[2] = new Layer.RectifierDropout(131);
ls[3] = new Layer.RectifierDropout(129);
ls[ls.length - 1] = new VecSoftmax(labels, outputStats);
NeuralNet p = new NeuralNet();
p.seed = 98037452452L;
p.rate = 0.005;
p.rate_annealing = 1e-6;
p.activation = NeuralNet.Activation.RectifierWithDropout;
p.loss = NeuralNet.Loss.CrossEntropy;
p.input_dropout_ratio = 0.2;
p.max_w2 = 15;
p.epochs = 2;
p.l1 = 1e-5;
p.l2 = 1e-7;
p.momentum_start = 0.5;
p.momentum_ramp = 100000;
p.momentum_stable = 0.99;
p.initial_weight_distribution = NeuralNet.InitialWeightDistribution.UniformAdaptive;
p.classification = true;
p.diagnostics = true;
p.expert_mode = true;
for (int i = 0; i < ls.length; i++) {
ls[i].init(ls, i, p);
}
return ls;
}
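For orientation, momentum_start, momentum_ramp, and momentum_stable describe a linear ramp: momentum rises from 0.5 to 0.99 over the first 100000 training samples and stays there. A sketch of that schedule, assuming the linear ramp described in the H2O documentation (this helper is illustrative, not part of the source):
// Illustrative helper: momentum rises linearly from `start` to `stable`
// over `ramp` training samples, then stays constant.
static double momentum(long samplesSeen, double start, double stable, long ramp) {
return samplesSeen >= ramp ? stable : start + (stable - start) * samplesSeen / (double) ramp;
}
Halfway up the ramp, momentum(50000, 0.5, 0.99, 100000) evaluates to 0.745.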
Use of hex.NeuralNet in project h2o-2 by h2oai.
From class NeuralNetProgressPage, method toHTML:
@Override
public boolean toHTML(StringBuilder sb) {
Job jjob = Job.findJob(job_key);
if (jjob == null)
return true;
NeuralNet.NeuralNetModel m = UKV.get(jjob.dest());
if (m != null)
m.generateHTML("NeuralNet Model", sb);
else
DocGen.HTML.paragraph(sb, "Pending...");
return true;
}
Use of hex.NeuralNet in project h2o-2 by h2oai.
From class DeepLearningVsNeuralNet, method compare:
@Ignore
@Test
public void compare() throws Exception {
final long seed = 0xc0ffee;
Random rng = new Random(seed);
DeepLearning.Activation[] activations = { DeepLearning.Activation.Maxout, DeepLearning.Activation.MaxoutWithDropout, DeepLearning.Activation.Tanh, DeepLearning.Activation.TanhWithDropout, DeepLearning.Activation.Rectifier, DeepLearning.Activation.RectifierWithDropout };
DeepLearning.Loss[] losses = { DeepLearning.Loss.MeanSquare, DeepLearning.Loss.CrossEntropy };
DeepLearning.InitialWeightDistribution[] dists = { DeepLearning.InitialWeightDistribution.Normal, DeepLearning.InitialWeightDistribution.Uniform, DeepLearning.InitialWeightDistribution.UniformAdaptive };
double[] initial_weight_scales = { 1e-3 + 1e-2 * rng.nextFloat() };
double[] holdout_ratios = { 0.7 + 0.2 * rng.nextFloat() };
int[][] hiddens = { { 1 }, { 1 + rng.nextInt(50) }, { 17, 13 }, { 20, 10, 5 } };
double[] rates = { 0.005 + 1e-2 * rng.nextFloat() };
int[] epochs = { 5 + rng.nextInt(5) };
double[] input_dropouts = { 0, rng.nextFloat() * 0.5 };
double p0 = 0.5 * rng.nextFloat();
long pR = 1000 + rng.nextInt(1000);
double p1 = 0.5 + 0.49 * rng.nextFloat();
double l1 = 1e-5 * rng.nextFloat();
double l2 = 1e-5 * rng.nextFloat();
// rng.nextInt(50);
float max_w2 = Float.POSITIVE_INFINITY;
double rate_annealing = 1e-7 + rng.nextFloat() * 1e-6;
boolean threaded = false;
int num_repeats = 1;
// TODO: test that Deep Learning and NeuralNet agree for Mnist dataset
// String[] files = { "smalldata/mnist/train.csv" };
// hiddens = new int[][]{ {50,50} };
// threaded = true;
// num_repeats = 5;
// TODO: test that Deep Learning and NeuralNet agree for covtype dataset
// String[] files = { "smalldata/covtype/covtype.20k.data.my" };
// hiddens = new int[][]{ {100,100} };
// epochs = new int[]{ 50 };
// threaded = true;
// num_repeats = 2;
String[] files = { "smalldata/iris/iris.csv", "smalldata/neural/two_spiral.data" };
for (DeepLearning.Activation activation : activations) {
for (DeepLearning.Loss loss : losses) {
for (DeepLearning.InitialWeightDistribution dist : dists) {
for (double scale : initial_weight_scales) {
for (double holdout_ratio : holdout_ratios) {
for (double input_dropout : input_dropouts) {
for (int[] hidden : hiddens) {
for (int epoch : epochs) {
for (double rate : rates) {
for (String file : files) {
for (boolean fast_mode : new boolean[] { true, false }) {
float reftrainerr = 0, trainerr = 0;
float reftesterr = 0, testerr = 0;
float[] a = new float[hidden.length + 2];
float[] b = new float[hidden.length + 2];
float[] ba = new float[hidden.length + 2];
float[] bb = new float[hidden.length + 2];
long numweights = 0, numbiases = 0;
for (int repeat = 0; repeat < num_repeats; ++repeat) {
long myseed = seed + repeat;
Log.info("");
Log.info("STARTING.");
Log.info("Running with " + activation.name() + " activation function and " + loss.name() + " loss function.");
Log.info("Initialization with " + dist.name() + " distribution and " + scale + " scale, holdout ratio " + holdout_ratio);
Log.info("Using seed " + seed);
Key kfile = NFSFileVec.make(find_test_file(file));
Frame frame = ParseDataset2.parse(Key.make(), new Key[] { kfile });
_train = sampleFrame(frame, (long) (frame.numRows() * holdout_ratio), seed);
_test = sampleFrame(frame, (long) (frame.numRows() * (1 - holdout_ratio)), seed + 1);
// Train new Deep Learning
Neurons[] neurons;
DeepLearningModel mymodel;
{
DeepLearning p = new DeepLearning();
p.source = (Frame) _train.clone();
p.response = _train.lastVec();
p.ignored_cols = null;
p.seed = myseed;
p.hidden = hidden;
p.adaptive_rate = false;
p.rho = 0;
p.epsilon = 0;
p.rate = rate;
p.activation = activation;
p.max_w2 = max_w2;
p.epochs = epoch;
p.input_dropout_ratio = input_dropout;
p.rate_annealing = rate_annealing;
p.loss = loss;
p.l1 = l1;
p.l2 = l2;
p.momentum_start = p0;
p.momentum_ramp = pR;
p.momentum_stable = p1;
p.initial_weight_distribution = dist;
p.initial_weight_scale = scale;
p.classification = true;
p.diagnostics = true;
p.validation = null;
p.quiet_mode = true;
p.fast_mode = fast_mode;
//sync once per period
p.train_samples_per_iteration = 0;
//same as old NeuralNet code
p.ignore_const_cols = false;
//same as old NeuralNet code
p.shuffle_training_data = false;
//same as old NeuralNet code
p.nesterov_accelerated_gradient = true;
//don't stop early -> need to compare against old NeuralNet code, which doesn't stop either
p.classification_stop = -1;
//keep 1 chunk for reproducibility
p.force_load_balance = false;
p.replicate_training_data = false;
p.single_node_mode = true;
p.invoke();
mymodel = UKV.get(p.dest());
neurons = DeepLearningTask.makeNeuronsForTesting(mymodel.model_info());
}
// Reference: NeuralNet
Layer[] ls;
NeuralNetModel refmodel;
NeuralNet p = new NeuralNet();
{
Vec[] data = Utils.remove(_train.vecs(), _train.vecs().length - 1);
Vec labels = _train.lastVec();
p.seed = myseed;
p.hidden = hidden;
p.rate = rate;
p.max_w2 = max_w2;
p.epochs = epoch;
p.input_dropout_ratio = input_dropout;
p.rate_annealing = rate_annealing;
p.l1 = l1;
p.l2 = l2;
p.momentum_start = p0;
p.momentum_ramp = pR;
p.momentum_stable = p1;
if (dist == DeepLearning.InitialWeightDistribution.Normal)
p.initial_weight_distribution = InitialWeightDistribution.Normal;
else if (dist == DeepLearning.InitialWeightDistribution.Uniform)
p.initial_weight_distribution = InitialWeightDistribution.Uniform;
else if (dist == DeepLearning.InitialWeightDistribution.UniformAdaptive)
p.initial_weight_distribution = InitialWeightDistribution.UniformAdaptive;
p.initial_weight_scale = scale;
p.diagnostics = true;
p.fast_mode = fast_mode;
p.classification = true;
if (loss == DeepLearning.Loss.MeanSquare)
p.loss = Loss.MeanSquare;
else if (loss == DeepLearning.Loss.CrossEntropy)
p.loss = Loss.CrossEntropy;
ls = new Layer[hidden.length + 2];
ls[0] = new Layer.VecsInput(data, null);
for (int i = 0; i < hidden.length; ++i) {
if (activation == DeepLearning.Activation.Tanh) {
p.activation = NeuralNet.Activation.Tanh;
ls[1 + i] = new Layer.Tanh(hidden[i]);
} else if (activation == DeepLearning.Activation.TanhWithDropout) {
p.activation = Activation.TanhWithDropout;
ls[1 + i] = new Layer.TanhDropout(hidden[i]);
} else if (activation == DeepLearning.Activation.Rectifier) {
p.activation = Activation.Rectifier;
ls[1 + i] = new Layer.Rectifier(hidden[i]);
} else if (activation == DeepLearning.Activation.RectifierWithDropout) {
p.activation = Activation.RectifierWithDropout;
ls[1 + i] = new Layer.RectifierDropout(hidden[i]);
} else if (activation == DeepLearning.Activation.Maxout) {
p.activation = Activation.Maxout;
ls[1 + i] = new Layer.Maxout(hidden[i]);
} else if (activation == DeepLearning.Activation.MaxoutWithDropout) {
p.activation = Activation.MaxoutWithDropout;
ls[1 + i] = new Layer.MaxoutDropout(hidden[i]);
}
}
ls[ls.length - 1] = new Layer.VecSoftmax(labels, null);
for (int i = 0; i < ls.length; i++) {
ls[i].init(ls, i, p);
}
Trainer trainer;
if (threaded)
trainer = new Trainer.Threaded(ls, p.epochs, null, -1);
else
trainer = new Trainer.Direct(ls, p.epochs, null);
trainer.start();
trainer.join();
refmodel = new NeuralNetModel(null, null, _train, ls, p);
}
/**
* Compare the mean weights and biases in the hidden and output layers
*/
for (int n = 1; n < ls.length; ++n) {
Neurons l = neurons[n];
Layer ref = ls[n];
for (int o = 0; o < l._a.size(); o++) {
for (int i = 0; i < l._previous._a.size(); i++) {
a[n] += ref._w[o * l._previous._a.size() + i];
b[n] += l._w.raw()[o * l._previous._a.size() + i];
numweights++;
}
ba[n] += ref._b[o];
bb[n] += l._b.get(o);
numbiases++;
}
}
/**
* Compare predictions.
* Note: the reference NeuralNet and the new Deep Learning code each apply their own
* internal data normalization, so each implementation is scored on its own adapted
* test data, which is assumed to be created consistently.
*/
water.api.ConfusionMatrix CM = new water.api.ConfusionMatrix();
// Deep Learning scoring
{
// column [0] is the predicted label; the remaining columns are the per-class probabilities
Frame fpreds = mymodel.score(_train);
CM = new water.api.ConfusionMatrix();
CM.actual = _train;
CM.vactual = _train.lastVec();
CM.predict = fpreds;
CM.vpredict = fpreds.vecs()[0];
CM.invoke();
StringBuilder sb = new StringBuilder();
CM.toASCII(sb);
trainerr += new ConfusionMatrix(CM.cm).err();
for (String s : sb.toString().split("\n")) Log.info(s);
fpreds.delete();
// column [0] is the predicted label; the remaining columns are the per-class probabilities
Frame fpreds2 = mymodel.score(_test);
CM = new water.api.ConfusionMatrix();
CM.actual = _test;
CM.vactual = _test.lastVec();
CM.predict = fpreds2;
CM.vpredict = fpreds2.vecs()[0];
CM.invoke();
sb = new StringBuilder();
CM.toASCII(sb);
testerr += new ConfusionMatrix(CM.cm).err();
for (String s : sb.toString().split("\n")) Log.info(s);
fpreds2.delete();
}
// NeuralNet scoring
long[][] cm;
{
Log.info("\nNeuralNet Scoring:");
//training set
NeuralNet.Errors train = NeuralNet.eval(ls, 0, null);
reftrainerr += train.classification;
//test set
final Frame[] adapted = refmodel.adapt(_test, false);
Vec[] data = Utils.remove(_test.vecs(), _test.vecs().length - 1);
Vec labels = _test.vecs()[_test.vecs().length - 1];
Layer.VecsInput input = (Layer.VecsInput) ls[0];
input.vecs = data;
input._len = data[0].length();
((Layer.VecSoftmax) ls[ls.length - 1]).vec = labels;
//WARNING: only works if training set is large enough to have all classes
int classes = ls[ls.length - 1].units;
cm = new long[classes][classes];
NeuralNet.Errors test = NeuralNet.eval(ls, 0, cm);
Log.info("\nNeuralNet Confusion Matrix:");
Log.info(new ConfusionMatrix(cm).toString());
reftesterr += test.classification;
adapted[1].delete();
}
Assert.assertEquals(cm[0][0], CM.cm[0][0]);
Assert.assertEquals(cm[1][0], CM.cm[1][0]);
Assert.assertEquals(cm[0][1], CM.cm[0][1]);
Assert.assertEquals(cm[1][1], CM.cm[1][1]);
// cleanup
mymodel.delete();
refmodel.delete();
_train.delete();
_test.delete();
frame.delete();
}
trainerr /= (float) num_repeats;
reftrainerr /= (float) num_repeats;
testerr /= (float) num_repeats;
reftesterr /= (float) num_repeats;
/**
* Tolerances
*/
final float abseps = threaded ? 1e-2f : 1e-7f;
final float releps = threaded ? 1e-2f : 1e-5f;
// training set scoring
Log.info("NeuralNet train error " + reftrainerr);
Log.info("Deep Learning train error " + trainerr);
compareVal(reftrainerr, trainerr, abseps, releps);
// test set scoring
Log.info("NeuralNet test error " + reftesterr);
Log.info("Deep Learning test error " + testerr);
compareVal(reftesterr, testerr, abseps, releps);
// mean weights/biases
for (int n = 1; n < hidden.length + 2; ++n) {
Log.info("NeuralNet mean weight for layer " + n + ": " + a[n] / numweights);
Log.info("Deep Learning mean weight for layer " + n + ": " + b[n] / numweights);
Log.info("NeuralNet mean bias for layer " + n + ": " + ba[n] / numbiases);
Log.info("Deep Learning mean bias for layer " + n + ": " + bb[n] / numbiases);
compareVal(a[n] / numweights, b[n] / numweights, abseps, releps);
compareVal(ba[n] / numbiases, bb[n] / numbiases, abseps, releps);
}
}
}
}
}
}
}
}
}
}
}
}
}
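compareVal is defined in the surrounding test harness rather than shown here; a plausible reconstruction of the absolute-plus-relative tolerance check implied by how abseps and releps are used above (hypothetical, not the actual h2o-2 source):
// Hypothetical reconstruction of compareVal: values agree if they match
// within abseps absolutely or within releps relatively; otherwise fail.
static void compareVal(double a, double b, double abseps, double releps) {
double diff = Math.abs(a - b);
if (diff > abseps && diff / Math.max(Math.abs(a), Math.abs(b)) > releps)
org.junit.Assert.fail("Values differ: " + a + " vs " + b);
}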
Use of hex.NeuralNet in project h2o-2 by h2oai.
From class NeuralNetMnistDrednet, method build:
@Override
protected Layer[] build(Vec[] data, Vec labels, VecsInput inputStats, VecSoftmax outputStats) {
NeuralNet p = new NeuralNet();
Layer[] ls = new Layer[5];
p.hidden = new int[] { 1024, 1024, 2048 };
// p.hidden = new int[]{128,128,256};
ls[0] = new VecsInput(data, inputStats);
for (int i = 1; i < ls.length - 1; i++) ls[i] = new Layer.RectifierDropout(p.hidden[i - 1]);
ls[4] = new VecSoftmax(labels, outputStats);
p.rate = 0.01f;
p.rate_annealing = 1e-6f;
p.epochs = 1000;
p.activation = NeuralNet.Activation.RectifierWithDropout;
p.input_dropout_ratio = 0.2;
p.loss = NeuralNet.Loss.CrossEntropy;
p.max_w2 = 15;
p.momentum_start = 0.5f;
p.momentum_ramp = 1800000;
p.momentum_stable = 0.99f;
p.score_training = 1000;
p.score_validation = 10000;
p.l1 = .00001f;
p.l2 = .00f;
p.initial_weight_distribution = NeuralNet.InitialWeightDistribution.UniformAdaptive;
p.score_interval = 30;
for (int i = 0; i < ls.length; i++) {
ls[i].init(ls, i, p);
}
return ls;
}
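For scale: assuming the standard 60,000-row MNIST training set, the momentum_ramp of 1,800,000 samples corresponds to 1,800,000 / 60,000 = 30 epochs before momentum settles at 0.99, early in the 1,000-epoch run.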