Use of hex.deeplearning.DeepLearningModel in project h2o-2 by h2oai.
From the class DeepLearningVsNeuralNet, the method compare:
@Ignore
@Test
public void compare() throws Exception {
final long seed = 0xc0ffee;
Random rng = new Random(seed);
DeepLearning.Activation[] activations = { DeepLearning.Activation.Maxout, DeepLearning.Activation.MaxoutWithDropout, DeepLearning.Activation.Tanh, DeepLearning.Activation.TanhWithDropout, DeepLearning.Activation.Rectifier, DeepLearning.Activation.RectifierWithDropout };
DeepLearning.Loss[] losses = { DeepLearning.Loss.MeanSquare, DeepLearning.Loss.CrossEntropy };
DeepLearning.InitialWeightDistribution[] dists = { DeepLearning.InitialWeightDistribution.Normal, DeepLearning.InitialWeightDistribution.Uniform, DeepLearning.InitialWeightDistribution.UniformAdaptive };
double[] initial_weight_scales = { 1e-3 + 1e-2 * rng.nextFloat() };
double[] holdout_ratios = { 0.7 + 0.2 * rng.nextFloat() };
int[][] hiddens = { { 1 }, { 1 + rng.nextInt(50) }, { 17, 13 }, { 20, 10, 5 } };
double[] rates = { 0.005 + 1e-2 * rng.nextFloat() };
int[] epochs = { 5 + rng.nextInt(5) };
double[] input_dropouts = { 0, rng.nextFloat() * 0.5 };
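// momentum schedule below (assumed semantics): start at p0, ramp linearly over pR training samples, then hold at p1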
double p0 = 0.5 * rng.nextFloat();
long pR = 1000 + rng.nextInt(1000);
double p1 = 0.5 + 0.49 * rng.nextFloat();
double l1 = 1e-5 * rng.nextFloat();
double l2 = 1e-5 * rng.nextFloat();
// rng.nextInt(50);
float max_w2 = Float.POSITIVE_INFINITY;
double rate_annealing = 1e-7 + rng.nextFloat() * 1e-6;
boolean threaded = false;
int num_repeats = 1;
// TODO: test that Deep Learning and NeuralNet agree for Mnist dataset
// String[] files = { "smalldata/mnist/train.csv" };
// hiddens = new int[][]{ {50,50} };
// threaded = true;
// num_repeats = 5;
// TODO: test that Deep Learning and NeuralNet agree for covtype dataset
// String[] files = { "smalldata/covtype/covtype.20k.data.my" };
// hiddens = new int[][]{ {100,100} };
// epochs = new int[]{ 50 };
// threaded = true;
// num_repeats = 2;
String[] files = { "smalldata/iris/iris.csv", "smalldata/neural/two_spiral.data" };
for (DeepLearning.Activation activation : activations) {
for (DeepLearning.Loss loss : losses) {
for (DeepLearning.InitialWeightDistribution dist : dists) {
for (double scale : initial_weight_scales) {
for (double holdout_ratio : holdout_ratios) {
for (double input_dropout : input_dropouts) {
for (int[] hidden : hiddens) {
for (int epoch : epochs) {
for (double rate : rates) {
for (String file : files) {
for (boolean fast_mode : new boolean[] { true, false }) {
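// accumulators averaged over repeats: reference (NeuralNet) vs. Deep Learning train/test errors,
// plus per-layer sums of weights (a: NeuralNet, b: Deep Learning) and biases (ba, bb) for mean comparison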
float reftrainerr = 0, trainerr = 0;
float reftesterr = 0, testerr = 0;
float[] a = new float[hidden.length + 2];
float[] b = new float[hidden.length + 2];
float[] ba = new float[hidden.length + 2];
float[] bb = new float[hidden.length + 2];
long numweights = 0, numbiases = 0;
for (int repeat = 0; repeat < num_repeats; ++repeat) {
long myseed = seed + repeat;
Log.info("");
Log.info("STARTING.");
Log.info("Running with " + activation.name() + " activation function and " + loss.name() + " loss function.");
Log.info("Initialization with " + dist.name() + " distribution and " + scale + " scale, holdout ratio " + holdout_ratio);
Log.info("Using seed " + seed);
Key kfile = NFSFileVec.make(find_test_file(file));
Frame frame = ParseDataset2.parse(Key.make(), new Key[] { kfile });
_train = sampleFrame(frame, (long) (frame.numRows() * holdout_ratio), seed);
_test = sampleFrame(frame, (long) (frame.numRows() * (1 - holdout_ratio)), seed + 1);
// Train new Deep Learning
Neurons[] neurons;
DeepLearningModel mymodel;
{
DeepLearning p = new DeepLearning();
p.source = (Frame) _train.clone();
p.response = _train.lastVec();
p.ignored_cols = null;
p.seed = myseed;
p.hidden = hidden;
p.adaptive_rate = false;
p.rho = 0;
p.epsilon = 0;
p.rate = rate;
p.activation = activation;
p.max_w2 = max_w2;
p.epochs = epoch;
p.input_dropout_ratio = input_dropout;
p.rate_annealing = rate_annealing;
p.loss = loss;
p.l1 = l1;
p.l2 = l2;
p.momentum_start = p0;
p.momentum_ramp = pR;
p.momentum_stable = p1;
p.initial_weight_distribution = dist;
p.initial_weight_scale = scale;
p.classification = true;
p.diagnostics = true;
p.validation = null;
p.quiet_mode = true;
p.fast_mode = fast_mode;
//sync once per period
p.train_samples_per_iteration = 0;
//same as old NeuralNet code
p.ignore_const_cols = false;
//same as old NeuralNet code
p.shuffle_training_data = false;
//same as old NeuralNet code
p.nesterov_accelerated_gradient = true;
//don't stop early -> need to compare against old NeuralNet code, which doesn't stop either
p.classification_stop = -1;
//keep 1 chunk for reproducibility
p.force_load_balance = false;
p.replicate_training_data = false;
p.single_node_mode = true;
p.invoke();
mymodel = UKV.get(p.dest());
neurons = DeepLearningTask.makeNeuronsForTesting(mymodel.model_info());
}
// Reference: NeuralNet
Layer[] ls;
NeuralNetModel refmodel;
NeuralNet p = new NeuralNet();
{
Vec[] data = Utils.remove(_train.vecs(), _train.vecs().length - 1);
Vec labels = _train.lastVec();
p.seed = myseed;
p.hidden = hidden;
p.rate = rate;
p.max_w2 = max_w2;
p.epochs = epoch;
p.input_dropout_ratio = input_dropout;
p.rate_annealing = rate_annealing;
p.l1 = l1;
p.l2 = l2;
p.momentum_start = p0;
p.momentum_ramp = pR;
p.momentum_stable = p1;
if (dist == DeepLearning.InitialWeightDistribution.Normal)
p.initial_weight_distribution = InitialWeightDistribution.Normal;
else if (dist == DeepLearning.InitialWeightDistribution.Uniform)
p.initial_weight_distribution = InitialWeightDistribution.Uniform;
else if (dist == DeepLearning.InitialWeightDistribution.UniformAdaptive)
p.initial_weight_distribution = InitialWeightDistribution.UniformAdaptive;
p.initial_weight_scale = scale;
p.diagnostics = true;
p.fast_mode = fast_mode;
p.classification = true;
if (loss == DeepLearning.Loss.MeanSquare)
p.loss = Loss.MeanSquare;
else if (loss == DeepLearning.Loss.CrossEntropy)
p.loss = Loss.CrossEntropy;
ls = new Layer[hidden.length + 2];
ls[0] = new Layer.VecsInput(data, null);
for (int i = 0; i < hidden.length; ++i) {
if (activation == DeepLearning.Activation.Tanh) {
p.activation = NeuralNet.Activation.Tanh;
ls[1 + i] = new Layer.Tanh(hidden[i]);
} else if (activation == DeepLearning.Activation.TanhWithDropout) {
p.activation = Activation.TanhWithDropout;
ls[1 + i] = new Layer.TanhDropout(hidden[i]);
} else if (activation == DeepLearning.Activation.Rectifier) {
p.activation = Activation.Rectifier;
ls[1 + i] = new Layer.Rectifier(hidden[i]);
} else if (activation == DeepLearning.Activation.RectifierWithDropout) {
p.activation = Activation.RectifierWithDropout;
ls[1 + i] = new Layer.RectifierDropout(hidden[i]);
} else if (activation == DeepLearning.Activation.Maxout) {
p.activation = Activation.Maxout;
ls[1 + i] = new Layer.Maxout(hidden[i]);
} else if (activation == DeepLearning.Activation.MaxoutWithDropout) {
p.activation = Activation.MaxoutWithDropout;
ls[1 + i] = new Layer.MaxoutDropout(hidden[i]);
}
}
ls[ls.length - 1] = new Layer.VecSoftmax(labels, null);
for (int i = 0; i < ls.length; i++) {
ls[i].init(ls, i, p);
}
Trainer trainer;
if (threaded)
trainer = new Trainer.Threaded(ls, p.epochs, null, -1);
else
trainer = new Trainer.Direct(ls, p.epochs, null);
trainer.start();
trainer.join();
refmodel = new NeuralNetModel(null, null, _train, ls, p);
}
/**
* Compare MEAN weights and biases in the hidden and output layers
*/
for (int n = 1; n < ls.length; ++n) {
Neurons l = neurons[n];
Layer ref = ls[n];
for (int o = 0; o < l._a.size(); o++) {
for (int i = 0; i < l._previous._a.size(); i++) {
a[n] += ref._w[o * l._previous._a.size() + i];
b[n] += l._w.raw()[o * l._previous._a.size() + i];
numweights++;
}
ba[n] += ref._b[o];
bb[n] += l._b.get(o);
numbiases++;
}
}
/**
* Compare predictions
* Note: the reference (NeuralNet) and H2O Deep Learning each do their own internal data normalization,
* so each must score its own test data, which is assumed to be created correctly.
*/
water.api.ConfusionMatrix CM = new water.api.ConfusionMatrix();
// Deep Learning scoring
{
//[0] is the predicted label, [1]... are the per-class probabilities
Frame fpreds = mymodel.score(_train);
CM = new water.api.ConfusionMatrix();
CM.actual = _train;
CM.vactual = _train.lastVec();
CM.predict = fpreds;
CM.vpredict = fpreds.vecs()[0];
CM.invoke();
StringBuilder sb = new StringBuilder();
CM.toASCII(sb);
trainerr += new ConfusionMatrix(CM.cm).err();
for (String s : sb.toString().split("\n")) Log.info(s);
fpreds.delete();
//[0] is the predicted label, [1]... are the per-class probabilities
Frame fpreds2 = mymodel.score(_test);
CM = new water.api.ConfusionMatrix();
CM.actual = _test;
CM.vactual = _test.lastVec();
CM.predict = fpreds2;
CM.vpredict = fpreds2.vecs()[0];
CM.invoke();
sb = new StringBuilder();
CM.toASCII(sb);
testerr += new ConfusionMatrix(CM.cm).err();
for (String s : sb.toString().split("\n")) Log.info(s);
fpreds2.delete();
}
// NeuralNet scoring
long[][] cm;
{
Log.info("\nNeuralNet Scoring:");
//training set
NeuralNet.Errors train = NeuralNet.eval(ls, 0, null);
reftrainerr += train.classification;
//test set
final Frame[] adapted = refmodel.adapt(_test, false);
Vec[] data = Utils.remove(_test.vecs(), _test.vecs().length - 1);
Vec labels = _test.vecs()[_test.vecs().length - 1];
Layer.VecsInput input = (Layer.VecsInput) ls[0];
input.vecs = data;
input._len = data[0].length();
((Layer.VecSoftmax) ls[ls.length - 1]).vec = labels;
//WARNING: only works if training set is large enough to have all classes
int classes = ls[ls.length - 1].units;
cm = new long[classes][classes];
NeuralNet.Errors test = NeuralNet.eval(ls, 0, cm);
Log.info("\nNeuralNet Confusion Matrix:");
Log.info(new ConfusionMatrix(cm).toString());
reftesterr += test.classification;
adapted[1].delete();
}
Assert.assertEquals(cm[0][0], CM.cm[0][0]);
Assert.assertEquals(cm[1][0], CM.cm[1][0]);
Assert.assertEquals(cm[0][1], CM.cm[0][1]);
Assert.assertEquals(cm[1][1], CM.cm[1][1]);
// cleanup
mymodel.delete();
refmodel.delete();
_train.delete();
_test.delete();
frame.delete();
}
trainerr /= (float) num_repeats;
reftrainerr /= (float) num_repeats;
testerr /= (float) num_repeats;
reftesterr /= (float) num_repeats;
/**
* Tolerances
*/
final float abseps = threaded ? 1e-2f : 1e-7f;
final float releps = threaded ? 1e-2f : 1e-5f;
// training set scoring
Log.info("NeuralNet train error " + reftrainerr);
Log.info("Deep Learning train error " + trainerr);
compareVal(reftrainerr, trainerr, abseps, releps);
// test set scoring
Log.info("NeuralNet test error " + reftesterr);
Log.info("Deep Learning test error " + testerr);
compareVal(reftesterr, testerr, abseps, releps);
// mean weights/biases
for (int n = 1; n < hidden.length + 2; ++n) {
Log.info("NeuralNet mean weight for layer " + n + ": " + a[n] / numweights);
Log.info("Deep Learning mean weight for layer " + n + ": " + b[n] / numweights);
Log.info("NeuralNet mean bias for layer " + n + ": " + ba[n] / numbiases);
Log.info("Deep Learning mean bias for layer " + n + ": " + bb[n] / numbiases);
compareVal(a[n] / numweights, b[n] / numweights, abseps, releps);
compareVal(ba[n] / numbiases, bb[n] / numbiases, abseps, releps);
}
}
}
}
}
}
}
}
}
}
}
}
}
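The weight, bias, and error comparisons above go through compareVal(expected, actual, abseps, releps), whose implementation is not shown on this page. A minimal sketch of such an absolute-plus-relative tolerance check, assuming JUnit's Assert (the real helper in h2o-2 may differ):

import org.junit.Assert;

// Hypothetical sketch: two values "agree" if they are within an absolute
// epsilon (covers values near zero) or within a relative epsilon of each other.
static void compareVal(double expected, double actual, double abseps, double releps) {
    final double absdiff = Math.abs(expected - actual);
    if (absdiff <= abseps) return;
    final double reldiff = absdiff / Math.max(Math.abs(expected), Math.abs(actual));
    Assert.assertTrue("expected " + expected + " but got " + actual, reldiff <= releps);
}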
Use of hex.deeplearning.DeepLearningModel in project h2o-3 by h2oai.
From the class TestCase, the method execute:
public TestCaseResult execute() throws Exception, AssertionError {
loadTestCaseDataSets();
makeModelParameters();
double startTime = 0, stopTime = 0;
if (!grid) {
Model.Output modelOutput = null;
DRF drfJob;
DRFModel drfModel = null;
GLM glmJob;
GLMModel glmModel = null;
GBM gbmJob;
GBMModel gbmModel = null;
DeepLearning dlJob;
DeepLearningModel dlModel = null;
String bestModelJson = null;
try {
switch(algo) {
case "drf":
drfJob = new DRF((DRFModel.DRFParameters) params);
AccuracyTestingSuite.summaryLog.println("Training DRF model.");
startTime = System.currentTimeMillis();
drfModel = drfJob.trainModel().get();
stopTime = System.currentTimeMillis();
modelOutput = drfModel._output;
bestModelJson = drfModel._parms.toJsonString();
break;
case "glm":
glmJob = new GLM((GLMModel.GLMParameters) params, Key.<GLMModel>make("GLMModel"));
AccuracyTestingSuite.summaryLog.println("Training GLM model.");
startTime = System.currentTimeMillis();
glmModel = glmJob.trainModel().get();
stopTime = System.currentTimeMillis();
modelOutput = glmModel._output;
bestModelJson = glmModel._parms.toJsonString();
break;
case "gbm":
gbmJob = new GBM((GBMModel.GBMParameters) params);
AccuracyTestingSuite.summaryLog.println("Training GBM model.");
startTime = System.currentTimeMillis();
gbmModel = gbmJob.trainModel().get();
stopTime = System.currentTimeMillis();
modelOutput = gbmModel._output;
bestModelJson = gbmModel._parms.toJsonString();
break;
case "dl":
dlJob = new DeepLearning((DeepLearningModel.DeepLearningParameters) params);
AccuracyTestingSuite.summaryLog.println("Training DL model.");
startTime = System.currentTimeMillis();
dlModel = dlJob.trainModel().get();
stopTime = System.currentTimeMillis();
modelOutput = dlModel._output;
bestModelJson = dlModel._parms.toJsonString();
break;
}
} catch (Exception e) {
throw new Exception(e);
} finally {
if (drfModel != null) {
drfModel.delete();
}
if (glmModel != null) {
glmModel.delete();
}
if (gbmModel != null) {
gbmModel.delete();
}
if (dlModel != null) {
dlModel.delete();
}
}
removeTestCaseDataSetFrames();
// Check whether cross-validation was used
if (params._nfolds > 0) {
return new TestCaseResult(testCaseId, getMetrics(modelOutput._training_metrics), getMetrics(modelOutput._cross_validation_metrics), stopTime - startTime, bestModelJson, this, trainingDataSet, testingDataSet);
} else {
return new TestCaseResult(testCaseId, getMetrics(modelOutput._training_metrics), getMetrics(modelOutput._validation_metrics), stopTime - startTime, bestModelJson, this, trainingDataSet, testingDataSet);
}
} else {
assert !modelSelectionCriteria.equals("");
makeGridParameters();
makeSearchCriteria();
Grid grid = null;
Model bestModel = null;
String bestModelJson = null;
try {
SchemaServer.registerAllSchemasIfNecessary();
// TODO: Hack for PUBDEV-2812
switch (algo) {
case "drf":
if (!drfRegistered) {
new DRF(true);
new DRFParametersV3();
drfRegistered = true;
}
break;
case "glm":
if (!glmRegistered) {
new GLM(true);
new GLMParametersV3();
glmRegistered = true;
}
break;
case "gbm":
if (!gbmRegistered) {
new GBM(true);
new GBMParametersV3();
gbmRegistered = true;
}
break;
case "dl":
if (!dlRegistered) {
new DeepLearning(true);
new DeepLearningParametersV3();
dlRegistered = true;
}
break;
}
startTime = System.currentTimeMillis();
// TODO: ModelParametersBuilderFactory parameter must be instantiated properly
Job<Grid> gs = GridSearch.startGridSearch(null, params, hyperParms, new GridSearch.SimpleParametersBuilderFactory<>(), searchCriteria);
grid = gs.get();
stopTime = System.currentTimeMillis();
boolean higherIsBetter = higherIsBetter(modelSelectionCriteria);
double bestScore = higherIsBetter ? -Double.MAX_VALUE : Double.MAX_VALUE;
for (Model m : grid.getModels()) {
double validationMetricScore = getMetrics(m._output._validation_metrics).get(modelSelectionCriteria);
AccuracyTestingSuite.summaryLog.println(modelSelectionCriteria + " for model " + m._key.toString() + " is " + validationMetricScore);
if (higherIsBetter ? validationMetricScore > bestScore : validationMetricScore < bestScore) {
bestScore = validationMetricScore;
bestModel = m;
bestModelJson = bestModel._parms.toJsonString();
}
}
AccuracyTestingSuite.summaryLog.println("Best model: " + bestModel._key.toString());
AccuracyTestingSuite.summaryLog.println("Best model parameters: " + bestModelJson);
} catch (Exception e) {
throw new Exception(e);
} finally {
if (grid != null) {
grid.delete();
}
}
removeTestCaseDataSetFrames();
// Check whether cross-validation was used
if (params._nfolds > 0) {
return new TestCaseResult(testCaseId, getMetrics(bestModel._output._training_metrics), getMetrics(bestModel._output._cross_validation_metrics), stopTime - startTime, bestModelJson, this, trainingDataSet, testingDataSet);
} else {
return new TestCaseResult(testCaseId, getMetrics(bestModel._output._training_metrics), getMetrics(bestModel._output._validation_metrics), stopTime - startTime, bestModelJson, this, trainingDataSet, testingDataSet);
}
}
}
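The grid branch above picks the best model by scanning each model's validation metric. Distilled into a self-contained sketch (the Map-per-model shape mirrors what the snippet's getMetrics(...) appears to return; the helper itself is illustrative, not H2O API):

import java.util.List;
import java.util.Map;

// Sketch: return the index of the candidate with the best score under the
// chosen criterion, where "best" depends on whether higher is better.
static int selectBest(List<Map<String, Double>> validationMetrics, String criterion, boolean higherIsBetter) {
    int best = -1;
    double bestScore = higherIsBetter ? -Double.MAX_VALUE : Double.MAX_VALUE;
    for (int i = 0; i < validationMetrics.size(); i++) {
        double score = validationMetrics.get(i).get(criterion);
        if (higherIsBetter ? score > bestScore : score < bestScore) {
            bestScore = score;
            best = i;
        }
    }
    return best;
}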
Use of hex.deeplearning.DeepLearningModel in project h2o-2 by h2oai.
From the class Anomaly, the method execImpl:
@Override
protected final void execImpl() {
if (dl_autoencoder_model == null)
throw new IllegalArgumentException("Deep Learning Model must be specified.");
DeepLearningModel dlm = UKV.get(dl_autoencoder_model);
if (dlm == null)
throw new IllegalArgumentException("Deep Learning Model not found.");
if (!dlm.get_params().autoencoder)
throw new IllegalArgumentException("Deep Learning Model must be built with autoencoder = true.");
if (thresh == -1) {
Log.info("Mean reconstruction error (MSE) of model on training data: " + dlm.mse());
thresh = 10 * dlm.mse();
Log.info("Setting MSE threshold for anomaly to: " + thresh + ".");
}
StringBuilder sb = new StringBuilder();
sb.append("\nFinding outliers in frame " + source._key.toString() + ".\n");
Frame mse = dlm.scoreAutoEncoder(source);
sb.append("Storing the reconstruction error (MSE) for all rows under: " + dest() + ".\n");
Frame output = new Frame(dest(), new String[] { "Reconstruction.MSE" }, new Vec[] { mse.vecs()[0] });
output.delete_and_lock(null);
output.unlock(null);
final Vec mse_test = mse.anyVec();
sb.append("Mean reconstruction error (MSE): " + mse_test.mean() + ".\n");
// print stats and potential outliers
sb.append("The following data points have a reconstruction error greater than " + thresh + ":\n");
HashSet<Long> outliers = new HashSet<Long>();
for (long i = 0; i < mse_test.length(); i++) {
if (mse_test.at(i) > thresh) {
outliers.add(i);
sb.append(String.format("row %d : MSE = %5f\n", i, mse_test.at(i)));
}
}
Log.info(sb);
}
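The method flags any row whose reconstruction error exceeds ten times the model's training MSE. The core rule, extracted as a standalone sketch (names and types are illustrative, not H2O API):

import java.util.HashSet;
import java.util.Set;

// Sketch of the outlier rule above: a row is anomalous when its autoencoder
// reconstruction MSE exceeds 10x the model's mean training MSE.
static Set<Integer> findOutliers(double[] rowMSE, double trainMSE) {
    final double thresh = 10 * trainMSE;  // same heuristic default as the snippet
    Set<Integer> outliers = new HashSet<>();
    for (int i = 0; i < rowMSE.length; i++)
        if (rowMSE[i] > thresh) outliers.add(i);
    return outliers;
}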
Use of hex.deeplearning.DeepLearningModel in project h2o-2 by h2oai.
From the class DeepFeatures, the method execImpl:
@Override
protected final void execImpl() {
if (dl_model == null)
throw new IllegalArgumentException("Deep Learning Model must be specified.");
DeepLearningModel dlm = UKV.get(dl_model);
if (dlm == null)
throw new IllegalArgumentException("Deep Learning Model not found.");
StringBuilder sb = new StringBuilder();
if (layer < -1 || layer > dlm.get_params().hidden.length - 1)
throw new IllegalArgumentException("Layer must be either -1 or between 0 and " + (dlm.get_params().hidden.length - 1));
if (layer == -1)
layer = dlm.get_params().hidden.length - 1;
int features = dlm.get_params().hidden[layer];
sb.append("\nTransforming frame '" + source._key.toString() + "' with " + source.numCols() + " into " + features + " features with model '" + dl_model + "'\n");
Frame df = dlm.scoreDeepFeatures(source, layer);
sb.append("Storing the new features under: " + dest() + ".\n");
Frame output = new Frame(dest(), df.names(), df.vecs());
output.delete_and_lock(null);
output.unlock(null);
}
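Note the layer convention used above: layer == -1 is shorthand for the last hidden layer, and valid explicit indices run from 0 to hidden.length - 1. That validation and resolution logic in isolation (a sketch; the helper name is hypothetical):

// Sketch: resolve the user-facing layer index, where -1 means "last hidden layer".
static int resolveLayer(int layer, int[] hidden) {
    if (layer < -1 || layer > hidden.length - 1)
        throw new IllegalArgumentException("Layer must be either -1 or between 0 and " + (hidden.length - 1));
    return layer == -1 ? hidden.length - 1 : layer;
}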
Use of hex.deeplearning.DeepLearningModel in project h2o-2 by h2oai.
From the class DeepLearningMissingTest, the method run:
@Test
public void run() {
long seed = new Random().nextLong();
DeepLearningModel mymodel = null;
Frame train = null;
Frame test = null;
Frame data = null;
DeepLearning p;
Log.info("");
Log.info("STARTING.");
Log.info("Using seed " + seed);
Map<DeepLearning.MissingValuesHandling, Double> sumErr = new TreeMap<DeepLearning.MissingValuesHandling, Double>();
StringBuilder sb = new StringBuilder();
for (DeepLearning.MissingValuesHandling mvh : new DeepLearning.MissingValuesHandling[] { DeepLearning.MissingValuesHandling.Skip, DeepLearning.MissingValuesHandling.MeanImputation }) {
double sumerr = 0;
Map<Double, Double> map = new TreeMap<Double, Double>();
for (double missing_fraction : new double[] { 0, 0.1, 0.25, 0.5, 0.75, 1 }) {
try {
Key file = NFSFileVec.make(find_test_file("smalldata/weather.csv"));
// Key file = NFSFileVec.make(find_test_file("smalldata/mnist/test.csv.gz"));
data = ParseDataset2.parse(Key.make("data.hex"), new Key[] { file });
// Create holdout test data on clean data (before adding missing values)
FrameSplitter fs = new FrameSplitter(data, new float[] { 0.75f });
H2O.submitTask(fs).join();
Frame[] train_test = fs.getResult();
train = train_test[0];
test = train_test[1];
// add missing values to the training data (excluding the response)
if (missing_fraction > 0) {
Frame frtmp = new Frame(Key.make(), train.names(), train.vecs());
//exclude the response
frtmp.remove(frtmp.numCols() - 1);
DKV.put(frtmp._key, frtmp);
InsertMissingValues imv = new InsertMissingValues();
imv.missing_fraction = missing_fraction;
//use the same seed for Skip and MeanImputation!
imv.seed = seed;
imv.key = frtmp._key;
imv.serve();
//just remove the Frame header (not the chunks)
DKV.remove(frtmp._key);
}
// Build a regularized DL model with polluted training data, score on clean validation set
p = new DeepLearning();
p.source = train;
p.validation = test;
p.response = train.lastVec();
//only for weather data
p.ignored_cols = new int[] { 1, 22 };
p.missing_values_handling = mvh;
p.activation = DeepLearning.Activation.RectifierWithDropout;
p.hidden = new int[] { 200, 200 };
p.l1 = 1e-5;
p.input_dropout_ratio = 0.2;
p.epochs = 10;
p.quiet_mode = true;
try {
Log.info("Starting with " + missing_fraction * 100 + "% missing values added.");
p.invoke();
} catch (Throwable t) {
t.printStackTrace();
throw new RuntimeException(t);
} finally {
p.delete();
}
// Extract the scoring on validation set from the model
mymodel = UKV.get(p.dest());
DeepLearningModel.Errors[] errs = mymodel.scoring_history();
DeepLearningModel.Errors lasterr = errs[errs.length - 1];
double err = lasterr.valid_err;
Log.info("Missing " + missing_fraction * 100 + "% -> Err: " + err);
map.put(missing_fraction, err);
sumerr += err;
} catch (Throwable t) {
t.printStackTrace();
throw new RuntimeException(t);
} finally {
// cleanup
if (mymodel != null) {
mymodel.delete_xval_models();
mymodel.delete_best_model();
mymodel.delete();
}
if (train != null)
train.delete();
if (test != null)
test.delete();
if (data != null)
data.delete();
}
}
sb.append("\nMethod: " + mvh.toString() + "\n");
sb.append("missing fraction --> Error\n");
for (String s : Arrays.toString(map.entrySet().toArray()).split(",")) sb.append(s.replace("=", " --> ")).append("\n");
sb.append('\n');
sb.append("Sum Err: " + sumerr + "\n");
sumErr.put(mvh, sumerr);
}
Log.info(sb.toString());
Assert.assertTrue(sumErr.get(DeepLearning.MissingValuesHandling.Skip) > sumErr.get(DeepLearning.MissingValuesHandling.MeanImputation));
//this holds true for both datasets
Assert.assertTrue(sumErr.get(DeepLearning.MissingValuesHandling.MeanImputation) < 2);
}
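The test asserts that MeanImputation outperforms Skip once missing values are injected into the predictors. For intuition, mean imputation replaces each missing value with its column's mean; a minimal standalone sketch of the idea (not H2O's implementation):

// Sketch: fill NaNs in one predictor column with that column's mean.
static void imputeMean(double[] col) {
    double sum = 0;
    int n = 0;
    for (double v : col)
        if (!Double.isNaN(v)) { sum += v; n++; }
    final double mean = n > 0 ? sum / n : 0;  // fall back to 0 if the column is all-missing
    for (int i = 0; i < col.length; i++)
        if (Double.isNaN(col[i])) col[i] = mean;
}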