Use of water.Key in project h2o-2 by h2oai:
class Expr2Test, method rbindTest.
/**
 * Parses a small CSV, splits it into two frames, rbinds the halves back
 * together via the Exec2 expression language, and verifies the recombined
 * frame has the same row count as the original.
 */
@Test
public void rbindTest() {
    Key dest1 = Key.make("f1");
    float[] ratios = arf(0.5f);
    Frame[] splits = null;
    File file1 = TestUtil.find_test_file("smalldata/tnc3_10.csv");
    Key fkey1 = NFSFileVec.make(file1);
    Frame f = ParseDataset2.parse(dest1, new Key[] { fkey1 });
    // Split the parsed frame in half (ratio 0.5) on a background task.
    FrameSplitter fs = new FrameSplitter(f, ratios);
    H2O.submitTask(fs).join();
    splits = fs.getResult();
    Frame rbinded_frame;
    Env ev = Exec2.exec("rbind(" + splits[0]._key + "," + splits[1]._key + ")");
    try {
        rbinded_frame = ev.popAry();
    } finally {
        // Guard: Exec2.exec could have returned null, in which case popAry()
        // NPEs and there is no environment to unlock.
        if (ev != null)
            ev.remove_and_unlock();
    }
    // JUnit convention: expected value first, actual second — original had
    // them reversed, which garbles the failure message.
    assertEquals(f.numRows(), rbinded_frame.numRows());
    // Clean up all keys created by this test.
    rbinded_frame.delete();
    Lockable.delete(dest1);
    for (Frame s : splits)
        if (s != null)
            s.delete();
}
Use of water.Key in project h2o-2 by h2oai:
class SparseTest, method makeChunk.
/**
 * Builds a single-chunk Vec from {@code vals} and returns its first chunk.
 * NAs are preserved, exact integral values are appended as longs (allowing
 * compressed integer chunk encodings to kick in), and all other values are
 * appended as doubles.
 *
 * <p>Note: the original version also counted nonzero indices into a local
 * array, but that result was never read (and the index bookkeeping was wrong
 * anyway — the position counter only advanced on nonzero values), so the
 * dead computation has been removed.
 *
 * @param vals the values to write into the chunk, in order
 * @return the first (and only) chunk of the newly built Vec
 */
protected Chunk makeChunk(double[] vals) {
    Key key = Vec.newKey();
    AppendableVec av = new AppendableVec(key);
    NewChunk nv = new NewChunk(av, 0);
    for (double d : vals) {
        if (Double.isNaN(d))
            nv.addNA();
        else if ((long) d == d)
            nv.addNum((long) d, 0); // exact integer: mantissa d, exponent 0
        else
            nv.addNum(d);
    }
    nv.close(0, null);
    // Close the vec and wait for all pending DKV writes before reading back.
    Futures fs = new Futures();
    Vec vec = av.close(fs);
    fs.blockForPending();
    return vec.chunkForChunkIdx(0);
}
Use of water.Key in project h2o-2 by h2oai:
class RunifSplitTest, method test1.
/** Verifies that a 70/30 uniform-random split preserves the total row count. */
@Test
public void test1() {
    Key rawKey = NFSFileVec.make(find_test_file(PATH));
    Frame parsed = ParseDataset2.parse(Key.make("iris_nn2"), new Key[] { rawKey });
    // Split with seed -1; the two pieces must account for every row exactly once.
    Frame[] halves = Frame.runifSplit(parsed, .70f, -1);
    long combined = halves[0].numRows() + halves[1].numRows();
    Assert.assertTrue(combined == parsed.numRows());
    // Release all frames created by this test.
    parsed.delete();
    halves[0].delete();
    halves[1].delete();
}
Use of water.Key in project h2o-3 by h2oai:
class DeepLearningScoreTest, method testPubDev928.
/** Load simple dataset, rebalance to a number of chunks > number of rows, and run deep learning */
@Test
public void testPubDev928() {
// Create rebalanced dataset
Key rebalancedKey = Key.make("rebalanced");
NFSFileVec nfs = TestUtil.makeNfsFileVec("smalldata/logreg/prostate.csv");
Frame fr = ParseDataset.parse(Key.make(), nfs._key);
// Rebalance into more chunks than rows, which forces at least one empty chunk.
RebalanceDataSet rb = new RebalanceDataSet(fr, rebalancedKey, (int) (fr.numRows() + 1));
// Run the rebalance task to completion before reading its result from the DKV.
H2O.submitTask(rb);
rb.join();
Frame rebalanced = DKV.get(rebalancedKey).get();
// Assert that there is at least one 0-len chunk
assertZeroLengthChunk("Rebalanced dataset should contain at least one 0-len chunk!", rebalanced.anyVec());
DeepLearningModel dlModel = null;
try {
// Launch Deep Learning
// NOTE(review): training on the rebalanced (empty-chunk-containing) frame is
// the point of the test — PUBDEV-928 was a scoring failure on 0-len chunks.
DeepLearningParameters dlParams = new DeepLearningParameters();
dlParams._train = rebalancedKey;
dlParams._epochs = 5;
dlParams._response_column = "CAPSULE";
dlModel = new DeepLearning(dlParams).trainModel().get();
} finally {
// Clean up both frames and (if training got far enough) the model,
// regardless of whether training succeeded.
fr.delete();
rebalanced.delete();
if (dlModel != null)
dlModel.delete();
}
}
Use of water.Key in project h2o-3 by h2oai:
class DeepLearningSpiralsTest, method run.
/**
 * Trains a Deep Learning classifier on the two-spiral dataset for each
 * sparse/col_major combination and requires the test classification error
 * to reach 0.1 or better; also cross-checks POJO/Java scoring agreement.
 */
@Test
public void run() {
Scope.enter();
NFSFileVec nfs = TestUtil.makeNfsFileVec("smalldata/junit/two_spiral.csv");
Frame frame = ParseDataset.parse(Key.make(), nfs._key);
Log.info(frame);
// The response is the last column of the parsed frame.
int resp = frame.names().length - 1;
for (boolean sparse : new boolean[] { true, false }) {
for (boolean col_major : new boolean[] { false }) {
// col_major requires sparse; skip the invalid combination.
if (!sparse && col_major)
continue;
Key model_id = Key.make();
// build the model
{
DeepLearningParameters p = new DeepLearningParameters();
p._epochs = 5000;
p._hidden = new int[] { 100 };
p._sparse = sparse;
p._col_major = col_major;
p._activation = DeepLearningParameters.Activation.Tanh;
p._initial_weight_distribution = DeepLearningParameters.InitialWeightDistribution.Normal;
p._initial_weight_scale = 2.5;
p._loss = DeepLearningParameters.Loss.CrossEntropy;
p._train = frame._key;
p._response_column = frame.names()[resp];
// Convert response to categorical
// NOTE(review): replace() swaps the response Vec in place and the old Vec is
// tracked by Scope for cleanup; the frame is re-put so the DKV sees the change.
Scope.track(frame.replace(resp, frame.vecs()[resp].toCategoricalVec()));
DKV.put(frame);
p._rho = 0.99;
p._epsilon = 5e-3;
//stop when reaching 0 classification error on training data
p._classification_stop = 0;
p._train_samples_per_iteration = 10000;
p._stopping_rounds = 5;
p._stopping_metric = ScoreKeeper.StoppingMetric.misclassification;
p._score_each_iteration = true;
// Fixed seed + reproducible mode so the error bound below is deterministic.
p._reproducible = true;
p._seed = 1234;
new DeepLearning(p, model_id).trainModel().get();
}
// score and check result
{
DeepLearningModel mymodel = DKV.getGet(model_id);
Frame pred = mymodel.score(frame);
ModelMetricsBinomial mm = ModelMetricsBinomial.getFromDKV(mymodel, frame);
double error = mm._auc.defaultErr();
Log.info("Error: " + error);
if (error > 0.1) {
Assert.fail("Test classification error is not <= 0.1, but " + error + ".");
}
// In-H2O scoring and generated-Java scoring must agree to 1e-6.
Assert.assertTrue(mymodel.testJavaScoring(frame, pred, 1e-6));
pred.delete();
mymodel.delete();
}
}
}
frame.delete();
Scope.exit();
}
Aggregations