Use of nars.Op.BELIEF in the project narchy (by automenta).
From the class RTreeBeliefTableTest, method testAccuracy:
/**
 * Drives a NAR with timed belief inputs sampled from {@code func}, then measures how
 * closely the belief table's interpolated truth reproduces the original signal,
 * asserting the average per-point frequency error stays below 0.1.
 *
 * @param dur    perceptual duration (cycles) configured on the reasoner clock
 * @param period cycles to run between successive inputs
 * @param end    time at which input stops and validation begins
 * @param cap    temporal capacity of the belief table under test
 * @param func   ground-truth frequency signal, sampled at each time point
 */
static void testAccuracy(int dur, int period, int end, int cap, LongToFloatFunction func) {
    NAR n = NARS.shell();
    n.time.dur(dur);
    Term term = $.p("x");
    // 1. populate: feed one belief per period, with frequency sampled from func
    TaskConcept c = (TaskConcept) n.conceptualize(term);
    @NotNull BeliefTable cb = c.beliefs(); // fixed: removed dead `true ? c.beliefs() : c.goals()` ternary
    cb.setCapacity(0, cap); // 0 eternal, cap temporal
    System.out.println("points:");
    long time;
    long start = n.time();
    while ((time = n.time()) < end) {
        float f = func.valueOf(time);
        System.out.print(time + "=" + f + "\t");
        n.input($.task(term, BELIEF, f, 0.9f).time(time).setPriThen(0.5f).apply(n));
        n.run(period);
        c.beliefs().print();
        System.out.println();
    }
    System.out.println();
    System.out.println();
    // summarize the resulting table contents (input vs derived, priority, truth, error)
    MultiStatistics<Task> m = new MultiStatistics<Task>()
            .classify("input", (t) -> t.isInput())
            .classify("derived", (t) -> t instanceof DerivedTask)
            .value("pri", (t) -> t.pri())
            .value2D("truth", (t) -> new float[] { t.freq(), t.conf() })
            .value("freqErr", (t) -> Math.abs(((t.freq() - 0.5f) * 2f) - func.valueOf(t.mid())))
            .add(c.beliefs().streamTasks().collect(toList()));
    System.out.println();
    m.print();
    System.out.println();
    c.beliefs().print();
    // 2. validate: compare the table's truth estimate against func at every time point
    CSVOutput csv = new CSVOutput(System.out, "time", "actual", "approx");
    double errSum = 0;
    for (long i = start; i < end; i++) {
        float actual = func.valueOf(i);
        Truth actualTruth = n.beliefTruth(term, i);
        float approx, err;
        if (actualTruth != null) {
            approx = actualTruth.freq();
            err = Math.abs(approx - actual);
        } else {
            // no estimate at all: count as maximal error
            approx = Float.NaN;
            err = 1f;
        }
        errSum += err;
        csv.out(i, actual, approx);
    }
    // fixed off-by-one: the loop covers [start, end), i.e. exactly (end - start) samples
    double avgErr = errSum / (end - start);
    System.out.println();
    System.out.println(n4(avgErr) + " avg freq err per point");
    assertTrue(avgErr < 0.1f);
}
Use of nars.Op.BELIEF in the project narchy (by automenta).
From the class NAct, method actionBipolarFrequencyDifferential:
/**
 * Creates a bipolar action represented by two complementary concepts,
 * (s-->PLUS) and (s-->NEG).  Once per cycle, after BOTH concepts have reported
 * their goal truth, the two goal expectations are differenced into a single
 * control value x in [-1..+1], passed through {@code update}, and the executed
 * result y is echoed back to both concepts as balanced belief feedback.
 *
 * @param s base action term; the PLUS/NEG variants are derived from it
 * @param fair if true, attenuates x by the coherence (cMin/cMax) of the two goal confidences
 * @param latchPreviousIfUndecided if true, reuses the previous x when neither goal reaches confMin
 * @param update maps the decided x (-1..+1) to the actually-executed y; a NaN y suppresses feedback
 * @return the two underlying concepts: [0] = positive, [1] = negative
 */
default GoalActionAsyncConcept[] actionBipolarFrequencyDifferential(@NotNull Term s, boolean fair, boolean latchPreviousIfUndecided, @NotNull FloatToFloatFunction update) {
    Term pt = // $.p(s, PLUS);
    $.inh(s, PLUS);
    // $.prop(s,PLUS);
    // $.p(s, ZeroProduct);
    // $.p(s,$.the("\"+\""));
    Term nt = // $.p(s, NEG);
    $.inh(s, NEG);
    // $.prop(s, NEG);
    // $.p(ZeroProduct, s);
    // $.p(s,$.the("\"-\""));
    // per-side goal state for the current cycle: index [0]=PLUS side, [1]=NEG side
    final float[] g = new float[2]; // goal expectation per side
    final float[] c = new float[2]; // goal confidence per side
    final long[] lastUpdate = { ETERNAL }; // time of the most recent combined decision
    final float[] lastX = { 0 }; // previous decided x, reused when latching
    // hack
    GoalActionAsyncConcept[] CC = new GoalActionAsyncConcept[2];
    // shared handler: each concept invokes this once per cycle; the combined
    // decision fires only after both CC slots have been filled for the current time
    @NotNull BiConsumer<GoalActionAsyncConcept, Truth> u = (action, gg) -> {
        NAR n = nar();
        long now = n.time();
        if (now != lastUpdate[0]) {
            lastUpdate[0] = now;
            // reset
            CC[0] = CC[1] = null;
        }
        // float freqEps = n.freqResolution.floatValue();
        float confMin = n.confMin.floatValue();
        // float eviMin = c2wSafe(confMin);
        float feedbackConf = // fairly shared to sum to default
        w2c(c2w(n.confDefault(BELIEF)) / 2f);
        // n.confDefault(BELIEF);
        // n.confDefault(GOAL);
        // confMin * ...;
        boolean p = action.term().equals(pt); // which side is reporting?
        int ip = p ? 0 : 1;
        CC[ip] = action;
        g[ip] = gg != null ? // gg.freq()
        gg.expectation() : 0f;
        // 0.5f;
        c[ip] = gg != null ? // gg.evi()
        gg.conf() : 0f;
        // -1..+1
        float x;
        boolean curious;
        if (CC[0] != null && CC[1] != null) /* both ready */
        {
            float cMax = Math.max(c[0], c[1]);
            float cMin = Math.min(c[0], c[1]);
            float coherence = cMin / cMax;
            Random rng = n.random();
            float cur = curiosity().floatValue();
            // curiosity: with probability `cur`, override the decision with a random x
            if (cur > 0 && rng.nextFloat() <= cur) {
                x = (rng.nextFloat() - 0.5f) * 2f;
                // float curiEvi =
                // //c2w(n.confDefault(BELIEF));
                // //eviMin*2;
                // Math.max(c2wSafe(w2cSafe(eviMin)*2), Util.mean(c[0], c[1])); //match desire conf, min=2*minConf
                c[0] = c[1] = feedbackConf;
                coherence = 1f;
                curious = true;
            } else {
                curious = false;
                if (cMax < confMin) {
                    // neither side is confident enough to decide this cycle
                    if (latchPreviousIfUndecided) {
                        x = lastX[0];
                    } else {
                        x = 0;
                    }
                } else {
                    // //expectation
                    // float g0 = g[0]-0.5f;
                    // float g1 = g[1]-0.5f;
                    // df = 2f * ((g0) - (g1));
                    // // /Math.max(Math.abs(g0), Math.abs(g1));
                    // frequency -======================
                    // A. subtraction
                    // subtract
                    x = ((g[0] - g[1]));
                    // experimental: lessen by a factor of how equally confident each goal is
                    if (fair) {
                        // fully fair
                        x *= coherence;
                        // x *= Math.sqrt(coherence); //less sharp than linear
                        // semi-fair
                        // df *= 0.5f + 0.5f * (eMin / eMax); //reduction by at most half
                    }
                    // df *= 1f - Math.abs(e[0] - e[1]) / eMax;
                    // df *= Util.sqr(eMin / eMax); //more cautious
                    // df *= Math.min(w2cSafe(e[0]), w2cSafe(e[1])) / w2cSafe(eMax);
                }
            }
            x = Util.clamp(x, -1f, +1f);
            lastX[0] = x;
            // -1..+1
            float y = update.valueOf(x); // the actually-executed motor value
            // System.out.println(x + " " + y);
            // w2c(Math.abs(y) * c2w(restConf));
            PreciseTruth Nb, Ng, Pb, Pg;
            if (y == y) { // y == y is false only when y is NaN (no feedback)
                // y: (-1..+1)
                float yp, yn;
                if (Math.abs(y) >= n.freqResolution.floatValue()) {
                    yp = 0.5f + y / 2f;
                    yn = 1f - yp;
                } else {
                    // below frequency resolution: neutral feedback on both sides
                    yp = yn = 0.5f;
                }
                // float yp = 0.5f + y/2f;
                // float yn = 1f - yp;
                float pbf = yp;
                float nbf = yn;
                Pb = $.t(pbf, feedbackConf);
                Nb = $.t(nbf, feedbackConf);
                // float goalEvi =
                // eviMin;
                // //max(eviMin, max(e[0], e[1]));
                // Pg = curious || e[0] == 0 ? new PreciseTruth(yp, goalEvi, false) : null;
                // Ng = curious || e[1] == 0 ? new PreciseTruth(yn, goalEvi, false) : null;
                // float confBase = confMin*4; //~ alpha, learning rate
                // float fThresh = Float.MIN_NORMAL;
                // float yp = y > +fThresh ? Util.lerp(+y, confBase, feedbackConf) : confBase;
                // float yn = y < -fThresh ? Util.lerp(-y, confBase, feedbackConf) : confBase;
                // Pb = $.t(y > +fThresh ? 1 : 0, y > +fThresh ? yp : feedbackConf - yp);
                // Nb = $.t(y < -fThresh ? 1 : 0, y < -fThresh ? yn : feedbackConf - yn);
                // //Pg = curious || e[0] == 0 ? new PreciseTruth(1, Util.lerp(+y, confMin2, feedbackConf)) : null;
                // Pg = null;
                // //Ng = curious || e[1] == 0 ? new PreciseTruth(1, Util.lerp(-y, confMin2, feedbackConf)) : null;
                // Ng = null;
                // float fThresh = nar().freqResolution.floatValue();
                // int sign = (y > fThresh ? +1 : (y < -fThresh ? -1 : 0));
                //
                // float feedConf =
                // w2cSafe(c2wSafe(goalConf)/2f); //half/half
                // //goalConf;
                // //Math.max(confMin, goalConf * coherence);
                // switch (sign) {
                // case +1:
                // //Pb = $.t(1f, Util.lerp(+y, confBase, feedbackConf));
                // Pb = $.t(y/2f + 0.5f, feedConf);
                // Nb =
                // //null;
                // $.t(0, feedConf);
                // break;
                // case -1:
                // Pb =
                // //null;
                // $.t(0, feedConf);
                //
                // Nb = $.t(-y/2f + 0.5f, feedConf);
                // break;
                // case 0:
                // //Pb = Nb = null; //no signal
                // Pb = Nb = $.t(0, feedConf);
                // //Math.max(confMin, feedConf);
                // //w2cSafe(c2wSafe(feedConf)/2f))); //zero
                // break;
                // default:
                // throw new UnsupportedOperationException();
                // }
                Pg = null;
                Ng = null;
                // if (curious) {
                // e[0] = e[1] = 0; //reset to get full evidence override
                // }
                // float g0 = eviMax - e[0];
                // Pg = g0 >= eviMin ? new PreciseTruth(yp, g0, false) : null;
                // float g1 = eviMax - e[1];
                // Ng = g1 >= eviMin ? new PreciseTruth(yn, g1, false) : null;
            } else {
                Pb = Nb = Pg = Ng = null;
            }
            // System.out.println(Pb + "," + Nb + " <- " + g[0] + ";" + c[0] + ", " + g[1] + ';' + c[1]);
            CC[0].feedback(Pb, Pg, n);
            CC[1].feedback(Nb, Ng, n);
        }
    };
    CauseChannel<ITask> cause = nar().newCauseChannel(s);
    GoalActionAsyncConcept p = new GoalActionAsyncConcept(pt, this, cause, u);
    GoalActionAsyncConcept n = new GoalActionAsyncConcept(nt, this, cause, u);
    addAction(p);
    addAction(n);
    // background belief: --(s --> (PLUS & NEG)) — the two poles are mutually exclusive
    nar().believe($.inh(s, SECTe.the(PLUS, NEG)).neg(), Tense.Eternal);
    CC[0] = p;
    CC[1] = n;
    return CC;
}
Use of nars.Op.BELIEF in the project narchy (by automenta).
From the class NAgentX, method runRT:
/**
 * Boots a real-time NAR configured for agent experiments, attaches the agent
 * produced by {@code init}, opens the visualization windows, and starts both
 * loops.
 *
 * @param init factory constructing the agent from the freshly configured NAR
 * @param narFPS reasoner update rate (frames/sec); also drives the clock rate
 * @param agentFPS agent sense/act update rate (frames/sec)
 * @return the started NAR instance
 */
public static NAR runRT(Function<NAR, NAgent> init, float narFPS, float agentFPS) {
    // The.Subterms.the =
    // The.Subterms.CaffeineSubtermBuilder.get();
    // The.Subterms.HijackSubtermBuilder.get();
    // The.Subterms.SoftSubtermBuilder.get();
    // The.Compound.the =
    // The.Compound.
    // //SoftCompoundBuilder.get();
    // CaffeineCompoundBuilder.get();
    float clockFPS = // agentFPS;
    narFPS;
    // choose clock resolution: centisecond when fast enough, else half-decisecond
    RealTime clock = clockFPS >= 10 / 2f ? /* nyquist threshold between decisecond (0.1) and centisecond (0.01) clock resolution */
    new RealTime.CS(true) : new RealTime.DSHalf(true);
    clock.durFPS(clockFPS);
    // Function<NAR, PrediTerm<Derivation>> deriver = Deriver.deriver(8
    // , "motivation.nal"
    // //., "relation_introduction.nal"
    // );
    // int THREADS = Math.max(1, Runtime.getRuntime().availableProcessors() - 1);
    // Predicate<Activate> randomBool = (a) -> ThreadLocalRandom.current().nextBoolean();
    // exe.add(new FocusExec(), (x) -> true);
    // exe.add(new FocusExec() {
    // {
    // concepts.setCapacity(32);
    // }
    // },
    // (x) -> true);
    // build the reasoner: worker executor, real-time clock, layered derivers,
    // and a Caffeine-backed concept index weighted by term voluplexity
    NAR n = new NARS().exe(new WorkerMultiExec(// new Focus.DefaultRevaluator(),
    new Focus.AERevaluator(new XoRoShiRo128PlusRandom(1)), 256, 8192) {
        {
            Util.setExecutor(this);
        }
    }).time(clock).deriverAdd(1, 1).deriverAdd(2, 2).deriverAdd(3, 3).deriverAdd(5, 5).deriverAdd(6, 8).deriverAdd("motivation.nal").index(new CaffeineIndex(// 800 * 1024,
    2500 * 1024, // Integer.MAX_VALUE,
    c -> {
        return (int) Math.ceil(c.voluplexity());
        // return Math.round(
        // ((float)c.voluplexity())
        // / (1 + 100 * (c.termlinks().priSum() + c.tasklinks().priSum()))
        // //(c.beliefs().size() + c.goals().size()))
        // );
    })).get();
    // n.defaultWants();
    // reasoner parameter tuning
    n.dtMergeOrChoose.set(true);
    // 0.5f //nyquist
    n.dtDither.set(1f);
    // n.timeFocus.set(4);
    n.confMin.set(0.01f);
    n.freqResolution.set(0.01f);
    n.termVolumeMax.set(40);
    n.beliefConfDefault.set(0.9f);
    n.goalConfDefault.set(0.9f);
    float priFactor = 0.2f; // uniform damping applied to all default priorities
    n.beliefPriDefault.set(1f * priFactor);
    n.goalPriDefault.set(1f * priFactor);
    n.questionPriDefault.set(1f * priFactor);
    n.questPriDefault.set(1f * priFactor);
    n.activationRate.set(0.5f);
    NAgent a = init.apply(n);
    // new RLBooster(a, HaiQAgent::new, 1);
    // // @Override
    // // protected long matchTime(Task task) {
    // //
    // // //future lookahead to catalyze prediction
    // // return n.time() +
    // // Util.sqr(n.random().nextInt(3)) * n.dur();
    // //
    // // }
    // };
    // {
    // AgentBuilder b = MetaGoal.newController(a);
    // // .in(a::dexterity)
    // // .in(new FloatNormalized(()->a.reward).decay(0.9f))
    // // .in(new FloatNormalized(
    // // ((Emotivation) n.emotion).cycleDTRealMean::getValue)
    // // .decay(0.9f)
    // // )
    // b.in(new FloatNormalized(
    // //TODO use a Long-specific impl of this:
    // new FloatFirstOrderDifference(n::time, () -> n.emotion.deriveTask.getValue().longValue())
    // ).relax(0.99f))
    // // .in(new FloatNormalized(
    // // //TODO use a Long-specific impl of this:
    // // new FirstOrderDifferenceFloat(n::time, () -> n.emotion.conceptFirePremises.getValue().longValue())
    // // ).decay(0.9f)
    // .in(new FloatNormalized(
    // () -> n.emotion.busyVol.getSum()
    // ).relax(0.99f))
    // .out(2, (onOff)->{
    // switch(onOff) {
    // case 0:
    // a.enabled.set(false); //pause
    // break;
    // case 1:
    // a.enabled.set(true); //un-pause
    // break;
    // }
    // })
    // // ).out(
    // // new StepController((x) -> n.time.dur(Math.round(x)), 1, n.dur(), n.dur()*2)
    // // .out(
    // // StepController.harmonic(n.confMin::set, 0.01f, 0.5f)
    // // )//.out(
    // // StepController.harmonic(n.truthResolution::setValue, 0.01f, 0.08f)
    // // ).out(
    // // StepController.harmonic(a.curiosity::setValue, 0.01f, 0.16f)
    // // ).get(n);
    //
    // ;
    // new AgentService(new MutableFloat(1), n, b.get());
    // }
    // n.dtMergeOrChoose.setValue(true);
    // STMLinkage stmLink = new STMLinkage(n, 1, false);
    // LinkClustering linkClusterPri = new LinkClustering(n, Prioritized::priElseZero /* anything temporal */,
    // 32, 128);
    // LinkClustering linkClusterConf = new LinkClustering(n, (t) -> t.isBeliefOrGoal() ? t.conf() : Float.NaN,
    // 4, 16);
    // SpaceGraph.window(col(
    // new STMView.BagClusterVis(n, linkClusterPri.bag),
    // new STMView.BagClusterVis(n, linkClusterConf.bag)
    // ), 800, 600);
    // ConjClustering conjClusterBinput = new ConjClustering(n, BELIEF, (Task::isInput), 8, 32);
    // registered by construction; clusters any belief task into conjunctions
    ConjClustering conjClusterBany = new ConjClustering(n, BELIEF, (t -> true), 8, 64);
    // ConjClustering conjClusterG = new ConjClustering(n, GOAL, (t -> true), 4, 16);
    // ArithmeticIntroduction arith = new ArithmeticIntroduction(4, n);
    // RelationClustering relCluster = new RelationClustering(n,
    // (t)->t.isBelief() && !t.isEternal() && !t.term().isTemporal() ? t.conf() : Float.NaN,
    // 8, 32);
    // ConjClustering conjClusterG = new ConjClustering(n, GOAL, (t->true),8, 32);
    // n.runLater(() -> {
    // // AudioContext ac = new AudioContext();
    // // ac.start();
    // // Clock aclock = new Clock(ac, 1000f / (agentFPS * 0.5f));
    // // new Metronome(aclock, n);
    // new VocalCommentary(null, a);
    // //ac.out.dependsOn(aclock);
    // });
    // /needs tryContent before its safe
    // introspective experience channel; registered by construction
    Inperience inp = new Inperience(n, 12);
    //
    // Abbreviation abb = new Abbreviation(n, "z", 3, 6, 10f, 32);
    // reflect.ReflectSimilarToTaskTerm refSim = new reflect.ReflectSimilarToTaskTerm(16, n);
    // reflect.ReflectClonedTask refTask = new reflect.ReflectClonedTask(16, n);
    // a.trace = true;
    // n.onTask(t -> {
    // if (t instanceof DerivedTask)
    // System.out.println(t);
    // });
    // NInner nin = new NInner(n);
    // nin.start();
    // AgentService mc = MetaGoal.newController(a);
    // init();
    // n.onCycle(nn -> {
    // float lag = narLoop.lagSumThenClear() + a.running().lagSumThenClear();
    // //n.emotion.happy(-lag);
    // //n.emotion.happy(n.emotion.busyPri.getSum()/50000f);
    // });
    // new Anoncepts(8, n);
    // new Implier(2f, a,
    // 1
    // //0,1,4
    // );
    //
    // window(new MatrixView(p.in, (x, gl) -> {
    // Draw.colorBipolar(gl, x);
    // return 0;
    // }), 100, 100);
    // get ready
    System.gc();
    // defer UI setup and agent start until the reasoner loop is running
    n.runLater(() -> {
        chart(a);
        SpaceGraph.window(Vis.top(a.nar()), 800, 800);
        // window(new ConceptView(a.happy,n), 800, 600);
        n.on(a);
        // START AGENT
        Loop aLoop = a.runFPS(agentFPS);
        // n.runLater(() -> {
        // new Deriver(a.fire(), Derivers.deriver(6, 8,
        // "motivation.nal"
        // //, "goal_analogy.nal"
        // ).apply(n).deriver, n); //{
        // });
    });
    Loop loop = n.startFPS(narFPS);
    return n;
}
Use of nars.Op.BELIEF in the project narchy (by automenta).
From the class Recog2D, method conceptTraining:
/**
 * Builds the training/monitoring UI for the recognizer: a reward plot, the
 * camera view, belief-table charts, and one labeled cell per output concept
 * whose background color reflects the concept's current belief frequency.
 *
 * @param tv belief vector holding the output concepts/neurons being trained
 * @param nar reasoner queried for each concept's current belief truth
 * @return the assembled grid surface
 */
Surface conceptTraining(BeliefVector tv, NAR nar) {
    // LinkedHashMap<TaskConcept, BeliefVector.Neuron> out = tv.out;
    Plot2D p;
    int history = 256; // plot window length, in samples
    Gridding g = new Gridding(p = new Plot2D(history, Plot2D.Line).add("Reward", () -> reward), new AspectAlign(new CameraSensorView(sp, this), AspectAlign.Align.Center, sp.width, sp.height), new Gridding(beliefTableCharts(nar, List.of(tv.concepts), 16)), new Gridding(IntStream.range(0, tv.concepts.length).mapToObj(i -> new spacegraph.space2d.widget.text.Label(String.valueOf(i)) {
        @Override
        protected void paintBelow(GL2 gl) {
            Concept c = tv.concepts[i];
            BeliefVector.Neuron nn = tv.neurons[i];
            float freq, conf;
            Truth t = nar.beliefTruth(c, nar.time());
            if (t != null) {
                conf = t.conf();
                freq = t.freq();
            } else {
                // no current belief: fall back to minimum confidence
                conf = nar.confMin.floatValue();
                float defaultFreq = // interpret no-belief as maybe
                0.5f;
                // Float.NaN //use NaN to force learning of negation as separate from no-belief
                freq = defaultFreq;
            }
            // map freq [0..1] to bipolar [-1..+1] for coloring
            Draw.colorBipolar(gl, 2f * (freq - 0.5f));
            float m = 0.5f * conf; // margin; only used by the commented-out overlays below
            Draw.rect(gl, bounds);
            if (tv.verify) {
                float error = nn.error;
                if (error != error) { // NaN error
                    // training phase
                    // Draw.rect(gl, m / 2, m / 2, 1 - m, 1 - m);
                } else {
                    // verification
                    // draw backgroudn/border
                    // gl.glColor3f(error, 1f - error, 0f);
                    //
                    // float fontSize = 0.08f;
                    // gl.glColor3f(1f, 1f, 1f);
                    // Draw.text(gl, c.term().toString(), fontSize, m / 2, 1f - m / 2, 0);
                    // Draw.text(gl, "err=" + n2(error), fontSize, m / 2, m / 2, 0);
                }
            }
        }
    }).toArray(Surface[]::new)));
    final int[] frames = { 0 };
    // per-frame driver: periodically advance to the next image, redraw, and
    // (when enabled) train the neural baseline
    onFrame(() -> {
        if (frames[0]++ % imagePeriod == 0) {
            nextImage();
        }
        redraw();
        // if (neural.get()) {
        // if (nar.time() < trainFrames) {
        outs.expect(image);
        if (neural.get()) {
            train.update(mlpLearn, mlpSupport);
        }
        p.update();
        // s.update();
    });
    return g;
}
Use of nars.Op.BELIEF in the project narchy (by automenta).
From the class ThermostatTest, method main:
// @Test
// @Disabled
/**
 * Opjects/Teacher experiment: trains NARS by demonstrating "up"/"down"
 * episodes on a Thermostat, then validates whether the reasoner can drive the
 * thermostat to the target temperature on its own.
 */
public static void main(String[] args) {
    // void test1() {
    // Param.DEBUG = true;
    final int DUR = 1;
    final int subTrainings = 2; // demonstration repetitions per direction
    // pause between episodes
    final int thinkDurs = 4;
    NAR n = NARS.tmp();
    n.time.dur(DUR);
    n.timeFocus.set(2);
    n.termVolumeMax.set(34);
    // n.freqResolution.set(0.05f);
    // n.confResolution.set(0.01f);
    n.activationRate.set(0.5f);
    n.goalPriDefault.set(1f);
    // n.forgetRate.set(2f);
    // n.deep.set(0.8);
    // n.emotion.want(MetaGoal.Desire, 0.2f);
    // n.want(MetaGoal.Believe, 0.1f);
    // n.want(MetaGoal.Perceive, -0.01f);
    float exeThresh = 0.51f; // execution threshold used during validation
    // new ArithmeticIntroduction(8, n);
    new ConjClustering(n, BELIEF, (t) -> true, 8, 32);
    // n.priDefault(BELIEF, 0.3f);
    // n.logPriMin(System.out, 0.5f);
    // n.logWhen(System.out, false, true, true);
    // n.log();
    // shared flag toggling between training (demonstration) and validation modes
    boolean[] training = new boolean[] { true };
    Opjects op = new Opjects(n) {
        // {
        // pretend = true;
        // }
        @Override
        @Nullable
        protected synchronized Object invoked(Object obj, Method wrapped, Object[] args, Object result) {
            // during training, give the reasoner time to perceive each invocation
            if (training[0]) {
                n.synch();
                // n.runLater(nn -> nn.run(DUR)); //queue some thinking cycles
            }
            Object y = super.invoked(obj, wrapped, args, result);
            if (training[0])
                n.run(DUR * thinkDurs);
            return y;
        }
    };
    Teacher<Thermostat> env = new Teacher<>(op, Thermostat.class);
    Consumer<Thermostat> hotToCold = Thermostat.change(true, false), coldToCold = Thermostat.change(false, false), coldToHot = Thermostat.change(false, true), hotToHot = Thermostat.change(true, true);
    Predicate<Thermostat> isCold = x -> x.is() == Thermostat.cold;
    Predicate<Thermostat> isHot = x -> x.is() == Thermostat.hot;
    n.logWhen(System.out, true, true, true);
    boolean stupid = true;
    // single-pass loop (while(false)); the label allows a retry via continue if desired
    training: do {
        training[0] = true;
        op.exeThresh.set(1f); // disable autonomous execution while demonstrating
        for (int i = 0; i < subTrainings; i++) {
            // demonstrate "down": reach cold from either starting state
            for (Consumer<Thermostat> condition : new Consumer[] { hotToCold, coldToCold }) {
                System.out.println("EPISODE START");
                n.clear();
                env.teach("down", condition, (Thermostat x) -> {
                    // x.up(); //demonstrate no change
                    // x.report();
                    n.run(1);
                    while (x.is() > Thermostat.cold) {
                        x.down();
                        n.run(1);
                    }
                    x.report();
                    n.run(1);
                    // x.down(); //demonstrate no change
                    // x.report();
                }, isCold);
                System.out.println("EPISODE END");
                n.run(thinkDurs * n.dur());
                // n.concept("do(down)").print();
            }
            // demonstrate "up": reach hot from either starting state
            for (Consumer<Thermostat> condition : new Consumer[] { coldToHot, hotToHot }) {
                System.out.println("EPISODE START");
                n.clear();
                env.teach("up", condition, x -> {
                    // x.down(); //demonstrate no change
                    // x.report();
                    n.run(1);
                    while (!isHot.test(x)) {
                        x.up();
                        n.run(1);
                    }
                    x.report();
                    n.run(1);
                    // x.up(); //demonstrate no change
                    // x.report();
                }, isHot);
                System.out.println("EPISODE END");
                n.run(thinkDurs * n.dur());
            }
        }
        System.out.println("VALIDATING");
        System.out.println();
        // switch to validation: let NARS act autonomously
        training[0] = false;
        op.exeThresh.set(exeThresh);
        // n.log();
        // n.run(100);
        // new Implier(n, new float[] { 1f },
        // $.$("a_Thermostat(down,())"),
        // $.$("a_Thermostat(up,())")
        // //$.$("a_Thermostat(is,(),#x)")
        // );
        // try {
        // make cold
        // n.input(new NALTask($.$("a_Thermostat(should,(),0)"),
        // BELIEF, $.t(1f, 0.99f),
        // n.time(), n.time(), n.time()+1000,
        // n.time.nextInputStamp()).pri(1f));
        Thermostat t = env.x;
        {
            // n.clear();
            // start hot (3), target cold (0); want "cold", anti-want "hot"
            t.is(3);
            t.should(0);
            n.run(thinkDurs * n.dur());
            Term cold = $.$$("is(a_Thermostat,0)");
            // Term cold = $.$safe("(a_Thermostat(is,(),0) &| --a_Thermostat(is,(),3))");
            Term hot = $.$$("is(a_Thermostat,3)");
            Truth goalTruth = $.t(1f, 0.9f);
            DurService xPos = n.wantWhile(cold, goalTruth, new TaskConceptLogger(n, (w) -> (t.current != t.target)));
            DurService xNeg = n.wantWhile(hot, goalTruth.neg(), new TaskConceptLogger(n, (w) -> t.current != t.target));
            n.run(1);
            // give it up to 16 periods to reach the target
            for (int i = 0; i < 16 && xPos.isOn(); i++) {
                int period = 100;
                // t.report();
                // n.run(period, pause);
                n.run(period);
            }
            xPos.off();
            xNeg.off();
            t.report();
            if (t.is() == t.should()) {
                System.out.println("good job nars!");
                n.believe($.$$("(learn(up) && learn(down))"), Tense.Present);
                stupid = false;
            } else {
                System.out.println("bad job nars! try again");
                n.believe($.$$("(--learn(up) && --learn(down))"), Tense.Present);
            }
            // n.input(new NALTask($.$safe("a_Thermostat(is,(),0)"),
            // GOAL, $.t(1f, 0.95f),
            // n.time(), n.time(), n.time() + periods,
            // n.time.nextInputStamp()).pri(1f));
            // n.input(new NALTask($.$safe("a_Thermostat(is,(),3)"),
            // GOAL, $.t(0f, 0.95f),
            // n.time(), n.time(), n.time() + periods,
            // n.time.nextInputStamp()).pri(1f));
        }
    } while (false);
    // n.run(thinkDurs * n.dur());
    {
        // n.input(new NALTask($.$safe("a_Thermostat(is,(),3)"),
        // GOAL, $.t(0f, 0.99f),
        // n.time(), n.time(), n.time()+1000,
        // n.time.nextInputStamp()).pri(1f));
    }
    // while (t.is() != t.should()) {
    // int period = 1000;
    // t.report();
    // n.run(period);
    // }
    // dump all non-input (derived) tasks for inspection
    n.tasks().forEach(t -> {
        if (!t.isInput())
            System.out.println(t);
    });
}
Aggregations