Use of nars.test.agent.Line1DSimplest in project narchy by automenta.
The class Line1DCalibrate, method main:
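This example calibrates a Line1DSimplest agent: the target follows a sine wave, the reward is logged every frame, and each derived goal on an action concept is checked for whether its polarity matches the direction the agent actually needs to move. The snippet assumes roughly the following imports; the package paths are inferred from narchy's layout and are not shown on the original page:
import java.util.Collection;
import jcog.Util;
import nars.NAR;
import nars.NARS;
import nars.Param;
import nars.task.DerivedTask;
import nars.test.agent.Line1DSimplest;
import nars.truth.PreciseTruth;
import static java.lang.Math.PI;
import static jcog.Texts.n2; // assumed location of the n2/n4 number formatters
import static jcog.Texts.n4;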
public static void main(String[] args) {
Param.DEBUG = true;
NAR n = NARS.threadSafe();
// new STMTemporalLinkage(n, 2, false);
n.time.dur(1);
n.termVolumeMax.set(16);
// n.beliefConfidence(0.9f);
// n.goalConfidence(0.5f);
// n.onCycle((nn) -> {
// nn.stats(System.out);
// });
// n.truthResolution.setValue(0.05f);
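// subclass the environment to log the reward together with the current input (i) and output (o) values each time the agent acts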
Line1DSimplest a = new Line1DSimplest() {
// final FloatAveraged rewardAveraged = new FloatAveraged(()->super.act(), 10);
@Override
protected float act() {
float r = super.act();
System.out.println("reward: " + now + "\t^" + n2(i.floatValue()) + "\t@" + n2(o.floatValue()) + "\t\t= " + r);
return r;
}
};
// target oscillation frequency, in cycles per time unit
float tHz = 0.05f;
// quantization step for the target value, in 0..1
float yResolution = 0.1f;
float periods = 16;
// final int runtime = Math.round(periods / tHz);
// Set.of(a.up.term(), a.down.term());
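// the terms of the agent's action concepts, used to filter derived goals below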
Collection actions = a.actions.values();
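// listen for derived tasks: for each derived goal on an action, compare the goal's polarity with the direction needed to reach the target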
n.onTask(t -> {
if (t instanceof DerivedTask) {
if (t.isGoal()) {
if (actions.contains(t.term())) {
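// dir > 0: the goal desires the action; dir < 0: it desires the opposite (frequency relative to 0.5)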
float dir = new PreciseTruth(t.freq(), t.evi(a.nar().time(), a.nar().dur()), false).freq() - 0.5f;
// TEST POLARITY
float i = a.i.floatValue();
float o = a.o.floatValue();
float neededDir = (i - o);
boolean good = Math.signum(neededDir) == Math.signum(dir);
/*if (!good)*/ // filter disabled: log every polarity check, not only mismatches
System.err.println(n4(dir) + "\t" + good + " " + i + " <-? " + o);
System.err.println(t.proof());
System.out.println();
}
if (t.isGoal()) // always true on this branch: prints the proof of every derived goal (action goals print twice)
System.err.println(t.proof());
} else {
// System.err.println(t.toString(n));
}
}
});
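// agent parameters: per-frame movement speed, sensor resolution, and baseline curiosity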
a.speed.set(yResolution);
// a.up.resolution.setValue(yResolution);
// a.down.resolution.setValue(yResolution);
a.in.resolution(yResolution);
a.curiosity.set(0.1f);
// a.in.beliefs().capacity(0, 100, a.nar);
// a.out.beliefs().capacity(0, 100, a.nar);
// a.out.goals().capacity(0, 100, a.nar);
// Line1DTrainer trainer = new Line1DTrainer(a);
// new RLBooster(a, new HaiQAgent(), 5);
// ImplicationBooster.implAccelerator(a);
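// each frame, move the target along a sine wave of frequency tHz, rounded to yResolution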
a.onFrame((z) -> {
a.target(
// Math.signum(Math.sin(a.nar.time() * tHz * 2 * PI)) > 0 ? 1f : -1f (square-wave alternative)
Util.round((float) (0.5f + 0.5f * Math.sin(a.nar().time() * tHz * 2 * PI)), yResolution));
// Util.pause(1);
});
// a.runCycles(runtime);
// new Thread(() -> {
// //NAgentX.chart(a);
// int history = 800;
// window(
// row(
// conceptPlot(a.nar, Lists.newArrayList(
// () -> (float) a.i.floatValue(),
// a.o,
// //a.out.feedback.current!=null ? a.out.feedback.current.freq() : 0f,
// () -> a.reward
// //() -> a.rewardSum
// )
// ,
// history),
// col(
// new Vis.EmotionPlot(history, a),
// new ReflectionSurface<>(a),
// Vis.beliefCharts(history,
// Iterables.concat(a.sensors.keySet(), a.actions.keySet()), a.nar)
// )
// )
// , 900, 900);
//
// }).start();
// n.startFPS(100);
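// run the reasoner synchronously for 2000 cycles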
n.run(2000);
// n.tasks().forEach(x -> {
// if (x.isBelief() && x.op()==IMPL) {
// System.out.println(x.proof());
// }
// });
}
Use of nars.test.agent.Line1DSimplest in project narchy by automenta.
The class Line1DQ, method main:
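This example runs Line1DSimplest against a pseudo-random, piecewise-constant target and opens the standard NAgentX chart view; it assumes the same imports as the first example.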
public static void main(String[] args) {
// Param.DEBUG = true;
NAR n = new NARS().get();
// n.log();
n.time.dur(5);
Line1DSimplest a = new Line1DSimplest();
a.curiosity.set(0.01f);
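// each frame, set a pseudo-random target in 0..1 that changes every 200 time units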
a.onFrame((z) -> {
a.target(Util.unitize(
// (float) (0.5f * (Math.sin(n.time() / 50f) + 1f)) (smooth sine alternative)
(Math.abs(3484 ^ n.time() / 200) % 11) / 10.0f));
});
// Arkancide a = new Arkancide(n, false, true);
// Tetris a = new Tetris(n, 6, 10, 4);
// a.onFrame(x -> Util.sleep(1));
// a.trace = true;
// Line1DTrainer trainer = new Line1DTrainer(a);
// new RLBooster(a, new HaiQAgent(), 2); n.deriver.rate.setValue(0); a.curiosity.setValue(0f);
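// open the standard NAgentX chart window for this agent (assumes a display is available)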
NAgentX.chart(a);
// int h = q.ae.W[0].length;
// int w = q.ae.W.length;
// window( grid(
// new MatrixView(w, h, MatrixView.arrayRenderer(q.ae.W)),
// new MatrixView(w, 1, MatrixView.arrayRenderer(q.ae.y)),
// new MatrixView(w, 1, MatrixView.arrayRenderer(q.ae.z))
// ), 500, 500);
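// evaluation loop: the per-period run is commented out, so grandTotal is never accumulated and prints as 0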
float grandTotal = 0;
for (int i = 0; i < 100; i++) {
int period = 1000;
// a.runCycles(period);
}
System.err.println(" grand total = " + grandTotal);
}