Use of nars.video.CameraSensorView in project narchy by automenta.
The class TrackXY, method main:
public static void main(String[] args) {
    boolean nars = true;
    boolean rl = false;

    int dur = 1;

    NARS nb = new NARS()
            .exe(new UniExec(64))
            .time(new CycleTime().dur(dur))
            .index(new CaffeineIndex(32 * 1024)
                    // alternative: new HijackConceptIndex(4 * 1024, 4)
            );
    NAR n = nb.get();
    n.termVolumeMax.set(20);
    // n.priDefault(BELIEF, 0.2f);
    // n.priDefault(GOAL, 0.5f);
    n.activationRate.set(0.2f);
    // n.forgetRate.set(0.9f);

    TrackXY t = new TrackXY(4, 4);
    n.on(t);

    int experimentTime = 8048;
    n.synch();

    if (rl) {
        new RLBooster(t,
                HaiQae::new, // alternatives: HaiQ::new, RandomAgent::new
                1);
        t.curiosity.set(0);
    }

    if (nars) {
        // Param.DEBUG = true;
        // n.log();
        // for (String action : new String[]{"up", "down", "left", "right"}) {
        //     n.goal($.the(action), Tense.Present, 1f, 0.1f);
        // }

        Deriver d = new Deriver(Derivers.rules(1, 8, n,
                // "list.nal",
                "motivation.nal"), n);
        d.conceptsPerIteration.set(32);

        n.timeFocus.set(2);

        ConjClustering cjB = new ConjClustering(n, BELIEF,
                (tt) -> tt.isInput(), // alternative: (tt) -> true
                4, 16);
        // ConjClustering cjG = new ConjClustering(n, GOAL,
        //         (tt) -> true, // or: (tt) -> tt.isInput()
        //         5, 16);

        // Implier ii = new Implier(t, 0, 1);
        // ArithmeticIntroduction ai = new ArithmeticIntroduction(4, n);

        window(new Gridding(new AutoSurface(d), new AutoSurface(cjB)), 400, 300);

        // print the derivation proof of every derived goal
        n.onTask(tt -> {
            if (tt instanceof DerivedTask && tt.isGoal()) {
                System.out.println(tt.proof());
            }
        });
    }

    // n.log();
    // n.startFPS(fps);
    // t.runFPS(fps);

    n.onCycle(t);

    // accumulate the agent's reward over all cycles
    final double[] rewardSum = { 0 };
    n.onCycle(() -> {
        rewardSum[0] += t.reward;
    });

    n.runLater(() -> {
        window(Vis.top(n), 800, 250);
        NAgentX.chart(t);

        // camera view with a translucent red square marking the target cell (sx, sy)
        window(new CameraSensorView(t.cam, n) {
            @Override
            protected void paint(GL2 gl, int dtMS) {
                super.paint(gl, dtMS);
                RectFloat2D at = cellRect(t.sx, t.sy, 0.5f, 0.5f);
                gl.glColor4f(1, 0, 0, 0.9f);
                Draw.rect(gl, at.move(x(), y(), 0.01f));
            }
        }.withControls(), 800, 800);
    });

    n.run(experimentTime);

    // n.startFPS(10f);
    // t.runFPS(10f);
    // System.out.println(n4(rewardSum[0] / n.time()) + " avg reward");
    // System.exit(0);
}
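The commented-out report at the end of main shows how the accumulated reward was meant to be summarized. A minimal sketch of that calculation, assuming only the rewardSum accumulator and the cycle clock from the snippet above (plain Java formatting is used here in place of the project's n4 helper):

// Average per-cycle reward, as in the commented-out report above:
// rewardSum[0] is the total reward accumulated by the onCycle handler,
// and n.time() is the cycle count after n.run(experimentTime).
double avgReward = rewardSum[0] / n.time();
System.out.printf("%.4f avg reward%n", avgReward);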
Use of nars.video.CameraSensorView in project narchy by automenta.
The class Recog2D, method conceptTraining:
Surface conceptTraining(BeliefVector tv, NAR nar) {
    // LinkedHashMap<TaskConcept, BeliefVector.Neuron> out = tv.out;

    Plot2D p;
    int history = 256;

    Gridding g = new Gridding(
            p = new Plot2D(history, Plot2D.Line).add("Reward", () -> reward),
            new AspectAlign(new CameraSensorView(sp, this),
                    AspectAlign.Align.Center, sp.width, sp.height),
            new Gridding(beliefTableCharts(nar, List.of(tv.concepts), 16)),
            new Gridding(IntStream.range(0, tv.concepts.length).mapToObj(i ->
                    new spacegraph.space2d.widget.text.Label(String.valueOf(i)) {
                @Override
                protected void paintBelow(GL2 gl) {
                    Concept c = tv.concepts[i];
                    BeliefVector.Neuron nn = tv.neurons[i];

                    float freq, conf;
                    Truth t = nar.beliefTruth(c, nar.time());
                    if (t != null) {
                        conf = t.conf();
                        freq = t.freq();
                    } else {
                        conf = nar.confMin.floatValue();
                        // interpret no-belief as "maybe" (0.5f);
                        // Float.NaN would instead force learning of negation
                        // as separate from no-belief
                        freq = 0.5f;
                    }

                    Draw.colorBipolar(gl, 2f * (freq - 0.5f));
                    float m = 0.5f * conf;
                    Draw.rect(gl, bounds);

                    if (tv.verify) {
                        float error = nn.error;
                        if (error != error) { // NaN error: training phase
                            // Draw.rect(gl, m / 2, m / 2, 1 - m, 1 - m);
                        } else { // verification phase
                            // draw background/border:
                            // gl.glColor3f(error, 1f - error, 0f);
                            // float fontSize = 0.08f;
                            // gl.glColor3f(1f, 1f, 1f);
                            // Draw.text(gl, c.term().toString(), fontSize, m / 2, 1f - m / 2, 0);
                            // Draw.text(gl, "err=" + n2(error), fontSize, m / 2, m / 2, 0);
                        }
                    }
                }
            }).toArray(Surface[]::new)));

    final int[] frames = { 0 };
    onFrame(() -> {
        if (frames[0]++ % imagePeriod == 0) {
            nextImage();
        }
        redraw();

        // if (neural.get()) {
        //     if (nar.time() < trainFrames) {
        outs.expect(image);
        if (neural.get()) {
            train.update(mlpLearn, mlpSupport);
        }

        p.update();
        // s.update();
    });

    return g;
}
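The paintBelow override above maps a concept's belief frequency onto a bipolar color: freq is shifted from [0, 1] to [-1, +1], so the no-belief default of 0.5 renders as neutral. A small standalone demo of just that arithmetic (Draw.colorBipolar itself is assumed to map the [-1, +1] value onto a two-color gradient):

// Standalone demo of the freq -> bipolar mapping used in paintBelow.
public class BipolarMappingDemo {
    static float bipolar(float freq) {
        return 2f * (freq - 0.5f);
    }
    public static void main(String[] args) {
        System.out.println(bipolar(0.0f)); // -1.0: fully negative belief
        System.out.println(bipolar(0.5f)); //  0.0: neutral / no belief
        System.out.println(bipolar(1.0f)); // +1.0: fully positive belief
    }
}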