Usage of nars.task.DerivedTask in project narchy by automenta.
From class Line1DCalibrate, method main:
/**
 * Calibration/diagnostic demo for the Line1DSimplest agent: drives the target
 * with a slow sinusoid and, for every derived action goal, prints whether the
 * goal's polarity agrees with the direction the agent actually needs to move.
 */
public static void main(String[] args) {
// enable derivation bookkeeping so t.proof() below prints full derivation traces
Param.DEBUG = true;
NAR n = NARS.threadSafe();
// new STMTemporalLinkage(n, 2, false);
n.time.dur(1);
n.termVolumeMax.set(16);
// n.beliefConfidence(0.9f);
// n.goalConfidence(0.5f);
// n.onCycle((nn) -> {
// nn.stats(System.out);
// });
// n.truthResolution.setValue(0.05f);
// subclass that logs time, input (i), output (o) and reward on every action
Line1DSimplest a = new Line1DSimplest() {
// final FloatAveraged rewardAveraged = new FloatAveraged(()->super.act(), 10);
@Override
protected float act() {
float r = super.act();
System.out.println("reward: " + now + "\t^" + n2(i.floatValue()) + "\t@" + n2(o.floatValue()) + "\t\t= " + r);
return r;
}
};
// target oscillation frequency, in cycles per time unit
float tHz = 0.05f;
// quantization step of the target value, in 0..1.0
float yResolution = 0.1f;
float periods = 16;
// final int runtime = Math.round(periods / tHz);
// Set.of(a.up.term(), a.down.term());
Collection actions = a.actions.values();
// inspect every derived goal on an action concept: compare the goal's
// direction (freq relative to 0.5) against the direction needed to move
// the output o toward the input i, and report whether the polarity matches
n.onTask(t -> {
if (t instanceof DerivedTask) {
if (t.isGoal()) {
if (actions.contains(t.term())) {
// project the goal's truth to the current time; freq > 0.5 means positive direction
float dir = new PreciseTruth(t.freq(), t.evi(a.nar().time(), a.nar().dur()), false).freq() - 0.5f;
// TEST POLARITY
float i = a.i.floatValue();
float o = a.o.floatValue();
float neededDir = (i - o);
boolean good = Math.signum(neededDir) == Math.signum(dir);
/*if (!good)*/
System.err.println(n4(dir) + "\t" + good + " " + i + " <-? " + o);
System.err.println(t.proof());
System.out.println();
}
if (t.isGoal())
System.err.println(t.proof());
} else {
// System.err.println(t.toString(n));
}
}
});
a.speed.set(yResolution);
// a.up.resolution.setValue(yResolution);
// a.down.resolution.setValue(yResolution);
a.in.resolution(yResolution);
a.curiosity.set(0.1f);
// a.in.beliefs().capacity(0, 100, a.nar);
// a.out.beliefs().capacity(0, 100, a.nar);
// a.out.goals().capacity(0, 100, a.nar);
// Line1DTrainer trainer = new Line1DTrainer(a);
// new RLBooster(a, new HaiQAgent(), 5);
// ImplicationBooster.implAccelerator(a);
// each frame, move the target along a sinusoid quantized to yResolution
a.onFrame((z) -> {
a.target(// Math.signum(Math.sin(a.nar.time() * tHz * 2 * PI) ) > 0 ? 1f : -1f
Util.round((float) (0.5f + 0.5f * Math.sin(a.nar().time() * tHz * 2 * PI)), yResolution));
// Util.pause(1);
});
// a.runCycles(runtime);
// new Thread(() -> {
// //NAgentX.chart(a);
// int history = 800;
// window(
// row(
// conceptPlot(a.nar, Lists.newArrayList(
// () -> (float) a.i.floatValue(),
// a.o,
// //a.out.feedback.current!=null ? a.out.feedback.current.freq() : 0f,
// () -> a.reward
// //() -> a.rewardSum
// )
// ,
// history),
// col(
// new Vis.EmotionPlot(history, a),
// new ReflectionSurface<>(a),
// Vis.beliefCharts(history,
// Iterables.concat(a.sensors.keySet(), a.actions.keySet()), a.nar)
// )
// )
// , 900, 900);
//
// }).start();
// n.startFPS(100);
// run the experiment synchronously for a fixed number of cycles
n.run(2000);
// n.tasks().forEach(x -> {
// if (x.isBelief() && x.op()==IMPL) {
// System.out.println(x.proof());
// }
// });
}
Usage of nars.task.DerivedTask in project narchy by automenta.
From class TrackXY, method main:
/**
 * TrackXY experiment: a NAR (and/or an RL baseline) learns to steer a cursor
 * toward a target on a 4x4 grid. Prints derived goals with their proofs and
 * opens live views of the camera, agent charts and deriver controls.
 */
public static void main(String[] args) {
// experiment toggles: NARS deriver and/or reinforcement-learning baseline
boolean nars = true;
boolean rl = false;
int dur = 1;
NARS nb = new NARS().exe(new UniExec(64)).time(new CycleTime().dur(dur)).index(// new HijackConceptIndex(4 * 1024, 4)
new CaffeineIndex(32 * 1024));
NAR n = nb.get();
n.termVolumeMax.set(20);
// n.priDefault(BELIEF, 0.2f);
// n.priDefault(GOAL, 0.5f);
n.activationRate.set(0.2f);
// n.forgetRate.set(0.9f);
TrackXY t = new TrackXY(4, 4);
n.on(t);
// total cycles to run, see n.run(experimentTime) at the end
int experimentTime = 8048;
n.synch();
if (rl) {
// RL baseline takes over action selection; disable curiosity noise
new RLBooster(t, // HaiQ::new,
HaiQae::new, // RandomAgent::new,
1);
t.curiosity.set(0);
}
if (nars) {
// Param.DEBUG = true;
// n.log();
// for (String action : new String[]{"up", "down", "left", "right"}) {
// //n.goal($.the(action), Tense.Present, 0f, 0.1f);
// n.goal($.the(action), Tense.Present, 1f, 0.1f);
// }
// deriver over NAL levels 1..8 using only the motivation rule set
Deriver d = new Deriver(Derivers.rules(// 1,
1, 8, n, // "list.nal",
"motivation.nal"), n);
d.conceptsPerIteration.set(32);
n.timeFocus.set(2);
// cluster co-occurring input beliefs into conjunctions
ConjClustering cjB = new ConjClustering(n, BELIEF, // (tt)->true,
(tt) -> tt.isInput(), 4, 16);
// ConjClustering cjG = new ConjClustering(n, GOAL,
// (tt)->true,
// //(tt) -> tt.isInput(),
// 5, 16);
// Implier ii = new Implier(t , 0, 1);
// ArithmeticIntroduction ai = new ArithmeticIntroduction(4, n);
// control panel exposing deriver + clustering parameters
window(new Gridding(new AutoSurface(d), new AutoSurface(cjB)), 400, 300);
// print every derived goal with its derivation proof
n.onTask(tt -> {
if (tt instanceof DerivedTask && tt.isGoal()) {
System.out.println(tt.proof());
}
});
}
// n.log();
// n.startFPS(fps);
// t.runFPS(fps);
n.onCycle(t);
// single-cell array so the cycle lambda below can accumulate the reward
final double[] rewardSum = { 0 };
n.onCycle(() -> {
rewardSum[0] += t.reward;
});
n.runLater(() -> {
window(Vis.top(n), 800, 250);
NAgentX.chart(t);
// camera view with the agent's current cell (sx, sy) highlighted in red
window(new CameraSensorView(t.cam, n) {
@Override
protected void paint(GL2 gl, int dtMS) {
super.paint(gl, dtMS);
RectFloat2D at = cellRect(t.sx, t.sy, 0.5f, 0.5f);
gl.glColor4f(1, 0, 0, 0.9f);
Draw.rect(gl, at.move(x(), y(), 0.01f));
}
}.withControls(), 800, 800);
});
n.run(experimentTime);
// n.startFPS(10f);
// t.runFPS(10f);
// System.out.println(
//
// n4(rewardSum[0] / n.time()) + " avg reward");
// System.exit(0);
}
Usage of nars.task.DerivedTask in project narchy by automenta.
From class Taskify, method test:
/**
 * Materializes the conclusion accumulated in a {@link Derivation} into a
 * {@link DerivedTask}: scales evidence, normalizes the derived term, dithers
 * the occurrence interval, assigns priority and cause, and submits the task,
 * counting the specific failure reason when any stage rejects it.
 * <p>
 * Note: the return value here shouldn't matter, so it always returns true.
 */
@Override
public boolean test(Derivation d) {
    // scale the conclusion truth's evidence; abort early if it falls
    // below the derivation's minimum evidence threshold
    Truth truth = d.concTruth;
    if (truth != null) {
        float scaledEvi = truth.evi() * d.concEviFactor;
        if (d.eviMin > scaledEvi) {
            d.use(Param.TTL_EVI_INSUFFICIENT);
            return true;
        }
        truth = truth.withEvi(scaledEvi);
    }

    // resolve the derived term through the anonymization map and normalize it
    Term term = d.anon.get(d.derivedTerm.get()).normalize();

    long[] occurrence = d.concOcc;
    byte punctuation = d.concPunc;
    assert (punctuation != 0) : "no punctuation assigned";

    DerivedTask task = (DerivedTask) Task.tryTask(term, punctuation, truth, (ct, tr) -> {
        // snap the occurrence interval onto the derivation's time grid
        int timeDither = d.ditherTime;
        long occStart = Tense.dither(occurrence[0], timeDither);
        long occEnd = Tense.dither(occurrence[1], timeDither);
        assert (occEnd >= occStart) : "task has reversed occurrence: " + occStart + ".." + occEnd;
        if (Param.DEBUG)
            return new DebugDerivedTask(ct, punctuation, tr, occStart, occEnd, d);
        else
            return new DerivedTask(ct, punctuation, tr, occStart, occEnd, d);
    });

    if (task == null) {
        d.nar.emotion.deriveFailTaskify.increment();
        return spam(d, Param.TTL_DERIVE_TASK_FAIL);
    }

    // reject conclusions identical (within frequency resolution) to a parent premise
    if (same(task, d._task, d.freqRes) || (d._belief != null && same(task, d._belief, d.freqRes))) {
        d.nar.emotion.deriveFailParentDuplicate.increment();
        return spam(d, Param.TTL_DERIVE_TASK_SAME);
    }

    // single-premise derivations are marked cyclic to limit re-derivation
    if (d.single)
        task.setCyclic(true);

    // NaN priority means the prioritizer rejected the task
    float pri = d.deriver.prioritize.pri(task, d);
    if (pri != pri) {
        d.nar.emotion.deriveFailPrioritize.increment();
        return spam(d, Param.TTL_DERIVE_TASK_PRIORITIZE);
    }
    task.priSet(pri);
    task.cause = ArrayUtils.addAll(d.parentCause, channel.id);

    if (d.add(task) != task) {
        // an equivalent task was already produced within this derivation
        d.nar.emotion.deriveFailDerivationDuplicate.increment();
        spam(d, Param.TTL_DERIVE_TASK_REPEAT);
    } else {
        if (Param.DEBUG)
            task.log(channel.ruleString);
        d.use(Param.TTL_DERIVE_TASK_SUCCESS);
    }
    return true;
}
Usage of nars.task.DerivedTask in project narchy by automenta.
From class WorkerMultiExecTest, method testValueDerivationBranches:
/**
 * Measures how derivation effort distributes across punctuation types when
 * only goal production (Desire) is positively valued and every other
 * meta-goal is penalized. Two competing independent processes run: NAL1
 * rules apply to one, and NAL6 rules apply to the other.
 */
@Test
public void testValueDerivationBranches() throws Narsese.NarseseException {
    // int threads = 1;
    // Exec exe = new MultiExec(32, threads, 2);
    Exec executor = new UniExec(32);
    NAR nar = new NARS().deriverAdd(1, 1).deriverAdd(6, 6).exe(executor).get();

    // value everything at -1 except goal production
    Arrays.fill(nar.emotion.want, -1);
    nar.emotion.want(MetaGoal.Desire, +1);

    Exec.Revaluator revaluator = new Focus.DefaultRevaluator();

    int cycles = 100;

    // tally derived tasks by punctuation byte
    ByteIntHashMap derivedByPunc = new ByteIntHashMap();
    nar.onTask(task -> {
        if (task instanceof DerivedTask)
            derivedByPunc.addToValue(task.punc(), 1);
    });

    nar.log();
    nar.input("(x==>y).");
    nar.input("f:a.");
    for (int cycle = 0; cycle < cycles; cycle++) {
        nar.input("f:b. :|:");
        nar.input("x! :|:");
        nar.run(1);
        revaluator.update(nar);
    }

    System.out.println(derivedByPunc);

    // report each cause whose accumulated goal traffic is non-negligible
    nar.causes.forEach(cause -> {
        double goalTotal = Util.sum((ToDoubleFunction<Traffic>) (t -> t.total), cause.goal);
        if (goalTotal > Double.MIN_NORMAL)
            System.out.println(Arrays.toString(cause.goal) + "\t" + cause);
    });
}
Aggregations