Use of nars.$ in project narchy by automenta.
The class NAgentX, method runRT:
public static NAR runRT(Function<NAR, NAgent> init, float narFPS, float agentFPS) {
// The.Subterms.the =
// The.Subterms.CaffeineSubtermBuilder.get();
// The.Subterms.HijackSubtermBuilder.get();
// The.Subterms.SoftSubtermBuilder.get();
// The.Compound.the =
// The.Compound.
// //SoftCompoundBuilder.get();
// CaffeineCompoundBuilder.get();
float clockFPS = narFPS; //agentFPS;
RealTime clock = clockFPS >= 10 / 2f ? /* nyquist threshold between decisecond (0.1) and centisecond (0.01) clock resolution */
new RealTime.CS(true) : new RealTime.DSHalf(true);
clock.durFPS(clockFPS);
// Function<NAR, PrediTerm<Derivation>> deriver = Deriver.deriver(8
// , "motivation.nal"
// //., "relation_introduction.nal"
// );
// int THREADS = Math.max(1, Runtime.getRuntime().availableProcessors() - 1);
// Predicate<Activate> randomBool = (a) -> ThreadLocalRandom.current().nextBoolean();
// exe.add(new FocusExec(), (x) -> true);
// exe.add(new FocusExec() {
// {
// concepts.setCapacity(32);
// }
// },
// (x) -> true);
NAR n = new NARS()
        .exe(new WorkerMultiExec(
                //new Focus.DefaultRevaluator(),
                new Focus.AERevaluator(new XoRoShiRo128PlusRandom(1)), 256, 8192) {
            {
                Util.setExecutor(this);
            }
        })
        .time(clock)
        .deriverAdd(1, 1).deriverAdd(2, 2).deriverAdd(3, 3).deriverAdd(5, 5).deriverAdd(6, 8)
        .deriverAdd("motivation.nal")
        .index(new CaffeineIndex(
                2500 * 1024, //800 * 1024, //Integer.MAX_VALUE,
                c -> {
                    return (int) Math.ceil(c.voluplexity());
                    //return Math.round(
                    //        ((float) c.voluplexity())
                    //        / (1 + 100 * (c.termlinks().priSum() + c.tasklinks().priSum()))
                    //        //(c.beliefs().size() + c.goals().size()))
                    //);
                }))
        .get();
// n.defaultWants();
n.dtMergeOrChoose.set(true);
// 0.5f //nyquist
n.dtDither.set(1f);
// n.timeFocus.set(4);
n.confMin.set(0.01f);
n.freqResolution.set(0.01f);
n.termVolumeMax.set(40);
n.beliefConfDefault.set(0.9f);
n.goalConfDefault.set(0.9f);
float priFactor = 0.2f;
n.beliefPriDefault.set(1f * priFactor);
n.goalPriDefault.set(1f * priFactor);
n.questionPriDefault.set(1f * priFactor);
n.questPriDefault.set(1f * priFactor);
n.activationRate.set(0.5f);
NAgent a = init.apply(n);
// new RLBooster(a, HaiQAgent::new, 1);
// // @Override
// // protected long matchTime(Task task) {
// //
// // //future lookahead to catalyze prediction
// // return n.time() +
// // Util.sqr(n.random().nextInt(3)) * n.dur();
// //
// // }
// };
// {
// AgentBuilder b = MetaGoal.newController(a);
// // .in(a::dexterity)
// // .in(new FloatNormalized(()->a.reward).decay(0.9f))
// // .in(new FloatNormalized(
// // ((Emotivation) n.emotion).cycleDTRealMean::getValue)
// // .decay(0.9f)
// // )
// b.in(new FloatNormalized(
// //TODO use a Long-specific impl of this:
// new FloatFirstOrderDifference(n::time, () -> n.emotion.deriveTask.getValue().longValue())
// ).relax(0.99f))
// // .in(new FloatNormalized(
// // //TODO use a Long-specific impl of this:
// // new FirstOrderDifferenceFloat(n::time, () -> n.emotion.conceptFirePremises.getValue().longValue())
// // ).decay(0.9f)
// .in(new FloatNormalized(
// () -> n.emotion.busyVol.getSum()
// ).relax(0.99f))
// .out(2, (onOff)->{
// switch(onOff) {
// case 0:
// a.enabled.set(false); //pause
// break;
// case 1:
// a.enabled.set(true); //un-pause
// break;
// }
// })
// // ).out(
// // new StepController((x) -> n.time.dur(Math.round(x)), 1, n.dur(), n.dur()*2)
// // .out(
// // StepController.harmonic(n.confMin::set, 0.01f, 0.5f)
// // )//.out(
// // StepController.harmonic(n.truthResolution::setValue, 0.01f, 0.08f)
// // ).out(
// // StepController.harmonic(a.curiosity::setValue, 0.01f, 0.16f)
// // ).get(n);
//
// ;
// new AgentService(new MutableFloat(1), n, b.get());
// }
// n.dtMergeOrChoose.setValue(true);
// STMLinkage stmLink = new STMLinkage(n, 1, false);
// LinkClustering linkClusterPri = new LinkClustering(n, Prioritized::priElseZero /* anything temporal */,
// 32, 128);
// LinkClustering linkClusterConf = new LinkClustering(n, (t) -> t.isBeliefOrGoal() ? t.conf() : Float.NaN,
// 4, 16);
// SpaceGraph.window(col(
// new STMView.BagClusterVis(n, linkClusterPri.bag),
// new STMView.BagClusterVis(n, linkClusterConf.bag)
// ), 800, 600);
// ConjClustering conjClusterBinput = new ConjClustering(n, BELIEF, (Task::isInput), 8, 32);
ConjClustering conjClusterBany = new ConjClustering(n, BELIEF, (t -> true), 8, 64);
// ConjClustering conjClusterG = new ConjClustering(n, GOAL, (t -> true), 4, 16);
// ArithmeticIntroduction arith = new ArithmeticIntroduction(4, n);
// RelationClustering relCluster = new RelationClustering(n,
// (t)->t.isBelief() && !t.isEternal() && !t.term().isTemporal() ? t.conf() : Float.NaN,
// 8, 32);
// ConjClustering conjClusterG = new ConjClustering(n, GOAL, (t->true),8, 32);
// n.runLater(() -> {
// // AudioContext ac = new AudioContext();
// // ac.start();
// // Clock aclock = new Clock(ac, 1000f / (agentFPS * 0.5f));
// // new Metronome(aclock, n);
// new VocalCommentary(null, a);
// //ac.out.dependsOn(aclock);
// });
// needs tryContent before it's safe
Inperience inp = new Inperience(n, 12);
//
// Abbreviation abb = new Abbreviation(n, "z", 3, 6, 10f, 32);
// reflect.ReflectSimilarToTaskTerm refSim = new reflect.ReflectSimilarToTaskTerm(16, n);
// reflect.ReflectClonedTask refTask = new reflect.ReflectClonedTask(16, n);
// a.trace = true;
// n.onTask(t -> {
// if (t instanceof DerivedTask)
// System.out.println(t);
// });
// NInner nin = new NInner(n);
// nin.start();
// AgentService mc = MetaGoal.newController(a);
// init();
// n.onCycle(nn -> {
// float lag = narLoop.lagSumThenClear() + a.running().lagSumThenClear();
// //n.emotion.happy(-lag);
// //n.emotion.happy(n.emotion.busyPri.getSum()/50000f);
// });
// new Anoncepts(8, n);
// new Implier(2f, a,
// 1
// //0,1,4
// );
//
// window(new MatrixView(p.in, (x, gl) -> {
// Draw.colorBipolar(gl, x);
// return 0;
// }), 100, 100);
// get ready
System.gc();
n.runLater(() -> {
chart(a);
SpaceGraph.window(Vis.top(a.nar()), 800, 800);
// window(new ConceptView(a.happy,n), 800, 600);
n.on(a);
// START AGENT
Loop aLoop = a.runFPS(agentFPS);
// n.runLater(() -> {
// new Deriver(a.fire(), Derivers.deriver(6, 8,
// "motivation.nal"
// //, "goal_analogy.nal"
// ).apply(n).deriver, n); //{
// });
});
Loop loop = n.startFPS(narFPS);
return n;
}
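A minimal usage sketch for runRT, assuming a hypothetical NAgent subclass (MyAgent is not part of the snippet above); it only illustrates the call shape of the signature shown:
// hypothetical usage; MyAgent is an assumed NAgent subclass
NAR n = NAgentX.runRT(nar -> new MyAgent(nar), 25f /* narFPS */, 10f /* agentFPS */);
// runRT starts the NAR loop at narFPS and the agent loop at agentFPS, then returns the running NAR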
Use of nars.$ in project narchy by automenta.
The class ExeCharts, method metaGoalChart:
private static Surface metaGoalChart(NAgent a) {
return new TreeChart<Cause>() {
final DurService on;
final FasterList<ItemVis<Cause>> cache = new FasterList();
final Function<Cause, TreeChart.ItemVis<Cause>> builder = ((i) -> {
short id = i.id;
ItemVis<Cause> item;
if (cache.capacity() - 1 < id)
cache.ensureCapacity(id + 16);
else {
item = cache.get(id);
if (item != null)
return item;
}
String str = i.toString();
if (str.startsWith("class nars."))
// skip default toString
str = str.substring("class nars.".length());
if (str.startsWith("class "))
// skip default toString
str = str.substring(5);
item = new CauseVis(i, str);
cache.set(id, item);
return item;
});
{
on = a.onFrame(() -> {
update(a.nar().causes, (c, i) -> {
float v = c.value();
float r, g, b;
if (v < 0) {
r = 0.75f * Math.max(0.1f, Math.min(1f, -v));
g = 0;
} else {
g = 0.75f * Math.max(0.1f, Math.min(1f, +v));
r = 0;
}
float t = Util.sum(((FloatFunction<Traffic>) (p -> Math.abs(p.last))), c.goal);
b = Math.max(r, g) / 2f * Util.unitize(t);
i.update(v, r, g, b);
// i.updateMomentum(
// //0.01f + Util.sqr(Util.tanhFast(v)+1),
// //Math.signum(v) *(1+Math.abs(v))*(t),
// //Math.signum(v) * t,
// v,
// 0.25f,
// r, g, b);
}, builder);
});
}
@Override
public void stop() {
super.stop();
on.off();
}
};
}
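For reference, the color mapping inside the update callback above can be read as a pure function; a minimal standalone sketch (not from the narchy source; the explicit clamp stands in for Util.unitize, which is assumed to clamp to [0,1]):
final class CauseColorSketch {
    // negative cause values shade red, positive shade green; brightness is clamped to [0.1, 0.75],
    // and the blue channel scales with the summed absolute goal traffic
    static float[] rgb(float causeValue, float goalTraffic) {
        float r, g;
        if (causeValue < 0) {
            r = 0.75f * Math.max(0.1f, Math.min(1f, -causeValue));
            g = 0;
        } else {
            g = 0.75f * Math.max(0.1f, Math.min(1f, +causeValue));
            r = 0;
        }
        float b = Math.max(r, g) / 2f * Math.max(0, Math.min(1, goalTraffic));
        return new float[] { r, g, b };
    }
}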
Use of nars.$ in project narchy by automenta.
The class ThermostatTest, method main:
// @Test
// @Disabled
public static void main(String[] args) {
// void test1() {
// Param.DEBUG = true;
final int DUR = 1;
final int subTrainings = 2;
// pause between episodes
final int thinkDurs = 4;
NAR n = NARS.tmp();
n.time.dur(DUR);
n.timeFocus.set(2);
n.termVolumeMax.set(34);
// n.freqResolution.set(0.05f);
// n.confResolution.set(0.01f);
n.activationRate.set(0.5f);
n.goalPriDefault.set(1f);
// n.forgetRate.set(2f);
// n.deep.set(0.8);
// n.emotion.want(MetaGoal.Desire, 0.2f);
// n.want(MetaGoal.Believe, 0.1f);
// n.want(MetaGoal.Perceive, -0.01f);
float exeThresh = 0.51f;
// new ArithmeticIntroduction(8, n);
new ConjClustering(n, BELIEF, (t) -> true, 8, 32);
// n.priDefault(BELIEF, 0.3f);
// n.logPriMin(System.out, 0.5f);
// n.logWhen(System.out, false, true, true);
// n.log();
boolean[] training = new boolean[] { true };
Opjects op = new Opjects(n) {
// {
// pretend = true;
// }
@Override
@Nullable
protected synchronized Object invoked(Object obj, Method wrapped, Object[] args, Object result) {
if (training[0]) {
n.synch();
// n.runLater(nn -> nn.run(DUR)); //queue some thinking cycles
}
Object y = super.invoked(obj, wrapped, args, result);
if (training[0])
n.run(DUR * thinkDurs);
return y;
}
};
Teacher<Thermostat> env = new Teacher<>(op, Thermostat.class);
Consumer<Thermostat> hotToCold = Thermostat.change(true, false),
        coldToCold = Thermostat.change(false, false),
        coldToHot = Thermostat.change(false, true),
        hotToHot = Thermostat.change(true, true);
Predicate<Thermostat> isCold = x -> x.is() == Thermostat.cold;
Predicate<Thermostat> isHot = x -> x.is() == Thermostat.hot;
n.logWhen(System.out, true, true, true);
boolean stupid = true;
training: do {
training[0] = true;
op.exeThresh.set(1f);
for (int i = 0; i < subTrainings; i++) {
for (Consumer<Thermostat> condition : new Consumer[] { hotToCold, coldToCold }) {
System.out.println("EPISODE START");
n.clear();
env.teach("down", condition, (Thermostat x) -> {
// x.up(); //demonstrate no change
// x.report();
n.run(1);
while (x.is() > Thermostat.cold) {
x.down();
n.run(1);
}
x.report();
n.run(1);
// x.down(); //demonstrate no change
// x.report();
}, isCold);
System.out.println("EPISODE END");
n.run(thinkDurs * n.dur());
// n.concept("do(down)").print();
}
for (Consumer<Thermostat> condition : new Consumer[] { coldToHot, hotToHot }) {
System.out.println("EPISODE START");
n.clear();
env.teach("up", condition, x -> {
// x.down(); //demonstrate no change
// x.report();
n.run(1);
while (!isHot.test(x)) {
x.up();
n.run(1);
}
x.report();
n.run(1);
// x.up(); //demonstrate no change
// x.report();
}, isHot);
System.out.println("EPISODE END");
n.run(thinkDurs * n.dur());
}
}
System.out.println("VALIDATING");
System.out.println();
training[0] = false;
op.exeThresh.set(exeThresh);
// n.log();
// n.run(100);
// new Implier(n, new float[] { 1f },
// $.$("a_Thermostat(down,())"),
// $.$("a_Thermostat(up,())")
// //$.$("a_Thermostat(is,(),#x)")
// );
// try {
// make cold
// n.input(new NALTask($.$("a_Thermostat(should,(),0)"),
// BELIEF, $.t(1f, 0.99f),
// n.time(), n.time(), n.time()+1000,
// n.time.nextInputStamp()).pri(1f));
Thermostat t = env.x;
{
// n.clear();
t.is(3);
t.should(0);
n.run(thinkDurs * n.dur());
Term cold = $.$$("is(a_Thermostat,0)");
// Term cold = $.$safe("(a_Thermostat(is,(),0) &| --a_Thermostat(is,(),3))");
Term hot = $.$$("is(a_Thermostat,3)");
Truth goalTruth = $.t(1f, 0.9f);
DurService xPos = n.wantWhile(cold, goalTruth, new TaskConceptLogger(n, (w) -> (t.current != t.target)));
DurService xNeg = n.wantWhile(hot, goalTruth.neg(), new TaskConceptLogger(n, (w) -> t.current != t.target));
n.run(1);
for (int i = 0; i < 16 && xPos.isOn(); i++) {
int period = 100;
// t.report();
// n.run(period, pause);
n.run(period);
}
xPos.off();
xNeg.off();
t.report();
if (t.is() == t.should()) {
System.out.println("good job nars!");
n.believe($.$$("(learn(up) && learn(down))"), Tense.Present);
stupid = false;
} else {
System.out.println("bad job nars! try again");
n.believe($.$$("(--learn(up) && --learn(down))"), Tense.Present);
}
// n.input(new NALTask($.$safe("a_Thermostat(is,(),0)"),
// GOAL, $.t(1f, 0.95f),
// n.time(), n.time(), n.time() + periods,
// n.time.nextInputStamp()).pri(1f));
// n.input(new NALTask($.$safe("a_Thermostat(is,(),3)"),
// GOAL, $.t(0f, 0.95f),
// n.time(), n.time(), n.time() + periods,
// n.time.nextInputStamp()).pri(1f));
}
} while (false);
// n.run(thinkDurs * n.dur());
{
// n.input(new NALTask($.$safe("a_Thermostat(is,(),3)"),
// GOAL, $.t(0f, 0.99f),
// n.time(), n.time(), n.time()+1000,
// n.time.nextInputStamp()).pri(1f));
}
// while (t.is() != t.should()) {
// int period = 1000;
// t.report();
// n.run(period);
// }
n.tasks().forEach(t -> {
if (!t.isInput())
System.out.println(t);
});
}
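Since this page indexes uses of nars.$, the factory idioms that actually appear in the test above are, in isolation (import paths assumed from the snippet's identifiers):
// sketch of the nars.$ calls used above
Term cold = $.$$("is(a_Thermostat,0)");   // parse a Narsese term
Term hot = $.$$("is(a_Thermostat,3)");
Truth goalTruth = $.t(1f, 0.9f);          // truth value: frequency 1.0, confidence 0.9
Truth negated = goalTruth.neg();          // negated truth, as passed to wantWhile above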