Use of nars.index.term.map.CaffeineIndex in project narchy by automenta.
Example 1: the main method of the TrackXY class.
public static void main(String[] args) {

    boolean nars = true;
    boolean rl = false;

    int dur = 1;

    // build a NAR on a UniExec executor, a cycle-driven clock, and a Caffeine-backed concept index
    NARS nb = new NARS()
            .exe(new UniExec(64))
            .time(new CycleTime().dur(dur))
            .index(
                    //new HijackConceptIndex(4 * 1024, 4)
                    new CaffeineIndex(32 * 1024)
            );

    NAR n = nb.get();

    n.termVolumeMax.set(20);
    //n.priDefault(BELIEF, 0.2f);
    //n.priDefault(GOAL, 0.5f);
    n.activationRate.set(0.2f);
    //n.forgetRate.set(0.9f);

    TrackXY t = new TrackXY(4, 4);
    n.on(t);

    int experimentTime = 8048;
    n.synch();

    if (rl) {
        new RLBooster(t,
                //HaiQ::new,
                HaiQae::new,
                //RandomAgent::new,
                1);
        t.curiosity.set(0);
    }

    if (nars) {
        //Param.DEBUG = true;
        //n.log();
        //for (String action : new String[]{"up", "down", "left", "right"}) {
        //    //n.goal($.the(action), Tense.Present, 0f, 0.1f);
        //    n.goal($.the(action), Tense.Present, 1f, 0.1f);
        //}

        Deriver d = new Deriver(Derivers.rules(
                //1,
                1, 8, n,
                //"list.nal",
                "motivation.nal"), n);
        d.conceptsPerIteration.set(32);

        n.timeFocus.set(2);

        // cluster input belief events into conjunctions
        ConjClustering cjB = new ConjClustering(n, BELIEF,
                //(tt) -> true,
                (tt) -> tt.isInput(),
                4, 16);
        //ConjClustering cjG = new ConjClustering(n, GOAL,
        //        (tt) -> true,
        //        //(tt) -> tt.isInput(),
        //        5, 16);

        //Implier ii = new Implier(t, 0, 1);
        //ArithmeticIntroduction ai = new ArithmeticIntroduction(4, n);

        window(new Gridding(new AutoSurface(d), new AutoSurface(cjB)), 400, 300);

        // print the proof of every derived goal
        n.onTask(tt -> {
            if (tt instanceof DerivedTask && tt.isGoal()) {
                System.out.println(tt.proof());
            }
        });
    }

    //n.log();
    //n.startFPS(fps);
    //t.runFPS(fps);

    n.onCycle(t);

    final double[] rewardSum = {0};
    n.onCycle(() -> {
        rewardSum[0] += t.reward;
    });

    n.runLater(() -> {
        window(Vis.top(n), 800, 250);
        NAgentX.chart(t);
        window(new CameraSensorView(t.cam, n) {
            @Override
            protected void paint(GL2 gl, int dtMS) {
                super.paint(gl, dtMS);
                // draw a red marker over the (sx, sy) cell
                RectFloat2D at = cellRect(t.sx, t.sy, 0.5f, 0.5f);
                gl.glColor4f(1, 0, 0, 0.9f);
                Draw.rect(gl, at.move(x(), y(), 0.01f));
            }
        }.withControls(), 800, 800);
    });

    n.run(experimentTime);

    //n.startFPS(10f);
    //t.runFPS(10f);
    //System.out.println(n4(rewardSum[0] / n.time()) + " avg reward");
    //System.exit(0);
}
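For quick reference, the CaffeineIndex wiring above reduces to the sketch below. Only nars.index.term.map.CaffeineIndex is named by this page; the other import paths (nars.NAR, nars.NARS, nars.exe.UniExec, nars.time.CycleTime) are assumptions about the narchy package layout and may need adjusting to the revision you build against.

import nars.NAR;
import nars.NARS;
import nars.exe.UniExec;
import nars.index.term.map.CaffeineIndex;
import nars.time.CycleTime;

public class CaffeineIndexExample {
    public static void main(String[] args) {
        // same builder calls as the TrackXY example, stripped to the index-related parts
        NAR n = new NARS()
                .exe(new UniExec(64))                // executor, as in the snippet above
                .time(new CycleTime().dur(1))        // cycle-driven clock, dur = 1
                .index(new CaffeineIndex(32 * 1024)) // Caffeine-backed concept index (presumed capacity bound)
                .get();
        n.run(100); // run a few reasoning cycles
    }
}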
Example 2: the runRT method of the NAgentX class (another use of nars.index.term.map.CaffeineIndex in narchy).
public static NAR runRT(Function<NAR, NAgent> init, float narFPS, float agentFPS) {

    //The.Subterms.the =
    //        The.Subterms.CaffeineSubtermBuilder.get();
    //        The.Subterms.HijackSubtermBuilder.get();
    //        The.Subterms.SoftSubtermBuilder.get();
    //The.Compound.the =
    //        The.Compound.
    //                //SoftCompoundBuilder.get();
    //                CaffeineCompoundBuilder.get();

    float clockFPS =
            //agentFPS;
            narFPS;

    // nyquist threshold between decisecond (0.1) and centisecond (0.01) clock resolution
    RealTime clock = clockFPS >= 10 / 2f ?
            new RealTime.CS(true) :
            new RealTime.DSHalf(true);
    clock.durFPS(clockFPS);

    //Function<NAR, PrediTerm<Derivation>> deriver = Deriver.deriver(8
    //        , "motivation.nal"
    //        //, "relation_introduction.nal"
    //);

    //int THREADS = Math.max(1, Runtime.getRuntime().availableProcessors() - 1);
    //Predicate<Activate> randomBool = (a) -> ThreadLocalRandom.current().nextBoolean();

    //exe.add(new FocusExec(), (x) -> true);
    //exe.add(new FocusExec() {
    //            {
    //                concepts.setCapacity(32);
    //            }
    //        },
    //        (x) -> true);

    // NAR with a WorkerMultiExec executor, real-time clock, several derivers,
    // and a CaffeineIndex whose per-concept weight is the concept's voluplexity
    NAR n = new NARS()
            .exe(new WorkerMultiExec(
                    //new Focus.DefaultRevaluator(),
                    new Focus.AERevaluator(new XoRoShiRo128PlusRandom(1)),
                    256, 8192) {
                {
                    Util.setExecutor(this);
                }
            })
            .time(clock)
            .deriverAdd(1, 1)
            .deriverAdd(2, 2)
            .deriverAdd(3, 3)
            .deriverAdd(5, 5)
            .deriverAdd(6, 8)
            .deriverAdd("motivation.nal")
            .index(new CaffeineIndex(
                    //800 * 1024,
                    2500 * 1024,
                    //Integer.MAX_VALUE,
                    c -> {
                        return (int) Math.ceil(c.voluplexity());
                        //return Math.round(
                        //        ((float) c.voluplexity())
                        //                / (1 + 100 * (c.termlinks().priSum() + c.tasklinks().priSum()))
                        //        //(c.beliefs().size() + c.goals().size()))
                        //);
                    }))
            .get();

    //n.defaultWants();

    n.dtMergeOrChoose.set(true);
    n.dtDither.set(1f); //0.5f //nyquist
    //n.timeFocus.set(4);
    n.confMin.set(0.01f);
    n.freqResolution.set(0.01f);
    n.termVolumeMax.set(40);
    n.beliefConfDefault.set(0.9f);
    n.goalConfDefault.set(0.9f);

    float priFactor = 0.2f;
    n.beliefPriDefault.set(1f * priFactor);
    n.goalPriDefault.set(1f * priFactor);
    n.questionPriDefault.set(1f * priFactor);
    n.questPriDefault.set(1f * priFactor);

    n.activationRate.set(0.5f);

    NAgent a = init.apply(n);

    //new RLBooster(a, HaiQAgent::new, 1);
    //    // @Override
    //    // protected long matchTime(Task task) {
    //    //
    //    //     //future lookahead to catalyze prediction
    //    //     return n.time() +
    //    //             Util.sqr(n.random().nextInt(3)) * n.dur();
    //    //
    //    // }
    //};

    //{
    //    AgentBuilder b = MetaGoal.newController(a);
    //    // .in(a::dexterity)
    //    // .in(new FloatNormalized(()->a.reward).decay(0.9f))
    //    // .in(new FloatNormalized(
    //    //         ((Emotivation) n.emotion).cycleDTRealMean::getValue)
    //    //         .decay(0.9f)
    //    // )
    //    b.in(new FloatNormalized(
    //            //TODO use a Long-specific impl of this:
    //            new FloatFirstOrderDifference(n::time, () -> n.emotion.deriveTask.getValue().longValue())
    //    ).relax(0.99f))
    //    // .in(new FloatNormalized(
    //    //         //TODO use a Long-specific impl of this:
    //    //         new FirstOrderDifferenceFloat(n::time, () -> n.emotion.conceptFirePremises.getValue().longValue())
    //    // ).decay(0.9f)
    //    .in(new FloatNormalized(
    //            () -> n.emotion.busyVol.getSum()
    //    ).relax(0.99f))
    //    .out(2, (onOff) -> {
    //        switch (onOff) {
    //            case 0:
    //                a.enabled.set(false); //pause
    //                break;
    //            case 1:
    //                a.enabled.set(true); //un-pause
    //                break;
    //        }
    //    })
    //    // ).out(
    //    //         new StepController((x) -> n.time.dur(Math.round(x)), 1, n.dur(), n.dur() * 2)
    //    // .out(
    //    //         StepController.harmonic(n.confMin::set, 0.01f, 0.5f)
    //    // )//.out(
    //    //         StepController.harmonic(n.truthResolution::setValue, 0.01f, 0.08f)
    //    // ).out(
    //    //         StepController.harmonic(a.curiosity::setValue, 0.01f, 0.16f)
    //    // ).get(n);
    //    ;
    //    new AgentService(new MutableFloat(1), n, b.get());
    //}

    //n.dtMergeOrChoose.setValue(true);

    //STMLinkage stmLink = new STMLinkage(n, 1, false);
    //LinkClustering linkClusterPri = new LinkClustering(n, Prioritized::priElseZero /* anything temporal */,
    //        32, 128);
    //LinkClustering linkClusterConf = new LinkClustering(n, (t) -> t.isBeliefOrGoal() ? t.conf() : Float.NaN,
    //        4, 16);
    //SpaceGraph.window(col(
    //        new STMView.BagClusterVis(n, linkClusterPri.bag),
    //        new STMView.BagClusterVis(n, linkClusterConf.bag)
    //), 800, 600);

    //ConjClustering conjClusterBinput = new ConjClustering(n, BELIEF, (Task::isInput), 8, 32);
    ConjClustering conjClusterBany = new ConjClustering(n, BELIEF, (t -> true), 8, 64);
    //ConjClustering conjClusterG = new ConjClustering(n, GOAL, (t -> true), 4, 16);

    //ArithmeticIntroduction arith = new ArithmeticIntroduction(4, n);
    //RelationClustering relCluster = new RelationClustering(n,
    //        (t) -> t.isBelief() && !t.isEternal() && !t.term().isTemporal() ? t.conf() : Float.NaN,
    //        8, 32);
    //ConjClustering conjClusterG = new ConjClustering(n, GOAL, (t -> true), 8, 32);

    //n.runLater(() -> {
    //    // AudioContext ac = new AudioContext();
    //    // ac.start();
    //    // Clock aclock = new Clock(ac, 1000f / (agentFPS * 0.5f));
    //    // new Metronome(aclock, n);
    //    new VocalCommentary(null, a);
    //    //ac.out.dependsOn(aclock);
    //});

    //needs tryContent before its safe
    Inperience inp = new Inperience(n, 12);

    //Abbreviation abb = new Abbreviation(n, "z", 3, 6, 10f, 32);
    //reflect.ReflectSimilarToTaskTerm refSim = new reflect.ReflectSimilarToTaskTerm(16, n);
    //reflect.ReflectClonedTask refTask = new reflect.ReflectClonedTask(16, n);

    //a.trace = true;
    //n.onTask(t -> {
    //    if (t instanceof DerivedTask)
    //        System.out.println(t);
    //});

    //NInner nin = new NInner(n);
    //nin.start();

    //AgentService mc = MetaGoal.newController(a);

    //init();

    //n.onCycle(nn -> {
    //    float lag = narLoop.lagSumThenClear() + a.running().lagSumThenClear();
    //    //n.emotion.happy(-lag);
    //    //n.emotion.happy(n.emotion.busyPri.getSum()/50000f);
    //});

    //new Anoncepts(8, n);

    //new Implier(2f, a,
    //        1
    //        //0,1,4
    //);

    //window(new MatrixView(p.in, (x, gl) -> {
    //    Draw.colorBipolar(gl, x);
    //    return 0;
    //}), 100, 100);

    // get ready
    System.gc();

    n.runLater(() -> {
        chart(a);
        SpaceGraph.window(Vis.top(a.nar()), 800, 800);
        //window(new ConceptView(a.happy, n), 800, 600);

        n.on(a);

        // START AGENT
        Loop aLoop = a.runFPS(agentFPS);

        //n.runLater(() -> {
        //    new Deriver(a.fire(), Derivers.deriver(6, 8,
        //            "motivation.nal"
        //            //, "goal_analogy.nal"
        //    ).apply(n).deriver, n); //{
        //});
    });

    Loop loop = n.startFPS(narFPS);

    return n;
}
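The runRT example uses the two-argument form of the CaffeineIndex constructor, whose second argument weighs each concept against the total capacity. A minimal sketch of that form follows; the wrapper class is an illustrative assumption, and only the constructor call itself is taken from the snippet above.

import nars.index.term.map.CaffeineIndex;

class WeightedCaffeineIndexExample {
    static CaffeineIndex weightedIndex() {
        return new CaffeineIndex(
                2500 * 1024,                          // total weight budget, as in runRT
                c -> (int) Math.ceil(c.voluplexity()) // charge each concept its voluplexity
        );
    }
}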