Use of nars.task.ITask in project narchy (by automenta).
From class Exec, method executeNow:
/**
 * Executes the given object inline, on the calling thread (synchronous).
 * <p>
 * An {@link ITask} is run to completion by repeatedly invoking the
 * continuation it returns, until null; a {@link Runnable} is invoked
 * directly; anything else is assumed to be a {@code Consumer} of the NAR.
 * Errors are logged (full throwable only when Param.DEBUG) and never
 * propagated to the caller.
 *
 * @param t the task, runnable, or consumer to execute
 */
final void executeNow(Object t) {
    try {
        if (t instanceof ITask) {
            NAR n = this.nar;
            ITask next = (ITask) t;
            do {
                next = next.run(n);
            } while (next != null);
        } else if (t instanceof Runnable) {
            ((Runnable) t).run();
        } else {
            ((Consumer) t).accept(nar);
        }
    } catch (Throwable e) {
        logger.error("{} {}", t, Param.DEBUG ? e : e.getMessage());
    }
}
Use of nars.task.ITask in project narchy (by automenta).
From class GoalActionConcept, method update:
/**
 * Per-cycle update of this action concept.
 * <p>
 * Samples the current goal truth over a window of one duration centered on
 * {@code pNow}. With probability proportional to the curiosity level (scaled
 * down by existing goal confidence), the goal is overridden by a random
 * "curiosity" goal. The motor function is then applied to the current belief
 * and (possibly overridden) goal, and the resulting feedback belief task —
 * plus a curiosity goal task, when curiosity fired and feedback exists — is
 * returned.
 *
 * @param pPrev previous perception time (unused here)
 * @param pNow  current perception time
 * @param dur   perceptual duration (window size)
 * @param nar   the reasoner
 * @return stream of the non-null tasks produced this cycle
 */
@Override
public Stream<ITask> update(long pPrev, long pNow, int dur, NAR nar) {

    float curiosityRate = curiosity.floatValue();

    long windowStart = pNow - dur / 2;
    long windowEnd = pNow + dur / 2;

    Truth goal = this.goals().truth(windowStart, windowEnd, nar);

    // curiosity fires more readily when the current goal is weak or absent
    float goalConf = (goal != null) ? goal.conf() : 0;
    boolean curiosityFired;
    if (nar.random().nextFloat() < curiosityRate * (1f - goalConf)) {
        curiosityFired = true;
        float curiosityConf = nar.confMin.floatValue() * 8;
        goal = Truth.theDithered(nar.random().nextFloat(), c2w(curiosityConf), nar);
    } else {
        curiosityFired = false;
    }

    Truth belief = this.beliefs().truth(windowStart, windowEnd, nar);

    // motor transfer: actual feedback resulting from the desired goal
    Truth feedbackTruth = this.motor.apply(belief, goal);
    Task feedbackBelief = (feedbackTruth != null)
            ? this.feedback.add(feedbackTruth, windowStart, windowEnd, dur, nar)
            : null;

    // emit the curiosity goal only when it actually produced feedback
    Task curiosityGoal = (curiosityFired && feedbackBelief != null)
            ? this.curiosity(nar, goal, term, windowStart, windowEnd, nar.time.nextStamp())
            : null;

    return Stream.of(feedbackBelief, (ITask) curiosityGoal).filter(Objects::nonNull);
}
Use of nars.task.ITask in project narchy (by automenta).
From interface NAct, method actionBipolarFrequencyDifferential:
/**
 * Builds a complementary pair of async action concepts for term s: one for the
 * positive pole ((s-->PLUS)) and one for the negative pole ((s-->NEG)).
 * Each cycle, the two goal truths are merged into a single scalar x in -1..+1
 * (positive expectation minus negative expectation), which is passed to
 * 'update'; the motor result y is then redistributed as belief feedback to
 * both concepts.
 *
 * NOTE(review): feedback is only issued once BOTH poles have reported within
 * the same NAR time — the shared CC/g/c arrays accumulate across the two
 * callback invocations and are reset when the clock advances.
 *
 * @param s the base action term
 * @param fair if true, x is attenuated by the coherence (cMin/cMax) of the two goal confidences
 * @param latchPreviousIfUndecided if true, reuse the previous x when neither goal reaches confMin
 * @param update motor transfer function mapping desired x (-1..+1) to actual y; may return NaN for "no feedback"
 * @return the two created concepts, index 0 = PLUS pole, index 1 = NEG pole
 */
default GoalActionAsyncConcept[] actionBipolarFrequencyDifferential(@NotNull Term s, boolean fair, boolean latchPreviousIfUndecided, @NotNull FloatToFloatFunction update) {
    Term pt = // $.p(s, PLUS);
            $.inh(s, PLUS);
    // $.prop(s,PLUS);
    // $.p(s, ZeroProduct);
    // $.p(s,$.the("\"+\""));
    Term nt = // $.p(s, NEG);
            $.inh(s, NEG);
    // $.prop(s, NEG);
    // $.p(ZeroProduct, s);
    // $.p(s,$.the("\"-\""));
    // per-cycle scratch state shared by both concepts' feedback callbacks:
    final float[] g = new float[2];        // goal expectation per pole (0=PLUS, 1=NEG)
    final float[] c = new float[2];        // goal confidence per pole
    final long[] lastUpdate = { ETERNAL }; // NAR time of the cycle being accumulated
    final float[] lastX = { 0 };           // previous decided x, for optional latching
    // hack
    GoalActionAsyncConcept[] CC = new GoalActionAsyncConcept[2];
    @NotNull BiConsumer<GoalActionAsyncConcept, Truth> u = (action, gg) -> {
        NAR n = nar();
        long now = n.time();
        if (now != lastUpdate[0]) {
            lastUpdate[0] = now;
            // reset
            CC[0] = CC[1] = null;
        }
        // float freqEps = n.freqResolution.floatValue();
        float confMin = n.confMin.floatValue();
        // float eviMin = c2wSafe(confMin);
        float feedbackConf = // fairly shared to sum to default
                w2c(c2w(n.confDefault(BELIEF)) / 2f);
        // n.confDefault(BELIEF);
        // n.confDefault(GOAL);
        // confMin * ...;
        // record this pole's goal; ip selects the slot (0 = PLUS pole)
        boolean p = action.term().equals(pt);
        int ip = p ? 0 : 1;
        CC[ip] = action;
        g[ip] = gg != null ? // gg.freq()
                gg.expectation() : 0f;
        // 0.5f;
        c[ip] = gg != null ? // gg.evi()
                gg.conf() : 0f;
        // -1..+1
        float x;
        boolean curious;
        if (CC[0] != null && CC[1] != null) /* both ready */
        {
            float cMax = Math.max(c[0], c[1]);
            float cMin = Math.min(c[0], c[1]);
            float coherence = cMin / cMax;
            Random rng = n.random();
            float cur = curiosity().floatValue();
            // curiosity: with probability cur, replace the decision with a random x
            if (cur > 0 && rng.nextFloat() <= cur) {
                x = (rng.nextFloat() - 0.5f) * 2f;
                // float curiEvi =
                // //c2w(n.confDefault(BELIEF));
                // //eviMin*2;
                // Math.max(c2wSafe(w2cSafe(eviMin)*2), Util.mean(c[0], c[1])); //match desire conf, min=2*minConf
                c[0] = c[1] = feedbackConf;
                coherence = 1f;
                curious = true;
            } else {
                curious = false;
                if (cMax < confMin) {
                    // undecided: neither goal is confident enough
                    if (latchPreviousIfUndecided) {
                        x = lastX[0];
                    } else {
                        x = 0;
                    }
                } else {
                    // //expectation
                    // float g0 = g[0]-0.5f;
                    // float g1 = g[1]-0.5f;
                    // df = 2f * ((g0) - (g1));
                    // // /Math.max(Math.abs(g0), Math.abs(g1));
                    // frequency -======================
                    // A. subtraction
                    // subtract
                    x = ((g[0] - g[1]));
                    // experimental: lessen by a factor of how equally confident each goal is
                    if (fair) {
                        // fully fair
                        x *= coherence;
                        // x *= Math.sqrt(coherence); //less sharp than linear
                        // semi-fair
                        // df *= 0.5f + 0.5f * (eMin / eMax); //reduction by at most half
                    }
                    // df *= 1f - Math.abs(e[0] - e[1]) / eMax;
                    // df *= Util.sqr(eMin / eMax); //more cautious
                    // df *= Math.min(w2cSafe(e[0]), w2cSafe(e[1])) / w2cSafe(eMax);
                }
            }
            x = Util.clamp(x, -1f, +1f);
            lastX[0] = x;
            // -1..+1
            float y = update.valueOf(x);
            // System.out.println(x + " " + y);
            // w2c(Math.abs(y) * c2w(restConf));
            // feedback truths per pole: *b = belief feedback, *g = goal feedback
            PreciseTruth Nb, Ng, Pb, Pg;
            if (y == y) { // i.e. !Float.isNaN(y)
                // y: (-1..+1)
                // split y into complementary pole frequencies yp + yn = 1
                float yp, yn;
                if (Math.abs(y) >= n.freqResolution.floatValue()) {
                    yp = 0.5f + y / 2f;
                    yn = 1f - yp;
                } else {
                    yp = yn = 0.5f;
                }
                // float yp = 0.5f + y/2f;
                // float yn = 1f - yp;
                float pbf = yp;
                float nbf = yn;
                Pb = $.t(pbf, feedbackConf);
                Nb = $.t(nbf, feedbackConf);
                // float goalEvi =
                // eviMin;
                // //max(eviMin, max(e[0], e[1]));
                // Pg = curious || e[0] == 0 ? new PreciseTruth(yp, goalEvi, false) : null;
                // Ng = curious || e[1] == 0 ? new PreciseTruth(yn, goalEvi, false) : null;
                // float confBase = confMin*4; //~ alpha, learning rate
                // float fThresh = Float.MIN_NORMAL;
                // float yp = y > +fThresh ? Util.lerp(+y, confBase, feedbackConf) : confBase;
                // float yn = y < -fThresh ? Util.lerp(-y, confBase, feedbackConf) : confBase;
                // Pb = $.t(y > +fThresh ? 1 : 0, y > +fThresh ? yp : feedbackConf - yp);
                // Nb = $.t(y < -fThresh ? 1 : 0, y < -fThresh ? yn : feedbackConf - yn);
                // //Pg = curious || e[0] == 0 ? new PreciseTruth(1, Util.lerp(+y, confMin2, feedbackConf)) : null;
                // Pg = null;
                // //Ng = curious || e[1] == 0 ? new PreciseTruth(1, Util.lerp(-y, confMin2, feedbackConf)) : null;
                // Ng = null;
                // float fThresh = nar().freqResolution.floatValue();
                // int sign = (y > fThresh ? +1 : (y < -fThresh ? -1 : 0));
                //
                // float feedConf =
                // w2cSafe(c2wSafe(goalConf)/2f); //half/half
                // //goalConf;
                // //Math.max(confMin, goalConf * coherence);
                // switch (sign) {
                // case +1:
                // //Pb = $.t(1f, Util.lerp(+y, confBase, feedbackConf));
                // Pb = $.t(y/2f + 0.5f, feedConf);
                // Nb =
                // //null;
                // $.t(0, feedConf);
                // break;
                // case -1:
                // Pb =
                // //null;
                // $.t(0, feedConf);
                //
                // Nb = $.t(-y/2f + 0.5f, feedConf);
                // break;
                // case 0:
                // //Pb = Nb = null; //no signal
                // Pb = Nb = $.t(0, feedConf);
                // //Math.max(confMin, feedConf);
                // //w2cSafe(c2wSafe(feedConf)/2f))); //zero
                // break;
                // default:
                // throw new UnsupportedOperationException();
                // }
                // no goal feedback is produced in the current formulation
                Pg = null;
                Ng = null;
                // if (curious) {
                // e[0] = e[1] = 0; //reset to get full evidence override
                // }
                // float g0 = eviMax - e[0];
                // Pg = g0 >= eviMin ? new PreciseTruth(yp, g0, false) : null;
                // float g1 = eviMax - e[1];
                // Ng = g1 >= eviMin ? new PreciseTruth(yn, g1, false) : null;
            } else {
                // NaN from the motor function: no feedback at all this cycle
                Pb = Nb = Pg = Ng = null;
            }
            // System.out.println(Pb + "," + Nb + " <- " + g[0] + ";" + c[0] + ", " + g[1] + ';' + c[1]);
            CC[0].feedback(Pb, Pg, n);
            CC[1].feedback(Nb, Ng, n);
        }
    };
    // both concepts share one cause channel and the combining callback above
    CauseChannel<ITask> cause = nar().newCauseChannel(s);
    GoalActionAsyncConcept p = new GoalActionAsyncConcept(pt, this, cause, u);
    GoalActionAsyncConcept n = new GoalActionAsyncConcept(nt, this, cause, u);
    addAction(p);
    addAction(n);
    nar().believe($.inh(s, SECTe.the(PLUS, NEG)).neg(), Tense.Eternal);
    CC[0] = p;
    CC[1] = n;
    return CC;
}
Use of nars.task.ITask in project narchy (by automenta).
From class NAgent, method run:
/**
 * One agent frame: advance the clock, perceive sensors, and update actions.
 * <p>
 * Skips the frame entirely when disabled or when the reasoner clock has not
 * advanced past the previous frame. The perceptual duration is stretched to
 * at least the elapsed time, so sensing matches the agent's effective
 * framerate. Action concepts are updated in randomized order so no motor is
 * systematically favored.
 */
@Override
public void run() {
    if (!enabled.get())
        return;

    this.last = this.now;
    this.now = nar.time();
    long prev = this.last;
    long now = this.now;
    if (now <= prev)
        return;

    // stretched perceptual duration to the NAgent's effective framerate
    int dur = Math.max(nar.dur(), (int) (now - prev));

    reward = act();
    happy.update(prev, now, dur, nar);

    FloatFloatToObjectFunction<Truth> perceive =
            (before, after) -> $.t(after, nar.confDefault(BELIEF));
    sensors.forEach((sensor, channel) ->
            channel.input(sensor.update(prev, now, perceive, dur, nar)));

    always(motivation.floatValue());

    // HACK TODO compile this to re-used array on init like before
    Map.Entry<ActionConcept, CauseChannel<ITask>>[] shuffled =
            actions.entrySet().toArray(new Map.Entry[actions.size()]);
    // fair chance of ordering to all motors
    ArrayUtils.shuffle(shuffled, random());
    for (Map.Entry<ActionConcept, CauseChannel<ITask>> entry : shuffled) {
        Stream<ITask> produced = entry.getKey().update(prev, now, dur, nar);
        if (produced != null)
            entry.getValue().input(produced);
    }

    // report momentary happiness: belief about 'happy' rescaled to -1..+1
    Truth happyTruth = nar.beliefTruth(happy, prev, now);
    float happyNow = happyTruth != null ? (happyTruth.freq() - 0.5f) * 2f : 0;
    nar.emotion.happy(motivation.floatValue() * dexterity(prev, now) * happyNow);

    if (trace)
        logger.info(summary());
}
Aggregations