Use of nars.control.CauseChannel in project narchy by automenta.
From the class NAct, method actionBipolarFrequencyDifferential:
default GoalActionAsyncConcept[] actionBipolarFrequencyDifferential(@NotNull Term s, boolean fair, boolean latchPreviousIfUndecided, @NotNull FloatToFloatFunction update) {

    // the two opposed poles of the action: (s --> "+") and (s --> "-")
    Term pt = $.inh(s, PLUS);
        //$.p(s, PLUS);  //$.prop(s, PLUS);  //$.p(s, ZeroProduct);  //$.p(s, $.the("\"+\""));
    Term nt = $.inh(s, NEG);
        //$.p(s, NEG);  //$.prop(s, NEG);  //$.p(ZeroProduct, s);  //$.p(s, $.the("\"-\""));

    final float[] g = new float[2];
    final float[] c = new float[2];
    final long[] lastUpdate = { ETERNAL };
    final float[] lastX = { 0 };

    // hack: shared state between the two concepts' feedback callbacks
    GoalActionAsyncConcept[] CC = new GoalActionAsyncConcept[2];

    // feedback handler shared by both concepts: combines their goal truths into one bipolar value
    @NotNull BiConsumer<GoalActionAsyncConcept, Truth> u = (action, gg) -> {

        NAR n = nar();
        long now = n.time();
        if (now != lastUpdate[0]) {
            lastUpdate[0] = now;
            CC[0] = CC[1] = null; // reset
        }

        //float freqEps = n.freqResolution.floatValue();
        float confMin = n.confMin.floatValue();
        //float eviMin = c2wSafe(confMin);

        // feedback confidence, fairly shared so the two concepts sum to the default
        float feedbackConf = w2c(c2w(n.confDefault(BELIEF)) / 2f);
            //n.confDefault(BELIEF);  //n.confDefault(GOAL);  //confMin * ...;

        boolean p = action.term().equals(pt);
        int ip = p ? 0 : 1; // 0 = '+' pole, 1 = '-' pole
        CC[ip] = action;
        g[ip] = gg != null ? gg.expectation() : 0f;  //gg.freq();  //0.5f;
        c[ip] = gg != null ? gg.conf() : 0f;  //gg.evi();

        float x; // -1..+1
        boolean curious;

        if (CC[0] != null && CC[1] != null) { // both poles are ready

            float cMax = Math.max(c[0], c[1]);
            float cMin = Math.min(c[0], c[1]);
            float coherence = cMin / cMax;

            Random rng = n.random();
            float cur = curiosity().floatValue();
            if (cur > 0 && rng.nextFloat() <= cur) {
                // curiosity: pick a random motor value
                x = (rng.nextFloat() - 0.5f) * 2f;
                //float curiEvi =
                //    //c2w(n.confDefault(BELIEF));
                //    //eviMin*2;
                //    Math.max(c2wSafe(w2cSafe(eviMin)*2), Util.mean(c[0], c[1])); //match desire conf, min=2*minConf
                c[0] = c[1] = feedbackConf;
                coherence = 1f;
                curious = true;
            } else {
                curious = false;
                if (cMax < confMin) {
                    // neither pole is confident enough to decide
                    x = latchPreviousIfUndecided ? lastX[0] : 0;
                } else {
                    ////expectation
                    //float g0 = g[0]-0.5f;
                    //float g1 = g[1]-0.5f;
                    //df = 2f * ((g0) - (g1));
                    //    // /Math.max(Math.abs(g0), Math.abs(g1));

                    // frequency: A. subtraction
                    x = g[0] - g[1];

                    if (fair) {
                        // experimental: lessen by a factor of how equally confident each goal is
                        x *= coherence; // fully fair
                        //x *= Math.sqrt(coherence); //less sharp than linear
                        //df *= 0.5f + 0.5f * (eMin / eMax); //semi-fair: reduction by at most half
                    }
                    //df *= 1f - Math.abs(e[0] - e[1]) / eMax;
                    //df *= Util.sqr(eMin / eMax); //more cautious
                    //df *= Math.min(w2cSafe(e[0]), w2cSafe(e[1])) / w2cSafe(eMax);
                }
            }

            x = Util.clamp(x, -1f, +1f);
            lastX[0] = x;

            // apply the motor value; y is the executed feedback, also in -1..+1
            float y = update.valueOf(x);
            //System.out.println(x + " " + y);
            //w2c(Math.abs(y) * c2w(restConf));

            PreciseTruth Nb, Ng, Pb, Pg;
            if (y == y) { // y is not NaN
                // split y (-1..+1) into feedback frequencies for the two poles
                float yp, yn;
                if (Math.abs(y) >= n.freqResolution.floatValue()) {
                    yp = 0.5f + y / 2f;
                    yn = 1f - yp;
                } else {
                    yp = yn = 0.5f;
                }
                //float yp = 0.5f + y/2f;
                //float yn = 1f - yp;

                float pbf = yp;
                float nbf = yn;
                Pb = $.t(pbf, feedbackConf);
                Nb = $.t(nbf, feedbackConf);

                //float goalEvi =
                //    eviMin;
                //    //max(eviMin, max(e[0], e[1]));
                //Pg = curious || e[0] == 0 ? new PreciseTruth(yp, goalEvi, false) : null;
                //Ng = curious || e[1] == 0 ? new PreciseTruth(yn, goalEvi, false) : null;

                //float confBase = confMin*4; //~ alpha, learning rate
                //float fThresh = Float.MIN_NORMAL;
                //float yp = y > +fThresh ? Util.lerp(+y, confBase, feedbackConf) : confBase;
                //float yn = y < -fThresh ? Util.lerp(-y, confBase, feedbackConf) : confBase;
                //Pb = $.t(y > +fThresh ? 1 : 0, y > +fThresh ? yp : feedbackConf - yp);
                //Nb = $.t(y < -fThresh ? 1 : 0, y < -fThresh ? yn : feedbackConf - yn);
                ////Pg = curious || e[0] == 0 ? new PreciseTruth(1, Util.lerp(+y, confMin2, feedbackConf)) : null;
                //Pg = null;
                ////Ng = curious || e[1] == 0 ? new PreciseTruth(1, Util.lerp(-y, confMin2, feedbackConf)) : null;
                //Ng = null;

                //float fThresh = nar().freqResolution.floatValue();
                //int sign = (y > fThresh ? +1 : (y < -fThresh ? -1 : 0));
                //
                //float feedConf =
                //    w2cSafe(c2wSafe(goalConf)/2f); //half/half
                //    //goalConf;
                //    //Math.max(confMin, goalConf * coherence);
                //switch (sign) {
                //    case +1:
                //        //Pb = $.t(1f, Util.lerp(+y, confBase, feedbackConf));
                //        Pb = $.t(y/2f + 0.5f, feedConf);
                //        Nb =
                //            //null;
                //            $.t(0, feedConf);
                //        break;
                //    case -1:
                //        Pb =
                //            //null;
                //            $.t(0, feedConf);
                //        Nb = $.t(-y/2f + 0.5f, feedConf);
                //        break;
                //    case 0:
                //        //Pb = Nb = null; //no signal
                //        Pb = Nb = $.t(0, feedConf);
                //        //Math.max(confMin, feedConf);
                //        //w2cSafe(c2wSafe(feedConf)/2f))); //zero
                //        break;
                //    default:
                //        throw new UnsupportedOperationException();
                //}

                Pg = null;
                Ng = null;

                //if (curious) {
                //    e[0] = e[1] = 0; //reset to get full evidence override
                //}
                //float g0 = eviMax - e[0];
                //Pg = g0 >= eviMin ? new PreciseTruth(yp, g0, false) : null;
                //float g1 = eviMax - e[1];
                //Ng = g1 >= eviMin ? new PreciseTruth(yn, g1, false) : null;

            } else {
                Pb = Nb = Pg = Ng = null;
            }

            //System.out.println(Pb + "," + Nb + " <- " + g[0] + ";" + c[0] + ", " + g[1] + ';' + c[1]);
            CC[0].feedback(Pb, Pg, n);
            CC[1].feedback(Nb, Ng, n);
        }
    };

    // a single CauseChannel, created for the term s, is shared by both action concepts
    CauseChannel<ITask> cause = nar().newCauseChannel(s);
    GoalActionAsyncConcept p = new GoalActionAsyncConcept(pt, this, cause, u);
    GoalActionAsyncConcept n = new GoalActionAsyncConcept(nt, this, cause, u);
    addAction(p);
    addAction(n);

    // believe (eternally) that s does not inherit both poles at once
    nar().believe($.inh(s, SECTe.the(PLUS, NEG)).neg(), Tense.Eternal);

    CC[0] = p;
    CC[1] = n;
    return CC;
}
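The method above combines the goal truths of the '+' and '-' concepts into one bipolar motor value x in [-1, +1], passes it to the supplied update function, and turns the returned feedback y back into belief frequencies for both concepts, which share a single CauseChannel. The following standalone sketch reproduces just that arithmetic with plain-Java stand-ins for narchy's truth values; the class, names, and sample numbers are illustrative and not part of the narchy API.

// Standalone sketch of the bipolar combination above (stand-in types, not narchy API).
public class BipolarSketch {

    // minimal stand-in for a goal truth value: frequency and confidence
    record Goal(float freq, float conf) {
        float expectation() { return conf * (freq - 0.5f) + 0.5f; } // NAL expectation
    }

    public static void main(String[] args) {
        Goal plus = new Goal(0.9f, 0.6f);  // desire toward the '+' pole
        Goal neg  = new Goal(0.3f, 0.4f);  // desire toward the '-' pole

        // coherence: how equally confident the two opposing goals are (1 = equally confident)
        float coherence = Math.min(plus.conf(), neg.conf()) / Math.max(plus.conf(), neg.conf());

        // bipolar motor value: difference of expectations, scaled by coherence when 'fair'
        float x = plus.expectation() - neg.expectation();
        x *= coherence;
        x = Math.max(-1f, Math.min(+1f, x)); // clamp to -1..+1

        // the motor executes and reports back y; feedback splits into two belief frequencies
        float y = x;              // assume the motor did exactly what was requested
        float yp = 0.5f + y / 2f; // frequency fed back to the '+' concept
        float yn = 1f - yp;       // frequency fed back to the '-' concept

        System.out.printf("x=%.2f  yp=%.2f  yn=%.2f%n", x, yp, yn);
    }
}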
Use of nars.control.CauseChannel in project narchy by automenta.
From the class NAgent, method run:
@Override
public void run() {
    if (!enabled.get())
        return;

    this.last = this.now;
    this.now = nar.time();
    if (now <= last)
        return;

    // stretch the perceptual duration to the NAgent's effective framerate
    int dur = Math.max(nar.dur(), (int) (now - last));

    reward = act();

    happy.update(last, now, dur, nar);

    FloatFloatToObjectFunction<Truth> truther =
            (prev, next) -> $.t(next, nar.confDefault(BELIEF));
    sensors.forEach((key, value) -> value.input(key.update(last, now, truther, dur, nar)));

    always(motivation.floatValue());

    // HACK TODO compile this to a re-used array on init, like before
    Map.Entry<ActionConcept, CauseChannel<ITask>>[] aa =
            actions.entrySet().toArray(new Map.Entry[actions.size()]);
    ArrayUtils.shuffle(aa, random()); // fair chance of ordering for all motors
    for (Map.Entry<ActionConcept, CauseChannel<ITask>> ac : aa) {
        Stream<ITask> s = ac.getKey().update(last, now, dur, NAgent.this.nar);
        if (s != null)
            ac.getValue().input(s); // route each motor's tasks through its own channel
    }

    Truth happynowT = nar.beliefTruth(happy, last, now);
    float happynow = happynowT != null ? (happynowT.freq() - 0.5f) * 2f : 0;
    nar.emotion.happy(motivation.floatValue() * dexterity(last, now) * happynow);

    if (trace)
        logger.info(summary());
}
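In run(), each ActionConcept is paired with its own CauseChannel, and the dispatch order is shuffled every frame so no motor is systematically served first. A generic sketch of that pattern follows; Motor and the Consumer channels are hypothetical stand-ins, not narchy types.

import java.util.*;
import java.util.function.Consumer;

// Sketch of per-motor channel dispatch with a shuffled, per-frame ordering (stand-in types).
class MotorDispatchSketch {

    interface Motor {
        List<String> update(long last, long now); // may return null when there is nothing to emit
    }

    static void frame(Map<Motor, Consumer<List<String>>> channels, long last, long now, Random rng) {
        List<Map.Entry<Motor, Consumer<List<String>>>> order = new ArrayList<>(channels.entrySet());
        Collections.shuffle(order, rng); // fair chance of ordering for all motors
        for (Map.Entry<Motor, Consumer<List<String>>> e : order) {
            List<String> tasks = e.getKey().update(last, now);
            if (tasks != null)
                e.getValue().accept(tasks); // each motor's tasks flow through its own channel
        }
    }
}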
Use of nars.control.CauseChannel in project narchy by automenta.
From the class NAct, method addAction:
default <A extends ActionConcept> A addAction(A c) {
    CauseChannel existing = actions().put(c, nar().newCauseChannel(c));
    assert (existing == null); // each action concept gets exactly one dedicated channel
    nar().on(c);
    return c;
}
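addAction registers the concept and pairs it with a freshly created channel from nar().newCauseChannel(c); the assert enforces that each action is registered only once, so every action keeps exactly one dedicated channel (presumably so the tasks it emits can be attributed back to that action as their cause). A minimal generic sketch of that one-channel-per-action invariant; Action, Channel, and the registry class are hypothetical stand-ins, not narchy types.

import java.util.HashMap;
import java.util.Map;

// Sketch of the one-channel-per-action registration invariant (stand-in types).
class ChannelRegistrySketch<Action, Channel> {

    private final Map<Action, Channel> channels = new HashMap<>();

    Channel register(Action a, Channel freshChannel) {
        Channel existing = channels.put(a, freshChannel);
        assert existing == null : "action registered twice: " + a;
        return freshChannel;
    }
}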