Use of nars.concept.scalar.FilteredScalar in project narchy by automenta, from the starting method of the NAgent class.
/**
 * registers sensor, action, and reward concepts with the NAR
 * TODO call this in the constructor
 */
@Override
protected void starting(NAR nar) {
    synchronized (this) {

        Term id = (this.id == null) ? nar.self() : this.id;

//        Term happyTerm = id == null ?
//                $.the("happy") : //generally happy
//                $.p(id, $.the("happy"));
//                //$.inh(id, $.the("happy")); //happy in this environment
//                //$.prop(id, $.the("happy")); //happiness of this environment
//        FloatSupplier happyValue = new FloatCached(
//                new FloatNormalized(
//                        //new FloatPolarNormalized(
//                        //new FloatHighPass(
//                        () -> reward
//                        //)
//                ) {
//                    @Override
//                    public float asFloat() {
//                        float f = super.asFloat();
//                        if (f != f) return Float.NaN;
//                        else {
//                            f = Util.unitize(f);
//
//                            //assert(f >= 0 && f <= 1f);
//
//                            //depression curve and offset
//                            return Util.max(0, f - depress.floatValue());
//                        }
//                    }
//
//                    //    @Override
//                    //    public float min() {
//                    //        return Util.lerp(depress.floatValue(), super.max(), super.min());
//                    //    }
//                    //
//                    //    @Override
//                    //    public float max() {
//                    //        //decrease the max toward min in proportion to the depression setting
//                    //        return Util.lerp(depress.floatValue(), super.max(), super.min());
//                    //    }
//                }.relax(Param.HAPPINESS_RE_SENSITIZATION_RATE),
//                nar::time);

        FloatSupplier happyValue = new FloatCached(() -> reward - depress.floatValue(), nar::time);

        this.happy = //new ActionInfluencingScalar(happyTerm, happyValue);
                new FilteredScalar(happyValue, nar,
                        //happiness (raw value)
                        pair($.inh($.p("happy", "raw"), id),
                                new FloatNormalizer().relax(Param.HAPPINESS_RE_SENSITIZATION_RATE)),
                        //long-term happiness: chronic / satisfaction
                        pair($.inh($.p("happy", "chronic"), id),
                                compose(new FloatNormalizer().relax(Param.HAPPINESS_RE_SENSITIZATION_RATE),
                                        new FloatExpMovingAverage(0.02f))),
                        //short-term happiness: acute / joy
                        pair($.inh($.p("happy", "acute"), id),
                                compose(new FloatExpMovingAverage(0.1f, false),
                                        new FloatPolarNormalizer().relax(Param.HAPPINESS_RE_SENSITIZATION_RATE_FAST))));

        happy.pri(() -> motivation.floatValue() * nar.priDefault(BELIEF));

        alwaysWant((Iterable) happy, nar.confDefault(GOAL));

        actions.keySet().forEach(a -> alwaysQuestion(
                Op.IMPL.the(happy.term, 1 /*XTERNAL*/, a.term)));

        this.in = nar.newCauseChannel(this);

        this.now = nar.time();
        this.last = now - nar.dur(); //head-start

        //finally:
        enabled.set(true);
    }
}
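
The three channels passed to FilteredScalar above differ only in the filter chain applied to the same raw happiness value: an adaptive normalizer for the "raw" channel, a normalizer followed by a slow exponential moving average for the "chronic" channel, and a fast exponential moving average combined with a polar normalizer for the "acute" channel. The standalone sketch below illustrates roughly how such chains behave on a stream of reward values; it uses plain java.util.function operators with hypothetical helper names, not narchy's FloatSupplier/compose API, so treat it as an illustration of the filtering idea rather than the project's implementation.

import java.util.function.DoubleUnaryOperator;

/**
 * Illustration only (hypothetical names, not narchy's API): the same raw reward
 * is passed through different filter chains, yielding a quickly reacting "acute"
 * signal and a slowly drifting "chronic" signal, analogous to the channels above.
 */
public class HappinessFilterSketch {

    /** exponential moving average; small alpha = slow/smooth, large alpha = fast */
    static DoubleUnaryOperator ema(double alpha) {
        return new DoubleUnaryOperator() {
            double state = Double.NaN;
            @Override public double applyAsDouble(double x) {
                state = Double.isNaN(state) ? x : alpha * x + (1 - alpha) * state;
                return state;
            }
        };
    }

    /** running min/max normalizer mapping inputs into 0..1 */
    static DoubleUnaryOperator normalizer() {
        return new DoubleUnaryOperator() {
            double min = Double.POSITIVE_INFINITY, max = Double.NEGATIVE_INFINITY;
            @Override public double applyAsDouble(double x) {
                min = Math.min(min, x);
                max = Math.max(max, x);
                return max > min ? (x - min) / (max - min) : 0.5;
            }
        };
    }

    public static void main(String[] args) {
        DoubleUnaryOperator raw     = normalizer();
        DoubleUnaryOperator chronic = normalizer().andThen(ema(0.02)); // slow EMA: long-term satisfaction
        DoubleUnaryOperator acute   = ema(0.1).andThen(normalizer());  // fast EMA: short-term joy

        double[] rewards = {0.0, 0.2, 1.0, 0.8, -0.5, 0.3};
        for (double r : rewards) {
            System.out.printf("reward=%+.2f raw=%.2f chronic=%.2f acute=%.2f%n",
                    r, raw.applyAsDouble(r), chronic.applyAsDouble(r), acute.applyAsDouble(r));
        }
    }
}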