Use of nars.time.Tense.ETERNAL in project narchy by automenta:
class ProNALTest, method test1.
@Test
public void test1() throws InvalidTheoryException, Narsese.NarseseException, IOException, URISyntaxException {
    // Load a Prolog theory, translate its clauses to NAL via PrologToNAL,
    // assert non-question clauses as beliefs and pose question-goals as
    // eternal questions, then verify the reasoner derives the expected answer.
    Theory t = Theory.resource("../../../resources/prolog/furniture.pl");
    Param.DEBUG = true;

    NAR n = NARS.tmp(6);
    n.questPriDefault.set(1f);
    n.beliefPriDefault.set(0.5f);

    // distinct answer terms, sorted for deterministic iteration
    // (was a raw TreeSet; diamond avoids the unchecked-assignment warning)
    Set<String> answers = new TreeSet<>();

    for (nars.term.Term xx : PrologToNAL.N(t)) {
        if (Op.functor(xx, (xt) -> xt.equals(PrologToNAL.QUESTION_GOAL) ? xt : null) != null) {
            // question-goal clause: extract the wrapped query term and ask it eternally
            Term qTerm = Operator.args(xx).sub(0).normalize();
            n.question(qTerm, ETERNAL, (q, a) -> {
                // record/report each distinct answer only once
                if (answers.add(a.term().toString())) {
                    System.err.println(q + " " + a);
                    System.err.println(a.proof());
                }
            });
        } else {
            // ordinary clause: assert as belief
            n.believe(xx.normalize());
        }
    }

    n.run(2500);

    // expected derivation of: ?- made_of(your_chair,X), colour(X,Colour).
    assertTrue(answers.contains("(colour(wood,brown)&&made_of(your_chair,wood))"));
}
Use of nars.time.Tense.ETERNAL in project narchy by automenta:
class NAct, method actionBipolarFrequencyDifferential.
/**
 * Creates a bipolar action: a pair of opposing action concepts, (s --> PLUS) and
 * (s --> NEG). Each update cycle, the difference of the two goals' expectations is
 * collapsed into one control value x in [-1..+1], passed to 'update', and the
 * resulting y is fed back as belief truth to both pole concepts.
 *
 * @param s                        base term naming the action
 * @param fair                     if true, scale x by the coherence (confidence balance) of the two goals
 * @param latchPreviousIfUndecided if true, reuse the previous x when neither pole's goal
 *                                 confidence reaches confMin; otherwise emit 0
 * @param update                   maps the decided x in [-1..+1] to the executed value y;
 *                                 a NaN result suppresses feedback for this cycle
 * @return the two action concepts: index 0 = positive pole, index 1 = negative pole
 */
default GoalActionAsyncConcept[] actionBipolarFrequencyDifferential(@NotNull Term s, boolean fair, boolean latchPreviousIfUndecided, @NotNull FloatToFloatFunction update) {
// positive-pole term: (s --> PLUS)
Term pt = // $.p(s, PLUS);
$.inh(s, PLUS);
// $.prop(s,PLUS);
// $.p(s, ZeroProduct);
// $.p(s,$.the("\"+\""));
// negative-pole term: (s --> NEG)
Term nt = // $.p(s, NEG);
$.inh(s, NEG);
// $.prop(s, NEG);
// $.p(ZeroProduct, s);
// $.p(s,$.the("\"-\""));
// shared mutable state across both poles' callbacks:
// g = goal expectation per pole, c = goal confidence per pole
final float[] g = new float[2];
final float[] c = new float[2];
// ETERNAL marks "no update seen yet"; used to detect a new time step
final long[] lastUpdate = { ETERNAL };
// previous emitted x, for latching when undecided
final float[] lastX = { 0 };
// hack
GoalActionAsyncConcept[] CC = new GoalActionAsyncConcept[2];
// shared callback invoked once per pole per cycle; acts only when BOTH poles have reported
@NotNull BiConsumer<GoalActionAsyncConcept, Truth> u = (action, gg) -> {
NAR n = nar();
long now = n.time();
if (now != lastUpdate[0]) {
lastUpdate[0] = now;
// reset
CC[0] = CC[1] = null;
}
// float freqEps = n.freqResolution.floatValue();
float confMin = n.confMin.floatValue();
// float eviMin = c2wSafe(confMin);
// each pole gets half the default belief evidence so the pair sums to the default
float feedbackConf = // fairly shared to sum to default
w2c(c2w(n.confDefault(BELIEF)) / 2f);
// n.confDefault(BELIEF);
// n.confDefault(GOAL);
// confMin * ...;
// identify which pole is reporting and record its goal truth (0 if no goal)
boolean p = action.term().equals(pt);
int ip = p ? 0 : 1;
CC[ip] = action;
g[ip] = gg != null ? // gg.freq()
gg.expectation() : 0f;
// 0.5f;
c[ip] = gg != null ? // gg.evi()
gg.conf() : 0f;
// -1..+1
float x;
// NOTE(review): 'curious' is assigned in both branches but never read afterward
boolean curious;
if (CC[0] != null && CC[1] != null) /* both ready */
{
// coherence: how equally confident the two opposing goals are (cMin/cMax in [0..1])
float cMax = Math.max(c[0], c[1]);
float cMin = Math.min(c[0], c[1]);
float coherence = cMin / cMax;
Random rng = n.random();
float cur = curiosity().floatValue();
if (cur > 0 && rng.nextFloat() <= cur) {
// curiosity: override with a uniformly random x in [-1..+1]
x = (rng.nextFloat() - 0.5f) * 2f;
// float curiEvi =
// //c2w(n.confDefault(BELIEF));
// //eviMin*2;
// Math.max(c2wSafe(w2cSafe(eviMin)*2), Util.mean(c[0], c[1])); //match desire conf, min=2*minConf
c[0] = c[1] = feedbackConf;
coherence = 1f;
curious = true;
} else {
curious = false;
if (cMax < confMin) {
// undecided: neither pole's goal is confident enough
if (latchPreviousIfUndecided) {
x = lastX[0];
} else {
x = 0;
}
} else {
// //expectation
// float g0 = g[0]-0.5f;
// float g1 = g[1]-0.5f;
// df = 2f * ((g0) - (g1));
// // /Math.max(Math.abs(g0), Math.abs(g1));
// frequency -======================
// A. subtraction
// subtract
// decided: signed difference of pole expectations
x = ((g[0] - g[1]));
// experimental: lessen by a factor of how equally confident each goal is
if (fair) {
// fully fair
x *= coherence;
// x *= Math.sqrt(coherence); //less sharp than linear
// semi-fair
// df *= 0.5f + 0.5f * (eMin / eMax); //reduction by at most half
}
// df *= 1f - Math.abs(e[0] - e[1]) / eMax;
// df *= Util.sqr(eMin / eMax); //more cautious
// df *= Math.min(w2cSafe(e[0]), w2cSafe(e[1])) / w2cSafe(eMax);
}
}
x = Util.clamp(x, -1f, +1f);
lastX[0] = x;
// -1..+1
// delegate to the caller; y is the actually-executed value
float y = update.valueOf(x);
// System.out.println(x + " " + y);
// w2c(Math.abs(y) * c2w(restConf));
PreciseTruth Nb, Ng, Pb, Pg;
// y == y filters NaN: NaN means "no feedback this cycle"
if (y == y) {
// y: (-1..+1)
// split y into complementary pole frequencies yp + yn == 1
float yp, yn;
if (Math.abs(y) >= n.freqResolution.floatValue()) {
yp = 0.5f + y / 2f;
yn = 1f - yp;
} else {
// below frequency resolution: treat as neutral
yp = yn = 0.5f;
}
// float yp = 0.5f + y/2f;
// float yn = 1f - yp;
float pbf = yp;
float nbf = yn;
Pb = $.t(pbf, feedbackConf);
Nb = $.t(nbf, feedbackConf);
// float goalEvi =
// eviMin;
// //max(eviMin, max(e[0], e[1]));
// Pg = curious || e[0] == 0 ? new PreciseTruth(yp, goalEvi, false) : null;
// Ng = curious || e[1] == 0 ? new PreciseTruth(yn, goalEvi, false) : null;
// float confBase = confMin*4; //~ alpha, learning rate
// float fThresh = Float.MIN_NORMAL;
// float yp = y > +fThresh ? Util.lerp(+y, confBase, feedbackConf) : confBase;
// float yn = y < -fThresh ? Util.lerp(-y, confBase, feedbackConf) : confBase;
// Pb = $.t(y > +fThresh ? 1 : 0, y > +fThresh ? yp : feedbackConf - yp);
// Nb = $.t(y < -fThresh ? 1 : 0, y < -fThresh ? yn : feedbackConf - yn);
// //Pg = curious || e[0] == 0 ? new PreciseTruth(1, Util.lerp(+y, confMin2, feedbackConf)) : null;
// Pg = null;
// //Ng = curious || e[1] == 0 ? new PreciseTruth(1, Util.lerp(-y, confMin2, feedbackConf)) : null;
// Ng = null;
// float fThresh = nar().freqResolution.floatValue();
// int sign = (y > fThresh ? +1 : (y < -fThresh ? -1 : 0));
//
// float feedConf =
// w2cSafe(c2wSafe(goalConf)/2f); //half/half
// //goalConf;
// //Math.max(confMin, goalConf * coherence);
// switch (sign) {
// case +1:
// //Pb = $.t(1f, Util.lerp(+y, confBase, feedbackConf));
// Pb = $.t(y/2f + 0.5f, feedConf);
// Nb =
// //null;
// $.t(0, feedConf);
// break;
// case -1:
// Pb =
// //null;
// $.t(0, feedConf);
//
// Nb = $.t(-y/2f + 0.5f, feedConf);
// break;
// case 0:
// //Pb = Nb = null; //no signal
// Pb = Nb = $.t(0, feedConf);
// //Math.max(confMin, feedConf);
// //w2cSafe(c2wSafe(feedConf)/2f))); //zero
// break;
// default:
// throw new UnsupportedOperationException();
// }
// goal feedback currently disabled; only beliefs are fed back
Pg = null;
Ng = null;
// if (curious) {
// e[0] = e[1] = 0; //reset to get full evidence override
// }
// float g0 = eviMax - e[0];
// Pg = g0 >= eviMin ? new PreciseTruth(yp, g0, false) : null;
// float g1 = eviMax - e[1];
// Ng = g1 >= eviMin ? new PreciseTruth(yn, g1, false) : null;
} else {
Pb = Nb = Pg = Ng = null;
}
// System.out.println(Pb + "," + Nb + " <- " + g[0] + ";" + c[0] + ", " + g[1] + ';' + c[1]);
// deliver (possibly null) belief/goal feedback to both poles
CC[0].feedback(Pb, Pg, n);
CC[1].feedback(Nb, Ng, n);
}
};
// both pole concepts share one cause channel and the same callback
CauseChannel<ITask> cause = nar().newCauseChannel(s);
GoalActionAsyncConcept p = new GoalActionAsyncConcept(pt, this, cause, u);
GoalActionAsyncConcept n = new GoalActionAsyncConcept(nt, this, cause, u);
addAction(p);
addAction(n);
// assert that the action is not simultaneously PLUS and NEG
nar().believe($.inh(s, SECTe.the(PLUS, NEG)).neg(), Tense.Eternal);
CC[0] = p;
CC[1] = n;
return CC;
}
Use of nars.time.Tense.ETERNAL in project narchy by automenta:
class DeriveTime, method solveMerged.
/**
 * Builds a resolver that picks one (term, time-interval) pair from multiple
 * candidate temporal solutions, writing the chosen start/end into the supplied
 * long[2] and returning the chosen term.
 *
 * Timeless solutions are discarded; per-term occurrence intervals within 'dur'
 * of each other are coalesced into one stretched interval, and an eternal
 * occurrence is dropped when temporal ones exist for the same term. If more
 * than one candidate interval survives, the resolver picks one at random.
 *
 * @param solutions candidate events (term + occurrence time)
 * @param dur       duration window within which adjacent occurrences are merged
 * @return a resolver, or null when there are <= 1 solutions (caller uses the
 *         only solution by default) or none of them has a definite time
 */
@Nullable
Function<long[], Term> solveMerged(ArrayHashSet<Event> solutions, int dur) {
    int ss = solutions.size();
    // callee will use the only solution by default
    if (ss <= 1)
        return null;

    // group point intervals [w,w] by term, time-sorted per term
    SortedSetMultimap<Term, LongLongPair> m = MultimapBuilder.hashKeys(ss).treeSetValues().build();
    solutions.forEach(x -> {
        long w = x.when();
        if (w != TIMELESS)
            m.put(x.id, PrimitiveTuples.pair(w, w));
    });

    int ms = m.size();
    switch(ms) {
        case 0:
            // every solution was timeless
            return null;
        case 1:
            // single timed solution: return it directly
            Map.Entry<Term, LongLongPair> ee = m.entries().iterator().next();
            LongLongPair ww = ee.getValue();
            long s = ww.getOne();
            long e = ww.getTwo();
            return (w) -> {
                w[0] = s;
                w[1] = e;
                return ee.getKey();
            };
    }

    // was a raw FasterList; diamond avoids the unchecked-assignment warning
    FasterList<Pair<Term, long[]>> choices = new FasterList<>(ms);
    // coalesce adjacent events
    m.asMap().forEach((t, cw) -> {
        int cws = cw.size();
        if (cws > 1) {
            // copy the time-ordered pairs into mutable [start,end] rows
            long[][] ct = new long[cws][2];
            int i = 0;
            for (LongLongPair p : cw) {
                long[] cc = ct[i++];
                cc[0] = p.getOne();
                cc[1] = p.getTwo();
            }
            // TODO more complete comparison
            long[] prev = ct[0];
            for (int j = 1; j < cws; j++) {
                long[] next = ct[j];
                if (prev[0] == ETERNAL) {
                    // ETERNAL sorts first, so it can only be the initial row
                    assert (j == 1);
                    assert (ct[0][0] == ETERNAL);
                    // ignore eternal solution amongst other temporal solutions
                    ct[0] = null;
                } else if (Math.abs(prev[1] - next[0]) <= dur) {
                    // stretch: merge 'next' into 'prev' when within dur of it
                    prev[1] = next[1];
                    ct[j] = null;
                    continue;
                }
                prev = next;
            }
            // keep the surviving (non-null) merged intervals
            for (int j = 0; j < cws; j++) {
                long[] nn = ct[j];
                if (nn != null)
                    choices.add(pair(t, nn));
            }
        } else {
            // single occurrence for this term: keep as-is
            LongLongPair f = ((SortedSet<LongLongPair>) cw).first();
            choices.add(pair(t, new long[] { f.getOne(), f.getTwo() }));
        }
    });

    if (choices.size() > 1) {
        // random fallback
        return (w) -> {
            // presumably FasterList.get(Random) selects a uniformly random element — TODO confirm
            Pair<Term, long[]> pp = choices.get(d.random);
            long[] cw = pp.getTwo();
            w[0] = cw[0];
            w[1] = cw[1];
            return pp.getOne();
        };
    } else {
        // exactly one surviving choice (each grouped term contributes at least one interval)
        Pair<Term, long[]> c = choices.get(0);
        long[] cw = c.getTwo();
        Term cct = c.getOne();
        return (w) -> {
            w[0] = cw[0];
            w[1] = cw[1];
            return cct;
        };
    }
}
Use of nars.time.Tense.ETERNAL in project narchy by automenta:
class NAR, method stats.
/**
 * Creates a snapshot statistics object: per-concept table sizes, link counts,
 * volume/op/class/state distributions, plus loop and term-cache summaries,
 * keyed by human-readable metric names.
 *
 * TODO extract a Method Object holding the snapshot stats with the instances created below as its fields
 *
 * @return a sorted map of metric name to value
 */
public SortedMap<String, Object> stats() {
    // task-table sizes aggregated across all concepts
    LongSummaryStatistics beliefs = new LongSummaryStatistics();
    LongSummaryStatistics goals = new LongSummaryStatistics();
    LongSummaryStatistics questions = new LongSummaryStatistics();
    LongSummaryStatistics quests = new LongSummaryStatistics();

    // distributions of link counts, concept metadata and term volume
    Histogram termlinkCount = new Histogram(1);
    Histogram tasklinkCount = new Histogram(1);
    HashBag clazz = new HashBag();
    HashBag policy = new HashBag();
    HashBag rootOp = new HashBag();
    ShortCountsHistogram volume = new ShortCountsHistogram(2);

    // was a raw TreeMap; diamond avoids the unchecked-assignment warning
    SortedMap<String, Object> x = new TreeMap<>();

    // lock the executor so the concept index is not mutated mid-snapshot
    synchronized (exe) {
        // Functor concepts are built-ins, not knowledge; exclude them
        concepts().filter(xx -> !(xx instanceof Functor)).forEach(c -> {
            volume.recordValue(c.volume());
            rootOp.add(c.op());
            clazz.add(c.getClass().toString());

            ConceptState p = c.state();
            policy.add(p != null ? p.toString() : "null");

            termlinkCount.recordValue(c.termlinks().size());
            tasklinkCount.recordValue(c.tasklinks().size());

            beliefs.accept(c.beliefs().size());
            goals.accept(c.goals().size());
            questions.accept(c.questions().size());
            quests.accept(c.quests().size());
        });

        if (loop.isRunning()) {
            loop.stats("loop", x);
        }
        x.put("time", time());
        x.put("concept count", concepts.size());
    }

    x.put("belief count", ((double) beliefs.getSum()));
    x.put("goal count", ((double) goals.getSum()));

    Util.decode(tasklinkCount, "tasklink count", 4, x::put);
    x.put("tasklink total", ((double) tasklinkCount.getTotalCount()));
    Util.decode(termlinkCount, "termlink count", 4, x::put);
    x.put("termlink total", ((double) termlinkCount.getTotalCount()));

    Util.toMap(policy, "concept state", x::put);
    Util.toMap(rootOp, "concept op", x::put);
    Util.decode(volume, "concept volume", 4, x::put);
    Util.toMap(clazz, "concept class", x::put);

    x.put("term cache (eternal)", Op.cache.summary());
    x.put("term cache (temporal)", Op.cacheTemporal.summary());
    return x;
}
Aggregations