Use of nars.concept.Concept in project narchy by automenta.
The class Recog2D, method conceptTraining.
Surface conceptTraining(BeliefVector tv, NAR nar) {

    // LinkedHashMap<TaskConcept, BeliefVector.Neuron> out = tv.out;

    Plot2D p;
    int history = 256;

    Gridding g = new Gridding(
        p = new Plot2D(history, Plot2D.Line).add("Reward", () -> reward),
        new AspectAlign(new CameraSensorView(sp, this), AspectAlign.Align.Center, sp.width, sp.height),
        new Gridding(beliefTableCharts(nar, List.of(tv.concepts), 16)),
        new Gridding(IntStream.range(0, tv.concepts.length).mapToObj(i ->
            new spacegraph.space2d.widget.text.Label(String.valueOf(i)) {

                @Override
                protected void paintBelow(GL2 gl) {
                    Concept c = tv.concepts[i];
                    BeliefVector.Neuron nn = tv.neurons[i];

                    float freq, conf;
                    Truth t = nar.beliefTruth(c, nar.time());
                    if (t != null) {
                        conf = t.conf();
                        freq = t.freq();
                    } else {
                        conf = nar.confMin.floatValue();
                        float defaultFreq = 0.5f; // interpret no-belief as "maybe"
                        // Float.NaN // use NaN to force learning of negation as separate from no-belief
                        freq = defaultFreq;
                    }

                    Draw.colorBipolar(gl, 2f * (freq - 0.5f));
                    float m = 0.5f * conf;
                    Draw.rect(gl, bounds);

                    if (tv.verify) {
                        float error = nn.error;
                        if (error != error) {
                            // training phase
                            // Draw.rect(gl, m / 2, m / 2, 1 - m, 1 - m);
                        } else {
                            // verification: draw background/border
                            // gl.glColor3f(error, 1f - error, 0f);
                            //
                            // float fontSize = 0.08f;
                            // gl.glColor3f(1f, 1f, 1f);
                            // Draw.text(gl, c.term().toString(), fontSize, m / 2, 1f - m / 2, 0);
                            // Draw.text(gl, "err=" + n2(error), fontSize, m / 2, m / 2, 0);
                        }
                    }
                }
            }).toArray(Surface[]::new)));

    final int[] frames = { 0 };
    onFrame(() -> {
        if (frames[0]++ % imagePeriod == 0) {
            nextImage();
        }

        redraw();

        // if (neural.get()) {
        //     if (nar.time() < trainFrames) {
        outs.expect(image);

        if (neural.get()) {
            train.update(mlpLearn, mlpSupport);
        }

        p.update();
        // s.update();
    });

    return g;
}
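For orientation, a minimal usage sketch of the view built above. Everything outside the snippet is an assumption: a Recog2D instance named r, a BeliefVector field named beliefs, and spacegraph's SpaceGraph.window helper; treat it as a sketch of how the Surface might be shown, not the project's actual wiring.

Surface view = r.conceptTraining(r.beliefs, nar); // hypothetical instance and field names
spacegraph.SpaceGraph.window(view, 800, 600);     // assumes a SpaceGraph.window(Surface, w, h) helper is available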
Use of nars.concept.Concept in project narchy by automenta.
The class NAR, method stats.
/**
* creates a snapshot statistics object
* TODO extract a Method Object holding the snapshot stats with the instances created below as its fields
*/
public SortedMap<String, Object> stats() {

    LongSummaryStatistics beliefs = new LongSummaryStatistics();
    LongSummaryStatistics goals = new LongSummaryStatistics();
    LongSummaryStatistics questions = new LongSummaryStatistics();
    LongSummaryStatistics quests = new LongSummaryStatistics();

    Histogram termlinkCount = new Histogram(1);
    Histogram tasklinkCount = new Histogram(1);

    // Frequency complexity = new Frequency();
    HashBag clazz = new HashBag();
    HashBag policy = new HashBag();
    HashBag rootOp = new HashBag();
    ShortCountsHistogram volume = new ShortCountsHistogram(2);

    // AtomicInteger i = new AtomicInteger(0);
    // LongSummaryStatistics termlinksCap = new LongSummaryStatistics();
    // LongSummaryStatistics tasklinksCap = new LongSummaryStatistics();

    SortedMap<String, Object> x = new TreeMap<>();

    synchronized (exe) {

        concepts().filter(xx -> !(xx instanceof Functor)).forEach(c -> {

            // complexity.addValue(c.complexity());
            volume.recordValue(c.volume());
            rootOp.add(c.op());
            clazz.add(c.getClass().toString());

            ConceptState p = c.state();
            policy.add(p != null ? p.toString() : "null");

            // termlinksCap.accept(c.termlinks().capacity());
            termlinkCount.recordValue(c.termlinks().size());

            // tasklinksCap.accept(c.tasklinks().capacity());
            tasklinkCount.recordValue(c.tasklinks().size());

            beliefs.accept(c.beliefs().size());
            goals.accept(c.goals().size());
            questions.accept(c.questions().size());
            quests.accept(c.quests().size());
        });

        // x.put("time real", new Date());

        if (loop.isRunning()) {
            loop.stats("loop", x);
        }

        x.put("time", time());
        // x.put("term index", terms.summary());
        x.put("concept count", concepts.size());
    }

    x.put("belief count", ((double) beliefs.getSum()));
    x.put("goal count", ((double) goals.getSum()));

    Util.decode(tasklinkCount, "tasklink count", 4, x::put);
    // x.put("tasklink usage", ((double) tasklinkCount.getTotalCount()) / tasklinksCap.getSum());
    x.put("tasklink total", ((double) tasklinkCount.getTotalCount()));

    Util.decode(termlinkCount, "termlink count", 4, x::put);
    // x.put("termlink usage", ((double) termlinkCount.getTotalCount()) / termlinksCap.getSum());
    x.put("termlink total", ((double) termlinkCount.getTotalCount()));

    // DoubleSummaryStatistics pos = new DoubleSummaryStatistics();
    // DoubleSummaryStatistics neg = new DoubleSummaryStatistics();
    // causes.forEach(c -> pos.accept(c.pos()));
    // causes.forEach(c -> neg.accept(c.neg()));
    // x.put("value count", pos.getCount());
    // x.put("value pos mean", pos.getAverage());
    // x.put("value pos min", pos.getMin());
    // x.put("value pos max", pos.getMax());
    // x.put("value neg mean", neg.getAverage());
    // x.put("value neg min", neg.getMin());
    // x.put("value neg max", neg.getMax());

    // x.put("volume mean", volume.);
    //
    // x.put("termLinksCapacity", termlinksCap);
    // x.put("taskLinksUsed", tasklinksUsed);
    // x.put("taskLinksCapacity", tasklinksCap);

    Util.toMap(policy, "concept state", x::put);
    Util.toMap(rootOp, "concept op", x::put);
    Util.decode(volume, "concept volume", 4, x::put);
    Util.toMap(clazz, "concept class", x::put);

    x.put("term cache (eternal)", Op.cache.summary());
    x.put("term cache (temporal)", Op.cacheTemporal.summary());

    return x;
}
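A minimal usage sketch for the snapshot, assuming only a constructed NAR instance named n; since stats() returns a SortedMap, iterating it prints the metrics in key order.

SortedMap<String, Object> snapshot = n.stats();
snapshot.forEach((key, value) -> System.out.println(key + " = " + value)); // one metric per line, sorted by key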
Use of nars.concept.Concept in project narchy by automenta.
The class TaskLinkCurveBag, method compress.
private void compress(Conj e, byte punc, NAR nar) {

    int maxVol = nar.termVolumeMax.intValue();

    e.event.forEachKeyValue((when, what) -> {

        int eventCount = Conj.eventCount(what);
        if (eventCount > 1 && eventCount < MAX_EVENTS) /* TODO allow choosing subset of events from a single time */ {

            Term c = e.term(when);
            if (c.volume() < maxVol && Task.validTaskTerm(c, punc, true)) {

                FasterList<TaskLink.GeneralTaskLink> removed = new FasterList(eventCount);
                final float[] pri = { 0 };

                synchronized (items) {

                    e.forEachTerm(what, (Term t) -> {
                        TaskLink.GeneralTaskLink key = new TaskLink.GeneralTaskLink(t, punc, when, 0);
                        TaskLink.GeneralTaskLink r = (TaskLink.GeneralTaskLink) remove(key);
                        if (r != null) {
                            pri[0] += r.priElseZero();
                            removed.addIfNotNull(r);
                        }
                    });

                    if (removed.size() > 1) {
                        Concept cc = nar.conceptualize(c);
                        if (cc != null) {
                            put(new TaskLink.GeneralTaskLink(c, punc, Tense.dither(when, nar), pri[0]));
                            // SUCCESS
                            return;
                        }
                    }

                    // FAIL: re-insert, sorry to waste your time
                    removed.forEach(this::put);
                }
            }
        }
    });
}
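The method above is essentially a merge-or-rollback transaction: the component tasklinks for one time window are removed while their priority is summed, a single merged link carrying that priority is inserted on success, and the removed links are re-inserted on failure. A self-contained sketch of that pattern, with plain Java collections standing in for the bag (all names here are hypothetical, not the narchy API):

import java.util.ArrayList;
import java.util.List;
import java.util.Map;

class MergeOrRollbackDemo {
    /** remove the given keys, then either insert one merged entry with the summed priority or restore the originals */
    static void merge(Map<String, Float> bag, List<String> componentKeys, String mergedKey, boolean canMerge) {
        List<Map.Entry<String, Float>> removed = new ArrayList<>();
        float pri = 0;
        for (String k : componentKeys) {
            Float p = bag.remove(k);          // remove each component link
            if (p != null) {
                pri += p;                     // accumulate its priority
                removed.add(Map.entry(k, p));
            }
        }
        if (removed.size() > 1 && canMerge) {
            bag.put(mergedKey, pri);          // SUCCESS: one merged entry carries the summed priority
            return;
        }
        removed.forEach(e -> bag.put(e.getKey(), e.getValue())); // FAIL: re-insert what was removed
    }
}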
Use of nars.concept.Concept in project narchy by automenta.
The class TermlinkTemplates, method activate.
public void activate(Concept src, float budgeted, NAR nar) {

    int n = this.size();
    if (n == 0)
        return;

    float budgetedToEach = budgeted / n;
    // if (budgetedToEach < Pri.EPSILON)
    //     return;

    MutableFloat refund = new MutableFloat(0);
    // int nextTarget = nar.random().nextInt(n);

    Term srcTerm = src.term();
    Bag<Term, PriReference<Term>> srcTermLinks = src.termlinks();
    float balance = nar.termlinkBalance.floatValue();

    for (int i = 0; i < n; i++) {

        Term tgtTerm = get(i);

        boolean reverseLinked = false;
        boolean conceptualizable = i < concepts;
        if (conceptualizable) {

            @Nullable Concept tgt = nar.conceptualize(tgtTerm);

            float budgetedForward = budgetedToEach * (1f - balance);
            float budgetedReverse = budgetedToEach * balance;

            if (tgt != null) {
                // insert termlink
                tgt.termlinks().put(new PLink<>(srcTerm, budgetedForward), refund);

                nar.activate(tgt, budgetedForward); // budgetedToEach

                reverseLinked = true;

                // use the concept's id
                tgtTerm = tgt.term();
            }

            if (!reverseLinked)
                refund.add(budgetedForward);

            ((Bag) srcTermLinks).put(new PLink(tgtTerm, budgetedReverse), refund);
        }

        // float r = refund.floatValue();
        // float cost = budgeted - r;
        // return cost;
    }
}
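To make the budget split concrete, a small sketch with illustrative numbers (not taken from the project): the per-template share is divided by termlinkBalance into a forward part for the target's termlinks and a reverse part for the source's termlinks.

float budgeted = 0.8f, balance = 0.25f;                  // illustrative values only
int n = 4;                                               // number of templates
float budgetedToEach = budgeted / n;                     // 0.2 per template
float budgetedForward = budgetedToEach * (1f - balance); // 0.15 -> tgt.termlinks(), refunded if tgt is null
float budgetedReverse = budgetedToEach * balance;        // 0.05 -> src.termlinks()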
Use of nars.concept.Concept in project narchy by automenta.
The class QuerySpider, method spider.
/**
 * resource-constrained breadth-first search
 */
private AdjGraph<Term, Float> spider(NAR nar, Termed t, AdjGraph<Term, Float> g, int recurse) {

    Term tt = t.term();

    if (tt.op().conceptualizable && g.addIfNew(tt) && recurse > 0) {

        Concept c = nar.conceptualize(t);
        if (c == null)
            return g;

        Bag<Term, PriReference<Term>> tl = c.termlinks();
        if (!tl.isEmpty()) {
            TermGraph.termlink(nar, tl.stream().map(PriReference::get), g);
        } else {
            TermGraph.termlink(nar, c.templates().stream(), g);
        }

        g.nodes.forEachKey(k -> spider(nar, k.v, g, recurse - 1));

    } else {
        g.addNode(tt);
    }

    return g;
}
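A hypothetical invocation sketch, using only the AdjGraph members already visible above (nodes.forEachKey and a node's v field); the no-argument AdjGraph constructor, the seed term, and the depth value are assumptions for illustration:

AdjGraph<Term, Float> g = spider(nar, seed, new AdjGraph<>(), 2); // seed is some Termed; constructor and depth are assumed
g.nodes.forEachKey(k -> System.out.println(k.v));                 // list the terms the spider reached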