use of nars.task.util.TaskRegion in project narchy by automenta.
the class Revision method mergeTemporal.
/**
 * Assumes the tasks are sorted in descending strength.
 * The input eviMinInteg is the absolute minimum accepted
 * evidence integral (evidence * time) for a point-like result (dtRange == 0).
 */
@Nullable
static Task mergeTemporal(float eviMinInteg, NAR nar, int results, TaskRegion... tt) {
assert (tt.length > 1);
Task first = tt[0].task();
// TODO calculate the temporal density at the same time as this first part to avoid naively generating a sparse result afterward
// TODO combine evidensity with the stamp calculation
// TODO allow evidensity to skip a task in the array and proceed to the next without recursing
LongHashSet evidence = new LongHashSet();
int overlap = 0, totalEv = 0;
int tasks = 0;
boolean termSame = true;
EviDensity density = new EviDensity();
for (int i = 0; i < results; i++) {
TaskRegion ri = tt[i];
if (ri == null)
continue;
Task ti = ri.task();
// assert (!t.isEternal());
long[] ts = ti.stamp();
totalEv += ts.length;
int overlapsToAdd;
if (tasks == 0) {
overlapsToAdd = 0;
} else {
overlapsToAdd = Stamp.overlaps(evidence, ts);
if (overlapsToAdd > 0) {
if (totalEv > 0 && Param.overlapFactor(overlap /* current amount so far */ + overlapsToAdd) < Float.MIN_NORMAL) {
// would cause zero evidence, regardless of whatever truth is calculated later
tt[i] = null;
// skip this one
continue;
}
} else {
if (termSame) {
Term termI = ti.term();
if ((i > 0) && !termI.equals(first.term())) {
// don't do CONJ now because it might produce an occurrence shift which isn't tracked yet
if (tasks == 1 && termI.op() != CONJ) {
// limit termpolation to max 2 tasks for now
// difference in terms
termSame = false;
// TODO loop, don't recurse; just buffer the accumulated evidence changes to the end of the loop
tt = new TaskRegion[] { first, ri };
// cause the for-loop to end after this iteration
i = results;
// continue
} else {
// skip this term, it would conflict with the other 2+ terms which are already known to be the same
tt[i] = null;
// skip this one
continue;
}
}
}
}
}
density.add(ri);
evidence.addAll(ts);
if (tasks > 1)
evidence.compact(); // because it may be compared against frequently
overlap += overlapsToAdd;
tasks++;
}
if (tasks == 1) {
// return the top one, nothing could be merged
return first;
}
// don't settle for anything worse than the first (strongest) task, un-revised
eviMinInteg = Math.max(first.eviInteg(), eviMinInteg);
float overlapFactor = Param.overlapFactor(((float) overlap) / totalEv);
if (overlapFactor < Float.MIN_NORMAL)
return first;
float densityFactor = density.factor();
if (tasks != tt.length)
tt = ArrayUtils.removeNulls(tt, Task[]::new);
long start = density.unionStart;
long end = density.unionEnd;
long range = 1 + (end - start);
Term content;
float differenceFactor = 1f;
if (!termSame) {
Task second = tt[1].task();
float diff = dtDiff(first.term(), second.term());
if (!Float.isFinite(diff))
return null; // impossible
if (diff > 0)
// proportional discount based on the term difference
differenceFactor = (float) Param.evi(1f, diff, Math.max(1, range));
float e1 = first.eviInteg();
float e2 = second.eviInteg();
float firstProp = e1 / (e1 + e2);
content = intermpolate(first.term(), second.term(), firstProp, nar);
if (!Task.validTaskTerm(content))
return first;
} else {
content = first.term();
}
int dur = nar.dur();
Truth truth = new TruthPolation(start, end, dur, tt).truth(true);
if (truth == null)
return first;
float factor = overlapFactor * differenceFactor * densityFactor;
float eAdjusted = truth.evi() * factor;
if ((eAdjusted * range) < eviMinInteg)
return first;
Task t = Task.tryTask(content, first.punc(), truth, (c, tr) -> {
@Nullable PreciseTruth cTruth = tr.dither(nar, factor);
if (cTruth == null)
return null;
// TODO account for relative evidence contributions in the stamp sample
return new NALTask(c, first.punc(), cTruth, nar.time(), start, end,
Stamp.sample(Param.STAMP_CAPACITY, evidence, nar.random()));
});
if (t == null)
return first;
t.priSet(Priority.fund(Util.max((TaskRegion p) -> p.task().priElseZero(), tt), false, Tasked::task, tt));
((NALTask) t).cause = Cause.sample(Param.causeCapacity.intValue(), tt);
if (Param.DEBUG)
t.log("Temporal Merge");
for (TaskRegion x : tt) {
// forward to the revision
x.task().meta("@", (k) -> t);
}
return t;
}
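A simplified, self-contained sketch (not narchy code) of the evidence-overlap discount used above; overlapFactorSketch is a hypothetical linear penalty standing in for Param.overlapFactor, and stamps are plain long[] arrays instead of Task stamps.
import java.util.HashSet;
import java.util.Set;
class OverlapDiscountSketch {
    /** hypothetical linear penalty: 1 at no overlap, 0 at total overlap */
    static float overlapFactorSketch(float overlapFraction) {
        return Math.max(0f, 1f - overlapFraction);
    }
    /** accumulate stamp evidence, skipping any stamp whose overlap would zero the result */
    static float mergeOverlapFactor(long[][] stamps) {
        Set<Long> evidence = new HashSet<>();
        int overlap = 0, totalEv = 0;
        for (long[] stamp : stamps) {
            totalEv += stamp.length;
            int overlapsToAdd = 0;
            for (long e : stamp)
                if (evidence.contains(e))
                    overlapsToAdd++;
            if (!evidence.isEmpty()
                    && overlapFactorSketch((overlap + overlapsToAdd) / (float) totalEv) < Float.MIN_NORMAL) {
                totalEv -= stamp.length; // skip this stamp entirely
                continue;
            }
            for (long e : stamp)
                evidence.add(e);
            overlap += overlapsToAdd;
        }
        return overlapFactorSketch(totalEv == 0 ? 0f : ((float) overlap) / totalEv);
    }
    public static void main(String[] args) {
        // stamps {1,2} and {2,3} share evidence id 2; {4} is disjoint: factor = 1 - 1/5 = 0.8
        System.out.println(mergeOverlapFactor(new long[][] { { 1, 2 }, { 2, 3 }, { 4 } }));
    }
}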
use of nars.task.util.TaskRegion in project narchy by automenta.
the class Revision method mergeTemporal.
@Nullable
public static Task mergeTemporal(NAR nar, long start, long end, FasterList<TaskRegion> tt) {
// filter the task set:
// mark tasks that do not intersect the [start, end] interval and remove them, unless that would empty the set
RoaringBitmap oob = new RoaringBitmap();
for (int i = 0, ttSize = tt.size(); i < ttSize; i++) {
TaskRegion x = tt.get(i);
if (x == null || !x.intersects(start, end))
oob.add(i);
}
int numRemoved = oob.getCardinality();
if (numRemoved != 0 && numRemoved != tt.size()) {
IntIterator ii = oob.getReverseIntIterator();
while (ii.hasNext()) {
tt.remove(ii.next());
}
}
return mergeTemporal(nar, tt.array(), tt.size());
}
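The interval filter above collects out-of-range indices in a RoaringBitmap and removes them by descending index so earlier indices stay valid; a minimal standalone sketch of that pattern, using a plain ArrayList in place of FasterList (the RoaringBitmap calls are the same ones used above):
import java.util.ArrayList;
import java.util.List;
import org.roaringbitmap.IntIterator;
import org.roaringbitmap.RoaringBitmap;
class ReverseRemovalSketch {
    public static void main(String[] args) {
        List<String> tasks = new ArrayList<>(List.of("a", "b", "c", "d"));
        RoaringBitmap oob = new RoaringBitmap();
        oob.add(1); // pretend "b" and "d" fall outside the query interval
        oob.add(3);
        int numRemoved = oob.getCardinality();
        if (numRemoved != 0 && numRemoved != tasks.size()) {
            IntIterator ii = oob.getReverseIntIterator();
            while (ii.hasNext())
                tasks.remove(ii.next()); // remove index 3, then index 1
        }
        System.out.println(tasks); // prints [a, c]
    }
}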
use of nars.task.util.TaskRegion in project narchy by automenta.
the class RTreeBeliefTable method findEvictable.
private static boolean findEvictable(Space<TaskRegion> tree, Node<TaskRegion, ?> next, Top2<Leaf<TaskRegion>> mergeVictims) {
if (next instanceof Leaf) {
Leaf l = (Leaf) next;
for (Object _x : l.data) {
if (_x == null)
break; // end of list
TaskRegion x = (TaskRegion) _x;
if (((Deleteable) x).isDeleted()) {
// found a deleted task in the leaf; we need look no further
boolean removed = tree.remove(x);
assert (removed);
return false;
}
}
mergeVictims.accept(l);
} else {
// if (next instanceof Branch)
Branch b = (Branch) next;
for (Node ww : b.data) {
if (ww == null)
break; // end of list
else if (!findEvictable(tree, ww, mergeVictims))
return false;
}
}
return true;
}
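The boolean returned by findEvictable encodes a protocol: false means a deleted task was found (and removed), so victim collection should stop immediately. A hypothetical minimal sketch of that short-circuiting depth-first visit; SketchLeaf and SketchBranch are stand-ins, not the R-tree's Leaf and Branch classes.
import java.util.function.Predicate;
interface SketchNode {
    /** returns false to abort the whole traversal, mirroring findEvictable */
    boolean visit(Predicate<Object> continueOnItem);
}
class SketchLeaf implements SketchNode {
    final Object[] data;
    SketchLeaf(Object... data) { this.data = data; }
    public boolean visit(Predicate<Object> continueOnItem) {
        for (Object x : data) {
            if (x == null)
                break; // end of populated slots
            if (!continueOnItem.test(x))
                return false; // e.g. a deleted entry was found and removed
        }
        return true;
    }
}
class SketchBranch implements SketchNode {
    final SketchNode[] data;
    SketchBranch(SketchNode... data) { this.data = data; }
    public boolean visit(Predicate<Object> continueOnItem) {
        for (SketchNode child : data) {
            if (child == null)
                break; // end of populated slots
            if (!child.visit(continueOnItem))
                return false; // propagate the early exit upward
        }
        return true;
    }
}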
use of nars.task.util.TaskRegion in project narchy by automenta.
the class RTreeBeliefTable method mergeOrDelete.
private boolean mergeOrDelete(Space<TaskRegion> treeRW, Top2<Leaf<TaskRegion>> l, FloatFunction<Task> taskStrength, float inputStrength, FloatFunction<TaskRegion> weakestTasks, Consumer<Tasked> added, NAR nar) {
TaskRegion a, b;
Leaf<TaskRegion> la = l.a;
short sa = la.size;
if (sa > 2) {
Top2<TaskRegion> w = new Top2<>(weakestTasks);
la.forEach(w::add);
a = w.a;
b = w.b;
} else if (sa == 2) {
a = la.get(0);
b = la.get(1);
} else {
a = la.get(0);
Leaf<TaskRegion> lb = l.b;
if (lb != null) {
int sb = lb.size();
if (sb > 1) {
Top<TaskRegion> w = new Top<>(weakestTasks);
lb.forEach(w);
b = w.the;
} else if (sb == 1) {
b = lb.get(0);
} else {
// ??
b = null;
}
} else {
b = null;
}
}
assert (a != null);
Task at = a.task();
float aPri = at.pri();
treeRW.remove(at);
if (b != null) {
Task bt = b.task();
if (bt.isDeleted()) {
treeRW.remove(bt);
return true;
} else {
at.meta("@", bt);
}
if (aPri != aPri) // already deleted
return true;
// HACK; TODO remove the term-equality restriction when mergeTemporal fully supports CONJ and Temporal
Task c = (this instanceof Simple || at.term().equals(bt.term())) ?
Revision.mergeTemporal(nar, at, bt) :
Revision.merge(at, bt, nar.time(), c2wSafe(nar.confMin.floatValue()), nar);
if (c != null && !c.equals(a) && !c.equals(b)) {
boolean allowMerge;
if (inputStrength != inputStrength) {
allowMerge = true;
} else {
float strengthRemoved = taskStrength.floatValueOf(at) + taskStrength.floatValueOf(bt);
float strengthAdded = taskStrength.floatValueOf(c) + inputStrength;
allowMerge = strengthAdded >= strengthRemoved;
}
if (allowMerge) {
treeRW.remove(bt);
// forward both deleted tasks to the revision
((NALTask) at).delete(c);
((NALTask) bt).delete(c);
if (treeRW.add(c))
added.accept(c);
return true;
} else {
// merge result is not strong enough
c.delete();
}
}
}
// TODO do this outside of the locked section
if (Param.ETERNALIZE_EVICTED_TEMPORAL_TASKS)
eternalize(at, added, nar);
at.delete();
return true;
}
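A tiny worked example, with made-up strength values, of the allowMerge test above: the revision is only kept if the merged task plus the incoming task are at least as strong as the two victims being removed.
class MergeAcceptanceSketch {
    public static void main(String[] args) {
        float strengthA = 0.40f;      // weakest victim task (at)
        float strengthB = 0.35f;      // second weakest victim task (bt)
        float strengthMerged = 0.60f; // their temporal revision (c)
        float inputStrength = 0.20f;  // the incoming task competing for the freed capacity
        float strengthRemoved = strengthA + strengthB;          // 0.75
        float strengthAdded = strengthMerged + inputStrength;   // 0.80
        boolean allowMerge = strengthAdded >= strengthRemoved;  // true: keep the revision
        System.out.println("allowMerge=" + allowMerge);
    }
}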