Use of jcog.pri.VLink in project narchy by automenta.
The class ConjClustering, method conjoinCentroid:
// /**
// * produces a parallel conjunction term consisting of all the tasks' terms
// */
// public Stream<List<Task>> chunk(Stream<Task> input, int maxComponentsPerTerm, int maxVolume) {
// final int[] group = {0};
// final int[] subterms = {0};
// final int[] currentVolume = {0};
// final float[] currentConf = {1};
// return input.filter(x -> !x.isDeleted())
// .collect(Collectors.groupingBy(x -> {
//
// int v = x.volume();
// float c = x.conf();
//
// if ((subterms[0] >= maxComponentsPerTerm) || (currentVolume[0] + v >= maxVolume) || (currentConf[0] * c < confMin)) {
// //next group
// group[0]++;
// subterms[0] = 1;
// currentVolume[0] = v;
// currentConf[0] = c;
// } else {
// subterms[0]++;
// currentVolume[0] += v;
// currentConf[0] *= c;
// }
//
// return group[0];
// }))
// .entrySet().stream()
// .map(c -> {
// List<Task> v = c.getValue();
// return c.getKey() >= 0 && //ignore the -1 discard group
// v.size() > 1 ? v : null; //only batches of >1
// })
// .filter(Objects::nonNull);
//
// }
// static final BiFunction<Task, Task, Task> termPointMerger = (prevZ, newZ) -> ((prevZ == null) || (newZ.conf() >= prevZ.conf())) ?
// newZ : prevZ;
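/**
 * Assembles this centroid's tasks into parallel conjunction tasks: tasks are
 * batched greedily until a volume (volMax), confidence (confMin) or per-centroid
 * batch count (taskLimitPerCentroid) limit is reached, and each batch is conjoined
 * into one dithered STMClusterTask appended to the output list in narAndTarget.
 */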
private List<Task> conjoinCentroid(Stream<VLink<Task>> group, Pair<NAR, List<Task>> narAndTarget) {
NAR nar = narAndTarget.getOne();
// get only the maximum confidence task for each term at its given starting time
// in.input(
// chunk(group.filter(Objects::nonNull).takeWhile(kontinue)
// .map(x -> x.id), maxConjSize, volMax).takeWhile(kontinue).map(tasks -> {
Iterator<VLink<Task>> gg = group.filter(x -> x != null && !x.isDeleted()).iterator();
// Iterators.peekingIterator();
Map<LongObjectPair<Term>, Task> vv = new HashMap<>();
FasterList<Task> actualTasks = new FasterList<>();
int centroidGen = 0;
List<Task> gen = narAndTarget.getTwo();
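// each pass of this loop greedily assembles one batch of compatible tasks
// from the centroid and emits it as a single parallel conjunction task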
main: while (gg.hasNext() && centroidGen < taskLimitPerCentroid) {
vv.clear();
actualTasks.clear();
long end = Long.MIN_VALUE;
long start = Long.MAX_VALUE;
int dur = nar.dur();
float freq = 1f;
float conf = 1f;
float priMax = Float.NEGATIVE_INFINITY, priMin = Float.POSITIVE_INFINITY;
int vol = 0;
int maxVolume = 0;
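// accumulate tasks into the current batch, keying each component by its
// dithered occurrence time and (possibly negated) term, until the volume
// or confidence budget is exhausted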
do {
if (!gg.hasNext())
break;
Task t = gg.next().id;
// gg.peek().id;
Term xt = t.term();
long zs = Tense.dither(t.start(), ditherTime);
long ze = Tense.dither(t.end(), ditherTime);
// assert (end >= start);
Truth tx = t.truth();
Term xtn = xt.neg();
if (tx.isNegative()) {
xt = xtn;
}
int xtv = xt.volume();
maxVolume = Math.max(maxVolume, xtv);
if (vol + xtv + 1 >= volMax || conf * tx.conf() < confMin) {
// can't add this task without exceeding the volume or confidence budget; skip it and try the next one
continue;
}
boolean involved = false;
LongObjectPair<Term> ps = pair(zs, xt);
Term xtNeg = xt.neg();
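// admit the (time, term) point only if its negation is not already present
// at the same time, which would collapse the conjunction to false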
if (!vv.containsKey(pair(zs, xtNeg)) && null == vv.putIfAbsent(ps, t)) {
vol += xtv;
if (start > zs)
start = zs;
if (end < zs)
end = zs;
involved = true;
}
if (ze - zs >= dur) {
// endpoint
if (vol + xtv + 1 < volMax) {
LongObjectPair<Term> pe = pair(ze, xt);
if (!vv.containsKey(pair(ze, xtNeg)) && null == vv.putIfAbsent(pe, t)) {
// end point, if different from start
vol += xtv;
if (end < ze)
end = ze;
involved = true;
}
}
}
if (involved) {
actualTasks.add(t);
conf *= tx.conf();
float tf = tx.freq();
// since it will appear as a negated subterm
freq *= tx.isNegative() ? (1f - tf) : tf;
float p = t.priElseZero();
if (p < priMin)
priMin = p;
if (p > priMax)
priMax = p;
}
} while (vol < volMax - 1 && conf > confMin);
int vs = actualTasks.size();
if (vs < 2)
continue;
// the tasks which are actually involved
Task[] uu = actualTasks.toArrayRecycled(Task[]::new);
// TODO discount based on evidential overlap? needs N-way overlapFraction function
ObjectFloatPair<long[]> evidence = Stamp.zip(actualTasks, Param.STAMP_CAPACITY);
float overlap = evidence.getTwo();
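// convert the combined confidence to evidence weight, discounted by the stamp overlap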
float e = c2w(conf) * Param.overlapFactor(overlap);
if (e > 0) {
final Truth t = Truth.theDithered(freq, e, nar);
if (t != null) {
Term cj = Conj.conj(vv.keySet());
if (cj != null) {
cj = cj.normalize();
if (Math.abs(cj.dtRange() - (end - start)) < ditherTime) {
// test whether a merge collapse occurred and the occurrence time needs to be recalculated
ObjectBooleanPair<Term> cp = Task.tryContent(cj, punc, true);
if (cp != null) {
// TODO use a truth calculated specific to this fixed-size batch, not all the tasks combined
NALTask m = new STMClusterTask(cp, t, start, start, evidence.getOne(), punc, now);
// if (evidence.getTwo() > 0) m.setCyclic(true);
m.cause = Cause.sample(Param.causeCapacity.intValue(), uu);
float p = priMin; // alternatives: priMax, or (priMin + priMax) / 2f
// complexity deduction
// how much more complex the conjunction is than the most complex of its ingredients
int v = cp.getOne().volume();
float cmplFactor = ((float) v) / (v + maxVolume);
m.priSet(Priority.fund(p * cmplFactor, true, uu));
gen.add(m);
centroidGen++;
}
} else {
// System.out.println("merge collapse, recalcu");
}
}
}
}
}
return gen.isEmpty() ? null : gen;
}
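For reference, here is a minimal sketch (not part of narchy) of the truth arithmetic the batch loop applies: component frequencies and confidences multiply intersection-style, a negative component contributes 1 - f because it enters the conjunction negated, and the combined confidence is converted to evidence weight before the overlap discount. The w = c / (1 - c) conversion below is a common NAL convention and only an assumption about narchy's c2w.
final class ConjTruthSketch {
    // combine {freq, conf} pairs (freq already polarity-adjusted) into {freq, evidence}
    static float[] combine(float[][] components) {
        float freq = 1f, conf = 1f;
        for (float[] fc : components) {
            freq *= fc[0]; // frequencies multiply
            conf *= fc[1]; // confidences multiply
        }
        float evi = conf / (1f - conf); // assumed c2w form: confidence -> evidence weight
        return new float[]{freq, evi};
    }
}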
Use of jcog.pri.VLink in project narchy by automenta.
The class ChainClustering, method linkClustersChain:
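// walks the tasks in centroid order, linking each task to the previous task of
// the same centroid, so every cluster forms a chain of pairwise links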
protected void linkClustersChain(Stream<VLink<Task>> sortedByCentroidStream, NAR nar) {
List<VLink<Task>> sortedbyCentroid = sortedByCentroidStream.collect(Collectors.toList());
int current = -1;
int nTasks = sortedbyCentroid.size();
VLink<Task> x = null;
for (int i = 0; i < nTasks; i++) {
VLink<Task> y = sortedbyCentroid.get(i);
if (y == null)
continue;
if (y.centroid != current) {
current = y.centroid;
} else {
// link to previous item
Task tx = x.get();
Task ty = y.get();
link(tx, ty);
}
x = y;
}
}