Usage example of nars.nal.entity.Concept in project narchy by automenta:
the main method of the class Predict3.
/**
 * Prediction demo: feeds a noisy, partially-missing periodic signal into a NAR
 * reasoner as discretized belief events, and charts (a) the raw observed signal,
 * (b) the reasoner's own future-dated predictions, and (c) an RNN-based belief
 * predictor's per-level outputs.
 *
 * NOTE(review): this method assigns a variable `signal` that is not declared in
 * this excerpt — presumably a static field of the enclosing class; confirm.
 */
public static void main(String[] args) throws Narsese.InvalidInputException, InterruptedException {
    Parameters.DEBUG = true;

    // --- experiment parameters ---
    int duration = 8;                     // NAR temporal duration window (cycles)
    float freq = 1.0f / duration * 0.15f; // frequency of the synthetic input signal
    int minCyclesAhead = 0;               // how far past "now" a task must be dated to count as a prediction
    double missingDataRate = 0.1;         // probability a sample is dropped from the observed chart
    double noiseRate = 0.02;              // multiplicative noise amplitude applied to the signal
    boolean onlyNoticeChange = false;     // if true, only feed the signal when its discrete level changes
    int thinkInterval = onlyNoticeChange ? 1 : 2; // reasoner cycles per outer loop iteration
    int discretization = 3;               // number of discrete value levels

    NAR n = new NAR(new Default().setInternalExperience(null));
    n.param.duration.set(duration);
    // n.param.duration.setLinear(0.5);
    n.param.conceptBeliefsMax.set(64);
    // n.param.conceptForgetDurations.set(16);

    Discretize discretize = new Discretize(n, discretization);

    // charts: observed signal, per-level predictions, and retrospective "reflections"
    TreeMLData observed = new TreeMLData("value", Color.WHITE).setRange(0, 1f);
    TreeMLData[] predictions = new TreeMLData[discretization];
    TreeMLData[] reflections = new TreeMLData[discretization];
    for (int i = 0; i < predictions.length; i++) {
        predictions[i] = new TreeMLData("Pred" + i, Color.getHSBColor(0.25f + i / 4f, 0.85f, 0.85f));
        // predictions[i].setDefaultValue(0.0);
        reflections[i] = new TreeMLData("Refl" + i, Color.getHSBColor(0.25f + i / 4f, 0.85f, 0.85f));
        reflections[i].setDefaultValue(0.0);
        // predictions[i].setRange(0, 0.5);
    }
    TimelineVis tc = new TimelineVis(new LineChart(observed).thickness(16f).height(128), new LineChart(predictions).thickness(16f).height(128), new LineChart(reflections).thickness(16f).height(128));
    // new BarChart(error).height(4)
    new NWindow("_", new PCanvas(tc)).show(800, 800, true);

    // text output filter: prints only non-eternal tasks, flagging future-dated ones
    new TextOutput(n, System.out) {

        /**
         * Gaussian envelope around the prediction's occurrence time.
         * dt = offset relative to center; width scales with duration:
         * e^(-(4*dt/duration)^2)
         */
        public double getPredictionEnvelope(double dt, double duration) {
            double p = (4 * dt / duration);
            return Math.exp(-(p * p));
        }

        /**
         * only allow future predictions
         */
        protected boolean allowTask(Task t) {
            if (t.sentence.isEternal()) {
                return false; // timeless beliefs are not temporal predictions
            }
            boolean future = false;
            if ((t.sentence.getOccurenceTime() > n.time() + minCyclesAhead)) {
                System.out.print(n.time() + ".." + t.sentence.getOccurenceTime() + ": ");
                future = true;
            }
            Term term = t.getTerm();
            int time = (int) t.sentence.getOccurenceTime();
            int value = -1;
            float conf = t.sentence.truth.getConfidence();
            // signed expectation in [-1, 1]: frequency rescaled, weighted by confidence
            float expect = 2f * (t.sentence.truth.getFrequency() - 0.5f) * conf;
            // parse the discrete level index N out of terms shaped "<x_t0 --> yN>"
            String ts = term.toString();
            if (ts.startsWith("<x_t0 --> y")) {
                char cc = ts.charAt("<x_t0 --> y".length());
                value = cc - '0';
            }
            if (value != -1) {
                // predictions[(int)value].addPlus(time, expect);
                // Spread the expectation over a window around the occurrence time.
                // NOTE(review): `expect` is multiplied by the envelope cumulatively
                // across iterations (compounding decay); likely intended as a
                // per-step local value. Harmless while the chart updates below are
                // commented out, but confirm before re-enabling them.
                for (int tt = time - duration / 2; tt <= time + duration / 2; tt++) {
                    double smooth = 1;
                    expect *= getPredictionEnvelope(time - tt, smooth * duration * 2f);
                    /*
                    if (future)
                        predictions[value].addPlus(tt, expect);
                    else
                        reflections[value].addPlus(tt, expect);
                    */
                }
            }
            return true;
        }
    };

    // prime each discrete value concept with a neutral (0.5, 0.5) belief
    for (Term t : discretize.getValueTerms("x")) n.believe(t.toString(), Tense.Present, 0.5f, 0.5f);
    // TODO move this to discretize.getDisjunctionBelief
    n.believe("<(||,y0,y1,y2,y3,y4,y5,y6,y7) --> y>", Tense.Eternal, 1.0f, 0.95f);
    n.run(discretization * 4);
    // new TextOutput(n, System.out);

    Concept[] valueBeliefConcepts = discretize.getValueConcepts("x");
    NARSwing.themeInvert();
    new NWindow("x", new ConceptsPanel(n, valueBeliefConcepts)).show(900, 600, true);

    // RNN predictor over the value concepts; charts its per-level outputs each cycle
    RNNBeliefPrediction predictor = new RNNBeliefPrediction(n, valueBeliefConcepts) {

        @Override
        public double[] getTrainedPrediction(double[] input) {
            return input; // identity target: train the net to reproduce its input frame
        }

        @Override
        public int getPredictionSize() {
            return getInputSize();
            // return 1;
        }

        @Override
        protected double[] predict() {
            double[] x = super.predict();
            if (x == null)
                return null;
            long t = n.time();
            for (int i = 0; i < x.length; i++) {
                // - x[i*2+1]);
                predictions[i].add((int) t, x[i]);
            }
            return x;
        }
    };
    // new NARSwing(n);

    // main simulation loop: generate signal, add noise, drop samples, feed the NAR
    int prevY = -1, curY = -1;
    long prevT = n.time();
    while (true) {
        n.run(thinkInterval);
        Thread.sleep(3);
        // n.memory.addSimulationTime(1);

        // NOTE(review): tan is unbounded (the clamp keeps the value in [0,1]);
        // the commented alternatives below suggest sin may have been intended — confirm.
        signal = (float) Math.max(0, Math.min(1.0, Math.tan(freq * n.time()) * 0.5f + 0.5f));
        // signal = (float)Math.sin(freq * n.time()) * 0.5f + 0.5f;
        // signal = ((float) Math.sin(freq * n.time()) > 0 ? 1f : -1f) * 0.5f + 0.5f;
        signal *= 1.0 + (Math.random() - 0.5f) * 2f * noiseRate;

        // randomly drop samples to simulate missing observations
        if (Math.random() > missingDataRate)
            observed.add((int) n.time(), signal);

        prevY = curY;
        curY = discretize.i(signal);
        if ((curY == prevY) && (onlyNoticeChange)) {
            continue; // discrete level unchanged: skip input this cycle
        }
        discretize.believe("x", signal, 0);
        // input(prevT, Term.get("x_t0"), prevY, Term.get("x_t0"), curY, 1f, n.memory);
        // input(prevT, Term.get("x_t0"), prevY, Term.get("x_t0"), 1f-curY, 0.5f, n.memory);
        // n.addInput("notice(x_t0," + value + ",(||,y0,y1))!");
        // input(prevT, Term.get("x_tMin1"), prevY, Term.get("x_t0"), 1f-y, 0f, n.memory);
        // input(Term.get("x_t0"), Term.get(value), 0.0f, 0.0f, n.memory); //input(Term.get("x_t0"), Term.get(otherValue), 1.0f, 0f, n.memory); */
        /*
        n.believe(xFuncEq0, Tense.Present, 1.0f, y);
        n.believe(xFuncEq0, Tense.Present, 0.0f, 1f - y);
        n.believe(xFuncEq1, Tense.Present, 1.0f, 1f - y);
        n.believe(xFuncEq1, Tense.Present, 0.0f, y);
        */
        prevT = n.time();
    }
}
Usage example of nars.nal.entity.Concept in project narchy by automenta:
the train method of the class RNNBeliefPrediction.
/**
 * Rebuilds the training sample set from the temporal beliefs of the watched
 * concepts and runs one gradient-descent training pass over the network.
 *
 * Each concept contributes one input dimension. Every non-eternal, non-future
 * belief deposits its signed expectation (frequency rescaled to [-1,1],
 * weighted by confidence) into a window of downsampled time frames around its
 * occurrence time. Consecutive frames then form (previous input -> current
 * target) training pairs. Sets {@code data} to null when fewer than two
 * frames exist.
 */
@Override
protected void train() {
    //
    // double[] target = {((data[x(i1)] + data[x(i2)])/2.0)};
    // new Sample(data, target, 2, length, 1, 1);

    // downsampled frame index -> activation vector (one slot per concept)
    TreeMap<Integer, double[]> d = new TreeMap<>();
    int cc = 0;
    // half-width, in frames, of the temporal window each belief is smeared over
    int hd = Math.round(predictionTimeSpanFactor * nar.memory.getDuration() / 2f / downSample);
    for (Concept c : concepts) {
        for (Sentence s : c.beliefs) {
            if (s.isEternal()) {
                continue; // eternal beliefs carry no usable timestamp
            }
            int o = (int) Math.round(((double) s.getOccurenceTime()) / ((double) downSample));
            if (o > nar.time()) {
                continue; // skip future-dated beliefs; train only on the past
            }
            // signed expectation, constant over the window (hoisted out of the loop)
            float freq = 2f * (s.truth.getFrequency() - 0.5f);
            float conf = s.truth.getConfidence();
            for (int oc = o - hd; oc <= o + hd; oc++) {
                double[] x = d.get(oc);
                if (x == null) {
                    x = new double[inputSize];
                    d.put(oc, x);
                }
                x[cc] += freq * conf;
            }
        }
        cc++;
    }
    if (d.size() < 2) {
        data = null; // not enough frames to form a single (input, target) pair
        return;
    }
    data = new SampleSet();
    int first = d.firstKey();
    int last = (int) nar.time();
    // cap the history length to the configured maximum number of frames
    if (last - first > maxDataFrames * downSample) {
        first = last - maxDataFrames * downSample;
    }
    int frames = last - first;
    int bsize = getInputSize() * frames;
    int isize = getPredictionSize() * frames;
    // reuse the flat input/target buffers when their size is unchanged
    if (actual == null || actual.length != bsize)
        actual = new double[bsize];
    else
        Arrays.fill(actual, 0);
    if (ideal == null || ideal.length != isize)
        ideal = new double[isize];
    else
        Arrays.fill(ideal, 0);
    int idealSize = getPredictionSize();
    int ac = 0, id = 0;
    double[] prevX = null;
    // pair each frame's input vector with the next frame's training target
    for (int i = first; i <= last; i++) {
        double[] x = d.get(i);
        if (x == null) {
            x = new double[inputSize]; // missing frame: all-zero activation
        } else {
            if (normalizeInputVectors) {
                x = normalize(x);
            }
        }
        if (prevX != null) {
            System.arraycopy(prevX, 0, actual, ac, inputSize);
            ac += inputSize;
            System.arraycopy(getTrainedPrediction(x), 0, ideal, id, idealSize);
            id += idealSize;
        }
        prevX = x;
    }
    Sample s = new Sample(actual, ideal, inputSize, idealSize);
    data.add(s);
    // lazily construct and configure the trainer on first use
    if (trainer == null) {
        trainer = new GradientDescent();
        trainer.setNet(net);
        trainer.setRnd(rnd);
        trainer.setPermute(true);
        trainer.setTrainingSet(data);
        trainer.setLearningRate(learningrate);
        trainer.setMomentum(momentum);
        trainer.setEpochs(trainIterationsPerCycle);
        trainer.setEarlyStopping(false);
        trainer.setOnline(true);
        trainer.setTargetError(0);
        trainer.clearListener();
    } else {
        // trainer.reset();
    }
    trainer.train();
    // System.out.println("LSTM error: " + trainer.getTrainingError());
}
Aggregations