Use of nars.NAR in project narchy by automenta.
Class ALEAgent, method initNARS():
protected void initNARS() {
    this.nar = new NAR(new Default(3000, 10, 3).setInternalExperience(null));
    int memoryCyclesPerFrame = 2;
    nar.param.duration.set(memoryCyclesPerFrame * 3);
    nar.setCyclesPerFrame(memoryCyclesPerFrame);
    nar.param.outputVolume.set(0);
    nar.param.executionThreshold.set(0.65);
    nar.param.shortTermMemoryHistory.set(3);
    nar.input("schizo(I)!");
    QLAgent agent = new QLAgent(nar, "a", "<be --> good>", this);
    agent.ql.brain.setEpsilon(0.1);
    mi = new QVis(agent);
    /*agent.add(new ShapePerception(new Supplier<BufferedImage>() {
        @Override
        public BufferedImage get() {
            return buffer;
        }
    }));*/
    // agent.add(new AEPerception("AE", 0.25f, 64).setLearningRate(0.1));
    // agent.add(new ALEFeaturePerception(0.75f));
    agent.add(new AEALEFeaturePerception(0.75f));
    Video.themeInvert();
    // NARSwing s = new NARSwing(nar);
}
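If the commented-out ShapePerception wiring is ever re-enabled, the anonymous Supplier can be shortened to a lambda. A minimal sketch, assuming ShapePerception accepts a Supplier<BufferedImage> exactly as the commented code indicates:

    // lambda equivalent of the commented-out anonymous Supplier<BufferedImage>
    agent.add(new ShapePerception(() -> buffer));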
Use of nars.NAR in project narchy by automenta.
Class TestQController, method main():
public static void main(String[] arg) {
    int controlPeriod = 2;
    NAR n = newNAR();
    TestController qn = new TestController(n, controlPeriod);
    qn.setActive(false);
    // m has controller deactivated
    NAR m = newNAR();
    TestController qm = new TestController(m, controlPeriod);
    qm.setActive(false);
    // random policy
    NAR r = newNAR();
    TestController qr = new TestController(r, controlPeriod) {
        @Override
        protected void act(int ignored) {
            int action = (int) (Math.random() * getNumActions());
            super.act(action);
        }
    };
    qr.setActive(false);
    double mm = 0, nn = 0, rr = 0;
    int displayCycles = 100;
    double[] nAction = new double[qn.getNumActions()];
    long startupPeriod = 0;
    int resetPeriod = 50000;
    double avgCycleToForget = 0;
    int time = 0;
    while (true) {
        if (time % resetPeriod == 0) {
            System.out.println("RESET");
            // TextOutput.out(n);
            n.reset();
            m.reset();
            r.reset();
            // input("nal/original/Example-MultiStep-edited.nal", n, m, r);
            // input("nal/test/nars_multistep_1.nal", n, m, r);
            // input("nal/test/nars_multistep_2.nal", n, m, r);
        }
        if (time > startupPeriod) {
            qr.setActive(true);
            qn.setActive(true);
            double[] oqn = qn.getOutput();
            if (oqn != null) {
                for (int i = 0; i < nAction.length; i++)
                    nAction[i] += oqn[i] / displayCycles;
            }
        }
        n.frame(1);
        m.frame(1);
        r.frame(1);
        avgCycleToForget += (n.memory).conceptForgetDurations.getValue() / displayCycles;
        mm += qm.reward();
        nn += qn.reward();
        rr += qr.reward();
        if (time % displayCycles == 0) {
            System.out.print(// ((nn-mm)/((nn+mm)/2.0)*100.0) + " , " +
                time + ", " + df.format(mm) + " , " + df.format(nn) + " , " + df.format(rr) + " , ");
            // System.out.println();
            System.out.print(avgCycleToForget + ", ");
            printCSVLine(System.out, nAction);
            mm = nn = rr = avgCycleToForget = 0;
            Arrays.fill(nAction, 0);
        }
        time++;
    }
}
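Neither df nor printCSVLine is defined in this excerpt. A minimal sketch of what such helpers could look like, purely hypothetical and assuming they do nothing beyond formatting doubles as comma-separated values:

    // hypothetical helpers, not the project's actual code
    // (requires java.text.DecimalFormat and java.io.PrintStream)
    static final DecimalFormat df = new DecimalFormat("0.000");

    static void printCSVLine(PrintStream out, double[] values) {
        StringBuilder sb = new StringBuilder();
        for (int i = 0; i < values.length; i++) {
            if (i > 0) sb.append(", ");
            sb.append(df.format(values[i]));
        }
        out.println(sb);
    }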
Use of nars.NAR in project narchy by automenta.
Class Line1DCalibrate, method main():
public static void main(String[] args) {
    Param.DEBUG = true;
    NAR n = NARS.threadSafe();
    // new STMTemporalLinkage(n, 2, false);
    n.time.dur(1);
    n.termVolumeMax.set(16);
    // n.beliefConfidence(0.9f);
    // n.goalConfidence(0.5f);
    // n.onCycle((nn) -> {
    //     nn.stats(System.out);
    // });
    // n.truthResolution.setValue(0.05f);
    Line1DSimplest a = new Line1DSimplest() {
        // final FloatAveraged rewardAveraged = new FloatAveraged(()->super.act(), 10);
        @Override
        protected float act() {
            float r = super.act();
            System.out.println("reward: " + now + "\t^" + n2(i.floatValue()) + "\t@" + n2(o.floatValue()) + "\t\t= " + r);
            return r;
        }
    };
    // in time units
    float tHz = 0.05f;
    // in 0..1.0
    float yResolution = 0.1f;
    float periods = 16;
    // final int runtime = Math.round(periods / tHz);
    // Set.of(a.up.term(), a.down.term());
    Collection actions = a.actions.values();
    n.onTask(t -> {
        if (t instanceof DerivedTask) {
            if (t.isGoal()) {
                if (actions.contains(t.term())) {
                    float dir = new PreciseTruth(t.freq(), t.evi(a.nar().time(), a.nar().dur()), false).freq() - 0.5f;
                    // TEST POLARITY
                    float i = a.i.floatValue();
                    float o = a.o.floatValue();
                    float neededDir = (i - o);
                    boolean good = Math.signum(neededDir) == Math.signum(dir);
                    /*if (!good)*/
                    System.err.println(n4(dir) + "\t" + good + " " + i + " <-? " + o);
                    System.err.println(t.proof());
                    System.out.println();
                }
                if (t.isGoal())
                    System.err.println(t.proof());
            } else {
                // System.err.println(t.toString(n));
            }
        }
    });
    a.speed.set(yResolution);
    // a.up.resolution.setValue(yResolution);
    // a.down.resolution.setValue(yResolution);
    a.in.resolution(yResolution);
    a.curiosity.set(0.1f);
    // a.in.beliefs().capacity(0, 100, a.nar);
    // a.out.beliefs().capacity(0, 100, a.nar);
    // a.out.goals().capacity(0, 100, a.nar);
    // Line1DTrainer trainer = new Line1DTrainer(a);
    // new RLBooster(a, new HaiQAgent(), 5);
    // ImplicationBooster.implAccelerator(a);
    a.onFrame((z) -> {
        a.target(// Math.signum(Math.sin(a.nar.time() * tHz * 2 * PI) ) > 0 ? 1f : -1f
            Util.round((float) (0.5f + 0.5f * Math.sin(a.nar().time() * tHz * 2 * PI)), yResolution));
        // Util.pause(1);
    });
    // a.runCycles(runtime);
    // new Thread(() -> {
    //     //NAgentX.chart(a);
    //     int history = 800;
    //     window(
    //         row(
    //             conceptPlot(a.nar, Lists.newArrayList(
    //                 () -> (float) a.i.floatValue(),
    //                 a.o,
    //                 //a.out.feedback.current!=null ? a.out.feedback.current.freq() : 0f,
    //                 () -> a.reward
    //                 //() -> a.rewardSum
    //             ),
    //             history),
    //             col(
    //                 new Vis.EmotionPlot(history, a),
    //                 new ReflectionSurface<>(a),
    //                 Vis.beliefCharts(history,
    //                     Iterables.concat(a.sensors.keySet(), a.actions.keySet()), a.nar)
    //             )
    //         )
    //     , 900, 900);
    //
    // }).start();
    // n.startFPS(100);
    n.run(2000);
    // n.tasks().forEach(x -> {
    //     if (x.isBelief() && x.op()==IMPL) {
    //         System.out.println(x.proof());
    //     }
    // });
}
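The onFrame callback above drives the agent's target with a sine wave in [0,1], quantized to steps of yResolution. Isolated as plain arithmetic, a sketch assuming Util.round(v, eps) rounds v to the nearest multiple of eps:

    // target oscillates with period 1/tHz (= 20 time units here),
    // quantized to steps of yResolution (= 0.1 here)
    static float target(long time, float tHz, float yResolution) {
        float y = 0.5f + 0.5f * (float) Math.sin(time * tHz * 2 * Math.PI);
        return Math.round(y / yResolution) * yResolution;
    }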
Use of nars.NAR in project narchy by automenta.
Class NARio, method main():
public static void main(String[] args) {
    // Param.DEBUG = true;
    NAR nar = runRT((NAR n) -> {
        NARio x = new NARio(n);
        n.freqResolution.set(0.02f);
        n.confResolution.set(0.01f);
        // n.time.dur(n.dur()/2);
        // new Implier(1, x, 0, 1);
        // x.durations.setValue(2f);
        x.trace = true;
        // });
        return x;
        // n.termVolumeMax.setValue(60);
        // try {
        //     ImmutableTask r = (ImmutableTask) n.ask($.$("(?x ==> happy(nario))"), ETERNAL, (q, a) -> {
        //         System.err.println(a);
        //     });
        //     n.onCycle((nn) -> {
        //         r.budgetSafe(1f, 0.9f);
        //         nn.input(r);
        //     });
        //     n.onTask(tt -> {
        //         if (tt.isBelief() && tt.op() == IMPL)
        //             System.err.println("\t" + tt);
        //     });
        // } catch (Narsese.NarseseException e) {
        //     e.printStackTrace();
        // }
    }, 24);
    // ArrayList<PLink<Concept>> x = Lists.newArrayList(nar.conceptsActive());
    // x.sort((a,b)->{
    //     int z = Float.compare(a.pri(), b.pri());
    //     if (z == 0)
    //         return Integer.compare(a.get().hashCode(), b.get().hashCode());
    //     return z;
    // });
    // for (PLink y : x)
    //     System.out.println(y);
}
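The commented-out dump at the end sorts active concepts by priority, breaking ties with the concept's hash code. With java.util.Comparator the same ordering can be written more compactly; a sketch assuming, as the commented code implies, that PLink.pri() returns a float and PLink.get() returns the wrapped Concept:

    // equivalent of the hand-written comparator in the commented-out block
    x.sort(Comparator.comparingDouble((PLink<Concept> a) -> a.pri())
            .thenComparingInt(a -> a.get().hashCode()));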
Use of nars.NAR in project narchy by automenta.
Class NQuadsRDFTest, method test1():
@Test
public void test1() throws Exception {
    final NAR n = NARS.tmp();
    n.log();
    // "Человек-паук" is Russian for "Spider-Man"
    NQuadsRDF.input(n, "<http://example.org/#spiderman> <http://xmlns.com/foaf/0.1/name> \"Человек-паук\"@ru .");
    n.run(1);
    assertTrue(n.concepts.size() > 2);
}