Example use of spacegraph.video.Draw in project narchy by automenta:
class OsmSpace, method compile().
/**
 * Compiles this OSM layer into a list of deferred GL draw commands: one command per way
 * (either a tessellated polygon fill or a stippled line strip, colored by tag category)
 * and one point marker per tagged node.
 * <p>
 * Coordinate projection is done once here, at compile time, so the returned consumer only
 * replays buffered GL calls each frame.
 *
 * @return a composite {@link Consumer} that executes every buffered draw command on the given GL2 context
 */
protected Consumer<GL2> compile() {
    // was raw "new FasterList()": diamond operator removes the unchecked-assignment warning
    List<Consumer<GL2>> draw = new FasterList<>();
    Osm osm = id;
    for (OsmWay way : osm.ways) {
        Map<String, String> tags = way.tags;
        String building, building_part, landuse, natural, route, highway;
        if (!tags.isEmpty()) {
            building = tags.get("building");
            building_part = tags.get("building:part");
            landuse = tags.get("landuse");
            natural = tags.get("natural");
            route = tags.get("route");
            highway = tags.get("highway");
        } else {
            building = building_part = landuse = natural = route = highway = null;
        }
        boolean isPolygon = false;
        boolean isClosed = way.isClosed();
        // style for this way: color (r,g,b,a), line width, and 16-bit stipple pattern
        float r, g, b, a;
        float lw;
        short ls;
        if (building != null || building_part != null) {
            // buildings: cyan; filled only when closed and not in wireframe mode
            r = 0f;
            g = 1f;
            b = 1f;
            a = 1f;
            lw = 1f;
            ls = (short) 0xFFFF; // solid line
            isPolygon = !wireframe && isClosed;
        } else if ("forest".equals(landuse) || "grass".equals(landuse) || "wood".equals(natural)) {
            // vegetation: green
            r = 0f;
            g = 1f;
            b = 0f;
            a = 1f;
            lw = 1f;
            ls = (short) 0xFFFF;
            isPolygon = !wireframe && isClosed;
        } else if ("water".equals(natural)) {
            // water: blue
            r = 0f;
            g = 0f;
            b = 1f;
            a = 1f;
            lw = 1f;
            ls = (short) 0xFFFF;
            isPolygon = !wireframe && isClosed;
        } else if ("pedestrian".equals(highway)) {
            r = 0f;
            g = 0.5f;
            b = 0f;
            a = 1f;
            lw = 2f;
            ls = (short) 0xFFFF;
        } else if ("motorway".equals(highway)) {
            r = 1f;
            g = 0.5f;
            b = 0f;
            a = 1f;
            lw = 5f;
            ls = (short) 0xFFFF;
        } else if (highway != null) {
            // any other highway: white, medium width
            r = 1f;
            g = 1f;
            b = 1f;
            a = 1f;
            lw = 3f;
            ls = (short) 0xFFFF;
        } else if ("road".equals(route)) {
            r = 1f;
            g = 1f;
            b = 1f;
            a = 1f;
            lw = 1f;
            ls = (short) 0xFFFF;
        } else if ("train".equals(route)) {
            // railways: thick white dashed line
            r = 1f;
            g = 1f;
            b = 1f;
            a = 1f;
            lw = 5f;
            ls = (short) 0xF0F0; // dashed stipple
        } else {
            // unrecognized way: dim purple
            r = 0.5f;
            g = 0f;
            b = 0.5f;
            a = 1f;
            lw = 1f;
            ls = (short) 0xFFFF;
        }
        if (isPolygon) {
            // tessellate the closed way into a filled polygon; each vertex carries
            // [x, y, z, r, g, b, a] so the tess callback can read per-vertex color
            List<OsmNode> nn = way.getOsmNodes();
            double[][] coord = new double[nn.size()][7];
            for (int i = 0, nnSize = nn.size(); i < nnSize; i++) {
                OsmNode node = nn.get(i);
                double[] ci = coord[i];
                project(node.geoCoordinate, ci);
                ci[3] = r;
                ci[4] = g;
                ci[5] = b;
                ci[6] = a;
            }
            draw.add((gl) -> {
                // darker outline color; fill color comes from the per-vertex data above
                gl.glColor4f(r * 0.5f, g * .5f, b * 0.5f, a);
                gl.glLineWidth(lw);
                gl.glLineStipple(1, ls);
                GLU.gluTessBeginPolygon(tobj, null);
                GLU.gluTessBeginContour(tobj);
                for (int i = 0, nnSize = nn.size(); i < nnSize; i++) {
                    double[] ci = coord[i];
                    GLU.gluTessVertex(tobj, ci, 0, ci);
                }
                GLU.gluTessEndContour(tobj);
                GLU.gluTessEndPolygon(tobj);
            });
        } else {
            // open (or wireframe) way: draw as a line strip of projected points
            List<OsmNode> ways = way.getOsmNodes();
            int ws = ways.size();
            double[] c3 = new double[3 * ws];
            for (int i = 0; i < ws; i++) { // redundant 'waysSize' copy removed
                project(ways.get(i).geoCoordinate, c3, i * 3);
            }
            draw.add((gl) -> {
                gl.glColor4f(r, g, b, a);
                gl.glLineWidth(lw);
                gl.glLineStipple(1, ls);
                gl.glBegin(GL_LINE_STRIP);
                for (int i = 0; i < c3.length / 3; i++) {
                    gl.glVertex3dv(c3, i * 3);
                }
                gl.glEnd();
            });
        }
    }
    // point markers for tagged nodes (bus stops, signals, trees, ...)
    for (OsmNode node : osm.nodes) {
        Map<String, String> tags = node.tags;
        if (tags.isEmpty())
            continue;
        String highway = tags.get("highway");
        String natural = tags.get("natural");
        float pointSize;
        float r, g, b, a;
        if ("bus_stop".equals(highway)) {
            pointSize = 3;
            r = g = b = 1f;
            a = 0.7f;
        } else if ("traffic_signals".equals(highway)) {
            pointSize = 3;
            r = g = 1f;
            b = 0f;
            a = 0.7f;
        } else if ("tree".equals(natural)) {
            pointSize = 3;
            g = 1f;
            r = b = 0f;
            a = 0.7f;
        } else {
            // unrecognized tagged node: red
            pointSize = 3;
            r = 1f;
            g = b = 0f;
            a = 0.7f;
        }
        double[] c3 = new double[3];
        project(node.geoCoordinate, c3);
        draw.add((gl) -> {
            gl.glPointSize(pointSize);
            gl.glBegin(GL_POINTS);
            gl.glColor4f(r, g, b, a);
            gl.glVertex3d(c3[0], c3[1], c3[2]);
            gl.glEnd();
        });
    }
    // replay every buffered command in insertion order
    return (v) -> draw.forEach(d -> d.accept(v));
}
Example use of spacegraph.video.Draw in project narchy by automenta:
class BeliefTableChart, method renderTable().
/**
 * Renders one truth table (belief or goal) of a concept onto the chart:
 * the "now" axis line, the truth wave, optional projection curves, and a
 * rotating cross-hair marker for the current truth value.
 * Mutates beliefTheta/goalTheta each call to animate the cross-hair rotation.
 * No-op when the concept is null.
 */
private void renderTable(Concept c, long minT, long maxT, long now, GL2 gl, TruthWave wave, boolean beliefOrGoal) {
if (c == null)
return;
// normalized x position of 'now' within the [minT..maxT] window
float nowX = xTime(minT, maxT, now);
// Present axis line
if ((now <= maxT) && (now >= minT)) {
gl.glColor4f(1f, 1f, 1f, 0.5f);
Draw.line(gl, nowX, 0, nowX, 1);
// float nowLineWidth = 0.005f;
// Draw.rect(gl, nowX - nowLineWidth / 2f, 0, nowLineWidth, 1);
}
/**
 * drawn "pixel" dimensions
 */
renderWave(nowX, minT, maxT, gl, wave, beliefOrGoal);
// draw projections
// draw projection curves: one pass for frequency, one for expectation
if (projections > 0 && minT != maxT) {
for (boolean freqOrExp : new boolean[] { true, false }) {
TruthWave pwave = beliefOrGoal ? beliefProj : goalProj;
// HACK dont show expectation for beliefs
if (beliefOrGoal && !freqOrExp)
continue;
// colorizer maps (freq, conf) to a GL color; alpha/intensity scale with confidence
Colorize colorize;
if (freqOrExp) {
colorize = beliefOrGoal ? (ggl, frq, cnf) -> {
float a = 0.65f + 0.2f * cnf;
ggl.glColor4f(0.25f + 0.75f * cnf, 0.1f * (1f - cnf), 0, a);
} : (ggl, frq, cnf) -> {
float a = 0.65f + 0.2f * cnf;
ggl.glColor4f(0.1f * (1f - cnf), 0.25f + 0.75f * cnf, 0, a);
};
} else {
colorize = beliefOrGoal ? (ggl, frq, cnf) -> {
ggl.glColor4f(cnf, cnf / 2f, 0.25f, 0.85f);
} : (ggl, frq, cnf) -> {
ggl.glColor4f(cnf / 2f, cnf, 0.25f, 0.85f);
};
}
// y-value selector: raw frequency, or expectation(freq, conf)
FloatFloatToFloatFunction y = freqOrExp ? (frq, cnf) -> frq : TruthFunctions::expectation;
// HACK show goal freq in thinner line
gl.glLineWidth((freqOrExp && !beliefOrGoal) ? 2f : 4f);
renderWaveLine(nowX, minT, maxT, gl, pwave, y, colorize);
}
}
// cross-hair marker at the current truth value, spinning at a rate
// proportional to how far expectation is from 0.5
float chSize = 0.1f;
Truth bc = wave.current;
if (bc != null) {
float theta;
float expectation = bc.expectation();
float dTheta = (expectation - 0.5f) * angleSpeed;
float conf = bc.conf();
if (beliefOrGoal) {
// belief: red cross-hair at (freq, conf), opacity scales with confidence
this.beliefTheta += dTheta;
theta = beliefTheta;
gl.glColor4f(1f, 0f, 0, 0.2f + 0.8f * conf);
drawCrossHair(gl, nowX, chSize, bc.freq(), conf, theta);
} else {
// goal: green cross-hair positioned by expectation (not raw freq)
this.goalTheta += dTheta;
theta = goalTheta;
// //freq
// gl.glColor4f(0f, 1f, 0, 0.2f + 0.8f * conf);
// drawCrossHair(gl, nowX, chSize, bc.freq(), conf, theta);
// expectation
gl.glColor4f(0f, 1f, 0, 0.2f + 0.8f * conf);
drawCrossHair(gl, nowX, chSize, expectation, expectation, theta);
}
}
}
Example use of spacegraph.video.Draw in project narchy by automenta:
class ExeCharts, method metaGoalPlot().
/**
 * Builds a surface visualizing the NAR's per-cause values as a bipolar-colored
 * bitmap matrix, paired with an adjustable gain slider. Each cell shows
 * tanh(gain * cause.value()), refreshed once per duration via a DurService.
 *
 * @param nar the reasoner whose causes are plotted
 * @return a Splitting surface: the matrix view plus an AutoSurface for the gain control
 */
public static Surface metaGoalPlot(NAR nar) {
    int s = nar.causes.size();
    // gain in [0..20], initial 20; scales cause values before tanh squashing
    FloatRange gain = new FloatRange(20f, 0f, 20f);
    BitmapMatrixView bmp = new BitmapMatrixView((i) -> Util.tanhFast(gain.floatValue() * nar.causes.get(i).value()), // Util.tanhFast(nar.causes.get(i).value()),
    s, Math.max(1, (int) Math.ceil(Math.sqrt(s))), Draw::colorBipolar) {

        DurService on;

        {
            // refresh the matrix once per NAR duration
            on = DurService.on(nar, this::update);
        }

        @Override
        public void stop() {
            // guard against a second stop() call: the original unconditionally
            // dereferenced 'on', throwing NPE if stop() ran twice
            DurService o = on;
            if (o != null) {
                on = null;
                o.off();
            }
            super.stop();
        }
    };
    return new Splitting(bmp, new AutoSurface<>(gain), 0.1f);
}
Example use of spacegraph.video.Draw in project narchy by automenta:
class Recog2D, method conceptTraining().
/**
 * Assembles the training UI for a belief vector: a reward plot, the camera
 * sensor view, belief-table charts for each concept, and a grid of labels
 * whose background color reflects each concept's current belief frequency.
 * Also registers a per-frame callback that advances the training image and
 * updates the plot/network.
 */
Surface conceptTraining(BeliefVector tv, NAR nar) {
// LinkedHashMap<TaskConcept, BeliefVector.Neuron> out = tv.out;
Plot2D p;
int history = 256;
Gridding g = new Gridding(p = new Plot2D(history, Plot2D.Line).add("Reward", () -> reward), new AspectAlign(new CameraSensorView(sp, this), AspectAlign.Align.Center, sp.width, sp.height), new Gridding(beliefTableCharts(nar, List.of(tv.concepts), 16)), new Gridding(IntStream.range(0, tv.concepts.length).mapToObj(i -> new spacegraph.space2d.widget.text.Label(String.valueOf(i)) {

@Override
protected void paintBelow(GL2 gl) {
// paint the label background with a bipolar color encoding the
// current belief frequency of concept i (red/green around 0.5)
Concept c = tv.concepts[i];
BeliefVector.Neuron nn = tv.neurons[i];
float freq, conf;
Truth t = nar.beliefTruth(c, nar.time());
if (t != null) {
conf = t.conf();
freq = t.freq();
} else {
// no current belief: fall back to minimum confidence and neutral frequency
conf = nar.confMin.floatValue();
float defaultFreq = // interpret no-belief as maybe
0.5f;
// Float.NaN //use NaN to force learning of negation as separate from no-belief
freq = defaultFreq;
}
Draw.colorBipolar(gl, 2f * (freq - 0.5f));
// NOTE(review): 'm' is computed but only used by the commented-out
// verification rendering below
float m = 0.5f * conf;
Draw.rect(gl, bounds);
if (tv.verify) {
float error = nn.error;
if (error != error) {
// training phase
// Draw.rect(gl, m / 2, m / 2, 1 - m, 1 - m);
} else {
// verification
// draw backgroudn/border
// gl.glColor3f(error, 1f - error, 0f);
//
// float fontSize = 0.08f;
// gl.glColor3f(1f, 1f, 1f);
// Draw.text(gl, c.term().toString(), fontSize, m / 2, 1f - m / 2, 0);
// Draw.text(gl, "err=" + n2(error), fontSize, m / 2, m / 2, 0);
}
}
}
}).toArray(Surface[]::new)));
// per-frame driver: advance to the next training image every imagePeriod
// frames, feed the expected output, optionally train the MLP, update the plot
final int[] frames = { 0 };
onFrame(() -> {
if (frames[0]++ % imagePeriod == 0) {
nextImage();
}
redraw();
// if (neural.get()) {
// if (nar.time() < trainFrames) {
outs.expect(image);
if (neural.get()) {
train.update(mlpLearn, mlpSupport);
}
p.update();
// s.update();
});
return g;
}
Aggregations