Example usage of org.ddogleg.struct.DogArray in the BoofCV project (by lessthanoptimal): method saveMetricSeed of class TestMetricSpawnSceneFromView.
/**
 * Verifies that {@code saveMetricSeed} copies metric reconstruction results for a seed view and
 * its neighbors into a {@link SceneWorkingGraph}: a single shared camera, per-view world-to-view
 * poses, and one inlier set per view whose observation lists are rotated so each view's own
 * observations come first.
 */
@Test
void saveMetricSeed() {
var graph = new PairwiseImageGraph();
List<String> viewIds = BoofMiscOps.asList("A", "B", "C");
var listInliers = new DogArray<>(DogArray_I32::new, DogArray_I32::reset);
// specify the seed view
listInliers.grow().setTo(1, 3, 5, 7, 9);
int numInliers = listInliers.get(0).size;
// create distinctive sets of inlier indexes for each view
for (int otherIdx = 0; otherIdx < viewIds.size() - 1; otherIdx++) {
DogArray_I32 inliers = listInliers.grow();
for (int i = 0; i < numInliers; i++) {
// view otherIdx's inliers are the seed's indexes shifted by (1 + otherIdx), keeping each set unique
inliers.add(listInliers.get(0).get(i) + 1 + otherIdx);
}
}
// Create some arbitrary metric results that should be saved
var results = new MetricCameras();
for (int viewIdx = 0; viewIdx < viewIds.size(); viewIdx++) {
// skip zero since it's implicit
if (viewIdx > 0)
// translation is (1, viewIdx, 0) — checked against world_to_view below
results.motion_1_to_k.grow().T.setTo(1, viewIdx, 0);
CameraPinhole pinhole = results.intrinsics.grow();
pinhole.fx = pinhole.fy = 100 + viewIdx;
graph.createNode(viewIds.get(viewIdx));
}
var alg = new MetricSpawnSceneFromView();
alg.utils.dbCams = new MockLookUpCameraInfo(800, 800);
var wgraph = new SceneWorkingGraph();
alg.saveMetricSeed(graph, viewIds, listInliers, results, wgraph);
// Check the cameras. There should only be one
assertEquals(1, wgraph.cameras.size());
assertEquals(0, wgraph.cameras.get(0).indexDB);
assertEquals(100, wgraph.cameras.get(0).intrinsic.f);
// See metric view info got saved correctly
BoofMiscOps.forIdx(viewIds, (idx, viewId) -> {
PairwiseImageGraph.View pview = graph.lookupNode(viewId);
assertTrue(wgraph.isKnown(pview));
SceneWorkingGraph.View wview = wgraph.lookupView(viewId);
// seed view stays at the origin; the others got T = (1, viewIdx, 0) above
assertEquals(idx == 0 ? 0 : 1, wview.world_to_view.T.x, 1e-8);
assertEquals(idx, wview.world_to_view.T.y, 1e-8);
});
// See if inliers got saved correctly
BoofMiscOps.forIdx(viewIds, (idx, viewId) -> {
SceneWorkingGraph.View wview = wgraph.lookupView(viewId);
assertEquals(1, wview.inliers.size);
SceneWorkingGraph.InlierInfo inlier = wview.inliers.get(0);
assertEquals(0.0, inlier.scoreGeometric, "Score should be unassigned");
assertEquals(viewIds.size(), inlier.views.size);
assertEquals(viewIds.size(), inlier.observations.size);
// each view's own inlier info must list itself first
assertEquals(viewId, inlier.views.get(0).id);
for (int inlierIdx = 0; inlierIdx < viewIds.size(); inlierIdx++) {
// observation lists are expected to be rotated: slot inlierIdx of view idx
// holds the observations of view (idx + inlierIdx) % size, whose i-th inlier
// index is (i*2 + 1) + offset by construction above
int offset = (idx + inlierIdx) % viewIds.size();
final int c = offset;
DogArray_I32 obs = inlier.observations.get(inlierIdx);
obs.forIdx((i, value) -> assertEquals(i * 2 + 1 + c, value));
}
});
}
Example usage of org.ddogleg.struct.DogArray in the BoofCV project (by lessthanoptimal): method checkPhysicalConstraints of class MetricSanityChecks.
/**
 * Checks physical constraints for one inlier set in a {@link SceneWorkingGraph}. Features are triangulated
 * directly from observations. Raw counts for each type of error are accumulated in the class fields
 * {@code failedTriangulate}, {@code failedBehind}, {@code failedImageBounds}, {@code failedReprojection},
 * and per-feature results in {@code badFeatures}.
 *
 * @param dbSimilar Use to get feature locations in the image
 * @param scene The scene
 * @param wview The view to check
 * @param setIdx Which inlier set in the view
 * @return true if nothing went wrong or false if a very nasty error was detected
 */
public boolean checkPhysicalConstraints( LookUpSimilarImages dbSimilar, SceneWorkingGraph scene,
		SceneWorkingGraph.View wview, int setIdx ) {
	// Reset per-call error counters
	failedTriangulate = 0;
	failedBehind = 0;
	failedImageBounds = 0;
	failedReprojection = 0;

	SceneWorkingGraph.InlierInfo inliers = wview.inliers.get(setIdx);
	int numFeatures = inliers.getInlierCount();
	badFeatures.resetResize(numFeatures, false);

	// Per-view working data. All lists are parallel to inliers.views
	List<SceneWorkingGraph.View> listViews = new ArrayList<>();
	List<RemoveBrownPtoN_F64> listNormalize = new ArrayList<>();
	List<Se3_F64> listMotion = new ArrayList<>();
	List<DogArray<Point2D_F64>> listFeatures = new ArrayList<>();
	List<Point2D_F64> listViewPixels = new ArrayList<>();

	Se3_F64 view1_to_world = wview.world_to_view.invert(null);

	for (int i = 0; i < inliers.views.size; i++) {
		SceneWorkingGraph.View w = scene.lookupView(inliers.views.get(i).id);
		// Look the camera up once; used for the focal-length sanity check, undistortion,
		// and re-centering pixels below. (Previously it was looked up twice redundantly.)
		SceneWorkingGraph.Camera camera = scene.getViewCamera(w);
		if (camera.intrinsic.f <= 0.0) {
			if (verbose != null)
				verbose.println("Negative focal length. view='" + w.pview.id + "'");
			return false;
		}
		listViews.add(w);

		// TODO switch to known camera if available
		var normalize = new RemoveBrownPtoN_F64();
		normalize.setK(camera.intrinsic.f, camera.intrinsic.f, 0, 0, 0)
				.setDistortion(camera.intrinsic.k1, camera.intrinsic.k2);
		listNormalize.add(normalize);

		// Motion from the view being checked (view-1) to this view
		listMotion.add(view1_to_world.concat(w.world_to_view, null));

		var features = new DogArray<>(Point2D_F64::new);
		dbSimilar.lookupPixelFeats(w.pview.id, features);
		// Re-center pixels so the prior's principal point is the origin
		double cx = camera.prior.cx;
		double cy = camera.prior.cy;
		features.forEach(p -> p.setTo(p.x - cx, p.y - cy));
		listFeatures.add(features);
	}

	List<Point2D_F64> pixelNorms = BoofMiscOps.createListFilled(inliers.views.size, Point2D_F64::new);
	Point4D_F64 foundX = new Point4D_F64();
	Point4D_F64 viewX = new Point4D_F64();
	Point2D_F64 predictedPixel = new Point2D_F64();

	SceneWorkingGraph.Camera wviewCamera = scene.getViewCamera(wview);

	for (int inlierIdx = 0; inlierIdx < numFeatures; inlierIdx++) {
		// Gather this feature's observation in every view and convert to normalized coordinates
		listViewPixels.clear();
		for (int viewIdx = 0; viewIdx < listViews.size(); viewIdx++) {
			Point2D_F64 p = listFeatures.get(viewIdx).get(inliers.observations.get(viewIdx).get(inlierIdx));
			listViewPixels.add(p);
			listNormalize.get(viewIdx).compute(p.x, p.y, pixelNorms.get(viewIdx));
		}

		if (!triangulator.triangulate(pixelNorms, listMotion, foundX)) {
			failedTriangulate++;
			badFeatures.set(inlierIdx, true);
			continue;
		}

		boolean badObservation = false;
		for (int viewIdx = 0; viewIdx < listViews.size(); viewIdx++) {
			Se3_F64 view1_to_view = listMotion.get(viewIdx);
			SceneWorkingGraph.View w = listViews.get(viewIdx);

			// Transform the triangulated point into this view's reference frame
			SePointOps_F64.transform(view1_to_view, foundX, viewX);
			if (PerspectiveOps.isBehindCamera(viewX)) {
				badObservation = true;
				failedBehind++;
			}

			// NOTE(review): this projects with wview's camera intrinsics for EVERY view in the
			// inlier set. If views can have different cameras this looks wrong — confirm whether
			// scene.getViewCamera(w).intrinsic should be used here instead.
			wviewCamera.intrinsic.project(viewX.x, viewX.y, viewX.z, predictedPixel);
			double reprojectionError = predictedPixel.distance2(listViewPixels.get(viewIdx));
			if (reprojectionError > maxReprojectionErrorSq) {
				badObservation = true;
				failedReprojection++;
			}

			// Check against the prior image bounds, undoing the earlier re-centering
			SceneWorkingGraph.Camera wcamera = scene.getViewCamera(w);
			int width = wcamera.prior.width;
			int height = wcamera.prior.height;
			double cx = wcamera.prior.cx;
			double cy = wcamera.prior.cy;
			if (!BoofMiscOps.isInside(width, height, predictedPixel.x + cx, predictedPixel.y + cy)) {
				badObservation = true;
				failedImageBounds++;
			}
		}
		badFeatures.set(inlierIdx, badObservation);
	}

	if (verbose != null)
		verbose.printf("view.id='%s' inlierIdx=%d, errors: behind=%d bounds=%d reprojection=%d tri=%d, obs=%d\n",
				wview.pview.id, setIdx, failedBehind, failedImageBounds, failedReprojection, failedTriangulate, numFeatures);

	return true;
}
Example usage of org.ddogleg.struct.DogArray in the BoofCV project (by lessthanoptimal): method compare of class TestComputeMeanTuple_MT_U8.
/**
 * The concurrent implementation must produce exactly the same cluster means as the
 * single-threaded implementation when given identical descriptors and assignments.
 */
@Test
void compare() {
	int dof = 31;
	int totalClusters = 4;

	// Generate random descriptors and assign each one to a random cluster
	var descriptors = new ArrayList<TupleDesc_U8>();
	var assignments = new DogArray_I32(1000);
	for (int descIdx = 0; descIdx < 1000; descIdx++) {
		assignments.add(rand.nextInt(totalClusters));
		var desc = new TupleDesc_U8(dof);
		for (int element = 0; element < dof; element++) {
			desc.data[element] = (byte)rand.nextInt();
		}
		descriptors.add(desc);
	}
	var points = new ListAccessor<>(descriptors, (src, dst) -> dst.setTo(src), TupleDesc_U8.class);

	// Storage for the results of each implementation
	var foundSingle = new DogArray<>(() -> new TupleDesc_U8(dof));
	var foundMulti = new DogArray<>(() -> new TupleDesc_U8(dof));
	foundSingle.resize(totalClusters);
	foundMulti.resize(totalClusters);

	new ComputeMeanTuple_U8(dof).process(points, assignments, foundSingle);
	new ComputeMeanTuple_MT_U8(dof).process(points, assignments, foundMulti);

	// Results must match element for element
	assertEquals(foundSingle.size, foundMulti.size);
	for (int cluster = 0; cluster < totalClusters; cluster++) {
		assertArrayEquals(foundSingle.get(cluster).data, foundMulti.get(cluster).data);
	}
}
Example usage of org.ddogleg.struct.DogArray in the BoofCV project (by lessthanoptimal): method withLensDistortion of class TestQrCodeDecoderImage.
/**
 * Decoding should fail on a lens-distorted image until the decoder is told the distortion
 * model, after which the QR code must be read successfully.
 */
@Test
void withLensDistortion() {
	// render a distorted image
	var helper = new QrCodeDistortedChecks();
	helper.render();

	// find location of position patterns and create graph
	var pps = new DogArray<>(PositionPatternNode::new);
	pps.grow().square = new Polygon2D_F64(4);
	pps.grow().square = new Polygon2D_F64(4);
	pps.grow().square = new Polygon2D_F64(4);
	helper.setLocation(pps.get(0).square, pps.get(1).square, pps.get(2).square);
	for (int i = 0; i < pps.size; i++) {
		pps.get(i).grayThreshold = 125;
	}
	// these numbers were found by sketching the QR code
	connect(pps.get(2), pps.get(1), 3, 1);
	connect(pps.get(0), pps.get(1), 0, 2);

	// Should fail when run on distorted image
	var decoder = new QrCodeDecoderImage<>(null, EciEncoding.ISO8859_1, GrayF32.class);
	decoder.process(pps.toList(), helper.image);
	assertEquals(0, decoder.successes.size());

	// now tell it how to undistort the image
	decoder.setLensDistortion(helper.image.width, helper.image.height, helper.distortion);
	for (int i = 0; i < pps.size; i++) {
		helper.distToUndist(pps.get(i).square);
	}
	decoder.process(pps.toList(), helper.image);
	assertEquals(1, decoder.successes.size());

	QrCode found = decoder.getFound().get(0);
	// JUnit's assertEquals takes (expected, actual); the original had the arguments swapped,
	// which produces a misleading failure message
	assertEquals("123", found.message);
}
End of aggregated DogArray usage examples.