Example usage of boofcv.alg.distort.brown.RemoveBrownPtoN_F64 in the project BoofCV by lessthanoptimal.
From the method compute in the class TestVisualDepthOps.
private Point3D_F64 compute(int x, int y, int distance) {
	// Converter from distorted pixel coordinates to normalized image coordinates,
	// configured from the test's intrinsic parameters
	RemoveBrownPtoN_F64 pixelToNorm = new RemoveBrownPtoN_F64();
	pixelToNorm.setK(param.fx, param.fy, param.skew, param.cx, param.cy)
			.setDistortion(param.radial, param.t1, param.t2);

	Point2D_F64 norm = new Point2D_F64();
	pixelToNorm.compute(x, y, norm);

	// Scale the normalized coordinate by the depth to recover the 3D point
	Point3D_F64 result = new Point3D_F64();
	result.z = distance;
	result.x = norm.x * result.z;
	result.y = norm.y * result.z;
	return result;
}
Example usage of boofcv.alg.distort.brown.RemoveBrownPtoN_F64 in the project BoofCV by lessthanoptimal.
From the method triangulatePoints in the class MultiViewOps.
/**
 * Convenience function for initializing bundle adjustment parameters. Triangulates points using camera
 * position and pixel observations.
 *
 * @param structure camera locations
 * @param observations observations of features in the images
 */
public static void triangulatePoints(SceneStructureMetric structure, SceneObservations observations) {
	TriangulateNViewsMetricH triangulator = FactoryMultiView.triangulateNViewMetricH(ConfigTriangulation.GEOMETRIC());

	// Build one pixel-to-normalized converter per camera, indexed by camera ID
	List<RemoveBrownPtoN_F64> pixelToNorm = new ArrayList<>();
	for (int cameraIdx = 0; cameraIdx < structure.cameras.size; cameraIdx++) {
		RemoveBrownPtoN_F64 converter = new RemoveBrownPtoN_F64();
		BundleAdjustmentCamera model = Objects.requireNonNull(structure.cameras.data[cameraIdx].model);
		if (model instanceof BundlePinholeSimplified) {
			BundlePinholeSimplified cam = (BundlePinholeSimplified) model;
			converter.setK(cam.f, cam.f, 0, 0, 0).setDistortion(new double[] { cam.k1, cam.k2 }, 0, 0);
		} else if (model instanceof BundlePinhole) {
			BundlePinhole cam = (BundlePinhole) model;
			converter.setK(cam.fx, cam.fy, cam.skew, cam.cx, cam.cy).setDistortion(new double[] { 0, 0 }, 0, 0);
		} else if (model instanceof BundlePinholeBrown) {
			BundlePinholeBrown cam = (BundlePinholeBrown) model;
			converter.setK(cam.fx, cam.fy, cam.skew, cam.cx, cam.cy).setDistortion(cam.radial, cam.t1, cam.t2);
		} else {
			throw new RuntimeException("Unknown camera model!");
		}
		pixelToNorm.add(converter);
	}

	// Workspace that is reused for every triangulated point
	DogArray<Point2D_F64> normObs = new DogArray<>(Point2D_F64::new);
	normObs.resize(3);
	final boolean homogenous = structure.isHomogenous();
	Point4D_F64 X = new Point4D_F64();
	List<Se3_F64> worldToViews = new ArrayList<>();

	for (int pointIdx = 0; pointIdx < structure.points.size; pointIdx++) {
		normObs.reset();
		worldToViews.clear();
		SceneStructureCommon.Point sp = structure.points.get(pointIdx);

		// Collect the pose and normalized observation from every view that sees this point
		for (int obsIdx = 0; obsIdx < sp.views.size; obsIdx++) {
			int viewIdx = sp.views.get(obsIdx);
			SceneStructureMetric.View v = structure.views.data[viewIdx];
			worldToViews.add(structure.getParentToView(v));
			// look up the pixel observation of this point in this view
			Point2D_F64 n = normObs.grow();
			int indexInView = observations.views.get(viewIdx).point.indexOf(pointIdx);
			observations.views.get(viewIdx).getPixel(indexInView, n);
			// convert the pixel into normalized image coordinates, in place
			pixelToNorm.get(v.camera).compute(n.x, n.y, n);
		}

		if (!triangulator.triangulate(normObs.toList(), worldToViews, X)) {
			// this should work unless the input is bad
			throw new RuntimeException("Triangulation failed. Bad input?");
		}

		if (homogenous)
			sp.set(X.x, X.y, X.z, X.w);
		else
			sp.set(X.x / X.w, X.y / X.w, X.z / X.w);
	}
}
Example usage of boofcv.alg.distort.brown.RemoveBrownPtoN_F64 in the project BoofCV by lessthanoptimal.
From the method loadExtrinsicsIntrinsics in the class SceneMergingOperations.
/**
 * Loads information about the view's intrinsics and estimated intrinsics in the specified scene
 *
 * @param scene Which scene is being considered
 * @param inliers Information on the views and inlier set used to estimate the target view
 * @param listWorldToViewSrc (Output) Extrinsics
 * @param listIntrinsicsSrc (Output) Intrinsics
 */
private void loadExtrinsicsIntrinsics(SceneWorkingGraph scene, SceneWorkingGraph.InlierInfo inliers, List<Se3_F64> listWorldToViewSrc, DogArray<RemoveBrownPtoN_F64> listIntrinsicsSrc) {
	// Discard whatever the output containers held previously
	listWorldToViewSrc.clear();
	listIntrinsicsSrc.reset();

	// For every view in the inlier set, record its pose and a pixel->normalized converter
	for (int viewIdx = 0; viewIdx < inliers.views.size; viewIdx++) {
		PairwiseImageGraph.View pairwiseView = inliers.views.get(viewIdx);
		SceneWorkingGraph.View workingView = Objects.requireNonNull(scene.views.get(pairwiseView.id));
		BundlePinholeSimplified intrinsic = scene.getViewCamera(workingView).intrinsic;

		// the view's pose
		listWorldToViewSrc.add(workingView.world_to_view);

		// convert the simplified pinhole model into a pixel-to-normalized transform
		RemoveBrownPtoN_F64 pixelToNorm = listIntrinsicsSrc.grow();
		pixelToNorm.setK(intrinsic.f, intrinsic.f, 0, 0, 0);
		pixelToNorm.setDistortion(intrinsic.k1, intrinsic.k2);
	}
}
Example usage of boofcv.alg.distort.brown.RemoveBrownPtoN_F64 in the project BoofCV by lessthanoptimal.
From the method depthTo3D in the class VisualDepthOps.
/**
 * Creates a point cloud from a depth image and saves the color information. The depth and color images are
 * assumed to be aligned.
 *
 * @param param Intrinsic camera parameters for depth image
 * @param rgb Color image that's aligned to the depth.
 * @param depth depth image. each value is in millimeters.
 * @param cloud Output point cloud
 * @param cloudColor Output color for each point in the cloud
 */
public static void depthTo3D(CameraPinholeBrown param, Planar<GrayU8> rgb, GrayU16 depth, DogArray<Point3D_F64> cloud, DogArray<int[]> cloudColor) {
	cloud.reset();
	cloudColor.reset();

	// converter from distorted pixel coordinates to normalized image coordinates
	RemoveBrownPtoN_F64 pixelToNorm = new RemoveBrownPtoN_F64();
	pixelToNorm.setK(param.fx, param.fy, param.skew, param.cx, param.cy).setDistortion(param.radial, param.t1, param.t2);
	Point2D_F64 norm = new Point2D_F64();

	GrayU8 bandR = rgb.getBand(0);
	GrayU8 bandG = rgb.getBand(1);
	GrayU8 bandB = rgb.getBand(2);

	for (int y = 0; y < depth.height; y++) {
		int index = depth.startIndex + y * depth.stride;
		for (int x = 0; x < depth.width; x++) {
			int mm = depth.data[index++] & 0xFFFF;

			// a value of zero means there is no depth measurement at this pixel
			if (mm == 0)
				continue;

			// this could all be precomputed to speed it up
			pixelToNorm.compute(x, y, norm);

			// scale the normalized coordinate by the depth (millimeters) to get the 3D point
			Point3D_F64 p = cloud.grow();
			p.z = mm;
			p.x = norm.x * p.z;
			p.y = norm.y * p.z;

			// save the aligned RGB color for this point
			int[] color = cloudColor.grow();
			color[0] = bandR.unsafe_get(x, y);
			color[1] = bandG.unsafe_get(x, y);
			color[2] = bandB.unsafe_get(x, y);
		}
	}
}
Example usage of boofcv.alg.distort.brown.RemoveBrownPtoN_F64 in the project BoofCV by lessthanoptimal.
From the method checkPhysicalConstraints in the class MetricSanityChecks.
/**
 * Checks physical constraints for one inlier set in a {@link SceneWorkingGraph}. Features are triangulated
 * directly from observations. Raw counts for each type of error can be found for this function.
 *
 * @param dbSimilar Use to get feature locations in the image
 * @param scene The scene
 * @param wview The view to check
 * @param setIdx Which inlier set in the view
 * @return true if nothing went wrong or false if a very nasty error was detected
 */
public boolean checkPhysicalConstraints(LookUpSimilarImages dbSimilar, SceneWorkingGraph scene, SceneWorkingGraph.View wview, int setIdx) {
// Reset per-call error counters; callers can read these after the call returns
failedTriangulate = 0;
failedBehind = 0;
failedImageBounds = 0;
failedReprojection = 0;
SceneWorkingGraph.InlierInfo inliers = wview.inliers.get(setIdx);
int numFeatures = inliers.getInlierCount();
// One flag per inlier feature; set to true when any constraint fails
badFeatures.resetResize(numFeatures, false);
// Per-view parallel lists built up in the loop below
List<SceneWorkingGraph.View> listViews = new ArrayList<>();
List<RemoveBrownPtoN_F64> listNormalize = new ArrayList<>();
List<Se3_F64> listMotion = new ArrayList<>();
List<DogArray<Point2D_F64>> listFeatures = new ArrayList<>();
List<Point2D_F64> listViewPixels = new ArrayList<>();
// All motions below are expressed relative to the target view (view 1)
Se3_F64 view1_to_world = wview.world_to_view.invert(null);
for (int i = 0; i < inliers.views.size; i++) {
SceneWorkingGraph.View w = scene.lookupView(inliers.views.get(i).id);
SceneWorkingGraph.Camera c = scene.getViewCamera(w);
// A non-positive focal length means the estimate is unusable; abort the whole check
if (c.intrinsic.f <= 0.0) {
if (verbose != null)
verbose.println("Negative focal length. view='" + w.pview.id + "'");
return false;
}
listViews.add(w);
// TODO switch to known camera if available
// Pixel -> normalized converter built from the simplified intrinsic model
var normalize = new RemoveBrownPtoN_F64();
normalize.setK(c.intrinsic.f, c.intrinsic.f, 0, 0, 0).setDistortion(c.intrinsic.k1, c.intrinsic.k2);
listNormalize.add(normalize);
// Motion from the target view's frame to this view's frame
listMotion.add(view1_to_world.concat(w.world_to_view, null));
SceneWorkingGraph.Camera wcamera = scene.getViewCamera(w);
var features = new DogArray<>(Point2D_F64::new);
dbSimilar.lookupPixelFeats(w.pview.id, features);
// Shift pixels so the prior principal point is the origin, matching the
// zero-principal-point intrinsic model used above
double cx = wcamera.prior.cx;
double cy = wcamera.prior.cy;
features.forEach(p -> p.setTo(p.x - cx, p.y - cy));
listFeatures.add(features);
}
// Workspace reused for every feature
List<Point2D_F64> pixelNorms = BoofMiscOps.createListFilled(inliers.views.size, Point2D_F64::new);
Point4D_F64 foundX = new Point4D_F64();
Point4D_F64 viewX = new Point4D_F64();
Point2D_F64 predictdPixel = new Point2D_F64();
SceneWorkingGraph.Camera wviewCamera = scene.getViewCamera(wview);
for (int inlierIdx = 0; inlierIdx < numFeatures; inlierIdx++) {
listViewPixels.clear();
// Gather this feature's observation in every view and normalize it
for (int viewIdx = 0; viewIdx < listViews.size(); viewIdx++) {
Point2D_F64 p = listFeatures.get(viewIdx).get(inliers.observations.get(viewIdx).get(inlierIdx));
listViewPixels.add(p);
listNormalize.get(viewIdx).compute(p.x, p.y, pixelNorms.get(viewIdx));
}
// Triangulate in the target view's reference frame (homogeneous output)
if (!triangulator.triangulate(pixelNorms, listMotion, foundX)) {
failedTriangulate++;
badFeatures.set(inlierIdx, true);
continue;
}
boolean badObservation = false;
// Check the triangulated point against each view's physical constraints
for (int viewIdx = 0; viewIdx < listViews.size(); viewIdx++) {
Se3_F64 view1_to_view = listMotion.get(viewIdx);
SceneWorkingGraph.View w = listViews.get(viewIdx);
SePointOps_F64.transform(view1_to_view, foundX, viewX);
// Point must be in front of the camera
if (PerspectiveOps.isBehindCamera(viewX)) {
badObservation = true;
failedBehind++;
}
// NOTE(review): this projects with the TARGET view's camera (wviewCamera) for every
// view in the set, rather than view w's own camera — confirm this is intentional
wviewCamera.intrinsic.project(viewX.x, viewX.y, viewX.z, predictdPixel);
double reprojectionError = predictdPixel.distance2(listViewPixels.get(viewIdx));
// distance2 is a squared distance, so compare against the squared threshold
if (reprojectionError > maxReprojectionErrorSq) {
badObservation = true;
failedReprojection++;
}
SceneWorkingGraph.Camera wcamera = scene.getViewCamera(w);
int width = wcamera.prior.width;
int height = wcamera.prior.height;
double cx = wcamera.prior.cx;
double cy = wcamera.prior.cy;
// Undo the principal-point shift before testing against the image bounds
if (!BoofMiscOps.isInside(width, height, predictdPixel.x + cx, predictdPixel.y + cy)) {
badObservation = true;
failedImageBounds++;
}
}
badFeatures.set(inlierIdx, badObservation);
}
if (verbose != null)
verbose.printf("view.id='%s' inlierIdx=%d, errors: behind=%d bounds=%d reprojection=%d tri=%d, obs=%d\n", wview.pview.id, setIdx, failedBehind, failedImageBounds, failedReprojection, failedTriangulate, numFeatures);
return true;
}
Aggregations