Use of boofcv.abst.geo.bundle.BundleAdjustmentCamera in the project BoofCV by lessthanoptimal.
From the class MultiViewStereoFromKnownSceneStructure, method computeFusedDisparityAddCloud.
/**
 * Combining stereo information from all images in this cluster, compute a disparity image and add it to the cloud.
 *
 * @param scene (Input) Metric scene the cluster belongs to
 * @param center (Input) View selected as the "center" of the cluster
 * @param sbaIndexToName (Input) Lookup from SBA view index to view name
 * @param pairIndexes (Input) Indexes of views paired with the center
 * @return true if the fused disparity was computed and added, false if fusion failed
 */
boolean computeFusedDisparityAddCloud( SceneStructureMetric scene, ViewInfo center,
                                       TIntObjectMap<String> sbaIndexToName, DogArray_I32 pairIndexes ) {
	if (!computeFused.process(scene, center.relations.indexSba, pairIndexes, sbaIndexToName::get)) {
		if (verbose != null)
			verbose.println("FAILED: fused disparity. center.index=" + center.index);
		return false;
	}

	// The fused disparity doesn't compute a mask since all invalid pixels are marked as invalid using
	// the disparity value
	GrayF32 disparity = computeFused.fusedDisparity;
	dummyMask.reshape(disparity);
	ImageMiscOps.fill(dummyMask, 0);

	// Pass along results to the listener
	if (listener != null) {
		listener.handleFusedDisparity(center.relations.id, disparity, dummyMask, computeFused.fusedParam);
	}

	// Convert data structures into a format which is understood by disparity to cloud
	BundleAdjustmentCamera camera = scene.cameras.get(center.metric.camera).model;
	BundleAdjustmentOps.convert(camera, disparity.width, disparity.height, brown);

	// The fused disparity is in regular pixels and not rectified. Derive both transforms from a
	// single lens-distortion model instead of constructing the same model twice.
	LensDistortionBrown distortion = new LensDistortionBrown(brown);
	Point2Transform2_F64 norm_to_pixel = distortion.distort_F64(false, true);
	Point2Transform2_F64 pixel_to_norm = distortion.undistort_F64(true, false);

	// world/cloud coordinates into this view
	scene.getWorldToView(center.metric, world_to_view1, tmp);

	// Use the computed disparity to add to the common point cloud while not adding points already in
	// the cloud
	disparityCloud.addDisparity(disparity, dummyMask, world_to_view1, computeFused.fusedParam,
			norm_to_pixel, new PointToPixelTransform_F64(pixel_to_norm));
	return true;
}
Use of boofcv.abst.geo.bundle.BundleAdjustmentCamera in the project BoofCV by lessthanoptimal.
From the class GenerateStereoPairGraphFromScene, method estimateRadiansToPixels.
/**
 * Using the center of the image, estimate the relationship between angle (radians) and image pixels. This is only
 * an approximation and is typically worse the farther you are from the center. This is especially true in
 * fisheye images. Also disparity is computed from rectified images which lack distortion. An accurate
 * model would be more complex.
 *
 * @param scene (Input) Metric scene; every view must have a non-null camera model
 */
private void estimateRadiansToPixels( SceneStructureMetric scene ) {
	// Probe angle used to sample the projection. A single named constant replaces the old local
	// "oneDegree", which misleadingly held sin(5 degrees).
	final double probeAngleRad = UtilAngle.degreeToRadian(5.0);
	// x-coordinate at z=1 for the probe ray. Note atan(sin(theta)) != theta exactly, but the
	// difference is negligible for a 5 degree probe and this whole function is an approximation.
	final double probeX = Math.sin(probeAngleRad);

	Point2D_F64 pixelA = new Point2D_F64();
	Point2D_F64 pixelB = new Point2D_F64();

	for (int i = 0; i < scene.views.size; i++) {
		SceneStructureMetric.View view = scene.views.get(i);
		BundleAdjustmentCamera camera = Objects.requireNonNull(scene.getViewCamera(view).model);

		// Project a point on the optical axis and one offset by the probe angle, then measure the
		// pixel distance between the two projections
		camera.project(0.0, 0.0, 1.0, pixelA);
		camera.project(probeX, 0.0, 1.0, pixelB);

		views.get(i).radianToPixels = pixelA.distance(pixelB)/probeAngleRad;
	}
}
Use of boofcv.abst.geo.bundle.BundleAdjustmentCamera in the project BoofCV by lessthanoptimal.
From the class MultiViewOps, method scenePointsToPixels.
/**
 * Projects every point in the scene onto the specified view's image. For each point the provided
 * lambda is invoked with the point's index, its 3D location in the camera frame, and its projected
 * pixel coordinate.
 *
 * No filtering is performed. In particular it is NOT checked whether:
 * <ul>
 * <li>the feature was observed by this view</li>
 * <li>the feature lies behind the camera</li>
 * <li>the projected pixel falls inside the image</li>
 * </ul>
 *
 * @param scene (Input) Metric scene
 * @param viewIdx (Input) Index of view for which points are projected
 * @param function (Output) Called with results (index, 3D camera location, pixel)
 */
public static void scenePointsToPixels( SceneStructureMetric scene, int viewIdx,
                                        BoofLambdas.ProcessIndex2<Point3D_F64, Point2D_F64> function ) {
	SceneStructureMetric.View view = scene.views.get(viewIdx);

	// Pose taking world coordinates into this view's camera frame
	Se3_F64 world_to_view = new Se3_F64();
	scene.getWorldToView(view, world_to_view, null);

	BundleAdjustmentCamera camera = Objects.requireNonNull(scene.getViewCamera(view).model);

	// Work-space objects reused across all points
	Point3D_F64 cameraPt = new Point3D_F64();
	Point2D_F64 pixel = new Point2D_F64();

	for (int index = 0; index < scene.points.size; index++) {
		double[] coordinate = scene.points.get(index).coordinate;
		// Homogenous scenes carry an explicit w; otherwise w=1
		double w = scene.isHomogenous() ? coordinate[3] : 1.0;

		// transformV handles points at infinity through the homogenous coordinate
		SePointOps_F64.transformV(world_to_view, coordinate[0], coordinate[1], coordinate[2], w, cameraPt);
		camera.project(cameraPt.x, cameraPt.y, cameraPt.z, pixel);
		function.process(index, cameraPt, pixel);
	}
}
Use of boofcv.abst.geo.bundle.BundleAdjustmentCamera in the project BoofCV by lessthanoptimal.
From the class MultiViewOps, method triangulatePoints.
/**
 * Convenience function for initializing bundle adjustment parameters. Triangulates points using camera
 * position and pixel observations.
 *
 * @param structure camera locations; point coordinates are overwritten with triangulated values
 * @param observations observations of features in the images
 * @throws RuntimeException if a camera model is unsupported, an observation is missing,
 * or triangulation fails
 */
public static void triangulatePoints( SceneStructureMetric structure, SceneObservations observations ) {
	TriangulateNViewsMetricH triangulator = FactoryMultiView.triangulateNViewMetricH(ConfigTriangulation.GEOMETRIC());

	// Build a pixel -> normalized image coordinate transform for every camera model
	List<RemoveBrownPtoN_F64> list_p_to_n = new ArrayList<>();
	for (int i = 0; i < structure.cameras.size; i++) {
		RemoveBrownPtoN_F64 p2n = new RemoveBrownPtoN_F64();
		BundleAdjustmentCamera baseModel = Objects.requireNonNull(structure.cameras.data[i].model);
		if (baseModel instanceof BundlePinholeSimplified) {
			BundlePinholeSimplified cam = (BundlePinholeSimplified)baseModel;
			p2n.setK(cam.f, cam.f, 0, 0, 0).setDistortion(new double[]{ cam.k1, cam.k2 }, 0, 0);
		} else if (baseModel instanceof BundlePinhole) {
			BundlePinhole cam = (BundlePinhole)baseModel;
			p2n.setK(cam.fx, cam.fy, cam.skew, cam.cx, cam.cy).setDistortion(new double[]{ 0, 0 }, 0, 0);
		} else if (baseModel instanceof BundlePinholeBrown) {
			BundlePinholeBrown cam = (BundlePinholeBrown)baseModel;
			p2n.setK(cam.fx, cam.fy, cam.skew, cam.cx, cam.cy).setDistortion(cam.radial, cam.t1, cam.t2);
		} else {
			throw new RuntimeException("Unknown camera model!");
		}
		list_p_to_n.add(p2n);
	}

	// Pre-allocate a small pool of observation points; reset()/grow() reuses these instances
	DogArray<Point2D_F64> normObs = new DogArray<>(Point2D_F64::new);
	normObs.resize(3);

	final boolean homogenous = structure.isHomogenous();
	Point4D_F64 X = new Point4D_F64();
	List<Se3_F64> worldToViews = new ArrayList<>();

	for (int i = 0; i < structure.points.size; i++) {
		normObs.reset();
		worldToViews.clear();
		SceneStructureCommon.Point sp = structure.points.get(i);

		for (int j = 0; j < sp.views.size; j++) {
			int viewIdx = sp.views.get(j);
			SceneStructureMetric.View v = structure.views.data[viewIdx];
			worldToViews.add(structure.getParentToView(v));

			// get the observation in pixels. Fail fast with a clear message if the structure and
			// observations are inconsistent, instead of an index error deep inside getPixel()
			Point2D_F64 n = normObs.grow();
			int pointIdx = observations.views.get(viewIdx).point.indexOf(i);
			if (pointIdx < 0)
				throw new RuntimeException("Point " + i + " has no observation in view " + viewIdx);
			observations.views.get(viewIdx).getPixel(pointIdx, n);

			// convert to normalized image coordinates
			list_p_to_n.get(v.camera).compute(n.x, n.y, n);
		}

		if (!triangulator.triangulate(normObs.toList(), worldToViews, X)) {
			// this should work unless the input is bad
			throw new RuntimeException("Triangulation failed. Bad input?");
		}

		if (homogenous)
			sp.set(X.x, X.y, X.z, X.w);
		else
			sp.set(X.x/X.w, X.y/X.w, X.z/X.w);
	}
}
Use of boofcv.abst.geo.bundle.BundleAdjustmentCamera in the project BoofCV by lessthanoptimal.
From the class MultiViewStereoFromKnownSceneStructure, method scoreViewsSelectStereoPairs.
/**
 * Compute the score for using each view as the center based on coverage and geometric quality.
 *
 * For every candidate center, each connected view whose pair quality meets {@code minimumQuality3D}
 * contributes to a rectified-coverage score weighted by that pair's quality. The final score is
 * stored in {@code center.score}.
 *
 * @param scene (Input) Metric scene holding camera models and view poses
 */
void scoreViewsSelectStereoPairs(SceneStructureMetric scene) {
	for (int i = 0; i < arrayScores.size; i++) {
		ViewInfo center = arrayScores.get(i);
		List<StereoPairGraph.Edge> pairs = center.relations.pairs;
		// NOTE(review): model is used without a null check here, while other code paths wrap it in
		// Objects.requireNonNull — presumably it is guaranteed non-null by this point; confirm
		BundleAdjustmentCamera candidateCamera = scene.cameras.get(center.metric.camera).model;
		// View-1 of the rectification must be configured before any connected view-2 is processed
		computeRectification.setView1(candidateCamera, center.dimension.width, center.dimension.height);
		scoreCoverage.initialize(center.dimension.width, center.dimension.height, computeRectification.view1_dist_to_undist);
		scene.getWorldToView(center.metric, world_to_view1, tmp);
		// Compute statistics for verbose mode
		int totalQualifiedConnections = 0;
		double averageQuality = 0.0;
		for (int pairIdx = 0; pairIdx < pairs.size(); pairIdx++) {
			StereoPairGraph.Edge pair = pairs.get(pairIdx);
			// sanity check, since this can be hard to debug if done wrong
			BoofMiscOps.checkTrue(pair.quality3D >= 0.0 && pair.quality3D <= 1.0);
			// Skip if insufficient geometric information
			if (pair.quality3D < minimumQuality3D)
				continue;
			totalQualifiedConnections++;
			averageQuality += pair.quality3D;
			// Look up information that the "center" under consideration is connected to
			ViewInfo connected = Objects.requireNonNull(mapScores.get(pair.other(center.relations).id));
			BundleAdjustmentCamera connectedCamera = scene.cameras.get(connected.metric.camera).model;
			// Compute the transform from view-1 to view-2
			scene.getWorldToView(connected.metric, world_to_view2, tmp);
			world_to_view1.invert(tmp).concat(world_to_view2, view1_to_view2);
			// Compute rectification then apply coverage with geometric score.
			// Overlapping contributions from multiple pairs are combined with Float::sum.
			computeRectification.processView2(connectedCamera, connected.dimension.width, connected.dimension.height, view1_to_view2);
			scoreCoverage.addView(connected.dimension.width, connected.dimension.height, computeRectification.undist_to_rect2, (float) pair.quality3D, Float::sum);
		}
		// Look at the sum of all information and see what the score is
		scoreCoverage.process();
		center.score = scoreCoverage.getScore();
		if (verbose != null) {
			// -1 is the sentinel printed when no connection met the quality threshold
			averageQuality = totalQualifiedConnections > 0 ? averageQuality / totalQualifiedConnections : -1;
			verbose.printf("View[%s] center score=%5.2f aveQuality=%.2f conn=%d/%d\n", center.relations.id, center.score, averageQuality, totalQualifiedConnections, pairs.size());
		}
	}
}
Aggregations