Use of boofcv.abst.geo.bundle.SceneStructureMetric in project BoofCV by lessthanoptimal.
The class VisOdomBundleAdjustment, method setupBundleStructure.
/**
 * Converts input data into a format that bundle adjustment can understand
 */
private void setupBundleStructure() {
    // Total number of tracks that will be fed into bundle adjustment
    int totalBundleTracks = selectedTracks.size();

    // Initialize data structures
    final SceneStructureMetric structure = bundle.getStructure();
    final SceneObservations observations = bundle.getObservations();
    observations.initialize(frames.size);
    structure.initialize(cameras.size, frames.size, totalBundleTracks);
    for (int cameraIdx = 0; cameraIdx < cameras.size; cameraIdx++) {
        structure.setCamera(cameraIdx, true, cameras.get(cameraIdx).bundleCamera);
    }

    // TODO make the first frame the origin. This is done to avoid numerical issues after traveling a good distance
    for (int frameIdx = 0; frameIdx < frames.size; frameIdx++) {
        BFrame bf = frames.get(frameIdx);
        bf.frame_to_world.invert(world_to_view);
        structure.setView(frameIdx, bf.camera.index, frameIdx == 0, world_to_view);
        // save the index since it's needed in the next loop
        frames.get(frameIdx).listIndex = frameIdx;
    }

    // A feature is only passed to SBA if it's active and seen by more than one view,
    // which is why it gets its own index inside the bundle structure
    int featureBundleIdx = 0;
    for (int trackIdx = 0; trackIdx < tracks.size; trackIdx++) {
        BTrack bt = tracks.get(trackIdx);
        if (!bt.selected) {
            continue;
        }
        Point4D_F64 p = bt.worldLoc;
        structure.setPoint(featureBundleIdx, p.x, p.y, p.z, p.w);
        for (int obsIdx = 0; obsIdx < bt.observations.size; obsIdx++) {
            BObservation o = bt.observations.get(obsIdx);
            SceneObservations.View view = observations.getView(o.frame.listIndex);
            view.add(featureBundleIdx, (float) o.pixel.x, (float) o.pixel.y);
        }
        featureBundleIdx++;
    }

    // Sanity check
    if (featureBundleIdx != structure.points.size)
        throw new RuntimeException("BUG! Tracks fed in and points don't match");
}
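For orientation, the same population pattern is shown below in isolation: a minimal, self-contained sketch that fills a SceneStructureMetric and SceneObservations the way setupBundleStructure does, via initialize, setCamera, setView, setPoint, and per-view add calls. The counts, the BundlePinholeSimplified camera, the transforms, and all numeric values are made up purely for illustration.
import boofcv.abst.geo.bundle.SceneObservations;
import boofcv.abst.geo.bundle.SceneStructureMetric;
import boofcv.alg.geo.bundle.cameras.BundlePinholeSimplified;
import georegression.struct.se.Se3_F64;

public class BundleSetupSketch {
    public static void main(String[] args) {
        int numCameras = 1, numViews = 2, numPoints = 1; // illustrative sizes only

        // Homogeneous points (x, y, z, w), matching the Point4D_F64 usage above
        SceneStructureMetric structure = new SceneStructureMetric(true);
        SceneObservations observations = new SceneObservations();

        structure.initialize(numCameras, numViews, numPoints);
        observations.initialize(numViews);

        // A single camera model shared by all views; parameters are arbitrary here
        BundlePinholeSimplified camera = new BundlePinholeSimplified();
        camera.f = 500;
        structure.setCamera(0, true, camera);

        // First view fixed at the origin, second view given a world_to_view transform
        structure.setView(0, 0, true, new Se3_F64());
        Se3_F64 world_to_view1 = new Se3_F64();
        world_to_view1.T.x = -0.1;
        structure.setView(1, 0, false, world_to_view1);

        // One homogeneous 3D point observed in both views
        structure.setPoint(0, 0.2, -0.1, 2.0, 1.0);
        observations.getView(0).add(0, 320f, 240f);
        observations.getView(1).add(0, 345f, 241f);
    }
}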
Use of boofcv.abst.geo.bundle.SceneStructureMetric in project BoofCV by lessthanoptimal.
The class CalibrateStereoPlanar, method calibrateMono.
/**
 * Compute intrinsic calibration for one of the cameras
 */
private CameraPinholeBrown calibrateMono(CalibrateMonoPlanar calib, List<Se3_F64> location) {
    calib.setVerbose(verbose, null);
    CameraPinholeBrown intrinsic = calib.process();

    SceneStructureMetric structure = calib.getStructure();
    for (int i = 0; i < structure.motions.size; i++) {
        location.add(structure.motions.data[i].motion);
    }
    return intrinsic;
}
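The motions collected in location are the world-to-view (target-to-camera) transforms estimated during monocular calibration. Below is a hedged usage sketch, assuming a CalibrateMonoPlanar instance that has already been processed, which simply prints each view's translation using the same structure.motions access shown above.
import boofcv.abst.geo.bundle.SceneStructureMetric;
import boofcv.abst.geo.calibration.CalibrateMonoPlanar;
import georegression.struct.se.Se3_F64;

public class DumpCalibrationMotions {
    // Assumes 'calib' has already run; prints the translation of each estimated view motion
    public static void printViewTranslations(CalibrateMonoPlanar calib) {
        SceneStructureMetric structure = calib.getStructure();
        for (int i = 0; i < structure.motions.size; i++) {
            Se3_F64 m = structure.motions.get(i).motion;
            System.out.printf("view %d: T = (%.4f, %.4f, %.4f)%n", i, m.T.x, m.T.y, m.T.z);
        }
    }
}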
Use of boofcv.abst.geo.bundle.SceneStructureMetric in project BoofCV by lessthanoptimal.
The class CalibrateStereoPlanar, method computeErrors.
public List<ImageResults> computeErrors() {
    final SceneStructureMetric structure = bundleUtils.getStructure();
    final SceneObservations observations = bundleUtils.getObservations();

    List<ImageResults> errors = new ArrayList<>();

    // Encode the scene into a parameter vector and evaluate the reprojection residuals
    double[] parameters = new double[structure.getParameterCount()];
    double[] residuals = new double[observations.getObservationCount() * 2];
    CodecSceneStructureMetric codec = new CodecSceneStructureMetric();
    codec.encode(structure, parameters);

    BundleAdjustmentMetricResidualFunction function = new BundleAdjustmentMetricResidualFunction();
    function.configure(structure, observations);
    function.process(parameters, residuals);

    // Residuals are stored as (x, y) pairs in the same order as the rigid view observations
    int idx = 0;
    for (int i = 0; i < observations.viewsRigid.size; i++) {
        SceneObservations.View v = observations.viewsRigid.data[i];
        ImageResults r = new ImageResults(v.size());

        double sumX = 0;
        double sumY = 0;
        double meanErrorMag = 0;
        double maxError = 0;

        for (int j = 0; j < v.size(); j++) {
            double x = residuals[idx++];
            double y = residuals[idx++];
            double nerr = r.pointError[j] = Math.sqrt(x * x + y * y);
            meanErrorMag += nerr;
            maxError = Math.max(maxError, nerr);
            sumX += x;
            sumY += y;
        }

        r.biasX = sumX / v.size();
        r.biasY = sumY / v.size();
        r.meanError = meanErrorMag / v.size();
        r.maxError = maxError;

        errors.add(r);
    }
    return errors;
}
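The residual vector produced by BundleAdjustmentMetricResidualFunction holds two entries (x, y) per observation, in the same order the rigid views store them, which is what the index bookkeeping above relies on. As a sketch under the same assumption of an already populated structure/observations pair, the overall root-mean-square reprojection error can be computed with the same encode/process pattern:
import boofcv.abst.geo.bundle.SceneObservations;
import boofcv.abst.geo.bundle.SceneStructureMetric;
import boofcv.alg.geo.bundle.BundleAdjustmentMetricResidualFunction;
import boofcv.alg.geo.bundle.CodecSceneStructureMetric;

public class ReprojectionError {
    // Sketch: overall RMS reprojection error for an already populated scene
    public static double computeRms(SceneStructureMetric structure, SceneObservations observations) {
        double[] parameters = new double[structure.getParameterCount()];
        double[] residuals = new double[observations.getObservationCount() * 2];

        // Encode the current scene state into the parameter vector
        new CodecSceneStructureMetric().encode(structure, parameters);

        // Evaluate the (x, y) residual for every observation
        BundleAdjustmentMetricResidualFunction function = new BundleAdjustmentMetricResidualFunction();
        function.configure(structure, observations);
        function.process(parameters, residuals);

        double sumSq = 0;
        for (int i = 0; i < residuals.length; i += 2) {
            double x = residuals[i];
            double y = residuals[i + 1];
            sumSq += x * x + y * y;
        }
        return Math.sqrt(sumSq / observations.getObservationCount());
    }
}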
Use of boofcv.abst.geo.bundle.SceneStructureMetric in project BoofCV by lessthanoptimal.
The class CalibrateStereoPlanar, method refineAll.
/**
 * Jointly refines both cameras together
 *
 * @param parameters (input/output) initial estimate; updated if refinement is successful
 */
private void refineAll(StereoParameters parameters) {
    Se3_F64 left_to_right = parameters.right_to_left.invert(null);

    final SceneStructureMetric structure = bundleUtils.getStructure();
    final SceneObservations observations = bundleUtils.getObservations();

    final SceneStructureMetric structureLeft = calibLeft.getStructure();
    final SceneStructureMetric structureRight = calibRight.getStructure();

    int numViews = structureLeft.views.size;

    // left and right cameras, n view pairs, and 1 known calibration target
    structure.initialize(2, numViews * 2, numViews + 1, layout.size(), 1);

    // initialize the cameras
    structure.setCamera(0, false, structureLeft.cameras.get(0).model);
    structure.setCamera(1, false, structureRight.cameras.get(0).model);

    // configure the known calibration target
    structure.setRigid(0, true, new Se3_F64(), layout.size());
    SceneStructureMetric.Rigid rigid = structure.rigids.data[0];
    for (int i = 0; i < layout.size(); i++) {
        rigid.setPoint(i, layout.get(i).x, layout.get(i).y, 0);
    }

    // initialize the views. Right views will be relative to the left views and will share the same baseline
    int left_to_right_idx = structure.addMotion(false, left_to_right);
    for (int viewIndex = 0; viewIndex < numViews; viewIndex++) {
        int world_to_left_idx = structure.addMotion(false, structureLeft.motions.get(viewIndex).motion);
        structure.setView(viewIndex * 2, 0, world_to_left_idx, -1);
        structure.setView(viewIndex * 2 + 1, 1, left_to_right_idx, viewIndex * 2);
    }

    // Add observations for left and right camera
    observations.initialize(structure.views.size, true);
    for (int viewIndex = 0; viewIndex < numViews; viewIndex++) {
        SceneObservations.View oviewLeft = observations.getViewRigid(viewIndex * 2);
        CalibrationObservation left = calibLeft.observations.get(viewIndex);
        for (int j = 0; j < left.size(); j++) {
            PointIndex2D_F64 p = left.get(j);
            oviewLeft.add(p.index, (float) p.p.x, (float) p.p.y);
            structure.connectPointToView(p.index, viewIndex * 2);
        }
    }
    for (int viewIndex = 0; viewIndex < numViews; viewIndex++) {
        SceneObservations.View oviewRight = observations.getViewRigid(viewIndex * 2 + 1);
        CalibrationObservation right = calibRight.observations.get(viewIndex);
        for (int j = 0; j < right.size(); j++) {
            PointIndex2D_F64 p = right.get(j);
            oviewRight.add(p.index, (float) p.p.x, (float) p.p.y);
            structure.connectPointToView(p.index, viewIndex * 2 + 1);
        }
    }

    if (verbose != null)
        verbose.println("Joint bundle adjustment");

    if (!bundleUtils.process())
        return;

    // save the output
    structure.motions.get(left_to_right_idx).motion.invert(parameters.right_to_left);
    BundleAdjustmentOps.convert(((BundlePinholeBrown) structure.cameras.get(0).model), parameters.left.width, parameters.left.height, parameters.left);
    BundleAdjustmentOps.convert(((BundlePinholeBrown) structure.cameras.get(1).model), parameters.left.width, parameters.left.height, parameters.right);
}
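The indexing convention above is the interesting part: each stereo pair contributes two views, the left view references its own world-to-left motion with no parent (-1), and the right view reuses a single shared left_to_right motion with the matching left view as its parent, so one baseline is estimated across all pairs. The sketch below shows just that view/motion wiring; the number of views is hypothetical and identity transforms stand in for the real initial estimates.
import boofcv.abst.geo.bundle.SceneStructureMetric;
import georegression.struct.se.Se3_F64;

public class StereoViewWiringSketch {
    public static void main(String[] args) {
        int numViews = 3; // hypothetical number of stereo pairs

        SceneStructureMetric structure = new SceneStructureMetric(false);
        // 2 cameras, 2 views per pair, one motion per left view plus the shared baseline.
        // No points or rigid objects in this wiring-only sketch.
        structure.initialize(2, numViews * 2, numViews + 1, 0, 0);

        // One baseline motion shared by every right view
        int left_to_right_idx = structure.addMotion(false, new Se3_F64());

        for (int viewIndex = 0; viewIndex < numViews; viewIndex++) {
            // Left view: its own world-to-left motion, relative to the world frame (-1)
            int world_to_left_idx = structure.addMotion(false, new Se3_F64());
            structure.setView(viewIndex * 2, 0, world_to_left_idx, -1);

            // Right view: shared baseline motion, parented to the matching left view
            structure.setView(viewIndex * 2 + 1, 1, left_to_right_idx, viewIndex * 2);
        }
    }
}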
Use of boofcv.abst.geo.bundle.SceneStructureMetric in project BoofCV by lessthanoptimal.
The class MultiViewOps, method scenePointsToPixels.
/**
 * Projects points in the scene onto the specified image. Results are returned using a lambda that provides
 * the point's index, the point's 3D location in the camera frame, and the projected pixel.
 *
 * No checks are done for the following:
 * <ul>
 * <li>Was the feature observed by this view</li>
 * <li>Does the feature appear behind the camera</li>
 * <li>Is the projected pixel inside the image</li>
 * </ul>
 *
 * @param scene (Input) Metric scene
 * @param viewIdx (Input) Index of view for which points are projected
 * @param function (Output) Called with results (index, 3D camera location, pixel)
 */
public static void scenePointsToPixels(SceneStructureMetric scene, int viewIdx, BoofLambdas.ProcessIndex2<Point3D_F64, Point2D_F64> function) {
    Se3_F64 world_to_view = new Se3_F64();
    SceneStructureMetric.View view = scene.views.get(viewIdx);
    scene.getWorldToView(view, world_to_view, null);
    BundleAdjustmentCamera camera = Objects.requireNonNull(scene.getViewCamera(view).model);

    Point3D_F64 camPoint = new Point3D_F64();
    Point2D_F64 pixel = new Point2D_F64();

    for (int pointIdx = 0; pointIdx < scene.points.size; pointIdx++) {
        SceneStructureCommon.Point point = scene.points.get(pointIdx);

        double x = point.coordinate[0];
        double y = point.coordinate[1];
        double z = point.coordinate[2];
        double w = scene.isHomogenous() ? point.coordinate[3] : 1.0;

        // Project the pixel while being careful about points at infinity
        SePointOps_F64.transformV(world_to_view, x, y, z, w, camPoint);
        camera.project(camPoint.x, camPoint.y, camPoint.z, pixel);

        function.process(pointIdx, camPoint, pixel);
    }
}
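A hedged usage sketch, assuming a SceneStructureMetric that has already been populated with cameras, views, and points: collect the projected pixels of view 0 that land in front of the camera. Note that the lambda receives recycled work objects, so the pixel is copied before being stored.
import boofcv.abst.geo.bundle.SceneStructureMetric;
import boofcv.alg.geo.MultiViewOps;
import georegression.struct.point.Point2D_F64;

import java.util.ArrayList;
import java.util.List;

public class ProjectScenePoints {
    // Assumes 'scene' is already populated; returns copies of the pixels projected into view 0
    public static List<Point2D_F64> pixelsInView0(SceneStructureMetric scene) {
        List<Point2D_F64> pixels = new ArrayList<>();
        MultiViewOps.scenePointsToPixels(scene, 0, (pointIdx, camPoint, pixel) -> {
            // Skip points behind the camera; copy since the same 'pixel' instance is reused
            if (camPoint.z > 0)
                pixels.add(pixel.copy());
        });
        return pixels;
    }
}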