Use of boofcv.struct.sfm.Point2D3DTrack in project BoofCV by lessthanoptimal.
The class VisOdomPixelDepthPnP, method changePoseToReference.
/**
* Updates the relative position of all points so that the current frame is the reference frame. Mathematically
* this is not needed, but should help keep numbers from getting too large.
*/
private void changePoseToReference() {
    Se3_F64 keyToCurr = currToKey.invert(null);
    List<PointTrack> all = tracker.getAllTracks(null);
    for (PointTrack t : all) {
        Point2D3DTrack p = t.getCookie();
        SePointOps_F64.transform(keyToCurr, p.location, p.location);
    }
    concatMotion();
}
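For context, the sketch below illustrates the coordinate-frame change performed above: a landmark stored in the old key frame is re-expressed in the current frame, which then becomes the new key frame. This is a standalone example with made-up values that uses only the GeoRegression types already referenced in the method (Se3_F64, Point3D_F64, SePointOps_F64); the class name FrameChangeSketch is illustrative, not part of BoofCV.
import georegression.struct.point.Point3D_F64;
import georegression.struct.se.Se3_F64;
import georegression.transform.se.SePointOps_F64;

public class FrameChangeSketch {
    public static void main( String[] args ) {
        // hypothetical pose of the current frame relative to the key frame
        Se3_F64 currToKey = new Se3_F64();
        currToKey.setTranslation(0.5, 0.0, 1.2);

        // invert it, exactly as changePoseToReference() does
        Se3_F64 keyToCurr = currToKey.invert(null);

        // a landmark expressed in the old key frame
        Point3D_F64 location = new Point3D_F64(1.0, 2.0, 10.0);

        // re-express the landmark in the current frame, which becomes the new key frame
        SePointOps_F64.transform(keyToCurr, location, location);

        System.out.println("location in the new reference frame: " + location);
    }
}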
Use of boofcv.struct.sfm.Point2D3DTrack in project MAVSlam by ecmnet.
The class MAVOdomPixelDepthPnP, method changePoseToReference.
/**
* Updates the relative position of all points so that the current frame is
* the reference frame. Mathematically this is not needed, but should help
* keep numbers from getting too large.
*/
private void changePoseToReference() {
    Se3_F64 keyToCurr = currToKey.invert(null);
    List<PointTrack> all = tracker.getAllTracks(null);
    for (PointTrack t : all) {
        Point2D3DTrack p = t.getCookie();
        SePointOps_F64.transform(keyToCurr, p.location, p.location);
    }
    concatMotion();
}
Use of boofcv.struct.sfm.Point2D3DTrack in project BoofCV by lessthanoptimal.
The class VisOdomPixelDepthPnP, method estimateMotion.
/**
* Estimates motion from the set of tracks and their 3D locations
*
* @return true if successful.
*/
private boolean estimateMotion() {
    List<PointTrack> active = tracker.getActiveTracks(null);
    List<Point2D3D> obs = new ArrayList<>();
    for (PointTrack t : active) {
        Point2D3D p = t.getCookie();
        pixelToNorm.compute(t.x, t.y, p.observation);
        obs.add(p);
    }
    // estimate the motion up to a scale factor in translation
    if (!motionEstimator.process(obs))
        return false;
    if (doublePass) {
        if (!performSecondPass(active, obs))
            return false;
    }
    tracker.finishTracking();
    Se3_F64 keyToCurr;
    if (refine != null) {
        keyToCurr = new Se3_F64();
        refine.fitModel(motionEstimator.getMatchSet(), motionEstimator.getModelParameters(), keyToCurr);
    } else {
        keyToCurr = motionEstimator.getModelParameters();
    }
    keyToCurr.invert(currToKey);
    // mark tracks as being inliers and add to inlier list
    int N = motionEstimator.getMatchSet().size();
    for (int i = 0; i < N; i++) {
        int index = motionEstimator.getInputIndex(i);
        Point2D3DTrack t = active.get(index).getCookie();
        t.lastInlier = tick;
        inlierTracks.add(t);
    }
    return true;
}
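The inlier bookkeeping above relies on DDogleg's ModelMatcher contract: getMatchSet() returns only the inliers, and getInputIndex(i) maps the i-th inlier back to its position in the list passed to process(), which here lines up one-to-one with the active track list. The following is a hedged sketch of that mapping as a stand-alone helper; the method name markInliers is hypothetical, and the ModelMatcher, PointTrack, Point2D3D, and Point2D3DTrack types come from DDogleg, BoofCV, and GeoRegression (package paths vary by version).
// Hypothetical helper illustrating the match-set/index mapping used above.
// Assumes matcher.process(obs) already succeeded and obs.get(i) was built from active.get(i).
private void markInliers( ModelMatcher<Se3_F64, Point2D3D> matcher,
                          List<PointTrack> active, long tick,
                          List<Point2D3DTrack> inlierTracks ) {
    int N = matcher.getMatchSet().size();
    for (int i = 0; i < N; i++) {
        // index of this inlier in the list originally passed to process()
        int index = matcher.getInputIndex(i);
        Point2D3DTrack t = active.get(index).getCookie();
        t.lastInlier = tick;     // record the frame in which the track was last an inlier
        inlierTracks.add(t);
    }
}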
Use of boofcv.struct.sfm.Point2D3DTrack in project BoofCV by lessthanoptimal.
The class VisOdomPixelDepthPnP, method addNewTracks.
/**
* Detects new features and computes their 3D coordinates
*/
private void addNewTracks() {
    // System.out.println("----------- Adding new tracks ---------------");
    tracker.spawnTracks();
    List<PointTrack> spawned = tracker.getNewTracks(null);
    // estimate 3D coordinate using stereo vision
    for (PointTrack t : spawned) {
        Point2D3DTrack p = t.getCookie();
        if (p == null) {
            t.cookie = p = new Point2D3DTrack();
        }
        // discard the point if it can't be localized
        if (!pixelTo3D.process(t.x, t.y) || pixelTo3D.getW() == 0) {
            tracker.dropTrack(t);
        } else {
            Point3D_F64 X = p.getLocation();
            double w = pixelTo3D.getW();
            X.set(pixelTo3D.getX() / w, pixelTo3D.getY() / w, pixelTo3D.getZ() / w);
            // translate the point into the key frame
            // SePointOps_F64.transform(currToKey,X,X);
            // not needed since the current frame was just set to be the key frame
            p.lastInlier = tick;
            pixelToNorm.compute(t.x, t.y, p.observation);
        }
    }
}
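The division by w above converts the homogeneous output of the pixel-to-3D estimator into a Euclidean point; w == 0 corresponds to a point at infinity (for example, zero stereo disparity), which is why such tracks are dropped. A small sketch of that conversion as a hypothetical helper; the name setLocationFromHomogeneous is not part of the BoofCV API.
// Hypothetical helper: convert homogeneous (x, y, z, w) into the track's Euclidean location.
// Returns false when the point can't be localized, mirroring the check above.
private boolean setLocationFromHomogeneous( Point3D_F64 location,
                                            double x, double y, double z, double w ) {
    if (w == 0)
        return false;   // point at infinity, e.g. zero disparity
    location.set(x / w, y / w, z / w);
    return true;
}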
Use of boofcv.struct.sfm.Point2D3DTrack in project BoofCV by lessthanoptimal.
The class VisOdomPixelDepthPnP, method dropUnusedTracks.
/**
* Removes tracks which have not been included in the inlier set recently
*
* @return Number of dropped tracks
*/
private int dropUnusedTracks() {
    List<PointTrack> all = tracker.getAllTracks(null);
    int num = 0;
    for (PointTrack t : all) {
        Point2D3DTrack p = t.getCookie();
        if (tick - p.lastInlier > thresholdRetire) {
            tracker.dropTrack(t);
            num++;
        }
    }
    return num;
}
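To make the retirement rule concrete, a short sketch with made-up numbers: with a thresholdRetire of 2, a track last flagged as an inlier three frames ago fails the test and is dropped.
// Sketch of the retirement test with hypothetical values
long tick = 10;               // current frame counter
int thresholdRetire = 2;      // max frames a track may go without being an inlier
long lastInlier = 7;          // frame in which the track was last an inlier

boolean drop = tick - lastInlier > thresholdRetire;  // 10 - 7 = 3 > 2, so the track is dropped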