Use of boofcv.abst.tracker.PointTrack in project BoofCV by lessthanoptimal.
Class VisOdomDualTrackPnP, method selectCandidateStereoTracks.
/**
 * Searches for tracks which are active and meet the epipolar constraints
 */
private void selectCandidateStereoTracks() {
	final long frameID = getFrameID();

	// mark tracks in right frame that are active
	List<PointTrack> activeRight = trackerRight.getActiveTracks(null);
	for (PointTrack t : activeRight) {
		TrackInfo bt = t.getCookie(); // lint:forbidden ignore_line
		// If the visual track is null then it got dropped earlier
		if (bt.visualTrack == null)
			continue;
		bt.lastSeenRightFrame = frameID;
		initialVisible.add(bt);
	}

	List<PointTrack> activeLeft = trackerLeft.getActiveTracks(null);
	candidates.clear();
	for (PointTrack left : activeLeft) {
		TrackInfo bt = left.getCookie(); // lint:forbidden ignore_line
		if (bt.lastSeenRightFrame != frameID) {
			continue;
		}
		if (bt.visualTrack == null)
			throw new RuntimeException("BUG!!! Should have been skipped over in the right camera");

		// check epipolar constraint and see if it is still valid
		if (stereoCheck.checkPixel(bt.visualTrack.pixel, bt.visualRight.pixel)) {
			bt.lastStereoFrame = frameID;
			candidates.add(left);
		}
	}

	if (verbose != null)
		verbose.println("Visual Tracks: Left: " + activeLeft.size() + " Right: " + activeRight.size() + " Candidates: " + candidates.size());
}
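The getCookie() calls above rely on PointTrack's user-data "cookie" slot, which lets the visual odometry code attach its own bookkeeping to tracks owned by the tracker. A minimal sketch of that pattern, with TrackMeta as a hypothetical stand-in for TrackInfo (not a BoofCV class):

import boofcv.abst.tracker.PointTrack;

public class CookiePatternExample {
	// Hypothetical per-track bookkeeping, analogous to TrackInfo above
	static class TrackMeta {
		long lastSeenRightFrame = -1;
		long lastStereoFrame = -1;
	}

	public static void main( String[] args ) {
		PointTrack track = new PointTrack();
		track.cookie = new TrackMeta(); // attach metadata once, when the track is spawned

		// later, any code holding the PointTrack can recover the metadata
		TrackMeta meta = track.getCookie();
		meta.lastSeenRightFrame = 42;
		System.out.println("last seen in right frame = " + meta.lastSeenRightFrame);
	}
}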
Use of boofcv.abst.tracker.PointTrack in project BoofCV by lessthanoptimal.
Class VisOdomDualTrackPnP, method addInlierObservationsToScene.
private void addInlierObservationsToScene() {
	// mark tracks that are in the inlier set and add their observations to the scene
	int N = matcher.getMatchSet().size();
	if (verbose != null)
		verbose.println("Total Inliers " + N + " / " + candidates.size());
	for (int i = 0; i < N; i++) {
		int index = matcher.getInputIndex(i);
		TrackInfo bt = candidates.get(index).getCookie();
		if (bt.visualTrack == null)
			throw new RuntimeException("BUG!");
		bt.lastInlier = getFrameID();
		bt.hasBeenInlier = true;

		PointTrack l = bt.visualTrack;
		PointTrack r = bt.visualRight;

		bundleViso.addObservation(currentLeft, bt, l.pixel.x, l.pixel.y);
		bundleViso.addObservation(currentRight, bt, r.pixel.x, r.pixel.y);

		inlierTracks.add(bt);
	}
}
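The index bookkeeping here follows the standard ddogleg ModelMatcher contract: getMatchSet() returns the inlier points and getInputIndex(i) maps the i-th inlier back to its position in the list originally passed to process(). A small sketch of that mapping; printInliers is an illustrative helper, not part of BoofCV:

import org.ddogleg.fitting.modelset.ModelMatcher;
import java.util.List;

public class InlierIndexExample {
	// Walks the inlier set of an already-fitted matcher
	public static <Model, Point> void printInliers( ModelMatcher<Model, Point> matcher,
													List<Point> candidates ) {
		int N = matcher.getMatchSet().size();
		for (int i = 0; i < N; i++) {
			// map from match-set index back into the original candidate list
			int inputIndex = matcher.getInputIndex(i);
			Point p = candidates.get(inputIndex);
			System.out.println("inlier " + i + " <- candidate " + inputIndex + " : " + p);
		}
	}
}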
Use of boofcv.abst.tracker.PointTrack in project BoofCV by lessthanoptimal.
Class VisOdomDualTrackPnP, method estimateMotion.
/**
 * Given the set of active tracks, estimate the camera's motion robustly
 *
 * @return true if successful
 */
private boolean estimateMotion() {
	CameraModel leftCM = cameraModels.get(CAMERA_LEFT);
	CameraModel rightCM = cameraModels.get(CAMERA_RIGHT);

	// Perform motion estimation relative to the most recent key frame
	previousLeft.frame_to_world.invert(world_to_prev);

	// Put observation and prior knowledge into a format the model matcher will understand
	listStereo2D3D.reserve(candidates.size());
	listStereo2D3D.reset();
	for (int candidateIdx = 0; candidateIdx < candidates.size(); candidateIdx++) {
		PointTrack l = candidates.get(candidateIdx);
		Stereo2D3D stereo = listStereo2D3D.grow();

		// Get the track location
		TrackInfo bt = l.getCookie();
		PointTrack r = bt.visualRight;

		// Get the 3D coordinate of the point in the 'previous' frame
		SePointOps_F64.transform(world_to_prev, bt.worldLoc, prevLoc4);
		PerspectiveOps.homogenousTo3dPositiveZ(prevLoc4, 1e8, 1e-8, stereo.location);

		// compute normalized image coordinate for track in left and right image
		leftCM.pixelToNorm.compute(l.pixel.x, l.pixel.y, stereo.leftObs);
		rightCM.pixelToNorm.compute(r.pixel.x, r.pixel.y, stereo.rightObs);
		// TODO Could this transform be done just once?
	}

	// Robustly estimate left camera motion
	if (!matcher.process(listStereo2D3D.toList()))
		return false;

	if (modelRefiner != null) {
		modelRefiner.fitModel(matcher.getMatchSet(), matcher.getModelParameters(), previous_to_current);
	} else {
		previous_to_current.setTo(matcher.getModelParameters());
	}

	// Convert the found transforms back to world
	previous_to_current.invert(current_to_previous);
	current_to_previous.concat(previousLeft.frame_to_world, currentLeft.frame_to_world);
	right_to_left.concat(currentLeft.frame_to_world, currentRight.frame_to_world);

	return true;
}
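The last three lines chain SE(3) transforms: invert the estimated motion, compose current-to-previous with previous-to-world to get the new left-camera pose, then apply the fixed right_to_left extrinsics for the right camera. A toy sketch of the same chaining with georegression's Se3_F64, where a.concat(b, out) gives the transform "apply a, then b"; all variable names and values here are illustrative:

import georegression.struct.se.Se3_F64;

public class FrameChainExample {
	public static void main( String[] args ) {
		// estimated motion: maps points from the previous frame into the current frame
		Se3_F64 previous_to_current = new Se3_F64();
		previous_to_current.T.x = 0.1; // e.g. the camera translated 0.1 along x

		// pose of the previous key frame; identity = it sits at the world origin
		Se3_F64 prev_to_world = new Se3_F64();

		// invert the motion, then compose: current -> previous -> world
		Se3_F64 current_to_previous = previous_to_current.invert(null);
		Se3_F64 current_to_world = new Se3_F64();
		current_to_previous.concat(prev_to_world, current_to_world);

		System.out.println("camera-to-world translation: " + current_to_world.T);
	}
}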
Use of boofcv.abst.tracker.PointTrack in project BoofCV by lessthanoptimal.
Class VisOdomMonoPlaneInfinity, method changeCurrToReference.
/**
 * Updates the relative position of all points so that the current frame is the reference frame. Mathematically
 * this is not needed, but should help keep numbers from getting too large.
 */
private void changeCurrToReference() {
	Se2_F64 keyToCurr = currToKey.invert(null);

	List<PointTrack> all = tracker.getAllTracks(null);

	for (PointTrack t : all) {
		VoTrack p = t.getCookie(); // lint:forbidden ignore_line
		if (p.onPlane) {
			SePointOps_F64.transform(keyToCurr, p.ground, p.ground);
		} else {
			GeometryMath_F64.rotate(keyToCurr.c, keyToCurr.s, p.ground, p.ground);
		}
	}

	concatMotion();
}
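The split between the two branches mirrors the geometry: a point on the ground plane moves rigidly with the vehicle, while a point "at infinity" is only a bearing direction and is unaffected by translation, so it just rotates. A standalone sketch of both updates (values illustrative):

import georegression.geometry.GeometryMath_F64;
import georegression.struct.point.Point2D_F64;
import georegression.struct.se.Se2_F64;
import georegression.transform.se.SePointOps_F64;

public class PlaneVsInfinityExample {
	public static void main( String[] args ) {
		Se2_F64 keyToCurr = new Se2_F64(1.0, 0.5, 0.1); // x, y, yaw in radians

		// a point on the ground plane gets the full rigid-body transform
		Point2D_F64 onPlane = new Point2D_F64(2, 3);
		SePointOps_F64.transform(keyToCurr, onPlane, onPlane);

		// a point at infinity is a pure direction, so only the rotation applies
		Point2D_F64 direction = new Point2D_F64(1, 0);
		GeometryMath_F64.rotate(keyToCurr.c, keyToCurr.s, direction, direction);

		System.out.println("on-plane: " + onPlane + "  direction: " + direction);
	}
}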
Use of boofcv.abst.tracker.PointTrack in project BoofCV by lessthanoptimal.
Class TestImageMotionPointTrackerKey, method testPrune.
/**
 * See if tracks are pruned after not being in the inlier set for X frames
 */
@Test
void testPrune() {
	Se2_F32 computed = new Se2_F32(4, 5, 6);
	Se2_F32 model = new Se2_F32();
	DummyTracker tracker = new DummyTracker();
	DummyModelMatcher<Se2_F32> matcher = new DummyModelMatcher<>(computed, 5);
	GrayU8 input = new GrayU8(20, 30);

	ImageMotionPointTrackerKey<GrayU8, Se2_F32> alg = new ImageMotionPointTrackerKey<>(tracker, matcher, null, model, 5);

	// create tracks such that only some of them will be dropped
	tracker.frameID = 9;
	for (int i = 0; i < 10; i++) {
		PointTrack t = new PointTrack();
		AssociatedPairTrack a = new AssociatedPairTrack();
		a.lastUsed = i;
		t.cookie = a;

		tracker.list.add(t);
	}

	// update
	alg.process(input);

	// check to see how many were dropped
	assertEquals(6, tracker.numDropped);
}
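Why 6? A back-of-the-envelope check, assuming the frame counter advances from 9 to 10 during process() and a track is pruned once (frameID - lastUsed) reaches the prune threshold of 5; the exact comparison is internal to ImageMotionPointTrackerKey, so treat this as a sketch:

public class PruneCountSketch {
	public static void main( String[] args ) {
		long frameID = 10, threshold = 5; // assumed post-process frame and prune threshold
		int dropped = 0;
		for (int lastUsed = 0; lastUsed < 10; lastUsed++) {
			if (frameID - lastUsed >= threshold)
				dropped++; // lastUsed in 0..5 is pruned
		}
		System.out.println(dropped); // 6, matching the assertEquals above
	}
}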