Use of georegression.struct.homography.Homography2D_F64 in project BoofCV by lessthanoptimal.
The class TestPointTransformHomography_F64, method compareToDirect.
/**
 * Directly computes the expected output and compares it against the point transform.
 */
@Test
public void compareToDirect() {
    Point2D_F64 input = new Point2D_F64(50, 60);
    Point2D_F64 output = new Point2D_F64();
    Point2D_F64 expected = new Point2D_F64();

    Homography2D_F64 H = new Homography2D_F64(1, 2, 3, 4, 5, 6, 7, 8, 9);
    HomographyPointOps_F64.transform(H, input, expected);

    PointTransformHomography_F64 alg = new PointTransformHomography_F64();
    alg.set(H);
    alg.compute(input.x, input.y, output);

    assertEquals(expected.x, output.x, 1e-4);
    assertEquals(expected.y, output.y, 1e-4);
}
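For reference, the "direct" computation the test checks against is the standard projective point transform: multiply the homogeneous point (x, y, 1) by H and divide by the third coordinate. A minimal hand-rolled sketch of that math, for illustration only, using the same a11..a33 fields that appear in the snippets below:

    // Apply H to the homogeneous point (x, y, 1) and normalize by the third coordinate
    double x = input.x, y = input.y;
    double w = H.a31 * x + H.a32 * y + H.a33;
    double xp = (H.a11 * x + H.a12 * y + H.a13) / w;
    double yp = (H.a21 * x + H.a22 * y + H.a23) / w;
    // xp and yp should agree with expected.x and expected.y from HomographyPointOps_F64.transform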
Use of georegression.struct.homography.Homography2D_F64 in project BoofCV by lessthanoptimal.
The class FactoryMotion2D, method createMotion2D.
/**
 * Estimates the 2D motion of an image using different motion models.
 *
 * @param ransacIterations Number of RANSAC iterations.
 * @param inlierThreshold Threshold which defines an inlier.
 * @param outlierPrune If a feature is an outlier for this many turns in a row it is dropped. Try 2.
 * @param absoluteMinimumTracks New features will be respawned if the number of inliers drops below this number.
 * @param respawnTrackFraction If the fraction of current inliers to the original number of inliers drops below
 *                             this fraction then new features are spawned. Try 0.3.
 * @param respawnCoverageFraction If the area covered drops by this fraction then spawn more features. Try 0.8.
 * @param refineEstimate Should it refine the model estimate using all inliers?
 * @param tracker Point feature tracker.
 * @param motionModel Instance of the motion model used: Se2_F64, Affine2D_F64, or Homography2D_F64.
 * @param <I> Image input type.
 * @param <IT> Motion model type.
 * @return ImageMotion2D
 */
public static <I extends ImageBase<I>, IT extends InvertibleTransform>
ImageMotion2D<I, IT> createMotion2D(int ransacIterations, double inlierThreshold, int outlierPrune,
                                    int absoluteMinimumTracks, double respawnTrackFraction,
                                    double respawnCoverageFraction, boolean refineEstimate,
                                    PointTracker<I> tracker, IT motionModel) {
    ModelManager<IT> manager;
    ModelGenerator<IT, AssociatedPair> fitter;
    DistanceFromModel<IT, AssociatedPair> distance;
    ModelFitter<IT, AssociatedPair> modelRefiner = null;

    if (motionModel instanceof Homography2D_F64) {
        GenerateHomographyLinear mf = new GenerateHomographyLinear(true);
        manager = (ModelManager) new ModelManagerHomography2D_F64();
        fitter = (ModelGenerator) mf;
        if (refineEstimate)
            modelRefiner = (ModelFitter) mf;
        distance = (DistanceFromModel) new DistanceHomographySq();
    } else if (motionModel instanceof Affine2D_F64) {
        manager = (ModelManager) new ModelManagerAffine2D_F64();
        GenerateAffine2D mf = new GenerateAffine2D();
        fitter = (ModelGenerator) mf;
        if (refineEstimate)
            modelRefiner = (ModelFitter) mf;
        distance = (DistanceFromModel) new DistanceAffine2DSq();
    } else if (motionModel instanceof Se2_F64) {
        manager = (ModelManager) new ModelManagerSe2_F64();
        MotionTransformPoint<Se2_F64, Point2D_F64> alg = new MotionSe2PointSVD_F64();
        GenerateSe2_AssociatedPair mf = new GenerateSe2_AssociatedPair(alg);
        fitter = (ModelGenerator) mf;
        distance = (DistanceFromModel) new DistanceSe2Sq();
        // no refine, already optimal
    } else {
        throw new RuntimeException("Unknown model type: " + motionModel.getClass().getSimpleName());
    }

    ModelMatcher<IT, AssociatedPair> modelMatcher =
            new Ransac(123123, manager, fitter, distance, ransacIterations, inlierThreshold);

    ImageMotionPointTrackerKey<I, IT> lowlevel =
            new ImageMotionPointTrackerKey<>(tracker, modelMatcher, modelRefiner, motionModel, outlierPrune);

    ImageMotionPtkSmartRespawn<I, IT> smartRespawn =
            new ImageMotionPtkSmartRespawn<>(lowlevel, absoluteMinimumTracks, respawnTrackFraction, respawnCoverageFraction);

    return new WrapImageMotionPtkSmartRespawn<>(smartRespawn);
}
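A minimal usage sketch, assembled from the moving-background example further down: the motion model is selected by the type of the instance passed in, so supplying a Homography2D_F64 yields a homography-based ImageMotion2D.

    // Sketch only: detector/tracker settings copied from the example below
    ConfigGeneralDetector confDetector = new ConfigGeneralDetector();
    confDetector.threshold = 10;
    confDetector.maxFeatures = 300;
    confDetector.radius = 6;
    PointTracker tracker = FactoryPointTracker.klt(new int[] { 1, 2, 4, 8 }, confDetector, 3, GrayF32.class, null);

    // Passing a Homography2D_F64 instance selects the homography motion model
    ImageMotion2D<GrayF32, Homography2D_F64> motion2D =
            FactoryMotion2D.createMotion2D(500, 0.5, 3, 100, 0.6, 0.5, false, tracker, new Homography2D_F64());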
Use of georegression.struct.homography.Homography2D_F64 in project BoofCV by lessthanoptimal.
The class ExampleBackgroundRemovalMoving, method main.
public static void main(String[] args) {
    // Example with a moving camera. Highlights why motion estimation is sometimes required
    String fileName = UtilIO.pathExample("tracking/chipmunk.mjpeg");
    // Camera has a bit of jitter in it. Static kinda works but motion reduces false positives
    // String fileName = UtilIO.pathExample("background/horse_jitter.mp4");

    // Comment/Uncomment to switch input image type
    ImageType imageType = ImageType.single(GrayF32.class);
    // ImageType imageType = ImageType.il(3, InterleavedF32.class);
    // ImageType imageType = ImageType.il(3, InterleavedU8.class);

    // Configure the feature detector
    ConfigGeneralDetector confDetector = new ConfigGeneralDetector();
    confDetector.threshold = 10;
    confDetector.maxFeatures = 300;
    confDetector.radius = 6;

    // Use a KLT tracker
    PointTracker tracker = FactoryPointTracker.klt(new int[] { 1, 2, 4, 8 }, confDetector, 3, GrayF32.class, null);

    // This estimates the 2D image motion
    ImageMotion2D<GrayF32, Homography2D_F64> motion2D =
            FactoryMotion2D.createMotion2D(500, 0.5, 3, 100, 0.6, 0.5, false, tracker, new Homography2D_F64());

    ConfigBackgroundBasic configBasic = new ConfigBackgroundBasic(30, 0.005f);

    // Configuration for the Gaussian model. Note that the threshold changes depending on the number of image bands
    // 12 = gray scale and 40 = color
    ConfigBackgroundGaussian configGaussian = new ConfigBackgroundGaussian(12, 0.001f);
    configGaussian.initialVariance = 64;
    configGaussian.minimumDifference = 5;

    // Note that GMM doesn't interpolate the input image, which makes it harder to model object edges.
    // However, it runs faster because of this.
    ConfigBackgroundGmm configGmm = new ConfigBackgroundGmm();
    configGmm.initialVariance = 1600;
    configGmm.significantWeight = 1e-1f;

    // Comment/Uncomment to switch background mode
    BackgroundModelMoving background =
            FactoryBackgroundModel.movingBasic(configBasic, new PointTransformHomography_F32(), imageType);
    // FactoryBackgroundModel.movingGaussian(configGaussian, new PointTransformHomography_F32(), imageType);
    // FactoryBackgroundModel.movingGmm(configGmm, new PointTransformHomography_F32(), imageType);

    background.setUnknownValue(1);

    MediaManager media = DefaultMediaManager.INSTANCE;
    SimpleImageSequence video = media.openVideo(fileName, background.getImageType());
    // media.openCamera(null, 640, 480, background.getImageType());

    // ====== Initialize Images
    // storage for segmented image. Background = 0, Foreground = 1
    GrayU8 segmented = new GrayU8(video.getNextWidth(), video.getNextHeight());
    // Grey scale image that's the input for motion estimation
    GrayF32 grey = new GrayF32(segmented.width, segmented.height);

    // coordinate frames
    Homography2D_F32 firstToCurrent32 = new Homography2D_F32();
    Homography2D_F32 homeToWorld = new Homography2D_F32();
    homeToWorld.a13 = grey.width / 2;
    homeToWorld.a23 = grey.height / 2;

    // Create a background image twice the size of the input image. Tell it that the home is in the center
    background.initialize(grey.width * 2, grey.height * 2, homeToWorld);

    BufferedImage visualized = new BufferedImage(segmented.width, segmented.height, BufferedImage.TYPE_INT_RGB);
    ImageGridPanel gui = new ImageGridPanel(1, 2);
    gui.setImages(visualized, visualized);
    ShowImages.showWindow(gui, "Detections", true);

    double fps = 0;
    // smoothing factor for FPS
    double alpha = 0.01;

    while (video.hasNext()) {
        ImageBase input = video.next();
        long before = System.nanoTime();
        GConvertImage.convert(input, grey);

        if (!motion2D.process(grey)) {
            throw new RuntimeException("Should handle this scenario");
        }

        Homography2D_F64 firstToCurrent64 = motion2D.getFirstToCurrent();
        ConvertMatrixData.convert(firstToCurrent64, firstToCurrent32);

        background.segment(firstToCurrent32, input, segmented);
        background.updateBackground(firstToCurrent32, input);
        long after = System.nanoTime();

        fps = (1.0 - alpha) * fps + alpha * (1.0 / ((after - before) / 1e9));

        VisualizeBinaryData.renderBinary(segmented, false, visualized);
        gui.setImage(0, 0, (BufferedImage) video.getGuiImage());
        gui.setImage(0, 1, visualized);
        gui.repaint();

        System.out.println("FPS = " + fps);
        try {
            Thread.sleep(5);
        } catch (InterruptedException e) {
            // ignore and continue with the next frame
        }
    }
}
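To try the Gaussian moving-background model hinted at by the commented-out line above, the construction would presumably follow the same pattern, reusing the configGaussian created earlier; everything else in the processing loop stays the same.

    // Sketch only: swap the basic model for the Gaussian one
    BackgroundModelMoving background =
            FactoryBackgroundModel.movingGaussian(configGaussian, new PointTransformHomography_F32(), imageType);
    background.setUnknownValue(1);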
Use of georegression.struct.homography.Homography2D_F64 in project BoofCV by lessthanoptimal.
The class TestGenerateHomographyLinear, method createRandomModel.
@Override
public Homography2D_F64 createRandomModel() {
    Homography2D_F64 model = new Homography2D_F64();
    model.a11 = rand.nextDouble();
    model.a12 = rand.nextDouble();
    model.a13 = rand.nextDouble();
    model.a21 = rand.nextDouble();
    model.a22 = rand.nextDouble();
    model.a23 = rand.nextDouble();
    model.a31 = rand.nextDouble();
    model.a32 = rand.nextDouble();
    model.a33 = rand.nextDouble();
    return model;
}
Use of georegression.struct.homography.Homography2D_F64 in project BoofCV by lessthanoptimal.
The class TestDistanceHomographySq, method createRandomModel.
@Override
public Homography2D_F64 createRandomModel() {
    Homography2D_F64 h = new Homography2D_F64();
    h.a11 = rand.nextDouble() * 5;
    h.a12 = rand.nextDouble() * 5;
    h.a13 = rand.nextDouble() * 5;
    h.a21 = rand.nextDouble() * 5;
    h.a22 = rand.nextDouble() * 5;
    h.a23 = rand.nextDouble() * 5;
    h.a31 = rand.nextDouble() * 5;
    h.a32 = rand.nextDouble() * 5;
    h.a33 = rand.nextDouble() * 5;
    return h;
}