Use of boofcv.abst.feature.detect.interest.ConfigPointDetector in project BoofCV by lessthanoptimal.
From the class ExampleVideoStabilization, method main.
public static void main(String[] args) {
    // Configure the feature detector
    ConfigPointDetector configDetector = new ConfigPointDetector();
    configDetector.type = PointDetectorTypes.SHI_TOMASI;
    configDetector.general.maxFeatures = 300;
    configDetector.general.threshold = 10;
    configDetector.general.radius = 2;

    // Use a KLT tracker
    PointTracker<GrayF32> tracker = FactoryPointTracker.klt(4, configDetector, 3, GrayF32.class, GrayF32.class);

    // This estimates the 2D image motion
    // An Affine2D_F64 model also works quite well.
    ImageMotion2D<GrayF32, Homography2D_F64> motion2D =
            FactoryMotion2D.createMotion2D(200, 3, 2, 30, 0.6, 0.5, false, tracker, new Homography2D_F64());

    // wrap it so it outputs color images while estimating motion from gray
    ImageMotion2D<Planar<GrayF32>, Homography2D_F64> motion2DColor = new PlToGrayMotion2D<>(motion2D, GrayF32.class);

    // This fuses the images together
    StitchingFromMotion2D<Planar<GrayF32>, Homography2D_F64> stabilize =
            FactoryMotion2D.createVideoStitch(0.5, motion2DColor, ImageType.pl(3, GrayF32.class));

    // Load an image sequence
    MediaManager media = DefaultMediaManager.INSTANCE;
    String fileName = UtilIO.pathExample("shake.mjpeg");
    SimpleImageSequence<Planar<GrayF32>> video = media.openVideo(fileName, ImageType.pl(3, GrayF32.class));

    Planar<GrayF32> frame = video.next();

    // The output image size is the same as the input image size
    stabilize.configure(frame.width, frame.height, null);
    // process the first frame
    stabilize.process(frame);

    // Create the GUI for displaying the results + input image
    ImageGridPanel gui = new ImageGridPanel(1, 2);
    gui.setImage(0, 0, new BufferedImage(frame.width, frame.height, BufferedImage.TYPE_INT_RGB));
    gui.setImage(0, 1, new BufferedImage(frame.width, frame.height, BufferedImage.TYPE_INT_RGB));
    gui.autoSetPreferredSize();

    ShowImages.showWindow(gui, "Example Stabilization", true);

    // process the video sequence one frame at a time
    while (video.hasNext()) {
        frame = video.next();
        if (!stabilize.process(frame))
            throw new RuntimeException("Don't forget to handle failures!");

        // display the input and stabilized images
        ConvertBufferedImage.convertTo(frame, gui.getImage(0, 0), true);
        ConvertBufferedImage.convertTo(stabilize.getStitchedImage(), gui.getImage(0, 1), true);
        gui.repaint();

        // throttle the speed just in case it's on a fast computer
        BoofMiscOps.pause(50);
    }
}
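The comment in the snippet notes that an Affine2D_F64 model also works quite well. Below is a minimal sketch of that variant, not taken from the original example: it assumes the same tracker and imports as above plus georegression.struct.affine.Affine2D_F64, and simply reuses the numeric parameters from the snippet.

// Hypothetical variant (not from ExampleVideoStabilization): estimate an affine
// motion model instead of a homography. All settings are copied from the snippet above.
ImageMotion2D<GrayF32, Affine2D_F64> motionAffine =
        FactoryMotion2D.createMotion2D(200, 3, 2, 30, 0.6, 0.5, false, tracker, new Affine2D_F64());

// Same gray-to-color wrapper and stitcher as before, just parameterized by Affine2D_F64
ImageMotion2D<Planar<GrayF32>, Affine2D_F64> motionAffineColor =
        new PlToGrayMotion2D<>(motionAffine, GrayF32.class);
StitchingFromMotion2D<Planar<GrayF32>, Affine2D_F64> stabilizeAffine =
        FactoryMotion2D.createVideoStitch(0.5, motionAffineColor, ImageType.pl(3, GrayF32.class));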
Use of boofcv.abst.feature.detect.interest.ConfigPointDetector in project BoofCV by lessthanoptimal.
From the class TrackerPointControlPanel, method createKltDetectConfig.
static ConfigPointDetector createKltDetectConfig(int maxFeatures) {
    ConfigPointDetector config = new ConfigPointDetector();
    config.type = PointDetectorTypes.SHI_TOMASI;
    config.general.maxFeatures = maxFeatures;
    config.general.radius = 5;
    config.general.threshold = 3.0f;
    return config;
}
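A hedged usage sketch, not part of TrackerPointControlPanel: the returned config can be passed to FactoryPointTracker.klt together with a ConfigPKlt, following the same pattern as the other snippets on this page. The pyramid, template, and max-feature values below are illustrative only.

// Illustrative values; the pyramid/template settings are not from TrackerPointControlPanel
ConfigPKlt configKlt = new ConfigPKlt();
configKlt.pyramidLevels = ConfigDiscreteLevels.levels(4);
configKlt.templateRadius = 3;

// For GrayF32 input the image derivative type is also GrayF32
PointTracker<GrayF32> tracker =
        FactoryPointTracker.klt(configKlt, createKltDetectConfig(400), GrayF32.class, GrayF32.class);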
Use of boofcv.abst.feature.detect.interest.ConfigPointDetector in project BoofCV by lessthanoptimal.
From the class VisualizeMonocularPlaneVisualOdometryApp, method createVisualOdometry.
private MonocularPlaneVisualOdometry<I> createVisualOdometry(int whichAlg) {
    Class derivType = GImageDerivativeOps.getDerivativeType(imageClass);

    if (whichAlg == 0) {
        var config = new ConfigPlanarTrackPnP();
        config.tracker.typeTracker = ConfigPointTracker.TrackerType.KLT;
        config.tracker.klt.pyramidLevels = ConfigDiscreteLevels.levels(4);
        config.tracker.klt.templateRadius = 3;

        config.tracker.detDesc.detectPoint.type = PointDetectorTypes.SHI_TOMASI;
        config.tracker.detDesc.detectPoint.general.maxFeatures = 600;
        config.tracker.detDesc.detectPoint.general.radius = 3;
        config.tracker.detDesc.detectPoint.general.threshold = 1;

        config.thresholdAdd = 75;
        config.thresholdRetire = 2;
        config.ransac.iterations = 200;
        config.ransac.inlierThreshold = 1.5;

        return FactoryVisualOdometry.monoPlaneInfinity(config, imageClass);
    } else if (whichAlg == 1) {
        ConfigPKlt configKlt = new ConfigPKlt();
        configKlt.pyramidLevels = ConfigDiscreteLevels.levels(4);
        configKlt.templateRadius = 3;

        ConfigPointDetector configDetector = new ConfigPointDetector();
        configDetector.type = PointDetectorTypes.SHI_TOMASI;
        configDetector.general.maxFeatures = 600;
        configDetector.general.radius = 3;
        configDetector.general.threshold = 1;

        PointTracker<I> tracker = FactoryPointTracker.klt(configKlt, configDetector, imageClass, derivType);

        double cellSize = 0.06;
        double inlierGroundTol = 1.5;
        return FactoryVisualOdometry.monoPlaneOverhead(cellSize, 25, 0.7, inlierGroundTol, 300, 2, 100, 0.5, 0.6, tracker, imageType);
    } else {
        throw new RuntimeException("Unknown selection");
    }
}
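All three snippets configure ConfigPointDetector the same way, differing only in the numeric values. A hypothetical helper in the spirit of createKltDetectConfig above could factor out that repetition; this is a sketch, not code from BoofCV.

// Hypothetical helper, not present in BoofCV: builds a Shi-Tomasi point detector config
static ConfigPointDetector createShiTomasiConfig(int maxFeatures, int radius, float threshold) {
    ConfigPointDetector config = new ConfigPointDetector();
    config.type = PointDetectorTypes.SHI_TOMASI;
    config.general.maxFeatures = maxFeatures;
    config.general.radius = radius;
    config.general.threshold = threshold;
    return config;
}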