Use of boofcv.abst.feature.detect.interest.ConfigGeneralDetector in project BoofCV by lessthanoptimal.

The class ExampleBackgroundRemovalMoving, method main.
public static void main(String[] args) {
    // Example with a moving camera. Highlights why motion estimation is sometimes required
    String fileName = UtilIO.pathExample("tracking/chipmunk.mjpeg");
    // Camera has a bit of jitter in it. Static kinda works, but motion reduces false positives
    // String fileName = UtilIO.pathExample("background/horse_jitter.mp4");

    // Comment/Uncomment to switch input image type
    ImageType imageType = ImageType.single(GrayF32.class);
    // ImageType imageType = ImageType.il(3, InterleavedF32.class);
    // ImageType imageType = ImageType.il(3, InterleavedU8.class);

    // Configure the feature detector
    ConfigGeneralDetector confDetector = new ConfigGeneralDetector();
    confDetector.threshold = 10;
    confDetector.maxFeatures = 300;
    confDetector.radius = 6;

    // Use a KLT tracker
    PointTracker tracker = FactoryPointTracker.klt(new int[]{ 1, 2, 4, 8 }, confDetector, 3, GrayF32.class, null);

    // This estimates the 2D image motion
    ImageMotion2D<GrayF32, Homography2D_F64> motion2D =
            FactoryMotion2D.createMotion2D(500, 0.5, 3, 100, 0.6, 0.5, false, tracker, new Homography2D_F64());

    ConfigBackgroundBasic configBasic = new ConfigBackgroundBasic(30, 0.005f);

    // Configuration for Gaussian model. Note that the threshold changes depending on the number of image bands
    // 12 = gray scale and 40 = color
    ConfigBackgroundGaussian configGaussian = new ConfigBackgroundGaussian(12, 0.001f);
    configGaussian.initialVariance = 64;
    configGaussian.minimumDifference = 5;

    // Note that GMM doesn't interpolate the input image, which makes it harder to model object edges.
    // However, it runs faster because of this.
    ConfigBackgroundGmm configGmm = new ConfigBackgroundGmm();
    configGmm.initialVariance = 1600;
    configGmm.significantWeight = 1e-1f;

    // Comment/Uncomment to switch background model
    BackgroundModelMoving background =
            FactoryBackgroundModel.movingBasic(configBasic, new PointTransformHomography_F32(), imageType);
    // FactoryBackgroundModel.movingGaussian(configGaussian, new PointTransformHomography_F32(), imageType);
    // FactoryBackgroundModel.movingGmm(configGmm, new PointTransformHomography_F32(), imageType);

    // pixels the model has no information about are labeled as foreground (1)
    background.setUnknownValue(1);

    MediaManager media = DefaultMediaManager.INSTANCE;
    SimpleImageSequence video = media.openVideo(fileName, background.getImageType());
    // media.openCamera(null, 640, 480, background.getImageType());

    // ====== Initialize Images

    // storage for segmented image. Background = 0, Foreground = 1
    GrayU8 segmented = new GrayU8(video.getNextWidth(), video.getNextHeight());
    // grey scale image that's the input for motion estimation
    GrayF32 grey = new GrayF32(segmented.width, segmented.height);

    // coordinate frames
    Homography2D_F32 firstToCurrent32 = new Homography2D_F32();
    Homography2D_F32 homeToWorld = new Homography2D_F32();
    homeToWorld.a13 = grey.width / 2;
    homeToWorld.a23 = grey.height / 2;

    // Create a background image twice the size of the input image. Tell it that the home is in the center
    background.initialize(grey.width * 2, grey.height * 2, homeToWorld);

    BufferedImage visualized = new BufferedImage(segmented.width, segmented.height, BufferedImage.TYPE_INT_RGB);
    ImageGridPanel gui = new ImageGridPanel(1, 2);
    gui.setImages(visualized, visualized);
    ShowImages.showWindow(gui, "Detections", true);

    double fps = 0;
    // smoothing factor for FPS
    double alpha = 0.01;

    while (video.hasNext()) {
        ImageBase input = video.next();
        long before = System.nanoTime();

        // estimate the camera motion from the grey scale version of the input
        GConvertImage.convert(input, grey);
        if (!motion2D.process(grey)) {
            throw new RuntimeException("Should handle this scenario");
        }
        Homography2D_F64 firstToCurrent64 = motion2D.getFirstToCurrent();
        ConvertMatrixData.convert(firstToCurrent64, firstToCurrent32);

        // segment the current frame into foreground/background, then update the model
        background.segment(firstToCurrent32, input, segmented);
        background.updateBackground(firstToCurrent32, input);
        long after = System.nanoTime();

        fps = (1.0 - alpha) * fps + alpha * (1.0 / ((after - before) / 1e9));

        VisualizeBinaryData.renderBinary(segmented, false, visualized);
        gui.setImage(0, 0, (BufferedImage) video.getGuiImage());
        gui.setImage(0, 1, visualized);
        gui.repaint();
        System.out.println("FPS = " + fps);
        try {
            Thread.sleep(5);
        } catch (InterruptedException e) {
            // ignore and continue with the next frame
        }
    }
}
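The fps value printed each iteration is an exponential moving average of the instantaneous frame rate, so a single slow frame barely moves the reported number. With smoothing factor $\alpha = 0.01$, the update in the loop computes

$\mathrm{fps}_t = (1 - \alpha)\,\mathrm{fps}_{t-1} + \alpha \cdot \frac{1}{\Delta t}$

where $\Delta t = (\mathrm{after} - \mathrm{before})/10^9$ is the current frame's processing time in seconds.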
Use of boofcv.abst.feature.detect.interest.ConfigGeneralDetector in project BoofCV by lessthanoptimal.

The class ExamplePointFeatureTracker, method createKLT.
/**
 * A simple way to create a Kanade-Lucas-Tomasi (KLT) tracker.
 */
public void createKLT() {
    PkltConfig config = new PkltConfig();
    config.templateRadius = 3;
    config.pyramidScaling = new int[]{ 1, 2, 4, 8 };
    tracker = FactoryPointTracker.klt(config, new ConfigGeneralDetector(600, 6, 1), imageType, derivType);
}
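The three-argument constructor used here is a compact form of the field-by-field configuration shown in ExampleBackgroundRemovalMoving above. A minimal sketch of the assumed equivalence (the argument order maxFeatures, radius, threshold is an assumption inferred from how those fields are set elsewhere in these examples):

// Assumed equivalent of new ConfigGeneralDetector(600, 6, 1)
ConfigGeneralDetector configDetector = new ConfigGeneralDetector();
configDetector.maxFeatures = 600; // keep at most the 600 strongest corners
configDetector.radius = 6;        // non-maximum suppression radius, in pixels
configDetector.threshold = 1;     // minimum corner intensity accepted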
Use of boofcv.abst.feature.detect.interest.ConfigGeneralDetector in project BoofCV by lessthanoptimal.

The class ExampleTrackingKlt, method main.
public static void main(String[] args) {
    // tune the tracker for the image size and visual appearance
    ConfigGeneralDetector configDetector = new ConfigGeneralDetector(-1, 8, 1);
    PkltConfig configKlt = new PkltConfig(3, new int[]{ 1, 2, 4, 8 });
    PointTracker<GrayF32> tracker = FactoryPointTracker.klt(configKlt, configDetector, GrayF32.class, null);

    // Open a webcam at a resolution close to 640x480
    Webcam webcam = UtilWebcamCapture.openDefault(640, 480);

    // Create the panel used to display the image and feature tracks
    ImagePanel gui = new ImagePanel();
    gui.setPreferredSize(webcam.getViewSize());
    ShowImages.showWindow(gui, "KLT Tracker", true);

    int minimumTracks = 100;
    while (true) {
        BufferedImage image = webcam.getImage();
        GrayF32 gray = ConvertBufferedImage.convertFrom(image, (GrayF32) null);
        tracker.process(gray);
        List<PointTrack> tracks = tracker.getActiveTracks(null);

        // Spawn tracks if there are too few. Halving the threshold means new tracks
        // are only spawned after about half of the current ones have been lost.
        if (tracks.size() < minimumTracks) {
            tracker.spawnTracks();
            tracks = tracker.getActiveTracks(null);
            minimumTracks = tracks.size() / 2;
        }

        // Draw the tracks
        Graphics2D g2 = image.createGraphics();
        for (PointTrack t : tracks) {
            VisualizeFeatures.drawPoint(g2, (int) t.x, (int) t.y, Color.RED);
        }
        gui.setImageUI(image);
    }
}
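The capture loop above runs until the process is killed and never releases the camera. A hedged variant that exits when the display window closes, reusing tracker, gui, and webcam from the snippet (isShowing() is inherited from java.awt.Component; close() is assumed from the underlying webcam-capture library):

try {
    while (gui.isShowing()) {
        BufferedImage image = webcam.getImage();
        GrayF32 gray = ConvertBufferedImage.convertFrom(image, (GrayF32) null);
        tracker.process(gray); // tracking and drawing as above
        gui.setImageUI(image);
    }
} finally {
    webcam.close(); // release the camera device (assumed API)
}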
Use of boofcv.abst.feature.detect.interest.ConfigGeneralDetector in project BoofCV by lessthanoptimal.

The class VisualizeStereoVisualOdometryApp, method createStereoDepth.
private StereoVisualOdometry<I> createStereoDepth(int whichAlg) {
    Class derivType = GImageDerivativeOps.getDerivativeType(imageType);

    StereoDisparitySparse<I> disparity = FactoryStereoDisparity.regionSparseWta(2, 150, 3, 3, 30, -1, true, imageType);

    PkltConfig kltConfig = new PkltConfig();
    kltConfig.templateRadius = 3;
    kltConfig.pyramidScaling = new int[]{ 1, 2, 4, 8 };

    if (whichAlg == 0) {
        // stereo depth VO with a pyramidal KLT tracker
        ConfigGeneralDetector configDetector = new ConfigGeneralDetector(600, 3, 1);
        PointTrackerTwoPass<I> tracker = FactoryPointTrackerTwoPass.klt(kltConfig, configDetector, imageType, derivType);
        return FactoryVisualOdometry.stereoDepth(1.5, 120, 2, 200, 50, false, disparity, tracker, imageType);
    } else if (whichAlg == 1) {
        // detect-describe-associate tracker: Shi-Tomasi corners + BRIEF descriptors + greedy Hamming association
        ConfigGeneralDetector configExtract = new ConfigGeneralDetector(600, 3, 1);
        GeneralFeatureDetector detector = FactoryPointTracker.createShiTomasi(configExtract, derivType);
        DescribeRegionPoint describe = FactoryDescribeRegionPoint.brief(null, imageType);
        ScoreAssociateHamming_B score = new ScoreAssociateHamming_B();
        AssociateDescription2D<TupleDesc_B> associate = new AssociateDescTo2D<>(FactoryAssociation.greedy(score, 150, true));
        PointTrackerTwoPass tracker = FactoryPointTrackerTwoPass.dda(detector, describe, associate, null, 1, imageType);
        return FactoryVisualOdometry.stereoDepth(1.5, 80, 3, 200, 50, false, disparity, tracker, imageType);
    } else if (whichAlg == 2) {
        // hybrid Shi-Tomasi + SURF + KLT tracker, wrapped to provide the two-pass interface
        PointTracker<I> tracker = FactoryPointTracker.combined_ST_SURF_KLT(new ConfigGeneralDetector(600, 3, 0), kltConfig, 50, null, null, imageType, derivType);
        PointTrackerTwoPass<I> twopass = new PointTrackerToTwoPass<>(tracker);
        return FactoryVisualOdometry.stereoDepth(1.5, 80, 3, 200, 50, false, disparity, twopass, imageType);
    } else if (whichAlg == 3) {
        // track the left and right cameras independently with KLT; SURF descriptors handle association
        ConfigGeneralDetector configDetector = new ConfigGeneralDetector(600, 3, 1);
        PointTracker<I> trackerLeft = FactoryPointTracker.klt(kltConfig, configDetector, imageType, derivType);
        PointTracker<I> trackerRight = FactoryPointTracker.klt(kltConfig, configDetector, imageType, derivType);
        DescribeRegionPoint describe = FactoryDescribeRegionPoint.surfFast(null, imageType);
        return FactoryVisualOdometry.stereoDualTrackerPnP(90, 2, 1.5, 1.5, 200, 50, trackerLeft, trackerRight, describe, imageType);
    } else if (whichAlg == 4) {
        // quad PnP: detect and describe features in both images of consecutive stereo pairs
        // GeneralFeatureIntensity intensity =
        //     FactoryIntensityPoint.hessian(HessianBlobIntensity.Type.TRACE, defaultType);
        GeneralFeatureIntensity intensity = FactoryIntensityPoint.shiTomasi(1, false, imageType);
        NonMaxSuppression nonmax = FactoryFeatureExtractor.nonmax(new ConfigExtract(2, 50, 0, true, false, true));
        GeneralFeatureDetector general = new GeneralFeatureDetector(intensity, nonmax);
        general.setMaxFeatures(600);
        DetectorInterestPointMulti detector = new GeneralToInterestMulti(general, 2, imageType, derivType);
        // DescribeRegionPoint describe = FactoryDescribeRegionPoint.brief(new ConfigBrief(true), defaultType);
        // DescribeRegionPoint describe = FactoryDescribeRegionPoint.pixelNCC(5, 5, defaultType);
        DescribeRegionPoint describe = FactoryDescribeRegionPoint.surfFast(null, imageType);
        DetectDescribeMulti detDescMulti = new DetectDescribeMultiFusion(detector, null, describe);
        return FactoryVisualOdometry.stereoQuadPnP(1.5, 0.5, 75, Double.MAX_VALUE, 300, 50, detDescMulti, imageType);
    } else {
        throw new RuntimeException("Unknown selection");
    }
}
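Whichever branch is selected, the returned estimator is driven the same way. A minimal usage sketch (the calibration object and frame source are hypothetical; the method names follow BoofCV's StereoVisualOdometry interface):

StereoVisualOdometry<I> visualOdometry = createStereoDepth(0);
visualOdometry.setCalibration(stereoParam); // StereoParameters loaded elsewhere (hypothetical)
while (hasMoreFrames()) {                   // hypothetical frame source
    if (!visualOdometry.process(leftImage, rightImage))
        System.out.println("motion could not be estimated for this frame");
    Se3_F64 leftToWorld = visualOdometry.getCameraToWorld();
    System.out.println("location: " + leftToWorld.getT());
}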
Use of boofcv.abst.feature.detect.interest.ConfigGeneralDetector in project BoofCV by lessthanoptimal.

The class ExampleVideoMosaic, method main.
public static void main(String[] args) {
    // Configure the feature detector
    ConfigGeneralDetector confDetector = new ConfigGeneralDetector();
    confDetector.threshold = 1;
    confDetector.maxFeatures = 300;
    confDetector.radius = 3;

    // Use a KLT tracker
    PointTracker<GrayF32> tracker = FactoryPointTracker.klt(new int[]{ 1, 2, 4, 8 }, confDetector, 3, GrayF32.class, GrayF32.class);

    // This estimates the 2D image motion
    // An Affine2D_F64 model also works quite well.
    ImageMotion2D<GrayF32, Homography2D_F64> motion2D =
            FactoryMotion2D.createMotion2D(220, 3, 2, 30, 0.6, 0.5, false, tracker, new Homography2D_F64());

    // wrap it so it outputs color images while estimating motion from gray
    ImageMotion2D<Planar<GrayF32>, Homography2D_F64> motion2DColor = new PlToGrayMotion2D<>(motion2D, GrayF32.class);

    // This fuses the images together
    StitchingFromMotion2D<Planar<GrayF32>, Homography2D_F64> stitch =
            FactoryMotion2D.createVideoStitch(0.5, motion2DColor, ImageType.pl(3, GrayF32.class));

    // Load an image sequence
    MediaManager media = DefaultMediaManager.INSTANCE;
    String fileName = UtilIO.pathExample("mosaic/airplane01.mjpeg");
    SimpleImageSequence<Planar<GrayF32>> video = media.openVideo(fileName, ImageType.pl(3, GrayF32.class));
    Planar<GrayF32> frame = video.next();

    // shrink the input image and center it
    Homography2D_F64 shrink = new Homography2D_F64(0.5, 0, frame.width / 4, 0, 0.5, frame.height / 4, 0, 0, 1);
    shrink = shrink.invert(null);

    // The mosaic will be larger in terms of pixels, but the image will be scaled down.
    // To change this into stabilization, just make it the same size as the input with no shrink.
    stitch.configure(frame.width, frame.height, shrink);

    // process the first frame
    stitch.process(frame);

    // Create the GUI for displaying the results + input image
    ImageGridPanel gui = new ImageGridPanel(1, 2);
    gui.setImage(0, 0, new BufferedImage(frame.width, frame.height, BufferedImage.TYPE_INT_RGB));
    gui.setImage(0, 1, new BufferedImage(frame.width, frame.height, BufferedImage.TYPE_INT_RGB));
    gui.setPreferredSize(new Dimension(3 * frame.width, frame.height * 2));
    ShowImages.showWindow(gui, "Example Mosaic", true);

    boolean enlarged = false;

    // process the video sequence one frame at a time
    while (video.hasNext()) {
        frame = video.next();
        if (!stitch.process(frame))
            throw new RuntimeException("You should handle failures");

        // if the current image is close to the image border, recenter the mosaic
        StitchingFromMotion2D.Corners corners = stitch.getImageCorners(frame.width, frame.height, null);
        if (nearBorder(corners.p0, stitch) || nearBorder(corners.p1, stitch) || nearBorder(corners.p2, stitch) || nearBorder(corners.p3, stitch)) {
            stitch.setOriginToCurrent();

            // only enlarge the image once
            if (!enlarged) {
                enlarged = true;
                // double the image size and shift it over to keep it centered
                int widthOld = stitch.getStitchedImage().width;
                int heightOld = stitch.getStitchedImage().height;
                int widthNew = widthOld * 2;
                int heightNew = heightOld * 2;
                int tranX = (widthNew - widthOld) / 2;
                int tranY = (heightNew - heightOld) / 2;
                Homography2D_F64 newToOldStitch = new Homography2D_F64(1, 0, -tranX, 0, 1, -tranY, 0, 0, 1);
                stitch.resizeStitchImage(widthNew, heightNew, newToOldStitch);
                gui.setImage(0, 1, new BufferedImage(widthNew, heightNew, BufferedImage.TYPE_INT_RGB));
            }
            corners = stitch.getImageCorners(frame.width, frame.height, null);
        }

        // display the mosaic
        ConvertBufferedImage.convertTo(frame, gui.getImage(0, 0), true);
        ConvertBufferedImage.convertTo(stitch.getStitchedImage(), gui.getImage(0, 1), true);

        // draw a red quadrilateral around the current frame in the mosaic
        Graphics2D g2 = gui.getImage(0, 1).createGraphics();
        g2.setColor(Color.RED);
        g2.drawLine((int) corners.p0.x, (int) corners.p0.y, (int) corners.p1.x, (int) corners.p1.y);
        g2.drawLine((int) corners.p1.x, (int) corners.p1.y, (int) corners.p2.x, (int) corners.p2.y);
        g2.drawLine((int) corners.p2.x, (int) corners.p2.y, (int) corners.p3.x, (int) corners.p3.y);
        g2.drawLine((int) corners.p3.x, (int) corners.p3.y, (int) corners.p0.x, (int) corners.p0.y);
        gui.repaint();

        // throttle the speed just in case it's on a fast computer
        BoofMiscOps.pause(50);
    }
}
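The loop calls a nearBorder helper that is not shown in this snippet. A plausible sketch, assuming it just checks whether a corner lies within a small margin of the stitched image's edge (the 10-pixel margin is an assumption):

private static boolean nearBorder(Point2D_F64 p, StitchingFromMotion2D<?, ?> stitch) {
    int r = 10; // margin in pixels (assumed)
    if (p.x < r || p.y < r)
        return true;
    if (p.x >= stitch.getStitchedImage().width - r)
        return true;
    return p.y >= stitch.getStitchedImage().height - r;
}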