Usage of boofcv.struct.image.ImageType in project BoofCV by lessthanoptimal.
Source: class ExampleBackgroundRemovalMoving, method main.
/**
 * Demonstrates background subtraction with a moving camera. The 2D image motion is
 * estimated each frame so the background model can be registered into a common
 * "world" frame before segmentation. Displays the input video and the binary
 * foreground mask side by side, printing a smoothed FPS estimate.
 */
public static void main(String[] args) {
    // Example with a moving camera. Highlights why motion estimation is sometimes required
    String fileName = UtilIO.pathExample("tracking/chipmunk.mjpeg");
    // Camera has a bit of jitter in it. Static kinda works but motion reduces false positives
    // String fileName = UtilIO.pathExample("background/horse_jitter.mp4");

    // Comment/Uncomment to switch input image type
    ImageType imageType = ImageType.single(GrayF32.class);
    // ImageType imageType = ImageType.il(3, InterleavedF32.class);
    // ImageType imageType = ImageType.il(3, InterleavedU8.class);

    // Configure the corner detector feeding the KLT tracker
    ConfigGeneralDetector confDetector = new ConfigGeneralDetector();
    confDetector.threshold = 10;
    confDetector.maxFeatures = 300;
    confDetector.radius = 6;

    // Use a KLT tracker
    PointTracker tracker = FactoryPointTracker.klt(new int[]{1, 2, 4, 8}, confDetector, 3, GrayF32.class, null);

    // This estimates the 2D image motion as a homography between frames
    ImageMotion2D<GrayF32, Homography2D_F64> motion2D =
            FactoryMotion2D.createMotion2D(500, 0.5, 3, 100, 0.6, 0.5, false, tracker, new Homography2D_F64());

    ConfigBackgroundBasic configBasic = new ConfigBackgroundBasic(30, 0.005f);

    // Configuration for Gaussian model. Note that the threshold changes depending on the number of image bands
    // 12 = gray scale and 40 = color
    ConfigBackgroundGaussian configGaussian = new ConfigBackgroundGaussian(12, 0.001f);
    configGaussian.initialVariance = 64;
    configGaussian.minimumDifference = 5;

    // Note that GMM doesn't interpolate the input image. Making it harder to model object edges.
    // However it runs faster because of this.
    ConfigBackgroundGmm configGmm = new ConfigBackgroundGmm();
    configGmm.initialVariance = 1600;
    configGmm.significantWeight = 1e-1f;

    // Comment/Uncomment to switch background model
    BackgroundModelMoving background =
            FactoryBackgroundModel.movingBasic(configBasic, new PointTransformHomography_F32(), imageType);
    // FactoryBackgroundModel.movingGaussian(configGaussian, new PointTransformHomography_F32(), imageType);
    // FactoryBackgroundModel.movingGmm(configGmm,new PointTransformHomography_F32(), imageType);
    background.setUnknownValue(1);

    MediaManager media = DefaultMediaManager.INSTANCE;
    SimpleImageSequence video = media.openVideo(fileName, background.getImageType());
    // media.openCamera(null,640,480,background.getImageType());

    // ====== Initialize Images
    // storage for segmented image. Background = 0, Foreground = 1
    GrayU8 segmented = new GrayU8(video.getNextWidth(), video.getNextHeight());
    // Grey scale image that's the input for motion estimation
    GrayF32 grey = new GrayF32(segmented.width, segmented.height);

    // coordinate frames: transform from the first frame ("home") into the world frame
    Homography2D_F32 firstToCurrent32 = new Homography2D_F32();
    Homography2D_F32 homeToWorld = new Homography2D_F32();
    homeToWorld.a13 = grey.width / 2;
    homeToWorld.a23 = grey.height / 2;

    // Create a background image twice the size of the input image. Tell it that the home is in the center
    background.initialize(grey.width * 2, grey.height * 2, homeToWorld);

    BufferedImage visualized = new BufferedImage(segmented.width, segmented.height, BufferedImage.TYPE_INT_RGB);
    ImageGridPanel gui = new ImageGridPanel(1, 2);
    gui.setImages(visualized, visualized);
    ShowImages.showWindow(gui, "Detections", true);

    double fps = 0;
    // smoothing factor for the exponential moving average of FPS
    double alpha = 0.01;

    while (video.hasNext()) {
        ImageBase input = video.next();
        long before = System.nanoTime();
        GConvertImage.convert(input, grey);

        if (!motion2D.process(grey)) {
            throw new RuntimeException("Should handle this scenario");
        }

        Homography2D_F64 firstToCurrent64 = motion2D.getFirstToCurrent();
        ConvertMatrixData.convert(firstToCurrent64, firstToCurrent32);

        // segment against the registered background, then update the model
        background.segment(firstToCurrent32, input, segmented);
        background.updateBackground(firstToCurrent32, input);
        long after = System.nanoTime();

        fps = (1.0 - alpha) * fps + alpha * (1.0 / ((after - before) / 1e9));

        VisualizeBinaryData.renderBinary(segmented, false, visualized);
        gui.setImage(0, 0, (BufferedImage) video.getGuiImage());
        gui.setImage(0, 1, visualized);
        gui.repaint();
        System.out.println("FPS = " + fps);

        try {
            Thread.sleep(5);
        } catch (InterruptedException e) {
            // FIX: the original swallowed the interrupt. Restore the interrupt
            // status and stop playback so the thread can terminate promptly.
            Thread.currentThread().interrupt();
            break;
        }
    }
}
Usage of boofcv.struct.image.ImageType in project BoofCV by lessthanoptimal.
Source: class ExampleBackgroundRemovalStationary, method main.
/**
 * Demonstrates background subtraction with a stationary camera. Each frame is
 * compared against the background model, producing a binary mask where
 * 1 = moving foreground and 0 = background. Displays the input video and the
 * mask side by side, printing a smoothed FPS estimate.
 */
public static void main(String[] args) {
    String fileName = UtilIO.pathExample("background/street_intersection.mp4");
    // String fileName = UtilIO.pathExample("background/rubixfire.mp4"); // dynamic background
    // String fileName = UtilIO.pathExample("background/horse_jitter.mp4"); // degraded performance because of jitter
    // String fileName = UtilIO.pathExample("tracking/chipmunk.mjpeg"); // Camera moves. Stationary will fail here

    // Comment/Uncomment to switch input image type
    ImageType imageType = ImageType.single(GrayF32.class);
    // ImageType imageType = ImageType.il(3, InterleavedF32.class);
    // ImageType imageType = ImageType.il(3, InterleavedU8.class);

    ConfigBackgroundGmm configGmm = new ConfigBackgroundGmm();

    // Comment/Uncomment to switch algorithms
    BackgroundModelStationary background =
            FactoryBackgroundModel.stationaryBasic(new ConfigBackgroundBasic(35, 0.005f), imageType);
    // FactoryBackgroundModel.stationaryGmm(configGmm, imageType);

    MediaManager media = DefaultMediaManager.INSTANCE;
    SimpleImageSequence video = media.openVideo(fileName, background.getImageType());
    // media.openCamera(null,640,480,background.getImageType());

    // Declare storage for segmented image. 1 = moving foreground and 0 = background
    GrayU8 segmented = new GrayU8(video.getNextWidth(), video.getNextHeight());

    BufferedImage visualized = new BufferedImage(segmented.width, segmented.height, BufferedImage.TYPE_INT_RGB);
    ImageGridPanel gui = new ImageGridPanel(1, 2);
    gui.setImages(visualized, visualized);
    ShowImages.showWindow(gui, "Static Scene: Background Segmentation", true);

    double fps = 0;
    // smoothing factor for the exponential moving average of FPS
    double alpha = 0.01;

    while (video.hasNext()) {
        ImageBase input = video.next();

        long before = System.nanoTime();
        // segments the current frame and updates the model in one call
        background.updateBackground(input, segmented);
        long after = System.nanoTime();

        fps = (1.0 - alpha) * fps + alpha * (1.0 / ((after - before) / 1e9));

        VisualizeBinaryData.renderBinary(segmented, false, visualized);
        gui.setImage(0, 0, (BufferedImage) video.getGuiImage());
        gui.setImage(0, 1, visualized);
        gui.repaint();
        System.out.println("FPS = " + fps);

        try {
            Thread.sleep(5);
        } catch (InterruptedException e) {
            // FIX: the original swallowed the interrupt. Restore the interrupt
            // status and stop playback so the thread can terminate promptly.
            Thread.currentThread().interrupt();
            break;
        }
    }
    System.out.println("done!");
}
Usage of boofcv.struct.image.ImageType in project BoofCV by lessthanoptimal.
Source: class CompareHessianToConvolution, method setKernel.
/**
 * Installs the reference separable-convolution filter for the specified derivative
 * and widens the ignored image border to cover the larger of the two kernel radii.
 */
public void setKernel(int which, Kernel1D horizontal, Kernel1D vertical) {
    ImageType singleBandType = ImageType.single(inputType);

    // Horizontal pass followed by vertical pass, chained into one filter
    FilterImageInterface<?, ?> horizPass =
            FactoryConvolve.convolve(horizontal, singleBandType, singleBandType, BorderType.EXTENDED, true);
    FilterImageInterface<?, ?> vertPass =
            FactoryConvolve.convolve(vertical, singleBandType, singleBandType, BorderType.EXTENDED, false);
    outputFilters[which] = new FilterSequence(horizPass, vertPass);

    // Border must be at least as wide as the largest kernel radius seen so far
    borderSize = Math.max(borderSize, Math.max(horizontal.getRadius(), vertical.getRadius()));
}
Usage of boofcv.struct.image.ImageType in project BoofCV by lessthanoptimal.
Source: class GenericFiducialDetectorChecks, method checkRemoveIntrinsic.
/**
 * Provide an intrinsic model then remove it. Detection results without intrinsics
 * must be unchanged after setting and then clearing the lens model.
 */
@Test
public void checkRemoveIntrinsic() {
    for (ImageType type : types) {
        ImageBase image = loadImage(type);

        // detect with no intrinsics
        FiducialDetector detector = createDetector(type);
        detector.detect(image);
        assertFalse(detector.is3D());
        assertTrue(detector.totalFound() >= 1);
        Results noIntrinsics = extractResults(detector);
        checkBounds(detector);

        // detect with intrinsics
        detector.setLensDistortion(loadDistortion(true), image.width, image.height);
        assertTrue(detector.is3D());
        assertTrue(detector.totalFound() >= 1);

        // detect without intrinsics again
        detector.setLensDistortion(null, 0, 0);
        assertFalse(detector.is3D());
        assertTrue(detector.totalFound() >= 1);
        Results afterRemoval = extractResults(detector);

        // the two no-intrinsics runs must agree on IDs, poses, and pixel locations
        assertEquals(noIntrinsics.id.length, afterRemoval.id.length);
        for (int idx = 0; idx < noIntrinsics.id.length; idx++) {
            assertEquals(noIntrinsics.id[idx], afterRemoval.id[idx]);
            assertTrue(noIntrinsics.pose.get(idx).T.distance(afterRemoval.pose.get(idx).T) <= 1e-4);
            assertTrue(MatrixFeatures_DDRM.isIdentical(
                    noIntrinsics.pose.get(idx).getR(), afterRemoval.pose.get(idx).R, 1e-4));
            assertTrue(afterRemoval.pixel.get(idx).distance(noIntrinsics.pixel.get(idx)) <= 1e-4);
        }
    }
}
Usage of boofcv.struct.image.ImageType in project BoofCV by lessthanoptimal.
Source: class GenericFiducialDetectorChecks, method clearLensDistortion.
/**
 * Make sure lens distortion is removed if it was set previously and then removed.
 * Runs detection without distortion, then with distortion, then without again,
 * and requires the two no-distortion runs to produce identical results.
 */
// TODO remove test? This should be a non-issue now
@Test
public void clearLensDistortion() {
    for (ImageType type : types) {
        ImageBase image = loadImage(type);
        FiducialDetector detector = createDetector(type);

        // save the results with no lens distortion
        detector.setLensDistortion(loadDistortion(false), image.width, image.height);
        detector.detect(image);
        assertTrue(detector.totalFound() >= 1);
        Results before = extractResults(detector);

        // run with lens distortion
        detector.setLensDistortion(loadDistortion(true), image.width, image.height);
        detector.detect(image);

        // remove lens distortion
        detector.setLensDistortion(loadDistortion(false), image.width, image.height);
        detector.detect(image);
        Results after = extractResults(detector);

        // FIX: the original iterated over after.id.length without checking it
        // matched before.id.length, so a run that found fewer fiducials passed
        // silently and one that found more crashed with an index error.
        // Mirrors the guard already present in checkRemoveIntrinsic().
        assertEquals(before.id.length, after.id.length);

        // see if it's the same
        for (int i = 0; i < after.id.length; i++) {
            assertEquals(before.id[i], after.id[i]);
            assertEquals(0, before.pose.get(i).T.distance(after.pose.get(i).T), 1e-8);
            assertTrue(MatrixFeatures_DDRM.isIdentical(before.pose.get(i).R, after.pose.get(i).R, 1e-8));
            assertEquals(0, before.pixel.get(i).distance(after.pixel.get(i)), 1e-8);
        }
    }
}
Aggregations