Use of boofcv.abst.feature.detect.interest.ConfigGeneralDetector in project BoofCV by lessthanoptimal.
The class ExampleVideoStabilization, method main.
public static void main(String[] args) {
    // Configure the feature detector
    ConfigGeneralDetector confDetector = new ConfigGeneralDetector();
    confDetector.threshold = 10;
    confDetector.maxFeatures = 300;
    confDetector.radius = 2;

    // Use a KLT tracker
    PointTracker<GrayF32> tracker = FactoryPointTracker.klt(new int[]{1, 2, 4, 8}, confDetector, 3,
            GrayF32.class, GrayF32.class);

    // This estimates the 2D image motion.
    // An Affine2D_F64 model also works quite well.
    ImageMotion2D<GrayF32, Homography2D_F64> motion2D =
            FactoryMotion2D.createMotion2D(200, 3, 2, 30, 0.6, 0.5, false, tracker, new Homography2D_F64());

    // Wrap it so that it outputs color images while estimating motion from gray
    ImageMotion2D<Planar<GrayF32>, Homography2D_F64> motion2DColor =
            new PlToGrayMotion2D<>(motion2D, GrayF32.class);

    // This fuses the images together
    StitchingFromMotion2D<Planar<GrayF32>, Homography2D_F64> stabilize =
            FactoryMotion2D.createVideoStitch(0.5, motion2DColor, ImageType.pl(3, GrayF32.class));

    // Load an image sequence
    MediaManager media = DefaultMediaManager.INSTANCE;
    String fileName = UtilIO.pathExample("shake.mjpeg");
    SimpleImageSequence<Planar<GrayF32>> video = media.openVideo(fileName, ImageType.pl(3, GrayF32.class));

    Planar<GrayF32> frame = video.next();

    // The output image size is the same as the input image size
    stabilize.configure(frame.width, frame.height, null);

    // Process the first frame
    stabilize.process(frame);

    // Create the GUI for displaying the results + input image
    ImageGridPanel gui = new ImageGridPanel(1, 2);
    gui.setImage(0, 0, new BufferedImage(frame.width, frame.height, BufferedImage.TYPE_INT_RGB));
    gui.setImage(0, 1, new BufferedImage(frame.width, frame.height, BufferedImage.TYPE_INT_RGB));
    gui.autoSetPreferredSize();

    ShowImages.showWindow(gui, "Example Stabilization", true);

    // Process the video sequence one frame at a time
    while (video.hasNext()) {
        frame = video.next();
        if (!stabilize.process(frame))
            throw new RuntimeException("Don't forget to handle failures!");

        // Display the input and stabilized images
        ConvertBufferedImage.convertTo(frame, gui.getImage(0, 0), true);
        ConvertBufferedImage.convertTo(stabilize.getStitchedImage(), gui.getImage(0, 1), true);
        gui.repaint();

        // Throttle the speed just in case it's on a fast computer
        BoofMiscOps.pause(50);
    }
}
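The field-by-field setup above can also be expressed with ConfigGeneralDetector's three-argument constructor, which the remaining snippets on this page use; a minimal sketch of the equivalence, assuming the (maxFeatures, radius, threshold) argument order seen in those snippets:

// Equivalent to the assignments above: at most 300 features,
// non-maximum suppression radius 2, detection threshold 10
ConfigGeneralDetector confDetector = new ConfigGeneralDetector(300, 2, 10);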
Use of boofcv.abst.feature.detect.interest.ConfigGeneralDetector in project BoofCV by lessthanoptimal.
The class TestDdaManagerGeneralPoint, method createTracker.
@Override
public PointTracker<GrayF32> createTracker() {
    // BRIEF descriptor computed on a Gaussian-blurred image
    DescribePointBrief<GrayF32> brief = FactoryDescribePointAlgs.brief(
            FactoryBriefDefinition.gaussian2(new Random(123), 16, 512),
            FactoryBlurFilter.gaussian(ImageType.single(GrayF32.class), 0, 4));

    // Shi-Tomasi corner detector: unlimited features (-1), radius 2, threshold 0
    GeneralFeatureDetector<GrayF32, GrayF32> corner =
            FactoryDetectPoint.createShiTomasi(new ConfigGeneralDetector(-1, 2, 0), false, GrayF32.class);

    // Greedy association of binary descriptors using Hamming distance
    ScoreAssociateHamming_B score = new ScoreAssociateHamming_B();
    AssociateDescription2D<TupleDesc_B> association =
            new AssociateDescTo2D<>(FactoryAssociation.greedy(score, 400, true));

    DescribeRegionPoint<GrayF32, TupleDesc_B> describe = new WrapDescribeBrief<>(brief, GrayF32.class);
    EasyGeneralFeatureDetector<GrayF32, GrayF32> easy =
            new EasyGeneralFeatureDetector<>(corner, GrayF32.class, GrayF32.class);

    DdaManagerGeneralPoint<GrayF32, GrayF32, TupleDesc_B> manager =
            new DdaManagerGeneralPoint<>(easy, describe, 2);

    return new DetectDescribeAssociate<>(manager, association, false);
}
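A tracker returned by createTracker() is typically driven one frame at a time; a minimal sketch, assuming the standard BoofCV PointTracker calls process, getActiveTracks, and spawnTracks ('frames' is a hypothetical image sequence):

PointTracker<GrayF32> tracker = createTracker();
for (GrayF32 image : frames) {
    tracker.process(image); // update existing tracks with the new frame
    if (tracker.getActiveTracks(null).size() < 100)
        tracker.spawnTracks(); // detect and describe new features when tracks run low
}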
Use of boofcv.abst.feature.detect.interest.ConfigGeneralDetector in project BoofCV by lessthanoptimal.
The class TestMonoOverhead_to_MonocularPlaneVisualOdometry, method createAlgorithm.
protected MonocularPlaneVisualOdometry<GrayU8> createAlgorithm() {
    // Pyramidal KLT tracker configuration
    PkltConfig config = new PkltConfig();
    config.pyramidScaling = new int[]{1, 2, 4, 8};
    config.templateRadius = 3;

    // Detect up to 600 features with radius 3 and threshold 1
    ConfigGeneralDetector configDetector = new ConfigGeneralDetector(600, 3, 1);
    PointTracker<GrayU8> tracker = FactoryPointTracker.klt(config, configDetector, GrayU8.class, GrayS16.class);

    double cellSize = 0.015;
    double ransacTol = 0.2;

    return FactoryVisualOdometry.monoPlaneOverhead(cellSize, 25, 0.5,
            ransacTol, 300, 2, 30, 0.5, 0.3, tracker, ImageType.single(GrayU8.class));
}
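The returned estimator is normally calibrated once and then fed frames; a minimal sketch, assuming the usual setCalibration/process/getCameraToWorld flow ('param' and the frame source are hypothetical placeholders):

MonocularPlaneVisualOdometry<GrayU8> vo = createAlgorithm();
vo.setCalibration(param); // hypothetical MonoPlaneParameters: intrinsics plus plane geometry
while (hasNextFrame()) { // hypothetical frame source
    if (!vo.process(nextFrame()))
        vo.reset(); // recover from a tracking failure
    Se3_F64 cameraToWorld = vo.getCameraToWorld();
}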
Use of boofcv.abst.feature.detect.interest.ConfigGeneralDetector in project BoofCV by lessthanoptimal.
The class TestWrapVisOdomPixelDepthPnP, method createAlgorithm.
@Override
public StereoVisualOdometry<GrayF32> createAlgorithm() {
    // Sparse winner-take-all disparity used to estimate feature depth
    StereoDisparitySparse<GrayF32> disparity =
            FactoryStereoDisparity.regionSparseWta(2, 150, 3, 3, 30, -1, true, GrayF32.class);

    // Pyramidal KLT tracker configuration
    PkltConfig config = new PkltConfig();
    config.pyramidScaling = new int[]{1, 2, 4, 8};
    config.templateRadius = 3;

    // Detect up to 600 features with radius 3 and threshold 1
    ConfigGeneralDetector configDetector = new ConfigGeneralDetector(600, 3, 1);
    PointTrackerTwoPass<GrayF32> tracker =
            FactoryPointTrackerTwoPass.klt(config, configDetector, GrayF32.class, GrayF32.class);

    return FactoryVisualOdometry.stereoDepth(1.5, 40, 2, 200, 50, false, disparity, tracker, GrayF32.class);
}
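Stereo visual odometry is driven the same way but consumes a rectified image pair per update; a minimal sketch, assuming the standard setCalibration/process calls ('stereoParam', 'left', and 'right' are hypothetical placeholders):

StereoVisualOdometry<GrayF32> vo = createAlgorithm();
vo.setCalibration(stereoParam); // hypothetical StereoParameters from camera calibration
if (vo.process(left, right)) { // rectified left and right input images
    Se3_F64 leftToWorld = vo.getCameraToWorld();
}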
Use of boofcv.abst.feature.detect.interest.ConfigGeneralDetector in project MAVSlam by ecmnet.
The class StreamRealSenseTest, method start.
@Override
public void start(Stage primaryStage) {
    primaryStage.setTitle("BoofCV RealSense Demo");

    FlowPane root = new FlowPane();
    root.getChildren().add(ivrgb);

    // Track the mouse so the depth under the cursor can be reported
    ivrgb.setOnMouseMoved(event -> {
        mouse_x = (int) event.getX();
        mouse_y = (int) event.getY();
    });

    // RealSenseInfo info = new RealSenseInfo(320, 240, RealSenseInfo.MODE_RGB);
    RealSenseInfo info = new RealSenseInfo(640, 480, RealSenseInfo.MODE_RGB);

    try {
        realsense = new StreamRealSenseVisDepth(0, info);
    } catch (Exception e) {
        System.out.println("REALSENSE:" + e.getMessage());
        return;
    }

    mouse_x = info.width / 2;
    mouse_y = info.height / 2;

    primaryStage.setScene(new Scene(root, info.width, info.height));
    primaryStage.show();

    // Configure the pyramidal KLT tracker
    PkltConfig configKlt = new PkltConfig();
    configKlt.pyramidScaling = new int[]{1, 2, 4, 8};
    configKlt.templateRadius = 3;

    PointTrackerTwoPass<GrayU8> tracker = FactoryPointTrackerTwoPass.klt(configKlt,
            new ConfigGeneralDetector(900, 2, 1), GrayU8.class, GrayS16.class);

    // Converts raw depth values into 3D points (depth is in mm, hence the 1e-3 scale)
    DepthSparse3D<GrayU16> sparseDepth = new DepthSparse3D.I<GrayU16>(1e-3);

    // Declares the algorithm
    MAVDepthVisualOdometry<GrayU8, GrayU16> visualOdometry = FactoryMAVOdometry.depthDepthPnP(1.2,
            120, 2, 200, 50, true, sparseDepth, tracker, GrayU8.class, GrayU16.class);

    visualOdometry.setCalibration(realsense.getIntrinsics(), new DoNothingPixelTransform_F32());

    output = new BufferedImage(info.width, info.height, BufferedImage.TYPE_3BYTE_BGR);
    wirgb = new WritableImage(info.width, info.height);
    ivrgb.setImage(wirgb);

    realsense.registerListener(new Listener() {

        int fps;
        float mouse_depth;
        float md;
        int mc;
        int mf = 0;
        int fpm;

        @Override
        public void process(Planar<GrayU8> rgb, GrayU16 depth, long timeRgb, long timeDepth) {
            // Update the FPS and mouse-depth averages every 250 ms
            if ((System.currentTimeMillis() - tms) > 250) {
                tms = System.currentTimeMillis();
                if (mf > 0)
                    fps = fpm / mf;
                if (mc > 0)
                    mouse_depth = md / mc;
                mc = 0;
                md = 0;
                mf = 0;
                fpm = 0;
            }
            mf++;
            fpm += (int) (1f / ((timeRgb - oldTimeDepth) / 1000f) + 0.5f);
            oldTimeDepth = timeRgb;

            // Estimate camera motion; on failure, publish the last position and reset
            if (!visualOdometry.process(rgb.getBand(0), depth)) {
                bus1.writeObject(position);
                System.out.println("VO Failed!");
                visualOdometry.reset();
                return;
            }

            Se3_F64 leftToWorld = visualOdometry.getCameraToWorld();
            Vector3D_F64 T = leftToWorld.getT();

            AccessPointTracks3D points = (AccessPointTracks3D) visualOdometry;

            ConvertBufferedImage.convertTo(rgb, output, false);
            Graphics c = output.getGraphics();

            int count = 0;
            float total = 0;
            int dx = 0, dy = 0;
            int dist = 999;
            int x, y;
            int index = -1;

            // Draw the inlier tracks and find the one closest to the mouse
            for (int i = 0; i < points.getAllTracks().size(); i++) {
                if (points.isInlier(i)) {
                    c.setColor(Color.BLUE);
                    x = (int) points.getAllTracks().get(i).x;
                    y = (int) points.getAllTracks().get(i).y;
                    int d = depth.get(x, y);
                    if (d > 0) {
                        int di = (int) Math.sqrt((x - mouse_x) * (x - mouse_x) + (y - mouse_y) * (y - mouse_y));
                        if (di < dist) {
                            index = i;
                            dx = x;
                            dy = y;
                            dist = di;
                        }
                        total++;
                        // Flag tracks closer than 0.5 m in red
                        if (d < 500) {
                            c.setColor(Color.RED);
                            count++;
                        }
                        c.drawRect(x, y, 1, 1);
                    }
                }
            }

            if (depth != null) {
                if (index > -1)
                    System.out.println(visualOdometry.getTrackLocation(index));
                mc++;
                md = md + depth.get(dx, dy) / 1000f;
                c.setColor(Color.GREEN);
                c.drawOval(dx - 3, dy - 3, 6, 6);
            }

            c.setColor(Color.CYAN);
            c.drawString("Fps:" + fps, 10, 20);
            c.drawString(String.format("Loc: %4.2f %4.2f %4.2f", T.x, T.y, T.z), 10, info.height - 10);
            c.drawString(String.format("Depth: %3.2f", mouse_depth), info.width - 85, info.height - 10);

            // Publish the estimated position on the bus
            position.x = T.x;
            position.y = T.y;
            position.z = T.z;
            position.tms = timeRgb;
            bus1.writeObject(position);

            // Warn when most inlier tracks are closer than 0.5 m
            if ((count / total) > 0.6f) {
                c.setColor(Color.RED);
                c.drawString("WARNING!", info.width - 70, 20);
            }

            c.dispose();

            Platform.runLater(() -> {
                SwingFXUtils.toFXImage(output, wirgb);
            });
        }
    }).start();
}