Use of boofcv.abst.feature.detect.interest.ConfigGeneralDetector in the BoofCV project by lessthanoptimal.
From the class ShowFeatureOrientationApp, method setActiveAlgorithm:
/**
 * Recomputes the orientation of every detected interest point using the algorithm the
 * user selected, then renders the oriented features on top of the input image.
 *
 * @param indexFamily index of the selected algorithm family (unused by this method)
 * @param name        display name of the selected algorithm (unused by this method)
 * @param cookie      the {@link RegionOrientation} instance to apply; must be one of the
 *                    subtypes handled below
 * @throws IllegalArgumentException if {@code cookie} is not a recognized orientation type
 */
@Override
public synchronized void setActiveAlgorithm(int indexFamily, String name, Object cookie) {
	if (input == null)
		return;
	RegionOrientation orientation = (RegionOrientation) cookie;
	// NOTE(review): the region radius is hard-coded to 10 here while drawing below uses the
	// field 'radius' — confirm these are intentionally different values.
	orientation.setObjectRadius(10);

	T workImage = ConvertBufferedImage.convertFromSingle(input, null, imageType);
	// Lazily computes whichever image derivatives the detector/orientation request
	AnyImageDerivative<T, D> deriv = GImageDerivativeOps.derivativeForScaleSpace(imageType, derivType);
	deriv.setInput(workImage);

	// Harris corners are used purely to obtain points at which to evaluate orientation
	int r = 2;
	GeneralFeatureDetector<T, D> detector =
			FactoryDetectPoint.createHarris(new ConfigGeneralDetector(NUM_FEATURES, r, 1), false, derivType);

	// Supply only the derivative images this detector actually requires
	D derivX = null, derivY = null, derivXX = null, derivYY = null, derivXY = null;
	if (detector.getRequiresGradient()) {
		derivX = deriv.getDerivative(true);
		derivY = deriv.getDerivative(false);
	} else if (detector.getRequiresHessian()) {
		derivXX = deriv.getDerivative(true, true);
		derivYY = deriv.getDerivative(false, false);
		derivXY = deriv.getDerivative(true, false);
	}
	detector.process(workImage, derivX, derivY, derivXX, derivYY, derivXY);
	QueueCorner points = detector.getMaximums();

	FancyInterestPointRender render = new FancyInterestPointRender();

	// Each orientation implementation consumes a different image representation
	if (orientation instanceof OrientationGradient) {
		((OrientationGradient<D>) orientation).setImage(deriv.getDerivative(true), deriv.getDerivative(false));
	} else if (orientation instanceof OrientationIntegral) {
		T ii = GIntegralImageOps.transform(workImage, null);
		((OrientationIntegral<T>) orientation).setImage(ii);
	} else if (orientation instanceof OrientationImageAverage) {
		((OrientationImageAverage) orientation).setImage(workImage);
	} else {
		throw new IllegalArgumentException("Unknown algorithm type.");
	}

	for (int i = 0; i < points.size; i++) {
		Point2D_I16 p = points.get(i);
		double angle = orientation.compute(p.x, p.y);
		render.addCircle(p.x, p.y, radius, Color.RED, angle);
	}

	// BUG FIX: BufferedImage's int-type constructor throws IllegalArgumentException for
	// TYPE_CUSTOM (0), which getType() legitimately returns for non-standard images;
	// fall back to a standard RGB buffer in that case.
	int outputType = input.getType() == BufferedImage.TYPE_CUSTOM
			? BufferedImage.TYPE_INT_RGB : input.getType();
	BufferedImage temp = new BufferedImage(input.getWidth(), input.getHeight(), outputType);
	Graphics2D g2 = temp.createGraphics();
	g2.drawImage(input, 0, 0, null);
	g2.setStroke(new BasicStroke(2.5f));
	render.draw(g2);
	g2.dispose(); // release the native graphics context promptly
	panel.setImage(temp);
	panel.repaint();
}
Use of boofcv.abst.feature.detect.interest.ConfigGeneralDetector in the BoofCV project by lessthanoptimal.
From the class IntensityPointFeatureApp, method handleAlgorithmChanged:
/**
 * Rebuilds the point-feature detector from the current control-panel settings after the
 * user changes the algorithm or its parameters, then reprocesses the current image.
 *
 * @throws IllegalArgumentException if the selected algorithm name is not recognized
 */
private void handleAlgorithmChanged() {
	// Translate the current UI state into a detector configuration
	ConfigGeneralDetector config = new ConfigGeneralDetector();
	config.radius = controlPanel.radius;
	config.maxFeatures = 200;
	// The processing thread also reads 'detector'; swap it under the lock
	synchronized (lockAlgorithm) {
		switch (controlPanel.selected) {
			case "Laplacian":
				// Laplacian blobs can be darker or brighter than their surroundings,
				// so local minimums must be detected in addition to maximums
				config.detectMinimums = true;
				detector = FactoryDetectPoint.createHessian(HessianBlobIntensity.Type.TRACE, config, derivType);
				break;
			case "Hessian Det":
				detector = FactoryDetectPoint.createHessian(HessianBlobIntensity.Type.DETERMINANT, config, derivType);
				break;
			case "Harris":
				detector = FactoryDetectPoint.createHarris(config, controlPanel.weighted, derivType);
				break;
			case "Shi Tomasi":
				detector = FactoryDetectPoint.createShiTomasi(config, controlPanel.weighted, derivType);
				break;
			case "FAST":
				detector = FactoryDetectPoint.createFast(null, config, imageType);
				break;
			case "KitRos":
				detector = FactoryDetectPoint.createKitRos(config, derivType);
				break;
			case "Median":
				detector = FactoryDetectPoint.createMedian(config, imageType);
				break;
			default:
				// BUG FIX: previously threw RuntimeException("Unknown exception"), a message
				// that named neither the problem nor the offending value
				throw new IllegalArgumentException("Unknown detector: " + controlPanel.selected);
		}
	}
	reprocessImageOnly();
}
Use of boofcv.abst.feature.detect.interest.ConfigGeneralDetector in the BoofCV project by lessthanoptimal.
From the class ExampleVisualOdometryDepth, method main:
/**
 * Runs RGB-D (Kinect) visual odometry on the example sequence and prints the estimated
 * camera location plus inlier percentage after every frame.
 *
 * @param args ignored
 * @throws IOException if the calibration file or video streams cannot be opened
 */
public static void main(String[] args) throws IOException {
	MediaManager media = DefaultMediaManager.INSTANCE;
	String directory = UtilIO.pathExample("kinect/straight");

	// Camera description for this sequence.
	// NOTE(review): no '/' separator is inserted before the file name here, unlike the
	// other examples — confirm pathExample() returns a trailing separator for this path.
	VisualDepthParameters param = CalibrationIO.load(media.openFile(directory + "visualdepth.yaml"));

	// Pyramidal KLT tracker: 4 pyramid levels, template radius of 3, up to 600 features
	PkltConfig kltSettings = new PkltConfig();
	kltSettings.pyramidScaling = new int[] { 1, 2, 4, 8 };
	kltSettings.templateRadius = 3;
	PointTrackerTwoPass<GrayU8> tracker = FactoryPointTrackerTwoPass.klt(
			kltSettings, new ConfigGeneralDetector(600, 3, 1), GrayU8.class, GrayS16.class);

	// Provides 3D locations for sparse features using the depth image (scale factor 1e-3)
	DepthSparse3D<GrayU16> sparseDepth = new DepthSparse3D.I<>(1e-3);

	// Assemble the depth visual odometry estimator
	DepthVisualOdometry<GrayU8, GrayU16> odometry = FactoryVisualOdometry.depthDepthPnP(
			1.5, 120, 2, 200, 50, true, sparseDepth, tracker, GrayU8.class, GrayU16.class);

	// Pass in intrinsic/extrinsic calibration. This can be changed in the future.
	odometry.setCalibration(param.visualParam, new DoNothing2Transform2_F32());

	// Step through the paired RGB and depth streams, printing the pose after each frame
	SimpleImageSequence<GrayU8> rgbSequence =
			media.openVideo(directory + "rgb.mjpeg", ImageType.single(GrayU8.class));
	SimpleImageSequence<GrayU16> depthSequence =
			media.openVideo(directory + "depth.mpng", ImageType.single(GrayU16.class));
	while (rgbSequence.hasNext()) {
		GrayU8 rgbFrame = rgbSequence.next();
		GrayU16 depthFrame = depthSequence.next();
		if (!odometry.process(rgbFrame, depthFrame)) {
			throw new RuntimeException("VO Failed!");
		}
		Vector3D_F64 location = odometry.getCameraToWorld().getT();
		System.out.printf("Location %8.2f %8.2f %8.2f inliers %s\n",
				location.x, location.y, location.z, inlierPercent(odometry));
	}
}
Use of boofcv.abst.feature.detect.interest.ConfigGeneralDetector in the BoofCV project by lessthanoptimal.
From the class ExampleVisualOdometryMonocularPlane, method main:
/**
 * Runs monocular plane-based visual odometry on the example sequence, printing the
 * estimated camera location plus inlier percentage for each frame. On a tracking fault
 * the estimator is reset and processing continues.
 *
 * @param args ignored
 */
public static void main(String[] args) {
	MediaManager media = DefaultMediaManager.INSTANCE;
	String dataPath = UtilIO.pathExample("vo/drc/");

	// Load the camera description and open the video sequence
	MonoPlaneParameters calibration = CalibrationIO.load(media.openFile(dataPath + "mono_plane.yaml"));
	SimpleImageSequence<GrayU8> sequence =
			media.openVideo(dataPath + "left.mjpeg", ImageType.single(GrayU8.class));

	// Pyramidal KLT tracker: 4 pyramid levels, template radius of 3, up to 600 features
	PkltConfig kltSettings = new PkltConfig();
	kltSettings.pyramidScaling = new int[] { 1, 2, 4, 8 };
	kltSettings.templateRadius = 3;
	ConfigGeneralDetector detectorSettings = new ConfigGeneralDetector(600, 3, 1);
	PointTracker<GrayU8> tracker = FactoryPointTracker.klt(kltSettings, detectorSettings, GrayU8.class, null);

	// Assemble the monocular plane-based odometry estimator
	MonocularPlaneVisualOdometry<GrayU8> odometry = FactoryVisualOdometry.monoPlaneInfinity(
			75, 2, 1.5, 200, tracker, ImageType.single(GrayU8.class));

	// Pass in intrinsic/extrinsic calibration. This can be changed in the future.
	odometry.setCalibration(calibration);

	// Process every frame and report the estimated location plus inlier statistics
	while (sequence.hasNext()) {
		GrayU8 frame = sequence.next();
		if (!odometry.process(frame)) {
			// Faults are treated as recoverable: announce, reset, keep going
			System.out.println("Fault!");
			odometry.reset();
		}
		Vector3D_F64 location = odometry.getCameraToWorld().getT();
		System.out.printf("Location %8.2f %8.2f %8.2f inliers %s\n",
				location.x, location.y, location.z, inlierPercent(odometry));
	}
}
Use of boofcv.abst.feature.detect.interest.ConfigGeneralDetector in the BoofCV project by lessthanoptimal.
From the class ExampleVisualOdometryStereo, method main:
/**
 * Runs stereo visual odometry on the example left/right video pair, printing the
 * estimated camera location plus inlier percentage after every frame pair.
 *
 * @param args ignored
 */
public static void main(String[] args) {
	MediaManager media = DefaultMediaManager.INSTANCE;
	String dataPath = UtilIO.pathExample("vo/backyard/");

	// Load stereo calibration and open both camera streams
	StereoParameters stereoParam = CalibrationIO.load(media.openFile(dataPath + "stereo.yaml"));
	SimpleImageSequence<GrayU8> leftSequence =
			media.openVideo(dataPath + "left.mjpeg", ImageType.single(GrayU8.class));
	SimpleImageSequence<GrayU8> rightSequence =
			media.openVideo(dataPath + "right.mjpeg", ImageType.single(GrayU8.class));

	// Pyramidal KLT tracker: 4 pyramid levels, template radius of 3, up to 600 features
	PkltConfig kltSettings = new PkltConfig();
	kltSettings.pyramidScaling = new int[] { 1, 2, 4, 8 };
	kltSettings.templateRadius = 3;
	PointTrackerTwoPass<GrayU8> tracker = FactoryPointTrackerTwoPass.klt(
			kltSettings, new ConfigGeneralDetector(600, 3, 1), GrayU8.class, GrayS16.class);

	// Sparse disparity computation used to recover the depth of tracked points
	StereoDisparitySparse<GrayU8> disparity =
			FactoryStereoDisparity.regionSparseWta(0, 150, 3, 3, 30, -1, true, GrayU8.class);

	// Assemble the stereo odometry estimator
	StereoVisualOdometry<GrayU8> odometry = FactoryVisualOdometry.stereoDepth(
			1.5, 120, 2, 200, 50, true, disparity, tracker, GrayU8.class);

	// Pass in intrinsic/extrinsic calibration. This can be changed in the future.
	odometry.setCalibration(stereoParam);

	// Process each synchronized left/right pair and report the estimated pose
	while (leftSequence.hasNext()) {
		GrayU8 leftFrame = leftSequence.next();
		GrayU8 rightFrame = rightSequence.next();
		if (!odometry.process(leftFrame, rightFrame)) {
			throw new RuntimeException("VO Failed!");
		}
		Vector3D_F64 location = odometry.getCameraToWorld().getT();
		System.out.printf("Location %8.2f %8.2f %8.2f inliers %s\n",
				location.x, location.y, location.z, inlierPercent(odometry));
	}
}
Aggregations