Use of boofcv.gui.feature.ImageLinePanel in project BoofCV by lessthanoptimal.
From the class VisualizeHoughFoot, method process():
public void process(BufferedImage image) {
	I input = GeneralizedImageOps.createSingleBand(imageType, image.getWidth(), image.getHeight());
	I blur = GeneralizedImageOps.createSingleBand(imageType, image.getWidth(), image.getHeight());

	// convert into the working image type and blur to suppress noise
	ConvertBufferedImage.convertFromSingle(image, input, imageType);
	GBlurImageOps.gaussian(input, blur, -1, 2, null);

	DetectLineHoughFoot<I, D> alg = FactoryDetectLineAlgs.houghFoot(
			new ConfigHoughFoot(6, 12, 5, 25, 10), imageType, derivType);

	// detect lines in the blurred image and overlay them on the original
	ImageLinePanel gui = new ImageLinePanel();
	gui.setBackground(image);
	gui.setLines(alg.detect(blur));
	gui.setPreferredSize(new Dimension(image.getWidth(), image.getHeight()));

	// render the Hough parameter space and the binary edge image
	BufferedImage renderedTran = VisualizeImageData.grayMagnitude(alg.getTransform().getTransform(), null, -1);
	BufferedImage renderedBinary = VisualizeBinaryData.renderBinary(alg.getBinary(), false, null);

	ShowImages.showWindow(renderedBinary, "Detected Edges");
	ShowImages.showWindow(renderedTran, "Parameter Space");
	ShowImages.showWindow(gui, "Detected Lines");
}
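The ConfigHoughFoot arguments are easier to follow with named locals. This is only a sketch: the parameter names below assume the usual ConfigHoughFoot constructor order (localMaxRadius, minCounts, minDistanceFromOrigin, thresholdEdge, maxLines) and should be checked against the BoofCV version in use.

// Assumed parameter meanings -- verify against the ConfigHoughFoot javadoc for your BoofCV version.
int localMaxRadius = 6;        // radius used when searching for local maxima in the transform
int minCounts = 12;            // minimum number of votes a line needs
int minDistanceFromOrigin = 5; // lines closer than this to the origin are ignored
float thresholdEdge = 25;      // edge-intensity threshold
int maxLines = 10;             // maximum number of lines returned

DetectLineHoughFoot<I, D> alg = FactoryDetectLineAlgs.houghFoot(
		new ConfigHoughFoot(localMaxRadius, minCounts, minDistanceFromOrigin, thresholdEdge, maxLines),
		imageType, derivType);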
Use of boofcv.gui.feature.ImageLinePanel in project BoofCV by lessthanoptimal.
From the class VisualizeHoughPolar, method process():
public void process(BufferedImage image) {
	I input = GeneralizedImageOps.createSingleBand(imageType, image.getWidth(), image.getHeight());
	I blur = GeneralizedImageOps.createSingleBand(imageType, image.getWidth(), image.getHeight());

	ConvertBufferedImage.convertFromSingle(image, input, imageType);
	GBlurImageOps.gaussian(input, blur, -1, 2, null);

	DetectLineHoughPolar<I, D> alg = FactoryDetectLineAlgs.houghPolar(
			new ConfigHoughPolar(5, 10, 2, Math.PI / 180, 25, 10), imageType, derivType);

	List<LineParametric2D_F32> lines = alg.detect(blur);

	ImageLinePanel gui = new ImageLinePanel();
	gui.setBackground(image);
	gui.setLines(lines);
	gui.setPreferredSize(new Dimension(image.getWidth(), image.getHeight()));

	BufferedImage renderedTran = VisualizeImageData.grayMagnitude(alg.getTransform().getTransform(), null, -1);
	BufferedImage renderedBinary = VisualizeBinaryData.renderBinary(alg.getBinary(), false, null);

	// Draw the location of lines onto the magnitude image
	Graphics2D g2 = renderedTran.createGraphics();
	g2.setColor(Color.RED);
	Point2D_F64 location = new Point2D_F64();
	for (LineParametric2D_F32 l : lines) {
		alg.getTransform().lineToCoordinate(l, location);

		int r = 6;
		int w = r * 2 + 1;
		int x = (int) (location.x + 0.5);
		int y = (int) (location.y + 0.5);
		// System.out.println(x+" "+y+" "+renderedTran.getWidth()+" "+renderedTran.getHeight());

		g2.drawOval(x - r, y - r, w, w);
	}

	ShowImages.showWindow(renderedBinary, "Detected Edges");
	ShowImages.showWindow(renderedTran, "Parameter Space");
	ShowImages.showWindow(gui, "Detected Lines");
}
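Each detected line comes back in parametric form: a point p on the line plus a direction vector slope. A small sketch of dumping the results to the console, assuming the standard georegression LineParametric2D_F32 fields p and slope:

// Print each detected line as a point on the line plus its orientation in degrees.
for (LineParametric2D_F32 l : lines) {
	double angle = Math.toDegrees(Math.atan2(l.slope.y, l.slope.x));
	System.out.printf("line through (%.1f, %.1f) at %.1f degrees%n", l.p.x, l.p.y, angle);
}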
Use of boofcv.gui.feature.ImageLinePanel in project BoofCV by lessthanoptimal.
From the class ExampleLineDetection, method detectLineSegments():
/**
 * Detects line segments inside the image.
 *
 * @param image Input image.
 * @param imageType Type of image processed by line detector.
 * @param derivType Type of image derivative.
 */
public static <T extends ImageGray<T>, D extends ImageGray<D>> void detectLineSegments(BufferedImage image, Class<T> imageType, Class<D> derivType) {
	// convert the image into a single band image
	T input = ConvertBufferedImage.convertFromSingle(image, null, imageType);

	// Comment/uncomment to try a different type of line detector
	DetectLineSegmentsGridRansac<T, D> detector = FactoryDetectLineAlgs.lineRansac(40, 30, 2.36, true, imageType, derivType);

	List<LineSegment2D_F32> found = detector.detect(input);

	// display the results
	ImageLinePanel gui = new ImageLinePanel();
	gui.setBackground(image);
	gui.setLineSegments(found);
	gui.setPreferredSize(new Dimension(image.getWidth(), image.getHeight()));

	listPanel.addItem(gui, "Found Line Segments");
}
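Because detectLineSegments returns explicit LineSegment2D_F32 endpoints, post-processing the results is straightforward. A hypothetical filtering step, assuming the georegression fields a and b for the segment endpoints:

// Keep only segments at least minLength pixels long (illustrative threshold).
float minLength = 20;
List<LineSegment2D_F32> longSegments = new ArrayList<>();
for (LineSegment2D_F32 s : found) {
	if (s.a.distance(s.b) >= minLength)
		longSegments.add(s);
}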
Use of boofcv.gui.feature.ImageLinePanel in project BoofCV by lessthanoptimal.
From the class VisualizeLineRansac, method process():
public void process(@Nullable BufferedImage image) {
	Objects.requireNonNull(image);
	// int regionSize = 40;

	I input = GeneralizedImageOps.createSingleBand(imageType, image.getWidth(), image.getHeight());
	D derivX = GeneralizedImageOps.createSingleBand(derivType, image.getWidth(), image.getHeight());
	D derivY = GeneralizedImageOps.createSingleBand(derivType, image.getWidth(), image.getHeight());
	GrayF32 edgeIntensity = new GrayF32(input.width, input.height);
	// GrayF32 suppressed = new GrayF32(input.width,input.height);
	// GrayF32 orientation = new GrayF32(input.width,input.height);
	// GrayS8 direction = new GrayS8(input.width,input.height);
	GrayU8 detected = new GrayU8(input.width, input.height);

	ModelManager<LinePolar2D_F32> manager = new ModelManagerLinePolar2D_F32();
	ModelMatcherPost<LinePolar2D_F32, Edgel> matcher = new Ransac<>(123123, 25, 1, manager, Edgel.class);
	matcher.setModel(() -> new GridLineModelFitter((float) (Math.PI * 0.75)),
			() -> new GridLineModelDistance((float) (Math.PI * 0.75)));

	ImageGradient<I, D> gradient = FactoryDerivative.sobel(imageType, derivType);

	System.out.println("Image width " + input.width + " height " + input.height);

	ConvertBufferedImage.convertFromSingle(image, input, imageType);
	gradient.process(input, derivX, derivY);
	GGradientToEdgeFeatures.intensityAbs(derivX, derivY, edgeIntensity);

	// non-max suppression on the lines
	// GGradientToEdgeFeatures.direction(derivX,derivY,orientation);
	// GradientToEdgeFeatures.discretizeDirection4(orientation,direction);
	// GradientToEdgeFeatures.nonMaxSuppression4(edgeIntensity,direction,suppressed);

	GThresholdImageOps.threshold(edgeIntensity, detected, 30, false);

	GridRansacLineDetector<GrayF32> alg = new ImplGridRansacLineDetector_F32(40, 10, matcher);
	alg.process((GrayF32) derivX, (GrayF32) derivY, detected);

	MatrixOfList<LineSegment2D_F32> gridLine = alg.getFoundLines();

	// ConnectLinesGrid connect = new ConnectLinesGrid(Math.PI*0.01,1,8);
	// connect.process(gridLine);
	// LineImageOps.pruneClutteredGrids(gridLine,3);
	List<LineSegment2D_F32> found = gridLine.createSingleList();
	System.out.println("size = " + found.size());
	LineImageOps.mergeSimilar(found, (float) (Math.PI * 0.03), 5f);
	// LineImageOps.pruneSmall(found,40);
	System.out.println("after size = " + found.size());

	ImageLinePanel gui = new ImageLinePanel();
	gui.setImage(image);
	gui.setLineSegments(found);
	gui.setPreferredSize(new Dimension(image.getWidth(), image.getHeight()));

	BufferedImage renderedBinary = VisualizeBinaryData.renderBinary(detected, false, null);

	ShowImages.showWindow(renderedBinary, "Detected Edges");
	ShowImages.showWindow(gui, "Detected Lines");
}
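The commented-out lines in process() sketch an optional non-maximum suppression pass on the gradient before thresholding. If re-enabled, the suppressed image would be thresholded instead of the raw edge intensity, roughly as assembled below from those commented calls (a sketch, not verified against the current API):

GrayF32 suppressed = new GrayF32(input.width, input.height);
GrayF32 orientation = new GrayF32(input.width, input.height);
GrayS8 direction = new GrayS8(input.width, input.height);

// thin the edge response along the gradient direction before thresholding
GGradientToEdgeFeatures.direction(derivX, derivY, orientation);
GradientToEdgeFeatures.discretizeDirection4(orientation, direction);
GradientToEdgeFeatures.nonMaxSuppression4(edgeIntensity, direction, suppressed);
GThresholdImageOps.threshold(suppressed, detected, 30, false);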
Use of boofcv.gui.feature.ImageLinePanel in project BoofCV by lessthanoptimal.
From the class ExampleLineDetection, method detectLines():
/**
 * Detects lines inside the image using different types of Hough detectors.
 *
 * @param image Input image.
 * @param imageType Type of image processed by line detector.
 * @param derivType Type of image derivative.
 */
public static <T extends ImageGray<T>, D extends ImageGray<D>> void detectLines(BufferedImage image, Class<T> imageType, Class<D> derivType) {
	// convert the image into a single band image
	T input = ConvertBufferedImage.convertFromSingle(image, null, imageType);

	// Comment/uncomment to try a different type of line detector
	DetectLineHoughPolar<T, D> detector = FactoryDetectLineAlgs.houghPolar(
			new ConfigHoughPolar(3, 30, 2, Math.PI / 180, edgeThreshold, maxLines), imageType, derivType);
	// DetectLineHoughFoot<T,D> detector = FactoryDetectLineAlgs.houghFoot(
	//		new ConfigHoughFoot(3, 8, 5, edgeThreshold, maxLines), imageType, derivType);
	// DetectLineHoughFootSubimage<T,D> detector = FactoryDetectLineAlgs.houghFootSub(
	//		new ConfigHoughFootSubimage(3, 8, 5, edgeThreshold, maxLines, 2, 2), imageType, derivType);

	List<LineParametric2D_F32> found = detector.detect(input);

	// display the results
	ImageLinePanel gui = new ImageLinePanel();
	gui.setBackground(image);
	gui.setLines(found);
	gui.setPreferredSize(new Dimension(image.getWidth(), image.getHeight()));

	listPanel.addItem(gui, "Found Lines");
}
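A minimal driver for the two ExampleLineDetection methods might look like the sketch below. It assumes listPanel is a shared ListDisplayPanel field and that an example image is available; the file name here is only a placeholder.

public static void main(String[] args) {
	// "simple_objects.jpg" is a placeholder path; substitute any image on disk
	BufferedImage input = UtilImageIO.loadImage(UtilIO.pathExample("simple_objects.jpg"));

	detectLines(input, GrayU8.class, GrayS16.class);
	detectLineSegments(input, GrayF32.class, GrayF32.class);

	ShowImages.showWindow(listPanel, "Detected Lines", true);
}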