Use of boofcv.gui.image.ImagePanel in project BoofCV by lessthanoptimal — class FiducialDetection, method process.
/**
 * Runs fiducial detection on the configured input source: a still image, a video
 * file, or a live webcam stream. Exits the process with status 1 when required
 * configuration is missing or the input cannot be opened.
 */
private void process() {
	// A detector must have been configured before processing can start.
	if (detector == null) {
		System.err.println("Need to specify which fiducial you wish to detect");
		System.exit(1);
	}
	// Optionally open a results file and write a header describing the output format.
	if (outputPath != null) {
		try {
			outputFile = new PrintStream(outputPath);
			outputFile.println("# Results from fiducial detection ");
			outputFile.println("# These comments should include the data source and the algorithm used, but I'm busy.");
			outputFile.println("# ");
			outputFile.println("# <frame #> <number of fiducials> <fiducial id> <X> <Y> <Z> <Q1> <Q2> <Q3> <Q4> ...");
			outputFile.println("# ");
			outputFile.println("# The special Euclidean transform saved each fiducial is from fiducial to camera");
			outputFile.println("# (X,Y,Z) is the translation and (Q1,Q2,Q3,Q4) specifies a quaternion");
			outputFile.println("# ");
		} catch (FileNotFoundException e) {
			System.err.println("Failed to open output file.");
			System.err.println(e.getMessage());
			System.exit(1);
		}
	}
	MediaManager media = DefaultMediaManager.INSTANCE;
	// Load camera intrinsics if a calibration file was supplied; otherwise
	// handleIntrinsic() below is responsible for providing a fallback model.
	CameraPinholeRadial intrinsic = intrinsicPath == null ? null : (CameraPinholeRadial) CalibrationIO.load(intrinsicPath);
	SimpleImageSequence<GrayU8> sequence = null;
	long pause = 0;
	BufferedImage buffered = null;
	// Video and webcam inputs are handled as streams; everything else is a single image.
	if (inputType == InputType.VIDEO || inputType == InputType.WEBCAM) {
		if (inputType == InputType.WEBCAM) {
			String device = getCameraDeviceString();
			sequence = media.openCamera(device, desiredWidth, desiredHeight, ImageType.single(GrayU8.class));
		} else {
			// just assume 30ms is appropriate. Should let the user specify this number
			pause = 30;
			sequence = media.openVideo(filePath, ImageType.single(GrayU8.class));
			sequence.setLoop(true);
		}
		intrinsic = handleIntrinsic(intrinsic, sequence.getNextWidth(), sequence.getNextHeight());
	} else {
		buffered = UtilImageIO.loadImage(filePath);
		if (buffered == null) {
			System.err.println("Can't find image or it can't be read. " + filePath);
			System.exit(1);
		}
		intrinsic = handleIntrinsic(intrinsic, buffered.getWidth(), buffered.getHeight());
	}
	ImagePanel gui = new ImagePanel();
	gui.setPreferredSize(new Dimension(intrinsic.width, intrinsic.height));
	ShowImages.showWindow(gui, "Fiducial Detector", true);
	// Configure the detector with the camera's lens distortion model.
	detector.setLensDistortion(new LensDistortionRadialTangential(intrinsic), intrinsic.width, intrinsic.height);
	// Dispatch to the stream or single-image processing path.
	if (sequence != null) {
		processStream(intrinsic, sequence, gui, pause);
	} else {
		processImage(intrinsic, buffered, gui);
	}
}
Use of boofcv.gui.image.ImagePanel in project narchy by automenta — class WebcamTrack, method main.
/**
 * Demonstrates KLT point tracking on a live webcam feed: grabs frames, tracks
 * point features, respawns tracks when too few remain active, and draws the
 * active tracks in a display window. Runs until the process is killed.
 */
public static void main(String[] args) {
	// tune the tracker for the image size and visual appearance
	ConfigGeneralDetector configDetector = new ConfigGeneralDetector(-1, 8, 1);
	PkltConfig configKlt = new PkltConfig(3, new int[] { 1, 2, 4, 8 });
	PointTracker<ImageFloat32> tracker = FactoryPointTracker.klt(configKlt, configDetector, ImageFloat32.class, null);
	// Open a webcam at a resolution close to 640x480
	Webcam webcam = UtilWebcamCapture.openDefault(640, 480);
	// Create the panel used to display the image and tracked features
	ImagePanel gui = new ImagePanel();
	gui.setPreferredSize(webcam.getViewSize());
	ShowImages.showWindow(gui, "KLT Tracker");
	int minimumTracks = 100;
	while (true) {
		BufferedImage image = webcam.getImage();
		ImageFloat32 gray = ConvertBufferedImage.convertFrom(image, (ImageFloat32) null);
		tracker.process(gray);
		List<PointTrack> tracks = tracker.getActiveTracks(null);
		// Spawn tracks if there are too few
		if (tracks.size() < minimumTracks) {
			tracker.spawnTracks();
			tracks = tracker.getActiveTracks(null);
			// Re-arm the threshold at half the spawned count so spawning
			// isn't re-triggered on every single frame.
			minimumTracks = tracks.size() / 2;
		}
		// Draw the tracks
		Graphics2D g2 = image.createGraphics();
		try {
			for (PointTrack t : tracks) {
				VisualizeFeatures.drawPoint(g2, (int) t.x, (int) t.y, Color.RED);
			}
		} finally {
			// BUG FIX: the graphics context was never released, leaking a
			// native resource on every iteration of this infinite loop.
			g2.dispose();
		}
		gui.setBufferedImageSafe(image);
	}
}
Use of boofcv.gui.image.ImagePanel in project BoofCV by lessthanoptimal — class RemoveLensDistortionApp, method processImage.
/**
 * Displays the distorted input frame alongside undistorted renderings produced
 * with each of the three pixel-adjustment strategies (none, expand, full view).
 */
@Override
public void processImage(int sourceID, long frameID, final BufferedImage buffered, ImageBase input) {
	// target model: the same camera but with the distortion parameters stripped away
	CameraPinhole pinholeModel = new CameraPinhole(param);
	// keep the distorted frame and allocate storage for the corrected one
	dist = (T) input.clone();
	undist = (T) input.createSameShape();
	// update the GUI on the EDT; the panel lets the user click to draw a
	// horizontal line, which makes the rectification easier to see
	SwingUtilities.invokeLater(new Runnable() {
		public void run() {
			gui.reset();
			gui.addItem(new ImagePanel(buffered), "Original");
		}
	});
	// render one undistorted view per adjustment strategy
	addUndistorted("No Adjustment", LensDistortionOps.transformChangeModel_F32(AdjustmentType.NONE, param, pinholeModel, true, null));
	addUndistorted("Expand", LensDistortionOps.transformChangeModel_F32(AdjustmentType.EXPAND, param, pinholeModel, true, null));
	addUndistorted("Full View", LensDistortionOps.transformChangeModel_F32(AdjustmentType.FULL_VIEW, param, pinholeModel, true, null));
}
Use of boofcv.gui.image.ImagePanel in project BoofCV by lessthanoptimal — class ExampleSegmentColor, method printClickedColor.
/**
 * Shows a color image and lets the user click a pixel. The clicked pixel's color
 * is converted to HSV, the HSV values are printed, and similar pixels are
 * displayed via {@code showSelectedColor}.
 */
public static void printClickedColor(final BufferedImage image) {
	ImagePanel panel = new ImagePanel(image);
	panel.addMouseListener(new MouseAdapter() {
		@Override
		public void mouseClicked(MouseEvent e) {
			// unpack the packed RGB value at the clicked location
			int rgb = image.getRGB(e.getX(), e.getY());
			int red = (rgb >> 16) & 0xFF;
			int green = (rgb >> 8) & 0xFF;
			int blue = rgb & 0xFF;
			float[] hsv = new float[3];
			ColorHsv.rgbToHsv(red, green, blue, hsv);
			System.out.println("H = " + hsv[0] + " S = " + hsv[1] + " V = " + hsv[2]);
			showSelectedColor("Selected", image, hsv[0], hsv[1]);
		}
	});
	ShowImages.showWindow(panel, "Color Selector");
}
Use of boofcv.gui.image.ImagePanel in project BoofCV by lessthanoptimal — class ExampleJCodecDisplayFrames, method main.
/**
 * Plays back a video with JCodec in a window, timing only the decode step and
 * printing the decode-only rate after the sequence ends.
 */
public static void main(String[] args) {
	String fileName = UtilIO.pathExample("background/highway_bridge_jitter.mp4");
	ImageType type = ImageType.pl(3, GrayU8.class);
	// ImageType type = ImageType.single(GrayU8.class);
	// ImageType type = ImageType.pl(3, GrayF32.class);
	// ImageType type = ImageType.single(GrayF32.class);
	JCodecSimplified sequence = new JCodecSimplified<>(fileName, type);
	BufferedImage out;
	// Use the first frame to size the display buffer; a video with no frames is fatal.
	if (sequence.hasNext()) {
		ImageBase frame = sequence.next();
		out = new BufferedImage(frame.width, frame.height, BufferedImage.TYPE_INT_RGB);
		ConvertBufferedImage.convertTo(frame, out, false);
	} else {
		throw new RuntimeException("No first frame?!?!");
	}
	ImagePanel gui = new ImagePanel(out);
	ShowImages.showWindow(gui, "Video!", true);
	long totalNano = 0;
	while (sequence.hasNext()) {
		// time only the decode, not the conversion/painting/sleep
		long before = System.nanoTime();
		ImageBase frame = sequence.next();
		totalNano += System.nanoTime() - before;
		ConvertBufferedImage.convertTo(frame, out, false);
		gui.repaint();
		try {
			// crude ~45 FPS playback pacing
			Thread.sleep(22);
		} catch (InterruptedException e) {
			// BUG FIX: the interrupt was silently swallowed. Restore the
			// interrupt status and stop playback so the thread can actually
			// be interrupted by its owner.
			Thread.currentThread().interrupt();
			break;
		}
	}
	// NOTE(review): despite the "FPS" label this prints milliseconds per frame
	// (totalNano/1e6 is total ms, divided by frame count) — confirm intent.
	System.out.println("Only read FPS = " + (totalNano / 1000000.0) / sequence.getFrameNumber());
}
Aggregations