Usage example of georegression.struct.se.Se3_F32 in the BoofCV project by lessthanoptimal: class TestMultiCameraToEquirectangular, method addCamera_explicit_mask.
@Test
public void addCamera_explicit_mask() {
	MultiCameraToEquirectangular<GrayF32> alg = createAlgorithm();

	// Build a mask which marks only the left half of the input image as valid
	GrayU8 mask = new GrayU8(inputWidth, inputHeight);
	for (int row = 0; row < inputHeight; row++) {
		for (int col = 0; col < inputWidth / 2; col++) {
			mask.set(col, row, 1);
		}
	}

	alg.addCamera(new Se3_F32(), new HelperDistortion(), mask);
	MultiCameraToEquirectangular.Camera c = alg.cameras.get(0);

	// Pixels should be rejected both by the explicit mask and because values are repeated.
	// Count how many pixels inside the expected valid region survived in the camera's mask.
	int total = 0;
	for (int row = 0; row < inputHeight; row++) {
		for (int col = 0; col < inputWidth; col++) {
			boolean insideExpected = row < inputHeight / 2 && col < inputWidth / 2;
			if (insideExpected && c.mask.get(col, row) > 0)
				total++;
		}
	}

	// The surviving count should be close to one quarter of the image area
	double fractionalError = Math.abs(1.0 - total / (inputWidth * inputHeight / 4.0));
	assertTrue(fractionalError <= 0.05);
}
Usage example of georegression.struct.se.Se3_F32 in the BoofCV project by lessthanoptimal: class TestMultiCameraToEquirectangular, method addCamera_implicit_mask.
@Test
public void addCamera_implicit_mask() {
	MultiCameraToEquirectangular<GrayF32> alg = createAlgorithm();

	// No explicit mask: the camera's mask is derived from the image dimensions
	alg.addCamera(new Se3_F32(), new HelperDistortion(), inputWidth, inputHeight);
	MultiCameraToEquirectangular.Camera c = alg.cameras.get(0);

	// Pixels should be rejected where values are repeated. Count survivors in the
	// top half of the image, which is the region expected to remain valid.
	int total = 0;
	for (int row = 0; row < inputHeight; row++) {
		for (int col = 0; col < inputWidth; col++) {
			if (row < inputHeight / 2 && c.mask.get(col, row) > 0)
				total++;
		}
	}

	// The surviving count should be close to half of the image area
	double fractionalError = Math.abs(1.0 - total / (inputWidth * inputHeight / 2.0));
	assertTrue(fractionalError <= 0.05);
}
Usage example of georegression.struct.se.Se3_F32 in the BoofCV project by lessthanoptimal: class ExampleFisheyeToEquirectangular, method main.
/**
 * Fuses two 185-degree fisheye images (front and back facing) into a single
 * equirectangular panorama. Loads the camera calibration, precomputes the
 * per-camera distortion transforms and validity masks, then renders and displays
 * the combined 800x400 equirectangular image.
 */
public static void main(String[] args) {
	// Path to image data and calibration data
	String fisheyePath = UtilIO.pathExample("fisheye/theta");

	// Load the fisheye camera parameters for the front and back cameras
	CameraUniversalOmni model0 = CalibrationIO.load(new File(fisheyePath, "front.yaml"));
	CameraUniversalOmni model1 = CalibrationIO.load(new File(fisheyePath, "back.yaml"));

	LensDistortionWideFOV distort0 = new LensDistortionUniversalOmni(model0);
	LensDistortionWideFOV distort1 = new LensDistortionUniversalOmni(model1);

	ImageType<Planar<GrayF32>> imageType = ImageType.pl(3, GrayF32.class);
	InterpolatePixel<Planar<GrayF32>> interp = FactoryInterpolation.createPixel(0, 255, InterpolationType.BILINEAR, BorderType.ZERO, imageType);
	ImageDistort<Planar<GrayF32>, Planar<GrayF32>> distort = FactoryDistort.distort(false, interp, imageType);

	// This will create an equirectangular image with 800 x 400 pixels
	MultiCameraToEquirectangular<Planar<GrayF32>> alg = new MultiCameraToEquirectangular<>(distort, 800, 400, imageType);

	// This is an important parameter and is used to filter out falsely mirrored pixels
	alg.setMaskToleranceAngle(UtilAngle.radian(0.1f));

	// Camera has a known FOV of 185 degrees. The edges are likely to be noisy,
	// so crop the mask a bit down to 182 degrees.
	GrayU8 mask0 = createMask(model0, distort0, UtilAngle.radian(182));
	GrayU8 mask1 = createMask(model1, distort1, UtilAngle.radian(182));

	// Rotate camera axis so that +x is forward and not +z and make it visually pleasing
	FMatrixRMaj adjR = ConvertRotation3D_F32.eulerToMatrix(EulerType.XYZ, GrlConstants.F_PI / 2, 0, 0, null);

	// Rotation from the front camera to the back facing camera.
	// This is only an approximation. Should be determined through calibration.
	FMatrixRMaj f2b = ConvertRotation3D_F32.eulerToMatrix(EulerType.ZYX, GrlConstants.F_PI, 0, 0, null);

	Se3_F32 frontToFront = new Se3_F32();
	frontToFront.setRotation(adjR);
	Se3_F32 frontToBack = new Se3_F32();
	CommonOps_FDRM.mult(f2b, adjR, frontToBack.R);

	// Add the cameras and specify which pixels are valid. These functions precompute the entire
	// transform and can be relatively slow, but generating the equirectangular image should be
	// much faster.
	// BUG FIX: the transforms were swapped — the front camera (model0/mask0) must be registered
	// with frontToFront and the back camera (model1/mask1) with frontToBack, matching the order
	// in which the front and back images are rendered below.
	alg.addCamera(frontToFront, distort0, mask0);
	alg.addCamera(frontToBack, distort1, mask1);

	// Load fisheye RGB images, front first to match the camera registration order above
	BufferedImage buffered0 = UtilImageIO.loadImage(fisheyePath, "front_table.jpg");
	Planar<GrayF32> fisheye0 = ConvertBufferedImage.convertFrom(buffered0, true, ImageType.pl(3, GrayF32.class));
	BufferedImage buffered1 = UtilImageIO.loadImage(fisheyePath, "back_table.jpg");
	Planar<GrayF32> fisheye1 = ConvertBufferedImage.convertFrom(buffered1, true, ImageType.pl(3, GrayF32.class));

	List<Planar<GrayF32>> images = new ArrayList<>();
	images.add(fisheye0);
	images.add(fisheye1);

	alg.render(images);

	BufferedImage equiOut = ConvertBufferedImage.convertTo(alg.getRenderedImage(), null, true);
	ShowImages.showWindow(equiOut, "Dual Fisheye to Equirectangular", true);
}
Usage example of georegression.struct.se.Se3_F32 in the BoofCV project by lessthanoptimal: class TestVisOdomDirectColorDepth, method singleStepArtificialTranslation.
/**
* Generate low level synthetic data that should simulate a translation along one axis. Then check to see if
* has the expected behavior at a high level
*/
@Test
public void singleStepArtificialTranslation() {
	// The pixel color is declared and the gradient says intensity increases to the right,
	// so the estimated motion should be in the negative x direction
	Se3_F32 negX = computeMotion(10, 20, 6, 0);
	assertTrue(negX.T.x < -0.02);
	assertEquals(0, negX.T.y, 1e-2f);
	assertEquals(0, negX.T.z, 1e-2f);
	assertTrue(rotationMag(negX) < GrlConstants.TEST_SQ_F64);

	// Reversing before/after colors should reverse the direction of motion
	Se3_F32 posX = computeMotion(20, 10, 6, 0);
	assertTrue(posX.T.x > 0.02);
	assertEquals(0, posX.T.y, 1e-2f);
	assertEquals(0, posX.T.z, 1e-2f);
	assertTrue(rotationMag(posX) < GrlConstants.TEST_SQ_F64);
	assertEquals(negX.T.x, -posX.T.x, 1e-4f);

	// A gradient along y should produce motion along the y-axis instead
	Se3_F32 negY = computeMotion(10, 20, 0, 6);
	assertEquals(0, negY.T.x, 1e-2f);
	assertTrue(negY.T.y < -0.02);
	assertEquals(0, negY.T.z, 1e-2f);
	assertTrue(rotationMag(negY) < GrlConstants.TEST_SQ_F64);
	assertEquals(negX.T.x, negY.T.y, 0.01);

	// A smaller gradient with the same color change implies a larger motion
	Se3_F32 bigger = computeMotion(10, 20, 3, 0);
	assertTrue(1.5f * Math.abs(negX.T.x) < Math.abs(bigger.T.x));
}
Usage example of georegression.struct.se.Se3_F32 in the BoofCV project by lessthanoptimal: class TestVisOdomDirectColorDepth, method computeMotion.
/**
 * Builds a synthetic scene with a uniform color change and a constant image gradient,
 * runs a single direct visual-odometry step, and returns the estimated motion twist.
 */
public Se3_F32 computeMotion(float colorBefore, float colorAfter, float dx, float dy) {
	VisOdomDirectColorDepth<GrayF32, GrayF32> alg = new VisOdomDirectColorDepth<>(numBands, imageType, imageType);
	alg.setCameraParameters(fx, fy, cx, cy, width, height);

	Planar<GrayF32> input = new Planar<>(GrayF32.class, width, height, numBands);
	GImageMiscOps.fill(input, colorAfter);

	alg.initMotion(input);

	// Constant gradient everywhere, with a touch of noise to avoid pathological cases
	GImageMiscOps.fill(alg.derivX, dx);
	GImageMiscOps.fill(alg.derivY, dy);
	GImageMiscOps.addUniform(alg.derivX, rand, 0f, 0.1f);
	GImageMiscOps.addUniform(alg.derivY, rand, 0f, 0.1f);

	// Synthesize key-frame pixels: random image locations back-projected to 3D points
	// in front of the camera, all with the "before" color
	for (int i = 0; i < 100; i++) {
		VisOdomDirectColorDepth.Pixel pixel = alg.keypixels.grow();
		for (int band = 0; band < numBands; band++) {
			pixel.bands[band] = colorBefore;
		}
		pixel.x = rand.nextInt(width);
		pixel.y = rand.nextInt(height);

		float normX = (pixel.x - cx) / fx;
		float normY = (pixel.y - cy) / fy;

		// z must be a fixed value for a purely translational motion to result, given the
		// fixed gradient and the fixed delta in color
		float z = 2;
		pixel.p3.x = normX * z;
		pixel.p3.y = normY * z;
		pixel.p3.z = z;
	}

	// Estimate the motion from the synthetic data
	alg.constructLinearSystem(input, new Se3_F32());
	assertTrue(alg.solveSystem());

	assertEquals(Math.abs(colorAfter - colorBefore), alg.getErrorOptical(), 1e-4f);
	// counting error can cause a small drop below 100
	assertTrue(alg.getInboundsPixels() > 95);

	return alg.motionTwist;
}
Aggregations