use of boofcv.struct.image.GrayF32 in project BoofCV by lessthanoptimal.
the class FactoryDetectDescribe method sift.
/**
 * Creates a new SIFT feature detector and describer.
 *
 * @see CompleteSift
 *
 * @param config Configuration for the SIFT detector and descriptor.
 * @return SIFT detector and descriptor
 */
public static <T extends ImageGray<T>> DetectDescribePoint<T, BrightFeature> sift(@Nullable ConfigCompleteSift config) {
    if (config == null)
        config = new ConfigCompleteSift();

    ConfigSiftScaleSpace configSS = config.scaleSpace;
    ConfigSiftDetector configDetector = config.detector;
    ConfigSiftOrientation configOri = config.orientation;
    ConfigSiftDescribe configDesc = config.describe;

    // scale space in which features are detected
    SiftScaleSpace scaleSpace = new SiftScaleSpace(configSS.firstOctave, configSS.lastOctave,
            configSS.numScales, configSS.sigma0);

    // estimates each feature's dominant orientation from a histogram of local gradients
    OrientationHistogramSift<GrayF32> orientation = new OrientationHistogramSift<>(
            configOri.histogramSize, configOri.sigmaEnlarge, GrayF32.class);

    // computes the SIFT descriptor around each detected feature
    DescribePointSift<GrayF32> describe = new DescribePointSift<>(configDesc.widthSubregion, configDesc.widthGrid,
            configDesc.numHistogramBins, configDesc.sigmaToPixels, configDesc.weightingSigmaFraction,
            configDesc.maxDescriptorElementValue, GrayF32.class);

    // non-maximum suppression, optionally limiting the number of features per scale
    NonMaxSuppression nns = FactoryFeatureExtractor.nonmax(configDetector.extract);
    NonMaxLimiter nonMax = new NonMaxLimiter(nns, configDetector.maxFeaturesPerScale);

    CompleteSift dds = new CompleteSift(scaleSpace, configDetector.edgeR, nonMax, orientation, describe);
    return new DetectDescribe_CompleteSift<>(dds);
}
use of boofcv.struct.image.GrayF32 in project BoofCV by lessthanoptimal.
the class HornSchunckPyramid method process.
/**
 * Computes dense optical flow from the two provided image pyramids. The image gradient for each
 * layer is computed directly from that layer's images.
 *
 * @param image1 Pyramid of first image
 * @param image2 Pyramid of second image
 */
@Override
public void process(ImagePyramid<GrayF32> image1, ImagePyramid<GrayF32> image2) {
    // process the pyramids from the lowest resolution layer to the highest
    boolean first = true;
    for (int i = image1.getNumLayers() - 1; i >= 0; i--) {
        GrayF32 layer1 = image1.getLayer(i);
        GrayF32 layer2 = image2.getLayer(i);

        // resize the internal work images for this layer
        deriv2X.reshape(layer1.width, layer1.height);
        deriv2Y.reshape(layer1.width, layer1.height);
        warpDeriv2X.reshape(layer1.width, layer1.height);
        warpDeriv2Y.reshape(layer1.width, layer1.height);
        warpImage2.reshape(layer1.width, layer1.height);

        // compute the gradient for the second image
        gradient.process(layer2, deriv2X, deriv2Y);

        if (!first) {
            // interpolate the initial flow from the previous (coarser) layer
            interpolateFlowScale(layer1.width, layer1.height);
        } else {
            // the coarsest layer has no prior flow information, so initialize everything to zero
            first = false;
            initFlowX.reshape(layer1.width, layer1.height);
            initFlowY.reshape(layer1.width, layer1.height);
            flowX.reshape(layer1.width, layer1.height);
            flowY.reshape(layer1.width, layer1.height);
            ImageMiscOps.fill(flowX, 0);
            ImageMiscOps.fill(flowY, 0);
            ImageMiscOps.fill(initFlowX, 0);
            ImageMiscOps.fill(initFlowY, 0);
        }

        // compute the flow for this layer
        processLayer(layer1, layer2, deriv2X, deriv2Y);
    }
}
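In application code this pyramidal flow is normally reached through BoofCV's dense optical flow factory rather than by building the pyramids and calling process() directly. A rough sketch, assuming FactoryDenseOpticalFlow.hornSchunckPyramid with a default (null) configuration is acceptable:

public static void exampleDenseFlow(GrayF32 previous, GrayF32 current) {
    // pyramidal Horn-Schunck with default configuration; the image pyramids are built internally
    DenseOpticalFlow<GrayF32> denseFlow =
            FactoryDenseOpticalFlow.hornSchunckPyramid(null, GrayF32.class);

    // output flow field with one (x, y) motion vector per pixel
    ImageFlow flow = new ImageFlow(previous.width, previous.height);
    denseFlow.process(previous, current, flow);

    // read a single flow vector
    ImageFlow.D f = flow.get(10, 10);
    if (f.isValid())
        System.out.println("flow at (10,10) = (" + f.x + ", " + f.y + ")");
}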
use of boofcv.struct.image.GrayF32 in project BoofCV by lessthanoptimal.
the class TestRemovePerspectiveDistortion method identity.
/**
 * The computed transform should not rescale; output pixel coordinates should map directly onto the input quadrilateral.
 */
@Test
public void identity() {
    RemovePerspectiveDistortion<GrayF32> alg =
            new RemovePerspectiveDistortion<>(30, 40, ImageType.single(GrayF32.class));

    // the quadrilateral is the same size as the 30x40 output, so no scaling should occur
    alg.createTransform(new Point2D_F64(20, 30), new Point2D_F64(50, 30),
            new Point2D_F64(50, 70), new Point2D_F64(20, 70));

    Point2D_F32 p = new Point2D_F32();
    PointTransformHomography_F32 transform = alg.getTransform();

    // (0,0) in the output maps to the quadrilateral's top-left corner
    transform.compute(0, 0, p);
    assertTrue(p.distance(20, 30) < UtilEjml.TEST_F64);

    // (30,40) in the output maps to the quadrilateral's bottom-right corner
    transform.compute(30, 40, p);
    assertTrue(p.distance(50, 70) < UtilEjml.TEST_F64);
}
use of boofcv.struct.image.GrayF32 in project BoofCV by lessthanoptimal.
the class TestRemovePerspectiveDistortion method undoDistortion.
@Test
public void undoDistortion() {
    GrayF32 expected = new GrayF32(30, 40);
    GrayF32 input = new GrayF32(200, 150);

    Point2D_F64 topLeft = new Point2D_F64(30, 20);
    Point2D_F64 topRight = new Point2D_F64(80, 30);
    Point2D_F64 bottomRight = new Point2D_F64(70, 90);
    Point2D_F64 bottomLeft = new Point2D_F64(25, 80);

    GImageMiscOps.fill(expected, 255);
    GImageMiscOps.fillRectangle(expected, 100, 10, 10, 15, 25);

    // apply a homography distortion to the expected image, rendering it into the input image
    applyForwardTransform(expected, input, topLeft, topRight, bottomRight, bottomLeft);

    // now undo the distortion with the class under test
    RemovePerspectiveDistortion<GrayF32> alg =
            new RemovePerspectiveDistortion<>(30, 40, ImageType.single(GrayF32.class));
    assertTrue(alg.apply(input, topLeft, topRight, bottomRight, bottomLeft));

    GrayF32 found = alg.getOutput();
    GrayF32 difference = found.createSameShape();
    PixelMath.diffAbs(expected, found, difference);

    // the average per-pixel difference should be small
    double error = ImageStatistics.sum(difference) / (difference.width * difference.height);
    assertTrue(error < 10);
}
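Outside of the test, the class is typically pointed at a quadrilateral region of a real image and asked to render it into a fixed-size, distortion-free output. A minimal sketch, with the file name and corner coordinates as placeholders (corners are passed in the same top-left, top-right, bottom-right, bottom-left order used above):

public static void exampleRemovePerspective() {
    GrayF32 image = UtilImageIO.loadImage("photo.png", GrayF32.class);

    // the output is always 400x300, regardless of the quadrilateral's size in the input
    RemovePerspectiveDistortion<GrayF32> removePerspective =
            new RemovePerspectiveDistortion<>(400, 300, ImageType.single(GrayF32.class));

    // placeholder corner coordinates of the region to rectify
    boolean success = removePerspective.apply(image,
            new Point2D_F64(100, 80), new Point2D_F64(520, 95),
            new Point2D_F64(505, 400), new Point2D_F64(90, 380));

    if (success) {
        GrayF32 rectified = removePerspective.getOutput();
        System.out.println("rectified image is " + rectified.width + " x " + rectified.height);
    }
}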
use of boofcv.struct.image.GrayF32 in project BoofCV by lessthanoptimal.
the class TestMultiCameraToEquirectangular method addCamera_explicit_mask.
@Test
public void addCamera_explicit_mask() {
    MultiCameraToEquirectangular<GrayF32> alg = createAlgorithm();

    // the explicit mask marks only the left half of the image as valid
    GrayU8 mask = new GrayU8(inputWidth, inputHeight);
    for (int y = 0; y < inputHeight; y++) {
        for (int x = 0; x < inputWidth / 2; x++) {
            mask.set(x, y, 1);
        }
    }

    alg.addCamera(new Se3_F32(), new HelperDistortion(), mask);
    MultiCameraToEquirectangular.Camera c = alg.cameras.get(0);

    // only the top-left quadrant should remain valid: the right half is removed by the
    // explicit mask and the bottom half is rejected because its values are repeated
    int correct = 0;
    for (int y = 0; y < inputHeight; y++) {
        for (int x = 0; x < inputWidth; x++) {
            boolean valid = y < inputHeight / 2 && x < inputWidth / 2;
            if (valid && c.mask.get(x, y) > 0) {
                correct++;
            }
        }
    }

    // the number of valid pixels should be within 5% of one quarter of the image
    double found = Math.abs(1.0 - correct / (inputWidth * inputHeight / 4.0));
    assertTrue(found <= 0.05);
}