Use of org.bytedeco.opencv.opencv_core.Mat in project QuPath: class OpenCVTools, method createDisk.
/**
 * Create a disk filter.
 * This is a rasterized approximation of a filled circle with the specified radius.
 *
 * @param radius radius of the disk; must be &gt; 0
 * @param doMean if true, normalize the kernel by dividing by the sum of all elements.
 *               If false, all 'inside' elements are 1 and all 'outside' elements are 0.
 * @return a Mat of size {@code radius*2+1 x radius*2+1} that depicts a filled circle
 * @throws IllegalArgumentException if {@code radius <= 0}
 * @implNote this uses a distance transform, and tends to get more predictable results than {@link #getCircularStructuringElement(int)}.
 * Internally, expensive computations are reduced by caching previously calculated filters and returning only a clone.
 */
public static Mat createDisk(int radius, boolean doMean) {
	if (radius <= 0)
		throw new IllegalArgumentException("Radius must be > 0");
	// Separate caches for normalized (mean) and binary (sum) kernels
	Map<Integer, Mat> cache = doMean ? cachedMeanDisks : cachedSumDisks;
	Mat kernel = cache.get(radius);
	if (kernel != null) {
		synchronized (kernel) {
			// This may happen if the kernel was created in a PointerScope and deallocate() was called;
			// only reuse the cached Mat if it is still valid
			if (!kernel.isNull())
				return kernel.clone();
		}
	}
	// Cache miss (or deallocated entry): build the disk via a distance transform.
	// Start from an all-white image with a single zero pixel at the center...
	kernel = new Mat();
	var kernelCenter = new Mat(radius * 2 + 1, radius * 2 + 1, opencv_core.CV_8UC1, Scalar.WHITE);
	try (UByteIndexer idxKernel = kernelCenter.createIndexer()) {
		idxKernel.put(radius, radius, 0);
	}
	// ...so that each pixel's distance-transform value is its Euclidean distance from the center
	opencv_imgproc.distanceTransform(kernelCenter, kernel, opencv_imgproc.DIST_L2, opencv_imgproc.DIST_MASK_PRECISE);
	// Inverted binary threshold: pixels with distance <= radius become 1, all others 0
	opencv_imgproc.threshold(kernel, kernel, radius, 1, opencv_imgproc.THRESH_BINARY_INV);
	if (doMean) {
		// Count nonzero pixels (the sum equals the count since 'inside' values are 1),
		// then divide so all elements sum to 1 - i.e. a mean filter over the disk
		double sum = opencv_core.sumElems(kernel).get();
		opencv_core.dividePut(kernel, sum);
	}
	// Try to keep reference, even if called within a PointerScope
	kernel.retainReference();
	cache.put(radius, kernel);
	// Return a clone so callers cannot invalidate the cached kernel
	return kernel.clone();
}
Use of org.bytedeco.opencv.opencv_core.Mat in project QuPath: class OpenCVTools, method applyTiled.
/**
 * Apply a function to a {@link Mat} that strictly requires a specific input size.
 * The output is expected to have the same size as the input, but may have a different number of channels.
 * <p>
 * This method can be used to:
 * <ul>
 * <li>Split larger input into tiles of the required size, apply the function and merge the result</li>
 * <li>Pad smaller input into tiles of the required size, apply the function and strip padding from the result</li>
 * </ul>
 * If the image dimensions are not an exact multiple of the requested tile sizes, both steps may be required.
 * <p>
 * <b>Important!</b> If the output (width &amp; height) of the function is smaller than the input, it will be resized
 * to have the same dimensions and a warning will be logged.
 *
 * @param fun the function to apply to the input
 * @param mat the input Mat
 * @param tileWidth the strict tile width required by the input
 * @param tileHeight the strict tile height required by the input
 * @param borderType an OpenCV border type, in case padding is needed
 * @return the result of applying fun to mat, having applied any necessary tiling along the way
 */
@SuppressWarnings("unchecked")
public static Mat applyTiled(Function<Mat, Mat> fun, Mat mat, int tileWidth, int tileHeight, int borderType) {
	int top = 0, bottom = 0, left = 0, right = 0;
	boolean doPad = false;
	// Created before the PointerScope so the returned Mat survives scope cleanup
	Mat matResult = new Mat();
	try (var scope = new PointerScope()) {
		if (mat.cols() > tileWidth) {
			// Too wide: recurse over column strips, then join the results horizontally.
			// Each strip is cloned so the recursive call sees a standalone Mat.
			List<Mat> horizontal = new ArrayList<>();
			for (int x = 0; x < mat.cols(); x += tileWidth) {
				Mat matTemp = applyTiled(fun, mat.colRange(x, Math.min(x + tileWidth, mat.cols())).clone(), tileWidth, tileHeight, borderType);
				horizontal.add(matTemp);
			}
			opencv_core.hconcat(new MatVector(horizontal.toArray(new Mat[0])), matResult);
			return matResult;
		} else if (mat.rows() > tileHeight) {
			// Too tall: recurse over row strips, then join the results vertically
			List<Mat> vertical = new ArrayList<>();
			for (int y = 0; y < mat.rows(); y += tileHeight) {
				Mat matTemp = applyTiled(fun, mat.rowRange(y, Math.min(y + tileHeight, mat.rows())).clone(), tileWidth, tileHeight, borderType);
				vertical.add(matTemp);
			}
			opencv_core.vconcat(new MatVector(vertical.toArray(Mat[]::new)), matResult);
			return matResult;
		} else if (mat.cols() < tileWidth || mat.rows() < tileHeight) {
			// If the image is smaller than we can handle, add padding.
			// Padding is centered; any odd remainder goes to the bottom/right.
			top = (tileHeight - mat.rows()) / 2;
			left = (tileWidth - mat.cols()) / 2;
			bottom = tileHeight - mat.rows() - top;
			right = tileWidth - mat.cols() - left;
			Mat matPadded = new Mat();
			opencv_core.copyMakeBorder(mat, matPadded, top, bottom, left, right, borderType);
			mat = matPadded;
			doPad = true;
		}
		// Do the actual requested function
		matResult.put(fun.apply(mat));
		// If the function shrank the output, resize back to the input dimensions
		// (resizing is also handy to support an early StarDist implementation)
		if (matResult.rows() != mat.rows() || matResult.cols() != mat.cols()) {
			logger.warn("Resizing tiled image from {}x{} to {}x{}", matResult.cols(), matResult.rows(), mat.cols(), mat.rows());
			opencv_imgproc.resize(matResult, matResult, mat.size());
		}
		// Handle padding: crop back to the original (pre-padding) region
		if (doPad) {
			matResult.put(crop(matResult, left, top, tileWidth - right - left, tileHeight - top - bottom));
		}
		// scope.deallocate();
	}
	return matResult;
}
Use of org.bytedeco.opencv.opencv_core.Mat in project QuPath: class OpenCVTools, method getCircularStructuringElement.
/**
 * Create a Mat depicting a circle of the specified radius.
 * <p>
 * Pixels within the circle have the value 1, pixels outside are 0.
 *
 * @param radius the circle radius, in pixels
 * @return a single-channel 8-bit Mat with width and height {@code radius*2+1}
 * @deprecated {@link #createDisk(int, boolean)} gives more reliable shapes.
 */
@Deprecated
public static Mat getCircularStructuringElement(int radius) {
	// TODO: Find out why this doesn't just call a standard request for a strel...
	int side = radius * 2 + 1;
	var strel = new Mat(side, side, opencv_core.CV_8UC1, Scalar.ZERO);
	var center = new Point(radius, radius);
	// A thickness of -1 requests a filled circle rather than an outline
	opencv_imgproc.circle(strel, center, radius, Scalar.ONE, -1, opencv_imgproc.LINE_8, 0);
	return strel;
}
Use of org.bytedeco.opencv.opencv_core.Mat in project QuPath: class OpenCVTools, method matToImageProcessor.
/**
 * Convert a single-channel OpenCV {@code Mat} into an ImageJ {@code ImageProcessor}.
 * <p>
 * CV_32F maps to a {@code FloatProcessor}, CV_8U to a {@code ByteProcessor} and
 * CV_16U to a {@code ShortProcessor}; any other depth is first converted to 32-bit
 * floating point and handled recursively.
 *
 * @param mat the input Mat; must have exactly one channel
 * @return an ImageProcessor whose type matches the Mat depth
 * @throws IllegalArgumentException if the Mat has more than one channel
 */
public static ImageProcessor matToImageProcessor(Mat mat) {
	if (mat.channels() != 1)
		throw new IllegalArgumentException("Only a single-channel Mat can be converted to an ImageProcessor! Specified Mat has " + mat.channels() + " channels");
	int w = mat.cols();
	int h = mat.rows();
	if (mat.depth() == opencv_core.CV_32F) {
		// Close the indexer to release native resources promptly (as createDisk does)
		try (FloatIndexer indexer = mat.createIndexer()) {
			float[] pixels = new float[w * h];
			indexer.get(0L, pixels);
			return new FloatProcessor(w, h, pixels);
		}
	} else if (mat.depth() == opencv_core.CV_8U) {
		try (UByteIndexer indexer = mat.createIndexer()) {
			int[] pixels = new int[w * h];
			indexer.get(0L, pixels);
			ByteProcessor bp = new ByteProcessor(w, h);
			for (int i = 0; i < pixels.length; i++)
				bp.set(i, pixels[i]);
			return bp;
		}
	} else if (mat.depth() == opencv_core.CV_16U) {
		try (UShortIndexer indexer = mat.createIndexer()) {
			int[] pixels = new int[w * h];
			indexer.get(0L, pixels);
			// Narrow the unsigned 16-bit values into the signed shorts ShortProcessor stores
			short[] shortPixels = new short[pixels.length];
			for (int i = 0; i < pixels.length; i++)
				shortPixels[i] = (short)pixels[i];
			// TODO: Test!
			return new ShortProcessor(w, h, shortPixels, null);
		}
	} else {
		// Fall back on a 32-bit float conversion for any other depth;
		// try-with-resources ensures the temporary Mat is closed even if the recursive call throws
		try (Mat mat2 = new Mat()) {
			mat.convertTo(mat2, opencv_core.CV_32F);
			return matToImageProcessor(mat2);
		}
	}
}
Use of org.bytedeco.opencv.opencv_core.Mat in project QuPath: class OpenCVTools, method imageToMat.
/**
 * Convert a BufferedImage to an OpenCV Mat.
 * <p>
 * An effort will be made to do a sensible conversion based on the BufferedImage type,
 * returning a Mat with a suitable type.
 * <p>
 * BGR and RGB images will remain with the same channel order, and an alpha channel
 * (if present) will be included at the end (i.e. to give BGRA or RGBA).
 * <p>
 * Note: the behavior of this method has changed; in QuPath &lt;= 0.1.2 only
 * RGB images were really supported, and an RGB conversion was *always* made.
 *
 * @see #imageToMatRGB
 * @see #imageToMatBGR
 *
 * @param img the input image
 * @return a Mat containing the image pixels, with depth and channel count chosen from the image type
 */
public static Mat imageToMat(BufferedImage img) {
	// Handle the common packed (A)RGB / (A)BGR types directly, preserving channel order
	switch(img.getType()) {
	case BufferedImage.TYPE_INT_BGR:
	case BufferedImage.TYPE_3BYTE_BGR:
		return imageToMatBGR(img, false);
	case BufferedImage.TYPE_4BYTE_ABGR:
	case BufferedImage.TYPE_4BYTE_ABGR_PRE:
		return imageToMatBGR(img, true);
	case BufferedImage.TYPE_INT_ARGB:
	case BufferedImage.TYPE_INT_ARGB_PRE:
		return imageToMatRGB(img, true);
	case BufferedImage.TYPE_USHORT_555_RGB:
	case BufferedImage.TYPE_USHORT_565_RGB:
	case BufferedImage.TYPE_INT_RGB:
		return imageToMatRGB(img, false);
	case BufferedImage.TYPE_USHORT_GRAY:
		// Intentional fall-through to the generic raster-based path below
	}
	// Generic path: map the raster's DataBuffer type to a corresponding OpenCV type
	int width = img.getWidth();
	int height = img.getHeight();
	WritableRaster raster = img.getRaster();
	DataBuffer buffer = raster.getDataBuffer();
	int nChannels = raster.getNumBands();
	int typeCV;
	switch(buffer.getDataType()) {
	case DataBuffer.TYPE_BYTE:
		typeCV = opencv_core.CV_8UC(nChannels);
		break;
	// break;
	case DataBuffer.TYPE_FLOAT:
		typeCV = opencv_core.CV_32FC(nChannels);
		break;
	case DataBuffer.TYPE_INT:
		// Assuming signed int
		typeCV = opencv_core.CV_32SC(nChannels);
		break;
	case DataBuffer.TYPE_SHORT:
		typeCV = opencv_core.CV_16SC(nChannels);
		break;
	case DataBuffer.TYPE_USHORT:
		typeCV = opencv_core.CV_16UC(nChannels);
		break;
	default:
		// Assume 64-bit is as flexible as we can manage
		typeCV = opencv_core.CV_64FC(nChannels);
	}
	// Create a new Mat & put the pixels
	Mat mat = new Mat(height, width, typeCV, Scalar.ZERO);
	putPixels(raster, mat);
	return mat;
}
Aggregations