Use of org.bytedeco.opencv.opencv_core.MatVector in project qupath by qupath.
The class DetectCytokeratinCV, method getArea.
/**
 * Get an Area object corresponding to contours in a binary image from OpenCV.
 * @param mat a binary image in which contours should be found
 * @return the corresponding Area, or null if the Mat is empty or contains no contours
 */
private static Area getArea(final Mat mat) {
    if (mat.empty())
        return null;
    // Identify all contours
    MatVector contours = new MatVector();
    Mat hierarchy = new Mat();
    opencv_imgproc.findContours(mat, contours, hierarchy, opencv_imgproc.RETR_TREE, opencv_imgproc.CHAIN_APPROX_SIMPLE);
    if (contours.empty()) {
        hierarchy.close();
        return null;
    }
    Area area = new Area();
    updateArea(contours, hierarchy, area, 0, 0);
    hierarchy.close();
    return area;
}
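Since getArea is private, it is called from elsewhere in DetectCytokeratinCV; a minimal sketch of such a call, assuming the usual org.bytedeco.opencv imports (the file name and threshold value are illustrative, not taken from the QuPath source):

    // Threshold a grayscale image to produce a binary mask,
    // then extract its contours as a java.awt.geom.Area
    Mat gray = opencv_imgcodecs.imread("mask.png", opencv_imgcodecs.IMREAD_GRAYSCALE);
    Mat binary = new Mat();
    opencv_imgproc.threshold(gray, binary, 127, 255, opencv_imgproc.THRESH_BINARY);
    Area area = getArea(binary);  // null if the mask is empty or has no contours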
Use of org.bytedeco.opencv.opencv_core.MatVector in project qupath by qupath.
The class DnnTools, method blobFromImages.
/**
 * Create an OpenCV blob from a batch of Mats, with optional scaling, resizing and cropping.
 * @param mats input images
 * @param scaleFactor scale factor
 * @param size input width and height
 * @param mean mean values for subtraction
 * @param swapRB swap the red and blue channels of the mean values
 * @param crop center crop after resizing if needed
 * @return a blob with axis order NCHW
 */
public static Mat blobFromImages(Collection<Mat> mats, double scaleFactor, Size size, Scalar mean, boolean swapRB, boolean crop) {
    Mat blob = null;
    Mat first = mats.iterator().next();
    int nChannels = first.channels();
    if (nChannels == 1 || nChannels == 3 || nChannels == 4) {
        if (mats.size() == 1)
            blob = opencv_dnn.blobFromImage(first, scaleFactor, size, mean, swapRB, crop, opencv_core.CV_32F);
        else
            blob = opencv_dnn.blobFromImages(new MatVector(mats.toArray(Mat[]::new)), scaleFactor, size, mean, swapRB, crop, opencv_core.CV_32F);
    } else {
        // TODO: Don't have any net to test this with currently...
        logger.warn("Attempting to reshape an image with " + nChannels + " channels - this may not work! "
                + "Only 1, 3 and 4 channels are fully supported; preprocessing will be ignored.");
        // Blob is a 4D tensor [NCHW]
        int[] shape = new int[4];
        Arrays.fill(shape, 1);
        int nRows = first.size(0);
        int nCols = first.size(1);
        shape[0] = mats.size();
        shape[1] = nChannels;
        shape[2] = nRows;
        shape[3] = nCols;
        blob = new Mat(shape, opencv_core.CV_32F);
        var idxBlob = blob.createIndexer();
        long[] indsBlob = new long[4];
        int n = 0;
        for (var mat : mats) {
            indsBlob[0] = n++;
            long[] indsMat = new long[4];
            var idxMat = mat.createIndexer();
            for (int r = 0; r < nRows; r++) {
                indsMat[0] = r;
                indsBlob[2] = r;
                for (int c = 0; c < nCols; c++) {
                    indsMat[1] = c;
                    indsBlob[3] = c;
                    for (int channel = 0; channel < nChannels; channel++) {
                        indsMat[2] = channel;
                        indsBlob[1] = channel;
                        double val = idxMat.getDouble(indsMat);
                        idxBlob.putDouble(indsBlob, val);
                    }
                }
            }
            idxMat.close();
        }
        idxBlob.close();
    }
    return blob;
}
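A minimal usage sketch, assuming two RGB images on disk and the usual org.bytedeco.opencv imports (the file names, scale factor and input size are illustrative):

    // Build an NCHW blob from two images, scaled to [0, 1] and resized to 224x224
    List<Mat> images = List.of(
            opencv_imgcodecs.imread("a.png"),
            opencv_imgcodecs.imread("b.png"));
    Mat blob = DnnTools.blobFromImages(images, 1.0 / 255.0, new Size(224, 224),
            new Scalar(0.0, 0.0, 0.0, 0.0), true, false);  // swapRB = true because imread returns BGR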
Use of org.bytedeco.opencv.opencv_core.MatVector in project qupath by qupath.
The class OpenCVTools, method matToBufferedImage.
/**
 * Convert a Mat to a BufferedImage.
 * <p>
 * If no ColorModel is specified, a grayscale model will be used for single-channel 8-bit
 * images and RGB/ARGB for 3/4 channel 8-bit images.
 * <p>
 * For all other cases a ColorModel should be specified for meaningful display.
 *
 * @param mat the Mat to convert
 * @param colorModel the ColorModel to use; may be null for 8-bit images
 * @return the corresponding BufferedImage
 */
public static BufferedImage matToBufferedImage(final Mat mat, ColorModel colorModel) {
    int type;
    int bpp = 0;
    switch (mat.depth()) {
        case opencv_core.CV_8U:
            type = DataBuffer.TYPE_BYTE;
            bpp = 8;
            break;
        case opencv_core.CV_8S:
            // DataBuffer.TYPE_BYTE is unsigned, so promote signed 8-bit values to short
            type = DataBuffer.TYPE_SHORT;
            bpp = 16;
            break;
        case opencv_core.CV_16U:
            type = DataBuffer.TYPE_USHORT;
            bpp = 16;
            break;
        case opencv_core.CV_16S:
            type = DataBuffer.TYPE_SHORT;
            bpp = 16;
            break;
        case opencv_core.CV_32S:
            type = DataBuffer.TYPE_INT;
            bpp = 32;
            break;
        case opencv_core.CV_32F:
            type = DataBuffer.TYPE_FLOAT;
            bpp = 32;
            break;
        default:
            logger.warn("Unknown Mat depth {}, will default to CV_64F ({})", mat.depth(), opencv_core.CV_64F);
            // Fall through to treat the data as double
        case opencv_core.CV_64F:
            type = DataBuffer.TYPE_DOUBLE;
            bpp = 64;
    }
    // Create a suitable raster
    int width = mat.cols();
    int height = mat.rows();
    int channels = mat.channels();
    // We might generate an image for a special case
    BufferedImage img = null;
    // Handle some special cases
    if (colorModel == null) {
        if (type == DataBuffer.TYPE_BYTE) {
            if (channels == 1) {
                img = new BufferedImage(width, height, BufferedImage.TYPE_BYTE_GRAY);
                // TODO: Set the bytes
            } else if (channels == 3) {
                img = new BufferedImage(width, height, BufferedImage.TYPE_INT_RGB);
            } else if (channels == 4) {
                img = new BufferedImage(width, height, BufferedImage.TYPE_INT_ARGB);
            }
        }
    } else if (colorModel instanceof IndexColorModel) {
        img = new BufferedImage(width, height, BufferedImage.TYPE_BYTE_INDEXED, (IndexColorModel) colorModel);
    }
    // Create the image
    WritableRaster raster;
    if (img != null) {
        raster = img.getRaster();
    } else if (colorModel != null) {
        raster = colorModel.createCompatibleWritableRaster(width, height);
        img = new BufferedImage(colorModel, raster, false, null);
    } else {
        // Create some kind of raster we can use
        var sampleModel = new BandedSampleModel(type, width, height, channels);
        raster = WritableRaster.createWritableRaster(sampleModel, null);
        // We do need a ColorModel of some description
        colorModel = ColorModelFactory.getDummyColorModel(bpp * channels);
        img = new BufferedImage(colorModel, raster, false, null);
    }
    MatVector matvector = new MatVector();
    opencv_core.split(mat, matvector);
    // We don't know which of the 3 supported array types will be needed yet...
    int[] pixelsInt = null;
    float[] pixelsFloat = null;
    double[] pixelsDouble = null;
    for (int b = 0; b < channels; b++) {
        // Extract pixels for the current channel
        Mat matChannel = matvector.get(b);
        Indexer indexer = matChannel.createIndexer();
        if (indexer instanceof UByteIndexer) {
            if (pixelsInt == null)
                pixelsInt = new int[width * height];
            ((UByteIndexer) indexer).get(0L, pixelsInt);
        } else if (indexer instanceof UShortIndexer) {
            if (pixelsInt == null)
                pixelsInt = new int[width * height];
            ((UShortIndexer) indexer).get(0L, pixelsInt);
        } else if (indexer instanceof FloatIndexer) {
            if (pixelsFloat == null)
                pixelsFloat = new float[width * height];
            ((FloatIndexer) indexer).get(0L, pixelsFloat);
        } else if (indexer instanceof DoubleIndexer) {
            if (pixelsDouble == null)
                pixelsDouble = new double[width * height];
            ((DoubleIndexer) indexer).get(0L, pixelsDouble);
        } else {
            // This is inefficient, but unlikely to occur too often
            if (pixelsDouble == null)
                pixelsDouble = new double[width * height];
            for (int y = 0; y < height; y++) {
                for (int x = 0; x < width; x++) {
                    // matChannel contains a single channel, so index by (row, col) only
                    pixelsDouble[y * width + x] = indexer.getDouble(y, x);
                }
            }
        }
        // Set the samples
        if (pixelsInt != null)
            raster.setSamples(0, 0, width, height, b, pixelsInt);
        else if (pixelsFloat != null)
            raster.setSamples(0, 0, width, height, b, pixelsFloat);
        else if (pixelsDouble != null)
            raster.setSamples(0, 0, width, height, b, pixelsDouble);
    }
    return img;
}
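A minimal usage sketch, assuming an 8-bit, 3-channel image on disk plus javax.imageio.ImageIO (the file names are illustrative):

    // Read an image, convert BGR (OpenCV's default ordering) to RGB, then save as PNG
    Mat mat = opencv_imgcodecs.imread("input.png");
    opencv_imgproc.cvtColor(mat, mat, opencv_imgproc.COLOR_BGR2RGB);
    BufferedImage img = OpenCVTools.matToBufferedImage(mat, null);  // null -> default RGB model for 3 channels
    ImageIO.write(img, "png", new File("output.png"));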
Use of org.bytedeco.opencv.opencv_core.MatVector in project qupath by qupath.
The class OpenCVTools, method applyTiled.
/**
 * Apply a function to a {@link Mat} that strictly requires a specific input size.
 * The output is expected to have the same size as the input, but may have a different number of channels.
 * <p>
 * This method can be used to:
 * <ul>
 * <li>Split larger input into tiles of the required size, apply the function and merge the result</li>
 * <li>Pad smaller input into tiles of the required size, apply the function and strip padding from the result</li>
 * </ul>
 * If the image dimensions are not an exact multiple of the requested tile sizes, both steps may be required.
 * <p>
 * <b>Important!</b> If the output (width &amp; height) of the function is smaller than the input, it will be resized
 * to have the same dimensions and a warning will be logged.
 *
 * @param fun the function to apply to the input
 * @param mat the input Mat
 * @param tileWidth the strict tile width required by the input
 * @param tileHeight the strict tile height required by the input
 * @param borderType an OpenCV border type, in case padding is needed
 * @return the result of applying fun to mat, having applied any necessary tiling along the way
 */
@SuppressWarnings("unchecked")
public static Mat applyTiled(Function<Mat, Mat> fun, Mat mat, int tileWidth, int tileHeight, int borderType) {
    int top = 0, bottom = 0, left = 0, right = 0;
    boolean doPad = false;
    Mat matResult = new Mat();
    try (var scope = new PointerScope()) {
        if (mat.cols() > tileWidth) {
            // Recursively process vertical strips, then concatenate horizontally
            List<Mat> horizontal = new ArrayList<>();
            for (int x = 0; x < mat.cols(); x += tileWidth) {
                Mat matTemp = applyTiled(fun, mat.colRange(x, Math.min(x + tileWidth, mat.cols())).clone(), tileWidth, tileHeight, borderType);
                horizontal.add(matTemp);
            }
            opencv_core.hconcat(new MatVector(horizontal.toArray(new Mat[0])), matResult);
            return matResult;
        } else if (mat.rows() > tileHeight) {
            // Recursively process horizontal strips, then concatenate vertically
            List<Mat> vertical = new ArrayList<>();
            for (int y = 0; y < mat.rows(); y += tileHeight) {
                Mat matTemp = applyTiled(fun, mat.rowRange(y, Math.min(y + tileHeight, mat.rows())).clone(), tileWidth, tileHeight, borderType);
                vertical.add(matTemp);
            }
            opencv_core.vconcat(new MatVector(vertical.toArray(Mat[]::new)), matResult);
            return matResult;
        } else if (mat.cols() < tileWidth || mat.rows() < tileHeight) {
            // If the image is smaller than we can handle, add padding
            top = (tileHeight - mat.rows()) / 2;
            left = (tileWidth - mat.cols()) / 2;
            bottom = tileHeight - mat.rows() - top;
            right = tileWidth - mat.cols() - left;
            Mat matPadded = new Mat();
            opencv_core.copyMakeBorder(mat, matPadded, top, bottom, left, right, borderType);
            mat = matPadded;
            doPad = true;
        }
        // Do the actual requested function
        matResult.put(fun.apply(mat));
        // Resize if necessary (resizing is also handy to support an early StarDist implementation)
        if (matResult.rows() != mat.rows() || matResult.cols() != mat.cols()) {
            logger.warn("Resizing tiled image from {}x{} to {}x{}", matResult.cols(), matResult.rows(), mat.cols(), mat.rows());
            opencv_imgproc.resize(matResult, matResult, mat.size());
        }
        // Strip any padding that was added
        if (doPad) {
            matResult.put(crop(matResult, left, top, tileWidth - right - left, tileHeight - top - bottom));
        }
    }
    return matResult;
}
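A minimal usage sketch; GaussianBlur merely stands in for a function with a strict input size (in practice fun would typically wrap a DNN with a fixed input shape), and the file name and tile size are illustrative:

    // Apply a per-tile operation across an image larger than 512x512,
    // using reflective padding where tiles extend beyond the image
    Mat largeMat = opencv_imgcodecs.imread("large.png");
    Mat result = OpenCVTools.applyTiled(tile -> {
        Mat out = new Mat();
        opencv_imgproc.GaussianBlur(tile, out, new Size(5, 5), 2.0);
        return out;
    }, largeMat, 512, 512, opencv_core.BORDER_REFLECT);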
Use of org.bytedeco.opencv.opencv_core.MatVector in project qupath by qupath.
The class OpenCVTools, method matToImagePlus.
/**
 * Convert one or more OpenCV {@code Mat}s into an ImageJ {@code ImagePlus}.
 *
 * @param title the title for the ImagePlus
 * @param mats one or more Mats; multichannel Mats are split, and all must have the same number of channels
 * @return the ImagePlus, as a CompositeImage if there are multiple channels
 */
public static ImagePlus matToImagePlus(String title, Mat... mats) {
    ImageStack stack = null;
    int nChannels = 1;
    for (Mat mat : mats) {
        if (stack == null) {
            stack = new ImageStack(mat.cols(), mat.rows());
        } else if (mat.channels() != nChannels) {
            throw new IllegalArgumentException("Number of channels must be the same for all Mats!");
        }
        if (mat.channels() == 1) {
            ImageProcessor ip = matToImageProcessor(mat);
            stack.addSlice(ip);
        } else {
            nChannels = mat.channels();
            MatVector split = new MatVector();
            opencv_core.split(mat, split);
            for (int c = 0; c < split.size(); c++)
                stack.addSlice(matToImageProcessor(split.get(c)));
        }
    }
    ImagePlus imp = new ImagePlus(title, stack);
    imp.setDimensions(nChannels, mats.length, 1);
    return nChannels == 1 ? imp : new CompositeImage(imp);
}
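A minimal usage sketch, assuming two single-channel images on disk (the file names and title are illustrative):

    // Build a 2-slice stack from two grayscale Mats and display it in ImageJ
    Mat mat1 = opencv_imgcodecs.imread("slice1.tif", opencv_imgcodecs.IMREAD_GRAYSCALE);
    Mat mat2 = opencv_imgcodecs.imread("slice2.tif", opencv_imgcodecs.IMREAD_GRAYSCALE);
    ImagePlus imp = OpenCVTools.matToImagePlus("Stack", mat1, mat2);
    imp.show();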