Use of org.bytedeco.opencv.opencv_core.Mat in project qupath by qupath.
Class OpenCVTools, method matToSimpleImage.
/**
 * Convert a Mat to a {@link SimpleImage}.
 * @param mat the input Mat
 * @param channel the channel to extract, used only if the Mat has more than one channel
 * @return a float-based SimpleImage with the same width and height as the Mat
 */
public static SimpleImage matToSimpleImage(Mat mat, int channel) {
    Mat temp = mat;
    if (mat.channels() > 1) {
        temp = new Mat();
        opencv_core.extractChannel(mat, temp, channel);
    }
    float[] pixels = extractPixels(temp, (float[]) null);
    return SimpleImages.createFloatImage(pixels, mat.cols(), mat.rows());
}
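A minimal usage sketch (not from the qupath source; the package names qupath.opencv.tools and qupath.lib.analysis.images are assumptions based on QuPath's usual source layout). It builds a constant 3-channel float Mat and extracts the second channel:

import org.bytedeco.opencv.global.opencv_core;
import org.bytedeco.opencv.opencv_core.Mat;
import org.bytedeco.opencv.opencv_core.Scalar;
import qupath.lib.analysis.images.SimpleImage;
import qupath.opencv.tools.OpenCVTools;

public class MatToSimpleImageDemo {
    public static void main(String[] args) {
        // A 64x64, 3-channel float Mat with constant per-channel values (1, 2, 3)
        Mat mat = new Mat(64, 64, opencv_core.CV_32FC3, new Scalar(1.0, 2.0, 3.0, 0.0));
        // Extract channel 1 (the second channel) as a float-backed SimpleImage
        SimpleImage image = OpenCVTools.matToSimpleImage(mat, 1);
        System.out.println(image.getValue(0, 0)); // expected: 2.0, the value of channel 1
        mat.close();
    }
}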
Use of org.bytedeco.opencv.opencv_core.Mat in project qupath by qupath.
Class OpenCVTools, method getCircularStructuringElement.
/**
 * Create a Mat depicting a circle of the specified radius.
 * <p>
 * Pixels within the circle have the value 1, pixels outside are 0.
 *
 * @param radius the circle radius, in pixels
 * @return a single-channel CV_8UC1 Mat with width and height equal to radius*2+1
 * @deprecated {@link #createDisk(int, boolean)} gives more reliable shapes.
 */
@Deprecated
public static Mat getCircularStructuringElement(int radius) {
    // TODO: Find out why this doesn't just call a standard request for a strel...
    Mat strel = new Mat(radius * 2 + 1, radius * 2 + 1, opencv_core.CV_8UC1, Scalar.ZERO);
    opencv_imgproc.circle(strel, new Point(radius, radius), radius, Scalar.ONE, -1, opencv_imgproc.LINE_8, 0);
    return strel;
}
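The returned structuring element can be passed straight to OpenCV's morphology functions. A short sketch, again assuming the qupath.opencv.tools package; the deprecation note points to createDisk(int, boolean) as the preferred replacement:

import org.bytedeco.javacpp.indexer.UByteIndexer;
import org.bytedeco.opencv.global.opencv_core;
import org.bytedeco.opencv.global.opencv_imgproc;
import org.bytedeco.opencv.opencv_core.Mat;
import org.bytedeco.opencv.opencv_core.Scalar;
import qupath.opencv.tools.OpenCVTools;

public class StructuringElementDemo {
    public static void main(String[] args) {
        // A binary mask with a single foreground pixel in the middle
        Mat binary = new Mat(15, 15, opencv_core.CV_8UC1, Scalar.ZERO);
        try (UByteIndexer idx = binary.createIndexer()) {
            idx.put(7, 7, 255);
        }
        // Dilate with a disk of radius 3 (deprecated method; createDisk(3, false) is preferred)
        Mat strel = OpenCVTools.getCircularStructuringElement(3);
        opencv_imgproc.dilate(binary, binary, strel);
        System.out.println(opencv_core.countNonZero(binary)); // area of the radius-3 disk
    }
}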
Use of org.bytedeco.opencv.opencv_core.Mat in project qupath by qupath.
Class OpenCVTools, method shrinkLabels.
/**
 * Shrink labels to a single point.
 * This works by iterating through each label and retaining only the labeled pixel closest to the centroid
 * of all pixels with the same label, setting all other pixels within the component to zero.
 *
 * @param mat label mat (must be CV_32S)
 * @return the labeled image, with only one pixel per label greater than zero
 */
public static Mat shrinkLabels(Mat mat) {
    if (mat.channels() != 1)
        throw new IllegalArgumentException("shrinkLabels requires a single-channel mat, but input has " + mat.channels() + " channels");
    var points = labelsToPoints(mat);
    var mat2 = new Mat(mat.rows(), mat.cols(), mat.type(), Scalar.ZERO);
    try (IntIndexer idx2 = mat2.createIndexer()) {
        for (var p : points) {
            idx2.putDouble(p.inds, p.getValue());
        }
    }
    return mat2;
}
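A small sketch of the expected behaviour, under the same package assumption: a 3x3 block of label 1 should collapse to the single pixel nearest its centroid at (2, 2):

import org.bytedeco.javacpp.indexer.IntIndexer;
import org.bytedeco.opencv.global.opencv_core;
import org.bytedeco.opencv.opencv_core.Mat;
import org.bytedeco.opencv.opencv_core.Scalar;
import qupath.opencv.tools.OpenCVTools;

public class ShrinkLabelsDemo {
    public static void main(String[] args) {
        // A 5x5 CV_32S label image containing one 3x3 component with label 1
        Mat labels = new Mat(5, 5, opencv_core.CV_32SC1, Scalar.ZERO);
        try (IntIndexer idx = labels.createIndexer()) {
            for (long y = 1; y <= 3; y++)
                for (long x = 1; x <= 3; x++)
                    idx.put(y, x, 1);
        }
        Mat shrunk = OpenCVTools.shrinkLabels(labels);
        // Only the pixel closest to the component centroid should keep its label
        try (IntIndexer idx = shrunk.createIndexer()) {
            System.out.println(idx.get(2, 2)); // expected: 1
        }
    }
}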
Use of org.bytedeco.opencv.opencv_core.Mat in project qupath by qupath.
Class OpenCVTools, method labelImage.
/**
 * Create a labelled image from a binary image using findContours and drawContours.
 * @param matBinary the input binary image
 * @param matLabels the Mat to receive the output labels
 * @param contourRetrievalMode the contour retrieval mode, as defined by OpenCV's findContours
 * @deprecated Use {@link #label(Mat, Mat, int)} instead.
 */
@Deprecated
public static void labelImage(Mat matBinary, Mat matLabels, int contourRetrievalMode) {
    MatVector contours = new MatVector();
    Mat hierarchy = new Mat();
    opencv_imgproc.findContours(matBinary, contours, hierarchy, contourRetrievalMode, opencv_imgproc.CHAIN_APPROX_SIMPLE);
    Point offset = new Point(0, 0);
    for (int c = 0; c < contours.size(); c++) {
        opencv_imgproc.drawContours(matLabels, contours, c, Scalar.all(c + 1), -1, 8, hierarchy, 2, offset);
    }
    hierarchy.close();
    contours.close();
}
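A usage sketch showing the deprecated method next to its documented replacement. The retrieval mode constant comes from OpenCV's findContours; the meaning of the int parameter of label(Mat, Mat, int) is an assumption here (pixel connectivity of 4 or 8, matching OpenCV's connectedComponents):

import org.bytedeco.opencv.global.opencv_core;
import org.bytedeco.opencv.global.opencv_imgproc;
import org.bytedeco.opencv.opencv_core.Mat;
import org.bytedeco.opencv.opencv_core.Point;
import org.bytedeco.opencv.opencv_core.Scalar;
import qupath.opencv.tools.OpenCVTools;

public class LabelDemo {
    public static void main(String[] args) {
        // A binary mask with one filled circle
        Mat binary = new Mat(32, 32, opencv_core.CV_8UC1, Scalar.ZERO);
        opencv_imgproc.circle(binary, new Point(16, 16), 5, Scalar.all(255), -1, opencv_imgproc.LINE_8, 0);
        // Deprecated: label via findContours/drawContours into a preallocated Mat
        Mat labels = new Mat(32, 32, opencv_core.CV_32SC1, Scalar.ZERO);
        OpenCVTools.labelImage(binary, labels, opencv_imgproc.RETR_EXTERNAL);
        // Preferred replacement per the deprecation note; 8 assumed to be the connectivity
        Mat labels2 = new Mat();
        OpenCVTools.label(binary, labels2, 8);
    }
}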
Use of org.bytedeco.opencv.opencv_core.Mat in project qupath by qupath.
Class OpenCVTools, method matToBufferedImage.
/**
 * Convert a Mat to a BufferedImage.
 * <p>
 * If no ColorModel is specified, a grayscale model will be used for single-channel 8-bit
 * images and RGB/ARGB for 3/4 channel 8-bit images.
 * <p>
 * For all other cases a ColorModel should be specified for meaningful display.
 *
 * @param mat the input Mat
 * @param colorModel an optional ColorModel; may be null for the 8-bit cases described above
 * @return a BufferedImage containing the pixel values of the Mat
 */
public static BufferedImage matToBufferedImage(final Mat mat, ColorModel colorModel) {
    int type;
    int bpp = 0;
    switch (mat.depth()) {
        case opencv_core.CV_8U:
            type = DataBuffer.TYPE_BYTE;
            bpp = 8;
            break;
        case opencv_core.CV_8S:
            // DataBuffer.TYPE_BYTE is unsigned, so promote signed 8-bit values to short
            type = DataBuffer.TYPE_SHORT;
            bpp = 16;
            break;
        case opencv_core.CV_16U:
            type = DataBuffer.TYPE_USHORT;
            bpp = 16;
            break;
        case opencv_core.CV_16S:
            type = DataBuffer.TYPE_SHORT;
            bpp = 16;
            break;
        case opencv_core.CV_32S:
            type = DataBuffer.TYPE_INT;
            bpp = 32;
            break;
        case opencv_core.CV_32F:
            type = DataBuffer.TYPE_FLOAT;
            bpp = 32;
            break;
        default:
            logger.warn("Unknown Mat depth {}, will default to CV_64F ({})", mat.depth(), opencv_core.CV_64F);
            // fall through
        case opencv_core.CV_64F:
            type = DataBuffer.TYPE_DOUBLE;
            bpp = 64;
    }
    // Create a suitable raster
    int width = mat.cols();
    int height = mat.rows();
    int channels = mat.channels();
    // We might generate an image for a special case
    BufferedImage img = null;
    // Handle some special cases
    if (colorModel == null) {
        if (type == DataBuffer.TYPE_BYTE) {
            if (channels == 1) {
                img = new BufferedImage(width, height, BufferedImage.TYPE_BYTE_GRAY);
                // TODO: Set the bytes
            } else if (channels == 3) {
                img = new BufferedImage(width, height, BufferedImage.TYPE_INT_RGB);
            } else if (channels == 4) {
                img = new BufferedImage(width, height, BufferedImage.TYPE_INT_ARGB);
            }
        }
    } else if (colorModel instanceof IndexColorModel) {
        img = new BufferedImage(width, height, BufferedImage.TYPE_BYTE_INDEXED, (IndexColorModel) colorModel);
    }
    // Create the image
    WritableRaster raster;
    if (img != null) {
        raster = img.getRaster();
    } else if (colorModel != null) {
        raster = colorModel.createCompatibleWritableRaster(width, height);
        img = new BufferedImage(colorModel, raster, false, null);
    } else {
        // Create some kind of raster we can use
        var sampleModel = new BandedSampleModel(type, width, height, channels);
        raster = WritableRaster.createWritableRaster(sampleModel, null);
        // We do need a ColorModel of some description
        colorModel = ColorModelFactory.getDummyColorModel(bpp * channels);
        img = new BufferedImage(colorModel, raster, false, null);
    }
    MatVector matvector = new MatVector();
    opencv_core.split(mat, matvector);
    // We don't know which of the 3 supported array types will be needed yet...
    int[] pixelsInt = null;
    float[] pixelsFloat = null;
    double[] pixelsDouble = null;
    for (int b = 0; b < channels; b++) {
        // Extract pixels for the current channel
        Mat matChannel = matvector.get(b);
        Indexer indexer = matChannel.createIndexer();
        if (indexer instanceof UByteIndexer) {
            if (pixelsInt == null)
                pixelsInt = new int[width * height];
            ((UByteIndexer) indexer).get(0L, pixelsInt);
        } else if (indexer instanceof UShortIndexer) {
            if (pixelsInt == null)
                pixelsInt = new int[width * height];
            ((UShortIndexer) indexer).get(0L, pixelsInt);
        } else if (indexer instanceof FloatIndexer) {
            if (pixelsFloat == null)
                pixelsFloat = new float[width * height];
            ((FloatIndexer) indexer).get(0L, pixelsFloat);
        } else if (indexer instanceof DoubleIndexer) {
            if (pixelsDouble == null)
                pixelsDouble = new double[width * height];
            ((DoubleIndexer) indexer).get(0L, pixelsDouble);
        } else {
            if (pixelsDouble == null)
                pixelsDouble = new double[width * height];
            // Fall back to reading pixel-by-pixel; this is inefficient, but unlikely to occur often
            for (int y = 0; y < height; y++) {
                for (int x = 0; x < width; x++) {
                    // matChannel is single-channel after split(), so index channel 0
                    pixelsDouble[y * width + x] = indexer.getDouble(y, x, 0);
                }
            }
        }
        // Set the samples
        if (pixelsInt != null)
            raster.setSamples(0, 0, width, height, b, pixelsInt);
        else if (pixelsFloat != null)
            raster.setSamples(0, 0, width, height, b, pixelsFloat);
        else if (pixelsDouble != null)
            raster.setSamples(0, 0, width, height, b, pixelsDouble);
    }
    return img;
}
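A short sketch of the two main paths through this method, under the same package assumption: an 8-bit 3-channel Mat hits the TYPE_INT_RGB special case, while a float Mat falls through to the dummy-ColorModel branch and is mainly useful for raster access rather than display:

import java.awt.image.BufferedImage;
import org.bytedeco.opencv.global.opencv_core;
import org.bytedeco.opencv.opencv_core.Mat;
import org.bytedeco.opencv.opencv_core.Scalar;
import qupath.opencv.tools.OpenCVTools;

public class MatToBufferedImageDemo {
    public static void main(String[] args) {
        // 8-bit, 3-channel: with a null ColorModel this takes the TYPE_INT_RGB path
        Mat rgb = new Mat(32, 32, opencv_core.CV_8UC3, new Scalar(255, 0, 0, 0));
        BufferedImage img = OpenCVTools.matToBufferedImage(rgb, null);
        System.out.println(img.getType() == BufferedImage.TYPE_INT_RGB); // true

        // 32-bit float: no special case applies, so a dummy ColorModel is created;
        // the result supports raster access but needs a real ColorModel for meaningful display
        Mat floats = new Mat(32, 32, opencv_core.CV_32FC1, new Scalar(1.5));
        BufferedImage imgFloat = OpenCVTools.matToBufferedImage(floats, null);
        System.out.println(imgFloat.getRaster().getSampleFloat(0, 0, 0)); // 1.5
    }
}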