Use of qupath.lib.regions.ImageRegion in project qupath by qupath.
The class IJTools, method convertToROI.
/**
 * Create a QuPath ROI from an ImageJ Roi.
 *
 * @param roi the ImageJ Roi to convert
 * @param pathImage the PathImage from which the Roi was derived; supplies the calibration, downsample factor and image plane
 * @return a QuPath ROI corresponding to the ImageJ Roi
 */
public static <T extends PathImage<? extends ImagePlus>> ROI convertToROI(Roi roi, T pathImage) {
    Calibration cal = null;
    double downsampleFactor = 1;
    ImageRegion region = pathImage.getImageRegion();
    if (pathImage != null) {
        cal = pathImage.getImage().getCalibration();
        downsampleFactor = pathImage.getDownsampleFactor();
    }
    return convertToROI(roi, cal, downsampleFactor, region.getPlane());
}
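A minimal usage sketch, not taken from the QuPath source: the PathImage is typically produced by extracting a region from an ImageServer, after which a Roi drawn on the resulting ImagePlus can be mapped back to full-resolution coordinates. The helper name below is hypothetical, and the IJTools.convertToImagePlus / RegionRequest signatures should be checked against your QuPath version.
// Hypothetical helper (illustrative only): read a downsampled region from a server,
// then map an ImageJ Roi defined on that ImagePlus back to a QuPath ROI in
// full-resolution image coordinates.
static ROI ijRoiToQuPathRoi(ImageServer<BufferedImage> server, Roi ijRoi, double downsample) throws IOException {
    RegionRequest request = RegionRequest.createInstance(server.getPath(), downsample, 0, 0, server.getWidth(), server.getHeight());
    PathImage<ImagePlus> pathImage = IJTools.convertToImagePlus(server, request);
    return IJTools.convertToROI(ijRoi, pathImage);
}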
Use of qupath.lib.regions.ImageRegion in project qupath by qupath.
The class WandToolCV, method createShape.
@Override
protected Geometry createShape(MouseEvent e, double x, double y, boolean useTiles, Geometry addToShape) {
    GeometryFactory factory = getGeometryFactory();
    if (addToShape != null && pLast != null && pLast.distanceSq(x, y) < 2)
        return null;
    long startTime = System.currentTimeMillis();
    QuPathViewer viewer = getViewer();
    if (viewer == null)
        return null;
    double downsample = Math.max(1, Math.round(viewer.getDownsampleFactor() * 4)) / 4.0;
    var regionStore = viewer.getImageRegionStore();
    // Paint the image as it is currently being viewed
    var type = wandType.get();
    boolean doGray = type == WandType.GRAY;
    BufferedImage imgTemp = doGray ? imgGray : imgBGR;
    int nChannels = doGray ? 1 : 3;
    Graphics2D g2d = imgTemp.createGraphics();
    g2d.setColor(Color.BLACK);
    g2d.setClip(0, 0, w, w);
    g2d.fillRect(0, 0, w, w);
    double xStart = Math.round(x - w * downsample * 0.5);
    double yStart = Math.round(y - w * downsample * 0.5);
    bounds.setFrame(xStart, yStart, w * downsample, w * downsample);
    g2d.scale(1.0 / downsample, 1.0 / downsample);
    g2d.translate(-xStart, -yStart);
    regionStore.paintRegion(viewer.getServer(), g2d, bounds, viewer.getZPosition(), viewer.getTPosition(), downsample, null, null, viewer.getImageDisplay());
    // regionStore.paintRegionCompletely(viewer.getServer(), g2d, bounds, viewer.getZPosition(), viewer.getTPosition(), viewer.getDownsampleFactor(), null, viewer.getImageDisplay(), 250);
    // Optionally include the overlay information when using the wand
    float opacity = viewer.getOverlayOptions().getOpacity();
    if (opacity > 0 && getWandUseOverlays()) {
        ImageRegion region = ImageRegion.createInstance(
                (int) bounds.getX() - 1,
                (int) bounds.getY() - 1,
                (int) bounds.getWidth() + 2,
                (int) bounds.getHeight() + 2,
                viewer.getZPosition(), viewer.getTPosition());
        if (opacity < 1)
            g2d.setComposite(AlphaComposite.getInstance(AlphaComposite.SRC_OVER, opacity));
        for (PathOverlay overlay : viewer.getOverlayLayers().toArray(PathOverlay[]::new)) {
            if (!(overlay instanceof HierarchyOverlay))
                overlay.paintOverlay(g2d, region, downsample, viewer.getImageData(), true);
        }
    }
    // Ensure we have Mats & the correct channel number
    if (mat != null && (mat.channels() != nChannels || mat.depth() != opencv_core.CV_8U)) {
        mat.close();
        mat = null;
    }
    if (mat == null || mat.isNull() || mat.empty())
        mat = new Mat(w, w, CV_8UC(nChannels));
    // if (matMask == null)
    //     matMask = new Mat(w+2, w+2, CV_8U);
    // if (matSelected == null)
    //     matSelected = new Mat(w+2, w+2, CV_8U);
    // Put pixels into an OpenCV image
    byte[] buffer = ((DataBufferByte) imgTemp.getRaster().getDataBuffer()).getData();
    ByteBuffer matBuffer = mat.createBuffer();
    matBuffer.put(buffer);
    // mat.put(0, 0, buffer);
    // opencv_imgproc.cvtColor(mat, mat, opencv_imgproc.COLOR_BGR2Lab);
    // blurSigma = 4;
    boolean doSimpleSelection = e.isShortcutDown() && !e.isShiftDown();
    if (doSimpleSelection) {
        matMask.put(Scalar.ZERO);
        // opencv_imgproc.circle(matMask, seed, radius, Scalar.ONE);
        opencv_imgproc.floodFill(mat, matMask, seed, Scalar.ONE, null, Scalar.ZERO, Scalar.ZERO, 4 | (2 << 8) | opencv_imgproc.FLOODFILL_MASK_ONLY | opencv_imgproc.FLOODFILL_FIXED_RANGE);
        subtractPut(matMask, Scalar.ONE);
    } else {
        double blurSigma = Math.max(0.5, getWandSigmaPixels());
        int size = (int) Math.ceil(blurSigma * 2) * 2 + 1;
        blurSize.width(size);
        blurSize.height(size);
        // Smooth a little
        opencv_imgproc.GaussianBlur(mat, mat, blurSize, blurSigma);
        // Choose mat to threshold (may be adjusted)
        Mat matThreshold = mat;
        // Apply color transform if required
        if (type == WandType.LAB_DISTANCE) {
            mat.convertTo(matFloat, opencv_core.CV_32F, 1.0 / 255.0, 0.0);
            opencv_imgproc.cvtColor(matFloat, matFloat, opencv_imgproc.COLOR_BGR2Lab);
            double max = 0;
            double mean = 0;
            try (FloatIndexer idx = matFloat.createIndexer()) {
                int k = w / 2;
                double v1 = idx.get(k, k, 0);
                double v2 = idx.get(k, k, 1);
                double v3 = idx.get(k, k, 2);
                double meanScale = 1.0 / (w * w);
                for (int row = 0; row < w; row++) {
                    for (int col = 0; col < w; col++) {
                        double L = idx.get(row, col, 0) - v1;
                        double A = idx.get(row, col, 1) - v2;
                        double B = idx.get(row, col, 2) - v3;
                        double dist = Math.sqrt(L * L + A * A + B * B);
                        if (dist > max)
                            max = dist;
                        mean += dist * meanScale;
                        idx.put(row, col, 0, (float) dist);
                    }
                }
            }
            if (matThreshold == null)
                matThreshold = new Mat();
            opencv_core.extractChannel(matFloat, matThreshold, 0);
            // There are various ways we might choose a threshold now...
            // Here, we use a multiple of the mean. Since values are 'distances'
            // they are all >= 0
            matThreshold.convertTo(matThreshold, opencv_core.CV_8U, 255.0 / max, 0);
            threshold.put(mean * getWandSensitivity());
            // // OpenCVTools.matToImagePlus(matThreshold, "Before").show();
            // // Apply local Otsu threshold
            // opencv_imgproc.threshold(matThreshold, matThreshold,
            //     0,
            //     255, opencv_imgproc.THRESH_BINARY + opencv_imgproc.THRESH_OTSU);
            // threshold.put(Scalar.ZERO);
            nChannels = 1;
        } else {
            // Base threshold on local standard deviation
            meanStdDev(matThreshold, mean, stddev);
            DoubleBuffer stddevBuffer = stddev.createBuffer();
            double[] stddev2 = new double[nChannels];
            stddevBuffer.get(stddev2);
            double scale = 1.0 / getWandSensitivity();
            if (scale < 0)
                scale = 0.01;
            for (int i = 0; i < stddev2.length; i++)
                stddev2[i] = stddev2[i] * scale;
            threshold.put(stddev2);
        }
        // Limit maximum radius by pen
        int radius = (int) Math.round(w / 2 * QuPathPenManager.getPenManager().getPressure());
        if (radius == 0)
            return null;
        matMask.put(Scalar.ZERO);
        opencv_imgproc.circle(matMask, seed, radius, Scalar.ONE);
        opencv_imgproc.floodFill(matThreshold, matMask, seed, Scalar.ONE, null, threshold, threshold, 4 | (2 << 8) | opencv_imgproc.FLOODFILL_MASK_ONLY | opencv_imgproc.FLOODFILL_FIXED_RANGE);
        subtractPut(matMask, Scalar.ONE);
        if (strel == null)
            strel = opencv_imgproc.getStructuringElement(opencv_imgproc.MORPH_ELLIPSE, new Size(5, 5));
        opencv_imgproc.morphologyEx(matMask, matMask, opencv_imgproc.MORPH_CLOSE, strel);
    }
    MatVector contours = new MatVector();
    if (contourHierarchy == null)
        contourHierarchy = new Mat();
    opencv_imgproc.findContours(matMask, contours, contourHierarchy, opencv_imgproc.RETR_EXTERNAL, opencv_imgproc.CHAIN_APPROX_SIMPLE);
    // logger.trace("Contours: " + contours.size());
    List<Coordinate> coords = new ArrayList<>();
    List<Geometry> geometries = new ArrayList<>();
    for (Mat contour : contours.get()) {
        // Discard single pixels / lines
        if (contour.size().height() <= 2)
            continue;
        // Create a polygon geometry
        try (IntIndexer idxrContours = contour.createIndexer()) {
            for (long r = 0; r < idxrContours.size(0); r++) {
                int px = idxrContours.get(r, 0L, 0L);
                int py = idxrContours.get(r, 0L, 1L);
                double xx = (px - w / 2 - 1); // * downsample + x;
                double yy = (py - w / 2 - 1); // * downsample + y;
                coords.add(new Coordinate(xx, yy));
            }
        }
        if (coords.size() > 1) {
            // Ensure closed
            if (!coords.get(coords.size() - 1).equals(coords.get(0)))
                coords.add(coords.get(0));
            // Exclude single pixels
            var polygon = factory.createPolygon(coords.toArray(Coordinate[]::new));
            if (coords.size() > 5 || polygon.getArea() > 1)
                geometries.add(polygon);
        }
    }
    contours.close();
    if (geometries.isEmpty())
        return null;
    // Handle the fact that OpenCV contours are defined using the 'pixel center' by dilating the boundary
    var geometry = geometries.size() == 1 ? geometries.get(0) : GeometryCombiner.combine(geometries);
    geometry = geometry.buffer(0.5);
    // Transform to map to integer pixel locations in the full-resolution image
    var transform = new AffineTransformation().scale(downsample, downsample).translate(x, y);
    geometry = transform.transform(geometry);
    geometry = GeometryTools.roundCoordinates(geometry);
    geometry = GeometryTools.constrainToBounds(geometry, 0, 0, viewer.getServerWidth(), viewer.getServerHeight());
    if (geometry.getArea() <= 1)
        return null;
    long endTime = System.currentTimeMillis();
    logger.trace(getClass().getSimpleName() + " time: " + (endTime - startTime));
    if (pLast == null)
        pLast = new Point2D.Double(x, y);
    else
        pLast.setLocation(x, y);
    return geometry;
}
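The only ImageRegion use in this method is the padded paint bounds handed to each overlay. A standalone sketch of just that step, with illustrative values (java.awt.geom.Rectangle2D assumed imported; z and t stand in for the viewer's current positions):
// Sketch (illustrative values): pad a viewer-space bounding box by one pixel on each
// side and wrap it as an ImageRegion on a given z-slice and timepoint, as the overlay
// painting above does.
Rectangle2D bounds = new Rectangle2D.Double(5120, 3072, 1024, 1024);
int z = 0, t = 0;
ImageRegion region = ImageRegion.createInstance(
        (int) bounds.getX() - 1,
        (int) bounds.getY() - 1,
        (int) bounds.getWidth() + 2,
        (int) bounds.getHeight() + 2,
        z, t);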
Use of qupath.lib.regions.ImageRegion in project qupath by qupath.
The class BufferedImageOverlay, method paintOverlay.
// /**
//  * Add another region to the overlay.
//  * @param region
//  * @param img
//  * @return any existing region with the same key
//  */
// public BufferedImage put(ImageRegion region, BufferedImage img) {
//     var previous = regions.put(region, img);
//     if (viewer != null)
//         viewer.repaint();
//     return previous;
// }
@Override
public void paintOverlay(Graphics2D g2d, ImageRegion imageRegion, double downsampleFactor, ImageData<BufferedImage> imageData, boolean paintCompletely) {
    // Don't show if pixel classifications aren't being shown
    if (!isVisible() || !getOverlayOptions().getShowPixelClassification())
        return;
    super.paintOverlay(g2d, imageRegion, downsampleFactor, imageData, paintCompletely);
    // Paint the regions we have
    for (Map.Entry<ImageRegion, BufferedImage> entry : regions.entrySet()) {
        ImageRegion region = entry.getKey();
        // Check if the region intersects or not
        if (!imageRegion.intersects(region))
            continue;
        // Draw the region
        BufferedImage img = entry.getValue();
        if (colorModel != null && colorModel != img.getColorModel()) {
            // Apply the color model to get a version of the image we can draw quickly
            var imgRGB = cacheRGB.get(img);
            if (imgRGB == null) {
                var img2 = new BufferedImage(colorModel, img.getRaster(), img.getColorModel().isAlphaPremultiplied(), null);
                imgRGB = convertToDrawable(img2);
                cacheRGB.put(img, imgRGB);
            }
            img = imgRGB;
        } else {
            img = cacheRGB.computeIfAbsent(img, img2 -> convertToDrawable(img2));
        }
        g2d.drawImage(img, region.getX(), region.getY(), region.getWidth(), region.getHeight(), null);
    }
}
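The ImageRegion work here is the intersects() check, which skips cached tiles that lie outside the region being painted. A minimal sketch of that culling in isolation, with illustrative values:
// Sketch: cull cached tiles whose ImageRegion does not overlap the requested paint region.
Map<ImageRegion, BufferedImage> regions = new LinkedHashMap<>();
ImageRegion request = ImageRegion.createInstance(0, 0, 4096, 4096, 0, 0);
for (Map.Entry<ImageRegion, BufferedImage> entry : regions.entrySet()) {
    if (!request.intersects(entry.getKey()))
        continue; // tile lies entirely outside the requested region
    BufferedImage img = entry.getValue();
    // ... draw img at entry.getKey().getX()/getY(), sized getWidth()/getHeight() ...
}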
Use of qupath.lib.regions.ImageRegion in project qupath by qupath.
The class ViewTrackerDataMaps, method updateDataMaps.
Map<ImageRegion, BufferedImage> updateDataMaps(long timeStart, long timeStop, double downsampleMin, double downsampleMax, Feature feature, ColorMap colorMap) {
    if (timeStart == -1 || timeStop == -1 || downsampleMin == -1 || downsampleMax == -1 || colorMap == null)
        return null;
    var startTime = System.currentTimeMillis();
    regionMaps.clear();
    regionMapsOriginal.clear();
    for (int z = 0; z < nZSlices; z++) {
        for (int t = 0; t < nTimepoints; t++) {
            var relevantFrames = getRelevantFrames(timeStart, timeStop, downsampleMin, downsampleMax, z, t);
            ImageRegion region = ImageRegion.createInstance(0, 0, fullWidth, fullHeight, z, t);
            var dataMap = new ViewTrackerDataMap(region, feature, relevantFrames, downsample, dataMapWidth, dataMapHeight);
            regionMapsOriginal.put(region, dataMap);
            regionMaps.put(region, dataMap.getBufferedImage(colorMap));
        }
    }
    // TODO: remove next line
    logger.info("Processing time for populateRegionMap(): " + (System.currentTimeMillis() - startTime));
    return regionMaps;
}
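Note that regionMaps is keyed by ImageRegion, one full-size region per (z, t) plane, which relies on ImageRegion behaving as a value type for map lookups (inferred from its use as a key here and elsewhere in QuPath). A small sketch of that pattern with illustrative sizes:
// Sketch: ImageRegion as a map key (illustrative sizes).
Map<ImageRegion, BufferedImage> maps = new LinkedHashMap<>();
BufferedImage dataMapImage = new BufferedImage(512, 384, BufferedImage.TYPE_INT_ARGB);
maps.put(ImageRegion.createInstance(0, 0, 40000, 30000, 0, 0), dataMapImage);
// An equal region created later retrieves the same entry
BufferedImage img = maps.get(ImageRegion.createInstance(0, 0, 40000, 30000, 0, 0));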
Use of qupath.lib.regions.ImageRegion in project qupath by qupath.
The class Commands, method createFullImageAnnotation.
/**
* Create a full image annotation for the image in the specified viewer.
* The z and t positions of the viewer will be used.
* @param viewer the viewer containing the image to be processed
*/
public static void createFullImageAnnotation(QuPathViewer viewer) {
    if (viewer == null)
        return;
    ImageData<?> imageData = viewer.getImageData();
    if (imageData == null)
        return;
    PathObjectHierarchy hierarchy = imageData.getHierarchy();
    // Check if we already have a comparable annotation
    int z = viewer.getZPosition();
    int t = viewer.getTPosition();
    ImageRegion bounds = viewer.getServerBounds();
    ROI roi = ROIs.createRectangleROI(bounds.getX(), bounds.getY(), bounds.getWidth(), bounds.getHeight(), ImagePlane.getPlane(z, t));
    for (PathObject pathObject : hierarchy.getAnnotationObjects()) {
        ROI r2 = pathObject.getROI();
        if (r2 instanceof RectangleROI
                && roi.getBoundsX() == r2.getBoundsX()
                && roi.getBoundsY() == r2.getBoundsY()
                && roi.getBoundsWidth() == r2.getBoundsWidth()
                && roi.getBoundsHeight() == r2.getBoundsHeight()
                && roi.getImagePlane().equals(r2.getImagePlane())) {
            logger.info("Full image annotation already exists! {}", pathObject);
            viewer.setSelectedObject(pathObject);
            return;
        }
    }
    PathObject pathObject = PathObjects.createAnnotationObject(roi);
    hierarchy.addPathObject(pathObject);
    viewer.setSelectedObject(pathObject);
    // Log in the history
    if (z == 0 && t == 0)
        imageData.getHistoryWorkflow().addStep(new DefaultScriptableWorkflowStep("Create full image annotation", "createSelectAllObject(true);"));
    else
        imageData.getHistoryWorkflow().addStep(new DefaultScriptableWorkflowStep("Create full image annotation", String.format("createSelectAllObject(true, %d, %d);", z, t)));
}
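Here the ImageRegion returned by viewer.getServerBounds() is turned into a rectangular ROI on the current plane. The same conversion can be written from any ImageRegion; a minimal sketch with illustrative bounds (the getZ()/getT() accessors are assumed, mirroring their use elsewhere in the QuPath API):
// Sketch: build a rectangular ROI covering an ImageRegion on its own plane.
ImageRegion bounds = ImageRegion.createInstance(0, 0, 60000, 45000, 0, 0);
ROI fullImageRoi = ROIs.createRectangleROI(
        bounds.getX(), bounds.getY(),
        bounds.getWidth(), bounds.getHeight(),
        ImagePlane.getPlane(bounds.getZ(), bounds.getT()));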