
Example 6 with MatVector

use of org.bytedeco.opencv.opencv_core.MatVector in project qupath by qupath.

the class OpenCVTools method filterZ.

/**
 * Apply a filter along the entries in the input list.
 * <p>
 * If each Mat in the list can be considered a consecutive 2D image plane from a z-stack,
 * this can be considered filtering along the z-dimension.
 *
 * @param mats input list of Mats, e.g. consecutive 2D image planes from a z-stack
 * @param kernelZ filter kernel, given as a single-column (or single-row) Mat
 * @param ind3D if -1, return filtered results for all mats, otherwise only return results for the mat at the specified ind3D
 * @param border border type, one of OpenCV's BORDER_* flags
 * @return the filtered Mats; a single entry if ind3D &gt;= 0, otherwise one per input Mat
 */
public static List<Mat> filterZ(List<Mat> mats, Mat kernelZ, int ind3D, int border) {
    /*
     * We can avoid the rigmarole of applying the full filtering
     * by instead simply calculating the weighted sum corresponding to the convolution
     * around the z-slice of interest only.
     */
    boolean doWeightedSums = true;
    if (doWeightedSums) {
        // Extract kernel values
        int ks = (int) kernelZ.total();
        double[] kernelArray = new double[ks];
        DoubleIndexer idx = kernelZ.createIndexer();
        idx.get(0L, kernelArray);
        idx.release();
        if (ind3D >= 0) {
            // Calculate weights for each image
            Mat result = filterSingleZ(mats, kernelArray, ind3D, border);
            return Arrays.asList(result);
        } else {
            List<Mat> output = new ArrayList<>();
            for (int i = 0; i < mats.size(); i++) {
                Mat result = filterSingleZ(mats, kernelArray, i, border);
                output.add(result);
            }
            return output;
        }
    }
    // Create an array of images reshaped as column vectors
    Mat[] columns = new Mat[mats.size()];
    int nRows = 0;
    for (int i = 0; i < mats.size(); i++) {
        Mat mat = mats.get(i);
        nRows = mat.rows();
        columns[i] = mat.reshape(mat.channels(), mat.rows() * mat.cols());
    }
    // Concatenate the columns, so that the z-dimension now runs along rows
    Mat matConcatZ = new Mat();
    opencv_core.hconcat(new MatVector(columns), matConcatZ);
    // Apply z filtering along rows
    if (kernelZ.rows() > 1)
        kernelZ = kernelZ.t().asMat();
    // Mat empty = new Mat(1, 1, opencv_core.CV_64FC1, Scalar.ONE);
    // opencv_imgproc.sepFilter2D(matConcatZ, matConcatZ, opencv_core.CV_32F, kernelZ, empty, null, 0.0, border);
    opencv_imgproc.filter2D(matConcatZ, matConcatZ, opencv_core.CV_32F, kernelZ, null, 0.0, border);
    int start = 0;
    int end = mats.size();
    if (ind3D >= 0) {
        start = ind3D;
        end = ind3D + 1;
    }
    // Reshape to create output list
    List<Mat> output = new ArrayList<>();
    for (int i = start; i < end; i++) {
        output.add(matConcatZ.col(i).clone().reshape(matConcatZ.channels(), nRows));
    }
    return output;
}
Also used : Mat(org.bytedeco.opencv.opencv_core.Mat) DoubleIndexer(org.bytedeco.javacpp.indexer.DoubleIndexer) ArrayList(java.util.ArrayList) MatVector(org.bytedeco.opencv.opencv_core.MatVector) Point(org.bytedeco.opencv.opencv_core.Point)
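
A minimal usage sketch, assuming the method is reached through qupath.opencv.tools.OpenCVTools (the stack size and kernel weights below are placeholder test values): build a small z-stack, define a 3-tap smoothing kernel as a column vector, and filter every plane along z.

import java.util.ArrayList;
import java.util.List;
import org.bytedeco.javacpp.indexer.DoubleIndexer;
import org.bytedeco.opencv.global.opencv_core;
import org.bytedeco.opencv.opencv_core.Mat;
import org.bytedeco.opencv.opencv_core.Scalar;
import qupath.opencv.tools.OpenCVTools;

public class FilterZDemo {
    public static void main(String[] args) {
        // A small 'z-stack' of five uniform 2D test planes
        List<Mat> mats = new ArrayList<>();
        for (int z = 0; z < 5; z++)
            mats.add(new Mat(64, 64, opencv_core.CV_32FC1, Scalar.all(z)));

        // A 3-tap smoothing kernel along z, stored as a column vector
        Mat kernelZ = new Mat(3, 1, opencv_core.CV_64FC1);
        try (DoubleIndexer idx = kernelZ.createIndexer()) {
            idx.put(0L, 0.25);
            idx.put(1L, 0.5);
            idx.put(2L, 0.25);
        }

        // ind3D = -1 requests filtered output for every plane
        List<Mat> filtered = OpenCVTools.filterZ(mats, kernelZ, -1, opencv_core.BORDER_REPLICATE);
        System.out.println("Filtered planes: " + filtered.size());
    }
}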

Example 7 with MatVector

use of org.bytedeco.opencv.opencv_core.MatVector in project qupath by qupath.

the class OpenCVTools method labelImage.

/**
 * Create a labelled image from a binary image using findContours and drawContours.
 * @param matBinary the binary input image
 * @param matLabels the output image; each contour is filled with a distinct integer label
 * @param contourRetrievalMode defined within OpenCV findContours
 * @deprecated Use {@link #label(Mat, Mat, int)} instead.
 */
@Deprecated
public static void labelImage(Mat matBinary, Mat matLabels, int contourRetrievalMode) {
    MatVector contours = new MatVector();
    Mat hierarchy = new Mat();
    opencv_imgproc.findContours(matBinary, contours, hierarchy, contourRetrievalMode, opencv_imgproc.CHAIN_APPROX_SIMPLE);
    Point offset = new Point(0, 0);
    for (int c = 0; c < contours.size(); c++) {
        opencv_imgproc.drawContours(matLabels, contours, c, Scalar.all(c + 1), -1, 8, hierarchy, 2, offset);
    }
    hierarchy.close();
    contours.close();
}
Also used : Mat(org.bytedeco.opencv.opencv_core.Mat) MatVector(org.bytedeco.opencv.opencv_core.MatVector) Point(org.bytedeco.opencv.opencv_core.Point)
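
Since the method is deprecated, new code should prefer label(Mat, Mat, int); still, a minimal sketch of the old call, assuming access through qupath.opencv.tools.OpenCVTools (the test shapes are placeholders):

import org.bytedeco.opencv.global.opencv_core;
import org.bytedeco.opencv.global.opencv_imgproc;
import org.bytedeco.opencv.opencv_core.Mat;
import org.bytedeco.opencv.opencv_core.Point;
import org.bytedeco.opencv.opencv_core.Scalar;
import qupath.opencv.tools.OpenCVTools;

public class LabelImageDemo {
    public static void main(String[] args) {
        // Binary input: black background with two filled white squares
        Mat matBinary = new Mat(100, 100, opencv_core.CV_8UC1, Scalar.ZERO);
        opencv_imgproc.rectangle(matBinary, new Point(10, 10), new Point(30, 30),
                Scalar.WHITE, -1, opencv_imgproc.LINE_8, 0);
        opencv_imgproc.rectangle(matBinary, new Point(50, 50), new Point(80, 80),
                Scalar.WHITE, -1, opencv_imgproc.LINE_8, 0);

        // Labels output: 32-bit signed, so labels are not capped at 255
        Mat matLabels = new Mat(100, 100, opencv_core.CV_32SC1, Scalar.ZERO);
        OpenCVTools.labelImage(matBinary, matLabels, opencv_imgproc.RETR_EXTERNAL);
        // matLabels now holds a distinct positive label inside each component
    }
}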

Example 8 with MatVector

use of org.bytedeco.opencv.opencv_core.MatVector in project qupath by qupath.

the class WandToolCV method createShape.

@Override
protected Geometry createShape(MouseEvent e, double x, double y, boolean useTiles, Geometry addToShape) {
    GeometryFactory factory = getGeometryFactory();
    if (addToShape != null && pLast != null && pLast.distanceSq(x, y) < 2)
        return null;
    long startTime = System.currentTimeMillis();
    QuPathViewer viewer = getViewer();
    if (viewer == null)
        return null;
    double downsample = Math.max(1, Math.round(viewer.getDownsampleFactor() * 4)) / 4.0;
    var regionStore = viewer.getImageRegionStore();
    // Paint the image as it is currently being viewed
    var type = wandType.get();
    boolean doGray = type == WandType.GRAY;
    BufferedImage imgTemp = doGray ? imgGray : imgBGR;
    int nChannels = doGray ? 1 : 3;
    Graphics2D g2d = imgTemp.createGraphics();
    g2d.setColor(Color.BLACK);
    g2d.setClip(0, 0, w, w);
    g2d.fillRect(0, 0, w, w);
    double xStart = Math.round(x - w * downsample * 0.5);
    double yStart = Math.round(y - w * downsample * 0.5);
    bounds.setFrame(xStart, yStart, w * downsample, w * downsample);
    g2d.scale(1.0 / downsample, 1.0 / downsample);
    g2d.translate(-xStart, -yStart);
    regionStore.paintRegion(viewer.getServer(), g2d, bounds, viewer.getZPosition(), viewer.getTPosition(), downsample, null, null, viewer.getImageDisplay());
    // regionStore.paintRegionCompletely(viewer.getServer(), g2d, bounds, viewer.getZPosition(), viewer.getTPosition(), viewer.getDownsampleFactor(), null, viewer.getImageDisplay(), 250);
    // Optionally include the overlay information when using the wand
    float opacity = viewer.getOverlayOptions().getOpacity();
    if (opacity > 0 && getWandUseOverlays()) {
        ImageRegion region = ImageRegion.createInstance((int) bounds.getX() - 1, (int) bounds.getY() - 1, (int) bounds.getWidth() + 2, (int) bounds.getHeight() + 2, viewer.getZPosition(), viewer.getTPosition());
        if (opacity < 1)
            g2d.setComposite(AlphaComposite.getInstance(AlphaComposite.SRC_OVER, opacity));
        for (PathOverlay overlay : viewer.getOverlayLayers().toArray(PathOverlay[]::new)) {
            if (!(overlay instanceof HierarchyOverlay))
                overlay.paintOverlay(g2d, region, downsample, viewer.getImageData(), true);
        }
    }
    // Ensure we have Mats & the correct channel number
    if (mat != null && (mat.channels() != nChannels || mat.depth() != opencv_core.CV_8U)) {
        mat.close();
        mat = null;
    }
    if (mat == null || mat.isNull() || mat.empty())
        mat = new Mat(w, w, CV_8UC(nChannels));
    // if (matMask == null)
    // matMask = new Mat(w+2, w+2, CV_8U);
    // if (matSelected == null)
    // matSelected = new Mat(w+2, w+2, CV_8U);
    // Put pixels into an OpenCV image
    byte[] buffer = ((DataBufferByte) imgTemp.getRaster().getDataBuffer()).getData();
    ByteBuffer matBuffer = mat.createBuffer();
    matBuffer.put(buffer);
    // mat.put(0, 0, buffer);
    // opencv_imgproc.cvtColor(mat, mat, opencv_imgproc.COLOR_BGR2Lab);
    // blurSigma = 4;
    boolean doSimpleSelection = e.isShortcutDown() && !e.isShiftDown();
    if (doSimpleSelection) {
        matMask.put(Scalar.ZERO);
        // opencv_imgproc.circle(matMask, seed, radius, Scalar.ONE);
        opencv_imgproc.floodFill(mat, matMask, seed, Scalar.ONE, null, Scalar.ZERO, Scalar.ZERO, 4 | (2 << 8) | opencv_imgproc.FLOODFILL_MASK_ONLY | opencv_imgproc.FLOODFILL_FIXED_RANGE);
        subtractPut(matMask, Scalar.ONE);
    } else {
        double blurSigma = Math.max(0.5, getWandSigmaPixels());
        int size = (int) Math.ceil(blurSigma * 2) * 2 + 1;
        blurSize.width(size);
        blurSize.height(size);
        // Smooth a little
        opencv_imgproc.GaussianBlur(mat, mat, blurSize, blurSigma);
        // Choose mat to threshold (may be adjusted)
        Mat matThreshold = mat;
        // Apply color transform if required
        if (type == WandType.LAB_DISTANCE) {
            mat.convertTo(matFloat, opencv_core.CV_32F, 1.0 / 255.0, 0.0);
            opencv_imgproc.cvtColor(matFloat, matFloat, opencv_imgproc.COLOR_BGR2Lab);
            double max = 0;
            double mean = 0;
            try (FloatIndexer idx = matFloat.createIndexer()) {
                int k = w / 2;
                double v1 = idx.get(k, k, 0);
                double v2 = idx.get(k, k, 1);
                double v3 = idx.get(k, k, 2);
                double meanScale = 1.0 / (w * w);
                for (int row = 0; row < w; row++) {
                    for (int col = 0; col < w; col++) {
                        double L = idx.get(row, col, 0) - v1;
                        double A = idx.get(row, col, 1) - v2;
                        double B = idx.get(row, col, 2) - v3;
                        double dist = Math.sqrt(L * L + A * A + B * B);
                        if (dist > max)
                            max = dist;
                        mean += dist * meanScale;
                        idx.put(row, col, 0, (float) dist);
                    }
                }
            }
            if (matThreshold == null)
                matThreshold = new Mat();
            opencv_core.extractChannel(matFloat, matThreshold, 0);
            // There are various ways we might choose a threshold now...
            // Here, we use a multiple of the mean. Since values are 'distances'
            // they are all >= 0
            matThreshold.convertTo(matThreshold, opencv_core.CV_8U, 255.0 / max, 0);
            threshold.put(mean * getWandSensitivity());
            // OpenCVTools.matToImagePlus(matThreshold, "Before").show();
            // Apply local Otsu threshold
            // opencv_imgproc.threshold(matThreshold, matThreshold,
            //         0, 255, opencv_imgproc.THRESH_BINARY + opencv_imgproc.THRESH_OTSU);
            // threshold.put(Scalar.ZERO);
            nChannels = 1;
        } else {
            // Base threshold on local standard deviation
            meanStdDev(matThreshold, mean, stddev);
            DoubleBuffer stddevBuffer = stddev.createBuffer();
            double[] stddev2 = new double[nChannels];
            stddevBuffer.get(stddev2);
            double scale = 1.0 / getWandSensitivity();
            if (scale < 0)
                scale = 0.01;
            for (int i = 0; i < stddev2.length; i++) stddev2[i] = stddev2[i] * scale;
            threshold.put(stddev2);
        }
        // Limit maximum radius by pen
        int radius = (int) Math.round(w / 2 * QuPathPenManager.getPenManager().getPressure());
        if (radius == 0)
            return null;
        matMask.put(Scalar.ZERO);
        opencv_imgproc.circle(matMask, seed, radius, Scalar.ONE);
        opencv_imgproc.floodFill(matThreshold, matMask, seed, Scalar.ONE, null, threshold, threshold, 4 | (2 << 8) | opencv_imgproc.FLOODFILL_MASK_ONLY | opencv_imgproc.FLOODFILL_FIXED_RANGE);
        subtractPut(matMask, Scalar.ONE);
        if (strel == null)
            strel = opencv_imgproc.getStructuringElement(opencv_imgproc.MORPH_ELLIPSE, new Size(5, 5));
        opencv_imgproc.morphologyEx(matMask, matMask, opencv_imgproc.MORPH_CLOSE, strel);
    }
    MatVector contours = new MatVector();
    if (contourHierarchy == null)
        contourHierarchy = new Mat();
    opencv_imgproc.findContours(matMask, contours, contourHierarchy, opencv_imgproc.RETR_EXTERNAL, opencv_imgproc.CHAIN_APPROX_SIMPLE);
    // logger.trace("Contours: " + contours.size());
    List<Coordinate> coords = new ArrayList<>();
    List<Geometry> geometries = new ArrayList<>();
    for (Mat contour : contours.get()) {
        // Discard single pixels / lines
        if (contour.size().height() <= 2)
            continue;
        // Create a polygon geometry
        try (IntIndexer idxrContours = contour.createIndexer()) {
            for (long r = 0; r < idxrContours.size(0); r++) {
                int px = idxrContours.get(r, 0L, 0L);
                int py = idxrContours.get(r, 0L, 1L);
                double xx = (px - w / 2 - 1); // * downsample + x;
                double yy = (py - w / 2 - 1); // * downsample + y;
                coords.add(new Coordinate(xx, yy));
            }
        }
        if (coords.size() > 1) {
            // Ensure closed
            if (!coords.get(coords.size() - 1).equals(coords.get(0)))
                coords.add(coords.get(0));
            // Exclude single pixels
            var polygon = factory.createPolygon(coords.toArray(Coordinate[]::new));
            if (coords.size() > 5 || polygon.getArea() > 1)
                geometries.add(polygon);
        }
    }
    contours.close();
    if (geometries.isEmpty())
        return null;
    // Handle the fact that OpenCV contours are defined using the 'pixel center' by dilating the boundary
    var geometry = geometries.size() == 1 ? geometries.get(0) : GeometryCombiner.combine(geometries);
    geometry = geometry.buffer(0.5);
    // Transform to map to integer pixel locations in the full-resolution image
    var transform = new AffineTransformation().scale(downsample, downsample).translate(x, y);
    geometry = transform.transform(geometry);
    geometry = GeometryTools.roundCoordinates(geometry);
    geometry = GeometryTools.constrainToBounds(geometry, 0, 0, viewer.getServerWidth(), viewer.getServerHeight());
    if (geometry.getArea() <= 1)
        return null;
    long endTime = System.currentTimeMillis();
    logger.trace(getClass().getSimpleName() + " time: " + (endTime - startTime));
    if (pLast == null)
        pLast = new Point2D.Double(x, y);
    else
        pLast.setLocation(x, y);
    return geometry;
}
Also used : Mat(org.bytedeco.opencv.opencv_core.Mat) GeometryFactory(org.locationtech.jts.geom.GeometryFactory) Size(org.bytedeco.opencv.opencv_core.Size) ArrayList(java.util.ArrayList) ImageRegion(qupath.lib.regions.ImageRegion) FloatIndexer(org.bytedeco.javacpp.indexer.FloatIndexer) DataBufferByte(java.awt.image.DataBufferByte) BufferedImage(java.awt.image.BufferedImage) AffineTransformation(org.locationtech.jts.geom.util.AffineTransformation) MatVector(org.bytedeco.opencv.opencv_core.MatVector) IntIndexer(org.bytedeco.javacpp.indexer.IntIndexer) QuPathViewer(qupath.lib.gui.viewer.QuPathViewer) DoubleBuffer(java.nio.DoubleBuffer) ByteBuffer(java.nio.ByteBuffer) Point(org.bytedeco.opencv.opencv_core.Point) Graphics2D(java.awt.Graphics2D) HierarchyOverlay(qupath.lib.gui.viewer.overlays.HierarchyOverlay) Geometry(org.locationtech.jts.geom.Geometry) Coordinate(org.locationtech.jts.geom.Coordinate) PathOverlay(qupath.lib.gui.viewer.overlays.PathOverlay)
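
The heart of the MatVector usage above is the findContours call followed by converting each contour to a JTS polygon. A standalone sketch of just that conversion (maskToPolygons is a hypothetical helper, not part of QuPath, and coordinates are kept in mask space rather than recentred and rescaled as the tool does):

import java.util.ArrayList;
import java.util.List;
import org.bytedeco.javacpp.indexer.IntIndexer;
import org.bytedeco.opencv.global.opencv_imgproc;
import org.bytedeco.opencv.opencv_core.Mat;
import org.bytedeco.opencv.opencv_core.MatVector;
import org.locationtech.jts.geom.Coordinate;
import org.locationtech.jts.geom.Geometry;
import org.locationtech.jts.geom.GeometryFactory;

public class ContoursToGeometry {

    // Hypothetical helper: trace external contours of a binary mask and
    // build one closed JTS polygon per contour
    public static List<Geometry> maskToPolygons(Mat matMask) {
        GeometryFactory factory = new GeometryFactory();
        MatVector contours = new MatVector();
        Mat hierarchy = new Mat();
        opencv_imgproc.findContours(matMask, contours, hierarchy,
                opencv_imgproc.RETR_EXTERNAL, opencv_imgproc.CHAIN_APPROX_SIMPLE);
        List<Geometry> geometries = new ArrayList<>();
        for (Mat contour : contours.get()) {
            // Each contour is an n x 1 Mat with 2 channels holding (x, y) ints
            List<Coordinate> coords = new ArrayList<>();
            try (IntIndexer idx = contour.createIndexer()) {
                for (long r = 0; r < idx.size(0); r++)
                    coords.add(new Coordinate(idx.get(r, 0L, 0L), idx.get(r, 0L, 1L)));
            }
            // Close the ring explicitly, as JTS requires
            if (!coords.isEmpty() && !coords.get(coords.size() - 1).equals(coords.get(0)))
                coords.add(coords.get(0));
            // A valid linear ring needs at least 4 points (3 distinct vertices)
            if (coords.size() >= 4)
                geometries.add(factory.createPolygon(coords.toArray(Coordinate[]::new)));
        }
        hierarchy.close();
        contours.close();
        return geometries;
    }
}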

Example 9 with MatVector

use of org.bytedeco.opencv.opencv_core.MatVector in project qupath by qupath.

the class PixelClassifierTraining method updateTrainingData.

private synchronized ClassifierTrainingData updateTrainingData(Map<PathClass, Integer> labelMap, Collection<ImageData<BufferedImage>> imageDataCollection) throws IOException {
    if (imageDataCollection.isEmpty()) {
        resetTrainingData();
        return null;
    }
    Map<PathClass, Integer> labels = new LinkedHashMap<>();
    boolean hasLockedAnnotations = false;
    if (labelMap == null) {
        Set<PathClass> pathClasses = new TreeSet<>((p1, p2) -> p1.toString().compareTo(p2.toString()));
        for (var imageData : imageDataCollection) {
            // Get labels for all annotations
            Collection<PathObject> annotations = imageData.getHierarchy().getAnnotationObjects();
            for (var annotation : annotations) {
                if (isTrainableAnnotation(annotation, true)) {
                    var pathClass = annotation.getPathClass();
                    pathClasses.add(pathClass);
                    // We only use boundary classes for areas
                    if (annotation.getROI().isArea()) {
                        var boundaryClass = boundaryStrategy.getBoundaryClass(pathClass);
                        if (boundaryClass != null)
                            pathClasses.add(boundaryClass);
                    }
                } else if (isTrainableAnnotation(annotation, false))
                    hasLockedAnnotations = true;
            }
        }
        int lab = 0;
        for (PathClass pathClass : pathClasses) {
            Integer temp = Integer.valueOf(lab);
            labels.put(pathClass, temp);
            lab++;
        }
    } else {
        labels.putAll(labelMap);
    }
    List<Mat> allFeatures = new ArrayList<>();
    List<Mat> allTargets = new ArrayList<>();
    for (var imageData : imageDataCollection) {
        // Get features & targets for all the tiles that we need
        var featureServer = getFeatureServer(imageData);
        if (featureServer != null) {
            var tiles = featureServer.getTileRequestManager().getAllTileRequests();
            for (var tile : tiles) {
                var tileFeatures = getTileFeatures(tile.getRegionRequest(), featureServer, boundaryStrategy, labels);
                if (tileFeatures != null) {
                    allFeatures.add(tileFeatures.getFeatures());
                    allTargets.add(tileFeatures.getTargets());
                }
            }
        } else {
            logger.warn("Unable to generate features for {}", imageData);
        }
    }
    // We need at least two classes for anything very meaningful to happen
    int nTargets = labels.size();
    if (nTargets <= 1) {
        logger.warn("Unlocked annotations for at least two classes are required to train a classifier!");
        if (hasLockedAnnotations)
            logger.warn("Image contains annotations that *could* be used for training, except they are currently locked. Please unlock them if they should be used.");
        resetTrainingData();
        return null;
    }
    if (matTraining == null)
        matTraining = new Mat();
    if (matTargets == null)
        matTargets = new Mat();
    opencv_core.vconcat(new MatVector(allFeatures.toArray(Mat[]::new)), matTraining);
    opencv_core.vconcat(new MatVector(allTargets.toArray(Mat[]::new)), matTargets);
    logger.debug("Training data: {} x {}, Target data: {} x {}", matTraining.rows(), matTraining.cols(), matTargets.rows(), matTargets.cols());
    if (matTraining.rows() == 0) {
        logger.warn("No training data found - if you have training annotations, check the features are compatible with the current image.");
        return null;
    }
    return new ClassifierTrainingData(labels, matTraining, matTargets);
}
Also used : Mat(org.bytedeco.opencv.opencv_core.Mat) ArrayList(java.util.ArrayList) LinkedHashMap(java.util.LinkedHashMap) PathClass(qupath.lib.objects.classes.PathClass) PathObject(qupath.lib.objects.PathObject) TreeSet(java.util.TreeSet) MatVector(org.bytedeco.opencv.opencv_core.MatVector)
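
The MatVector usage here is the pair of vconcat calls, which stack the per-tile feature and target matrices into single training matrices. A minimal sketch of the same stacking with placeholder data:

import org.bytedeco.opencv.global.opencv_core;
import org.bytedeco.opencv.opencv_core.Mat;
import org.bytedeco.opencv.opencv_core.MatVector;
import org.bytedeco.opencv.opencv_core.Scalar;

public class VConcatDemo {
    public static void main(String[] args) {
        // Two 'tiles' of features: same column count, different row counts
        Mat features1 = new Mat(10, 4, opencv_core.CV_32FC1, Scalar.all(1));
        Mat features2 = new Mat(20, 4, opencv_core.CV_32FC1, Scalar.all(2));

        // Stack them vertically into a single training matrix
        Mat matTraining = new Mat();
        opencv_core.vconcat(new MatVector(features1, features2), matTraining);
        System.out.println(matTraining.rows() + " x " + matTraining.cols()); // 30 x 4
    }
}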

Example 10 with MatVector

use of org.bytedeco.opencv.opencv_core.MatVector in project qupath by qupath.

the class OpenCVTools method fillSmallHoles.

/**
 * Fill holes in a binary image (1-channel, 8-bit unsigned) with an area &lt;= maxArea.
 *
 * @param matBinary the binary image, modified in place
 * @param maxArea the maximum area of a hole to be filled
 */
public static void fillSmallHoles(Mat matBinary, double maxArea) {
    Mat matHoles = new Mat();
    invertBinary(matBinary, matHoles);
    MatVector contours = new MatVector();
    Mat hierarchy = new Mat();
    opencv_imgproc.findContours(matHoles, contours, hierarchy, opencv_imgproc.RETR_CCOMP, opencv_imgproc.CHAIN_APPROX_SIMPLE);
    Scalar color = Scalar.WHITE;
    int ind = 0;
    Point offset = new Point(0, 0);
    Indexer indexerHierarchy = hierarchy.createIndexer();
    for (int c = 0; c < contours.size(); c++) {
        Mat contour = contours.get(c);
        // TODO: Check hierarchy indexing after switch to JavaCPP!!
        if (indexerHierarchy.getDouble(0, ind, 3) >= 0 || opencv_imgproc.contourArea(contour) > maxArea) {
            ind++;
            continue;
        }
        opencv_imgproc.drawContours(matBinary, contours, c, color, -1, opencv_imgproc.LINE_8, null, Integer.MAX_VALUE, offset);
        ind++;
    }
}
Also used : Mat(org.bytedeco.opencv.opencv_core.Mat) ByteIndexer(org.bytedeco.javacpp.indexer.ByteIndexer) IntIndexer(org.bytedeco.javacpp.indexer.IntIndexer) ShortIndexer(org.bytedeco.javacpp.indexer.ShortIndexer) DoubleIndexer(org.bytedeco.javacpp.indexer.DoubleIndexer) UByteIndexer(org.bytedeco.javacpp.indexer.UByteIndexer) UShortIndexer(org.bytedeco.javacpp.indexer.UShortIndexer) FloatIndexer(org.bytedeco.javacpp.indexer.FloatIndexer) Indexer(org.bytedeco.javacpp.indexer.Indexer) MatVector(org.bytedeco.opencv.opencv_core.MatVector) Point(org.bytedeco.opencv.opencv_core.Point) Scalar(org.bytedeco.opencv.opencv_core.Scalar)
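
A minimal usage sketch, again assuming access through qupath.opencv.tools.OpenCVTools (the disk-with-a-hole test image and the area threshold are placeholders):

import org.bytedeco.opencv.global.opencv_core;
import org.bytedeco.opencv.global.opencv_imgproc;
import org.bytedeco.opencv.opencv_core.Mat;
import org.bytedeco.opencv.opencv_core.Point;
import org.bytedeco.opencv.opencv_core.Scalar;
import qupath.opencv.tools.OpenCVTools;

public class FillSmallHolesDemo {
    public static void main(String[] args) {
        // White disk with a small black hole punched in its center
        Mat matBinary = new Mat(100, 100, opencv_core.CV_8UC1, Scalar.ZERO);
        opencv_imgproc.circle(matBinary, new Point(50, 50), 30,
                Scalar.WHITE, -1, opencv_imgproc.LINE_8, 0);
        opencv_imgproc.circle(matBinary, new Point(50, 50), 3,
                Scalar.BLACK, -1, opencv_imgproc.LINE_8, 0);

        // Holes up to 100 px^2 are filled in place; larger holes are kept
        OpenCVTools.fillSmallHoles(matBinary, 100);
    }
}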

Aggregations

Mat (org.bytedeco.opencv.opencv_core.Mat)12 MatVector (org.bytedeco.opencv.opencv_core.MatVector)12 Point (org.bytedeco.opencv.opencv_core.Point)8 ArrayList (java.util.ArrayList)4 PointerScope (org.bytedeco.javacpp.PointerScope)3 DoubleIndexer (org.bytedeco.javacpp.indexer.DoubleIndexer)3 FloatIndexer (org.bytedeco.javacpp.indexer.FloatIndexer)3 IntIndexer (org.bytedeco.javacpp.indexer.IntIndexer)3 BufferedImage (java.awt.image.BufferedImage)2 ByteIndexer (org.bytedeco.javacpp.indexer.ByteIndexer)2 Indexer (org.bytedeco.javacpp.indexer.Indexer)2 ShortIndexer (org.bytedeco.javacpp.indexer.ShortIndexer)2 UByteIndexer (org.bytedeco.javacpp.indexer.UByteIndexer)2 UShortIndexer (org.bytedeco.javacpp.indexer.UShortIndexer)2 CompositeImage (ij.CompositeImage)1 ImagePlus (ij.ImagePlus)1 ImageStack (ij.ImageStack)1 ImageProcessor (ij.process.ImageProcessor)1 Graphics2D (java.awt.Graphics2D)1 Area (java.awt.geom.Area)1