Search in sources :

Example 1 with DescriptorMatcher

use of org.opencv.features2d.DescriptorMatcher in project seleniumRobot by bhecquet.

the class ImageDetector method detectCorrespondingZone.

/**
 * Compute the rectangle where the searched picture is and the rotation angle between both images.
 * Throws {@link ImageSearchException} if the picture is not found.
 * @deprecated Kept here for information, but OpenCV 3 no longer ships SURF in its Java build
 */
public void detectCorrespondingZone() {
    Mat objectImageMat = Imgcodecs.imread(objectImage.getAbsolutePath(), Imgcodecs.CV_LOAD_IMAGE_COLOR);
    Mat sceneImageMat = Imgcodecs.imread(sceneImage.getAbsolutePath(), Imgcodecs.CV_LOAD_IMAGE_COLOR);
    FeatureDetector surf = FeatureDetector.create(FeatureDetector.SURF);
    MatOfKeyPoint objectKeyPoints = new MatOfKeyPoint();
    MatOfKeyPoint sceneKeyPoints = new MatOfKeyPoint();
    surf.detect(objectImageMat, objectKeyPoints);
    surf.detect(sceneImageMat, sceneKeyPoints);
    DescriptorExtractor surfExtractor = DescriptorExtractor.create(DescriptorExtractor.SURF);
    Mat objectDescriptor = new Mat();
    Mat sceneDescriptor = new Mat();
    surfExtractor.compute(objectImageMat, objectKeyPoints, objectDescriptor);
    surfExtractor.compute(sceneImageMat, sceneKeyPoints, sceneDescriptor);
    try {
        // draw the detected key points on the object image and write them to a temporary file for debugging
        Mat outImage = new Mat();
        Features2d.drawKeypoints(objectImageMat, objectKeyPoints, outImage);
        File tmpImg = File.createTempFile("img", ".png");
        tmpImg.deleteOnExit();
        String tempFile = tmpImg.getAbsolutePath();
        writeComparisonPictureToFile(tempFile, outImage);
    } catch (IOException e) {
        logger.error("Could not write keypoint debug picture", e);
    }
    // http://stackoverflow.com/questions/29828849/flann-for-opencv-java
    DescriptorMatcher matcher = DescriptorMatcher.create(DescriptorMatcher.FLANNBASED);
    MatOfDMatch matches = new MatOfDMatch();
    if (objectKeyPoints.toList().isEmpty()) {
        throw new ImageSearchException("No keypoints in object to search, check it's not uniformly coloured: " + objectImage.getAbsolutePath());
    }
    if (sceneKeyPoints.toList().isEmpty()) {
        throw new ImageSearchException("No keypoints in scene, check it's not uniformly coloured: " + sceneImage.getAbsolutePath());
    }
    // the FLANN matcher requires CV_32F descriptors; convert if the extractor returned another type
    if (objectDescriptor.type() != CvType.CV_32F) {
        objectDescriptor.convertTo(objectDescriptor, CvType.CV_32F);
    }
    if (sceneDescriptor.type() != CvType.CV_32F) {
        sceneDescriptor.convertTo(sceneDescriptor, CvType.CV_32F);
    }
    matcher.match(objectDescriptor, sceneDescriptor, matches);
    List<DMatch> matchesList = matches.toList();
    double maxDist = 0;
    double minDist = 10000;
    for (DMatch match : matchesList) {
        double dist = match.distance;
        if (dist < minDist) {
            minDist = dist;
        }
        if (dist > maxDist) {
            maxDist = dist;
        }
    }
    logger.debug("-- Max dist : " + maxDist);
    logger.debug("-- Min dist : " + minDist);
    LinkedList<DMatch> goodMatches = new LinkedList<>();
    MatOfDMatch gm = new MatOfDMatch();
    for (DMatch match : matchesList) {
        // keep only matches whose descriptor distance is below the configured threshold
        if (match.distance < detectionThreshold) {
            goodMatches.addLast(match);
        }
    }
    gm.fromList(goodMatches);
    Features2d.drawMatches(objectImageMat, objectKeyPoints, sceneImageMat, sceneKeyPoints, gm, imgMatch, Scalar.all(-1), Scalar.all(-1), new MatOfByte(), Features2d.NOT_DRAW_SINGLE_POINTS);
    if (goodMatches.isEmpty()) {
        throw new ImageSearchException("Cannot find matching zone");
    }
    LinkedList<Point> objList = new LinkedList<>();
    LinkedList<Point> sceneList = new LinkedList<>();
    List<KeyPoint> objectKeyPointsList = objectKeyPoints.toList();
    List<KeyPoint> sceneKeyPointsList = sceneKeyPoints.toList();
    for (int i = 0; i < goodMatches.size(); i++) {
        objList.addLast(objectKeyPointsList.get(goodMatches.get(i).queryIdx).pt);
        sceneList.addLast(sceneKeyPointsList.get(goodMatches.get(i).trainIdx).pt);
    }
    MatOfPoint2f obj = new MatOfPoint2f();
    obj.fromList(objList);
    MatOfPoint2f scene = new MatOfPoint2f();
    scene.fromList(sceneList);
    // method 0 fits a least-squares homography on all points; Calib3d.RANSAC could be used instead to reject outliers
    Mat hg = Calib3d.findHomography(obj, scene, 0, 5);
    Mat objectCorners = new Mat(4, 1, CvType.CV_32FC2);
    Mat sceneCorners = new Mat(4, 1, CvType.CV_32FC2);
    objectCorners.put(0, 0, 0, 0);
    objectCorners.put(1, 0, objectImageMat.cols(), 0);
    objectCorners.put(2, 0, objectImageMat.cols(), objectImageMat.rows());
    objectCorners.put(3, 0, 0, objectImageMat.rows());
    Core.perspectiveTransform(objectCorners, sceneCorners, hg);
    // corners of the object image
    Point po1 = new Point(objectCorners.get(0, 0));
    Point po2 = new Point(objectCorners.get(1, 0));
    Point po3 = new Point(objectCorners.get(2, 0));
    Point po4 = new Point(objectCorners.get(3, 0));
    // corners of the object as projected into the scene
    // top left
    Point p1 = new Point(sceneCorners.get(0, 0));
    // top right
    Point p2 = new Point(sceneCorners.get(1, 0));
    // bottom right
    Point p3 = new Point(sceneCorners.get(2, 0));
    // bottom left
    Point p4 = new Point(sceneCorners.get(3, 0));
    logger.debug(po1);
    logger.debug(po2);
    logger.debug(po3);
    logger.debug(po4);
    // top left
    logger.debug(p1);
    // top right
    logger.debug(p2);
    // bottom right
    logger.debug(p3);
    // bottom left
    logger.debug(p4);
    if (debug) {
        try {
            // shift the corners right by the object width: drawMatches places the scene image to the right of the object image
            p1.set(new double[] { p1.x + objectImageMat.cols(), p1.y });
            p2.set(new double[] { p2.x + objectImageMat.cols(), p2.y });
            p3.set(new double[] { p3.x + objectImageMat.cols(), p3.y });
            p4.set(new double[] { p4.x + objectImageMat.cols(), p4.y });
            Imgproc.line(imgMatch, p1, p2, new Scalar(0, 255, 0), 1);
            Imgproc.line(imgMatch, p2, p3, new Scalar(0, 255, 0), 1);
            Imgproc.line(imgMatch, p3, p4, new Scalar(0, 255, 0), 1);
            Imgproc.line(imgMatch, p4, p1, new Scalar(0, 255, 0), 1);
            showResultingPicture(imgMatch);
        } catch (IOException e) {
            logger.error("Could not display debug picture", e);
        }
    }
    // check rotation angles
    checkRotationAngle(p1, p2, p3, p4, po1, po2, po3, po4);
    // rework the scene points now that we are sure the object rotation is 0, 90, 180 or 270°
    reworkOnScenePoints(p1, p2, p3, p4);
    // check that the detected zone keeps the same aspect ratio as the searched object
    checkDetectionZoneAspectRatio(p1, p2, p4, po1, po2, po4);
    recordDetectedRectangle(p1, p2, p3, p4);
}
Also used : Mat(org.opencv.core.Mat) MatOfKeyPoint(org.opencv.core.MatOfKeyPoint) MatOfPoint2f(org.opencv.core.MatOfPoint2f) ImageSearchException(com.seleniumtests.customexception.ImageSearchException) IOException(java.io.IOException) Point(org.opencv.core.Point) KeyPoint(org.opencv.core.KeyPoint) LinkedList(java.util.LinkedList) FeatureDetector(org.opencv.features2d.FeatureDetector) Scalar(org.opencv.core.Scalar) DescriptorExtractor(org.opencv.features2d.DescriptorExtractor) MatOfDMatch(org.opencv.core.MatOfDMatch) DMatch(org.opencv.core.DMatch) DescriptorMatcher(org.opencv.features2d.DescriptorMatcher) MatOfByte(org.opencv.core.MatOfByte) File(java.io.File)
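
A note on the findHomography call above: method 0 fits a least-squares homography over all point pairs, so a single bad match can skew the detected zone. Below is a minimal sketch of the RANSAC variant hinted at in the comment; the helper name is hypothetical, and the overload taking an output inlier mask is assumed to be available in your OpenCV Java build (recent 3.x builds expose it). The 5-pixel reprojection threshold is carried over from the original call.

import org.opencv.calib3d.Calib3d;
import org.opencv.core.Mat;
import org.opencv.core.MatOfPoint2f;

public class HomographyHelper {

    // Hypothetical helper: estimate the homography with RANSAC and log how
    // many point pairs survived as inliers instead of trusting all of them.
    public static Mat findHomographyWithRansac(MatOfPoint2f obj, MatOfPoint2f scene) {
        Mat inlierMask = new Mat();
        Mat hg = Calib3d.findHomography(obj, scene, Calib3d.RANSAC, 5, inlierMask);
        int inliers = 0;
        for (int i = 0; i < inlierMask.rows(); i++) {
            // mask rows are non-zero for correspondences kept by RANSAC
            if (inlierMask.get(i, 0)[0] != 0) {
                inliers++;
            }
        }
        System.out.println(inliers + "/" + inlierMask.rows() + " matches kept as inliers");
        return hg;
    }
}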

Example 2 with DescriptorMatcher

use of org.opencv.features2d.DescriptorMatcher in project kifu-recorder by leonardost.

the class BoardDetectorByImageSimilarity method generateDescriptorMatches.

private void generateDescriptorMatches(Mat image1, Mat image2) {
    Mat processedImage1 = image1.clone();
    Mat processedImage2 = image2.clone();
    // Imgcodecs.imwrite("processing/difference_between_" + imageIndex + "_1.jpg", processedImage1);
    // Imgcodecs.imwrite("processing/difference_between_" + imageIndex + "_2.jpg", processedImage2);
    FeatureDetector detector = FeatureDetector.create(FeatureDetector.ORB);
    MatOfKeyPoint keypointsA = new MatOfKeyPoint();
    MatOfKeyPoint keypointsB = new MatOfKeyPoint();
    detector.detect(processedImage1, keypointsA);
    detector.detect(processedImage2, keypointsB);
    DescriptorExtractor extractor = DescriptorExtractor.create(DescriptorExtractor.ORB);
    Mat descriptorsA = new Mat();
    Mat descriptorsB = new Mat();
    extractor.compute(processedImage1, keypointsA, descriptorsA);
    extractor.compute(processedImage2, keypointsB, descriptorsB);
    DescriptorMatcher matcher = DescriptorMatcher.create(DescriptorMatcher.BRUTEFORCE_HAMMING);
    MatOfDMatch matches = new MatOfDMatch();
    matcher.match(descriptorsA, descriptorsB, matches);
    matchesList = matches.toList();
    Collections.sort(matchesList, new Comparator<DMatch>() {

        @Override
        public int compare(DMatch a, DMatch b) {
            // ascending descriptor distance: best matches first
            return Float.compare(a.distance, b.distance);
        }
    });
}
Also used : Mat(org.opencv.core.Mat) DescriptorExtractor(org.opencv.features2d.DescriptorExtractor) MatOfKeyPoint(org.opencv.core.MatOfKeyPoint) MatOfDMatch(org.opencv.core.MatOfDMatch) DMatch(org.opencv.core.DMatch) DescriptorMatcher(org.opencv.features2d.DescriptorMatcher) FeatureDetector(org.opencv.features2d.FeatureDetector)
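
The detector stores the sorted result in the matchesList field; how it is scored is not shown in this snippet. As a hedged sketch of one plausible consumer, the mean distance of the best N matches can serve as a dissimilarity measure between the two board images. The class and method below are illustrative, not taken from kifu-recorder.

import java.util.List;

import org.opencv.core.DMatch;

public class MatchScore {

    // Illustrative helper: mean descriptor distance of the n best matches;
    // smaller values mean the two images are more similar.
    public static double averageDistanceOfBestMatches(List<DMatch> sortedMatches, int n) {
        int count = Math.min(n, sortedMatches.size());
        if (count == 0) {
            return Double.MAX_VALUE; // no matches: treat as maximally dissimilar
        }
        double sum = 0;
        for (int i = 0; i < count; i++) {
            sum += sortedMatches.get(i).distance;
        }
        return sum / count;
    }
}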

Example 3 with DescriptorMatcher

use of org.opencv.features2d.DescriptorMatcher in project Aki by letmeNo1.

the class getKnnMatchesMultiple method main.

public static void main(String[] args) throws Exception {
    System.loadLibrary(org.opencv.core.Core.NATIVE_LIBRARY_NAME);
    Mat imgObject = imread("C:\\Users\\CNHAHUA16\\Desktop\\2.png", IMREAD_GRAYSCALE);
    // load the scene image in which to locate the target
    Mat imgScene = imread("C:\\Users\\CNHAHUA16\\Desktop\\1.png", IMREAD_GRAYSCALE);
    if (imgObject.empty() || imgScene.empty()) {
        System.err.println("Cannot read images!");
        System.exit(0);
    }
    // create the SIFT detector
    SIFT sift = SIFT.create(0, 3, 0.04, 10, 1.6);
    // key points of imgObject
    MatOfKeyPoint keypointsObject = new MatOfKeyPoint();
    // key points of imgScene
    MatOfKeyPoint keypointsScene = new MatOfKeyPoint();
    // detect key points and compute the descriptor matrices of both images
    Mat descriptorsObject = new Mat(), descriptorsScene = new Mat();
    sift.detectAndCompute(imgObject, new Mat(), keypointsObject, descriptorsObject);
    sift.detectAndCompute(imgScene, new Mat(), keypointsScene, descriptorsScene);
    // use a FLANN-based matcher and filter the matches that qualify
    DescriptorMatcher matcher = DescriptorMatcher.create(DescriptorMatcher.FLANNBASED);
    List<MatOfDMatch> knnMatches = new ArrayList<>();
    matcher.knnMatch(descriptorsObject, descriptorsScene, knnMatches, 2);
    // ratio test threshold
    float ratioThresh = 0.4f;
    List<DMatch> listOfGoodMatches = new ArrayList<>();
    for (MatOfDMatch knnMatch : knnMatches) {
        // the smaller the distance, the better the match
        if (knnMatch.rows() > 1) {
            DMatch[] matches = knnMatch.toArray();
            if (matches[0].distance < ratioThresh * matches[1].distance) {
                listOfGoodMatches.add(matches[0]);
            }
        }
    }
    if (listOfGoodMatches.isEmpty()) {
        throw new RuntimeException("No match can be found");
    }
    MatOfDMatch goodMatches = new MatOfDMatch();
    // convert the best matches back to a MatOfDMatch
    goodMatches.fromList(listOfGoodMatches);
    // convert the key points of the object image to a list
    List<KeyPoint> listOfKeypointsObject = keypointsObject.toList();
    // collect the coordinates of the matched key points in the object image
    ArrayList<float[]> dataSet = new ArrayList<>();
    for (DMatch listOfGoodMatch : listOfGoodMatches) {
        dataSet.add(new float[] { (float) listOfKeypointsObject.get(listOfGoodMatch.queryIdx).pt.x, (float) listOfKeypointsObject.get(listOfGoodMatch.queryIdx).pt.y });
    }
    // cluster the matched coordinates into 3 groups and mark each cluster centre
    KMeansRun kRun = new KMeansRun(3, dataSet);
    Set<Cluster> clusterSet = kRun.run();
    ArrayList<Point> pointArray = new ArrayList<>();
    for (Cluster cluster : clusterSet) {
        Point point = new Point(cluster.getCenter().getlocalArray()[0], cluster.getCenter().getlocalArray()[1]);
        pointArray.add(point);
        System.out.println(cluster);
        System.out.println(cluster.getCenter());
        circle(imgObject, new Point(cluster.getCenter().getlocalArray()[0], cluster.getCenter().getlocalArray()[1]), 10, new Scalar(0, 0, 255), 3, FILLED);
    }
    imshow("location", imgObject);
    imwrite("C:\\Users\\CNHAHUA16\\Desktop\\3.jpg", imgObject);
    System.out.println(pointArray);
}
Also used : SIFT(org.opencv.features2d.SIFT) ArrayList(java.util.ArrayList) Cluster(aki.OpenCV.KMean.Cluster) KMeansRun(aki.OpenCV.KMean.KMeansRun) DescriptorMatcher(org.opencv.features2d.DescriptorMatcher)
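
Examples 3, 4 and 5 repeat the same ratio-test loop verbatim. As a refactoring sketch (not code from the Aki project), the filter can be extracted into a small helper:

import java.util.ArrayList;
import java.util.List;

import org.opencv.core.DMatch;
import org.opencv.core.MatOfDMatch;

public class RatioTest {

    // Refactoring sketch: Lowe's ratio test over raw knnMatch output.
    // A match is kept only when its best distance is clearly smaller than
    // the second-best one, i.e. the correspondence is unambiguous.
    public static List<DMatch> filter(List<MatOfDMatch> knnMatches, float ratioThresh) {
        List<DMatch> good = new ArrayList<>();
        for (MatOfDMatch knnMatch : knnMatches) {
            if (knnMatch.rows() > 1) {
                DMatch[] pair = knnMatch.toArray();
                if (pair[0].distance < ratioThresh * pair[1].distance) {
                    good.add(pair[0]);
                }
            }
        }
        return good;
    }
}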

Example 4 with DescriptorMatcher

use of org.opencv.features2d.DescriptorMatcher in project Aki by letmeNo1.

the class CallOpenCV method callKnnMatches.

public Point callKnnMatches(Mat imgObject, String imgScenePath, float ratioThreshValue) {
    // the object image used for locating is supplied by the caller
    if (imgObject.empty()) {
        System.err.println("Cannot read images!");
        System.exit(0);
    }
    Mat imgScene = imread(imgScenePath);
    if (imgScene.empty()) {
        System.err.println("Cannot read images!");
        System.exit(0);
    }
    // create the SIFT detector
    SIFT sift = SIFT.create(0, 3, 0.04, 10, 1.6);
    // key points of imgObject
    MatOfKeyPoint keypointsObject = new MatOfKeyPoint();
    // key points of imgScene
    MatOfKeyPoint keypointsScene = new MatOfKeyPoint();
    // detect key points and compute the descriptor matrices of both images
    Mat descriptorsObject = new Mat(), descriptorsScene = new Mat();
    sift.detectAndCompute(imgObject, new Mat(), keypointsObject, descriptorsObject);
    sift.detectAndCompute(imgScene, new Mat(), keypointsScene, descriptorsScene);
    // use a FLANN-based matcher and filter the matches that qualify
    DescriptorMatcher matcher = DescriptorMatcher.create(DescriptorMatcher.FLANNBASED);
    List<MatOfDMatch> knnMatches = new ArrayList<>();
    matcher.knnMatch(descriptorsObject, descriptorsScene, knnMatches, 2);
    // filter matches with the supplied ratio threshold
    List<DMatch> listOfGoodMatches = new ArrayList<>();
    for (MatOfDMatch knnMatch : knnMatches) {
        // the smaller the distance, the better the match
        if (knnMatch.rows() > 1) {
            DMatch[] matches = knnMatch.toArray();
            if (matches[0].distance < ratioThreshValue * matches[1].distance) {
                listOfGoodMatches.add(matches[0]);
            }
        }
    }
    if (listOfGoodMatches.size() <= 1) {
        // not enough good matches: return (-1, -1) as a "not found" sentinel
        float x = -1, y = -1;
        return new Point(x, y);
    } else {
        MatOfDMatch goodMatches = new MatOfDMatch();
        // convert the best matches back to a MatOfDMatch
        goodMatches.fromList(listOfGoodMatches);
        // convert the key points of the object image to a list
        List<KeyPoint> listOfKeypointsObject = keypointsObject.toList();
        // collect the coordinates of the matched key points in the object image
        ArrayList<DataNode> dpoints = new ArrayList<>();
        for (int i = 0; i < listOfGoodMatches.size(); i++) {
            dpoints.add(new DataNode("Point-" + i, new double[] { listOfKeypointsObject.get(listOfGoodMatches.get(i).queryIdx).pt.x, listOfKeypointsObject.get(listOfGoodMatches.get(i).queryIdx).pt.y }));
        }
        // use the LOF (Local Outlier Factor) algorithm to filter out mismatched points
        float x = 0, y = 0;
        LOF lof = new LOF();
        List<DataNode> nodeList = lof.getOutlierNode(dpoints);
        int j = 0;
        // sum the coordinates of the last 20% of the ranked nodes
        for (int i = 0; i < nodeList.size(); i++) {
            if (i >= Math.round(nodeList.size() * 0.8)) {
                j++;
                x += nodeList.get(i).getDimensioin()[0];
                y += nodeList.get(i).getDimensioin()[1];
            }
        }
        if (j == 0) {
            // with exactly 2 matches the 80% cut selects nothing; fall back to the last-ranked node
            double[] fallback = nodeList.get(nodeList.size() - 1).getDimensioin();
            return new Point(fallback[0], fallback[1]);
        }
        // average the coordinates; the accumulators start at 0 (not -1) so the mean is not skewed
        x = x / j;
        y = y / j;
        return new Point(x, y);
    }
}
Also used : SIFT(org.opencv.features2d.SIFT) LOF(aki.OpenCV.LOF.LOF) ArrayList(java.util.ArrayList) DescriptorMatcher(org.opencv.features2d.DescriptorMatcher) DataNode(aki.OpenCV.LOF.DataNode)
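
A hypothetical call site for callKnnMatches follows. The returned point is built from queryIdx key points, i.e. it lies in imgObject's coordinate space, which suggests imgObject is the capture being searched and imgScenePath points to the template. The paths and the 0.4f ratio threshold are placeholders, and a no-argument CallOpenCV constructor is assumed; the (-1, -1) sentinel must be checked before the point is used.

import org.opencv.core.Core;
import org.opencv.core.Mat;
import org.opencv.core.Point;
import org.opencv.imgcodecs.Imgcodecs;

public class CallKnnMatchesDemo {

    public static void main(String[] args) {
        System.loadLibrary(Core.NATIVE_LIBRARY_NAME);
        // placeholder paths and ratio threshold, for illustration only
        Mat screenCapture = Imgcodecs.imread("screenshot.png");
        Point location = new CallOpenCV().callKnnMatches(screenCapture, "template.png", 0.4f);
        if (location.x < 0 && location.y < 0) {
            // (-1, -1) is returned when there are too few good matches
            System.out.println("Target not found");
        } else {
            System.out.println("Target located around " + location);
        }
    }
}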

Example 5 with DescriptorMatcher

use of org.opencv.features2d.DescriptorMatcher in project Aki by letmeNo1.

the class CallOpenCV method callKnnMatchesMultiple.

public ArrayList<Point> callKnnMatchesMultiple(Mat imgObject, String imgScenePath, float ratioThreshValue, int k) {
    // imgObject (the original image) is supplied by the caller
    // load the image used for locating the target
    Mat imgScene = imread(imgScenePath);
    if (imgObject.empty() || imgScene.empty()) {
        System.err.println("Cannot read images!");
        System.exit(0);
    }
    // create the SIFT detector
    SIFT sift = SIFT.create(0, 3, 0.04, 10, 1.6);
    // key points of imgObject
    MatOfKeyPoint keypointsObject = new MatOfKeyPoint();
    // key points of imgScene
    MatOfKeyPoint keypointsScene = new MatOfKeyPoint();
    // detect key points and compute the descriptor matrices of both images
    Mat descriptorsObject = new Mat(), descriptorsScene = new Mat();
    sift.detectAndCompute(imgObject, new Mat(), keypointsObject, descriptorsObject);
    sift.detectAndCompute(imgScene, new Mat(), keypointsScene, descriptorsScene);
    // use a FLANN-based matcher and filter the matches that qualify
    DescriptorMatcher matcher = DescriptorMatcher.create(DescriptorMatcher.FLANNBASED);
    List<MatOfDMatch> knnMatches = new ArrayList<>();
    matcher.knnMatch(descriptorsObject, descriptorsScene, knnMatches, 2);
    // filter matches with the supplied ratio threshold
    List<DMatch> listOfGoodMatches = new ArrayList<>();
    for (MatOfDMatch knnMatch : knnMatches) {
        // the smaller the distance, the better the match
        if (knnMatch.rows() > 1) {
            DMatch[] matches = knnMatch.toArray();
            if (matches[0].distance < ratioThreshValue * matches[1].distance) {
                listOfGoodMatches.add(matches[0]);
            }
        }
    }
    if (listOfGoodMatches.isEmpty()) {
        return new ArrayList<>();
    }
    MatOfDMatch goodMatches = new MatOfDMatch();
    // convert the best matches back to a MatOfDMatch
    goodMatches.fromList(listOfGoodMatches);
    // convert the key points of the object image to a list
    List<KeyPoint> listOfKeypointsObject = keypointsObject.toList();
    // collect the coordinates of the matched key points in the object image
    ArrayList<float[]> dataSet = new ArrayList<>();
    for (DMatch listOfGoodMatch : listOfGoodMatches) {
        dataSet.add(new float[] { (float) listOfKeypointsObject.get(listOfGoodMatch.queryIdx).pt.x, (float) listOfKeypointsObject.get(listOfGoodMatch.queryIdx).pt.y });
    }
    // cluster the matched coordinates into k groups, one per expected occurrence
    KMeansRun kRun = new KMeansRun(k, dataSet);
    Set<Cluster> clusterSet = kRun.run();
    ArrayList<Point> pointArray = new ArrayList<>();
    for (Cluster cluster : clusterSet) {
        Point point = new Point(cluster.getCenter().getlocalArray()[0], cluster.getCenter().getlocalArray()[1]);
        pointArray.add(point);
    }
    return pointArray;
}
Also used : SIFT(org.opencv.features2d.SIFT) ArrayList(java.util.ArrayList) Cluster(aki.OpenCV.KMean.Cluster) KMeansRun(aki.OpenCV.KMean.KMeansRun) DescriptorMatcher(org.opencv.features2d.DescriptorMatcher)
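
A matching sketch for callKnnMatchesMultiple, which returns one cluster centre per detected occurrence (up to k of them). As in Example 3, the centres come from queryIdx key points and are therefore drawn on the image passed as imgObject; the paths, threshold and k below are placeholders.

import java.util.ArrayList;

import org.opencv.core.Core;
import org.opencv.core.Mat;
import org.opencv.core.Point;
import org.opencv.core.Scalar;
import org.opencv.imgcodecs.Imgcodecs;
import org.opencv.imgproc.Imgproc;

public class CallKnnMatchesMultipleDemo {

    public static void main(String[] args) {
        System.loadLibrary(Core.NATIVE_LIBRARY_NAME);
        // placeholder inputs: look for up to 3 occurrences of the template
        Mat screenCapture = Imgcodecs.imread("screenshot.png");
        ArrayList<Point> centers = new CallOpenCV()
                .callKnnMatchesMultiple(screenCapture, "template.png", 0.4f, 3);
        // mark each detected occurrence, as Example 3 does with Imgproc.circle
        for (Point center : centers) {
            Imgproc.circle(screenCapture, center, 10, new Scalar(0, 0, 255), 3);
        }
        Imgcodecs.imwrite("located.png", screenCapture);
        System.out.println(centers.size() + " occurrence(s) found: " + centers);
    }
}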

Aggregations

DescriptorMatcher (org.opencv.features2d.DescriptorMatcher) 5
ArrayList (java.util.ArrayList) 3
SIFT (org.opencv.features2d.SIFT) 3
Cluster (aki.OpenCV.KMean.Cluster) 2
KMeansRun (aki.OpenCV.KMean.KMeansRun) 2
DMatch (org.opencv.core.DMatch) 2
Mat (org.opencv.core.Mat) 2
MatOfDMatch (org.opencv.core.MatOfDMatch) 2
MatOfKeyPoint (org.opencv.core.MatOfKeyPoint) 2
DescriptorExtractor (org.opencv.features2d.DescriptorExtractor) 2
FeatureDetector (org.opencv.features2d.FeatureDetector) 2
DataNode (aki.OpenCV.LOF.DataNode) 1
LOF (aki.OpenCV.LOF.LOF) 1
ImageSearchException (com.seleniumtests.customexception.ImageSearchException) 1
File (java.io.File) 1
IOException (java.io.IOException) 1
LinkedList (java.util.LinkedList) 1
KeyPoint (org.opencv.core.KeyPoint) 1
MatOfByte (org.opencv.core.MatOfByte) 1
MatOfPoint2f (org.opencv.core.MatOfPoint2f) 1