
Example 6 with Rect

use of org.opencv.core.Rect in project Frankenstein by olir.

the class SlideShowInput method process.

@Override
public Mat process(Mat sourceFrame, int frameId, FilterContext context) {
    // Map the 1-based frame id to a slide index.
    int sid = (frameId - 1) / (fps * fpSlide);
    if (sid < slides.size()) {
        Slide s = slides.get(sid);
        File[] f = s.getFiles();
        // Read the (left) slide image and scale it to half-frame size.
        Mat img = Imgcodecs.imread(f[0].getAbsolutePath(), Imgcodecs.CV_LOAD_IMAGE_COLOR);
        img.convertTo(img, CvType.CV_8UC3);
        Imgproc.resize(img, tmpFrame, new Size((double) smallWidth, (double) smallHeight));
        // new Mat(newFrame, roi) is a view into newFrame, so copyTo writes the left half in place.
        Rect roi = new Rect(0, 0, smallWidth, smallHeight);
        tmpFrame.copyTo(new Mat(newFrame, roi));
        // In 3D mode with a second (right-eye) image available, load it; otherwise the left image is reused.
        if (mode3D && f.length > 1) {
            img = Imgcodecs.imread(f[1].getAbsolutePath(), Imgcodecs.CV_LOAD_IMAGE_COLOR);
            img.convertTo(img, CvType.CV_8UC3);
            Imgproc.resize(img, tmpFrame, new Size((double) smallWidth, (double) smallHeight));
        }
        // Copy whatever tmpFrame now holds into the right half.
        roi = new Rect(smallWidth, 0, smallWidth, smallHeight);
        tmpFrame.copyTo(new Mat(newFrame, roi));
    } else {
        // Past the last slide: emit a black frame.
        newFrame.setTo(new Scalar(0, 0, 0, 0));
    }
    return newFrame;
}
Also used : Mat(org.opencv.core.Mat) Rect(org.opencv.core.Rect) Size(org.opencv.core.Size) File(java.io.File) Scalar(org.opencv.core.Scalar)
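
The example composes a side-by-side frame by treating new Mat(newFrame, roi) as a view into newFrame. A minimal sketch of that submatrix pattern, assuming the OpenCV native library has already been loaded; the class name and dimensions are illustrative, not from the project.

import org.opencv.core.CvType;
import org.opencv.core.Mat;
import org.opencv.core.Rect;
import org.opencv.core.Scalar;

public class SideBySideSketch {
    public static void main(String[] args) {
        int smallWidth = 320, smallHeight = 240;
        // Target frame is twice as wide as one slide image.
        Mat frame = new Mat(smallHeight, smallWidth * 2, CvType.CV_8UC3, new Scalar(0, 0, 0));
        Mat left = new Mat(smallHeight, smallWidth, CvType.CV_8UC3, new Scalar(255, 0, 0));
        // new Mat(parent, roi) aliases a region of parent, so copyTo writes into frame directly.
        left.copyTo(new Mat(frame, new Rect(0, 0, smallWidth, smallHeight)));
        left.copyTo(new Mat(frame, new Rect(smallWidth, 0, smallWidth, smallHeight)));
        System.out.println("composed frame: " + frame.size());
    }
}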

Example 7 with Rect

use of org.opencv.core.Rect in project Auto.js by hyb1996.

the class TemplateMatching method fastTemplateMatching.

/**
 * Fast template matching using an image pyramid.
 *
 * @param img             the source image
 * @param template        the template image
 * @param matchMethod     the matching method
 * @param weakThreshold   weak threshold, checked in every round of matching; if the similarity
 *                        falls below it, matching stops.
 * @param strictThreshold strict threshold, used to validate the final result; if the similarity
 *                        in any round exceeds it, that match is returned immediately.
 * @param maxLevel        number of levels in the image pyramid
 * @return the match location in the original image, or null if no match reaches the strict threshold
 */
public static Point fastTemplateMatching(Mat img, Mat template, int matchMethod, float weakThreshold, float strictThreshold, int maxLevel) {
    TimingLogger logger = new TimingLogger(LOG_TAG, "fast_tm");
    if (maxLevel == MAX_LEVEL_AUTO) {
        // Automatically select the number of pyramid levels
        maxLevel = selectPyramidLevel(img, template);
        logger.addSplit("selectPyramidLevel:" + maxLevel);
    }
    // Holds the location of the template in the source image found in each round
    Point p = null;
    Mat matchResult;
    double similarity = 0;
    boolean isFirstMatching = true;
    for (int level = maxLevel; level >= 0; level--) {
        // Scale the images to this pyramid level
        Mat src = getPyramidDownAtLevel(img, level);
        Mat currentTemplate = getPyramidDownAtLevel(template, level);
        // If the previous round produced no match, decide whether to stop matching
        if (p == null) {
            // If this is not the first round and shouldContinueMatching is not satisfied, stop matching (return null)
            if (!isFirstMatching && !shouldContinueMatching(level, maxLevel)) {
                break;
            }
            matchResult = matchTemplate(src, currentTemplate, matchMethod);
            Pair<Point, Double> bestMatched = getBestMatched(matchResult, matchMethod, weakThreshold);
            p = bestMatched.first;
            similarity = bestMatched.second;
        } else {
            // Use the previous round's match point to compute this round's search region
            Rect r = getROI(p, src, currentTemplate);
            matchResult = matchTemplate(new Mat(src, r), currentTemplate, matchMethod);
            Pair<Point, Double> bestMatched = getBestMatched(matchResult, matchMethod, weakThreshold);
            // Weak threshold not met; return null
            if (bestMatched.second < weakThreshold) {
            // p = null;
            // break;
            }
            p = bestMatched.first;
            similarity = bestMatched.second;
            p.x += r.x;
            p.y += r.y;
        }
        // Strict threshold met; return the current result
        if (similarity >= strictThreshold) {
            pyrUp(p, level);
            break;
        }
        logger.addSplit("level:" + level + " point:" + p);
        isFirstMatching = false;
    }
    logger.addSplit("result:" + p);
    logger.dumpToLog();
    if (similarity < strictThreshold) {
        return null;
    }
    return p;
}
Also used : Mat(org.opencv.core.Mat) Rect(org.opencv.core.Rect) TimingLogger(android.util.TimingLogger) Point(org.opencv.core.Point)
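
A hedged usage sketch for the method above. It assumes the OpenCV native library is loaded, that MAX_LEVEL_AUTO is accessible to callers, and that grayscale input with TM_CCOEFF_NORMED and thresholds of 0.6/0.85 are reasonable choices; the file names and class name are placeholders, not project defaults.

import org.opencv.core.Mat;
import org.opencv.core.Point;
import org.opencv.imgcodecs.Imgcodecs;
import org.opencv.imgproc.Imgproc;
// TemplateMatching is the project class shown above; its package import is assumed.

public class MatchSketch {
    public static void main(String[] args) {
        Mat screen = Imgcodecs.imread("screen.png", Imgcodecs.IMREAD_GRAYSCALE);
        Mat button = Imgcodecs.imread("button.png", Imgcodecs.IMREAD_GRAYSCALE);
        // The weak threshold prunes hopeless pyramid levels; the strict threshold accepts a final match.
        Point hit = TemplateMatching.fastTemplateMatching(
                screen, button, Imgproc.TM_CCOEFF_NORMED,
                0.6f, 0.85f, TemplateMatching.MAX_LEVEL_AUTO);
        System.out.println(hit == null ? "no match" : "matched at " + hit);
    }
}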

Example 8 with Rect

use of org.opencv.core.Rect in project Auto.js by hyb1996.

the class TemplateMatching method getROI.

// Build the search window for this pyramid level: the previous match point scaled up by 2,
// expanded to 1.5x the template size and clamped to the source image bounds.
private static Rect getROI(Point p, Mat src, Mat currentTemplate) {
    int x = (int) (p.x * 2 - currentTemplate.cols() / 4);
    x = Math.max(0, x);
    int y = (int) (p.y * 2 - currentTemplate.rows() / 4);
    y = Math.max(0, y);
    int w = (int) (currentTemplate.cols() * 1.5);
    int h = (int) (currentTemplate.rows() * 1.5);
    if (x + w >= src.cols()) {
        w = src.cols() - x - 1;
    }
    if (y + h >= src.rows()) {
        h = src.rows() - y - 1;
    }
    return new Rect(x, y, w, h);
}
Also used : Rect(org.opencv.core.Rect) Point(org.opencv.core.Point)
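
A worked example of the window arithmetic above, with made-up numbers: the previous match point (50, 40) found one pyramid level up, a 100x80 template, and a 640x480 source. Rect is a plain Java value class, so this runs without loading the native library.

import org.opencv.core.Rect;

public class RoiSketch {
    public static void main(String[] args) {
        int px = 50, py = 40, tCols = 100, tRows = 80, srcCols = 640, srcRows = 480;
        int x = Math.max(0, px * 2 - tCols / 4);    // 100 - 25 = 75
        int y = Math.max(0, py * 2 - tRows / 4);    // 80 - 20  = 60
        int w = (int) (tCols * 1.5);                // 150
        int h = (int) (tRows * 1.5);                // 120
        if (x + w >= srcCols) w = srcCols - x - 1;  // no clamping needed in this case
        if (y + h >= srcRows) h = srcRows - y - 1;
        System.out.println(new Rect(x, y, w, h));   // prints {75, 60, 150x120}
    }
}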

Example 9 with Rect

use of org.opencv.core.Rect in project Relic_Main by TeamOverdrive.

the class CryptoboxDetector method drawSlot.

public Point drawSlot(int slot, List<Rect> boxes) {
    // Get the pillar to the left
    Rect leftColumn = boxes.get(slot);
    // Get the pillar to the right
    Rect rightColumn = boxes.get(slot + 1);
    // Get the X Coord
    int leftX = leftColumn.x;
    // Get the X Coord
    int rightX = rightColumn.x;
    // Calculate the point between the two
    int drawX = ((rightX - leftX) / 2) + leftX;
    // Calculate the Y coord. We won't use this in our bot's operation, but it's nice for drawing
    int drawY = leftColumn.height + leftColumn.y;
    return new Point(drawX, drawY);
}
Also used : Rect(org.opencv.core.Rect) Point(org.opencv.core.Point) MatOfPoint(org.opencv.core.MatOfPoint)
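
A small sketch that replays drawSlot's arithmetic with hypothetical pillar boxes, to show that the list is expected to be sorted left to right (the method reads slot and slot + 1 as the left and right pillars). All values are illustrative.

import java.util.Arrays;
import java.util.List;
import org.opencv.core.Point;
import org.opencv.core.Rect;

public class SlotSketch {
    public static void main(String[] args) {
        List<Rect> pillars = Arrays.asList(
                new Rect(40, 60, 20, 200),
                new Rect(160, 58, 22, 204),
                new Rect(280, 61, 21, 198));
        // Same arithmetic as drawSlot(0, pillars): x midway between the two pillars' left edges,
        // y at the bottom of the left pillar.
        Rect left = pillars.get(0), right = pillars.get(1);
        int drawX = (right.x - left.x) / 2 + left.x;   // (160 - 40) / 2 + 40 = 100
        int drawY = left.height + left.y;              // 200 + 60 = 260
        System.out.println(new Point(drawX, drawY));   // prints {100.0, 260.0}
    }
}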

Example 10 with Rect

use of org.opencv.core.Rect in project Relic_Main by TeamOverdrive.

the class GenericDetector method processFrame.

@Override
public Mat processFrame(Mat rgba, Mat gray) {
    Size initSize = rgba.size();
    newSize = new Size(initSize.width * downScaleFactor, initSize.height * downScaleFactor);
    rgba.copyTo(workingMat);
    Imgproc.resize(workingMat, workingMat, newSize);
    if (rotateMat) {
        Mat tempBefore = workingMat.t();
        // tempBefore is the transpose of workingMat; flipping it completes the rotation
        Core.flip(tempBefore, workingMat, -1);
        tempBefore.release();
    }
    Mat preConvert = workingMat.clone();
    colorFilter.process(preConvert, mask);
    if (stretch) {
        structure = Imgproc.getStructuringElement(Imgproc.CV_SHAPE_RECT, stretchKernal);
        Imgproc.morphologyEx(mask, mask, Imgproc.MORPH_CLOSE, structure);
    }
    List<MatOfPoint> contours = new ArrayList<>();
    Imgproc.findContours(mask, contours, hiarchy, Imgproc.RETR_TREE, Imgproc.CHAIN_APPROX_SIMPLE);
    Imgproc.drawContours(workingMat, contours, -1, new Scalar(230, 70, 70), 2);
    Rect chosenRect = null;
    double chosenScore = Integer.MAX_VALUE;
    MatOfPoint2f approxCurve = new MatOfPoint2f();
    for (MatOfPoint c : contours) {
        MatOfPoint2f contour2f = new MatOfPoint2f(c.toArray());
        // Work on the contour as a MatOfPoint2f so it can be passed to approxPolyDP
        double approxDistance = Imgproc.arcLength(contour2f, true) * 0.02;
        Imgproc.approxPolyDP(contour2f, approxCurve, approxDistance, true);
        // Convert back to MatOfPoint
        MatOfPoint points = new MatOfPoint(approxCurve.toArray());
        // Get bounding rect of contour
        Rect rect = Imgproc.boundingRect(points);
        // You can find the perfect area by printing the area of each found rect and picking what you deem to be perfect.
        // Run this with the bot on a balance board, with jewels in their desired location. Since jewels should mostly be
        // in the same position, this hack can work nicely.
        double area = Imgproc.contourArea(c);
        double areaDiffrence = 0;
        switch(detectionMode) {
            case MAX_AREA:
                areaDiffrence = -area * areaWeight;
                break;
            case PERFECT_AREA:
                areaDiffrence = Math.abs(perfectArea - area);
                break;
        }
        // Just declaring vars to make life easy
        double x = rect.x;
        double y = rect.y;
        double w = rect.width;
        double h = rect.height;
        Point centerPoint = new Point(x + (w / 2), y + (h / 2));
        // Get the aspect ratio. Use max in case h and w get swapped, which can happen when accounting for rotation.
        double cubeRatio = Math.max(Math.abs(h / w), Math.abs(w / h));
        double ratioDiffrence = Math.abs(cubeRatio - perfectRatio);
        double finalDiffrence = (ratioDiffrence * ratioWeight) + (areaDiffrence * areaWeight);
        // Think of the difference as a score; 0 means a perfect match
        if (finalDiffrence < chosenScore && finalDiffrence < maxDiffrence && area > minArea) {
            chosenScore = finalDiffrence;
            chosenRect = rect;
        }
        if (debugContours && area > 100) {
            Imgproc.circle(workingMat, centerPoint, 3, new Scalar(0, 255, 255), 3);
            Imgproc.putText(workingMat, "Area: " + String.format("%.1f", area), centerPoint, 0, 0.5, new Scalar(0, 255, 255));
        }
    }
    if (chosenRect != null) {
        Imgproc.rectangle(workingMat, new Point(chosenRect.x, chosenRect.y), new Point(chosenRect.x + chosenRect.width, chosenRect.y + chosenRect.height), new Scalar(0, 255, 0), 3);
        Imgproc.putText(workingMat, "Result: " + String.format("%.2f", chosenScore), new Point(chosenRect.x - 5, chosenRect.y - 10), Core.FONT_HERSHEY_PLAIN, 1.3, new Scalar(0, 255, 0), 2);
        Point centerPoint = new Point(chosenRect.x + (chosenRect.width / 2), chosenRect.y + (chosenRect.height / 2));
        resultRect = chosenRect;
        resultLocation = centerPoint;
        resultFound = true;
    } else {
        resultFound = false;
        resultRect = null;
        resultLocation = null;
    }
    Imgproc.resize(workingMat, workingMat, initSize);
    preConvert.release();
    Imgproc.putText(workingMat, "DogeCV v1.1 Generic: " + newSize.toString() + " - " + speed.toString() + " - " + detectionMode.toString(), new Point(5, 30), 0, 1.2, new Scalar(0, 255, 255), 2);
    return workingMat;
}
Also used : Mat(org.opencv.core.Mat) Rect(org.opencv.core.Rect) Size(org.opencv.core.Size) MatOfPoint2f(org.opencv.core.MatOfPoint2f) ArrayList(java.util.ArrayList) MatOfPoint(org.opencv.core.MatOfPoint) Point(org.opencv.core.Point) Scalar(org.opencv.core.Scalar)
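
A hedged sketch of the scoring step in PERFECT_AREA mode, isolated from the detector. The weights, "perfect" values, and candidate rectangle are made up for illustration (the real fields are configured on the detector), and the bounding-box area stands in for Imgproc.contourArea; variable names mirror the project's spelling.

import org.opencv.core.Rect;

public class ScoreSketch {
    public static void main(String[] args) {
        double perfectArea = 6500, perfectRatio = 1.0;
        double areaWeight = 0.01, ratioWeight = 15, maxDiffrence = 100, minArea = 1000;

        Rect rect = new Rect(120, 80, 78, 86);                 // candidate bounding box
        double area = rect.width * rect.height;                // stand-in for contourArea
        double areaDiffrence = Math.abs(perfectArea - area);   // PERFECT_AREA mode
        double cubeRatio = Math.max((double) rect.height / rect.width,
                                    (double) rect.width / rect.height);
        double ratioDiffrence = Math.abs(cubeRatio - perfectRatio);
        double finalDiffrence = ratioDiffrence * ratioWeight + areaDiffrence * areaWeight;

        // Lower is better: a candidate is kept only if it beats the best score so far,
        // stays under maxDiffrence, and its area exceeds minArea.
        boolean kept = finalDiffrence < maxDiffrence && area > minArea;
        System.out.printf("score=%.2f kept=%b%n", finalDiffrence, kept);
    }
}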

Aggregations

Rect (org.opencv.core.Rect): 20
Mat (org.opencv.core.Mat): 16
Point (org.opencv.core.Point): 10
Scalar (org.opencv.core.Scalar): 7
Size (org.opencv.core.Size): 7
MatOfPoint (org.opencv.core.MatOfPoint): 6
ArrayList (java.util.ArrayList): 4
KeyPoint (org.opencv.core.KeyPoint): 2
MatOfKeyPoint (org.opencv.core.MatOfKeyPoint): 2
MatOfPoint2f (org.opencv.core.MatOfPoint2f): 2
TimingLogger (android.util.TimingLogger): 1
File (java.io.File): 1
RotatedRect (org.opencv.core.RotatedRect): 1