Use of org.opencv.core.Point in the project Frankenstein by olir — class TestImageInput, method drawTestImage.
/**
 * Paints a calibration test pattern into {@code testFrame}: a white cross of
 * thick center axes with thinner grid lines on either side, a red diagonal
 * from the top-left to the bottom-right corner, and a red size label
 * ("width x height") near the center.
 *
 * @param xoffset left edge of the target region
 * @param yoffset top edge of the target region
 * @param width   width of the target region in pixels
 * @param height  height of the target region in pixels
 */
private void drawTestImage(int xoffset, int yoffset, int width, int height) {
    final int count = 10;
    // Derive an even grid spacing from the height, then halve it until it drops below 8.
    int gridSize = ((height / count) >> 1) << 1;
    while (gridSize >= 8) {
        gridSize >>= 1;
    }

    // Thick vertical center axis, then thin vertical grid lines to the right and left of it.
    int xmid = xoffset + (width >> 1);
    Imgproc.line(testFrame, new Point(xmid, yoffset), new Point(xmid, yoffset + height - 1), white, 3);
    for (int gx = xmid + gridSize; gx < xoffset + width; gx += gridSize) {
        Imgproc.line(testFrame, new Point(gx, yoffset), new Point(gx, yoffset + height - 1), white, 1);
    }
    for (int gx = xmid - gridSize; gx > xoffset; gx -= gridSize) {
        Imgproc.line(testFrame, new Point(gx, yoffset), new Point(gx, yoffset + height - 1), white, 1);
    }

    // Thick horizontal center axis, then thin horizontal grid lines below and above it.
    int ymid = yoffset + (height >> 1);
    Imgproc.line(testFrame, new Point(xoffset, ymid), new Point(xoffset + width - 1, ymid), white, 3);
    for (int gy = ymid + gridSize; gy < yoffset + height; gy += gridSize) {
        Imgproc.line(testFrame, new Point(xoffset, gy), new Point(xoffset + width - 1, gy), white, 1);
    }
    for (int gy = ymid - gridSize; gy > yoffset; gy -= gridSize) {
        Imgproc.line(testFrame, new Point(xoffset, gy), new Point(xoffset + width - 1, gy), white, 1);
    }

    // Red diagonal marker plus the dimension label, offset from the center by grid units.
    Imgproc.line(testFrame, new Point(xoffset, yoffset), new Point(xoffset + width - 1, yoffset + height - 1), red, 1);
    String label = width + " x " + height;
    Point labelOrigin = new Point(xmid - gridSize * 1.33, ymid - gridSize * 0.25);
    Imgproc.putText(testFrame, label, labelOrigin, Core.FONT_HERSHEY_PLAIN, 4.0, red, 3);
}
Use of org.opencv.core.Point in the project Frankenstein by olir — class TestImageInput, method process.
/**
 * Produces the next output frame: copies the pre-rendered test pattern into
 * {@code newFrame} and overlays a "Frame #N" counter near the lower-left corner.
 * The {@code sourceFrame} content is intentionally ignored — this input
 * generates synthetic test frames.
 *
 * <p>Fix: removed the unconditional {@code System.out.println} that ran for
 * every frame; per-frame stdout logging floods the console and slows down the
 * processing hot path.
 *
 * @param sourceFrame incoming frame (unused)
 * @param frameId     sequential frame number, rendered onto the output
 * @param context     filter context (unused here)
 * @return the reusable {@code newFrame} buffer holding the rendered test frame
 */
@Override
public Mat process(Mat sourceFrame, int frameId, FilterContext context) {
    // Start from the static test pattern each time; newFrame is reused across calls.
    testFrame.copyTo(newFrame);
    // Overlay the frame counter so individual frames are distinguishable in the output.
    Imgproc.putText(newFrame, "Frame #" + frameId, new Point(10, smallHeight - 10), Core.FONT_HERSHEY_PLAIN, 3.0, red, 2);
    return newFrame;
}
Use of org.opencv.core.Point in the project Auto.js by hyb1996 — class ColorFinder, method findColor.
/**
 * Finds the first pixel matching {@code color} (within {@code threshold}) in the
 * image, optionally restricted to {@code rect}, and returns its position in
 * screen coordinates.
 *
 * <p>Fix: the y coordinate was previously mapped with {@code scaleX}; it must use
 * {@code scaleY}, otherwise results are wrong whenever the horizontal and vertical
 * scale factors differ. Also guards against an empty match set from
 * {@code findColorInner}, which would otherwise throw on {@code toArray()[0]}.
 *
 * @param image     the image to search
 * @param color     the target color
 * @param threshold per-channel tolerance for the color match
 * @param rect      optional search region; when non-null, the result is translated
 *                  from region-local back to full-image coordinates before scaling
 * @return the first matching point in screen coordinates, or {@code null} if none
 */
public Point findColor(ImageWrapper image, int color, int threshold, Rect rect) {
    MatOfPoint matOfPoint = findColorInner(image, color, threshold, rect);
    if (matOfPoint == null) {
        return null;
    }
    Point[] points = matOfPoint.toArray();
    if (points.length == 0) {
        // Treat an empty result like "not found" instead of throwing AIOOBE.
        return null;
    }
    Point point = points[0];
    if (rect != null) {
        // Translate from the search region to full-image coordinates, then map to screen space.
        point.x = mScreenMetrics.scaleX((int) (point.x + rect.x));
        point.y = mScreenMetrics.scaleY((int) (point.y + rect.y));
    }
    return point;
}
Use of org.opencv.core.Point in the project Auto.js by hyb1996 — class ColorFinder, method findAllPointsForColor.
/**
 * Finds all pixels matching {@code color} (within {@code threshold}) in the image,
 * optionally restricted to {@code rect}, and returns their positions in screen
 * coordinates.
 *
 * <p>Fix: the y coordinates were previously mapped with {@code scaleX}; they must
 * use {@code scaleY}, otherwise results are wrong whenever the horizontal and
 * vertical scale factors differ.
 *
 * @param image     the image to search
 * @param color     the target color
 * @param threshold per-channel tolerance for the color match
 * @param rect      optional search region; when non-null, each result is translated
 *                  from region-local back to full-image coordinates before scaling
 * @return all matching points in screen coordinates; empty array if none
 */
public Point[] findAllPointsForColor(ImageWrapper image, int color, int threshold, Rect rect) {
    MatOfPoint matOfPoint = findColorInner(image, color, threshold, rect);
    if (matOfPoint == null) {
        // Never return a null array; callers can iterate unconditionally.
        return new Point[0];
    }
    Point[] points = matOfPoint.toArray();
    if (rect != null) {
        for (int i = 0; i < points.length; i++) {
            // Translate from the search region to full-image coordinates, then map to screen space.
            points[i].x = mScreenMetrics.scaleX((int) (points[i].x + rect.x));
            points[i].y = mScreenMetrics.scaleY((int) (points[i].y + rect.y));
        }
    }
    return points;
}
Use of org.opencv.core.Point in the project Auto.js by hyb1996 — class TemplateMatching, method fastTemplateMatching.
/**
 * Fast template matching using an image pyramid: matching starts at the most
 * down-scaled level and refines the candidate location level by level, searching
 * only a small ROI around the previous level's hit.
 *
 * @param img             the image to search in
 * @param template        the template image to look for
 * @param matchMethod     the OpenCV template-matching method (TM_* constant)
 * @param weakThreshold   weak threshold: consulted after each pyramid level to decide
 *                        whether matching should continue; if the similarity falls below
 *                        it, matching may stop early
 * @param strictThreshold strict threshold: validates the final result; additionally, if a
 *                        level's similarity already exceeds it, that result is returned
 *                        immediately
 * @param maxLevel        number of pyramid levels, or {@code MAX_LEVEL_AUTO} to pick one
 *                        automatically from the image and template sizes
 * @return the matched location in {@code img} coordinates, or {@code null} if the best
 *         match never reaches {@code strictThreshold}
 */
public static Point fastTemplateMatching(Mat img, Mat template, int matchMethod, float weakThreshold, float strictThreshold, int maxLevel) {
TimingLogger logger = new TimingLogger(LOG_TAG, "fast_tm");
if (maxLevel == MAX_LEVEL_AUTO) {
// Automatically choose the number of pyramid levels.
maxLevel = selectPyramidLevel(img, template);
logger.addSplit("selectPyramidLevel:" + maxLevel);
}
// Holds the template's matched position in the (scaled) image from the previous round.
Point p = null;
Mat matchResult;
double similarity = 0;
boolean isFirstMatching = true;
// Walk the pyramid from the coarsest level (maxLevel) down to full resolution (0).
for (int level = maxLevel; level >= 0; level--) {
// Down-scale both images to the current pyramid level.
Mat src = getPyramidDownAtLevel(img, level);
Mat currentTemplate = getPyramidDownAtLevel(template, level);
// If the previous round found no match, consider whether to abort.
if (p == null) {
// Not the first round and shouldContinueMatching says stop: give up (returns null).
if (!isFirstMatching && !shouldContinueMatching(level, maxLevel)) {
break;
}
// Full search over the whole (scaled) image.
matchResult = matchTemplate(src, currentTemplate, matchMethod);
Pair<Point, Double> bestMatched = getBestMatched(matchResult, matchMethod, weakThreshold);
p = bestMatched.first;
similarity = bestMatched.second;
} else {
// Use the previous round's hit to restrict this round's search to a small ROI.
Rect r = getROI(p, src, currentTemplate);
matchResult = matchTemplate(new Mat(src, r), currentTemplate, matchMethod);
Pair<Point, Double> bestMatched = getBestMatched(matchResult, matchMethod, weakThreshold);
// NOTE(review): this weak-threshold rejection is a no-op — the body is commented
// out, so a below-threshold match still propagates to the next level and is only
// filtered by the final strictThreshold check below. Confirm this is intentional.
if (bestMatched.second < weakThreshold) {
// p = null;
// break;
}
p = bestMatched.first;
similarity = bestMatched.second;
// Convert the ROI-local hit back to image coordinates at this level.
p.x += r.x;
p.y += r.y;
}
// Strict threshold met: scale the point up to full resolution and stop early.
if (similarity >= strictThreshold) {
pyrUp(p, level);
break;
}
logger.addSplit("level:" + level + " point:" + p);
isFirstMatching = false;
}
logger.addSplit("result:" + p);
logger.dumpToLog();
// Final validation: only a match meeting the strict threshold counts.
if (similarity < strictThreshold) {
return null;
}
return p;
}
Aggregations