Use of org.opencv.core.Point in project Relic_Main by TeamOverdrive.
Class Imgproc, method floodFill.
//
// C++: int floodFill(Mat& image, Mat& mask, Point seedPoint, Scalar newVal, Rect* rect = 0, Scalar loDiff = Scalar(), Scalar upDiff = Scalar(), int flags = 4)
//
// javadoc: floodFill(image, mask, seedPoint, newVal, rect, loDiff, upDiff, flags)
public static int floodFill(Mat image, Mat mask, Point seedPoint, Scalar newVal, Rect rect, Scalar loDiff, Scalar upDiff, int flags) {
    double[] rect_out = new double[4];
    int retVal = floodFill_0(image.nativeObj, mask.nativeObj, seedPoint.x, seedPoint.y, newVal.val[0], newVal.val[1], newVal.val[2], newVal.val[3], rect_out, loDiff.val[0], loDiff.val[1], loDiff.val[2], loDiff.val[3], upDiff.val[0], upDiff.val[1], upDiff.val[2], upDiff.val[3], flags);
    if (rect != null) {
        rect.x = (int) rect_out[0];
        rect.y = (int) rect_out[1];
        rect.width = (int) rect_out[2];
        rect.height = (int) rect_out[3];
    }
    return retVal;
}
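For reference, a minimal sketch (not taken from Relic_Main) of how this wrapper is typically called from user code. The input file name, seed point, fill color, and tolerances are illustrative assumptions; the mask must be two pixels wider and taller than the image.

import org.opencv.core.*;
import org.opencv.imgcodecs.Imgcodecs;
import org.opencv.imgproc.Imgproc;

public class FloodFillExample {
    public static void main(String[] args) {
        System.loadLibrary(Core.NATIVE_LIBRARY_NAME);
        // Hypothetical 8-bit 3-channel input image.
        Mat image = Imgcodecs.imread("input.jpg");
        // Mask must be zero-initialized and 2 pixels larger in each dimension.
        Mat mask = Mat.zeros(image.rows() + 2, image.cols() + 2, CvType.CV_8UC1);
        Rect filledRegion = new Rect();
        int pixelsFilled = Imgproc.floodFill(
                image, mask,
                new Point(10, 10),           // seed point (assumed)
                new Scalar(0, 255, 0),       // new color for the filled area
                filledRegion,                // receives the bounding box of the repainted region
                new Scalar(20, 20, 20),      // lower color difference tolerance
                new Scalar(20, 20, 20),      // upper color difference tolerance
                4);                          // 4-connectivity
        System.out.println("Filled " + pixelsFilled + " pixels inside " + filledRegion);
    }
}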
Use of org.opencv.core.Point in project Relic_Main by TeamOverdrive.
Class Imgproc, method phaseCorrelate.
//
// C++: Point2d phaseCorrelate(Mat src1, Mat src2, Mat window = Mat(), double* response = 0)
//
// javadoc: phaseCorrelate(src1, src2, window, response)
public static Point phaseCorrelate(Mat src1, Mat src2, Mat window, double[] response) {
    double[] response_out = new double[1];
    Point retVal = new Point(phaseCorrelate_0(src1.nativeObj, src2.nativeObj, window.nativeObj, response_out));
    if (response != null)
        response[0] = (double) response_out[0];
    return retVal;
}
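A minimal usage sketch for this wrapper, assuming two single-channel floating-point frames of the same size (a requirement of phaseCorrelate); the frame contents here are synthetic placeholders rather than data from the project above.

import org.opencv.core.*;
import org.opencv.imgproc.Imgproc;

public class PhaseCorrelateExample {
    public static void main(String[] args) {
        System.loadLibrary(Core.NATIVE_LIBRARY_NAME);
        // Hypothetical grayscale frame filled with random values.
        Mat frame1 = new Mat(240, 320, CvType.CV_32FC1);
        Core.randu(frame1, 0, 255);
        Mat frame2 = frame1.clone();   // identical frame, so the expected shift is (0, 0)
        Mat window = new Mat();        // optional Hanning window; empty means none
        double[] response = new double[1];
        Point shift = Imgproc.phaseCorrelate(frame1, frame2, window, response);
        System.out.println("Estimated translation: " + shift + ", response: " + response[0]);
    }
}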
Use of org.opencv.core.Point in project Relic_Main by TeamOverdrive.
Class Converters, method Mat_to_vector_Point.
public static void Mat_to_vector_Point(Mat m, List<Point> pts) {
    if (pts == null)
        throw new java.lang.IllegalArgumentException("Output List can't be null");
    int count = m.rows();
    int type = m.type();
    if (m.cols() != 1)
        throw new java.lang.IllegalArgumentException("Input Mat should have one column\n" + m);
    pts.clear();
    if (type == CvType.CV_32SC2) {
        int[] buff = new int[2 * count];
        m.get(0, 0, buff);
        for (int i = 0; i < count; i++) {
            pts.add(new Point(buff[i * 2], buff[i * 2 + 1]));
        }
    } else if (type == CvType.CV_32FC2) {
        float[] buff = new float[2 * count];
        m.get(0, 0, buff);
        for (int i = 0; i < count; i++) {
            pts.add(new Point(buff[i * 2], buff[i * 2 + 1]));
        }
    } else if (type == CvType.CV_64FC2) {
        double[] buff = new double[2 * count];
        m.get(0, 0, buff);
        for (int i = 0; i < count; i++) {
            pts.add(new Point(buff[i * 2], buff[i * 2 + 1]));
        }
    } else {
        throw new java.lang.IllegalArgumentException("Input Mat should be of CV_32SC2, CV_32FC2 or CV_64FC2 type\n" + m);
    }
}
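A short sketch of how this converter is typically used: build a one-column, two-channel Mat of (x, y) pairs and convert it into a List<Point>. The concrete coordinate values are illustrative only.

import java.util.ArrayList;
import java.util.List;
import org.opencv.core.*;
import org.opencv.utils.Converters;

public class ConvertersExample {
    public static void main(String[] args) {
        System.loadLibrary(Core.NATIVE_LIBRARY_NAME);
        // A 3x1 Mat with two 32-bit integer channels, i.e. three (x, y) pairs.
        Mat m = new Mat(3, 1, CvType.CV_32SC2);
        m.put(0, 0, 10, 20, 30, 40, 50, 60);
        List<Point> pts = new ArrayList<>();
        Converters.Mat_to_vector_Point(m, pts);
        // pts now holds (10, 20), (30, 40), (50, 60)
        System.out.println(pts);
    }
}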
Use of org.opencv.core.Point in project kifu-recorder by leonardost.
Class RegistrarPartidaActivity, method processarCantosDoTabuleiro.
private void processarCantosDoTabuleiro() {
    posicaoDoTabuleiroNaImagem = new Mat(4, 1, CvType.CV_32FC2);
    posicaoDoTabuleiroNaImagem.put(0, 0,
            cantosDoTabuleiro[0], cantosDoTabuleiro[1],
            cantosDoTabuleiro[2], cantosDoTabuleiro[3],
            cantosDoTabuleiro[4], cantosDoTabuleiro[5],
            cantosDoTabuleiro[6], cantosDoTabuleiro[7]);
    Point[] cantos = new Point[4];
    cantos[0] = new Point(cantosDoTabuleiro[0], cantosDoTabuleiro[1]);
    cantos[1] = new Point(cantosDoTabuleiro[2], cantosDoTabuleiro[3]);
    cantos[2] = new Point(cantosDoTabuleiro[4], cantosDoTabuleiro[5]);
    cantos[3] = new Point(cantosDoTabuleiro[6], cantosDoTabuleiro[7]);
    contornoDoTabuleiro = new MatOfPoint(cantos);
}
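For context, a minimal sketch of one common way such a 4x1 CV_32FC2 corner Mat is consumed: computing a perspective transform to rectify the board. This is not code from kifu-recorder; the method name, output size, and corner ordering are assumptions for illustration.

import org.opencv.core.*;
import org.opencv.imgproc.Imgproc;

public class BoardRectificationSketch {
    // corners: 4x1 CV_32FC2 Mat holding the board corners in the source image,
    // analogous to posicaoDoTabuleiroNaImagem above (assumed clockwise from top-left).
    public static Mat rectifyBoard(Mat sourceImage, Mat corners) {
        int boardSize = 500;   // assumed output size in pixels
        Mat destination = new Mat(4, 1, CvType.CV_32FC2);
        destination.put(0, 0,
                0, 0,
                boardSize, 0,
                boardSize, boardSize,
                0, boardSize);
        Mat transform = Imgproc.getPerspectiveTransform(corners, destination);
        Mat rectified = new Mat();
        Imgproc.warpPerspective(sourceImage, rectified, transform, new Size(boardSize, boardSize));
        return rectified;
    }
}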
Use of org.opencv.core.Point in project kifu-recorder by leonardost.
Class DetectorDeTabuleiro, method detectarContornos.
private List<MatOfPoint> detectarContornos(Mat imagemComBordasEmEvidencia) {
    // Find the contours delimited by the detected lines
    List<MatOfPoint> contornos = new ArrayList<>();
    Mat hierarquia = new Mat();
    Imgproc.findContours(imagemComBordasEmEvidencia, contornos, hierarquia, Imgproc.RETR_LIST, Imgproc.CHAIN_APPROX_SIMPLE, new Point(0, 0));
    Log.d("kifu-recorder", "Número de contornos encontrados: " + contornos.size());
    // Remove very small contours, which are most likely noise
    for (Iterator<MatOfPoint> it = contornos.iterator(); it.hasNext(); ) {
        MatOfPoint contorno = it.next();
        // Ideally this threshold would be a ratio of the image area
        if (Imgproc.contourArea(contorno) < 700) {
            it.remove();
        }
    }
    // Convert the image back to a color format
    Imgproc.cvtColor(imagemComBordasEmEvidencia, imagem, Imgproc.COLOR_GRAY2BGR, 4);
    imagemComBordasEmEvidencia.release();
    return contornos;
}
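A self-contained sketch of the same contour pipeline: produce an edge image, extract its contours, and drop small ones. The Canny thresholds and method name are assumptions; the 700-pixel area cutoff mirrors the snippet above.

import java.util.ArrayList;
import java.util.List;
import org.opencv.core.*;
import org.opencv.imgproc.Imgproc;

public class ContourDetectionSketch {
    // Returns the contours of an edge image, filtered by area.
    public static List<MatOfPoint> findLargeContours(Mat grayImage) {
        Mat edges = new Mat();
        Imgproc.Canny(grayImage, edges, 50, 150);   // assumed thresholds
        List<MatOfPoint> contours = new ArrayList<>();
        Mat hierarchy = new Mat();
        Imgproc.findContours(edges, contours, hierarchy, Imgproc.RETR_LIST,
                Imgproc.CHAIN_APPROX_SIMPLE, new Point(0, 0));
        // Drop small contours, like the 700-pixel area cutoff used above.
        contours.removeIf(c -> Imgproc.contourArea(c) < 700);
        return contours;
    }
}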