Search in sources :

Example 1 with Point2f

use of org.bytedeco.javacpp.opencv_core.Point2f in project myrobotlab by MyRobotLab.

The class DetectedFace, method resolveCenterTriangle:

/**
 * Builds the source triangle used for face alignment: the center points of
 * the left eye, the right eye and the mouth, in that order.
 *
 * @return a native Point2f array of size 3 (positions 0..2) holding the
 *         three centers; the pointer is left at position 2, so callers
 *         should call position(0) before reading. The caller owns the
 *         returned native memory and is responsible for close().
 */
public Point2f resolveCenterTriangle() {
    // left eye center
    int centerleftx = getLeftEye().x() + getLeftEye().width() / 2;
    int centerlefty = getLeftEye().y() + getLeftEye().height() / 2;
    // right side center
    int centerrightx = getRightEye().x() + getRightEye().width() / 2;
    int centerrighty = getRightEye().y() + getRightEye().height() / 2;
    // mouth center.
    int centermouthx = getMouth().x() + getMouth().width() / 2;
    int centermouthy = getMouth().y() + getMouth().height() / 2;
    // Allocate a native array of 3 points and populate the source triangle.
    // (A previous int[][] scratch array was populated but never read; it has
    // been removed as dead code.)
    Point2f srcTri = new Point2f(3);
    srcTri.position(0).x((float) centerleftx).y((float) centerlefty);
    srcTri.position(1).x((float) centerrightx).y((float) centerrighty);
    srcTri.position(2).x((float) centermouthx).y((float) centermouthy);
    return srcTri;
}
Also used : Point2f(org.bytedeco.javacpp.opencv_core.Point2f) org.bytedeco.javacpp.opencv_core.cvPoint(org.bytedeco.javacpp.opencv_core.cvPoint) CvPoint(org.bytedeco.javacpp.opencv_core.CvPoint)

Example 2 with Point2f

use of org.bytedeco.javacpp.opencv_core.Point2f in project myrobotlab by MyRobotLab.

The class OpenCVFilterFaceDetect2, method process:

@Override
public IplImage process(IplImage image, OpenCVData data) throws InterruptedException {
    // convert to grayscale
    Frame grayFrame = makeGrayScale(image);
    // TODO: this seems super wonky! isn't there an easy way to go from IplImage
    // to opencv Mat?
    int cols = grayFrame.imageWidth;
    int rows = grayFrame.imageHeight;
    // convert to a Mat
    Mat bwImgMat = converterToIpl.convertToMat(grayFrame);
    //
    // Find a bunch of faces and their features
    // extractDetectedFaces will only return a face if it has all the necessary
    // features (face, 2 eyes and 1 mouth)
    ArrayList<DetectedFace> dFaces = extractDetectedFaces(bwImgMat, cols, rows);
    // Ok, for each of these detected faces we should try to classify them.
    for (DetectedFace dF : dFaces) {
        if (dF.isComplete()) {
            // create the triangle from left->right->mouth center
            Point2f srcTri = dF.resolveCenterTriangle();
            Point2f dstTri = new Point2f(3);
            // populate dest triangle with the canonical (upright) eye/mouth
            // positions, expressed as fractions of the face rectangle.
            dstTri.position(0).x((float) (dF.getFace().width() * .3)).y((float) (dF.getFace().height() * .45));
            dstTri.position(1).x((float) (dF.getFace().width() * .7)).y((float) (dF.getFace().height() * .45));
            dstTri.position(2).x((float) (dF.getFace().width() * .5)).y((float) (dF.getFace().height() * .85));
            // create the affine rotation/scale matrix
            Mat warpMat = getAffineTransform(srcTri.position(0), dstTri.position(0));
            // faceWithBorder returns null when the padded rect would fall
            // outside the image; guard to avoid an NPE in new Mat(...)
            Rect borderRect = dF.faceWithBorder(borderSize, cols, rows);
            if (borderRect == null) {
                log.warn("Invalid face border found... parts were negative!");
                continue;
            }
            Mat dFaceMat = new Mat(bwImgMat, borderRect);
            // so we don't lose the borders after the rotation
            if (doAffine) {
                warpAffine(dFaceMat, dFaceMat, warpMat, borderRect.size());
            }
            try {
                // release the native memory backing these temporaries
                srcTri.close();
                dstTri.close();
                warpMat.close();
            } catch (Exception e) {
                log.warn("Error releasing some OpenCV memory, you shouldn't see this: {}", e);
            // should we continue ?!
            }
            // highlight each of the faces we find.
            drawFaceRects(image, dF);
            data.setEyesDifference(dF.getRightEye().y() - dF.getLeftEye().y());
        }
    }
    // pass through/return the original image marked up.
    return image;
}
Also used : Mat(org.bytedeco.javacpp.opencv_core.Mat) Frame(org.bytedeco.javacv.Frame) CanvasFrame(org.bytedeco.javacv.CanvasFrame) Rect(org.bytedeco.javacpp.opencv_core.Rect) org.bytedeco.javacpp.opencv_imgproc.cvDrawRect(org.bytedeco.javacpp.opencv_imgproc.cvDrawRect) Point2f(org.bytedeco.javacpp.opencv_core.Point2f) org.bytedeco.javacpp.opencv_core.cvPoint(org.bytedeco.javacpp.opencv_core.cvPoint)

Example 3 with Point2f

use of org.bytedeco.javacpp.opencv_core.Point2f in project myrobotlab by MyRobotLab.

The class OpenCVFilterFaceRecognizer, method process:

@Override
public IplImage process(IplImage image, OpenCVData data) throws InterruptedException {
    // convert to grayscale
    Frame grayFrame = makeGrayScale(image);
    // TODO: this seems super wonky! isn't there an easy way to go from IplImage
    // to opencv Mat?
    int cols = grayFrame.imageWidth;
    int rows = grayFrame.imageHeight;
    // convert to a Mat
    Mat bwImgMat = converterToIpl.convertToMat(grayFrame);
    // overlay the current mode so the user knows what the filter is doing
    if (Mode.TRAIN.equals(mode)) {
        String status = "Training Mode: " + trainName;
        cvPutText(image, status, cvPoint(20, 40), font, CvScalar.GREEN);
    } else if (Mode.RECOGNIZE.equals(mode)) {
        String status = "Recognize Mode:" + lastRecognizedName;
        cvPutText(image, status, cvPoint(20, 40), font, CvScalar.YELLOW);
    }
    //
    // Find a bunch of faces and their features
    // extractDetectedFaces will only return a face if it has all the necessary
    // features (face, 2 eyes and 1 mouth)
    ArrayList<DetectedFace> dFaces = extractDetectedFaces(bwImgMat, cols, rows);
    // Ok, for each of these detected faces we should try to classify them.
    for (DetectedFace dF : dFaces) {
        if (dF.isComplete()) {
            // create the triangle from left->right->mouth center
            Point2f srcTri = dF.resolveCenterTriangle();
            Point2f dstTri = new Point2f(3);
            // populate dest triangle with the canonical (upright) eye/mouth
            // positions, expressed as fractions of the face rectangle.
            dstTri.position(0).x((float) (dF.getFace().width() * .3)).y((float) (dF.getFace().height() * .45));
            dstTri.position(1).x((float) (dF.getFace().width() * .7)).y((float) (dF.getFace().height() * .45));
            dstTri.position(2).x((float) (dF.getFace().width() * .5)).y((float) (dF.getFace().height() * .85));
            // create the affine rotation/scale matrix
            Mat warpMat = getAffineTransform(srcTri.position(0), dstTri.position(0));
            // faceWithBorder returns null when the padded rect would fall
            // outside the image; skip this face in that case.
            Rect borderRect = dF.faceWithBorder(borderSize, cols, rows);
            if (borderRect == null) {
                log.warn("Invalid face border found... parts were negative!");
                continue;
            }
            Mat dFaceMat = new Mat(bwImgMat, borderRect);
            // so we don't lose the borders after the rotation
            if (doAffine) {
                warpAffine(dFaceMat, dFaceMat, warpMat, borderRect.size());
            }
            try {
                // release the native memory backing these temporaries
                srcTri.close();
                dstTri.close();
                warpMat.close();
            } catch (Exception e) {
                log.warn("Error releasing some OpenCV memory, you shouldn't see this: {}", e);
            // should we continue ?!
            }
            // dFaceMat is the cropped and rotated face
            if (Mode.TRAIN.equals(mode)) {
                // we're in training mode.. so we should save the image
                log.info("Training Mode for {}.", trainName);
                if (!StringUtils.isEmpty(trainName)) {
                    try {
                        saveTrainingImage(trainName, dFaceMat);
                        cvPutText(image, "Snapshot Saved: " + trainName, cvPoint(20, 60), font, CvScalar.CYAN);
                    } catch (IOException e) {
                        cvPutText(image, "Error saving: " + trainName, cvPoint(20, 60), font, CvScalar.CYAN);
                        // log with the full stack trace instead of printStackTrace()
                        log.warn("Error saving training image for {}", trainName, e);
                    }
                }
            } else if (Mode.RECOGNIZE.equals(mode)) {
                // You bettah recognize!
                if (!trained) {
                    // we are a young grasshopper.
                    if (face) {
                        invoke("publishNoRecognizedFace");
                        face = false;
                    }
                    return image;
                } else {
                    face = true;
                    // Resize the face to pass it to the predicter
                    Mat dFaceMatSized = resizeImage(dFaceMat);
                    // If we're applying a mask, do it before the prediction
                    if (facemask != null) {
                        Mat maskedface = facemask.clone();
                        dFaceMatSized.copyTo(maskedface, facemask);
                        dFaceMatSized = maskedface;
                        if (debug) {
                            show(dFaceMatSized, "Masked Face");
                        }
                    }
                    int predictedLabel = faceRecognizer.predict(dFaceMatSized);
                    BytePointer bp = faceRecognizer.getLabelInfo(predictedLabel);
                    // TODO: what char encoding is this?!
                    String name = bp.getString();
                    log.info("Recognized a Face {} - {}", predictedLabel, name);
                    cvPutText(image, name, dF.resolveGlobalLowerLeftCorner(), font, CvScalar.CYAN);
                    // If it's a new name, invoke and publish. Compare with
                    // equals(): != compares String references and only works
                    // for interned strings.
                    if (!name.equals(lastRecognizedName)) {
                        invoke("publishRecognizedFace", name);
                    }
                    lastRecognizedName = name;
                }
            }
        }
        // highlight each of the faces we find.
        drawFaceRects(image, dF);
    }
    // pass through/return the original image marked up.
    return image;
}
Also used : Mat(org.bytedeco.javacpp.opencv_core.Mat) CanvasFrame(org.bytedeco.javacv.CanvasFrame) Frame(org.bytedeco.javacv.Frame) org.bytedeco.javacpp.opencv_imgproc.cvDrawRect(org.bytedeco.javacpp.opencv_imgproc.cvDrawRect) Rect(org.bytedeco.javacpp.opencv_core.Rect) Point2f(org.bytedeco.javacpp.opencv_core.Point2f) BytePointer(org.bytedeco.javacpp.BytePointer) IOException(java.io.IOException) org.bytedeco.javacpp.opencv_core.cvPoint(org.bytedeco.javacpp.opencv_core.cvPoint) IOException(java.io.IOException)

Example 4 with Point2f

use of org.bytedeco.javacpp.opencv_core.Point2f in project myrobotlab by MyRobotLab.

The class OpenCVFilterFaceDetect2, method process:

@Override
public IplImage process(IplImage image, VisionData data) throws InterruptedException {
    // convert to grayscale
    Frame grayFrame = makeGrayScale(image);
    // TODO: this seems super wonky! isn't there an easy way to go from IplImage
    // to opencv Mat?
    int cols = grayFrame.imageWidth;
    int rows = grayFrame.imageHeight;
    // convert to a Mat
    Mat bwImgMat = converterToIpl.convertToMat(grayFrame);
    //
    // Find a bunch of faces and their features
    // extractDetectedFaces will only return a face if it has all the necessary
    // features (face, 2 eyes and 1 mouth)
    ArrayList<DetectedFace> dFaces = extractDetectedFaces(bwImgMat, cols, rows);
    // Ok, for each of these detected faces we should try to classify them.
    for (DetectedFace dF : dFaces) {
        if (dF.isComplete()) {
            // create the triangle from left->right->mouth center
            Point2f srcTri = dF.resolveCenterTriangle();
            Point2f dstTri = new Point2f(3);
            // populate dest triangle with the canonical (upright) eye/mouth
            // positions, expressed as fractions of the face rectangle.
            dstTri.position(0).x((float) (dF.getFace().width() * .3)).y((float) (dF.getFace().height() * .45));
            dstTri.position(1).x((float) (dF.getFace().width() * .7)).y((float) (dF.getFace().height() * .45));
            dstTri.position(2).x((float) (dF.getFace().width() * .5)).y((float) (dF.getFace().height() * .85));
            // create the affine rotation/scale matrix
            Mat warpMat = getAffineTransform(srcTri.position(0), dstTri.position(0));
            // faceWithBorder returns null when the padded rect would fall
            // outside the image; guard to avoid an NPE in new Mat(...)
            Rect borderRect = dF.faceWithBorder(borderSize, cols, rows);
            if (borderRect == null) {
                log.warn("Invalid face border found... parts were negative!");
                continue;
            }
            Mat dFaceMat = new Mat(bwImgMat, borderRect);
            // so we don't lose the borders after the rotation
            if (doAffine) {
                warpAffine(dFaceMat, dFaceMat, warpMat, borderRect.size());
            }
            try {
                // release the native memory backing these temporaries
                srcTri.close();
                dstTri.close();
                warpMat.close();
            } catch (Exception e) {
                log.warn("Error releasing some OpenCV memory, you shouldn't see this: {}", e);
            // should we continue ?!
            }
            // highlight each of the faces we find.
            drawFaceRects(image, dF);
            data.setEyesDifference(dF.getRightEye().y() - dF.getLeftEye().y());
        }
    }
    // pass through/return the original image marked up.
    return image;
}
Also used : Mat(org.bytedeco.javacpp.opencv_core.Mat) Frame(org.bytedeco.javacv.Frame) CanvasFrame(org.bytedeco.javacv.CanvasFrame) Rect(org.bytedeco.javacpp.opencv_core.Rect) org.bytedeco.javacpp.opencv_imgproc.cvDrawRect(org.bytedeco.javacpp.opencv_imgproc.cvDrawRect) Point2f(org.bytedeco.javacpp.opencv_core.Point2f) org.bytedeco.javacpp.opencv_core.cvPoint(org.bytedeco.javacpp.opencv_core.cvPoint)

Example 5 with Point2f

use of org.bytedeco.javacpp.opencv_core.Point2f in project myrobotlab by MyRobotLab.

The class OpenCVFilterFaceRecognizer, method process:

@Override
public IplImage process(IplImage image, VisionData data) throws InterruptedException {
    // convert to grayscale
    Frame grayFrame = makeGrayScale(image);
    // TODO: this seems super wonky! isn't there an easy way to go from IplImage
    // to opencv Mat?
    int cols = grayFrame.imageWidth;
    int rows = grayFrame.imageHeight;
    // convert to a Mat
    Mat bwImgMat = converterToIpl.convertToMat(grayFrame);
    // overlay the current mode so the user knows what the filter is doing
    if (Mode.TRAIN.equals(mode)) {
        String status = "Training Mode: " + trainName;
        cvPutText(image, status, cvPoint(20, 40), font, CvScalar.GREEN);
    } else if (Mode.RECOGNIZE.equals(mode)) {
        String status = "Recognize Mode:" + lastRecognizedName;
        cvPutText(image, status, cvPoint(20, 40), font, CvScalar.YELLOW);
    }
    //
    // Find a bunch of faces and their features
    // extractDetectedFaces will only return a face if it has all the necessary
    // features (face, 2 eyes and 1 mouth)
    ArrayList<DetectedFace> dFaces = extractDetectedFaces(bwImgMat, cols, rows);
    // Ok, for each of these detected faces we should try to classify them.
    for (DetectedFace dF : dFaces) {
        if (dF.isComplete()) {
            // create the triangle from left->right->mouth center
            Point2f srcTri = dF.resolveCenterTriangle();
            Point2f dstTri = new Point2f(3);
            // populate dest triangle with the canonical (upright) eye/mouth
            // positions, expressed as fractions of the face rectangle.
            dstTri.position(0).x((float) (dF.getFace().width() * .3)).y((float) (dF.getFace().height() * .45));
            dstTri.position(1).x((float) (dF.getFace().width() * .7)).y((float) (dF.getFace().height() * .45));
            dstTri.position(2).x((float) (dF.getFace().width() * .5)).y((float) (dF.getFace().height() * .85));
            // create the affine rotation/scale matrix
            Mat warpMat = getAffineTransform(srcTri.position(0), dstTri.position(0));
            // faceWithBorder returns null when the padded rect would fall
            // outside the image; skip this face in that case.
            Rect borderRect = dF.faceWithBorder(borderSize, cols, rows);
            if (borderRect == null) {
                log.warn("Invalid face border found... parts were negative!");
                continue;
            }
            Mat dFaceMat = new Mat(bwImgMat, borderRect);
            // so we don't lose the borders after the rotation
            if (doAffine) {
                warpAffine(dFaceMat, dFaceMat, warpMat, borderRect.size());
            }
            try {
                // release the native memory backing these temporaries
                srcTri.close();
                dstTri.close();
                warpMat.close();
            } catch (Exception e) {
                log.warn("Error releasing some OpenCV memory, you shouldn't see this: {}", e);
            // should we continue ?!
            }
            // dFaceMat is the cropped and rotated face
            if (Mode.TRAIN.equals(mode)) {
                // we're in training mode.. so we should save the image
                log.info("Training Mode for {}.", trainName);
                if (!StringUtils.isEmpty(trainName)) {
                    saveTrainingImage(trainName, dFaceMat);
                    cvPutText(image, "Snapshot Saved: " + trainName, cvPoint(20, 60), font, CvScalar.CYAN);
                }
            } else if (Mode.RECOGNIZE.equals(mode)) {
                // You bettah recognize!
                if (!trained) {
                    // we are a young grasshopper.
                    if (face) {
                        invoke("publishNoRecognizedFace");
                        face = false;
                    }
                    return image;
                } else {
                    face = true;
                    // Resize the face to pass it to the predicter
                    Mat dFaceMatSized = resizeImage(dFaceMat);
                    // If we're applying a mask, do it before the prediction
                    if (facemask != null) {
                        Mat maskedface = facemask.clone();
                        dFaceMatSized.copyTo(maskedface, facemask);
                        dFaceMatSized = maskedface;
                        if (debug) {
                            show(dFaceMatSized, "Masked Face");
                        }
                    }
                    int predictedLabel = faceRecognizer.predict(dFaceMatSized);
                    String name = Integer.toString(predictedLabel);
                    if (idToLabelMap.containsKey(predictedLabel)) {
                        name = idToLabelMap.get(predictedLabel);
                    } else {
                        // you shouldn't ever see this.
                        log.warn("Unknown predicted label returned! {}", predictedLabel);
                    }
                    log.info("Recognized a Face {} - {}", predictedLabel, name);
                    cvPutText(image, "Recognized:" + name, dF.resolveGlobalLowerLeftCorner(), font, CvScalar.CYAN);
                    // If it's a new name, invoke and publish. Compare with
                    // equals(): != compares String references and only works
                    // for interned strings.
                    if (!name.equals(lastRecognizedName)) {
                        invoke("publishRecognizedFace", name);
                    }
                    lastRecognizedName = name;
                }
            }
        }
        // highlight each of the faces we find.
        drawFaceRects(image, dF);
    }
    // pass through/return the original image marked up.
    return image;
}
Also used : Mat(org.bytedeco.javacpp.opencv_core.Mat) Frame(org.bytedeco.javacv.Frame) CanvasFrame(org.bytedeco.javacv.CanvasFrame) Rect(org.bytedeco.javacpp.opencv_core.Rect) org.bytedeco.javacpp.opencv_imgproc.cvDrawRect(org.bytedeco.javacpp.opencv_imgproc.cvDrawRect) Point2f(org.bytedeco.javacpp.opencv_core.Point2f) org.bytedeco.javacpp.opencv_core.cvPoint(org.bytedeco.javacpp.opencv_core.cvPoint)

Aggregations

Point2f (org.bytedeco.javacpp.opencv_core.Point2f)6 org.bytedeco.javacpp.opencv_core.cvPoint (org.bytedeco.javacpp.opencv_core.cvPoint)6 Mat (org.bytedeco.javacpp.opencv_core.Mat)4 Rect (org.bytedeco.javacpp.opencv_core.Rect)4 org.bytedeco.javacpp.opencv_imgproc.cvDrawRect (org.bytedeco.javacpp.opencv_imgproc.cvDrawRect)4 CanvasFrame (org.bytedeco.javacv.CanvasFrame)4 Frame (org.bytedeco.javacv.Frame)4 CvPoint (org.bytedeco.javacpp.opencv_core.CvPoint)2 IOException (java.io.IOException)1 BytePointer (org.bytedeco.javacpp.BytePointer)1