Example 76 with BytePointer

use of org.bytedeco.javacpp.BytePointer in project djl by deepjavalibrary.

the class JavacppUtils method createEagerSession.

@SuppressWarnings({ "unchecked", "try" })
public static TFE_Context createEagerSession(boolean async, int devicePlacementPolicy, ConfigProto config) {
    try (PointerScope ignored = new PointerScope()) {
        TFE_ContextOptions opts = TFE_ContextOptions.newContextOptions();
        TF_Status status = TF_Status.newStatus();
        if (config != null) {
            BytePointer configBytes = new BytePointer(config.toByteArray());
            tensorflow.TFE_ContextOptionsSetConfig(opts, configBytes, configBytes.capacity(), status);
            status.throwExceptionIfNotOK();
        }
        tensorflow.TFE_ContextOptionsSetAsync(opts, (byte) (async ? 1 : 0));
        tensorflow.TFE_ContextOptionsSetDevicePlacementPolicy(opts, devicePlacementPolicy);
        TFE_Context context = AbstractTFE_Context.newContext(opts, status);
        status.throwExceptionIfNotOK();
        // Keep the context alive after the PointerScope closes and frees the
        // other natives allocated above; the caller owns this reference.
        return context.retainReference();
    }
}
Also used : TFE_Context(org.tensorflow.internal.c_api.TFE_Context) AbstractTFE_Context(org.tensorflow.internal.c_api.AbstractTFE_Context) TF_Status(org.tensorflow.internal.c_api.TF_Status) BytePointer(org.bytedeco.javacpp.BytePointer) PointerScope(org.bytedeco.javacpp.PointerScope) TFE_ContextOptions(org.tensorflow.internal.c_api.TFE_ContextOptions)
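
A minimal caller sketch for the snippet above. Hedged assumptions: JavacppUtils lives in DJL's ai.djl.tensorflow.engine.javacpp package, the ConfigProto protos are in org.tensorflow.proto.framework (the package name varies across TensorFlow Java versions), and 2 corresponds to TFE_DEVICE_PLACEMENT_SILENT in the C API enum.

import ai.djl.tensorflow.engine.javacpp.JavacppUtils;
import org.tensorflow.internal.c_api.TFE_Context;
import org.tensorflow.proto.framework.ConfigProto;

public class EagerSessionDemo {
    public static void main(String[] args) {
        ConfigProto config = ConfigProto.newBuilder()
                .setLogDevicePlacement(true) // log each op's device placement
                .build();
        // async = true, device placement policy 2 (assumed: silent placement)
        TFE_Context context = JavacppUtils.createEagerSession(true, 2, config);
        // createEagerSession retained a reference for the caller; release it when done.
        context.releaseReference();
    }
}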

Example 77 with BytePointer

use of org.bytedeco.javacpp.BytePointer in project myrobotlab by MyRobotLab.

the class OpenCVFilterFaceRecognizer method train.

/**
 * Loads all of the image files under the training directory. The name of
 * each image's parent directory is used as the label for that image. At
 * least 2 different labels must exist in the training set.
 *
 * @return true if the training was successful.
 * @throws IOException if an image file cannot be read.
 */
public boolean train() throws IOException {
    // Lazily load the face mask the first time train() is called.
    if (facemask == null) {
        File filterfile = new File("resource/facerec/Filter.png");
        if (!filterfile.exists()) {
            // Fall back to the source-tree path so this also works when run from Eclipse.
            filterfile = new File("src/resource/facerec/Filter.png");
        }
        if (!filterfile.exists()) {
            log.warn("No image filter file found.  {}", filterfile.getAbsolutePath());
        } else {
            // Read the filter and rescale it to the current image size
            // BytePointer fbp = new BytePointer(FileUtils.getFileAsBytes(filterfile.getAbsolutePath()));
            // Mat incomingfacemask = imread(fbp, CV_LOAD_IMAGE_GRAYSCALE);
            Mat incomingfacemask = imread(filterfile.getAbsolutePath(), CV_LOAD_IMAGE_GRAYSCALE);
            facemask = resizeImage(incomingfacemask);
            if (debug) {
                show(facemask, "Face Mask");
            }
        }
    }
    File root = new File(trainingDir);
    if (root.isFile()) {
        log.warn("Training directory was a file, not a directory.  {}", root.getAbsolutePath());
        return false;
    }
    if (!root.exists()) {
        log.info("Creating new training directory {}", root.getAbsolutePath());
        root.mkdirs();
    }
    log.info("Using {} for training data.", root.getAbsolutePath());
    ArrayList<File> imageFiles = listImageFiles(root);
    if (imageFiles.size() < 1) {
        log.info("No images found for training.");
        return false;
    }
    // Storage for the files that we load.
    MatVector images = new MatVector(imageFiles.size());
    // storage for the labels for the images
    Mat labels = new Mat(imageFiles.size(), 1, CV_32SC1);
    IntBuffer labelsBuf = labels.getIntBuffer();
    int counter = 0;
    // a map between the hashcode and the string label
    HashMap<Integer, String> idToLabelMap = new HashMap<Integer, String>();
    for (File image : imageFiles) {
        // load the image
        log.info("Loading training image file: {}", image.getAbsolutePath());
        // imread doesn't handle non-ASCII file paths, so take a different route:
        // load the file into memory, wrap it in a BytePointer, and hand it to
        // imdecode to decode the image from memory instead of from disk.
        byte[] tmpImg = FileUtils.getFileAsBytes(image);
        Mat img = imdecode(new Mat(new BytePointer(tmpImg)), CV_LOAD_IMAGE_GRAYSCALE);
        // IplImage tempImg = cvLoadImage(image.getAbsolutePath());
        // Mat img = converterToMat.convertToMat(converterToMat.convert(tempImg));
        // The directory name is the label.
        String personName = image.getParentFile().getName();
        // String personName = UnicodeFolder.get(image.getParentFile().getName());
        // TODO: we need an integer to represent this string .. for now we're
        // using a hashcode here.
        // this can definitely have a collision!
        // we really need a better metadata store for these images.
        int label = personName.hashCode();
        // make sure all our test images are resized
        Mat resized = resizeImage(img);
        // Apply the face mask, if one was loaded, so only the face region is trained on.
        if (facemask != null) {
            Mat maskedface = facemask.clone();
            resized.copyTo(maskedface, facemask);
            resized = maskedface;
        }
        // Show the intermediate image when debugging.
        if (debug) {
            show(resized, personName);
        }
        // Images and labels share the same index position.
        images.put(counter, resized);
        labelsBuf.put(counter, label);
        // keep track of what string the hash code maps to.
        idToLabelMap.put(label, personName);
        counter++;
    }
    initRecognizer();
    // must be at least 2 things to classify, is it A or B ?
    if (idToLabelMap.keySet().size() > 1) {
        faceRecognizer.train(images, labels);
        trained = true;
    } else {
        log.info("No labeled images loaded. training skipped.");
        trained = false;
    }
    // populate the human readable labels.
    for (int k : idToLabelMap.keySet()) {
        faceRecognizer.setLabelInfo(k, idToLabelMap.get(k));
    }
    // Report whether training actually ran, per the javadoc contract.
    return trained;
}
Also used : Mat(org.bytedeco.javacpp.opencv_core.Mat) HashMap(java.util.HashMap) IntBuffer(java.nio.IntBuffer) BytePointer(org.bytedeco.javacpp.BytePointer) MatVector(org.bytedeco.javacpp.opencv_core.MatVector) File(java.io.File) org.bytedeco.javacpp.opencv_core.cvPoint(org.bytedeco.javacpp.opencv_core.cvPoint)
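
The imdecode work-around above is worth isolating. A minimal sketch under the same assumptions as the project (the pre-1.5 JavaCPP presets, where org.bytedeco.javacpp.opencv_imgcodecs supplies imdecode and CV_LOAD_IMAGE_GRAYSCALE); the class and method names are hypothetical:

import static org.bytedeco.javacpp.opencv_imgcodecs.*;

import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Paths;
import org.bytedeco.javacpp.BytePointer;
import org.bytedeco.javacpp.opencv_core.Mat;

public class ImdecodeDemo {
    /** Loads a grayscale image from a path that may contain non-ASCII characters. */
    public static Mat loadGray(String path) throws IOException {
        // Read the bytes in Java so the path encoding never reaches native code,
        // then wrap them in a BytePointer and decode the image from memory.
        byte[] bytes = Files.readAllBytes(Paths.get(path));
        return imdecode(new Mat(new BytePointer(bytes)), CV_LOAD_IMAGE_GRAYSCALE);
    }
}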

Example 78 with BytePointer

use of org.bytedeco.javacpp.BytePointer in project myrobotlab by MyRobotLab.

the class OpenCVFilterFaceRecognizer method process.

@Override
public IplImage process(IplImage image, OpenCVData data) throws InterruptedException {
    // convert to grayscale
    Frame grayFrame = makeGrayScale(image);
    // TODO: this seems super wonky! isn't there an easy way to go from IplImage
    // to opencv Mat?
    int cols = grayFrame.imageWidth;
    int rows = grayFrame.imageHeight;
    // convert to a Mat
    Mat bwImgMat = converterToIpl.convertToMat(grayFrame);
    // 
    if (Mode.TRAIN.equals(mode)) {
        String status = "Training Mode: " + trainName;
        cvPutText(image, status, cvPoint(20, 40), font, CvScalar.GREEN);
    } else if (Mode.RECOGNIZE.equals(mode)) {
        String status = "Recognize Mode:" + lastRecognizedName;
        cvPutText(image, status, cvPoint(20, 40), font, CvScalar.YELLOW);
    }
    // 
    // Find a bunch of faces and their features
    // extractDetectedFaces will only return a face if it has all the necessary
    // features (face, 2 eyes and 1 mouth)
    ArrayList<DetectedFace> dFaces = extractDetectedFaces(bwImgMat, cols, rows);
    // Ok, for each of these detected faces we should try to classify them.
    for (DetectedFace dF : dFaces) {
        if (dF.isComplete()) {
            // An array of three (x,y) points: the source triangle runs from
            // left eye -> right eye -> mouth center.
            Point2f srcTri = dF.resolveCenterTriangle();
            Point2f dstTri = new Point2f(3);
            // populate dest triangle.
            dstTri.position(0).x((float) (dF.getFace().width() * .3)).y((float) (dF.getFace().height() * .45));
            dstTri.position(1).x((float) (dF.getFace().width() * .7)).y((float) (dF.getFace().height() * .45));
            dstTri.position(2).x((float) (dF.getFace().width() * .5)).y((float) (dF.getFace().height() * .85));
            // create the affine rotation/scale matrix
            Mat warpMat = getAffineTransform(srcTri.position(0), dstTri.position(0));
            // Mat dFaceMat = new Mat(bwImgMat, dF.getFace());
            Rect borderRect = dF.faceWithBorder(borderSize, cols, rows);
            if (borderRect == null) {
                log.warn("Invalid face border found... parts were negative!");
                continue;
            }
            Mat dFaceMat = new Mat(bwImgMat, borderRect);
            // warp in place so we don't lose the borders after the rotation
            if (doAffine) {
                warpAffine(dFaceMat, dFaceMat, warpMat, borderRect.size());
            }
            try {
                // Close these explicitly: JavaCPP pointers own native memory
                // that the garbage collector will not reclaim promptly.
                srcTri.close();
                dstTri.close();
            } catch (Exception e) {
                log.warn("Error releasing some OpenCV memory, you shouldn't see this: {}", e);
            // should we continue ?!
            }
            // dFaceMat is the cropped and rotated face
            if (Mode.TRAIN.equals(mode)) {
                // we're in training mode.. so we should save the image
                log.info("Training Mode for {}.", trainName);
                if (!StringUtils.isEmpty(trainName)) {
                    try {
                        saveTrainingImage(trainName, dFaceMat);
                        cvPutText(image, "Snapshot Saved: " + trainName, cvPoint(20, 60), font, CvScalar.CYAN);
                    } catch (IOException e) {
                        cvPutText(image, "Error saving: " + trainName, cvPoint(20, 60), font, CvScalar.CYAN);
                        log.warn("Error saving training image for {}", trainName, e);
                    }
                }
            } else if (Mode.RECOGNIZE.equals(mode)) {
                // You bettah recognize!
                if (!trained) {
                    // we are a young grasshopper.
                    if (face) {
                        invoke("publishNoRecognizedFace");
                        face = false;
                    }
                    return image;
                } else {
                    face = true;
                    // Resize the face to pass it to the predictor
                    Mat dFaceMatSized = resizeImage(dFaceMat);
                    // If we're applying a mask, do it before the prediction
                    if (facemask != null) {
                        Mat maskedface = facemask.clone();
                        dFaceMatSized.copyTo(maskedface, facemask);
                        dFaceMatSized = maskedface;
                        if (debug) {
                            show(dFaceMatSized, "Masked Face");
                        }
                    }
                    int predictedLabel = faceRecognizer.predict(dFaceMatSized);
                    BytePointer bp = faceRecognizer.getLabelInfo(predictedLabel);
                    // TODO: what char encoding is this?!
                    String name = bp.getString();
                    log.info("Recognized a Face {} - {}", predictedLabel, name);
                    cvPutText(image, name, dF.resolveGlobalLowerLeftCorner(), font, CvScalar.CYAN);
                    // If it's a new name, invoke and publish it. Compare with
                    // equals(): != on Strings only checks reference identity.
                    if (!name.equals(lastRecognizedName)) {
                        invoke("publishRecognizedFace", name);
                    }
                    lastRecognizedName = name;
                }
            }
        }
        // highlight each of the faces we find.
        drawFaceRects(image, dF);
    }
    // pass through/return the original image marked up.
    return image;
}
Also used : Mat(org.bytedeco.javacpp.opencv_core.Mat) CanvasFrame(org.bytedeco.javacv.CanvasFrame) Frame(org.bytedeco.javacv.Frame) org.bytedeco.javacpp.opencv_imgproc.cvDrawRect(org.bytedeco.javacpp.opencv_imgproc.cvDrawRect) Rect(org.bytedeco.javacpp.opencv_core.Rect) Point2f(org.bytedeco.javacpp.opencv_core.Point2f) BytePointer(org.bytedeco.javacpp.BytePointer) IOException(java.io.IOException) org.bytedeco.javacpp.opencv_core.cvPoint(org.bytedeco.javacpp.opencv_core.cvPoint)
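
On the char-encoding TODO above: BytePointer can decode with an explicit charset instead of the platform default. A small sketch, assuming the labels were stored as UTF-8 (the recognizer API does not document this) and the same opencv_face preset used by the project:

import java.io.UnsupportedEncodingException;
import org.bytedeco.javacpp.BytePointer;
import org.bytedeco.javacpp.opencv_face.FaceRecognizer;

public class LabelInfoDemo {
    /** Decodes a recognizer label name with an explicit charset. */
    public static String labelName(FaceRecognizer recognizer, int label)
            throws UnsupportedEncodingException {
        try (BytePointer bp = recognizer.getLabelInfo(label)) {
            return bp.getString("UTF-8"); // assumption: label info stored as UTF-8
        }
    }
}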

Example 79 with BytePointer

use of org.bytedeco.javacpp.BytePointer in project myrobotlab by MyRobotLab.

the class OpenCVFilterLKOpticalTrack method imageChanged.

@Override
public void imageChanged(IplImage image) {
    eig = IplImage.create(imageSize, IPL_DEPTH_32F, 1);
    tmp = IplImage.create(imageSize, IPL_DEPTH_32F, 1);
    imgB = IplImage.create(imageSize, 8, 1);
    imgA = IplImage.create(imageSize, 8, 1);
    if (channels == 3) {
        cvCvtColor(image, imgB, CV_BGR2GRAY);
        cvCopy(imgB, imgA);
    }
    cornersA = new CvPoint2D32f(maxPointCount);
    cornersB = new CvPoint2D32f(maxPointCount);
    cornersC = new CvPoint2D32f(maxPointCount);
    // Output buffers the Lucas-Kanade call will fill: one status byte and one
    // tracking error per feature.
    features_found = new BytePointer(maxPointCount);
    feature_errors = new FloatPointer(maxPointCount);
}
Also used : FloatPointer(org.bytedeco.javacpp.FloatPointer) BytePointer(org.bytedeco.javacpp.BytePointer) CvPoint2D32f(org.bytedeco.javacpp.opencv_core.CvPoint2D32f)
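
The two buffers allocated last are the per-feature outputs that the Lucas-Kanade call fills in. A minimal consumer sketch (class, method, and parameter names are hypothetical; a non-zero status byte is assumed to mean "flow found", as in the OpenCV C API):

import org.bytedeco.javacpp.BytePointer;
import org.bytedeco.javacpp.FloatPointer;

public class FlowStatusDemo {
    /** Counts how many of n features were tracked with an error at or below maxError. */
    public static int countTracked(BytePointer featuresFound, FloatPointer featureErrors,
            int n, float maxError) {
        int tracked = 0;
        for (int i = 0; i < n; i++) {
            // A non-zero status byte means the flow for feature i was found.
            if (featuresFound.get(i) != 0 && featureErrors.get(i) <= maxError) {
                tracked++;
            }
        }
        return tracked;
    }
}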

Aggregations

BytePointer (org.bytedeco.javacpp.BytePointer) 84
IntPointer (org.bytedeco.javacpp.IntPointer) 23
ByteBuffer (java.nio.ByteBuffer) 20
PointerPointer (org.bytedeco.javacpp.PointerPointer) 20
IOException (java.io.IOException) 16
Pointer (org.bytedeco.javacpp.Pointer) 16
PointerScope (org.bytedeco.javacpp.PointerScope) 13
DoublePointer (org.bytedeco.javacpp.DoublePointer) 12
FloatPointer (org.bytedeco.javacpp.FloatPointer) 12
CompressedDataBuffer (org.nd4j.linalg.compression.CompressedDataBuffer) 10
CompressionDescriptor (org.nd4j.linalg.compression.CompressionDescriptor) 10
ShortBuffer (java.nio.ShortBuffer) 9
ShortPointer (org.bytedeco.javacpp.ShortPointer) 9
IntBuffer (java.nio.IntBuffer) 7
DoubleBuffer (java.nio.DoubleBuffer) 6
FloatBuffer (java.nio.FloatBuffer) 6
Nonnull (javax.annotation.Nonnull) 5
LongPointer (org.bytedeco.javacpp.LongPointer) 5
TF_Status (org.tensorflow.internal.c_api.TF_Status) 4
ByteOrder (java.nio.ByteOrder) 3