
Example 1 with IntPointer

Use of org.bytedeco.javacpp.IntPointer in project bigbluebutton by bigbluebutton.

The class FFmpegFrameRecorder, method startUnsafe.

void startUnsafe() throws Exception {
    int ret;
    picture = null;
    tmp_picture = null;
    picture_buf = null;
    frame = null;
    video_outbuf = null;
    audio_outbuf = null;
    oc = new AVFormatContext(null);
    video_c = null;
    audio_c = null;
    video_st = null;
    audio_st = null;
    got_video_packet = new int[1];
    got_audio_packet = new int[1];
    /* auto detect the output format from the name. */
    String format_name = format == null || format.length() == 0 ? null : format;
    if ((oformat = av_guess_format(format_name, filename, null)) == null) {
        int proto = filename.indexOf("://");
        if (proto > 0) {
            format_name = filename.substring(0, proto);
        }
        if ((oformat = av_guess_format(format_name, filename, null)) == null) {
            throw new Exception("av_guess_format() error: Could not guess output format for \"" + filename + "\" and " + format + " format.");
        }
    }
    format_name = oformat.name().getString();
    /* allocate the output media context */
    if (avformat_alloc_output_context2(oc, null, format_name, filename) < 0) {
        throw new Exception("avformat_alloc_context2() error:\tCould not allocate format context");
    }
    oc.oformat(oformat);
    oc.filename().putString(filename);
    /* add the audio and video streams using the format codecs
           and initialize the codecs */
    AVStream inpVideoStream = null, inpAudioStream = null;
    if (ifmt_ctx != null) {
        // get input video and audio stream indices from ifmt_ctx
        for (int idx = 0; idx < ifmt_ctx.nb_streams(); idx++) {
            AVStream inputStream = ifmt_ctx.streams(idx);
            if (inputStream.codec().codec_type() == AVMEDIA_TYPE_VIDEO) {
                inpVideoStream = inputStream;
                videoCodec = inpVideoStream.codec().codec_id();
                if (inpVideoStream.r_frame_rate().num() != AV_NOPTS_VALUE && inpVideoStream.r_frame_rate().den() != 0) {
                    // floating-point division, so fractional rates such as 30000/1001 are preserved
                    frameRate = (double) inpVideoStream.r_frame_rate().num() / inpVideoStream.r_frame_rate().den();
                }
            } else if (inputStream.codec().codec_type() == AVMEDIA_TYPE_AUDIO) {
                inpAudioStream = inputStream;
                audioCodec = inpAudioStream.codec().codec_id();
            }
        }
    }
    if (imageWidth > 0 && imageHeight > 0) {
        if (videoCodec != AV_CODEC_ID_NONE) {
            oformat.video_codec(videoCodec);
        } else if ("flv".equals(format_name)) {
            oformat.video_codec(AV_CODEC_ID_FLV1);
        } else if ("mp4".equals(format_name)) {
            oformat.video_codec(AV_CODEC_ID_MPEG4);
        } else if ("3gp".equals(format_name)) {
            oformat.video_codec(AV_CODEC_ID_H263);
        } else if ("avi".equals(format_name)) {
            oformat.video_codec(AV_CODEC_ID_HUFFYUV);
        }
        /* find the video encoder */
        if ((video_codec = avcodec_find_encoder_by_name(videoCodecName)) == null && (video_codec = avcodec_find_encoder(oformat.video_codec())) == null) {
            release();
            throw new Exception("avcodec_find_encoder() error: Video codec not found.");
        }
        oformat.video_codec(video_codec.id());
        AVRational frame_rate = av_d2q(frameRate, 1001000);
        AVRational supported_framerates = video_codec.supported_framerates();
        if (supported_framerates != null) {
            int idx = av_find_nearest_q_idx(frame_rate, supported_framerates);
            frame_rate = supported_framerates.position(idx);
        }
        /* add a video output stream */
        if ((video_st = avformat_new_stream(oc, video_codec)) == null) {
            release();
            throw new Exception("avformat_new_stream() error: Could not allocate video stream.");
        }
        video_c = video_st.codec();
        if (inpVideoStream != null) {
            if ((ret = avcodec_copy_context(video_st.codec(), inpVideoStream.codec())) < 0) {
                release();
                throw new Exception("avcodec_copy_context() error:\tFailed to copy context from input to output stream codec context");
            }
            videoBitrate = (int) inpVideoStream.codec().bit_rate();
            pixelFormat = inpVideoStream.codec().pix_fmt();
            aspectRatio = (double) inpVideoStream.codec().sample_aspect_ratio().num() / inpVideoStream.codec().sample_aspect_ratio().den();
            videoQuality = inpVideoStream.codec().global_quality();
            video_c.codec_tag(0);
        }
        video_c.codec_id(oformat.video_codec());
        video_c.codec_type(AVMEDIA_TYPE_VIDEO);
        /* put sample parameters */
        video_c.bit_rate(videoBitrate);
        /* resolution must be a multiple of two; round the width up to a multiple of 16 as encoders often require */
        video_c.width((imageWidth + 15) / 16 * 16);
        video_c.height(imageHeight);
        if (aspectRatio > 0) {
            AVRational r = av_d2q(aspectRatio, 255);
            video_c.sample_aspect_ratio(r);
            video_st.sample_aspect_ratio(r);
        }
        /* time base: this is the fundamental unit of time (in seconds) in terms
               of which frame timestamps are represented. for fixed-fps content,
               timebase should be 1/framerate and timestamp increments should be
               identically 1. */
        video_c.time_base(av_inv_q(frame_rate));
        video_st.time_base(av_inv_q(frame_rate));
        if (gopSize >= 0) {
            video_c.gop_size(gopSize);
        /* emit one intra frame every gopSize frames at most */
        }
        if (videoQuality >= 0) {
            video_c.flags(video_c.flags() | CODEC_FLAG_QSCALE);
            video_c.global_quality((int) Math.round(FF_QP2LAMBDA * videoQuality));
        }
        if (pixelFormat != AV_PIX_FMT_NONE) {
            video_c.pix_fmt(pixelFormat);
        } else if (video_c.codec_id() == AV_CODEC_ID_RAWVIDEO || video_c.codec_id() == AV_CODEC_ID_PNG || video_c.codec_id() == AV_CODEC_ID_HUFFYUV || video_c.codec_id() == AV_CODEC_ID_FFV1) {
            // appropriate for common lossless formats
            video_c.pix_fmt(AV_PIX_FMT_RGB32);
        } else {
            // lossy, but works with about everything
            video_c.pix_fmt(AV_PIX_FMT_YUV420P);
        }
        if (video_c.codec_id() == AV_CODEC_ID_MPEG2VIDEO) {
            /* just for testing, we also add B frames */
            video_c.max_b_frames(2);
        } else if (video_c.codec_id() == AV_CODEC_ID_MPEG1VIDEO) {
            /* Needed to avoid using macroblocks in which some coeffs overflow.
                   This does not happen with normal video, it just happens here as
                   the motion of the chroma plane does not match the luma plane. */
            video_c.mb_decision(2);
        } else if (video_c.codec_id() == AV_CODEC_ID_H263) {
            // H.263 does not support any other resolution than the following
            if (imageWidth <= 128 && imageHeight <= 96) {
                video_c.width(128).height(96);
            } else if (imageWidth <= 176 && imageHeight <= 144) {
                video_c.width(176).height(144);
            } else if (imageWidth <= 352 && imageHeight <= 288) {
                video_c.width(352).height(288);
            } else if (imageWidth <= 704 && imageHeight <= 576) {
                video_c.width(704).height(576);
            } else {
                video_c.width(1408).height(1152);
            }
        } else if (video_c.codec_id() == AV_CODEC_ID_H264) {
            // default to constrained baseline to produce content that plays back on anything,
            // without any significant tradeoffs for most use cases
            video_c.profile(AVCodecContext.FF_PROFILE_H264_CONSTRAINED_BASELINE);
        }
        // some formats want stream headers to be separate
        if ((oformat.flags() & AVFMT_GLOBALHEADER) != 0) {
            video_c.flags(video_c.flags() | CODEC_FLAG_GLOBAL_HEADER);
        }
        if ((video_codec.capabilities() & CODEC_CAP_EXPERIMENTAL) != 0) {
            video_c.strict_std_compliance(AVCodecContext.FF_COMPLIANCE_EXPERIMENTAL);
        }
    }
    /*
         * add an audio output stream
         */
    if (audioChannels > 0 && audioBitrate > 0 && sampleRate > 0) {
        if (audioCodec != AV_CODEC_ID_NONE) {
            oformat.audio_codec(audioCodec);
        } else if ("flv".equals(format_name) || "mp4".equals(format_name) || "3gp".equals(format_name)) {
            oformat.audio_codec(AV_CODEC_ID_AAC);
        } else if ("avi".equals(format_name)) {
            oformat.audio_codec(AV_CODEC_ID_PCM_S16LE);
        }
        /* find the audio encoder */
        if ((audio_codec = avcodec_find_encoder_by_name(audioCodecName)) == null && (audio_codec = avcodec_find_encoder(oformat.audio_codec())) == null) {
            release();
            throw new Exception("avcodec_find_encoder() error: Audio codec not found.");
        }
        oformat.audio_codec(audio_codec.id());
        if ((audio_st = avformat_new_stream(oc, audio_codec)) == null) {
            release();
            throw new Exception("avformat_new_stream() error: Could not allocate audio stream.");
        }
        audio_c = audio_st.codec();
        if (inpAudioStream != null && audioChannels > 0) {
            if ((ret = avcodec_copy_context(audio_st.codec(), inpAudioStream.codec())) < 0) {
                throw new Exception("avcodec_copy_context() error:\tFailed to copy context from input audio to output audio stream codec context\n");
            }
            audioBitrate = (int) inpAudioStream.codec().bit_rate();
            sampleRate = inpAudioStream.codec().sample_rate();
            audioChannels = inpAudioStream.codec().channels();
            sampleFormat = inpAudioStream.codec().sample_fmt();
            audioQuality = inpAudioStream.codec().global_quality();
            audio_c.codec_tag(0);
            audio_st.pts(inpAudioStream.pts());
            audio_st.duration(inpAudioStream.duration());
            audio_st.time_base().num(inpAudioStream.time_base().num());
            audio_st.time_base().den(inpAudioStream.time_base().den());
        }
        audio_c.codec_id(oformat.audio_codec());
        audio_c.codec_type(AVMEDIA_TYPE_AUDIO);
        /* put sample parameters */
        audio_c.bit_rate(audioBitrate);
        audio_c.sample_rate(sampleRate);
        audio_c.channels(audioChannels);
        audio_c.channel_layout(av_get_default_channel_layout(audioChannels));
        if (sampleFormat != AV_SAMPLE_FMT_NONE) {
            audio_c.sample_fmt(sampleFormat);
        } else {
            // use AV_SAMPLE_FMT_S16 by default, if available
            audio_c.sample_fmt(AV_SAMPLE_FMT_FLTP);
            IntPointer formats = audio_c.codec().sample_fmts();
            for (int i = 0; formats.get(i) != -1; i++) {
                if (formats.get(i) == AV_SAMPLE_FMT_S16) {
                    audio_c.sample_fmt(AV_SAMPLE_FMT_S16);
                    break;
                }
            }
        }
        audio_c.time_base().num(1).den(sampleRate);
        audio_st.time_base().num(1).den(sampleRate);
        switch(audio_c.sample_fmt()) {
            case AV_SAMPLE_FMT_U8:
            case AV_SAMPLE_FMT_U8P:
                audio_c.bits_per_raw_sample(8);
                break;
            case AV_SAMPLE_FMT_S16:
            case AV_SAMPLE_FMT_S16P:
                audio_c.bits_per_raw_sample(16);
                break;
            case AV_SAMPLE_FMT_S32:
            case AV_SAMPLE_FMT_S32P:
                audio_c.bits_per_raw_sample(32);
                break;
            case AV_SAMPLE_FMT_FLT:
            case AV_SAMPLE_FMT_FLTP:
                audio_c.bits_per_raw_sample(32);
                break;
            case AV_SAMPLE_FMT_DBL:
            case AV_SAMPLE_FMT_DBLP:
                audio_c.bits_per_raw_sample(64);
                break;
            default:
                assert false;
        }
        if (audioQuality >= 0) {
            audio_c.flags(audio_c.flags() | CODEC_FLAG_QSCALE);
            audio_c.global_quality((int) Math.round(FF_QP2LAMBDA * audioQuality));
        }
        // some formats want stream headers to be separate
        if ((oformat.flags() & AVFMT_GLOBALHEADER) != 0) {
            audio_c.flags(audio_c.flags() | CODEC_FLAG_GLOBAL_HEADER);
        }
        if ((audio_codec.capabilities() & CODEC_CAP_EXPERIMENTAL) != 0) {
            audio_c.strict_std_compliance(AVCodecContext.FF_COMPLIANCE_EXPERIMENTAL);
        }
    }
    av_dump_format(oc, 0, filename, 1);
    /* now that all the parameters are set, we can open the audio and
           video codecs and allocate the necessary encode buffers */
    if (video_st != null && inpVideoStream == null) {
        AVDictionary options = new AVDictionary(null);
        if (videoQuality >= 0) {
            av_dict_set(options, "crf", "" + videoQuality, 0);
        }
        for (Entry<String, String> e : videoOptions.entrySet()) {
            av_dict_set(options, e.getKey(), e.getValue(), 0);
        }
        /* open the codec */
        if ((ret = avcodec_open2(video_c, video_codec, options)) < 0) {
            release();
            throw new Exception("avcodec_open2() error " + ret + ": Could not open video codec.");
        }
        av_dict_free(options);
        video_outbuf = null;
        if ((oformat.flags() & AVFMT_RAWPICTURE) == 0) {
            /* allocate output buffer */
            /* XXX: API change will be done */
            /* buffers passed into lav* can be allocated any way you prefer,
                   as long as they're aligned enough for the architecture, and
                   they're freed appropriately (such as using av_free for buffers
                   allocated with av_malloc) */
            // a la ffmpeg.c
            video_outbuf_size = Math.max(256 * 1024, 8 * video_c.width() * video_c.height());
            video_outbuf = new BytePointer(av_malloc(video_outbuf_size));
        }
        /* allocate the encoded raw picture */
        if ((picture = av_frame_alloc()) == null) {
            release();
            throw new Exception("av_frame_alloc() error: Could not allocate picture.");
        }
        // magic required by libx264
        picture.pts(0);
        int size = avpicture_get_size(video_c.pix_fmt(), video_c.width(), video_c.height());
        if ((picture_buf = new BytePointer(av_malloc(size))).isNull()) {
            release();
            throw new Exception("av_malloc() error: Could not allocate picture buffer.");
        }
        /* if the output format is not equal to the image format, then a temporary
               picture is needed too. It is then converted to the required output format */
        if ((tmp_picture = av_frame_alloc()) == null) {
            release();
            throw new Exception("av_frame_alloc() error: Could not allocate temporary picture.");
        }
        AVDictionary metadata = new AVDictionary(null);
        for (Entry<String, String> e : videoMetadata.entrySet()) {
            av_dict_set(metadata, e.getKey(), e.getValue(), 0);
        }
        video_st.metadata(metadata);
    }
    if (audio_st != null && inpAudioStream == null) {
        AVDictionary options = new AVDictionary(null);
        if (audioQuality >= 0) {
            av_dict_set(options, "crf", "" + audioQuality, 0);
        }
        for (Entry<String, String> e : audioOptions.entrySet()) {
            av_dict_set(options, e.getKey(), e.getValue(), 0);
        }
        /* open the codec */
        if ((ret = avcodec_open2(audio_c, audio_codec, options)) < 0) {
            release();
            throw new Exception("avcodec_open2() error " + ret + ": Could not open audio codec.");
        }
        av_dict_free(options);
        audio_outbuf_size = 256 * 1024;
        audio_outbuf = new BytePointer(av_malloc(audio_outbuf_size));
        /* ugly hack for PCM codecs (will be removed ASAP with new PCM support)
               to compute the input frame size in samples */
        if (audio_c.frame_size() <= 1) {
            audio_outbuf_size = FF_MIN_BUFFER_SIZE;
            audio_input_frame_size = audio_outbuf_size / audio_c.channels();
            switch(audio_c.codec_id()) {
                case AV_CODEC_ID_PCM_S16LE:
                case AV_CODEC_ID_PCM_S16BE:
                case AV_CODEC_ID_PCM_U16LE:
                case AV_CODEC_ID_PCM_U16BE:
                    audio_input_frame_size >>= 1;
                    break;
                default:
                    break;
            }
        } else {
            audio_input_frame_size = audio_c.frame_size();
        }
        //int bufferSize = audio_input_frame_size * audio_c.bits_per_raw_sample()/8 * audio_c.channels();
        int planes = av_sample_fmt_is_planar(audio_c.sample_fmt()) != 0 ? (int) audio_c.channels() : 1;
        int data_size = av_samples_get_buffer_size((IntPointer) null, audio_c.channels(), audio_input_frame_size, audio_c.sample_fmt(), 1) / planes;
        samples_out = new BytePointer[planes];
        for (int i = 0; i < samples_out.length; i++) {
            samples_out[i] = new BytePointer(av_malloc(data_size)).capacity(data_size);
        }
        samples_in = new Pointer[AVFrame.AV_NUM_DATA_POINTERS];
        samples_in_ptr = new PointerPointer(AVFrame.AV_NUM_DATA_POINTERS);
        samples_out_ptr = new PointerPointer(AVFrame.AV_NUM_DATA_POINTERS);
        /* allocate the audio frame */
        if ((frame = av_frame_alloc()) == null) {
            release();
            throw new Exception("av_frame_alloc() error: Could not allocate audio frame.");
        }
        // magic required by libvorbis and webm
        frame.pts(0);
        AVDictionary metadata = new AVDictionary(null);
        for (Entry<String, String> e : audioMetadata.entrySet()) {
            av_dict_set(metadata, e.getKey(), e.getValue(), 0);
        }
        audio_st.metadata(metadata);
    }
    /* open the output file, if needed */
    if ((oformat.flags() & AVFMT_NOFILE) == 0) {
        AVIOContext pb = new AVIOContext(null);
        if ((ret = avio_open(pb, filename, AVIO_FLAG_WRITE)) < 0) {
            release();
            throw new Exception("avio_open error() error " + ret + ": Could not open '" + filename + "'");
        }
        oc.pb(pb);
    }
    AVDictionary options = new AVDictionary(null);
    for (Entry<String, String> e : this.options.entrySet()) {
        av_dict_set(options, e.getKey(), e.getValue(), 0);
    }
    AVDictionary metadata = new AVDictionary(null);
    for (Entry<String, String> e : this.metadata.entrySet()) {
        av_dict_set(metadata, e.getKey(), e.getValue(), 0);
    }
    /* write the stream header, if any */
    avformat_write_header(oc.metadata(metadata), options);
    av_dict_free(options);
}
Also used : PointerPointer(org.bytedeco.javacpp.PointerPointer) IntPointer(org.bytedeco.javacpp.IntPointer) BytePointer(org.bytedeco.javacpp.BytePointer)
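
The IntPointer work above that matters most is the sample_fmts() scan: the codec exposes a native int array terminated by -1 (AV_SAMPLE_FMT_NONE). Below is a minimal standalone sketch of that pattern, assuming the same javacpp-presets FFmpeg bindings used above; the SampleFmtScan class and the AAC codec choice are illustrative, not from the original.

import org.bytedeco.javacpp.IntPointer;
import org.bytedeco.javacpp.avcodec.AVCodec;

import static org.bytedeco.javacpp.avcodec.*;
import static org.bytedeco.javacpp.avutil.*;

public class SampleFmtScan {
    public static void main(String[] args) {
        avcodec_register_all();
        // Any encoder works; AAC is just an example.
        AVCodec codec = avcodec_find_encoder(AV_CODEC_ID_AAC);
        int chosen = AV_SAMPLE_FMT_FLTP; // fallback, as in the recorder above
        // sample_fmts() is a -1-terminated native int array.
        IntPointer formats = codec.sample_fmts();
        for (int i = 0; formats.get(i) != AV_SAMPLE_FMT_NONE; i++) {
            if (formats.get(i) == AV_SAMPLE_FMT_S16) {
                chosen = AV_SAMPLE_FMT_S16; // prefer packed signed 16-bit when offered
                break;
            }
        }
        System.out.println("selected sample_fmt: " + chosen);
    }
}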

Example 2 with IntPointer

Use of org.bytedeco.javacpp.IntPointer in project bigbluebutton by bigbluebutton.

The class Frame, method createIndexer.

/** Returns an {@link Indexer} for the <i>i</i>th image plane. */
public <I extends Indexer> I createIndexer(boolean direct, int i) {
    long[] sizes = { imageHeight, imageWidth, imageChannels };
    long[] strides = { imageStride, imageChannels, 1 };
    Buffer buffer = image[i];
    Object array = buffer.hasArray() ? buffer.array() : null;
    switch(imageDepth) {
        case DEPTH_UBYTE:
            return array != null ? (I) UByteIndexer.create((byte[]) array, sizes, strides) : direct ? (I) UByteIndexer.create((ByteBuffer) buffer, sizes, strides) : (I) UByteIndexer.create(new BytePointer((ByteBuffer) buffer), sizes, strides, false);
        case DEPTH_BYTE:
            return array != null ? (I) ByteIndexer.create((byte[]) array, sizes, strides) : direct ? (I) ByteIndexer.create((ByteBuffer) buffer, sizes, strides) : (I) ByteIndexer.create(new BytePointer((ByteBuffer) buffer), sizes, strides, false);
        case DEPTH_USHORT:
            return array != null ? (I) UShortIndexer.create((short[]) array, sizes, strides) : direct ? (I) UShortIndexer.create((ShortBuffer) buffer, sizes, strides) : (I) UShortIndexer.create(new ShortPointer((ShortBuffer) buffer), sizes, strides, false);
        case DEPTH_SHORT:
            return array != null ? (I) ShortIndexer.create((short[]) array, sizes, strides) : direct ? (I) ShortIndexer.create((ShortBuffer) buffer, sizes, strides) : (I) ShortIndexer.create(new ShortPointer((ShortBuffer) buffer), sizes, strides, false);
        case DEPTH_INT:
            return array != null ? (I) IntIndexer.create((int[]) array, sizes, strides) : direct ? (I) IntIndexer.create((IntBuffer) buffer, sizes, strides) : (I) IntIndexer.create(new IntPointer((IntBuffer) buffer), sizes, strides, false);
        case DEPTH_LONG:
            return array != null ? (I) LongIndexer.create((long[]) array, sizes, strides) : direct ? (I) LongIndexer.create((LongBuffer) buffer, sizes, strides) : (I) LongIndexer.create(new LongPointer((LongBuffer) buffer), sizes, strides, false);
        case DEPTH_FLOAT:
            return array != null ? (I) FloatIndexer.create((float[]) array, sizes, strides) : direct ? (I) FloatIndexer.create((FloatBuffer) buffer, sizes, strides) : (I) FloatIndexer.create(new FloatPointer((FloatBuffer) buffer), sizes, strides, false);
        case DEPTH_DOUBLE:
            return array != null ? (I) DoubleIndexer.create((double[]) array, sizes, strides) : direct ? (I) DoubleIndexer.create((DoubleBuffer) buffer, sizes, strides) : (I) DoubleIndexer.create(new DoublePointer((DoubleBuffer) buffer), sizes, strides, false);
        default:
            assert false;
    }
    return null;
}
Also used : FloatBuffer(java.nio.FloatBuffer) DoubleBuffer(java.nio.DoubleBuffer) ShortBuffer(java.nio.ShortBuffer) ByteBuffer(java.nio.ByteBuffer) IntBuffer(java.nio.IntBuffer) Buffer(java.nio.Buffer) LongBuffer(java.nio.LongBuffer) DoubleBuffer(java.nio.DoubleBuffer) LongBuffer(java.nio.LongBuffer) BytePointer(org.bytedeco.javacpp.BytePointer) DoublePointer(org.bytedeco.javacpp.DoublePointer) FloatBuffer(java.nio.FloatBuffer) ByteBuffer(java.nio.ByteBuffer) ShortPointer(org.bytedeco.javacpp.ShortPointer) LongPointer(org.bytedeco.javacpp.LongPointer) FloatPointer(org.bytedeco.javacpp.FloatPointer) IntBuffer(java.nio.IntBuffer) IntPointer(org.bytedeco.javacpp.IntPointer) ShortBuffer(java.nio.ShortBuffer)
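
In the DEPTH_INT branch above, the IntBuffer is wrapped in an IntPointer so the indexer can reach its memory. Here is a minimal sketch of the same call shape, using a natively allocated IntPointer in place of a Buffer; the class name and sample values are illustrative.

import org.bytedeco.javacpp.IntPointer;
import org.bytedeco.javacpp.indexer.IntIndexer;

public class IndexerSketch {
    public static void main(String[] args) {
        // A 2x3 single-channel "image" held in native memory.
        IntPointer data = new IntPointer(6).put(new int[] {1, 2, 3, 4, 5, 6});
        long[] sizes   = {2, 3, 1}; // height, width, channels
        long[] strides = {3, 1, 1}; // elements per row, per pixel, per channel
        // direct=false copies to a Java array, as in the non-direct branch above.
        IntIndexer idx = IntIndexer.create(data, sizes, strides, false);
        System.out.println(idx.get(1, 2, 0)); // row 1, column 2 -> 6
    }
}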

Example 3 with IntPointer

Use of org.bytedeco.javacpp.IntPointer in project javacv by bytedeco.

The class FaceRecognizerInVideo, method main.

public static void main(String[] args) throws Exception {
    OpenCVFrameConverter.ToMat converterToMat = new OpenCVFrameConverter.ToMat();
    if (args.length < 2) {
        System.out.println("Two parameters are required to run this program: the video file to analyze and the trained recognizer model.");
        return;
    }
    String videoFileName = args[0];
    String trainedResult = args[1];
    CascadeClassifier face_cascade = new CascadeClassifier("data\\haarcascade_frontalface_default.xml");
    FaceRecognizer lbphFaceRecognizer = LBPHFaceRecognizer.create();
    lbphFaceRecognizer.read(trainedResult);
    File f = new File(videoFileName);
    OpenCVFrameGrabber grabber = null;
    try {
        grabber = OpenCVFrameGrabber.createDefault(f);
        grabber.start();
    } catch (Exception e) {
        System.err.println("Failed to start the grabber.");
        return;
    }
    Frame videoFrame = null;
    Mat videoMat = new Mat();
    while (true) {
        videoFrame = grabber.grab();
        if (videoFrame == null) {
            // end of stream
            break;
        }
        videoMat = converterToMat.convert(videoFrame);
        Mat videoMatGray = new Mat();
        // Convert the current frame to grayscale:
        cvtColor(videoMat, videoMatGray, COLOR_BGRA2GRAY);
        equalizeHist(videoMatGray, videoMatGray);
        Point p = new Point();
        RectVector faces = new RectVector();
        // Find the faces in the frame:
        face_cascade.detectMultiScale(videoMatGray, faces);
        // At this point you have the position of the faces in "faces".
        // Now get each face, make a prediction, and annotate it in the video. Cool or what?
        for (int i = 0; i < faces.size(); i++) {
            Rect face_i = faces.get(i);
            Mat face = new Mat(videoMatGray, face_i);
            // If fisher face recognizer is used, the face need to be
            // resized.
            // resize(face, face_resized, new Size(im_width, im_height),
            // 1.0, 1.0, INTER_CUBIC);
            // Now perform the prediction, see how easy that is:
            IntPointer label = new IntPointer(1);
            DoublePointer confidence = new DoublePointer(1);
            lbphFaceRecognizer.predict(face, label, confidence);
            int prediction = label.get(0);
            // And finally write all we've found out to the original image!
            // First of all draw a green rectangle around the detected face:
            rectangle(videoMat, face_i, new Scalar(0, 255, 0, 1));
            // Create the text we will annotate the box with:
            String box_text = "Prediction = " + prediction;
            // Calculate the position for annotated text (make sure we don't
            // put illegal values in there):
            int pos_x = Math.max(face_i.tl().x() - 10, 0);
            int pos_y = Math.max(face_i.tl().y() - 10, 0);
            // And now put it into the image:
            putText(videoMat, box_text, new Point(pos_x, pos_y), FONT_HERSHEY_PLAIN, 1.0, new Scalar(0, 255, 0, 2.0));
        }
        // Show the result:
        imshow("face_recognizer", videoMat);
        char key = (char) waitKey(20);
        // Exit this loop on escape:
        if (key == 27) {
            destroyAllWindows();
            break;
        }
    }
}
Also used : Frame(org.bytedeco.javacv.Frame) DoublePointer(org.bytedeco.javacpp.DoublePointer) OpenCVFrameGrabber(org.bytedeco.javacv.OpenCVFrameGrabber) Exception(org.bytedeco.javacv.FrameGrabber.Exception) IntPointer(org.bytedeco.javacpp.IntPointer) OpenCVFrameConverter(org.bytedeco.javacv.OpenCVFrameConverter) File(java.io.File)
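
In this example the IntPointer and DoublePointer act as C-style out-parameters for predict(): one-element native allocations the library writes into and Java reads back. A stripped-down sketch of the idiom follows, with the native call simulated; the class name and values are illustrative.

import org.bytedeco.javacpp.DoublePointer;
import org.bytedeco.javacpp.IntPointer;

public class OutParamSketch {
    public static void main(String[] args) {
        // One-element pointers stand in for native int* and double* arguments.
        IntPointer label = new IntPointer(1);
        DoublePointer confidence = new DoublePointer(1);
        // A call like recognizer.predict(face, label, confidence) writes
        // through the pointers; simulate that write here:
        label.put(0, 42);
        confidence.put(0, 57.3);
        System.out.println("label " + label.get(0) + ", confidence " + confidence.get(0));
        // Native memory is released by the deallocator eventually, or eagerly:
        label.deallocate();
        confidence.deallocate();
    }
}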

Example 4 with IntPointer

Use of org.bytedeco.javacpp.IntPointer in project javacv by bytedeco.

The class OpenCVFaceRecognizer, method main.

public static void main(String[] args) {
    String trainingDir = args[0];
    Mat testImage = imread(args[1], CV_LOAD_IMAGE_GRAYSCALE);
    File root = new File(trainingDir);
    FilenameFilter imgFilter = new FilenameFilter() {

        public boolean accept(File dir, String name) {
            name = name.toLowerCase();
            return name.endsWith(".jpg") || name.endsWith(".pgm") || name.endsWith(".png");
        }
    };
    File[] imageFiles = root.listFiles(imgFilter);
    MatVector images = new MatVector(imageFiles.length);
    Mat labels = new Mat(imageFiles.length, 1, CV_32SC1);
    IntBuffer labelsBuf = labels.createBuffer();
    int counter = 0;
    for (File image : imageFiles) {
        Mat img = imread(image.getAbsolutePath(), CV_LOAD_IMAGE_GRAYSCALE);
        int label = Integer.parseInt(image.getName().split("\\-")[0]);
        images.put(counter, img);
        labelsBuf.put(counter, label);
        counter++;
    }
    FaceRecognizer faceRecognizer = FisherFaceRecognizer.create();
    // FaceRecognizer faceRecognizer = EigenFaceRecognizer.create();
    // FaceRecognizer faceRecognizer = LBPHFaceRecognizer.create();
    faceRecognizer.train(images, labels);
    IntPointer label = new IntPointer(1);
    DoublePointer confidence = new DoublePointer(1);
    faceRecognizer.predict(testImage, label, confidence);
    int predictedLabel = label.get(0);
    System.out.println("Predicted label: " + predictedLabel);
}
Also used : Mat(org.bytedeco.javacpp.opencv_core.Mat) FilenameFilter(java.io.FilenameFilter) IntBuffer(java.nio.IntBuffer) IntPointer(org.bytedeco.javacpp.IntPointer) DoublePointer(org.bytedeco.javacpp.DoublePointer) FaceRecognizer(org.bytedeco.javacpp.opencv_face.FaceRecognizer) EigenFaceRecognizer(org.bytedeco.javacpp.opencv_face.EigenFaceRecognizer) FisherFaceRecognizer(org.bytedeco.javacpp.opencv_face.FisherFaceRecognizer) LBPHFaceRecognizer(org.bytedeco.javacpp.opencv_face.LBPHFaceRecognizer) MatVector(org.bytedeco.javacpp.opencv_core.MatVector) File(java.io.File)
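
The labels Mat above is filled through createBuffer(), which returns a typed NIO view over the Mat's native data (an IntBuffer for CV_32SC1). Below is a minimal sketch of just that step, assuming the same opencv presets as above; the class name and label values are illustrative.

import java.nio.IntBuffer;

import org.bytedeco.javacpp.opencv_core.Mat;

import static org.bytedeco.javacpp.opencv_core.CV_32SC1;

public class LabelMatSketch {
    public static void main(String[] args) {
        // FaceRecognizer.train expects labels as an Nx1 signed 32-bit Mat.
        Mat labels = new Mat(4, 1, CV_32SC1);
        IntBuffer labelsBuf = labels.createBuffer();
        for (int i = 0; i < 4; i++) {
            labelsBuf.put(i, 10 + i); // one label per training image
        }
        System.out.println(labelsBuf.get(2)); // prints 12
    }
}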

Example 5 with IntPointer

Use of org.bytedeco.javacpp.IntPointer in project javacv by bytedeco.

The class OpticalFlowTracker, method main.

public static void main(String[] args) {
    // Load two images and allocate other structures
    IplImage imgA = cvLoadImage("image0.png", CV_LOAD_IMAGE_GRAYSCALE);
    IplImage imgB = cvLoadImage("image1.png", CV_LOAD_IMAGE_GRAYSCALE);
    CvSize img_sz = cvGetSize(imgA);
    int win_size = 15;
    // IplImage imgC = cvLoadImage("OpticalFlow1.png",
    // CV_LOAD_IMAGE_UNCHANGED);
    IplImage imgC = cvLoadImage("image0.png", CV_LOAD_IMAGE_UNCHANGED);
    // Get the features for tracking
    IplImage eig_image = cvCreateImage(img_sz, IPL_DEPTH_32F, 1);
    IplImage tmp_image = cvCreateImage(img_sz, IPL_DEPTH_32F, 1);
    IntPointer corner_count = new IntPointer(1).put(MAX_CORNERS);
    CvPoint2D32f cornersA = new CvPoint2D32f(MAX_CORNERS);
    CvArr mask = null;
    cvGoodFeaturesToTrack(imgA, eig_image, tmp_image, cornersA, corner_count, 0.05, 5.0, mask, 3, 0, 0.04);
    cvFindCornerSubPix(imgA, cornersA, corner_count.get(), cvSize(win_size, win_size), cvSize(-1, -1), cvTermCriteria(CV_TERMCRIT_ITER | CV_TERMCRIT_EPS, 20, 0.03));
    // Call Lucas Kanade algorithm
    BytePointer features_found = new BytePointer(MAX_CORNERS);
    FloatPointer feature_errors = new FloatPointer(MAX_CORNERS);
    CvSize pyr_sz = cvSize(imgA.width() + 8, imgB.height() / 3);
    IplImage pyrA = cvCreateImage(pyr_sz, IPL_DEPTH_32F, 1);
    IplImage pyrB = cvCreateImage(pyr_sz, IPL_DEPTH_32F, 1);
    CvPoint2D32f cornersB = new CvPoint2D32f(MAX_CORNERS);
    cvCalcOpticalFlowPyrLK(imgA, imgB, pyrA, pyrB, cornersA, cornersB, corner_count.get(), cvSize(win_size, win_size), 5, features_found, feature_errors, cvTermCriteria(CV_TERMCRIT_ITER | CV_TERMCRIT_EPS, 20, 0.3), 0);
    // Make an image of the results
    for (int i = 0; i < corner_count.get(); i++) {
        if (features_found.get(i) == 0 || feature_errors.get(i) > 550) {
            System.out.println("Error is " + feature_errors.get(i) + "/n");
            continue;
        }
        System.out.println("Got it/n");
        cornersA.position(i);
        cornersB.position(i);
        CvPoint p0 = cvPoint(Math.round(cornersA.x()), Math.round(cornersA.y()));
        CvPoint p1 = cvPoint(Math.round(cornersB.x()), Math.round(cornersB.y()));
        cvLine(imgC, p0, p1, CV_RGB(255, 0, 0), 2, 8, 0);
    }
    cvSaveImage("image0-1.png", imgC);
    cvNamedWindow("LKpyr_OpticalFlow", 0);
    cvShowImage("LKpyr_OpticalFlow", imgC);
    cvWaitKey(0);
}
Also used : FloatPointer(org.bytedeco.javacpp.FloatPointer) IntPointer(org.bytedeco.javacpp.IntPointer) BytePointer(org.bytedeco.javacpp.BytePointer)
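
Here the IntPointer works as an in/out parameter: it is seeded with MAX_CORNERS, cvGoodFeaturesToTrack reads that limit and then overwrites it with the number of corners actually found, which the Java side reads back with get(). A tiny sketch of the round trip, with the native call simulated; the MAX_CORNERS value is assumed, since the constant is not shown in the original.

import org.bytedeco.javacpp.IntPointer;

public class CornerCountSketch {
    static final int MAX_CORNERS = 500; // assumed value

    public static void main(String[] args) {
        // Seed the one-element pointer with the corner budget...
        IntPointer corner_count = new IntPointer(1).put(MAX_CORNERS);
        // ...the native call would overwrite it with the actual count:
        corner_count.put(0, 137);
        System.out.println("corners found: " + corner_count.get()); // get() reads index 0
    }
}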

Aggregations

IntPointer (org.bytedeco.javacpp.IntPointer): 31
DoublePointer (org.bytedeco.javacpp.DoublePointer): 17
FloatPointer (org.bytedeco.javacpp.FloatPointer): 16
DataBuffer (org.nd4j.linalg.api.buffer.DataBuffer): 13
INDArray (org.nd4j.linalg.api.ndarray.INDArray): 13
Pointer (org.bytedeco.javacpp.Pointer): 12
CUstream_st (org.bytedeco.javacpp.cuda.CUstream_st): 12
CublasPointer (org.nd4j.linalg.jcublas.CublasPointer): 12
CudaContext (org.nd4j.linalg.jcublas.context.CudaContext): 12
CudaPointer (org.nd4j.jita.allocator.pointers.CudaPointer): 10
cusolverDnHandle_t (org.nd4j.jita.allocator.pointers.cuda.cusolverDnHandle_t): 10
GridExecutioner (org.nd4j.linalg.api.ops.executioner.GridExecutioner): 10
BytePointer (org.bytedeco.javacpp.BytePointer): 8
BlasException (org.nd4j.linalg.api.blas.BlasException): 8
ByteBuffer (java.nio.ByteBuffer): 5
IntBuffer (java.nio.IntBuffer): 5
DoubleBuffer (java.nio.DoubleBuffer): 4
FloatBuffer (java.nio.FloatBuffer): 4
ShortBuffer (java.nio.ShortBuffer): 4
PointerPointer (org.bytedeco.javacpp.PointerPointer): 4