Use of org.bytedeco.javacpp.Pointer in project javacv by bytedeco.
The class OpenCVFrameConverter, method convertToIplImage:
public IplImage convertToIplImage(Frame frame) {
    if (frame == null || frame.image == null) {
        return null;
    } else if (frame.opaque instanceof IplImage) {
        // the Frame already wraps an IplImage; reuse it directly
        return (IplImage) frame.opaque;
    } else if (!isEqual(frame, img)) {
        int depth = getIplImageDepth(frame.imageDepth);
        if (img != null) {
            // drop the reference to the previously cached image
            img.releaseReference();
        }
        // wrap the Frame's pixel buffer in an IplImage, adjusting stride and size
        img = depth < 0 ? null : (IplImage) IplImage.create(frame.imageWidth, frame.imageHeight, depth, frame.imageChannels,
                        new Pointer(frame.image[0].position(0)))
                .widthStep(frame.imageStride * Math.abs(frame.imageDepth) / 8)
                .imageSize(frame.image[0].capacity() * Math.abs(frame.imageDepth) / 8)
                .retainReference();
    }
    return img;
}
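
For context, here is a minimal sketch of how this converter is typically driven. The grabber-sourced Frame is an assumption, and the IplImage import path varies by javacv version (org.bytedeco.opencv.opencv_core.IplImage in recent releases):

import org.bytedeco.javacv.Frame;
import org.bytedeco.javacv.OpenCVFrameConverter;
import org.bytedeco.opencv.opencv_core.IplImage;

public class ConvertToIplImageExample {
    // Hypothetical helper: convert a Frame (e.g. obtained from a FrameGrabber)
    // into an IplImage for use with OpenCV's C API. convert() delegates to
    // convertToIplImage(frame) shown above.
    static IplImage toIplImage(OpenCVFrameConverter.ToIplImage converter, Frame frame) {
        return converter.convert(frame);
    }
}

Note that the converter caches the resulting IplImage in its img field, so the returned image is only valid until the next convert() call on the same converter instance.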
Use of org.bytedeco.javacpp.Pointer in project javacv by bytedeco.
The class FFmpegFrameRecorder, method recordImage:
public synchronized boolean recordImage(int width, int height, int depth, int channels, int stride,
        int pixelFormat, Buffer... image) throws Exception {
    try (PointerScope scope = new PointerScope()) {
        if (video_st == null) {
            throw new Exception("No video output stream (Is imageWidth > 0 && imageHeight > 0 and has start() been called?)");
        }
        if (!started) {
            throw new Exception("start() was not called successfully!");
        }
        int ret;
        if (image == null || image.length == 0) {
            /* no more frames to compress. The codec has a latency of a few
               frames if using B-frames, so we get the last frames by
               passing the same picture again */
        } else {
            int step = stride * Math.abs(depth) / 8;
            BytePointer data = image[0] instanceof ByteBuffer
                    ? new BytePointer((ByteBuffer) image[0]).position(0)
                    : new BytePointer(new Pointer(image[0]).position(0));
            if (pixelFormat == AV_PIX_FMT_NONE) {
                // guess the pixel format from the sample depth and channel count
                if ((depth == Frame.DEPTH_UBYTE || depth == Frame.DEPTH_BYTE) && channels == 3) {
                    pixelFormat = AV_PIX_FMT_BGR24;
                } else if ((depth == Frame.DEPTH_UBYTE || depth == Frame.DEPTH_BYTE) && channels == 1) {
                    pixelFormat = AV_PIX_FMT_GRAY8;
                } else if ((depth == Frame.DEPTH_USHORT || depth == Frame.DEPTH_SHORT) && channels == 1) {
                    pixelFormat = ByteOrder.nativeOrder().equals(ByteOrder.BIG_ENDIAN)
                            ? AV_PIX_FMT_GRAY16BE : AV_PIX_FMT_GRAY16LE;
                } else if ((depth == Frame.DEPTH_UBYTE || depth == Frame.DEPTH_BYTE) && channels == 4) {
                    pixelFormat = AV_PIX_FMT_RGBA;
                } else if ((depth == Frame.DEPTH_UBYTE || depth == Frame.DEPTH_BYTE) && channels == 2) {
                    // Android's camera capture format
                    pixelFormat = AV_PIX_FMT_NV21;
                } else {
                    throw new Exception("Could not guess pixel format of image: depth=" + depth + ", channels=" + channels);
                }
            }
            if (pixelFormat == AV_PIX_FMT_NV21) {
                step = width;
            }
            if (video_c.pix_fmt() != pixelFormat || video_c.width() != width || video_c.height() != height) {
                /* convert to the codec pixel format if needed */
                img_convert_ctx = sws_getCachedContext(img_convert_ctx, width, height, pixelFormat,
                        video_c.width(), video_c.height(), video_c.pix_fmt(),
                        imageScalingFlags != 0 ? imageScalingFlags : SWS_BILINEAR, null, null, (DoublePointer) null);
                if (img_convert_ctx == null) {
                    throw new Exception("sws_getCachedContext() error: Cannot initialize the conversion context.");
                }
                av_image_fill_arrays(new PointerPointer(tmp_picture), tmp_picture.linesize(), data, pixelFormat, width, height, 1);
                av_image_fill_arrays(new PointerPointer(picture), picture.linesize(), picture_buf, video_c.pix_fmt(), video_c.width(), video_c.height(), 1);
                tmp_picture.linesize(0, step);
                tmp_picture.format(pixelFormat);
                tmp_picture.width(width);
                tmp_picture.height(height);
                picture.format(video_c.pix_fmt());
                picture.width(video_c.width());
                picture.height(video_c.height());
                sws_scale(img_convert_ctx, new PointerPointer(tmp_picture), tmp_picture.linesize(),
                        0, height, new PointerPointer(picture), picture.linesize());
            } else {
                av_image_fill_arrays(new PointerPointer(picture), picture.linesize(), data, pixelFormat, width, height, 1);
                picture.linesize(0, step);
                picture.format(pixelFormat);
                picture.width(width);
                picture.height(height);
            }
        }
        // if ((oformat.flags() & AVFMT_RAWPICTURE) != 0) {
        //     if (image == null || image.length == 0) {
        //         return false;
        //     }
        //     /* raw video case. The API may change slightly in the future for that? */
        //     av_init_packet(video_pkt);
        //     video_pkt.flags(video_pkt.flags() | AV_PKT_FLAG_KEY);
        //     video_pkt.stream_index(video_st.index());
        //     video_pkt.data(new BytePointer(picture));
        //     video_pkt.size(Loader.sizeof(AVFrame.class));
        // } else {
        /* encode the image */
        picture.quality(video_c.global_quality());
        if ((ret = avcodec_send_frame(video_c, image == null || image.length == 0 ? null : picture)) < 0
                && image != null && image.length != 0) {
            throw new Exception("avcodec_send_frame() error " + ret + ": Error sending a video frame for encoding.");
        }
        // magic required by libx264
        picture.pts(picture.pts() + 1);
        /* if zero size, it means the image was buffered */
        got_video_packet[0] = 0;
        while (ret >= 0) {
            av_new_packet(video_pkt, video_outbuf_size);
            ret = avcodec_receive_packet(video_c, video_pkt);
            if (ret == AVERROR_EAGAIN() || ret == AVERROR_EOF()) {
                av_packet_unref(video_pkt);
                break;
            } else if (ret < 0) {
                av_packet_unref(video_pkt);
                throw new Exception("avcodec_receive_packet() error " + ret + ": Error during video encoding.");
            }
            got_video_packet[0] = 1;
            /* rescale timestamps from the codec time base to the stream time base */
            if (video_pkt.pts() != AV_NOPTS_VALUE) {
                video_pkt.pts(av_rescale_q(video_pkt.pts(), video_c.time_base(), video_st.time_base()));
            }
            if (video_pkt.dts() != AV_NOPTS_VALUE) {
                video_pkt.dts(av_rescale_q(video_pkt.dts(), video_c.time_base(), video_st.time_base()));
            }
            video_pkt.stream_index(video_st.index());
            /* write the compressed frame in the media file */
            writePacket(AVMEDIA_TYPE_VIDEO, video_pkt);
        }
        // }
        return image != null ? (video_pkt.flags() & AV_PKT_FLAG_KEY) != 0 : got_video_packet[0] != 0;
    }
}
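
For context, a minimal sketch of how recordImage is usually reached: callers invoke record(Frame) on the recorder, which dispatches to recordImage(...) for video frames. The file names below are placeholders, and the try-with-resources usage assumes recent javacv releases where grabber and recorder implement Closeable:

import org.bytedeco.javacv.FFmpegFrameGrabber;
import org.bytedeco.javacv.FFmpegFrameRecorder;
import org.bytedeco.javacv.Frame;

public class TranscodeExample {
    public static void main(String[] args) throws Exception {
        try (FFmpegFrameGrabber grabber = new FFmpegFrameGrabber("input.mp4")) { // placeholder path
            grabber.start();
            try (FFmpegFrameRecorder recorder = new FFmpegFrameRecorder(
                    "output.mp4", grabber.getImageWidth(), grabber.getImageHeight())) { // placeholder path
                recorder.setFrameRate(grabber.getFrameRate());
                recorder.start(); // required, or recordImage() throws as shown above
                Frame frame;
                while ((frame = grabber.grabImage()) != null) {
                    recorder.record(frame); // ends up in recordImage(...) for image frames
                }
            }
        }
    }
}

Calling recordImage with no image buffers (as the recorder's own flushing logic does when it stops) drains the encoder's delayed frames; that is what the image == null branch above handles.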
Use of org.bytedeco.javacpp.Pointer in project javacv by bytedeco.
The class Frame, method close:
@Override
public void close() {
    if (opaque instanceof Pointer[]) {
        // release each native Pointer backing this Frame's buffers
        for (Pointer p : (Pointer[]) opaque) {
            if (p != null) {
                p.releaseReference();
                p = null;
            }
        }
        opaque = null;
    }
}
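
Since Frame implements AutoCloseable, this close() pairs naturally with try-with-resources. A minimal sketch, assuming a frame whose opaque field holds the backing Pointer[], as with copies produced by Frame.clone():

import org.bytedeco.javacv.Frame;

public class FrameCloseExample {
    // Hypothetical: keep a deep copy of a grabbed frame past the grabber's
    // next grab() call, then release its native memory deterministically.
    static void process(Frame grabbed) {
        try (Frame copy = grabbed.clone()) {
            // ... use copy ...
        } // close() drops the reference on each Pointer in copy.opaque
    }
}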