Reputation: 1607
I'm trying to convert a cv::Mat to an AVFrame to then encode it in H.264, and wanted to start from a simple example, as I'm a newbie in both. So I first read in a JPEG file, and then do the pixel format conversion with sws_scale() from AV_PIX_FMT_BGR24 to AV_PIX_FMT_YUV420P, keeping the dimensions the same, and it all goes fine until I call avcodec_encode_video2().
I read quite a few discussions regarding AVFrame allocation, and the question Segmentation fault while avcodec_encode_video2 seemed like a match, but I just can't see what I'm missing or getting wrong.
Here is the minimal code with which you can reproduce the crash; it should be compiled with:
g++ -o OpenCV2FFmpeg OpenCV2FFmpeg.cpp -lopencv_imgproc -lopencv_highgui -lopencv_core -lswscale -lavutil -lavcodec -lavformat
Its output on my system:
cv::Mat [width=420, height=315, depth=0, channels=3, step=1260]
I'll soon crash..
Segmentation fault
And the details of that sample.jpg file as given by the identify tool:
~temporary/sample.jpg JPEG 420x315 420x315+0+0 8-bit sRGB 38.3KB 0.000u 0:00.000
Please note that I'm trying to create a video out of a single image, just to keep things simple.
#include <iostream>
#include <cassert>
using namespace std;
extern "C" {
#include <libavcodec/avcodec.h>
#include <libswscale/swscale.h>
#include <libavformat/avformat.h>
}
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
const string TEST_IMAGE = "/home/baris/temporary/sample.jpg";
int main(int /*argc*/, char** argv)
{
    av_register_all();
    avcodec_register_all();

    /**
     * Initialise the encoder
     */
    AVCodec *h264encoder = avcodec_find_encoder(AV_CODEC_ID_H264);
    AVFormatContext *cv2avFormatContext = avformat_alloc_context();

    /**
     * Create a stream and allocate frames
     */
    AVStream *h264outputstream = avformat_new_stream(cv2avFormatContext, h264encoder);
    avcodec_get_context_defaults3(h264outputstream->codec, h264encoder);
    AVFrame *sourceAvFrame = av_frame_alloc(), *destAvFrame = av_frame_alloc();
    int got_frame;

    /**
     * Pixel formats for the input and the output
     */
    AVPixelFormat sourcePixelFormat = AV_PIX_FMT_BGR24;
    AVPixelFormat destPixelFormat = AV_PIX_FMT_YUV420P;

    /**
     * Create cv::Mat
     */
    cv::Mat cvFrame = cv::imread(TEST_IMAGE, CV_LOAD_IMAGE_COLOR);
    int width = cvFrame.size().width, height = cvFrame.size().height;
    cerr << "cv::Mat [width=" << width << ", height=" << height << ", depth=" << cvFrame.depth() << ", channels=" << cvFrame.channels() << ", step=" << cvFrame.step << "]" << endl;

    h264outputstream->codec->pix_fmt = destPixelFormat;
    h264outputstream->codec->width = cvFrame.cols;
    h264outputstream->codec->height = cvFrame.rows;

    /**
     * Prepare the conversion context
     */
    SwsContext *bgr2yuvcontext = sws_getContext(width, height,
                                                sourcePixelFormat,
                                                h264outputstream->codec->width, h264outputstream->codec->height,
                                                h264outputstream->codec->pix_fmt,
                                                SWS_BICUBIC, NULL, NULL, NULL);

    /**
     * Convert and encode frames
     */
    for (uint i = 0; i < 250; i++)
    {
        /**
         * Allocate source frame, i.e. input to sws_scale()
         */
        avpicture_alloc((AVPicture*)sourceAvFrame, sourcePixelFormat, width, height);
        for (int h = 0; h < height; h++)
            memcpy(&(sourceAvFrame->data[0][h*sourceAvFrame->linesize[0]]), &(cvFrame.data[h*cvFrame.step]), width*3);

        /**
         * Allocate destination frame, i.e. output from sws_scale()
         */
        avpicture_alloc((AVPicture *)destAvFrame, destPixelFormat, width, height);
        sws_scale(bgr2yuvcontext, sourceAvFrame->data, sourceAvFrame->linesize,
                  0, height, destAvFrame->data, destAvFrame->linesize);

        /**
         * Prepare an AVPacket for encoded output
         */
        AVPacket avEncodedPacket;
        av_init_packet(&avEncodedPacket);
        avEncodedPacket.data = NULL;
        avEncodedPacket.size = 0;

        // av_free_packet(&avEncodedPacket); w/ or w/o result doesn't change

        cerr << "I'll soon crash.." << endl;
        if (avcodec_encode_video2(h264outputstream->codec, &avEncodedPacket, destAvFrame, &got_frame) < 0)
            exit(1);

        cerr << "Checking if we have a frame" << endl;
        if (got_frame)
            av_write_frame(cv2avFormatContext, &avEncodedPacket);

        av_free_packet(&avEncodedPacket);
        av_frame_free(&sourceAvFrame);
        av_frame_free(&destAvFrame);
    }
}
Thanks in advance!
EDIT: And the stack trace after the crash:
Thread 2 (Thread 0x7fffe5506700 (LWP 10005)):
#0 0x00007ffff4bf6c5d in poll () at /lib64/libc.so.6
#1 0x00007fffe9073268 in () at /usr/lib64/libusb-1.0.so.0
#2 0x00007ffff47010a4 in start_thread () at /lib64/libpthread.so.0
#3 0x00007ffff4bff08d in clone () at /lib64/libc.so.6
Thread 1 (Thread 0x7ffff7f869c0 (LWP 10001)):
#0 0x00007ffff5ecc7dc in avcodec_encode_video2 () at /usr/lib64/libavcodec.so.56
#1 0x00000000004019b6 in main(int, char**) (argv=0x7fffffffd3d8) at ../src/OpenCV2FFmpeg.cpp:99
EDIT2: The problem was that I hadn't called avcodec_open2() on the codec, as spotted by Ronald. The final version of the code is at https://github.com/barisdemiray/opencv2ffmpeg/, with leaks and probably other problems, hoping that I'll improve it while learning both libraries.
Upvotes: 1
Views: 2054
Reputation: 11174
Please provide a gdb backtrace after it crashes. I'm pretty sure it'll crash because your picture pointers are unaligned (avpicture_alloc() allocates unaligned images, while avcodec_encode_video2() expects aligned data pointers), but there may be additional reasons why it crashes as well.
To align the data pointers in an AVFrame, use av_image_alloc() and related functions with align=32.
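As a minimal sketch of what the aligned allocation of the destination frame could look like, reusing the destAvFrame, width, height and destPixelFormat names from the question's code (the error handling and the cleanup with av_freep() are assumptions added for illustration, not something from the original post):

/* Needs #include <libavutil/imgutils.h> */
destAvFrame->format = destPixelFormat;
destAvFrame->width  = width;
destAvFrame->height = height;

/* Fills destAvFrame->data[] and destAvFrame->linesize[] with buffers
 * whose pointers and strides are aligned to 32 bytes. */
if (av_image_alloc(destAvFrame->data, destAvFrame->linesize,
                   width, height, destPixelFormat, 32) < 0)
    exit(1);

/* ... sws_scale() into destAvFrame and encode it ... */

av_freep(&destAvFrame->data[0]); /* free the image buffer when done */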
Another problem is that you didn't call avcodec_open2(h264outputstream->codec, h264encoder).
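A minimal sketch of that call, placed after the codec context's width, height and pix_fmt have been set and before the encoding loop; the 1/25 time base is an assumed value here, since the original code never sets one, and the encoder needs it:

h264outputstream->codec->time_base.num = 1;   /* assumed 25 fps */
h264outputstream->codec->time_base.den = 25;

if (avcodec_open2(h264outputstream->codec, h264encoder, NULL) < 0)
{
    cerr << "Could not open the H.264 encoder" << endl;
    exit(1);
}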
Upvotes: 3