Reputation: 61
I am developing software that captures frames with OpenCV and sends them over the network. I originally used the RTMP protocol for this: I set up an intermediate RTMP server and it worked. However, I now have to solve the problem over plain TCP. On the sender side I have no problem; I can smoothly convert cv::Mat -> AVFrame -> AVPacket and send it over the network. On the receiver side, however, I cannot reconstruct AVFrames. This is my sender code (it has no issue and sends over TCP as expected):
void write_frame(AVCodecContext *codec_ctx, AVFormatContext *fmt_ctx, AVFrame *frame)
{
    AVPacket pkt = { 0 };
    av_new_packet(&pkt, 0);

    int ret = avcodec_send_frame(codec_ctx, frame);
    if (ret < 0)
    {
        std::cout << "Error sending frame to codec context!" << std::endl;
        exit(1);
    }

    ret = avcodec_receive_packet(codec_ctx, &pkt);
    if (ret < 0)
    {
        std::cout << "Error receiving packet from codec context!" << std::endl;
        exit(1);
    }

    av_interleaved_write_frame(fmt_ctx, &pkt);
    av_packet_unref(&pkt);
}
void stream_video(double width, double height, int fps, int camID, int bitrate,
                  std::string codec_profile, std::string server)
{
#if LIBAVCODEC_VERSION_INT < AV_VERSION_INT(58, 9, 100)
    av_register_all();
#endif
    avformat_network_init();

    const char *output = server.c_str();
    int ret;
    auto cam = get_device(camID, width, height);
    std::vector<uint8_t> imgbuf(height * width * 3 + 16);
    cv::Mat image(height, width, CV_8UC3, imgbuf.data(), width * 3);

    AVFormatContext *ofmt_ctx = nullptr;
    AVCodec *out_codec = nullptr;
    AVStream *out_stream = nullptr;
    AVCodecContext *out_codec_ctx = nullptr;

    initialize_avformat_context(ofmt_ctx, "flv");
    initialize_io_context(ofmt_ctx, output);

    out_codec = const_cast<AVCodec*>(avcodec_find_encoder(AV_CODEC_ID_H264));
    out_stream = avformat_new_stream(ofmt_ctx, out_codec);
    out_codec_ctx = avcodec_alloc_context3(out_codec);

    set_codec_params(ofmt_ctx, out_codec_ctx, width, height, fps, bitrate);
    initialize_codec_stream(out_stream, out_codec_ctx, out_codec, codec_profile);

    out_stream->codecpar->extradata = out_codec_ctx->extradata;
    out_stream->codecpar->extradata_size = out_codec_ctx->extradata_size;

    av_dump_format(ofmt_ctx, 0, output, 1);

    auto *swsctx = initialize_sample_scaler(out_codec_ctx, width, height);
    auto *frame = allocate_frame_buffer(out_codec_ctx, width, height);

    int cur_size;
    uint8_t *cur_ptr;

    ret = avformat_write_header(ofmt_ctx, nullptr);
    if (ret < 0)
    {
        std::cout << "Could not write header!" << std::endl;
        exit(1);
    }

    bool end_of_stream = false;
    do
    {
        cam >> image;
        const int stride[] = { static_cast<int>(image.step[0]) };
        sws_scale(swsctx, &image.data, stride, 0, image.rows, frame->data, frame->linesize);
        frame->pts += av_rescale_q(1, out_codec_ctx->time_base, out_stream->time_base);
        write_frame(out_codec_ctx, ofmt_ctx, frame);
    } while (!end_of_stream);

    av_write_trailer(ofmt_ctx);
    av_frame_free(&frame);
    avcodec_close(out_codec_ctx);
    avio_close(ofmt_ctx->pb);
    avformat_free_context(ofmt_ctx);
}
This is my receiver code, and I have several questions about it:

1. av_parser_parse2(acpc, out_codec_ctx, &pkt_demo->data, &out_buff_size, (uint8_t*)buffer_data, buffer_size, AV_NOPTS_VALUE, AV_NOPTS_VALUE, 0)

As I understand it, this function consumes bytes until it can assemble an AVPacket. (In my case it is called twice: the first call reads a full buffer of 4096 bytes, the second one about ~2900 bytes, and then execution reaches the decoding line.) My question about this function: it takes AVPacket.pts, AVPacket.dts and AVPacket.pos, but since I am only receiving raw bytes, how am I supposed to pass those values? (I am already trying to fill the AVPacket.) After this call, I get an error at the line int response = avcodec_send_packet(pCodecContext, pPacket). The error is shown in the screenshot "The error", and sometimes I get the one in "The error-2" instead.
////////CONFIG CODEC CONTEXT
avcodec_open2(out_codec_ctx, out_codec, NULL);

////////PARSER INITIALIZATION
AVCodecParserContext *acpc = av_parser_init(AV_CODEC_ID_H264);
uint8_t *out_buff;
int out_buff_size;
////////PARSER INITIALIZATION

size_t buffer_size = 4096;
uint8_t *buffer_data = new uint8_t[buffer_size];
//AVPacket pkt_demo;
AVPacket *pkt_demo = av_packet_alloc();
AVFrame *pFrame = av_frame_alloc();
int response = 0;

while (recv(ClientSocket, (char*)buffer_data, buffer_size, 0) > 0) {
    int len = av_parser_parse2(acpc, out_codec_ctx, &pkt_demo->data, &out_buff_size,
                               (uint8_t*)buffer_data, buffer_size, pkt_demo->pts, pkt_demo->dts, -1);
    std::cout << "READING!!!!" << std::endl;
    if (out_buff_size > 0) {
        std::cout << "started!!!!" << std::endl;
        //pkt_demo->data = out_buff;
        pkt_demo->size = out_buff_size;
        response = decode_packet(pkt_demo, out_codec_ctx, pFrame);
        if (response < 0) break;
    }
    av_packet_unref(pkt_demo);
}
delete[] buffer_data;
static int decode_packet(AVPacket *pPacket, AVCodecContext *pCodecContext, AVFrame *pFrame)
{
    int response = avcodec_send_packet(pCodecContext, pPacket);
    std::cout << response << std::endl;
    if (response < 0) {
        return response;
    }
    while (response >= 0)
    {
        response = avcodec_receive_frame(pCodecContext, pFrame);
        if (response == AVERROR(EAGAIN) || response == AVERROR_EOF) {
            break;
        }
        else if (response < 0) {
            return response;
        }
    }
    return 0;
}
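For comparison, the parser loop in FFmpeg's own decode_video example feeds the parser like this (a minimal sketch; data and data_size stand for the bytes just read from the socket, the other names are the ones from my code above). The return value is the number of input bytes the parser consumed, and AV_NOPTS_VALUE (and 0 for pos) can be passed when the timestamps are unknown:

// Sketch adapted from FFmpeg's decode_video example.
// data / data_size stand for the bytes just received from the socket.
while (data_size > 0)
{
    int consumed = av_parser_parse2(acpc, out_codec_ctx, &pkt_demo->data, &pkt_demo->size,
                                    data, data_size,
                                    AV_NOPTS_VALUE, AV_NOPTS_VALUE, 0);
    if (consumed < 0)
        break;                      // parsing error
    data      += consumed;          // advance past what the parser consumed
    data_size -= consumed;
    if (pkt_demo->size > 0)         // a complete packet has been assembled
        decode_packet(pkt_demo, out_codec_ctx, pFrame);
}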
In addition, I have checked the codec context: I have hardcoded every property on the receiver side so that it matches the sender. (This is another thing that confuses me, because the AVPacket should already carry the codec context information, right?)
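As far as I understand, an AVPacket carries only the compressed data and timestamps, not the codec parameters themselves; those either come in-band (SPS/PPS that the parser finds in an Annex B H.264 stream) or are copied explicitly from AVCodecParameters, roughly like the sketch below (assuming a demuxed input stream, which my raw-TCP receiver does not have):

// Sketch: how decoder parameters are usually copied when a demuxed stream is
// available; fmt_ctx / video_index are hypothetical, not part of my receiver.
AVStream *in_stream = fmt_ctx->streams[video_index];
const AVCodec *dec = avcodec_find_decoder(in_stream->codecpar->codec_id);
AVCodecContext *dec_ctx = avcodec_alloc_context3(dec);
avcodec_parameters_to_context(dec_ctx, in_stream->codecpar);   // copies extradata, dimensions, etc.
avcodec_open2(dec_ctx, dec, NULL);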
Upvotes: 2
Views: 891
Reputation: 61
I have a better understanding now. On the sender side, initialize_avformat_context(ofmt_ctx, "flv") should be initialize_avformat_context(ofmt_ctx, "avi"). The receiver side has no issue; it was fine from the beginning. I was confused about the av_parser_parse2 arguments, but the AVPacket already carries all the needed information; the only extra thing required is an AVCodecParserContext, so declaring one is enough.
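In other words, the only change needed is on the sender side, in the container format string passed to the muxer:

// Sender side fix: mux into AVI instead of FLV; nothing else changes.
initialize_avformat_context(ofmt_ctx, "avi");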
Upvotes: 2