Reputation: 31
I am new to GStreamer. I have written code to play an AVI file using GStreamer, but when I execute it, it just hangs after a while and I am unable to debug what the problem is. Can someone help me, please? The code and the output are below:
Code:
#include<stdio.h>
#include<gst/gst.h>
#include<glib.h>
//Function to process message on bus of pipeline
gboolean process_message(GstBus *bus, GstMessage *msg,gpointer data);
//Function to add pad dynamically for ogg demux
void dynamic_addpad(GstElement *element, GstPad *pad, gpointer data);
void dynamic_decodepad (GstElement* object, GstPad* arg0, gboolean arg1,gpointer user_data);
GstElement *source, *demuxer, *audio_decoder, *video_decoder, *audio_convertor,*video_convertor, *audio_sink,*video_sink,*audioqueue,*videoqueue;//*audio_demuxer, *video_demuxer,
int main(int argc, char* argv[])
{
    GstPipeline *pipeline;
    GstBin *Bin;
    GstBus *bus;
    GMainLoop *Mainloop;

    gst_init(&argc, &argv);
    Mainloop = g_main_loop_new(NULL, FALSE); // NULL to use the current context, FALSE because the loop is not yet running

    pipeline = gst_pipeline_new("PIPELINE");
    Bin = GST_BIN(pipeline);
    bus = gst_pipeline_get_bus(pipeline);

    source = gst_element_factory_make("filesrc", "file-source");
    g_object_set(G_OBJECT(source), "location", argv[1], NULL);
    demuxer = gst_element_factory_make("avidemux", "avi-demuxer");
    audioqueue = gst_element_factory_make("queue", "Queue for audio");
    videoqueue = gst_element_factory_make("queue", "Queue for video");
    video_decoder = gst_element_factory_make("decodebin", "decoderbin");
    audio_convertor = gst_element_factory_make("audioconvert", "audio convertor");
    video_convertor = gst_element_factory_make("videoscale", "video convertor");
    audio_sink = gst_element_factory_make("autoaudiosink", "Auto audio sink");
    video_sink = gst_element_factory_make("xvimagesink", "XV video sink ");

    if (!source || !demuxer || !audioqueue || !videoqueue || !video_decoder || !audio_convertor || !video_convertor || !audio_sink || !video_sink)
    {
        g_print("Could not create element\n");
        return 0;
    }

    gst_bin_add(Bin, source);
    gst_bin_add_many(Bin, demuxer, audioqueue, videoqueue, audio_convertor, video_decoder, video_convertor, audio_sink, video_sink, NULL);
    gst_element_link(source, demuxer);
    gst_element_link_many(audioqueue, video_decoder, audio_convertor, audio_sink, NULL);
    gst_element_link_many(videoqueue, video_decoder, video_convertor, video_sink, NULL);

    g_signal_connect(demuxer, "pad-added", G_CALLBACK(dynamic_addpad), NULL); // new demuxer pads are linked in dynamic_addpad
    g_signal_connect(video_decoder, "new-decoded-pad", G_CALLBACK(dynamic_decodepad), NULL); // new decodebin pads are linked in dynamic_decodepad
    gst_bus_add_watch(bus, process_message, Mainloop); // Mainloop is passed as user data because process_message acts on the loop
    g_object_unref(bus);

    g_print("In playing state\n");
    gst_element_set_state(pipeline, GST_STATE_PLAYING); // a pipeline is a bin and a bin is an element, hence gst_element_set_state is used to set the pipeline state
    g_main_loop_run(Mainloop);

    g_print("In playing state2\n");
    gst_element_set_state(pipeline, GST_STATE_NULL);
    g_object_unref(G_OBJECT(pipeline));
}
// Function to process messages on the pipeline bus
gboolean process_message(GstBus *bus, GstMessage *msg, gpointer data)
{
    GError *error;
    gchar *debug;
    GMainLoop *loop = (GMainLoop *)data;

    g_print(" In process message msg->type : %d\n", GST_MESSAGE_TYPE(msg));
    switch (GST_MESSAGE_TYPE(msg))
    {
        case GST_MESSAGE_UNKNOWN:
            g_print("GST_MESSAGE_UNKNOWN \n");
            break;
        case GST_MESSAGE_EOS:
            g_print("GST_MESSAGE_EOS \n");
            g_main_loop_quit(loop);
            break;
        case GST_MESSAGE_ERROR:
            g_print("GST_MESSAGE_ERROR \n");
            gst_message_parse_error(msg, &error, &debug);
            g_free(debug);
            g_print("GST_MESSAGE_ERROR message : %s \n", error->message);
            g_main_loop_quit(loop);
            break;
        case GST_MESSAGE_WARNING:
            g_print("GST_MESSAGE_WARNING \n");
            break;
        case GST_MESSAGE_INFO:
            g_print("GST_MESSAGE_INFO \n");
            break;
        case GST_MESSAGE_TAG:
            g_print("GST_MESSAGE_TAG \n");
            break;
        case GST_MESSAGE_BUFFERING:
            g_print("GST_MESSAGE_BUFFERING \n");
            break;
        case GST_MESSAGE_STATE_CHANGED:
            g_print("GST_MESSAGE_STATE_CHANGED \n");
            break;
        default:
            g_print("default \n");
            break;
    }
    return TRUE; // always return TRUE so the watch stays registered; FALSE would deregister it
}
// Function to link demuxer pads dynamically as they are created
void dynamic_addpad(GstElement *element, GstPad *pad, gpointer data)
{
    GstPad *audiodemuxsink;
    GstPad *videodemuxsink;
    GstElement *decoder = (GstElement *)data;

    g_print(" In dynamic ADDING PAD\n");
    audiodemuxsink = gst_element_get_static_pad(audioqueue, "sink");
    gst_pad_link(pad, audiodemuxsink);
    videodemuxsink = gst_element_get_static_pad(videoqueue, "sink");
    gst_pad_link(pad, videodemuxsink);
    g_print(" In dynamic ADDING PAD2\n");
}

// Function to link decodebin pads dynamically as they are created
void dynamic_decodepad(GstElement *object, GstPad *pad, gboolean arg1, gpointer user_data)
{
    GstPad *videoconvertsink;
    GstPad *audioconvertsink;

    g_print(" In dynamic_decodepad ADDING PAD\n");
    videoconvertsink = gst_element_get_static_pad(video_convertor, "sink");
    gst_pad_link(pad, videoconvertsink);
    audioconvertsink = gst_element_get_static_pad(audio_convertor, "sink");
    gst_pad_link(pad, audioconvertsink);
    g_print(" In dynamic_decodepad ADDING PAD2\n");
}
Output:
In playing state
In process message msg->type : 64
GST_MESSAGE_STATE_CHANGED
In process message msg->type : 64
GST_MESSAGE_STATE_CHANGED
In process message msg->type : 64
GST_MESSAGE_STATE_CHANGED
In process message msg->type : 64
GST_MESSAGE_STATE_CHANGED
In process message msg->type : 64
GST_MESSAGE_STATE_CHANGED
In process message msg->type : 64
GST_MESSAGE_STATE_CHANGED
In process message msg->type : 64
GST_MESSAGE_STATE_CHANGED
In process message msg->type : 64
GST_MESSAGE_STATE_CHANGED
In process message msg->type : 64
GST_MESSAGE_STATE_CHANGED
In process message msg->type : 64
GST_MESSAGE_STATE_CHANGED
In process message msg->type : 64
GST_MESSAGE_STATE_CHANGED
In process message msg->type : 64
GST_MESSAGE_STATE_CHANGED
In process message msg->type : 64
GST_MESSAGE_STATE_CHANGED
In process message msg->type : 64
GST_MESSAGE_STATE_CHANGED
In process message msg->type : 8192
default
In process message msg->type : 64
GST_MESSAGE_STATE_CHANGED
In process message msg->type : 64
GST_MESSAGE_STATE_CHANGED
In process message msg->type : 64
GST_MESSAGE_STATE_CHANGED
In process message msg->type : 8192
default
In process message msg->type : 64
GST_MESSAGE_STATE_CHANGED
In process message msg->type : 64
GST_MESSAGE_STATE_CHANGED
In process message msg->type : 8192
default
In process message msg->type : 8192
default
In dynamic ADDING PAD
In dynamic ADDING PAD2
In dynamic ADDING PAD
In dynamic ADDING PAD2
In process message msg->type : 16
GST_MESSAGE_TAG
In process message msg->type : 16
GST_MESSAGE_TAG
In process message msg->type : 16
GST_MESSAGE_TAG
In process message msg->type : 64
GST_MESSAGE_STATE_CHANGED
In dynamic_decodepad ADDING PAD
In dynamic_decodepad ADDING PAD2
In process message msg->type : 64
GST_MESSAGE_STATE_CHANGED
In process message msg->type : 64
GST_MESSAGE_STATE_CHANGED
In process message msg->type : 64
GST_MESSAGE_STATE_CHANGED
In process message msg->type : 64
GST_MESSAGE_STATE_CHANGED
In process message msg->type : 64
GST_MESSAGE_STATE_CHANGED
It hangs at this point. Any help is appreciated. Thanks in advance.
Upvotes: 3
Views: 5708
Reputation: 12898
Your code is wrong in several ways; that is why my answer is so long.
First of all, gst_pipeline_new returns GstElement*, not GstPipeline*:
- pipeline = gst_pipeline_new("PIPELINE");
+ GstElement *pipeline = gst_pipeline_new("PIPELINE");
Bin = GST_BIN(pipeline);
- bus = gst_pipeline_get_bus(pipeline);
+ bus = gst_pipeline_get_bus(GST_PIPELINE(pipeline));
Then, your pipeline is wrong: you are trying to decode both streams (audio and video) with one decodebin, but you need two. Create the second one and don't forget to add it to the bin:
videoqueue = gst_element_factory_make("queue","Queue for video");
+ audio_decoder = gst_element_factory_make("decodebin","a_decodebin");
video_decoder = gst_element_factory_make("decodebin","decoderbin");//"Vorbis audio decoder","vorbis");
- gst_bin_add_many(Bin,demuxer,audioqueue,videoqueue,audio_convertor,video_decoder,video_convertor,audio_sink,video_sink,NULL);
+ gst_bin_add_many(
+ Bin,
+ demuxer,
+ audioqueue,videoqueue,
+ audio_decoder,audio_convertor,
+ video_decoder,video_convertor,
+ audio_sink,video_sink,
+ NULL);
And, by the way, it's better to use decodebin2, as decodebin is deprecated.
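For instance, keeping the element variables and instance names from the diff above, the two factory calls would simply become:

    audio_decoder = gst_element_factory_make("decodebin2","a_decodebin");
    video_decoder = gst_element_factory_make("decodebin2","decoderbin");

As far as I remember, decodebin2 in GStreamer 0.10 emits the same "new-decoded-pad" signal, so the handlers below should work unchanged.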
Then, you are linking some elements dynamically: the demuxer to the queues and the decodebins to the converters. Hence you should not create static links between the decodebins and the converters with gst_element_link_many:
gst_element_link(source,demuxer);
- gst_element_link_many(audioqueue,video_decoder,audio_convertor,audio_sink,NULL);
- gst_element_link_many(videoqueue,video_decoder,video_convertor,video_sink,NULL);
+ gst_element_link_many(audioqueue,audio_decoder,NULL);
+ gst_element_link_many(audio_convertor,audio_sink,NULL);
+ gst_element_link_many(videoqueue,video_decoder,NULL);
+ gst_element_link_many(video_convertor,video_sink,NULL);
And of course, since we added the audio_decoder decodebin, we need to handle its pad-creation signal:
+ g_signal_connect(audio_decoder,"new-decoded-pad",G_CALLBACK(dynamic_decodepad),NULL);
g_signal_connect(video_decoder,"new-decoded-pad",G_CALLBACK(dynamic_decodepad),NULL);
And now we are at the most interesting part.
void dynamic_addpad(GstElement *element, GstPad *pad, gpointer data)
{
    GstPad *audiodemuxsink;
    GstPad *videodemuxsink;
    GstElement *decoder = (GstElement *)data;

    g_print(" In dynamic ADDING PAD\n");
    audiodemuxsink = gst_element_get_static_pad(audioqueue, "sink");
    gst_pad_link(pad, audiodemuxsink);
    videodemuxsink = gst_element_get_static_pad(videoqueue, "sink");
    gst_pad_link(pad, videodemuxsink);
    g_print(" In dynamic ADDING PAD2\n");
}
This is completely wrong! dynamic_addpad is called on each pad creation. avidemux commonly creates two pads (one for each data stream), "audio_00" and "video_00", so dynamic_addpad will be called twice, and we need to decide what to link based on the pad name:
void dynamic_addpad(GstElement *element, GstPad *pad, gpointer data)
{
    char* pad_name = gst_pad_get_name(pad);
    g_print(" In dynamic ADDING PAD %s\n", pad_name);

    if (g_str_has_prefix(pad_name, "audio")) {
        GstPad *audiodemuxsink = gst_element_get_static_pad(audioqueue, "sink");
        gst_pad_link(pad, audiodemuxsink);
    }
    else if (g_str_has_prefix(pad_name, "video")) {
        GstPad *videodemuxsink = gst_element_get_static_pad(videoqueue, "sink");
        gst_pad_link(pad, videodemuxsink);
    }
    g_free(pad_name);
}
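A side note, not strictly needed for the fix: gst_element_get_static_pad returns a new reference, so a slightly tidier version of the same handler (just a sketch, still using your global audioqueue/videoqueue) would also check the result of gst_pad_link and drop the pad reference:

void dynamic_addpad(GstElement *element, GstPad *pad, gpointer data)
{
    gchar *pad_name = gst_pad_get_name(pad);
    GstPad *sinkpad = NULL;

    /* pick the queue that matches the demuxer pad ("audio_00" or "video_00") */
    if (g_str_has_prefix(pad_name, "audio"))
        sinkpad = gst_element_get_static_pad(audioqueue, "sink");
    else if (g_str_has_prefix(pad_name, "video"))
        sinkpad = gst_element_get_static_pad(videoqueue, "sink");

    if (sinkpad) {
        if (gst_pad_link(pad, sinkpad) != GST_PAD_LINK_OK)
            g_printerr("Failed to link demuxer pad %s\n", pad_name);
        gst_object_unref(sinkpad); /* release the reference from gst_element_get_static_pad */
    }
    g_free(pad_name);
}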
Almost the same applies to dynamic_decodepad. Since each decodebin creates only one src pad here, it would be easier to create separate handlers for video_decoder and audio_decoder. But for pedagogical reasons I will do it in one function. Now we can distinguish which element to connect to the pad by its caps.
void dynamic_decodepad(GstElement* object, GstPad* pad, gboolean arg1, gpointer user_data)
{
    GstPad* videoconvertsink = gst_element_get_static_pad(video_convertor, "sink");
    if (gst_pad_can_link(pad, videoconvertsink)) {
        gst_pad_link(pad, videoconvertsink);
    }

    GstPad* audioconvertsink = gst_element_get_static_pad(audio_convertor, "sink");
    if (gst_pad_can_link(pad, audioconvertsink)) {
        gst_pad_link(pad, audioconvertsink);
    }
}
gst_pad_can_link would not work in dynamic_addpad, because a queue element accepts any caps and could therefore be connected to both "audio_00" and "video_00".
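By the way, if you prefer to branch on the caps explicitly instead of relying on gst_pad_can_link, a rough equivalent of the handler above (only a sketch, assuming GStreamer 0.10 to match the "new-decoded-pad" signal used here) would be:

void dynamic_decodepad(GstElement *object, GstPad *pad, gboolean arg1, gpointer user_data)
{
    /* inspect the media type of the newly decoded pad */
    GstCaps *caps = gst_pad_get_caps(pad);
    const gchar *type = gst_structure_get_name(gst_caps_get_structure(caps, 0));
    GstPad *sinkpad = NULL;

    if (g_str_has_prefix(type, "audio/"))
        sinkpad = gst_element_get_static_pad(audio_convertor, "sink");
    else if (g_str_has_prefix(type, "video/"))
        sinkpad = gst_element_get_static_pad(video_convertor, "sink");

    if (sinkpad) {
        gst_pad_link(pad, sinkpad);
        gst_object_unref(sinkpad);
    }
    gst_caps_unref(caps);
}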
That's it. Don't hesitate to ask if you have other questions.
Upvotes: 16