Reputation: 1
In the code, as you can see i have two input pipeline gst_element_link_many(v4l2src1, videoconvert1, capsfilter1, queue1, input_selector, NULL); gst_element_link_many(v4l2src2, videoconvert2, capsfilter2, queue2, input_selector, NULL);
so initially first camera or v4l2src1 is going live and after camera is live if press 1 in console swicth_camera fucntion gets called but it does not switch to other the camera. So i want to know that how can we swicth the camera i am begineer in gstreaemer, so if possible please give me full code of swicth_camera to swicth the camera in runtime.
i am using gstreamer 1.16.3
#include <gst/gst.h>
#include <unistd.h>
static GMainLoop *loop;
GstElement *pipeline, *v4l2src1, *v4l2src2, *input_selector, *videoconvert1, *videoconvert2,
*x264enc, *h264parse, *flvmux, *rtmpsink,
*alsasrc, *audioconvert, *avenc_aac, *aacparse, *capsfilter1, *capsfilter2, *queue1, *queue2;
void switch_camera(char *key) {
gst_element_set_state(pipeline, GST_STATE_PAUSED);
GstPad *new_active_pad = gst_element_get_static_pad(queue2, "src");
if (new_active_pad) {
g_print("selected pad");
}
GstPad *old_active_pad = NULL;
g_object_get(G_OBJECT(input_selector), "active-pad", &old_active_pad, NULL);
if (old_active_pad != NULL) {
gst_object_unref(old_active_pad);
g_print("\nunref the old active pad");
} else {
g_print("\nNot able to unref");
}
g_print("\nbefore\n");
g_object_set(G_OBJECT(input_selector), "active-pad", new_active_pad, NULL);
g_print("\nafter\n");
gst_element_set_state(pipeline, GST_STATE_PLAYING);
}
gboolean check_keyboard_input(gpointer user_data) {
char key;
if (read(STDIN_FILENO, &key, 1) == 1) {
if (key == '1') {
switch_camera(&key);
} else if (key == '2') {
switch_camera(&key);
}
}
return TRUE;
}
int main(int argc, char *argv[]) {
gst_init(&argc, &argv);
loop = g_main_loop_new(NULL, FALSE);
pipeline = gst_pipeline_new("multi-camera-stream");
v4l2src1 = gst_element_factory_make("v4l2src", "source1");
v4l2src2 = gst_element_factory_make("v4l2src", "source2");
input_selector = gst_element_factory_make("input-selector", "selector");
videoconvert1 = gst_element_factory_make("videoconvert", "videoconvert1");
videoconvert2 = gst_element_factory_make("videoconvert", "videoconvert2");
x264enc = gst_element_factory_make("x264enc", "x264enc");
h264parse = gst_element_factory_make("h264parse", "h264parse");
flvmux = gst_element_factory_make("flvmux", "flvmux");
rtmpsink = gst_element_factory_make("rtmpsink", "rtmpsink");
alsasrc = gst_element_factory_make("alsasrc", "alsasrc");
audioconvert = gst_element_factory_make("audioconvert", "audioconvert");
avenc_aac = gst_element_factory_make("avenc_aac", "avenc_aac");
aacparse = gst_element_factory_make("aacparse", "aacparse");
capsfilter1 = gst_element_factory_make("capsfilter", "capsfilter1");
capsfilter2 = gst_element_factory_make("capsfilter", "capsfilter2");
GstElement *videoscale = gst_element_factory_make("videoscale", "videoscale");
// Create queue elements for buffer management
queue1 = gst_element_factory_make("queue", "q1");
queue2 = gst_element_factory_make("queue", "q2");
// Check if elements were created successfully
if (!pipeline || !v4l2src1 || !v4l2src2 || !input_selector || !videoconvert1 || !videoconvert2
|| !x264enc || !h264parse || !flvmux ||
!rtmpsink || !alsasrc || !audioconvert || !avenc_aac || !aacparse || !capsfilter1 ||
!capsfilter2 || !queue1 || !queue2) {
g_printerr("Not all elements could be created.\n");
return -1;
}
// Set the input source for video and audio
g_object_set(G_OBJECT(v4l2src1), "device", "/dev/video2", NULL);
g_object_set(G_OBJECT(v4l2src2), "device", "/dev/video4", NULL);
g_object_set(G_OBJECT(alsasrc), "device", "hw:0", NULL);
// Set the location for RTMP sink
//g_object_set(G_OBJECT(rtmpsink), "location", "rtmp://a.rtmp.youtube.com/live2/h22t-x1zz-
1x6f-8mqp-cy2k live=1", NULL);
g_object_set(G_OBJECT(rtmpsink), "location",
"rtmp://live.twitch.tv/app/live_995412749_duF1ntZdWZnLf4li3NcOSrD3MK0qJM live=1", NULL);
g_object_set(G_OBJECT(x264enc), "bitrate", 4000, NULL);
g_object_set(G_OBJECT(x264enc), "key-int-max", 30, NULL);
g_object_set(G_OBJECT(x264enc), "speed-preset", 1, NULL);
g_object_set(G_OBJECT(x264enc), "tune", 4, NULL);
// Set audio-related properties
g_object_set(G_OBJECT(avenc_aac), "bitrate", 128000, NULL);
// Set video caps for the capsfilter
GstCaps *caps1 = gst_caps_new_simple("video/x-raw",
"format", G_TYPE_STRING, "I420",
"framerate", GST_TYPE_FRACTION, 30, 1,
"width", G_TYPE_INT, 640,
"height", G_TYPE_INT, 480,
NULL);
// Set video caps for the capsfilter
GstCaps *caps2 = gst_caps_new_simple("video/x-raw",
"format", G_TYPE_STRING, "I420",
"framerate", GST_TYPE_FRACTION, 30, 1,
"width", G_TYPE_INT, 640,
"height", G_TYPE_INT, 480,
NULL);
g_object_set(G_OBJECT(capsfilter1), "caps", caps1, NULL);
g_object_set(G_OBJECT(capsfilter2), "caps", caps2, NULL);
g_print("Running\n");
// Create a GIO channel to watch for keyboard input
GIOChannel *io_channel = g_io_channel_unix_new(STDIN_FILENO);
g_io_add_watch(io_channel, G_IO_IN, (GIOFunc)check_keyboard_input, NULL);
//gst_element_set_state(v4l2src1, GST_STATE_READY);
gst_bin_add_many(GST_BIN(pipeline), v4l2src1, v4l2src2, input_selector, videoscale,
videoconvert1, videoconvert2, x264enc, h264parse, flvmux,
rtmpsink, alsasrc, audioconvert, avenc_aac, aacparse, capsfilter1,
capsfilter2, queue1, queue2, NULL);
// Link v4l2src1 to input_selector
gst_element_link_many(v4l2src1, videoconvert1, capsfilter1, queue1, input_selector, NULL);
// Link v4l2src2 to input_selector
gst_element_link_many(v4l2src2, videoconvert2, capsfilter2, queue2, input_selector, NULL);
// Link input_selector to x264enc
gst_element_link_many(input_selector, x264enc, h264parse, flvmux, rtmpsink, NULL);
gst_element_link_many(alsasrc, audioconvert, avenc_aac, aacparse, flvmux, NULL);
guint numPads;
g_object_get(G_OBJECT(input_selector), "n-pads", &numPads, NULL);
g_print("Number of sink pads of input selector: %u\n", numPads);
gst_element_set_state(pipeline, GST_STATE_PLAYING);
g_main_loop_run(loop);
gst_element_set_state(pipeline, GST_STATE_NULL);
gst_object_unref(GST_OBJECT(pipeline));
g_main_loop_unref(loop);
g_io_channel_unref(io_channel);
return 0;
}
Upvotes: 0
Views: 397
Reputation: 1626
You would have to use the sink pads of selector. You may try this simplified version of your code:
#include <gst/gst.h>
#include <unistd.h>
static GMainLoop *loop;
static GstElement *pipeline, *input_selector;
void switch_camera(char *key) {
GstPad *new_active_pad;
switch(*key) {
case '1':
{
new_active_pad = gst_element_get_static_pad(input_selector, "sink_0");
break;
}
case '2':
{
new_active_pad = gst_element_get_static_pad(input_selector, "sink_1");
break;
}
default:
return;
}
gst_element_set_state(pipeline, GST_STATE_PAUSED);
g_object_set(G_OBJECT(input_selector), "active-pad", new_active_pad, NULL);
gst_object_unref(new_active_pad);
gst_element_set_state(pipeline, GST_STATE_PLAYING);
}
gboolean check_keyboard_input(gpointer user_data) {
char key;
if (read(STDIN_FILENO, &key, 1) == 1) {
if (key == '1') {
switch_camera(&key);
} else if (key == '2') {
switch_camera(&key);
}
}
return TRUE;
}
int main(int argc, char *argv[]) {
gst_init(&argc, &argv);
loop = g_main_loop_new(NULL, FALSE);
const char* pipelineStr = "input-selector name=selector ! videoconvert ! autovideosink videotestsrc ! selector.sink_0 videotestsrc pattern=ball ! selector.sink_1";
pipeline = gst_parse_launch(pipelineStr, NULL);
input_selector = gst_bin_get_by_name(GST_BIN(pipeline), "selector");
// Create a GIO channel to watch for keyboard input
GIOChannel *io_channel = g_io_channel_unix_new(STDIN_FILENO);
g_io_add_watch(io_channel, G_IO_IN, (GIOFunc)check_keyboard_input, NULL);
gst_element_set_state(pipeline, GST_STATE_PLAYING);
g_main_loop_run(loop);
gst_element_set_state(pipeline, GST_STATE_NULL);
gst_object_unref(GST_OBJECT(pipeline));
g_main_loop_unref(loop);
g_io_channel_unref(io_channel);
return 0;
}
When built and running, in the terminal where you've launched the application, just type 1 or 2 followed by Enter, it should switch to requested input.
Upvotes: 0