gpt4 book ai didi

播放 avi 文件的 gstreamer 代码挂起

转载 作者:行者123 更新时间:2023-12-03 07:59:23 27 4
gpt4 key购买 nike

我是 gstremaer 的新手。我已经编写了使用 gstreamer 播放 avi 文件的代码。但是在执行代码时它只是在一段时间后挂起,我无法调试是什么问题,有人可以帮助我。
代码和输出如下:

    Code:

#include<stdio.h>
#include<gst/gst.h>
#include<glib.h>

//Function to process message on bus of pipeline
gboolean process_message(GstBus *bus, GstMessage *msg,gpointer data);

//Function to add pad dynamically for ogg demux
void dynamic_addpad(GstElement *element, GstPad *pad, gpointer data);
void dynamic_decodepad (GstElement* object, GstPad* arg0, gboolean arg1,gpointer user_data);

GstElement *source, *demuxer, *audio_decoder, *video_decoder, *audio_convertor,*video_convertor, *audio_sink,*video_sink,*audioqueue,*videoqueue;//*audio_demuxer, *video_demuxer,

int main(int argc,char* argv[])
{

GstPipeline *pipeline;
GstBin *Bin;
GstBus *bus;
GMainLoop *Mainloop;

gst_init (&argc,&argv);

Mainloop = g_main_loop_new(NULL,FALSE);//NULL to use the current context and False to tell its not in running state

pipeline = gst_pipeline_new("PIPELINE");
Bin = GST_BIN(pipeline);
bus = gst_pipeline_get_bus(pipeline);


source = gst_element_factory_make("filesrc","file-source");
g_object_set(G_OBJECT(source),"location",argv[1],NULL);

demuxer = gst_element_factory_make("avidemux","avi-demuxer");
audioqueue = gst_element_factory_make("queue","Queue for audio");
videoqueue = gst_element_factory_make("queue","Queue for video");
video_decoder = gst_element_factory_make("decodebin","decoderbin");//"Vorbis audio decoder","vorbis");
audio_convertor = gst_element_factory_make("audioconvert","audio convertor");//"Audio converter","audioconvert");
video_convertor = gst_element_factory_make("videoscale","video convertor");//"Audio converter","audioconvert");
audio_sink = gst_element_factory_make("autoaudiosink","Auto audio sink");
video_sink = gst_element_factory_make("xvimagesink","XV video sink ");

if(!source || !demuxer || !audioqueue || !videoqueue || !video_decoder ||!audio_convertor || !video_convertor || !audio_sink || !video_sink )
{ g_print("Could not not create element\n");
return 0;
}
gst_bin_add(Bin,source);
gst_bin_add_many(Bin,demuxer,audioqueue,videoqueue,audio_convertor,video_decoder,video_convertor,audio_sink,video_sink,NULL);

gst_element_link(source,demuxer);
gst_element_link_many(audioqueue,video_decoder,audio_convertor,audio_sink,NULL);
gst_element_link_many(videoqueue,video_decoder,video_convertor,video_sink,NULL);

g_signal_connect(demuxer,"pad-added",G_CALLBACK(dynamic_addpad),NULL);//demuxer and decoder are passed as instance and data as pads of both the elements are linked in dynamic_addpad
g_signal_connect(video_decoder,"new-decoded-pad",G_CALLBACK(dynamic_decodepad),NULL);//demuxer and decoder are passed as instance and data as pads of both the elements are linked in dynamic_addpad
gst_bus_add_watch(bus,process_message,Mainloop); //Mainloop is passed as user data as in the process_message actions are taken on the loop
g_object_unref(bus);
g_print("In playing state\n");
gst_element_set_state(pipeline,GST_STATE_PLAYING);//Pipeline is also a bin and bin is also an element at abstract level and hence gst_element_set_state call is used to set state of pipeline.

g_main_loop_run(Mainloop);
g_print("In playing state2\n");
gst_element_set_state(pipeline,GST_STATE_NULL);
g_object_unref(G_OBJECT(pipeline));
}



//Function to process message on bus of pipeline
gboolean process_message(GstBus *bus, GstMessage *msg,gpointer data)
{
GError *error;
gchar *debug;
GMainLoop *loop = (GMainLoop *)data;

g_print(" In process message msg->type : %d\n",GST_MESSAGE_TYPE(msg));
switch(GST_MESSAGE_TYPE(msg))
{
case GST_MESSAGE_UNKNOWN :
g_print("GST_MESSAGE_UNKNOWN \n");
break;
case GST_MESSAGE_EOS :
g_print("GST_MESSAGE_EOS \n");
g_main_loop_quit(loop);
break;
case GST_MESSAGE_ERROR :
g_print("GST_MESSAGE_ERROR \n");
gst_message_parse_error (msg, &error, &debug);
g_free(debug);
//if(!error)
{
g_print("GST_MESSAGE_ERROR message : %s \n",error->message);
}
g_main_loop_quit(loop);
break;
case GST_MESSAGE_WARNING :
g_print("GST_MESSAGE_WARNING \n");
break;
case GST_MESSAGE_INFO :
g_print("GST_MESSAGE_INFO \n");
break;
case GST_MESSAGE_TAG :
g_print("GST_MESSAGE_TAG \n");
break;
case GST_MESSAGE_BUFFERING:
g_print("GST_MESSAGE_BUFFERING \n");
break;
case GST_MESSAGE_STATE_CHANGED:
g_print("GST_MESSAGE_STATE_CHANGED \n");
break;
default :
g_print("default \n");
break;

}
return TRUE; //returns true always as it has to be always registered returning false will deregister the function
}

//Function to add pad dynamically for ogg demux
void dynamic_addpad(GstElement *element, GstPad *pad, gpointer data)
{
GstPad *audiodemuxsink;
GstPad *videodemuxsink;
GstElement *decoder = (GstElement *)data;
g_print(" In dynamic ADDING PAD\n");

audiodemuxsink = gst_element_get_static_pad(audioqueue,"sink");
gst_pad_link(pad,audiodemuxsink );
videodemuxsink = gst_element_get_static_pad(videoqueue,"sink");
gst_pad_link(pad,videodemuxsink );
g_print(" In dynamic ADDING PAD2\n");

}



void dynamic_decodepad (GstElement* object, GstPad* pad, gboolean arg1,gpointer user_data)
{
GstPad *videoconvertsink;
GstPad *audioconvertsink ;
g_print(" In dynamic_decodepad ADDING PAD\n");

videoconvertsink = gst_element_get_static_pad(video_convertor,"sink");
gst_pad_link(pad,videoconvertsink);
audioconvertsink = gst_element_get_static_pad(audio_convertor,"sink");
gst_pad_link(pad,audioconvertsink );
g_print(" In dynamic_decodepad ADDING PAD2\n");

}


Output:
In playing state
In process message msg->type : 64
GST_MESSAGE_STATE_CHANGED
In process message msg->type : 64
GST_MESSAGE_STATE_CHANGED
In process message msg->type : 64
GST_MESSAGE_STATE_CHANGED
In process message msg->type : 64
GST_MESSAGE_STATE_CHANGED
In process message msg->type : 64
GST_MESSAGE_STATE_CHANGED
In process message msg->type : 64
GST_MESSAGE_STATE_CHANGED
In process message msg->type : 64
GST_MESSAGE_STATE_CHANGED
In process message msg->type : 64
GST_MESSAGE_STATE_CHANGED
In process message msg->type : 64
GST_MESSAGE_STATE_CHANGED
In process message msg->type : 64
GST_MESSAGE_STATE_CHANGED
In process message msg->type : 64
GST_MESSAGE_STATE_CHANGED
In process message msg->type : 64
GST_MESSAGE_STATE_CHANGED
In process message msg->type : 64
GST_MESSAGE_STATE_CHANGED
In process message msg->type : 64
GST_MESSAGE_STATE_CHANGED
In process message msg->type : 8192
default
In process message msg->type : 64
GST_MESSAGE_STATE_CHANGED
In process message msg->type : 64
GST_MESSAGE_STATE_CHANGED
In process message msg->type : 64
GST_MESSAGE_STATE_CHANGED
In process message msg->type : 8192
default
In process message msg->type : 64
GST_MESSAGE_STATE_CHANGED
In process message msg->type : 64
GST_MESSAGE_STATE_CHANGED
In process message msg->type : 8192
default
In process message msg->type : 8192
default
In dynamic ADDING PAD
In dynamic ADDING PAD2
In dynamic ADDING PAD
In dynamic ADDING PAD2
In process message msg->type : 16
GST_MESSAGE_TAG
In process message msg->type : 16
GST_MESSAGE_TAG
In process message msg->type : 16
GST_MESSAGE_TAG
In process message msg->type : 64
GST_MESSAGE_STATE_CHANGED
In dynamic_decodepad ADDING PAD
In dynamic_decodepad ADDING PAD2
In process message msg->type : 64
GST_MESSAGE_STATE_CHANGED
In process message msg->type : 64
GST_MESSAGE_STATE_CHANGED
In process message msg->type : 64
GST_MESSAGE_STATE_CHANGED
In process message msg->type : 64
GST_MESSAGE_STATE_CHANGED
In process message msg->type : 64
GST_MESSAGE_STATE_CHANGED

它在这一点上挂起。
任何帮助表示赞赏。
提前致谢。

最佳答案

你的代码在几个方面是错误的,这就是为什么我的答案这么长。

首先,gst_pipeline_new返回 GstElement*不是 GstPipeline* :

-  pipeline = gst_pipeline_new("PIPELINE");
+ GstElement *pipeline = gst_pipeline_new("PIPELINE");
Bin = GST_BIN(pipeline);
- bus = gst_pipeline_get_bus(pipeline);
+ bus = gst_pipeline_get_bus(GST_PIPELINE(pipeline));

然后,您的管道是错误的:您试图用一个 decodebin 解码两个流(音频和视频)。但你需要两个。创建它,不要忘记将它添加到 bin:
   videoqueue = gst_element_factory_make("queue","Queue for video");
+ audio_decoder = gst_element_factory_make("decodebin","a_decodebin");
video_decoder = gst_element_factory_make("decodebin","decoderbin");//"Vorbis audio decoder","vorbis");

- gst_bin_add_many(Bin,demuxer,audioqueue,videoqueue,audio_convertor,video_decoder,video_convertor,audio_sink,video_sink,NULL);
+ gst_bin_add_many(
+ Bin,
+ demuxer,
+ audioqueue,videoqueue,
+ audio_decoder,audio_convertor,
+ video_decoder,video_convertor,
+ audio_sink,video_sink,
+ NULL);

而且,顺便说一句,最好使用 decodebin2decodebin已弃用。

然后动态链接一些元素:demuxer 到 queue 和 decodebin 到转换器。因此,您不应该使用 gst_element_link_many 在 decodebin 和转换器之间创建链接。 :
   gst_element_link(source,demuxer);
- gst_element_link_many(audioqueue,video_decoder,audio_convertor,audio_sink,NULL);
- gst_element_link_many(videoqueue,video_decoder,video_convertor,video_sink,NULL);
+ gst_element_link_many(audioqueue,audio_decoder,NULL);
+ gst_element_link_many(audio_convertor,audio_sink,NULL);
+ gst_element_link_many(videoqueue,video_decoder,NULL);
+ gst_element_link_many(video_convertor,video_sink,NULL);

当然,正如我们添加的 audio_decoder decodebin,我们需要处理它的 pad 创建信号:
+  g_signal_connect(audio_decoder,"new-decoded-pad",G_CALLBACK(dynamic_decodepad),NULL);
g_signal_connect(video_decoder,"new-decoded-pad",G_CALLBACK(dynamic_decodepad),NULL);

现在我们到了最有趣的部分。
void dynamic_addpad(GstElement *element, GstPad *pad, gpointer data)
{
GstPad *audiodemuxsink;
GstPad *videodemuxsink;
GstElement *decoder = (GstElement *)data;
g_print(" In dynamic ADDING PAD\n");

audiodemuxsink = gst_element_get_static_pad(audioqueue,"sink");
gst_pad_link(pad,audiodemuxsink );
videodemuxsink = gst_element_get_static_pad(videoqueue,"sink");
gst_pad_link(pad,videodemuxsink );
g_print(" In dynamic ADDING PAD2\n");
}

这是完全错误的! dynamic_addpad在每个打击垫创建时调用。 avidemux通常会创建两个打击垫(每个数据流一个):“audio_00”和“video_00”。所以, dynamic_addpad将被调用两次,我们需要根据焊盘名称区分要链接的内容:
void dynamic_addpad(GstElement *element, GstPad *pad, gpointer data)
{
char* pad_name = gst_pad_get_name(pad);
g_print(" In dynamic ADDING PAD %s\n", pad_name);

if (g_str_has_prefix(pad_name,"audio")) {
GstPad *audiodemuxsink = gst_element_get_static_pad(audioqueue,"sink");
gst_pad_link(pad,audiodemuxsink );
}
else if (g_str_has_prefix(pad_name,"video")) {
GstPad *videodemuxsink = gst_element_get_static_pad(videoqueue,"sink");
gst_pad_link(pad,videodemuxsink );
}
g_free (pad_name);
}
dynamic_decodepad 几乎相同.因为它只有一个 src pad 是由 decodebin 创建的,所以为 video_decoder 创建单独的处理程序会更容易和 audio_decoder .
但出于教学原因,我将在一个功能中完成。现在我们可以通过它的大写来区分哪个元素连接到焊盘。
void dynamic_decodepad (GstElement* object, GstPad* pad, gboolean arg1,gpointer user_data)
{
GstPad* videoconvertsink = gst_element_get_static_pad(video_convertor,"sink");
if (gst_pad_can_link(pad,videoconvertsink)) {
gst_pad_link(pad,videoconvertsink);
}

GstPad* audioconvertsink = gst_element_get_static_pad(audio_convertor,"sink");
if (gst_pad_can_link(pad,audioconvertsink)) {
gst_pad_link(pad,audioconvertsink);
}
}
gst_pad_can_linkdynamic_addpath 中不起作用因为可以连接 query元素同时添加到“audio_00”和“video_00”。

就是这样。如果您有其他问题,请随时提问。

关于播放 avi 文件的 gstreamer 代码挂起,我们在Stack Overflow上找到一个类似的问题: https://stackoverflow.com/questions/8396127/

27 4 0
Copyright 2021 - 2024 cfsdn All Rights Reserved 蜀ICP备2022000587号
广告合作:1813099741@qq.com 6ren.com