
c - Streaming video and audio over RTSP


I need to implement a server that accepts connections from two types of clients. The first type must stream live video and audio to the server. The second type must stream that video back from the server. I chose H.264 for the video encoding and Vorbis (Ogg) for the audio. I want to use the RTSP protocol to stream the video from the first type of client to the server. I have already implemented the client part using "libavformat" from ffmpeg. My code is given below.

#include "v_out_video_stream.h"

#include <stdint.h>
#ifdef __cplusplus
extern "C" {
#endif
#include <libavformat/avformat.h>
#include <libavutil/opt.h>
#include <libavutil/avstring.h>
#include <libavformat/avio.h>
#ifdef __cplusplus
}
#endif
#include <stdexcept>
#include <cassert>
#include <cstdio>
#include <string>

struct VStatticRegistrar
{
    VStatticRegistrar( )
    {
        av_register_all( );
        avformat_network_init( );
    }
};

VStatticRegistrar __registrar;

struct VOutVideoStream::Private
{
    AVFormatContext * m_context;
    int m_audioStreamIndex;
    int m_videoStreamIndex;

    int m_videoBitrate;
    int m_width;
    int m_height;
    int m_fps;
    int m_audioSamplerate;
};

VOutVideoStream::VOutVideoStream( int videoBitrate, int width, int height, int fps, int audioSamplerate )
{
    d = new Private( );
    d->m_videoBitrate = videoBitrate;
    d->m_width = width;
    d->m_height = height;
    d->m_fps = fps;
    d->m_audioSamplerate = audioSamplerate;
    d->m_context = 0;
    d->m_audioStreamIndex = -1;
    d->m_videoStreamIndex = -1;
}

bool VOutVideoStream::connectToServer( const std::string& rtp_address, int rtp_port )
{
    assert( ! d->m_context );

    // initialize the AV context
    d->m_context = avformat_alloc_context( );
    if( ! d->m_context )
        return false;
    // get the output format
    d->m_context->oformat = av_guess_format( "rtsp", NULL, NULL );
    if( ! d->m_context->oformat )
        return false;

    // try to open the RTSP stream
    snprintf( d->m_context->filename, sizeof( d->m_context->filename ), "rtsp://%s:%d", rtp_address.c_str( ), rtp_port );
    if( avio_open( &d->m_context->pb, d->m_context->filename, AVIO_FLAG_WRITE ) < 0 )
        return false;

    // add an H.264 video stream
    AVStream *stream = avformat_new_stream( d->m_context, NULL );
    if( ! stream )
        return false;
    // initialize the video codec parameters
    AVCodecContext* codec = stream->codec;
    if( d->m_context->oformat->flags & AVFMT_GLOBALHEADER )
        codec->flags |= CODEC_FLAG_GLOBAL_HEADER;
    codec->codec_id = CODEC_ID_H264;
    codec->codec_type = AVMEDIA_TYPE_VIDEO;
    //codec->bit_rate = d->m_videoBitrate;
    codec->width = d->m_width;
    codec->height = d->m_height;
    codec->time_base.den = d->m_fps;
    codec->time_base.num = 1;
    d->m_videoStreamIndex = stream->index;

    // add a Vorbis audio stream
    stream = avformat_new_stream( d->m_context, NULL );
    if( ! stream )
        return false;
    // initialize the audio codec parameters
    codec = stream->codec;
    if( d->m_context->oformat->flags & AVFMT_GLOBALHEADER )
        codec->flags |= CODEC_FLAG_GLOBAL_HEADER;
    codec->codec_id = CODEC_ID_VORBIS;
    codec->codec_type = AVMEDIA_TYPE_AUDIO;
    codec->sample_fmt = AV_SAMPLE_FMT_S16;
    codec->channels = 2;
    codec->bit_rate = d->m_audioSamplerate * codec->channels * 16;
    codec->sample_rate = d->m_audioSamplerate;
    d->m_audioStreamIndex = stream->index;
    // write the header
    return avformat_write_header( d->m_context, NULL ) == 0;
}

void VOutVideoStream::disconnect( )
{
    assert( d->m_context );

    avio_close( d->m_context->pb );
    avformat_free_context( d->m_context );
    d->m_context = 0;
}

VOutVideoStream::~VOutVideoStream( )
{
    if( d->m_context )
        disconnect( );
    delete d;
}

bool VOutVideoStream::send( VNetworkAbstractionLayer& nal )
{
    AVPacket p;
    av_init_packet( &p );
    p.data = nal.getPayload( );
    p.size = nal.getSize( );
    p.stream_index = nal.getType( ) == VNetworkAbstractionLayer::AUDIO_PACKET ? d->m_audioStreamIndex
                                                                               : d->m_videoStreamIndex;
    return av_write_frame( d->m_context, &p ) >= 0;
}

VNetworkAbstractionLayer is defined as follows:

#ifndef _V_NETWORK_ABSTRACTION_LAYER_H_
#define _V_NETWORK_ABSTRACTION_LAYER_H_

#include <cs/v_cs_global.h>

#include <stdint.h>
#include <cstring>
#include <boost/noncopyable.hpp>
#include <boost/enable_shared_from_this.hpp>

class VNetworkAbstractionLayer : public boost::enable_shared_from_this<VNetworkAbstractionLayer>,
                                 private boost::noncopyable
{
public:
    enum PacketType
    {
        AUDIO_PACKET,
        VIDEO_PACKET
    };

    ~VNetworkAbstractionLayer( ) {
        delete[] m_payload;
    }

    static VNetworkAbstractionLayerPtr factory( int size, const uint8_t* payload, PacketType type ) {
        return VNetworkAbstractionLayerPtr( new VNetworkAbstractionLayer( size, payload, type ) );
    }

    uint8_t* getPayload( ) {
        return m_payload;
    }
    int getSize( ) const {
        return m_size;
    }
    PacketType getType( ) const {
        return m_type;
    }

private:
    VNetworkAbstractionLayer( int size, const uint8_t* payload, PacketType type ) :
        m_size( size ),
        m_payload( new uint8_t[ size ] ),
        m_type( type )
    {
        memcpy( m_payload, payload, size );
    }

    int m_size;
    uint8_t *m_payload;
    PacketType m_type;
};


#endif // _V_NETWORK_ABSTRACTION_LAYER_H_
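
For context, here is a hypothetical sketch of how the two classes above could be driven together. The header name v_network_abstraction_layer.h, the dummy payload and all numeric parameters are assumptions, not taken from the question; VNetworkAbstractionLayerPtr is assumed to be a boost::shared_ptr typedef coming from cs/v_cs_global.h.

#include <vector>
#include <stdint.h>
#include "v_out_video_stream.h"
#include "v_network_abstraction_layer.h"   // assumed header name

int main( )
{
    // Hypothetical session: 640x480 @ 25 fps video, 44.1 kHz stereo audio.
    VOutVideoStream out( 400000, 640, 480, 25, 44100 );
    if( ! out.connectToServer( "127.0.0.1", 8554 ) )
        return 1;

    // Dummy buffer standing in for one encoded H.264 NAL unit.
    std::vector<uint8_t> payload( 1024, 0 );
    VNetworkAbstractionLayerPtr nal = VNetworkAbstractionLayer::factory(
        payload.size( ), &payload[ 0 ], VNetworkAbstractionLayer::VIDEO_PACKET );
    out.send( *nal );   // send( ) routes the packet by getType( )

    out.disconnect( );
    return 0;
}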

Now I need to implement the server, but I could not find anything like a "listen" method in libavformat. Can anyone explain how to implement an RTSP server? Can I use some other library?

Best Answer

I would go with GStreamer for this. There are plenty of examples out there.
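
For illustration only, here is a minimal sketch of what such a server could look like with GStreamer's gst-rtsp-server library (1.x API). The port 8554, the /stream mount point and the test-source launch pipeline are assumptions made for the sketch, not part of the original answer; a real server would replace the test sources with the media received from the publishing clients. It builds against the gstreamer-rtsp-server-1.0 pkg-config package.

#include <gst/gst.h>
#include <gst/rtsp-server/rtsp-server.h>

int main( int argc, char *argv[] )
{
    gst_init( &argc, &argv );

    GMainLoop *loop = g_main_loop_new( NULL, FALSE );
    GstRTSPServer *server = gst_rtsp_server_new( );
    gst_rtsp_server_set_service( server, "8554" );        // TCP port to listen on

    // The factory describes the pipeline that feeds each connected client:
    // test sources encoded to H.264 and Vorbis, RTP-payloaded as pay0/pay1.
    GstRTSPMediaFactory *factory = gst_rtsp_media_factory_new( );
    gst_rtsp_media_factory_set_launch( factory,
        "( videotestsrc ! x264enc ! rtph264pay name=pay0 pt=96 "
        "  audiotestsrc ! vorbisenc ! rtpvorbispay name=pay1 pt=97 )" );
    gst_rtsp_media_factory_set_shared( factory, TRUE );   // one pipeline shared by all clients

    // Mount the factory so clients can request rtsp://<host>:8554/stream
    GstRTSPMountPoints *mounts = gst_rtsp_server_get_mount_points( server );
    gst_rtsp_mount_points_add_factory( mounts, "/stream", factory );
    g_object_unref( mounts );

    gst_rtsp_server_attach( server, NULL );               // start listening
    g_main_loop_run( loop );
    return 0;
}

Clients of the second type could then simply open rtsp://<server>:8554/stream with any RTSP-capable player.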

Regarding "c - Streaming video and audio over RTSP", a similar question was found on Stack Overflow: https://stackoverflow.com/questions/12432231/
