
c++ - Live555: live X264 streaming based on "testOnDemandRTSPServer"


I am trying to create an RTSP server that streams the OpenGL output of my program. I had a look at How to write a Live555 FramedSource to allow me to stream H.264 live, but I need the stream to be unicast. So I had a look at testOnDemandRTSPServer. Using the same code fails. As I understand it, I need to provide memory in which my h264 frames are stored, so that the OnDemandServer can read them on demand.

H264VideoStreamServerMediaSubsession.cpp

H264VideoStreamServerMediaSubsession*
H264VideoStreamServerMediaSubsession::createNew(UsageEnvironment& env,
Boolean reuseFirstSource) {
return new H264VideoStreamServerMediaSubsession(env, reuseFirstSource);
}

H264VideoStreamServerMediaSubsession::H264VideoStreamServerMediaSubsession(UsageEnvironment& env, Boolean reuseFirstSource)
: OnDemandServerMediaSubsession(env, reuseFirstSource), fAuxSDPLine(NULL), fDoneFlag(0), fDummyRTPSink(NULL) {
}

H264VideoStreamServerMediaSubsession::~H264VideoStreamServerMediaSubsession() {
delete[] fAuxSDPLine;
}

static void afterPlayingDummy(void* clientData) {
H264VideoStreamServerMediaSubsession* subsess = (H264VideoStreamServerMediaSubsession*)clientData;
subsess->afterPlayingDummy1();
}

void H264VideoStreamServerMediaSubsession::afterPlayingDummy1() {
// Unschedule any pending 'checking' task:
envir().taskScheduler().unscheduleDelayedTask(nextTask());
// Signal the event loop that we're done:
setDoneFlag();
}

static void checkForAuxSDPLine(void* clientData) {
H264VideoStreamServerMediaSubsession* subsess = (H264VideoStreamServerMediaSubsession*)clientData;
subsess->checkForAuxSDPLine1();
}

void H264VideoStreamServerMediaSubsession::checkForAuxSDPLine1() {
char const* dasl;

if (fAuxSDPLine != NULL) {
// Signal the event loop that we're done:
setDoneFlag();
} else if (fDummyRTPSink != NULL && (dasl = fDummyRTPSink->auxSDPLine()) != NULL) {
fAuxSDPLine = strDup(dasl);
fDummyRTPSink = NULL;

// Signal the event loop that we're done:
setDoneFlag();
} else {
// try again after a brief delay:
int uSecsToDelay = 100000; // 100 ms
nextTask() = envir().taskScheduler().scheduleDelayedTask(uSecsToDelay,
(TaskFunc*)checkForAuxSDPLine, this);
}
}

char const* H264VideoStreamServerMediaSubsession::getAuxSDPLine(RTPSink* rtpSink, FramedSource* inputSource) {
if (fAuxSDPLine != NULL) return fAuxSDPLine; // it's already been set up (for a previous client)

if (fDummyRTPSink == NULL) { // we're not already setting it up for another, concurrent stream
// Note: For H264 video files, the 'config' information ("profile-level-id" and "sprop-parameter-sets") isn't known
// until we start reading the file. This means that "rtpSink"s "auxSDPLine()" will be NULL initially,
// and we need to start reading data from our file until this changes.
fDummyRTPSink = rtpSink;

// Start reading the file:
fDummyRTPSink->startPlaying(*inputSource, afterPlayingDummy, this);

// Check whether the sink's 'auxSDPLine()' is ready:
checkForAuxSDPLine(this);
}

envir().taskScheduler().doEventLoop(&fDoneFlag);

return fAuxSDPLine;
}

FramedSource* H264VideoStreamServerMediaSubsession::createNewStreamSource(unsigned /*clientSessionId*/, unsigned& estBitrate) {
estBitrate = 500; // kb
megamol::remotecontrol::View3D_MRC *parent = (megamol::remotecontrol::View3D_MRC*)this->parent;
return H264VideoStreamFramer::createNew(envir(), parent->h264FramedSource);
}

RTPSink* H264VideoStreamServerMediaSubsession::createNewRTPSink(Groupsock* rtpGroupsock, unsigned char rtpPayloadTypeIfDynamic, FramedSource* /*inputSource*/) {
return H264VideoRTPSink::createNew(envir(), rtpGroupsock, rtpPayloadTypeIfDynamic);
}

FramedSource.cpp

H264FramedSource* H264FramedSource::createNew(UsageEnvironment& env,
unsigned preferredFrameSize,
unsigned playTimePerFrame)
{
return new H264FramedSource(env, preferredFrameSize, playTimePerFrame);
}

H264FramedSource::H264FramedSource(UsageEnvironment& env,
unsigned preferredFrameSize,
unsigned playTimePerFrame)
: FramedSource(env),
fPreferredFrameSize(fMaxSize),
fPlayTimePerFrame(playTimePerFrame),
fLastPlayTime(0),
fCurIndex(0)
{

x264_param_default_preset(&param, "veryfast", "zerolatency");
param.i_threads = 1;
param.i_width = 1024;
param.i_height = 768;
param.i_fps_num = 30;
param.i_fps_den = 1;
// Intra refresh:
param.i_keyint_max = 60;
param.b_intra_refresh = 1;
//Rate control:
param.rc.i_rc_method = X264_RC_CRF;
param.rc.f_rf_constant = 25;
param.rc.f_rf_constant_max = 35;
param.i_sps_id = 7;
//For streaming:
param.b_repeat_headers = 1;
param.b_annexb = 1;
x264_param_apply_profile(&param, "baseline");

param.i_log_level = X264_LOG_ERROR;

encoder = x264_encoder_open(&param);
pic_in.i_type = X264_TYPE_AUTO;
pic_in.i_qpplus1 = 0;
pic_in.img.i_csp = X264_CSP_I420;
pic_in.img.i_plane = 3;


x264_picture_alloc(&pic_in, X264_CSP_I420, 1024, 768);

convertCtx = sws_getContext(1024, 768, PIX_FMT_RGBA, 1024, 768, PIX_FMT_YUV420P, SWS_FAST_BILINEAR, NULL, NULL, NULL);
eventTriggerId = envir().taskScheduler().createEventTrigger(deliverFrame0);
}

H264FramedSource::~H264FramedSource()
{
envir().taskScheduler().deleteEventTrigger(eventTriggerId);
eventTriggerId = 0;
}

void H264FramedSource::AddToBuffer(uint8_t* buf, int surfaceSizeInBytes)
{
uint8_t* surfaceData = (new uint8_t[surfaceSizeInBytes]);

memcpy(surfaceData, buf, surfaceSizeInBytes);

int srcstride = 1024*4;
sws_scale(convertCtx, &surfaceData, &srcstride,0, 768, pic_in.img.plane, pic_in.img.i_stride);
x264_nal_t* nals = NULL;
int i_nals = 0;
int frame_size = -1;


frame_size = x264_encoder_encode(encoder, &nals, &i_nals, &pic_in, &pic_out);

static bool finished = false;

if (frame_size >= 0)
{
static bool alreadydone = false;
if(!alreadydone)
{

x264_encoder_headers(encoder, &nals, &i_nals);
alreadydone = true;
}
for(int i = 0; i < i_nals; ++i)
{
m_queue.push(nals[i]);
}
}
delete [] surfaceData;
surfaceData = nullptr;

envir().taskScheduler().triggerEvent(eventTriggerId, this);
}

void H264FramedSource::doGetNextFrame()
{
deliverFrame();
}

void H264FramedSource::deliverFrame0(void* clientData)
{
((H264FramedSource*)clientData)->deliverFrame();
}

void H264FramedSource::deliverFrame()
{
x264_nal_t nalToDeliver;

if (fPlayTimePerFrame > 0 && fPreferredFrameSize > 0) {
if (fPresentationTime.tv_sec == 0 && fPresentationTime.tv_usec == 0) {
// This is the first frame, so use the current time:
gettimeofday(&fPresentationTime, NULL);
} else {
// Increment by the play time of the previous data:
unsigned uSeconds = fPresentationTime.tv_usec + fLastPlayTime;
fPresentationTime.tv_sec += uSeconds/1000000;
fPresentationTime.tv_usec = uSeconds%1000000;
}

// Remember the play time of this data:
fLastPlayTime = (fPlayTimePerFrame*fFrameSize)/fPreferredFrameSize;
fDurationInMicroseconds = fLastPlayTime;
} else {
// We don't know a specific play time duration for this data,
// so just record the current time as being the 'presentation time':
gettimeofday(&fPresentationTime, NULL);
}

if(!m_queue.empty())
{
m_queue.wait_and_pop(nalToDeliver);

uint8_t* newFrameDataStart = (uint8_t*)0xD15EA5E;

newFrameDataStart = (uint8_t*)(nalToDeliver.p_payload);
unsigned newFrameSize = nalToDeliver.i_payload;

// Deliver the data here:
if (newFrameSize > fMaxSize) {
fFrameSize = fMaxSize;
fNumTruncatedBytes = newFrameSize - fMaxSize;
}
else {
fFrameSize = newFrameSize;
}

memcpy(fTo, nalToDeliver.p_payload, nalToDeliver.i_payload);

FramedSource::afterGetting(this);
}
}

Relevant part of the RTSP server thread

  RTSPServer* rtspServer = RTSPServer::createNew(*(parent->env), 8554, NULL);
if (rtspServer == NULL) {
*(parent->env) << "Failed to create RTSP server: " << (parent->env)->getResultMsg() << "\n";
exit(1);
}
char const* streamName = "Stream";
parent->h264FramedSource = H264FramedSource::createNew(*(parent->env), 0, 0);
H264VideoStreamServerMediaSubsession *h264VideoStreamServerMediaSubsession = H264VideoStreamServerMediaSubsession::createNew(*(parent->env), true);
h264VideoStreamServerMediaSubsession->parent = parent;
sms->addSubsession(h264VideoStreamServerMediaSubsession);
rtspServer->addServerMediaSession(sms);

parent->env->taskScheduler().doEventLoop(); // does not return

Once a connection exists, the render loop calls

h264FramedSource->AddToBuffer(videoData, 1024*768*4);

Best Answer

The first thing you have to do is write a wrapper around the x264 encoder that lets you encode RGB data through a clean, well-defined interface. The class below shows how to do that. I have used this class to encode raw BGR frames taken from an OpenCV capture.

x264Encoder.h

#pragma once
#ifdef __cplusplus
#define __STDINT_MACROS
#define __STDC_CONSTANT_MACROS
#endif
#include <iostream>
#include <concurrent_queue.h>
#include "opencv2\opencv.hpp"
#include <queue>
#include <stdint.h>
extern "C" {
#include "x264\x264.h"
#include "libswscale\swscale.h" // needed for SwsContext / sws_getContext; adjust the path to your FFmpeg build
}

class x264Encoder
{
public:
x264Encoder(void);
~x264Encoder(void);

public:
void initilize();
void unInitilize();
void encodeFrame(cv::Mat& image);
bool isNalsAvailableInOutputQueue();
x264_nal_t getNalUnit();
private:
// Use this context to convert your BGR image to YUV, since x264 does not accept RGB input
SwsContext* convertContext;
std::queue<x264_nal_t> outputQueue;
x264_param_t parameters;
x264_picture_t picture_in,picture_out;
x264_t* encoder;
};

x264Encoder.cpp

#include "x264Encoder.h"


x264Encoder::x264Encoder(void)
{

}


x264Encoder::~x264Encoder(void)
{

}

void x264Encoder::initilize()
{
x264_param_default_preset(&parameters, "veryfast", "zerolatency");
parameters.i_log_level = X264_LOG_INFO;
parameters.i_threads = 1;
parameters.i_width = 640;
parameters.i_height = 480;
parameters.i_fps_num = 25;
parameters.i_fps_den = 1;
parameters.i_keyint_max = 25;
parameters.b_intra_refresh = 1;
parameters.rc.i_rc_method = X264_RC_CRF;
parameters.rc.i_vbv_buffer_size = 1000000;
parameters.rc.i_vbv_max_bitrate = 90000;
parameters.rc.f_rf_constant = 25;
parameters.rc.f_rf_constant_max = 35;
parameters.i_sps_id = 7;
// keep the following two values set to 1
parameters.b_repeat_headers = 1; // emit SPS/PPS headers before every I-frame
parameters.b_annexb = 1; // put a start code in front of each NAL; we strip it again before handing the data to Live555
x264_param_apply_profile(&parameters, "baseline");

encoder = x264_encoder_open(&parameters);
x264_picture_alloc(&picture_in, X264_CSP_I420, parameters.i_width, parameters.i_height);
picture_in.i_type = X264_TYPE_AUTO;
picture_in.img.i_csp = X264_CSP_I420;
// The colour space converter is set up for BGR24 -> YUV420 because the OpenCV video capture delivers BGR24 images; initialize it according to your own input pixel format
convertContext = sws_getContext(parameters.i_width,parameters.i_height, PIX_FMT_BGR24, parameters.i_width,parameters.i_height,PIX_FMT_YUV420P, SWS_FAST_BILINEAR, NULL, NULL, NULL);
}

void x264Encoder::unInitilize()
{
x264_encoder_close(encoder);
sws_freeContext(convertContext);
}

void x264Encoder::encodeFrame(cv::Mat& image)
{
int srcStride = parameters.i_width * 3;
sws_scale(convertContext, &(image.data), &srcStride, 0, parameters.i_height, picture_in.img.plane, picture_in.img.i_stride);
x264_nal_t* nals ;
int i_nals = 0;
int frameSize = -1;

frameSize = x264_encoder_encode(encoder, &nals, &i_nals, &picture_in, &picture_out);
if(frameSize > 0)
{
for(int i = 0; i< i_nals; i++)
{
outputQueue.push(nals[i]);
}
}
}

bool x264Encoder::isNalsAvailableInOutputQueue()
{
if(outputQueue.empty() == true)
{
return false;
}
else
{
return true;
}
}

x264_nal_t x264Encoder::getNalUnit()
{
x264_nal_t nal;
nal = outputQueue.front();
outputQueue.pop();
return nal;
}

Now we have an encoder that can take BGR pictures and encode them. It encodes each frame and puts all the output NALs into an output queue, which Live555 will then stream. To implement a live video source you have to create two classes: one subclass of OnDemandServerMediaSubsession and one subclass of FramedSource, both of which come from the live555 media library. These classes will also serve data to multiple clients.
To create those two subclasses you can refer to the following classes.
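Before wiring the encoder into Live555, here is a minimal standalone sketch (not part of the original answer) of how the wrapper above can be driven. The capture device and the loop count are assumptions for illustration, and the captured frames must match the 640x480 size hard-coded in initilize():

#include "x264Encoder.h"

int main()
{
    // Grab BGR frames from the same kind of OpenCV capture the answer uses.
    cv::VideoCapture capture(0);
    x264Encoder encoder;
    encoder.initilize(); // opens x264 with the hard-coded 640x480 parameters

    for (int i = 0; i < 100; ++i)
    {
        cv::Mat frame;
        capture >> frame; // BGR24 frame; must match the size configured in initilize()
        if (frame.empty()) break;

        encoder.encodeFrame(frame); // BGR -> YUV420 conversion + x264 encode

        // Drain every NAL unit produced for this frame.
        while (encoder.isNalsAvailableInOutputQueue())
        {
            x264_nal_t nal = encoder.getNalUnit();
            // nal.p_payload / nal.i_payload hold Annex-B data (start code included,
            // because b_annexb = 1); this is what deliverFrame() strips later.
        }
    }

    encoder.unInitilize();
    return 0;
}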

H264LiveServerMediaSession.h (subclass of OnDemandServerMediaSubsession)

#include "liveMedia.hh"
#include "OnDemandServerMediaSubsession.hh"
#include "LiveSourceWithx264.h"

class H264LiveServerMediaSession:public OnDemandServerMediaSubsession
{
public:
static H264LiveServerMediaSession* createNew(UsageEnvironment& env, bool reuseFirstSource);
void checkForAuxSDPLine1();
void afterPlayingDummy1();
protected:
H264LiveServerMediaSession(UsageEnvironment& env, bool reuseFirstSource);
virtual ~H264LiveServerMediaSession(void);
void setDoneFlag() { fDoneFlag = ~0; }
protected:
virtual char const* getAuxSDPLine(RTPSink* rtpSink, FramedSource* inputSource);
virtual FramedSource* createNewStreamSource(unsigned clientSessionId, unsigned& estBitrate);
virtual RTPSink* createNewRTPSink(Groupsock* rtpGroupsock, unsigned char rtpPayloadTypeIfDynamic, FramedSource* inputSource);
private:
char* fAuxSDPLine;
char fDoneFlag;
RTPSink* fDummySink;
};

H264LiveServerMediaSession.cpp

#include "H264LiveServerMediaSession.h"


H264LiveServerMediaSession* H264LiveServerMediaSession::createNew(UsageEnvironment& env, bool reuseFirstSource)
{
return new H264LiveServerMediaSession(env, reuseFirstSource);
}

H264LiveServerMediaSession::H264LiveServerMediaSession(UsageEnvironment& env, bool reuseFirstSource):OnDemandServerMediaSubsession(env,reuseFirstSource),fAuxSDPLine(NULL), fDoneFlag(0), fDummySink(NULL)
{

}


H264LiveServerMediaSession::~H264LiveServerMediaSession(void)
{
delete[] fAuxSDPLine;
}


static void afterPlayingDummy(void* clientData)
{
H264LiveServerMediaSession *session = (H264LiveServerMediaSession*)clientData;
session->afterPlayingDummy1();
}

void H264LiveServerMediaSession::afterPlayingDummy1()
{
envir().taskScheduler().unscheduleDelayedTask(nextTask());
setDoneFlag();
}

static void checkForAuxSDPLine(void* clientData)
{
H264LiveServerMediaSession* session = (H264LiveServerMediaSession*)clientData;
session->checkForAuxSDPLine1();
}

void H264LiveServerMediaSession::checkForAuxSDPLine1()
{
char const* dasl;
if(fAuxSDPLine != NULL)
{
setDoneFlag();
}
else if(fDummySink != NULL && (dasl = fDummySink->auxSDPLine()) != NULL)
{
fAuxSDPLine = strDup(dasl);
fDummySink = NULL;
setDoneFlag();
}
else
{
int uSecsDelay = 100000;
nextTask() = envir().taskScheduler().scheduleDelayedTask(uSecsDelay, (TaskFunc*)checkForAuxSDPLine, this);
}
}

char const* H264LiveServerMediaSession::getAuxSDPLine(RTPSink* rtpSink, FramedSource* inputSource)
{
if(fAuxSDPLine != NULL) return fAuxSDPLine;
if(fDummySink == NULL)
{
fDummySink = rtpSink;
fDummySink->startPlaying(*inputSource, afterPlayingDummy, this);
checkForAuxSDPLine(this);
}

envir().taskScheduler().doEventLoop(&fDoneFlag);
return fAuxSDPLine;
}

FramedSource* H264LiveServerMediaSession::createNewStreamSource(unsigned clientSessionID, unsigned& estBitRate)
{
// Based on the encoder configuration I kept it at 90000
estBitRate = 90000;
LiveSourceWithx264 *source = LiveSourceWithx264::createNew(envir());
// Do not keep a reference to this source anywhere: Live555 creates and deletes
// this object many times, so a stored pointer will eventually dangle and cause an
// access violation. Instead, configure the source so it always reads from your data source.
return H264VideoStreamDiscreteFramer::createNew(envir(),source);
}

RTPSink* H264LiveServerMediaSession::createNewRTPSink(Groupsock* rtpGroupsock, unsigned char rtpPayloadTypeIfDynamic, FramedSource* inputSource)
{
return H264VideoRTPSink::createNew(envir(), rtpGroupsock, rtpPayloadTypeIfDynamic);
}

Now we have to subclass the FramedSource class from liveMedia. As a model you can refer to DeviceSource.cpp in the live555 library. Below is how I did it.
LiveSourceWithx264.h

#include <queue>
#include "x264Encoder.h"
#include "opencv2\opencv.hpp"

class LiveSourceWithx264:public FramedSource
{
public:
static LiveSourceWithx264* createNew(UsageEnvironment& env);
static EventTriggerId eventTriggerId;
protected:
LiveSourceWithx264(UsageEnvironment& env);
virtual ~LiveSourceWithx264(void);
private:
virtual void doGetNextFrame();
static void deliverFrame0(void* clientData);
void deliverFrame();
void encodeNewFrame();
static unsigned referenceCount;
std::queue<x264_nal_t> nalQueue;
timeval currentTime;
// videoCaptureDevice is my BGR data source. You can have according to your need
cv::VideoCapture videoCaptureDevice;
cv::Mat rawImage;
// Remember the x264 encoder wrapper we wrote in the start
x264Encoder *encoder;
};

LiveSourceWithx264.cpp

#include "LiveSourceWithx264.h"


LiveSourceWithx264* LiveSourceWithx264::createNew(UsageEnvironment& env)
{
return new LiveSourceWithx264(env);
}

EventTriggerId LiveSourceWithx264::eventTriggerId = 0;

unsigned LiveSourceWithx264::referenceCount = 0;

LiveSourceWithx264::LiveSourceWithx264(UsageEnvironment& env):FramedSource(env)
{
if(referenceCount == 0)
{

}
++referenceCount;
videoCaptureDevice.open(0);
encoder = new x264Encoder();
encoder->initilize();
if(eventTriggerId == 0)
{
eventTriggerId = envir().taskScheduler().createEventTrigger(deliverFrame0);
}
}


LiveSourceWithx264::~LiveSourceWithx264(void)
{
--referenceCount;
videoCaptureDevice.release();
encoder->unInitilize();
envir().taskScheduler().deleteEventTrigger(eventTriggerId);
eventTriggerId = 0;
}

void LiveSourceWithx264::encodeNewFrame()
{
rawImage.data = NULL;
while(rawImage.data == NULL)
{
videoCaptureDevice >> rawImage;
cv::waitKey(100);
}
// Got new image to stream
assert(rawImage.data != NULL);
encoder->encodeFrame(rawImage);
// Take all nals from encoder output queue to our input queue
while(encoder->isNalsAvailableInOutputQueue() == true)
{
x264_nal_t nal = encoder->getNalUnit();
nalQueue.push(nal);
}
}

void LiveSourceWithx264::deliverFrame0(void* clientData)
{
((LiveSourceWithx264*)clientData)->deliverFrame();
}

void LiveSourceWithx264::doGetNextFrame()
{
if(nalQueue.empty() == true)
{
encodeNewFrame();
gettimeofday(&currentTime,NULL);
deliverFrame();
}
else
{
deliverFrame();
}
}

void LiveSourceWithx264::deliverFrame()
{
if(!isCurrentlyAwaitingData()) return;
x264_nal_t nal = nalQueue.front();
nalQueue.pop();
assert(nal.p_payload != NULL);
// You need to remove the start code that precedes every NAL unit.
// It can be either 0x00000001 or 0x000001, so detect it, strip it, and pass the remaining data to Live555.
int trancate = 0;
if (nal.i_payload >= 4 && nal.p_payload[0] == 0 && nal.p_payload[1] == 0 && nal.p_payload[2] == 0 && nal.p_payload[3] == 1 )
{
trancate = 4;
}
else
{
if(nal.i_payload >= 3 && nal.p_payload[0] == 0 && nal.p_payload[1] == 0 && nal.p_payload[2] == 1 )
{
trancate = 3;
}
}

if(nal.i_payload-trancate > fMaxSize)
{
fFrameSize = fMaxSize;
fNumTruncatedBytes = nal.i_payload-trancate - fMaxSize;
}
else
{
fFrameSize = nal.i_payload-trancate;
}
fPresentationTime = currentTime;
memmove(fTo,nal.p_payload+trancate,fFrameSize);
FramedSource::afterGetting(this);
}

Now we are done with the class implementations. For the streaming setup you can proceed in the same way as in the testOnDemandRTSPServer.cpp example. Here is the main() where I do the setup:

#include <iostream>
#include <liveMedia.hh>
#include <BasicUsageEnvironment.hh>
#include <GroupsockHelper.hh>
#include "H264LiveServerMediaSession.h"
#include "opencv2\opencv.hpp"
#include "x264Encoder.h"
int main(int argc, char* argv[])
{
TaskScheduler* taskSchedular = BasicTaskScheduler::createNew();
BasicUsageEnvironment* usageEnvironment = BasicUsageEnvironment::createNew(*taskSchedular);
RTSPServer* rtspServer = RTSPServer::createNew(*usageEnvironment, 8554, NULL);
if(rtspServer == NULL)
{
*usageEnvironment << "Failed to create rtsp server ::" << usageEnvironment->getResultMsg() <<"\n";
exit(1);
}

std::string streamName = "usb1";
ServerMediaSession* sms = ServerMediaSession::createNew(*usageEnvironment, streamName.c_str(), streamName.c_str(), "Live H264 Stream");
H264LiveServerMediaSession *liveSubSession = H264LiveServerMediaSession::createNew(*usageEnvironment, true);
sms->addSubsession(liveSubSession);
rtspServer->addServerMediaSession(sms);
char* url = rtspServer->rtspURL(sms);
*usageEnvironment << "Play the stream using url "<<url << "\n";
delete[] url;
taskSchedular->doEventLoop();
return 0;
}

And there you have the URL for your live source. In my case it streams my USB camera :)
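For a quick test you can open the printed URL, which should look something like rtsp://<your-server-ip>:8554/usb1 (port 8554 and the stream name "usb1" come from the setup above), in VLC or with live555's openRTSP client.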

Regarding c++ - Live555: live X264 streaming based on "testOnDemandRTSPServer", a similar question can be found on Stack Overflow: https://stackoverflow.com/questions/19427576/
