
c++ - How do I correctly encode with OpenH264? (with code example)


I have an image that I want to encode with OpenH264.

Here is the code I have derived from their wiki so far:

#include <fstream>
#include <iterator>

#include <iostream>
#include <codec_api.h> //standard api for openh264

//additional libraries used by the sample code
#include <codec_app_def.h>
#include <codec_def.h>
#include <codec_ver.h>
#include <assert.h>
#include <vector>
#include <cstring>
int main()
{
    //parameter values
    int width = 1920;
    int height = 1080;
    int framerate = 60;
    int bitrate = 5000000;
    int total_num = 500; //what does this value do?
    //end parameter values

    //read in the file from bmp
    std::vector<char> buf; //to store the image information

    std::basic_ifstream<char> file("/home/megamol/Git/h264_sample/build/test.bmp", std::ios::binary); //opens an input stream on the source file
    buf = std::vector<char>((std::istreambuf_iterator<char>(file)), std::istreambuf_iterator<char>()); //reads the data into the vector

    std::cout << "sizeof buf: " << buf.size() << std::endl;

    //Step 1: set up encoder
    ISVCEncoder* encoder_; //declaration of encoder pointer
    int rv = WelsCreateSVCEncoder (&encoder_);

    //Step 2: initialize with basic parameters
    SEncParamBase param;
    memset(&param, 0, sizeof (SEncParamBase));
    param.iUsageType = EUsageType::SCREEN_CONTENT_REAL_TIME;
    param.fMaxFrameRate = framerate;
    param.iPicWidth = width;
    param.iPicHeight = height;
    param.iTargetBitrate = bitrate; //default value of example
    encoder_->Initialize(&param);

    //Step 3: set video format
    int videoFormat = videoFormatI420;
    encoder_->SetOption (ENCODER_OPTION_DATAFORMAT, &videoFormat);

    //Step 4: encode and store output bitstream
    int frameSize = width * height * 3 / 2;
    buf.resize(frameSize);

    SFrameBSInfo info;
    std::vector<char> compressedData;

    memset (&info, 0, sizeof (SFrameBSInfo));
    SSourcePicture pic;
    memset (&pic, 0, sizeof (SSourcePicture));
    pic.iPicWidth = width;
    pic.iPicHeight = height;
    pic.iColorFormat = videoFormatI420;
    pic.iStride[0] = pic.iPicWidth;
    pic.iStride[1] = pic.iStride[2] = pic.iPicWidth >> 1;
    pic.pData[0] = reinterpret_cast<unsigned char*>(&buf[0]);
    pic.pData[1] = pic.pData[0] + width * height;
    pic.pData[2] = pic.pData[1] + (width * height >> 2);

    //encode the frame
    rv = encoder_->EncodeFrame (&pic, &info);
    //encoding done; the encoded frame should be stored in info

    //begin decoding block
    ISVCDecoder *pSvcDecoder;
    unsigned char *pBuf = &info;

    return 0;
}

I am not entirely sure whether this is the correct way to use OpenH264, and I am also not sure how to test it properly.

Right now the documentation for the code samples is rather poor.

What is BufferedData buf; for example? I know that it is supposed to be the input, but what type is that? For instance, how do I load my test.bmp as BufferedData? I don't think I am doing this correctly.

Another thing I am confused about is how to access the output after encoding. In the example it just says //output bitstream and never saves this output anywhere. I thought the output was info, as stated in the codec_api.h header file:
  /**
* @brief Encode one frame
* @param kpSrcPic the pointer to the source luminance plane
* chrominance data:
* CbData = kpSrc + m_iMaxPicWidth * m_iMaxPicHeight;
* CrData = CbData + (m_iMaxPicWidth * m_iMaxPicHeight)/4;
* the application calling this interface needs to ensure the data validation between the location
* @param pBsInfo output bit stream
* @return 0 - success; otherwise -failed;
*/
virtual int EXTAPI EncodeFrame (const SSourcePicture* kpSrcPic, SFrameBSInfo* pBsInfo) = 0;

But apparently it only stores information about the output. I am really confused by all of this.
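
The best I can come up with from reading codec_app_def.h is that the actual bytes might be reachable through the layer info inside SFrameBSInfo (sLayerInfo[i].pBsBuf plus the per-NAL lengths), something along these lines, but this is just an untested guess:

std::vector<unsigned char> bitstream;
if (info.eFrameType != videoFrameTypeSkip) {
    for (int iLayer = 0; iLayer < info.iLayerNum; ++iLayer) {
        const SLayerBSInfo& layer = info.sLayerInfo[iLayer];
        int layerSize = 0;
        for (int iNal = 0; iNal < layer.iNalCount; ++iNal)
            layerSize += layer.pNalLengthInByte[iNal]; //sum the NAL unit sizes of this layer
        bitstream.insert(bitstream.end(), layer.pBsBuf, layer.pBsBuf + layerSize); //append this layer's bytes
    }
}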

Best answer

Based on https://github.com/cisco/openh264/blob/master/codec/console/enc/src/welsenc.cpp

#include <codec_api.h>
#include <cassert>
#include <cstring>
#include <vector>
#include <fstream>
#include <iostream>

//Tested with OpenCV 3.3
#include <opencv2/core.hpp>
#include <opencv2/imgcodecs.hpp>
#include <opencv2/highgui.hpp>
#include <opencv2/imgproc.hpp>

using namespace std;
using namespace cv;

int main()
{
    ISVCEncoder *encoder_ = nullptr;
    int rv = WelsCreateSVCEncoder (&encoder_);
    assert (0 == rv);
    assert (encoder_ != nullptr);

    int width = 640;
    int height = 480;
    int total_num = 100;

    SEncParamBase param;
    memset (&param, 0, sizeof (SEncParamBase));
    param.iUsageType = CAMERA_VIDEO_REAL_TIME;
    param.fMaxFrameRate = 30;
    param.iPicWidth = width;
    param.iPicHeight = height;
    param.iTargetBitrate = 5000000;
    encoder_->Initialize (&param);

    Mat image = imread("test.jpg", IMREAD_COLOR);
    Mat imageResized, imageYuv, imageYuvMini;
    resize(image, imageResized, Size(width, height));
    Mat imageYuvCh[3], imageYuvMiniCh[3];
    cvtColor(imageResized, imageYuv, cv::COLOR_BGR2YUV);
    split(imageYuv, imageYuvCh);
    resize(imageYuv, imageYuvMini, Size(width/2, height/2));
    split(imageYuvMini, imageYuvMiniCh);

    SFrameBSInfo info;
    memset (&info, 0, sizeof (SFrameBSInfo));
    SSourcePicture pic;
    memset (&pic, 0, sizeof (SSourcePicture));
    pic.iPicWidth = width;
    pic.iPicHeight = height;
    pic.iColorFormat = videoFormatI420;
    pic.iStride[0] = imageYuvCh[0].step;
    pic.iStride[1] = imageYuvMiniCh[1].step;
    pic.iStride[2] = imageYuvMiniCh[2].step;
    pic.pData[0] = imageYuvCh[0].data;
    pic.pData[1] = imageYuvMiniCh[1].data;
    pic.pData[2] = imageYuvMiniCh[2].data;

    ofstream outFi;
    outFi.open("test.264", ios::out | ios::binary);

    for (int num = 0; num < total_num; num++)
    {
        //prepare input data
        rv = encoder_->EncodeFrame (&pic, &info);
        assert (rv == cmResultSuccess);
        if (info.eFrameType != videoFrameTypeSkip /*&& cbk != nullptr*/)
        {
            //output bitstream
            for (int iLayer = 0; iLayer < info.iLayerNum; iLayer++)
            {
                SLayerBSInfo* pLayerBsInfo = &info.sLayerInfo[iLayer];

                int iLayerSize = 0;
                int iNalIdx = pLayerBsInfo->iNalCount - 1;
                do {
                    iLayerSize += pLayerBsInfo->pNalLengthInByte[iNalIdx];
                    --iNalIdx;
                } while (iNalIdx >= 0);

                unsigned char *outBuf = pLayerBsInfo->pBsBuf;
                outFi.write((char *)outBuf, iLayerSize);
            }
        }
    }

    if (encoder_) {
        encoder_->Uninitialize();
        WelsDestroySVCEncoder (encoder_);
    }

    outFi.close();
}
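
The resulting test.264 is a raw H.264 elementary stream, so one quick sanity check is to play it back, for example with ffplay test.264. Another option is to run the bytes back through the OpenH264 decoder; the following is only a rough, untested sketch of that idea, where decodeOneFrame is a made-up helper that is handed the NAL units of a single encoded frame:

//hedged sketch: decode one encoded access unit back to I420 to verify the encoder output
#include <codec_api.h>
#include <cstring>

bool decodeOneFrame(const unsigned char* encodedFrame, int encodedSize)
{
    ISVCDecoder* decoder = nullptr;
    if (WelsCreateDecoder(&decoder) != 0 || decoder == nullptr)
        return false;

    SDecodingParam decParam;
    memset(&decParam, 0, sizeof(SDecodingParam));
    decParam.sVideoProperty.eVideoBsType = VIDEO_BITSTREAM_DEFAULT;
    decoder->Initialize(&decParam);

    unsigned char* yuvPlanes[3] = { nullptr, nullptr, nullptr };
    SBufferInfo dstInfo;
    memset(&dstInfo, 0, sizeof(SBufferInfo));

    //DecodeFrame2 expects the NAL units of one access unit at a time
    int ret = decoder->DecodeFrame2(encodedFrame, encodedSize, yuvPlanes, &dstInfo);
    bool gotPicture = (ret == 0 && dstInfo.iBufferStatus == 1);
    if (gotPicture)
    {
        //dimensions and strides of the decoded picture are reported in dstInfo;
        //yuvPlanes[0..2] point to the decoded Y, U and V planes
        int w = dstInfo.UsrData.sSystemBuffer.iWidth;
        int h = dstInfo.UsrData.sSystemBuffer.iHeight;
        (void)w; (void)h;
    }

    decoder->Uninitialize();
    WelsDestroyDecoder(decoder);
    return gotPicture;
}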

Regarding c++ - How do I correctly encode with OpenH264? (with code example), we found a similar question on Stack Overflow: https://stackoverflow.com/questions/51287413/
