
c++ - How to convert from GL_RGB to AVFrame


For my project I need to convert an RGB image produced by glReadPixels (GL_RGB) into an AVFrame. I googled it and only found examples going the other way, but in this case I need to go from GL_RGB to an AVFrame.
Here is my code.
This is how I set up the codec:

/* Allocate resources and write header data to the output file. */
void ffmpeg_encoder_start(AVCodecID codec_id, int fps, int width, int height) {
    const AVCodec *codec;
    int ret;

    codec = avcodec_find_encoder(codec_id);
    if (!codec) {
        std::cerr << "Codec not found" << std::endl;
        exit(1);
    }

    c = avcodec_alloc_context3(codec);
    if (!c) {
        std::cerr << "Could not allocate video codec context" << std::endl;
        exit(1);
    }
    c->bit_rate = 400000;
    c->width = width;
    c->height = height;
    c->time_base.num = 1;
    c->time_base.den = fps;
    c->keyint_min = 600;
    c->pix_fmt = AV_PIX_FMT_YUV420P;

    if (avcodec_open2(c, codec, NULL) < 0) {
        std::cerr << "Could not open codec" << std::endl;
        exit(1);
    }

    frame = av_frame_alloc();
    if (!frame) {
        std::cerr << "Could not allocate video frame" << std::endl;
        exit(1);
    }
    frame->format = c->pix_fmt;
    frame->width = c->width;
    frame->height = c->height;

    ret = av_image_alloc(frame->data, frame->linesize, c->width, c->height, c->pix_fmt, 32);
    if (ret < 0) {
        std::cerr << "Could not allocate raw picture buffer" << std::endl;
        exit(1);
    }
}
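The encode/write step itself is only part of the full code linked further below. For context, a minimal sketch of how such a context and frame are typically driven with FFmpeg's send/receive API might look like this (the function name and output handling are illustrative and not taken from the question):

/* Sketch only, not from the original post: feed the converted frame to the
   encoder and write the resulting packets. Assumes the globals c and frame
   from the snippet above and a FILE* opened by the caller. */
static void ffmpeg_encoder_encode_frame(FILE *out) {
    if (avcodec_send_frame(c, frame) < 0) {
        std::cerr << "Error sending frame to encoder" << std::endl;
        exit(1);
    }
    AVPacket *pkt = av_packet_alloc();
    while (avcodec_receive_packet(c, pkt) == 0) {
        fwrite(pkt->data, 1, pkt->size, out);
        av_packet_unref(pkt);
    }
    av_packet_free(&pkt);
}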
Getting the pixels and setting the new frame:
BYTE* pixels = new BYTE[3 * DEFAULT_MONITOR.maxResolution.width * DEFAULT_MONITOR.maxResolution.height];

glReadPixels(0, 0, DEFAULT_MONITOR.maxResolution.width, DEFAULT_MONITOR.maxResolution.height, GL_RGB, GL_UNSIGNED_BYTE, pixels);
screenSrc->setNextFrame(pixels, DEFAULT_MONITOR.maxResolution.width, DEFAULT_MONITOR.maxResolution.height);
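One caveat that is easy to miss (not raised in the original question): glReadPixels returns rows bottom-up, and with GL_RGB the default GL_PACK_ALIGNMENT of 4 can add per-row padding when width * 3 is not a multiple of 4. A common way to handle the flip is to hand sws_scale a pointer to the last row together with a negative stride, for example (a sketch assuming the same globals c, frame and sws_context as in the snippets above):

// Sketch only: flip the image vertically during the RGB -> YUV conversion.
// glPixelStorei(GL_PACK_ALIGNMENT, 1); // call before glReadPixels to avoid per-row padding
const int rgb_stride = 3 * c->width;
const uint8_t *flipped_src[1] = { pixels + (c->height - 1) * rgb_stride };
const int flipped_stride[1] = { -rgb_stride };
sws_scale(sws_context, flipped_src, flipped_stride, 0,
          c->height, frame->data, frame->linesize);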
And the function I use for the conversion:
static void ffmpeg_encoder_set_frame_yuv_from_rgb(uint8_t *rgb) {
    const int in_linesize[1] = { 3 * c->width };
    sws_context = sws_getCachedContext(sws_context,
        c->width, c->height, AV_PIX_FMT_RGB24,
        c->width, c->height, AV_PIX_FMT_YUV420P,
        0, 0, 0, 0);
    sws_scale(sws_context, (const uint8_t * const *)&rgb, in_linesize, 0,
              c->height, frame->data, frame->linesize);
}
All of the code can be found here.
Here is the line that causes the segfault.
Unfortunately, that function gives me a segmentation fault. Do you know how to solve this?

Best answer

The second argument of sws_scale is an array of pointers, const uint8_t *const srcSlice[], not a pointer cast like (const uint8_t * const *)&rgb. Put rgb into a pointer array and pass that array:

const uint8_t* const src_data[] = { rgb };

sws_scale(sws_context, src_data, in_linesize, 0,
c->height, frame->data, frame->linesize);
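
Applied to the function from the question, the fix would look roughly like this (a sketch that assumes the same globals c, frame and sws_context; SWS_BILINEAR is used as in the full example below):

static void ffmpeg_encoder_set_frame_yuv_from_rgb(uint8_t *rgb) {
    const int in_linesize[1] = { 3 * c->width };        // bytes per packed RGB24 row
    const uint8_t* const in_data[1] = { rgb };          // array of plane pointers, not &rgb

    sws_context = sws_getCachedContext(sws_context,
        c->width, c->height, AV_PIX_FMT_RGB24,
        c->width, c->height, AV_PIX_FMT_YUV420P,
        SWS_BILINEAR, nullptr, nullptr, nullptr);

    sws_scale(sws_context, in_data, in_linesize, 0,
              c->height, frame->data, frame->linesize);
}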

A complete example of converting RGB to YUV420P:
#include <stdio.h>
#include <string.h>
#include <stdint.h>

extern "C"
{
#include <libswscale/swscale.h>
#include <libavutil/imgutils.h>
}


int main()
{
    //Use FFmpeg for building a raw RGB image (used as input):
    //ffmpeg -y -f lavfi -i testsrc=size=192x108:rate=1 -vcodec rawvideo -pix_fmt rgb24 -frames 1 -f rawvideo rgb_image.bin

    const int width = 192;
    const int height = 108;
    uint8_t* rgb_in = new uint8_t[width * height * 3];

    const enum AVPixelFormat out_pix_fmt = AV_PIX_FMT_YUV420P;

    //Read the input image from a binary file (for testing)
    ////////////////////////////////////////////////////////////////////////////
    FILE* f = fopen("rgb_image.bin", "rb"); //For using fopen in Visual Studio, define: _CRT_SECURE_NO_WARNINGS (or use fopen_s).
    fread(rgb_in, 1, width * height * 3, f);
    fclose(f);
    ////////////////////////////////////////////////////////////////////////////

    //Allocate output buffers:
    ////////////////////////////////////////////////////////////////////////////
    // YUV420p data is separated into three planes:
    // 1. Y - intensity plane, resolution: width x height
    // 2. U - color plane, resolution: width/2 x height/2
    // 3. V - color plane, resolution: width/2 x height/2

    int out_linesize[4] = { 0, 0, 0, 0 };
    uint8_t* out_planes[4] = { nullptr, nullptr, nullptr, nullptr };

    int sts = av_image_alloc(out_planes,   //uint8_t * pointers[4],
                             out_linesize, //int linesizes[4],
                             width,        //int w,
                             height,       //int h,
                             out_pix_fmt,  //enum AVPixelFormat pix_fmt,
                             32);          //int align); //32-byte alignment may give faster execution than 1-byte alignment.

    if (sts < 0)
    {
        printf("Error: av_image_alloc response = %d\n", sts);
        return -1;
    }
    ////////////////////////////////////////////////////////////////////////////


    //Get SWS context
    ////////////////////////////////////////////////////////////////////////////
    struct SwsContext* sws_context = nullptr;

    sws_context = sws_getCachedContext(sws_context,      //struct SwsContext *context,
                                       width,            //int srcW,
                                       height,           //int srcH,
                                       AV_PIX_FMT_RGB24, //enum AVPixelFormat srcFormat,
                                       width,            //int dstW,
                                       height,           //int dstH,
                                       out_pix_fmt,      //enum AVPixelFormat dstFormat,
                                       SWS_BILINEAR,     //int flags,
                                       nullptr,          //SwsFilter *srcFilter,
                                       nullptr,          //SwsFilter *dstFilter,
                                       nullptr);         //const double *param);

    if (sws_context == nullptr)
    {
        printf("Error: sws_getCachedContext returned nullptr\n");
        return -1;
    }
    ////////////////////////////////////////////////////////////////////////////


    //Apply color conversion
    ////////////////////////////////////////////////////////////////////////////
    const int in_linesize[1] = { 3 * width }; // RGB stride
    const uint8_t* in_planes[1] = { rgb_in };

    int response = sws_scale(sws_context,   //struct SwsContext *c,
                             in_planes,     //const uint8_t *const srcSlice[],
                             in_linesize,   //const int srcStride[],
                             0,             //int srcSliceY,
                             height,        //int srcSliceH,
                             out_planes,    //uint8_t *const dst[],
                             out_linesize); //const int dstStride[]);

    if (response < 0)
    {
        printf("Error: sws_scale response = %d\n", response);
        return -1;
    }
    ////////////////////////////////////////////////////////////////////////////


    //Write the YUV420p output image to a binary file (for testing)
    //You may run FFmpeg after the conversion to check the output:
    //ffmpeg -y -f rawvideo -s 192x108 -pixel_format yuv420p -i yuv420p_image.bin rgb.png
    ////////////////////////////////////////////////////////////////////////////
    f = fopen("yuv420p_image.bin", "wb");
    fwrite(out_planes[0], 1, width * height, f);     //Y plane (assumes out_linesize[0] == width, which holds for 192x108 with 32-byte alignment)
    fwrite(out_planes[1], 1, width * height / 4, f); //U plane
    fwrite(out_planes[2], 1, width * height / 4, f); //V plane
    fclose(f);
    ////////////////////////////////////////////////////////////////////////////


    //Free allocated memory
    ////////////////////////////////////////////////////////////////////////////
    av_freep(out_planes); //av_image_alloc allocates one buffer for all planes; freeing the first plane pointer releases it
    sws_freeContext(sws_context);
    delete[] rgb_in;
    ////////////////////////////////////////////////////////////////////////////

    return 0;
}
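
To build and run a standalone test like this, you would typically link against libswscale and libavutil (for example via pkg-config --cflags --libs libswscale libavutil) and generate rgb_image.bin with the FFmpeg command shown in the comment at the top of main.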

Regarding c++ - How to convert from GL_RGB to AVFrame, we found a similar question on Stack Overflow: https://stackoverflow.com/questions/71808856/
