
c++ - FFMPEG error: Exactly one scaler algorithm must be chosen


I am currently working on an FFMPEG project. I am trying to convert an RGB image to a YUV image with this code (which I found on the internet last night):

void Decode::video_encode_example(const char *filename, int codec_id)
{
    AVCodec *codec;
    AVCodecContext *c = NULL;
    int i, ret, x, y, got_output;
    FILE *f;
    AVFrame *frame;
    AVPacket pkt;
    uint8_t endcode[] = { 0, 0, 1, 0xb7 };

    printf("Encode video file %s\n", filename);

    /* find the mpeg1 video encoder */
    codec = avcodec_find_encoder((enum AVCodecID)codec_id);
    if (!codec) {
        fprintf(stderr, "Codec not found\n");
        exit(1);
    }

    c = avcodec_alloc_context3(codec);
    if (!c) {
        fprintf(stderr, "Could not allocate video codec context\n");
        exit(2);
    }

    /* put sample parameters */
    c->bit_rate = 400000;
    /* resolution must be a multiple of two */
    c->width = 352;
    c->height = 288;
    /* frames per second */
    c->time_base = (AVRational){1,25};
    /* emit one intra frame every ten frames
     * check frame pict_type before passing frame
     * to encoder, if frame->pict_type is AV_PICTURE_TYPE_I
     * then gop_size is ignored and the output of encoder
     * will always be I frame irrespective to gop_size
     */
    c->gop_size = 10;
    c->max_b_frames = 1;
    c->pix_fmt = AV_PIX_FMT_YUV420P;

    if (codec_id == AV_CODEC_ID_H264)
        av_opt_set(c->priv_data, "preset", "slow", 0);

    /* open it */
    if (avcodec_open2(c, codec, NULL) < 0) {
        fprintf(stderr, "Could not open codec\n");
        exit(3);
    }

    f = fopen(filename, "wb");
    if (!f) {
        fprintf(stderr, "Could not open %s\n", filename);
        exit(4);
    }
    frame = avcodec_alloc_frame(); // in newer FFmpeg versions this is av_frame_alloc()
    if (!frame) {
        fprintf(stderr, "Could not allocate video frame\n");
        exit(5);
    }
    frame->format = c->pix_fmt;
    frame->width = c->width;
    frame->height = c->height;

    /* the image can be allocated by any means and av_image_alloc() is
     * just the most convenient way if av_malloc() is to be used */
    ret = av_image_alloc(frame->data, frame->linesize, c->width, c->height,
                         c->pix_fmt, 32);
    if (ret < 0) {
        fprintf(stderr, "Could not allocate raw picture buffer\n");
        exit(6);
    }

    //
    // RGB to YUV:
    // http://stackoverflow.com/questions/16667687/how-to-convert-rgb-from-yuv420p-for-ffmpeg-encoder
    //
    // Create some dummy RGB "frame"
    uint8_t *rgba32Data = new uint8_t[4*c->width*c->height];

    SwsContext *ctx = sws_getContext(c->width, c->height,
                                     AV_PIX_FMT_RGBA, c->width, c->height,
                                     AV_PIX_FMT_YUV420P, 0, 0, 0, 0);

    /* encode 1 second of video */
    for (i = 0; i < 25; i++) {
        av_init_packet(&pkt);
        pkt.data = NULL; // packet data will be allocated by the encoder
        pkt.size = 0;

        fflush(stdout);
        /* prepare a dummy image */
        /* Y */
        // for (y = 0; y < c->height; y++) {
        //     for (x = 0; x < c->width; x++) {
        //         frame->data[0][y * frame->linesize[0] + x] = x + y + i * 3;
        //     }
        // }
        //
        // /* Cb and Cr */
        // for (y = 0; y < c->height/2; y++) {
        //     for (x = 0; x < c->width/2; x++) {
        //         frame->data[1][y * frame->linesize[1] + x] = 128 + y + i * 2;
        //         frame->data[2][y * frame->linesize[2] + x] = 64 + x + i * 5;
        //     }
        // }

        uint8_t *pos = rgba32Data;
        for (y = 0; y < c->height; y++)
        {
            for (x = 0; x < c->width; x++)
            {
                pos[0] = i / (float)25 * 255;
                pos[1] = 0;
                pos[2] = x / (float)(c->width) * 255;
                pos[3] = 255;
                pos += 4;
            }
        }
        uint8_t *inData[1] = { rgba32Data }; // RGBA32 has one plane
        //
        // NOTE: In a more general setting, the rows of your input image may
        // be padded; that is, the bytes per row may not be 4 * width.
        // In such cases, inLinesize should be set to that padded width.
        //
        int inLinesize[1] = { 4*c->width }; // RGBA stride
        sws_scale(ctx, inData, inLinesize, 0, c->height, frame->data, frame->linesize);

        frame->pts = i;

        /* encode the image */
        ret = avcodec_encode_video2(c, &pkt, frame, &got_output);
        if (ret < 0) {
            fprintf(stderr, "Error encoding frame\n");
            exit(7);
        }

        if (got_output) {
            printf("Write frame %3d (size=%5d)\n", i, pkt.size);
            fwrite(pkt.data, 1, pkt.size, f);
            av_free_packet(&pkt);
        }
    }

    /* get the delayed frames */
    for (got_output = 1; got_output; i++) {
        fflush(stdout);

        ret = avcodec_encode_video2(c, &pkt, NULL, &got_output);
        if (ret < 0) {
            fprintf(stderr, "Error encoding frame\n");
            exit(8);
        }

        if (got_output) {
            printf("Write frame %3d (size=%5d)\n", i, pkt.size);
            fwrite(pkt.data, 1, pkt.size, f);
            av_free_packet(&pkt);
        }
    }

    /* add sequence end code to have a real mpeg file */
    fwrite(endcode, 1, sizeof(endcode), f);
    fclose(f);

    avcodec_close(c);
    av_free(c);
    av_freep(&frame->data[0]);
    avcodec_free_frame(&frame); // in newer FFmpeg versions this is av_frame_free()
    printf("\n");
}

int main()
{
    Decode d;

    avcodec_register_all();

    d.video_encode_example("/home/Dave/Desktop/test.mpg", AV_CODEC_ID_MPEG2VIDEO);
}

When I run this application, my Linux terminal shows the following error:

[swscaler @ 0x1e1dc60] Exactly one scaler algorithm must be chosen
Segmentation fault (core dumped)

I do not know what is actually happening here. Could you help me?

Best Answer

SwsContext * ctx = sws_getContext(c->width, c->height,
AV_PIX_FMT_RGBA, c->width, c->height,
AV_PIX_FMT_YUV420P, 0, 0, 0, 0);

Your "flags" argument (see the docs — the first of the four zeros in that list) must be non-zero. Valid values are the constants from SWS_FAST_BILINEAR through SWS_ERROR_DIFFUSION listed at the top of this page. A good default is to specify only the scaling algorithm, for example passing SWS_BICUBIC for bicubic interpolation. High-end scaling algorithms (such as SWS_SPLINE for spline interpolation) are more computationally expensive, while low-end ones (such as SWS_POINT for nearest-neighbour interpolation) tend to look worse, so the exact value you choose depends on how much CPU you are willing to spend versus how much you care about the visual quality of the result.
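As a minimal sketch of the fix, assuming the same variable names as the code in the question, the call would become:

SwsContext *ctx = sws_getContext(c->width, c->height, AV_PIX_FMT_RGBA,
                                 c->width, c->height, AV_PIX_FMT_YUV420P,
                                 SWS_BICUBIC, NULL, NULL, NULL);
if (!ctx) {
    fprintf(stderr, "Could not create scaling context\n");
    exit(9);
}

The segmentation fault follows from the same problem: with an invalid flags value sws_getContext returns NULL, and the code then passes that NULL context straight to sws_scale. Checking the return value, as above, turns the crash into a clean error message.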

Regarding c++ - FFMPEG error: Exactly one scaler algorithm must be chosen, we found a similar question on Stack Overflow: https://stackoverflow.com/questions/30501021/
