I am working on a C++ project using ffmpeg. I have to generate an MP4 file with H264 encoding.
My problem is that the file is generated, but when I read it with VLC I get no image, and analyzing it with ffprobe gives me this error (log below):
unspecified pixel format
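The exact ffprobe command is not shown in the original post; a plain invocation like the following is assumed to have produced the log that follows:
ffprobe C:\Users\Fabrice\Desktop\video\Test.mp4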
ffprobe version N-93020-g3224d6691c Copyright (c) 2007-2019 the FFmpeg developers
built with gcc 8.2.1 (GCC) 20181201
configuration: --disable-static --enable-shared --enable-gpl --enable-version3 --enable-sdl2 --enable-fontconfig --enable-gnutls --enable-iconv --enable-libass --enable-libbluray --enable-libfreetype --enable-libmp3lame --enable-libopencore-amrnb --enable-libopencore-amrwb --enable-libopenjpeg --enable-libopus --enable-libshine --enable-libsnappy --enable-libsoxr --enable-libtheora --enable-libtwolame --enable-libvpx --enable-libwavpack --enable-libwebp --enable-libx264 --enable-libx265 --enable-libxml2 --enable-libzimg --enable-lzma --enable-zlib --enable-gmp --enable-libvidstab --enable-libvorbis --enable-libvo-amrwbenc --enable-libmysofa --enable-libspeex --enable-libxvid --enable-libaom --enable-libmfx --enable-amf --enable-ffnvcodec --enable-cuvid --enable-d3d11va --enable-nvenc --enable-nvdec --enable-dxva2 --enable-avisynth --enable-libopenmpt
libavutil 56. 26.100 / 56. 26.100
libavcodec 58. 44.100 / 58. 44.100
libavformat 58. 26.100 / 58. 26.100
libavdevice 58. 6.101 / 58. 6.101
libavfilter 7. 48.100 / 7. 48.100
libswscale 5. 4.100 / 5. 4.100
libswresample 3. 4.100 / 3. 4.100
libpostproc 55. 4.100 / 55. 4.100
[h264 @ 02a46240] non-existing PPS 0 referenced
[h264 @ 02a46240] decode_slice_header error
[h264 @ 02a46240] no frame!
...
[h264 @ 02a46240] non-existing PPS 0 referenced
[h264 @ 02a46240] decode_slice_header error
[h264 @ 02a46240] no frame!
[mov,mp4,m4a,3gp,3g2,mj2 @ 02a35380] decoding for stream 0 failed
[mov,mp4,m4a,3gp,3g2,mj2 @ 02a35380] Could not find codec parameters for stream 0 (Video: h264 (avc1 / 0x31637661), none, 352x288, 320 kb/s): unspecified pixel format
Consider increasing the value for the 'analyzeduration' and 'probesize' options
Input #0, mov,mp4,m4a,3gp,3g2,mj2, from 'C:\Users\Fabrice\Desktop\video\Test.mp4':
Metadata:
major_brand : isom
minor_version : 512
compatible_brands: isomiso2avc1mp41
encoder : Lavf58.26.100
Duration: 00:00:09.00, start: 0.000000, bitrate: 323 kb/s
Stream #0:0(und): Video: h264 (avc1 / 0x31637661), none, 352x288, 320 kb/s, 25.11 fps, 25 tbr, 12800 tbn, 25600 tbc (default)
Metadata:
handler_name : VideoHandler
Here is the code I use to generate my mp4 file. It is based on a sample from ffmpeg (see:
FFMPEG Muxing sample). I tried to adapt it without using deprecated functions. It works with webm/vp8 encoding, but not with mp4/h264.
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
extern "C"
{
#include <libavcodec/avcodec.h>
#include <libswscale/swscale.h>
#include <libavformat/avformat.h>
#include <libavutil/error.h>
#include <libavutil/opt.h>
#include <libavutil/imgutils.h>
}
#pragma comment(lib, "avcodec.lib")
#pragma comment(lib, "swscale.lib")
#pragma comment(lib, "avformat.lib")
#pragma comment(lib, "avutil.lib")
/* 10 seconds stream duration */
#define STREAM_DURATION 10.0
#define STREAM_FRAME_RATE 25 /* 25 images/s */
#define STREAM_NB_FRAMES ((int)(STREAM_DURATION * STREAM_FRAME_RATE))
#define STREAM_PIX_FMT AV_PIX_FMT_YUV420P /* default pix_fmt */
//#pragma warning(disable : 4996) // TODO: remove
static int sws_flags = SWS_BICUBIC;
/* Add an output stream. */
static AVStream *add_stream(AVFormatContext *formatContext, AVCodec **codec, enum AVCodecID codecId, AVCodecContext **codecCtx)
{
AVStream *stream;
// Get the encoder codec
*codec = avcodec_find_encoder(codecId);
if (!(*codec)) {
fprintf(stderr, "Could not find encoder for '%s'\n",
avcodec_get_name(codecId));
exit(1);
}
// Get the stream for codec
stream = avformat_new_stream(formatContext, *codec);
if (!stream) {
fprintf(stderr, "Could not allocate stream\n");
exit(1);
}
stream->id = formatContext->nb_streams - 1;
(*codecCtx) = avcodec_alloc_context3(*codec);
switch ((*codec)->type) {
case AVMEDIA_TYPE_VIDEO:
stream->codecpar->codec_id = codecId;
stream->codecpar->codec_type = AVMEDIA_TYPE_VIDEO;
stream->codecpar->bit_rate = 400000;
stream->codecpar->width = 352;
stream->codecpar->height = 288;
stream->codecpar->format = STREAM_PIX_FMT;
stream->time_base = { 1, STREAM_FRAME_RATE };
avcodec_parameters_to_context((*codecCtx), stream->codecpar);
(*codecCtx)->gop_size = 12; /* emit one intra frame every twelve frames at most */
(*codecCtx)->max_b_frames = 2;
(*codecCtx)->time_base = { 1, STREAM_FRAME_RATE };
if ((*codecCtx)->codec_id == AV_CODEC_ID_MPEG1VIDEO) {
/* Needed to avoid using macroblocks in which some coeffs overflow.
* This does not happen with normal video, it just happens here as
* the motion of the chroma plane does not match the luma plane. */
(*codecCtx)->mb_decision = 2;
}
break;
default:
break;
}
//if (stream->codecpar->codec_id == AV_CODEC_ID_H264) {
// av_opt_set(codecCtx, "preset", "ultrafast", 0);
//}
//(*codecCtx)->flags |= AV_CODEC_FLAG_LOW_DELAY;
/* Some formats want stream headers to be separate. */
if (formatContext->oformat->flags & AVFMT_GLOBALHEADER)
(*codecCtx)->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
int ret = avcodec_parameters_from_context(stream->codecpar, (*codecCtx));
if (ret < 0) {
char error[255];
av_strerror(ret, error, 255);
fprintf(stderr, "avcodec_parameters_from_context returned (%d) - %s", ret, error);
return false;
}
return stream;
}
/**************************************************************/
/* video output */
static AVFrame *frame_video;
static int frame_count;
static void open_video(AVCodec *codec, AVStream *stream, AVCodecContext *codecCtx)
{
int ret;
/* open the codec */
ret = avcodec_open2(codecCtx, codec, NULL);
if (ret < 0) {
char error[255];
av_strerror(ret, error, 255);
fprintf(stderr, "Could not open video codec: %s\n", error);
exit(1);
}
/* allocate and init a re-usable frame */
frame_video = av_frame_alloc();
if (!frame_video) {
fprintf(stderr, "Could not allocate video frame\n");
exit(1);
}
frame_video->format = codecCtx->pix_fmt;
frame_video->width = codecCtx->width;
frame_video->height = codecCtx->height;
ret = av_frame_get_buffer(frame_video, 32);
if (ret < 0) {
fprintf(stderr, "Could not allocate the video frame data\n");
exit(1);
}
}
/* Prepare a dummy image. */
static void fill_yuv_image(AVFrame *pict, int frame_index, int width, int height)
{
int x, y, i;
i = frame_index;
/* Y */
for (y = 0; y < height; y++)
for (x = 0; x < width; x++)
pict->data[0][y * pict->linesize[0] + x] = x + y + i * 3;
/* Cb and Cr */
for (y = 0; y < height / 2; y++) {
for (x = 0; x < width / 2; x++) {
pict->data[1][y * pict->linesize[1] + x] = 128 + y + i * 2;
pict->data[2][y * pict->linesize[2] + x] = 64 + x + i * 5;
}
}
}
int timestamp = 0;
static void write_video_frame(AVFormatContext *formatContext, AVStream *stream, AVCodecContext *codecCtx)
{
int ret;
static struct SwsContext *sws_ctx;
if (frame_count >= STREAM_NB_FRAMES) {
/* No more frames to compress. The codec has a latency of a few
* frames if using B-frames, so we get the last frames by
* passing the same picture again. */
}
else {
if (codecCtx->pix_fmt != AV_PIX_FMT_YUV420P) {
/* as we only generate a YUV420P picture, we must convert it
* to the codec pixel format if needed */
if (!sws_ctx) {
sws_ctx = sws_getContext(codecCtx->width, codecCtx->height, AV_PIX_FMT_YUV420P,
codecCtx->width, codecCtx->height, codecCtx->pix_fmt,
sws_flags, NULL, NULL, NULL);
if (!sws_ctx) {
fprintf(stderr, "Could not initialize the conversion context\n");
exit(1);
}
}
fill_yuv_image(frame_video, frame_count, codecCtx->width, codecCtx->height);
sws_scale(sws_ctx, (const uint8_t * const *)frame_video->data, frame_video->linesize,
0, codecCtx->height, frame_video->data, frame_video->linesize);
}
else {
fill_yuv_image(frame_video, frame_count, codecCtx->width, codecCtx->height);
}
}
frame_video->format = AV_PIX_FMT_YUV420P;
frame_video->width = codecCtx->width;
frame_video->height = codecCtx->height;
if (formatContext->oformat->flags & 0x0020) { /* 0x0020: the old AVFMT_RAWPICTURE flag, removed from recent FFmpeg */
/* Raw video case - directly store the picture in the packet */
AVPacket pkt;
av_init_packet(&pkt);
pkt.flags |= AV_PKT_FLAG_KEY;
pkt.stream_index = stream->index;
pkt.data = frame_video->data[0];
pkt.size = sizeof(AVPicture);
ret = av_interleaved_write_frame(formatContext, &pkt);
}
else {
AVPacket pkt = { 0 };
av_init_packet(&pkt);
/* encode the image */
fprintf(stderr, "\nFrame type : %c\n", av_get_picture_type_char(frame_video->pict_type));
fprintf(stderr, "Frame pts: %lld, \n", frame_video->pts);
fprintf(stderr, "Codec timebase: %d/%d\n", codecCtx->time_base.num, codecCtx->time_base.den);
fprintf(stderr, "Stream timebase: %d/%d\n", stream->time_base.num, stream->time_base.den);
fprintf(stderr, "Resacale: %lld, \n\n", av_rescale_q(1, codecCtx->time_base, stream->time_base));
ret = avcodec_send_frame(codecCtx, frame_video);
if (ret < 0) {
char error[255];
av_strerror(ret, error, 255);
fprintf(stderr, "Error encoding video frame: %s\n", error);
exit(1);
}
/* If size is zero, it means the image was buffered. */
ret = avcodec_receive_packet(codecCtx, &pkt);
if (!ret && pkt.size) {
pkt.stream_index = stream->index;
fprintf(stderr, "Packet flags : %d\n", pkt.flags);
fprintf(stderr, "Packet pts: %lld\n", pkt.pts);
fprintf(stderr, "Packet dts: %lld\n", pkt.dts);
fprintf(stderr, "Packet duration: %lld\n", pkt.duration);
fprintf(stderr, "Packet pos: %lld\n\n", pkt.pos);
/* Write the compressed frame to the media file. */
ret = av_interleaved_write_frame(formatContext, &pkt);
}
else {
ret = 0;
}
}
if (ret != 0) {
char error[255];
av_strerror(ret, error, 255);
fprintf(stderr, "Error while writing video frame: %s\n", error);
exit(1);
}
frame_count++;
}
static void close_video(AVFormatContext *oc, AVStream *st)
{
av_free(frame_video->data[0]);
av_free(frame_video);
}
/**************************************************************/
/* media file output */
int main(int argc, char **argv)
{
// The output media file
char filename[100];
const char *mediaFormat = "mp4"; AVCodecID mediaVideoCodec = AV_CODEC_ID_H264;
//const char *mediaFormat="webm"; AVCodecID mediaVideoCodec = AV_CODEC_ID_VP8;
AVOutputFormat *formatOut;
AVFormatContext *formatCtx;
// The video stream
AVStream *stream_video;
AVCodec *codec_video = nullptr;
AVCodecContext *codecCtx_video = nullptr;
double time_video = 0;
// Return code
int ret;
strcpy_s(filename, "C:\\Test.");
strcat_s(filename, mediaFormat);
// allocate the output media context
avformat_alloc_output_context2(&formatCtx, NULL, NULL, filename);
if (!formatCtx) {
return 1;
}
formatOut = formatCtx->oformat;
// Add the video stream using H264 codec
stream_video = NULL;
stream_video = add_stream(formatCtx, &codec_video, mediaVideoCodec, &codecCtx_video);
// Open video codec and allocate the necessary encode buffers
if (stream_video)
open_video(codec_video, stream_video, codecCtx_video);
av_dump_format(formatCtx, 0, filename, 1);
// Open the output media file, if needed
if (!(formatOut->flags & AVFMT_NOFILE)) {
ret = avio_open(&formatCtx->pb, filename, AVIO_FLAG_WRITE);
if (ret < 0) {
char error[255];
av_strerror(ret, error, 255);
fprintf(stderr, "Could not open '%s': %s\n", filename, error);
return 1;
}
}
// Write media header
ret = avformat_write_header(formatCtx, NULL);
if (ret < 0) {
char error[255];
av_strerror(ret, error, 255);
fprintf(stderr, "Error occurred when opening output file: %s\n", error);
return 1;
}
if (frame_video)
frame_video->pts = 0;
for (;;) {
// Compute video time from last added video frame
time_video = ((double)frame_video->pts) * av_q2d(stream_video->time_base);
// Stop media if enough time
if (!stream_video || time_video >= STREAM_DURATION)
break;
// Add a video frame
write_video_frame(formatCtx, stream_video, codecCtx_video);
// Increase frame pts according to time base
frame_video->pts += av_rescale_q(1, codecCtx_video->time_base, stream_video->time_base);
}
// Write media trailer
av_write_trailer(formatCtx);
/* Close each codec. */
if (stream_video)
close_video(formatCtx, stream_video);
if (!(formatOut->flags & AVFMT_NOFILE))
/* Close the output file. */
avio_close(formatCtx->pb);
/* free the stream */
avformat_free_context(formatCtx);
return 0;
}
What am I missing? Which part is giving me this error?
Best Answer
I had to comment out these lines to make it work:
if (formatContext->oformat->flags & AVFMT_GLOBALHEADER)
(*codecCtx)->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
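An alternative worth noting, as a sketch only (it reuses the question's variable names and is not the accepted answer's approach): the "non-existing PPS 0 referenced" errors come from the order of the calls. With AV_CODEC_FLAG_GLOBAL_HEADER set, the encoder only fills codecCtx->extradata with the SPS/PPS during avcodec_open2(), but add_stream() copies the parameters into stream->codecpar before the codec is opened, so the MP4 muxer writes an avcC box without SPS/PPS. Keeping the flag and copying the parameters again after opening the codec should also work:
// Sketch, assuming the question's add_stream()/open_video() split:
// keep the global-header flag...
if (formatContext->oformat->flags & AVFMT_GLOBALHEADER)
    codecCtx->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
// ...open the encoder so it generates codecCtx->extradata (SPS/PPS)...
ret = avcodec_open2(codecCtx, codec, NULL);
// ...and only then copy the parameters (including the extradata) to the stream,
// so avformat_write_header() can place the SPS/PPS in the avcC box.
if (ret >= 0)
    ret = avcodec_parameters_from_context(stream->codecpar, codecCtx);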
Here is the full code with some corrections:
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
extern "C"
{
#include <libavcodec/avcodec.h>
#include <libswscale/swscale.h>
#include <libavformat/avformat.h>
#include <libavutil/error.h>
#include <libavutil/opt.h>
#include <libavutil/imgutils.h>
}
#pragma comment(lib, "avcodec.lib")
#pragma comment(lib, "swscale.lib")
#pragma comment(lib, "avformat.lib")
#pragma comment(lib, "avutil.lib")
/* 100 seconds stream duration */
#define STREAM_DURATION 100.0
#define STREAM_FRAME_RATE 25 /* 25 images/s */
#define STREAM_NB_FRAMES ((int)(STREAM_DURATION * STREAM_FRAME_RATE))
#define STREAM_PIX_FMT AV_PIX_FMT_YUV420P /* default pix_fmt */
//#pragma warning(disable : 4996) // TODO: remove
static int sws_flags = SWS_BICUBIC;
static int __WritePacket(void* opaque, uint8_t* buf, int buf_size)
{
FILE *f = (FILE *)opaque;
fprintf(stderr, "writen : %d\n", fwrite(buf, sizeof(uint8_t), buf_size, f));
fflush(f);
return buf_size;
}
/* Add an output stream. */
static AVStream *add_stream(AVFormatContext *formatContext, AVCodec **codec, enum AVCodecID codecId, AVCodecContext **codecCtx)
{
AVStream *stream;
// Get the encoder codec
*codec = avcodec_find_encoder(codecId);
if (!(*codec)) {
fprintf(stderr, "Could not find encoder for '%s'\n",
avcodec_get_name(codecId));
exit(1);
}
// Get the stream for codec
stream = avformat_new_stream(formatContext, *codec);
if (!stream) {
fprintf(stderr, "Could not allocate stream\n");
exit(1);
}
stream->id = formatContext->nb_streams - 1;
(*codecCtx) = avcodec_alloc_context3(*codec);
switch ((*codec)->type) {
case AVMEDIA_TYPE_VIDEO:
stream->codecpar->codec_id = codecId;
stream->codecpar->codec_type = AVMEDIA_TYPE_VIDEO;
stream->codecpar->bit_rate = 400000;
stream->codecpar->width = 352;
stream->codecpar->height = 288;
stream->codecpar->format = STREAM_PIX_FMT;
stream->codecpar->codec_tag = 0x31637661;
stream->codecpar->video_delay = 0;
stream->time_base = { 1, STREAM_FRAME_RATE };
avcodec_parameters_to_context((*codecCtx), stream->codecpar);
(*codecCtx)->gop_size = 12; /* emit one intra frame every twelve frames at most */
(*codecCtx)->max_b_frames = 2;
(*codecCtx)->time_base = { 1, STREAM_FRAME_RATE };
(*codecCtx)->framerate = { STREAM_FRAME_RATE, 1 };
(*codecCtx)->pix_fmt = STREAM_PIX_FMT;
if ((*codecCtx)->codec_id == AV_CODEC_ID_MPEG1VIDEO) {
/* Needed to avoid using macroblocks in which some coeffs overflow.
* This does not happen with normal video, it just happens here as
* the motion of the chroma plane does not match the luma plane. */
(*codecCtx)->mb_decision = 2;
}
break;
default:
break;
}
/* Some formats want stream headers to be separate. */
// Setting this option makes the video stream unreadable.
// if (formatContext->oformat->flags & AVFMT_GLOBALHEADER)
// (*codecCtx)->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
int ret = avcodec_parameters_from_context(stream->codecpar, (*codecCtx));
if (ret < 0) {
char error[255];
av_strerror(ret, error, 255);
fprintf(stderr, "avcodec_parameters_from_context returned (%d) - %s", ret, error);
return false;
}
return stream;
}
/**************************************************************/
/* video output */
static AVFrame *frame_video;
static int frame_count;
static void open_video(AVCodec *codec, AVStream *stream, AVCodecContext *codecCtx)
{
int ret;
/* open the codec */
ret = avcodec_open2(codecCtx, codec, NULL);
if (ret < 0) {
char error[255];
av_strerror(ret, error, 255);
fprintf(stderr, "Could not open video codec: %s\n", error);
exit(1);
}
/* allocate and init a re-usable frame */
frame_video = av_frame_alloc();
if (!frame_video) {
fprintf(stderr, "Could not allocate video frame\n");
exit(1);
}
frame_video->format = codecCtx->pix_fmt;
frame_video->width = codecCtx->width;
frame_video->height = codecCtx->height;
ret = av_frame_get_buffer(frame_video, 32);
if (ret < 0) {
fprintf(stderr, "Could not allocate the video frame data\n");
exit(1);
}
}
/* Prepare a dummy image. */
static void fill_yuv_image(AVFrame *pict, int frame_index, int width, int height)
{
int x, y, i;
i = frame_index;
/* Y */
for (y = 0; y < height; y++)
for (x = 0; x < width; x++)
pict->data[0][y * pict->linesize[0] + x] = x + y + i * 3;
/* Cb and Cr */
for (y = 0; y < height / 2; y++) {
for (x = 0; x < width / 2; x++) {
pict->data[1][y * pict->linesize[1] + x] = 128 + y + i * 2;
pict->data[2][y * pict->linesize[2] + x] = 64 + x + i * 5;
}
}
}
int timestamp = 0;
static void write_video_frame(AVFormatContext *formatContext, AVStream *stream, AVCodecContext *codecCtx)
{
int ret;
static struct SwsContext *sws_ctx;
if (frame_count >= STREAM_NB_FRAMES) {
/* No more frames to compress. The codec has a latency of a few
* frames if using B-frames, so we get the last frames by
* passing the same picture again. */
}
else {
if (codecCtx->pix_fmt != STREAM_PIX_FMT) {
/* as we only generate a YUV420P picture, we must convert it
* to the codec pixel format if needed */
if (!sws_ctx) {
sws_ctx = sws_getContext(codecCtx->width, codecCtx->height, STREAM_PIX_FMT,
codecCtx->width, codecCtx->height, codecCtx->pix_fmt,
sws_flags, NULL, NULL, NULL);
if (!sws_ctx) {
fprintf(stderr, "Could not initialize the conversion context\n");
exit(1);
}
}
fill_yuv_image(frame_video, frame_count, codecCtx->width, codecCtx->height);
sws_scale(sws_ctx, (const uint8_t * const *)frame_video->data, frame_video->linesize,
0, codecCtx->height, frame_video->data, frame_video->linesize);
}
else {
fill_yuv_image(frame_video, frame_count, codecCtx->width, codecCtx->height);
}
}
frame_video->format = STREAM_PIX_FMT;
frame_video->width = codecCtx->width;
frame_video->height = codecCtx->height;
if (formatContext->oformat->flags & 0x0020) { /* 0x0020: the old AVFMT_RAWPICTURE flag, removed from recent FFmpeg */
/* Raw video case - directly store the picture in the packet */
AVPacket pkt;
av_init_packet(&pkt);
pkt.flags |= AV_PKT_FLAG_KEY;
pkt.stream_index = stream->index;
pkt.data = frame_video->data[0];
pkt.size = sizeof(AVPicture);
ret = av_interleaved_write_frame(formatContext, &pkt);
}
else {
AVPacket packet = { 0 };
av_init_packet(&packet);
/* encode the image */
fprintf(stderr, "\nFrame type : %c\n", av_get_picture_type_char(frame_video->pict_type));
fprintf(stderr, "Frame pts: %lld, \n", frame_video->pts);
fprintf(stderr, "Codec timebase: %d/%d\n", codecCtx->time_base.num, codecCtx->time_base.den);
fprintf(stderr, "Stream timebase: %d/%d\n", stream->time_base.num, stream->time_base.den);
fprintf(stderr, "Resacale: %lld, \n\n", av_rescale_q(1, codecCtx->time_base, stream->time_base));
ret = avcodec_send_frame(codecCtx, frame_video);
if (ret < 0) {
char error[255];
av_strerror(ret, error, 255);
fprintf(stderr, "Error encoding video frame: %s\n", error);
exit(1);
}
/* If size is zero, it means the image was buffered. */
ret = avcodec_receive_packet(codecCtx, &packet);
if (!ret && packet.size) {
packet.stream_index = stream->index;
fprintf(stderr, "Packet flags : %d\n", packet.flags);
fprintf(stderr, "Packet pts: %lld\n", packet.pts);
fprintf(stderr, "Packet dts: %lld\n", packet.dts);
fprintf(stderr, "Packet duration: %lld\n", packet.duration);
fprintf(stderr, "Packet pos: %lld\n\n", packet.pos);
FILE *f = nullptr;
fopen_s(&f, "C:\\Users\\Fabrice\\Desktop\\video\\Test.h264", "wb");
fwrite(packet.data, sizeof(uint8_t), packet.size, f);
fclose(f);
/* Write the compressed frame to the media file. */
ret = av_interleaved_write_frame(formatContext, &packet);
}
else {
ret = 0;
}
}
if (ret != 0) {
char error[255];
av_strerror(ret, error, 255);
fprintf(stderr, "Error while writing video frame: %s\n", error);
exit(1);
}
frame_count++;
}
static void close_video(AVFormatContext *oc, AVStream *st)
{
av_free(frame_video->data[0]);
av_free(frame_video);
}
/**************************************************************/
/* media file output */
int main(int argc, char **argv)
{
// The output media file
char filename[100];
const char *mediaFormat = "mp4"; AVCodecID mediaVideoCodec = AV_CODEC_ID_H264;
//const char *mediaFormat="webm"; AVCodecID mediaVideoCodec = AV_CODEC_ID_VP8;
AVOutputFormat *formatOut;
AVFormatContext *formatCtx;
// The video stream
AVStream *stream_video;
AVCodec *codec_video = nullptr;
AVCodecContext *codecCtx_video = nullptr;
double time_video = 0;
// Return code
int ret;
strcpy_s(filename, "C:\\Users\\Fabrice\\Desktop\\video\\Test.");
strcat_s(filename, mediaFormat);
remove("C:\\Users\\Fabrice\\Desktop\\video\\Test.h264");
remove(filename);
// allocate the output media context
avformat_alloc_output_context2(&formatCtx, NULL, NULL, filename);
if (!formatCtx) {
return 1;
}
formatOut = formatCtx->oformat;
// Add the video stream using H264 codec
stream_video = NULL;
stream_video = add_stream(formatCtx, &codec_video, mediaVideoCodec, &codecCtx_video);
// Open video codec and allocate the necessary encode buffers
if (stream_video)
open_video(codec_video, stream_video, codecCtx_video);
av_dump_format(formatCtx, 0, filename, 1);
/*// Open the output media file, if needed
if (!(formatOut->flags & AVFMT_NOFILE)) {
ret = avio_open(&formatCtx->pb, filename, AVIO_FLAG_WRITE);
if (ret < 0) {
char error[255];
av_strerror(ret, error, 255);
fprintf(stderr, "Could not open '%s': %s\n", filename, error);
return 1;
}
}*/
uint8_t *ioBuffer = (uint8_t*)av_malloc(4096);
if (!ioBuffer) {
return 1;
}
FILE *f = nullptr;
fopen_s(&f, filename, "wb");
AVIOContext *ioCtx = avio_alloc_context(ioBuffer, 4096, 1, f, NULL, __WritePacket, NULL);
if (!ioCtx) {
return 1;
}
formatCtx->pb = ioCtx;
formatCtx->flush_packets = 1;
fprintf(stderr, "Stream timebase: %d/%d\n", stream_video->time_base.num, stream_video->time_base.den);
// Fragmented mp4
AVDictionary* opts = NULL;
av_dict_set(&opts, "movflags", "frag_keyframe+empty_moov", 0);
// Write media header
ret = avformat_write_header(formatCtx, &opts);
if (ret < 0) {
char error[255];
av_strerror(ret, error, 255);
fprintf(stderr, "Error occurred when opening output file: %s\n", error);
return 1;
}
fprintf(stderr, "Stream timebase: %d/%d\n", stream_video->time_base.num, stream_video->time_base.den);
if (frame_video)
frame_video->pts = 0;
for (;;) {
// Compute video time from last added video frame
time_video = ((double)frame_video->pts) * av_q2d(stream_video->time_base);
// Stop media if enough time
if (!stream_video || time_video >= STREAM_DURATION)
break;
// Add a video frame
write_video_frame(formatCtx, stream_video, codecCtx_video);
// Increase frame pts according to time base
frame_video->pts += av_rescale_q(1, codecCtx_video->time_base, stream_video->time_base);
}
// Write media trailer
av_write_trailer(formatCtx);
/* Close each codec. */
if (stream_video)
close_video(formatCtx, stream_video);
/* free the stream */
avformat_free_context(formatCtx);
return 0;
}
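As a quick sanity check (not part of the original answer), re-probing the output, for example with ffprobe -show_streams on the generated Test.mp4, should now report yuv420p for the video stream instead of the "none, 352x288" / "unspecified pixel format" seen in the question's log.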
Regarding c++ - FFMPEG API Mp4 H264 encoding/muxing - unspecified pixel format, we found a similar question on Stack Overflow: https://stackoverflow.com/questions/63017690/