I am writing a screen recorder using the FFmpeg libraries, but the output only shows a green-screen video.
My code is as follows:
#define __STDC_CONSTANT_MACROS
#include<iostream>
extern "C"
{
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
#include <libavdevice/avdevice.h>
#include <libavutil/imgutils.h>
#include <libswscale/swscale.h>
//#include <libswscale/swscale.h>
}
using namespace std;
//const char* out_filename = "D:\\my\\folder\\to\\Frame.jpg";
AVCodecContext* GetCodecContextFromPar(AVCodecParameters * par)
{
AVCodecContext* cntxt = NULL;
cntxt = avcodec_alloc_context3(avcodec_find_decoder(par->codec_id));
avcodec_parameters_to_context(cntxt,par);
return cntxt;
}
int AvCodecDecodeVideo2(AVCodecContext* avctx, AVFrame* frame, int* got_picture_ptr, const AVPacket* avpkt)
{
int ret = avcodec_send_packet(avctx, avpkt);
if (ret < 0)
{
return -1;
*got_picture_ptr = 0;
}
while (ret >= 0)
{
ret = avcodec_receive_frame(avctx, frame);
}
*got_picture_ptr = 1;
return 0;
}
int AvCodecEncodeVideo2(AVCodecContext* avctx, AVPacket* avpkt, const AVFrame* frame, int* got_packet_ptr)
{
char str2[] = "";
int Res = avcodec_send_frame(avctx, frame);
while (Res >= 0)
{
Res = avcodec_send_frame(avctx, frame);
}
//avcodec_encode_video2
Res = avcodec_receive_packet(avctx, avpkt);
if (Res == 0)
{
*got_packet_ptr = 1;
return 0;
}
cout << "\nError :" << av_make_error_string(str2, sizeof(str2), Res);
return -1;
}
int main(int argc, char** argv)
{
const char* out_filename = "D:\\myfolder\\to\\output\\new_out.mp4";
avdevice_register_all();
AVOutputFormat* ofmt = NULL;
AVInputFormat* ifmt = NULL;
AVFormatContext* ifmt_ctx = avformat_alloc_context();
AVFormatContext* ofmt_ctx = avformat_alloc_context();
AVCodecParameters * av_codec_par_in = avcodec_parameters_alloc();
AVCodecParameters* av_codec_par_out = avcodec_parameters_alloc();
AVCodecContext* avcodec_contx = NULL;
AVCodec* av_codec;
AVStream* video_stream = NULL;
av_codec_par_out->height = 480;
av_codec_par_out->width = 640;
av_codec_par_out->bit_rate = 40000;
av_codec_par_out->codec_id = AV_CODEC_ID_MPEG4;
av_codec_par_out->codec_type = AVMEDIA_TYPE_VIDEO;
av_codec_par_out->format = 0;
av_codec_par_out->sample_aspect_ratio.den = 3;
av_codec_par_out->sample_aspect_ratio.num = 4;
AVDictionary* options = NULL;
av_dict_set(&options,"framerate","30",0);
av_dict_set(&options,"offset_x","20",0);
av_dict_set(&options,"offset_y","40",0);
av_dict_set(&options,"video_size","640x480",0);
int ret, i;
ifmt = av_find_input_format("gdigrab");
if (avformat_open_input(&ifmt_ctx, "desktop", ifmt, &options) <0)
{
cout << "Error in opening file";
exit(1);
}
int VideoStreamIndx = -1;
avformat_find_stream_info(ifmt_ctx, NULL);
/* find the first video stream index . Also there is an API available to do the below operations */
for (int i = 0; i < ifmt_ctx->nb_streams; i++) // find video stream position/index.
{
if (ifmt_ctx->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_VIDEO)
{
VideoStreamIndx = i;
break;
}
}
if (VideoStreamIndx == -1)
{
cout << "\nunable to find the video stream index. (-1)";
exit(1);
}
av_codec_par_in = ifmt_ctx->streams[VideoStreamIndx]->codecpar;
av_codec = avcodec_find_decoder(av_codec_par_in->codec_id);
if (av_codec == NULL)
{
cout << "\nunable to find the decoder";
exit(1);
}
avcodec_contx = avcodec_alloc_context3(av_codec);
if (avcodec_parameters_to_context(avcodec_contx, av_codec_par_in) < 0)
{
cout << "\nerror in converting the codec contexts";
exit(1);
}
//av_dict_set
int value = avcodec_open2(avcodec_contx, av_codec, NULL);//Initialize the AVCodecContext to use the given AVCodec.
if (value < 0)
{
cout << "\nunable to open the av codec";
exit(1);
}
value = 0;
ofmt = av_guess_format(NULL, out_filename, NULL);
if (!ofmt)
{
cout << "\nerror in guessing the video format. try with correct format";
exit(1);
}
avformat_alloc_output_context2(&ofmt_ctx, ofmt, NULL, out_filename);
if (!ofmt_ctx)
{
cout << "\nerror in allocating av format output context";
exit(1);
}
AVCodec * av_codec_out = avcodec_find_encoder(av_codec_par_out->codec_id);
if (av_codec_out == NULL)
{
cout << "\nunable to find the encoder";
exit(1);
}
video_stream = avformat_new_stream(ofmt_ctx, av_codec_out);
if (!video_stream)
{
cout << "\nerror in creating a av format new stream";
exit(1);
}
AVCodecContext* av_cntx_out;
av_cntx_out = avcodec_alloc_context3(av_codec_out);
if (!av_cntx_out)
{
cout << "\nerror in allocating the codec contexts";
exit(1);
}
if (avcodec_parameters_copy(video_stream->codecpar, av_codec_par_out) < 0)
{
cout << "\nCodec parameter canot copied";
exit(1);
}
if (avcodec_parameters_to_context(av_cntx_out, av_codec_par_out) < 0)
{
cout << "\nerror in converting the codec contexts";
exit(1);
}
//av_cntx_out->pix_fmt = AV_PIX_FMT_YUV420P;
av_cntx_out->gop_size = 3;
av_cntx_out->max_b_frames = 2;
av_cntx_out->time_base.num = 1;
av_cntx_out->time_base.den = 30; //
value = avcodec_open2(av_cntx_out, av_codec_out, NULL);//Initialize the AVCodecContext to use the given AVCodec.
if (value < 0)
{
cout << "\nunable to open the av codec";
exit(1);
}
if (avcodec_contx->codec_id == AV_CODEC_ID_H264)
{
av_opt_set(av_cntx_out->priv_data, "preset", "slow", 0);
}
avio_open(&ofmt_ctx->pb, out_filename, AVIO_FLAG_READ_WRITE);
if (ofmt_ctx->oformat->flags & AVFMT_GLOBALHEADER)
{
av_cntx_out->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
}
if (avformat_write_header(ofmt_ctx, NULL) < 0)
{
cout << "\nerror in writing the header context";
exit(1);
}
AVPacket * av_pkt = av_packet_alloc();
av_init_packet(av_pkt);
AVFrame * av_frame = av_frame_alloc();
if (!av_frame)
{
cout << "\nunable to release the avframe resources";
exit(1);
}
AVFrame * outFrame = av_frame_alloc();//Allocate an AVFrame and set its fields to default values.
if (!outFrame)
{
cout << "\nunable to release the avframe resources for outframe";
exit(1);
}
//int video_outbuf_size;
//int nbytes = av_image_get_buffer_size(av_cntx_out->pix_fmt, av_cntx_out->width, av_cntx_out->height, 32);
//uint8_t* video_outbuf = (uint8_t*)av_malloc(nbytes);
//if (video_outbuf == NULL)
//{
// cout << "\nunable to allocate memory";
// exit(1);
//}
av_frame->width = avcodec_contx->width;
av_frame->height = avcodec_contx->height;
av_frame->format = av_codec_par_in->format;
outFrame->width = av_cntx_out->width;
outFrame->height = av_cntx_out->height;
outFrame->format = av_codec_par_out->format;
av_frame_get_buffer(av_frame, 0);
av_frame_get_buffer(outFrame, 0);
//value = av_image_fill_arrays(outFrame->data, outFrame->linesize, video_outbuf, av_cntx_out->pix_fmt, av_cntx_out->width, av_cntx_out->height, 32); // returns : the size in bytes required for src
//if (value < 0)
//{
// cout << "\nerror in filling image array";
//}
SwsContext* swsCtx = sws_alloc_context();
if (sws_init_context(swsCtx, NULL, NULL) < 0)
{
cout << "\nUnable to Initialize the swscaler context sws_context.";
exit(1);
}
swsCtx = sws_getContext(avcodec_contx->width, avcodec_contx->height, avcodec_contx->pix_fmt,
av_cntx_out->width, av_cntx_out->height, av_cntx_out->pix_fmt,
SWS_FAST_BILINEAR, NULL, NULL, NULL);
if (swsCtx == NULL)
{
cout << "\n Cannot alocate SWC Context";
exit(1);
}
int ii = 0;
int no_frames = 100;
cout << "\nenter No. of frames to capture : ";
cin >> no_frames;
int flag;
int frameFinished;
int got_picture;
int frame_index = 0;
AVPacket * outPacket=av_packet_alloc();
int j = 0;
while (av_read_frame(ifmt_ctx, av_pkt) >= 0)
{
if (ii++ == no_frames)break;
if (av_pkt->stream_index == VideoStreamIndx)
{
value = AvCodecDecodeVideo2(avcodec_contx, av_frame, &frameFinished, av_pkt);
if (value < 0)
{
cout << "unable to decode video";
exit(1);
}
if (frameFinished)// Frame successfully decoded :)
{
av_init_packet(outPacket);
//int iHeight =sws_scale(swsCtx, av_frame->data, av_frame->linesize, 0, avcodec_contx->height, outFrame->data, outFrame->linesize);
outPacket->data = NULL; // packet data will be allocated by the encoder
outPacket->size = 0;
if (AvCodecEncodeVideo2(av_cntx_out, outPacket, outFrame, &got_picture) < 0)
{
cout << "unable to encode video";
exit(1);
}
if (got_picture)
{
if (outPacket->pts != AV_NOPTS_VALUE)
outPacket->pts = av_rescale_q(outPacket->pts, av_cntx_out->time_base, video_stream->time_base);
if (outPacket->dts != AV_NOPTS_VALUE)
outPacket->dts = av_rescale_q(outPacket->dts, av_cntx_out->time_base, video_stream->time_base);
printf("Write frame %3d (size= %2d)\n", j++, outPacket->size / 1000);
if (av_write_frame(ofmt_ctx, outPacket) != 0)
{
cout << "\nerror in writing video frame";
}
av_packet_unref(outPacket);
} // got_picture
av_packet_unref(outPacket);
} // frameFinished
}
}// End of while-loop
value = av_write_trailer(ofmt_ctx);
if (value < 0)
{
cout << "\nerror in writing av trailer";
exit(1);
}
//THIS WAS ADDED LATER
/*av_free(video_outbuf);*/
avformat_close_input(&ifmt_ctx);
if (!ifmt_ctx)
{
cout << "\nfile closed sucessfully";
}
else
{
cout << "\nunable to close the file";
exit(1);
}
avformat_free_context(ifmt_ctx);
if (!ifmt_ctx)
{
cout << "\navformat free successfully";
}
else
{
cout << "\nunable to free avformat context";
exit(1);
}
return 0;
}
I don't understand what is wrong with my code.
Best Answer
The green video is the result of encoding zero-filled frames in the YUV color space (a pixel whose Y, U, V values all equal 0 is displayed as a green pixel).
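As a quick sanity check (my own illustration, not part of the original answer), plugging Y = U = V = 0 into the standard BT.601 YUV-to-RGB conversion shows why the frame renders green:

#include <algorithm>
#include <cstdio>
// Minimal sketch: BT.601 YCbCr -> RGB for a single pixel, demonstrating
// why a zero-filled YUV frame is displayed as solid green.
int main()
{
    const double Y = 0.0, U = 0.0, V = 0.0; // pixel from a zero-filled frame
    // BT.601 full-range conversion; U and V are centered around 128.
    double r = Y + 1.402 * (V - 128.0);
    double g = Y - 0.344 * (U - 128.0) - 0.714 * (V - 128.0);
    double b = Y + 1.772 * (U - 128.0);
    // Clamp to the displayable 0..255 range.
    auto clamp = [](double x) { return std::min(255.0, std::max(0.0, x)); };
    // Prints approximately R=0 G=135 B=0 -- a green pixel.
    std::printf("R=%.0f G=%.0f B=%.0f\n", clamp(r), clamp(g), clamp(b));
    return 0;
}

So a YUV frame whose buffers are allocated but never written shows up as a solid green picture.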
You are configuring sws_getContext, but you never actually use it.
After decoding a grabbed video frame, you get a frame in BGRA pixel format.
The frame has to be converted from BGRA to YUV420p pixel format, and the converted frame passed to the output encoder.
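In terms of your variable names, the essential missing step is just two libswscale calls (a minimal sketch; the full corrected listing below integrates them into the capture loop):

// Sketch: convert each decoded BGRA frame to YUV420p before encoding.
SwsContext* sws = sws_getContext(avcodec_contx->width, avcodec_contx->height, AV_PIX_FMT_BGRA,
                                 av_cntx_out->width, av_cntx_out->height, AV_PIX_FMT_YUV420P,
                                 SWS_FAST_BILINEAR, NULL, NULL, NULL);
// av_frame holds the decoded BGRA picture, outFrame the YUV420p destination:
sws_scale(sws, av_frame->data, av_frame->linesize, 0, av_frame->height,
          outFrame->data, outFrame->linesize);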
While executing your code sample, I encountered some strange behavior.
I tried to fix the code using code snippets from the following post.
Notes: the changes are marked with inline comments in the code below; the places I am not sure about are tagged with ???.
#define __STDC_CONSTANT_MACROS
#include<iostream>
extern "C"
{
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
#include <libavdevice/avdevice.h>
#include <libavutil/imgutils.h>
#include <libswscale/swscale.h>
}
using namespace std;
AVCodecContext* GetCodecContextFromPar(AVCodecParameters* par)
{
AVCodecContext* cntxt = NULL;
cntxt = avcodec_alloc_context3(avcodec_find_decoder(par->codec_id));
avcodec_parameters_to_context(cntxt, par);
return cntxt;
}
int AvCodecDecodeVideo2(AVCodecContext* avctx, AVFrame* frame, int* got_picture_ptr, const AVPacket* avpkt)
{
int ret = avcodec_send_packet(avctx, avpkt);
if (ret < 0)
{
return -1;
*got_picture_ptr = 0;
}
while (ret >= 0)
{
ret = avcodec_receive_frame(avctx, frame);
}
*got_picture_ptr = 1;
return 0;
}
int main(int argc, char** argv)
{
//const char* out_filename = "D:\\myfolder\\to\\output\\new_out.mp4";
const char* out_filename = "new_out.mp4";
avdevice_register_all();
AVOutputFormat* ofmt = NULL;
AVInputFormat* ifmt = NULL;
AVFormatContext* ifmt_ctx = avformat_alloc_context();
AVFormatContext* ofmt_ctx = avformat_alloc_context();
AVCodecParameters* av_codec_par_in = avcodec_parameters_alloc();
AVCodecParameters* av_codec_par_out = avcodec_parameters_alloc();
AVCodecContext* avcodec_contx = NULL;
AVCodec* av_codec;
AVStream* video_stream = NULL;
av_codec_par_out->height = 480;
av_codec_par_out->width = 640;
av_codec_par_out->bit_rate = 40000;
av_codec_par_out->codec_id = AV_CODEC_ID_H264; //AV_CODEC_ID_MPEG4; //Try H.264 instead of MPEG4
av_codec_par_out->codec_type = AVMEDIA_TYPE_VIDEO;
av_codec_par_out->format = 0;
av_codec_par_out->sample_aspect_ratio.den = 3;
av_codec_par_out->sample_aspect_ratio.num = 4;
AVDictionary* options = NULL;
av_dict_set(&options, "framerate", "30", 0);
av_dict_set(&options, "offset_x", "20", 0);
av_dict_set(&options, "offset_y", "40", 0);
av_dict_set(&options, "video_size", "640x480", 0);
//int ret, i;
ifmt = av_find_input_format("gdigrab");
if (avformat_open_input(&ifmt_ctx, "desktop", ifmt, &options) < 0)
{
cout << "Error in opening file";
exit(1);
}
int VideoStreamIndx = -1;
avformat_find_stream_info(ifmt_ctx, NULL);
/* find the first video stream index . Also there is an API available to do the below operations */
for (int i = 0; i < (int)ifmt_ctx->nb_streams; i++) // find video stream position/index.
{
if (ifmt_ctx->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_VIDEO)
{
VideoStreamIndx = i;
break;
}
}
if (VideoStreamIndx == -1)
{
cout << "\nunable to find the video stream index. (-1)";
exit(1);
}
av_codec_par_in = ifmt_ctx->streams[VideoStreamIndx]->codecpar;
av_codec = avcodec_find_decoder(av_codec_par_in->codec_id);
if (av_codec == NULL)
{
cout << "\nunable to find the decoder";
exit(1);
}
avcodec_contx = avcodec_alloc_context3(av_codec);
//Consider using preset and crf
//av_opt_set(avcodec_contx->priv_data, "preset", "fast", 0);
//av_opt_set(avcodec_contx->priv_data, "crf", "18", 0);
if (avcodec_parameters_to_context(avcodec_contx, av_codec_par_in) < 0)
{
cout << "\nerror in converting the codec contexts";
exit(1);
}
//av_dict_set
int value = avcodec_open2(avcodec_contx, av_codec, NULL);//Initialize the AVCodecContext to use the given AVCodec.
if (value < 0)
{
cout << "\nunable to open the av codec";
exit(1);
}
value = 0;
ofmt = av_guess_format(NULL, out_filename, NULL);
if (!ofmt)
{
cout << "\nerror in guessing the video format. try with correct format";
exit(1);
}
avformat_alloc_output_context2(&ofmt_ctx, ofmt, NULL, out_filename);
if (!ofmt_ctx)
{
cout << "\nerror in allocating av format output context";
exit(1);
}
AVCodec* av_codec_out = avcodec_find_encoder(av_codec_par_out->codec_id);
if (av_codec_out == NULL)
{
cout << "\nunable to find the encoder";
exit(1);
}
video_stream = avformat_new_stream(ofmt_ctx, av_codec_out);
if (!video_stream)
{
cout << "\nerror in creating a av format new stream";
exit(1);
}
AVCodecContext* av_cntx_out;
av_cntx_out = avcodec_alloc_context3(av_codec_out);
if (!av_cntx_out)
{
cout << "\nerror in allocating the codec contexts";
exit(1);
}
if (avcodec_parameters_copy(video_stream->codecpar, av_codec_par_out) < 0)
{
cout << "\nCodec parameter canot copied";
exit(1);
}
if (avcodec_parameters_to_context(av_cntx_out, av_codec_par_out) < 0)
{
cout << "\nerror in converting the codec contexts";
exit(1);
}
//av_cntx_out->pix_fmt = AV_PIX_FMT_YUV420P;
av_cntx_out->gop_size = 30;//3; //Use I-Frame frame every second.
av_cntx_out->max_b_frames = 2;
av_cntx_out->time_base.num = 1;
av_cntx_out->time_base.den = 30;
value = avcodec_open2(av_cntx_out, av_codec_out, NULL);//Initialize the AVCodecContext to use the given AVCodec.
if (value < 0)
{
cout << "\nunable to open the av codec";
exit(1);
}
if (avcodec_contx->codec_id == AV_CODEC_ID_H264)
{
av_opt_set(av_cntx_out->priv_data, "preset", "slow", 0);
}
avio_open(&ofmt_ctx->pb, out_filename, AVIO_FLAG_READ_WRITE);
if (ofmt_ctx->oformat->flags & AVFMT_GLOBALHEADER)
{
av_cntx_out->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
}
if (avformat_write_header(ofmt_ctx, NULL) < 0)
{
cout << "\nerror in writing the header context";
exit(1);
}
AVPacket* av_pkt = av_packet_alloc();
//av_init_packet(av_pkt); //error C4996: 'av_init_packet': was declared deprecated
memset(av_pkt, 0, sizeof(AVPacket)); //???
AVFrame* av_frame = av_frame_alloc();
if (!av_frame)
{
cout << "\nunable to release the avframe resources";
exit(1);
}
AVFrame* outFrame = av_frame_alloc();//Allocate an AVFrame and set its fields to default values.
if (!outFrame)
{
cout << "\nunable to release the avframe resources for outframe";
exit(1);
}
//int video_outbuf_size;
//int nbytes = av_image_get_buffer_size(av_cntx_out->pix_fmt, av_cntx_out->width, av_cntx_out->height, 32);
//uint8_t* video_outbuf = (uint8_t*)av_malloc(nbytes);
//if (video_outbuf == NULL)
//{
// cout << "\nunable to allocate memory";
// exit(1);
//}
av_frame->width = avcodec_contx->width;
av_frame->height = avcodec_contx->height;
av_frame->format = av_codec_par_in->format;
outFrame->width = av_cntx_out->width;
outFrame->height = av_cntx_out->height;
outFrame->format = av_codec_par_out->format;
av_frame_get_buffer(av_frame, 0);
av_frame_get_buffer(outFrame, 0);
//value = av_image_fill_arrays(outFrame->data, outFrame->linesize, video_outbuf, av_cntx_out->pix_fmt, av_cntx_out->width, av_cntx_out->height, 32); // returns : the size in bytes required for src
//if (value < 0)
//{
// cout << "\nerror in filling image array";
//}
SwsContext* swsCtx = sws_alloc_context();
if (sws_init_context(swsCtx, NULL, NULL) < 0)
{
cout << "\nUnable to Initialize the swscaler context sws_context.";
exit(1);
}
swsCtx = sws_getContext(avcodec_contx->width, avcodec_contx->height, avcodec_contx->pix_fmt,
av_cntx_out->width, av_cntx_out->height, av_cntx_out->pix_fmt,
SWS_FAST_BILINEAR, NULL, NULL, NULL);
if (swsCtx == NULL)
{
cout << "\n Cannot allocate SWC Context";
exit(1);
}
int ii = 0;
int enc_packet_counter = 0; //Count encoded frames.
int no_frames = 100;
//cout << "\nenter No. of frames to capture : ";
//cin >> no_frames;
//int flag;
int frameFinished;
//int got_picture;
int frame_index = 0;
AVPacket* outPacket = av_packet_alloc();
int j = 0;
while (av_read_frame(ifmt_ctx, av_pkt) >= 0)
{
if (ii++ == no_frames)
break;
if (av_pkt->stream_index == VideoStreamIndx)
{
//value = AvCodecDecodeVideo2(avcodec_contx, av_frame, &frameFinished, av_pkt);
//if (value < 0)
//{
// cout << "unable to decode video";
// exit(1);
//}
int ret = avcodec_send_packet(avcodec_contx, av_pkt);
if (ret < 0)
{
printf("Error while sending packet");
}
frameFinished = true;
int response = 0;
//av_frame_unref(av_frame); //???
//do
//{
response = avcodec_receive_frame(avcodec_contx, av_frame);
if (response < 0) //&& (response != AVERROR(EAGAIN)) && (response != AVERROR_EOF))
{
printf("Error while receiving frame from decoder");
frameFinished = false;
}
//}
//while (response == AVERROR(EAGAIN));
if (frameFinished)// Frame successfully decoded :)
{
//av_init_packet(outPacket); //error C4996: 'av_init_packet': was declared deprecated
memset(outPacket, 0, sizeof(AVPacket)); //???
//int iHeight =sws_scale(swsCtx, av_frame->data, av_frame->linesize, 0, avcodec_contx->height, outFrame->data, outFrame->linesize);
outPacket->data = NULL; // packet data will be allocated by the encoder
outPacket->size = 0;
outPacket->pts = av_rescale_q(enc_packet_counter, av_cntx_out->time_base, video_stream->time_base); //???
if (outPacket->dts != AV_NOPTS_VALUE)
outPacket->dts = av_rescale_q(enc_packet_counter, av_cntx_out->time_base, video_stream->time_base); //???
outPacket->dts = av_rescale_q(enc_packet_counter, av_cntx_out->time_base, video_stream->time_base); //???
outPacket->duration = av_rescale_q(1, av_cntx_out->time_base, video_stream->time_base); //???
outFrame->pts = av_rescale_q(enc_packet_counter, av_cntx_out->time_base, video_stream->time_base); //???
outFrame->pkt_duration = av_rescale_q(enc_packet_counter, av_cntx_out->time_base, video_stream->time_base); //???
enc_packet_counter++;
//Apply color space conversion from BGRA to YUV420p using sws_scale
////////////////////////////////////////////////////////////////
int sts = sws_scale(swsCtx, //struct SwsContext *c,
av_frame->data, //const uint8_t *const srcSlice[],
av_frame->linesize, //const int srcStride[],
0, //int srcSliceY,
av_frame->height, //int srcSliceH,
outFrame->data, //uint8_t *const dst[],
outFrame->linesize); //const int dstStride[]);
if (sts < 0)
{
printf("Error while executing sws_scale");
}
////////////////////////////////////////////////////////////////
int ret = 0;
do
{
if (ret == AVERROR(EAGAIN))
{
av_packet_unref(outPacket);
ret = avcodec_receive_packet(av_cntx_out, outPacket);
if (ret) break; // deal with error
outPacket->duration = av_rescale_q(1, av_cntx_out->time_base, video_stream->time_base); //???
av_write_frame(ofmt_ctx, outPacket);
}
else if (ret != 0)
{
char str2[] = "";
cout << "\nError :" << av_make_error_string(str2, sizeof(str2), ret);
return -1;
}
ret = avcodec_send_frame(av_cntx_out, outFrame);
} while (ret);
//if (AvCodecEncodeVideo2(av_cntx_out, outPacket, outFrame, &got_picture) < 0)
//{
// cout << "unable to encode video";
// exit(1);
//}
//
//if (got_picture)
//{
// if (outPacket->pts != AV_NOPTS_VALUE)
// outPacket->pts = av_rescale_q(outPacket->pts, av_cntx_out->time_base, video_stream->time_base);
// if (outPacket->dts != AV_NOPTS_VALUE)
// outPacket->dts = av_rescale_q(outPacket->dts, av_cntx_out->time_base, video_stream->time_base);
//
// //Set packet duration
// ////////////////////////////////////////////////////////////
// //AVRational avg_frame_rate = av_make_q(30, 1); //30 fps
// //int64_t avp_duration = av_cntx_out->time_base.den / av_cntx_out->time_base.num / avg_frame_rate.num * avg_frame_rate.den;
// //outPacket->duration = avp_duration;
// outPacket->duration = 1; //Since the time base is 1/30, the duration equals 1
// ////////////////////////////////////////////////////////////
//
// printf("Write frame %3d (size= %2d)\n", j++, outPacket->size / 1000);
// if (av_write_frame(ofmt_ctx, outPacket) != 0)
// {
// cout << "\nerror in writing video frame";
// }
//
// av_packet_unref(outPacket);
//} // got_picture
//
//av_packet_unref(outPacket);
} // frameFinished
}
}// End of while-loop
// flush the rest of the packets ???
////////////////////////////////////////////////////////////
int ret = 0;
avcodec_send_frame(av_cntx_out, NULL);
do
{
av_packet_unref(outPacket);
ret = avcodec_receive_packet(av_cntx_out, outPacket);
if (!ret)
{
outPacket->pts = av_rescale_q(enc_packet_counter, av_cntx_out->time_base, video_stream->time_base); //???
outPacket->dts = av_rescale_q(enc_packet_counter, av_cntx_out->time_base, video_stream->time_base); //???
outPacket->duration = av_rescale_q(1, av_cntx_out->time_base, video_stream->time_base); //???
av_write_frame(ofmt_ctx, outPacket);
enc_packet_counter++;
}
} while (!ret);
////////////////////////////////////////////////////////////
value = av_write_trailer(ofmt_ctx);
if (value < 0)
{
cout << "\nerror in writing av trailer";
exit(1);
}
//THIS WAS ADDED LATER
/*av_free(video_outbuf);*/
avformat_close_input(&ifmt_ctx);
if (!ifmt_ctx)
{
cout << "\nfile closed successfully";
}
else
{
cout << "\nunable to close the file";
exit(1);
}
avformat_free_context(ifmt_ctx);
if (!ifmt_ctx)
{
cout << "\navformat free successfully";
}
else
{
cout << "\nunable to free avformat context";
exit(1);
}
//Free codec context.
////////////////////////////////////////////////////////////
avcodec_free_context(&av_cntx_out);
if (!av_cntx_out)
{
cout << "\navcodec free successfully";
}
else
{
cout << "\nunable to free avcodec context";
exit(1);
}
////////////////////////////////////////////////////////////
return 0;
}
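As a quick way to verify the fix (my suggestion, not part of the original answer), inspect the output and confirm that it reports a playable yuv420p H.264 stream instead of solid green frames:

ffprobe new_out.mp4
ffplay new_out.mp4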
For "c++ - Why does the FFmpeg screen recorder output only show a green screen?", we found a similar question on Stack Overflow: https://stackoverflow.com/questions/70390402/