I am trying to write a C program that streams AV over RTP on the network by copying the AV codecs, using rtp_mpegts — the equivalent of:
ffmpeg -re -i Sample_AV_15min.ts -acodec copy -vcodec copy -f rtp_mpegts rtp://192.168.1.1:5004
I am using muxing.c, which uses the ffmpeg libraries, as my starting point. The ffmpeg command-line application itself works fine.
Stream details:
Input #0, mpegts, from 'Weather_Nation_10min.ts':
  Duration: 00:10:00.38, start: 41313.400811, bitrate: 2840 kb/s
  Program 1
    Stream #0:0[0x11]: Video: h264 (High) ([27][0][0][0] / 0x001B), yuv420p, 1440x1080 [SAR 4:3 DAR 16:9], 29.97 fps, 59.94 tbr, 90k tbn, 59.94 tbc
    Stream #0:1[0x14]: Audio: ac3 (AC-3 / 0x332D4341), 48000 Hz, stereo, fltp, 448 kb/s
Output #0, rtp_mpegts, to 'rtp://192.168.1.1:5004':
  Metadata:
    encoder         : Lavf54.63.104
    Stream #0:0: Video: h264 ([27][0][0][0] / 0x001B), yuv420p, 1440x1080 [SAR 4:3 DAR 16:9], q=2-31, 29.97 fps, 90k tbn, 29.97 tbc
    Stream #0:1: Audio: ac3 (AC-3 / 0x332D4341), 48000 Hz, stereo, 448 kb/s
Stream mapping:
  Stream #0:0 -> #0:0 (copy)
  Stream #0:1 -> #0:1 (copy)
However, my application fails:
./my_test_app Sample_AV_15min.ts rtp://192.168.1.1:5004
[h264 @ 0x800b30] non-existing PPS referenced
[h264 @ 0x800b30] non-existing PPS 0 referenced
[h264 @ 0x800b30] decode_slice_header error
[h264 @ 0x800b30] no frame!
[....snipped...]
[h264 @ 0x800b30] non-existing PPS 0 referenced
[h264 @ 0x800b30] non-existing PPS referenced
[h264 @ 0x800b30] non-existing PPS 0 referenced
[h264 @ 0x800b30] decode_slice_header error
[h264 @ 0x800b30] no frame!
[h264 @ 0x800b30] mmco: unref short failure
[h264 @ 0x800b30] mmco: unref short failure
[mpegts @ 0x800020] max_analyze_duration 5000000 reached at 5024000 microseconds
[mpegts @ 0x800020] PES packet size mismatch
could not find codec tag for codec id 17075200, default to 0.
could not find codec tag for codec id 86019, default to 0.
Could not allocate picture: Invalid argument
Found Video Stream Found Audio Stream
How can I fix this? Here is my complete source, based on muxing.c:
/**
* @file
* libavformat API example.
*
* Output a media file in any supported libavformat format.
* The default codecs are used.
* @example doc/examples/muxing.c
*/
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <libavutil/mathematics.h>
#include <libavformat/avformat.h>
#include <libswscale/swscale.h>
/* 200 seconds stream duration */
#define STREAM_DURATION 200.0
#define STREAM_FRAME_RATE 25 /* 25 images/s */
#define STREAM_NB_FRAMES ((int)(STREAM_DURATION * STREAM_FRAME_RATE))
#define STREAM_PIX_FMT AV_PIX_FMT_YUV420P /* default pix_fmt */
static int sws_flags = SWS_BICUBIC;
/**************************************************************/
/* audio output */
static float t, tincr, tincr2;
static int16_t *samples;
static int audio_input_frame_size;
#if 0
/* Add an output stream. */
static AVStream *add_stream(AVFormatContext *oc, AVCodec **codec,
enum AVCodecID codec_id)
{
AVCodecContext *c;
AVStream *st;
/* find the encoder */
*codec = avcodec_find_encoder(codec_id);
if (!(*codec)) {
fprintf(stderr, "Could not find encoder for '%s'\n",
avcodec_get_name(codec_id));
exit(1);
}
st = avformat_new_stream(oc, *codec);
if (!st) {
fprintf(stderr, "Could not allocate stream\n");
exit(1);
}
st->id = oc->nb_streams-1;
c = st->codec;
switch ((*codec)->type) {
case AVMEDIA_TYPE_AUDIO:
st->id = 1;
c->sample_fmt = AV_SAMPLE_FMT_S16;
c->bit_rate = 64000;
c->sample_rate = 44100;
c->channels = 2;
break;
case AVMEDIA_TYPE_VIDEO:
c->codec_id = codec_id;
c->bit_rate = 400000;
/* Resolution must be a multiple of two. */
c->width = 352;
c->height = 288;
/* timebase: This is the fundamental unit of time (in seconds) in terms
* of which frame timestamps are represented. For fixed-fps content,
* timebase should be 1/framerate and timestamp increments should be
* identical to 1. */
c->time_base.den = STREAM_FRAME_RATE;
c->time_base.num = 1;
c->gop_size = 12; /* emit one intra frame every twelve frames at most */
c->pix_fmt = STREAM_PIX_FMT;
if (c->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
/* just for testing, we also add B frames */
c->max_b_frames = 2;
}
if (c->codec_id == AV_CODEC_ID_MPEG1VIDEO) {
/* Needed to avoid using macroblocks in which some coeffs overflow.
* This does not happen with normal video, it just happens here as
* the motion of the chroma plane does not match the luma plane. */
c->mb_decision = 2;
}
break;
default:
break;
}
/* Some formats want stream headers to be separate. */
if (oc->oformat->flags & AVFMT_GLOBALHEADER)
c->flags |= CODEC_FLAG_GLOBAL_HEADER;
return st;
}
#endif
/**************************************************************/
/* audio output */
static float t, tincr, tincr2;
static int16_t *samples;
static int audio_input_frame_size;
static void open_audio(AVFormatContext *oc, AVCodec *codec, AVStream *st)
{
AVCodecContext *c;
int ret;
c = st->codec;
/* open it */
ret = avcodec_open2(c, codec, NULL);
if (ret < 0) {
fprintf(stderr, "Could not open audio codec: %s\n", av_err2str(ret));
exit(1);
}
/* init signal generator */
t = 0;
tincr = 2 * M_PI * 110.0 / c->sample_rate;
/* increment frequency by 110 Hz per second */
tincr2 = 2 * M_PI * 110.0 / c->sample_rate / c->sample_rate;
if (c->codec->capabilities & CODEC_CAP_VARIABLE_FRAME_SIZE)
audio_input_frame_size = 10000;
else
audio_input_frame_size = c->frame_size;
samples = av_malloc(audio_input_frame_size *
av_get_bytes_per_sample(c->sample_fmt) *
c->channels);
if (!samples) {
fprintf(stderr, "Could not allocate audio samples buffer\n");
exit(1);
}
}
/* Prepare a 16 bit dummy audio frame of 'frame_size' samples and
* 'nb_channels' channels. */
static void get_audio_frame(int16_t *samples, int frame_size, int nb_channels)
{
int j, i, v;
int16_t *q;
q = samples;
for (j = 0; j < frame_size; j++) {
v = (int)(sin(t) * 10000);
for (i = 0; i < nb_channels; i++)
*q++ = v;
t += tincr;
tincr += tincr2;
}
}
static void write_audio_frame(AVFormatContext *oc, AVStream *st)
{
AVCodecContext *c;
AVPacket pkt = { 0 }; // data and size must be 0;
AVFrame *frame = avcodec_alloc_frame();
int got_packet, ret;
av_init_packet(&pkt);
c = st->codec;
get_audio_frame(samples, audio_input_frame_size, c->channels);
frame->nb_samples = audio_input_frame_size;
avcodec_fill_audio_frame(frame, c->channels, c->sample_fmt,
(uint8_t *)samples,
audio_input_frame_size *
av_get_bytes_per_sample(c->sample_fmt) *
c->channels, 1);
ret = avcodec_encode_audio2(c, &pkt, frame, &got_packet);
if (ret < 0) {
fprintf(stderr, "Error encoding audio frame: %s\n", av_err2str(ret));
exit(1);
}
if (!got_packet)
return;
pkt.stream_index = st->index;
/* Write the compressed frame to the media file. */
ret = av_interleaved_write_frame(oc, &pkt);
if (ret != 0) {
fprintf(stderr, "Error while writing audio frame: %s\n",
av_err2str(ret));
exit(1);
}
avcodec_free_frame(&frame);
}
static void close_audio(AVFormatContext *oc, AVStream *st)
{
avcodec_close(st->codec);
av_free(samples);
}
/**************************************************************/
/* video output */
static AVFrame *frame;
static AVPicture src_picture, dst_picture;
static int frame_count;
static void open_video(AVFormatContext *oc, AVCodec *codec, AVStream *st)
{
int ret;
AVCodecContext *c = st->codec;
/* open the codec */
ret = avcodec_open2(c, codec, NULL);
if (ret < 0) {
fprintf(stderr, "Could not open video codec: %s\n", av_err2str(ret));
exit(1);
}
/* allocate and init a re-usable frame */
frame = avcodec_alloc_frame();
if (!frame) {
fprintf(stderr, "Could not allocate video frame\n");
exit(1);
}
/* Allocate the encoded raw picture. */
ret = avpicture_alloc(&dst_picture, c->pix_fmt, c->width, c->height);
if (ret < 0) {
fprintf(stderr, "Could not allocate picture: %s\n", av_err2str(ret));
exit(1);
}
/* If the output format is not YUV420P, then a temporary YUV420P
* picture is needed too. It is then converted to the required
* output format. */
if (c->pix_fmt != AV_PIX_FMT_YUV420P) {
ret = avpicture_alloc(&src_picture, AV_PIX_FMT_YUV420P, c->width, c->height);
if (ret < 0) {
fprintf(stderr, "Could not allocate temporary picture: %s\n",
av_err2str(ret));
exit(1);
}
}
/* copy data and linesize picture pointers to frame */
*((AVPicture *)frame) = dst_picture;
}
/* Prepare a dummy image. */
static void fill_yuv_image(AVPicture *pict, int frame_index,
int width, int height)
{
int x, y, i;
i = frame_index;
/* Y */
for (y = 0; y < height; y++)
for (x = 0; x < width; x++)
pict->data[0][y * pict->linesize[0] + x] = x + y + i * 3;
/* Cb and Cr */
for (y = 0; y < height / 2; y++) {
for (x = 0; x < width / 2; x++) {
pict->data[1][y * pict->linesize[1] + x] = 128 + y + i * 2;
pict->data[2][y * pict->linesize[2] + x] = 64 + x + i * 5;
}
}
}
static void write_video_frame(AVFormatContext *oc, AVStream *st)
{
int ret;
static struct SwsContext *sws_ctx;
AVCodecContext *c = st->codec;
if (frame_count >= STREAM_NB_FRAMES) {
/* No more frames to compress. The codec has a latency of a few
* frames if using B-frames, so we get the last frames by
* passing the same picture again. */
} else {
if (c->pix_fmt != AV_PIX_FMT_YUV420P) {
/* as we only generate a YUV420P picture, we must convert it
* to the codec pixel format if needed */
if (!sws_ctx) {
sws_ctx = sws_getContext(c->width, c->height, AV_PIX_FMT_YUV420P,
c->width, c->height, c->pix_fmt,
sws_flags, NULL, NULL, NULL);
if (!sws_ctx) {
fprintf(stderr,
"Could not initialize the conversion context\n");
exit(1);
}
}
fill_yuv_image(&src_picture, frame_count, c->width, c->height);
sws_scale(sws_ctx,
(const uint8_t * const *)src_picture.data, src_picture.linesize,
0, c->height, dst_picture.data, dst_picture.linesize);
} else {
fill_yuv_image(&dst_picture, frame_count, c->width, c->height);
}
}
if (oc->oformat->flags & AVFMT_RAWPICTURE) {
/* Raw video case - directly store the picture in the packet */
AVPacket pkt;
av_init_packet(&pkt);
pkt.flags |= AV_PKT_FLAG_KEY;
pkt.stream_index = st->index;
pkt.data = dst_picture.data[0];
pkt.size = sizeof(AVPicture);
ret = av_interleaved_write_frame(oc, &pkt);
} else {
/* encode the image */
AVPacket pkt;
int got_output;
av_init_packet(&pkt);
pkt.data = NULL; // packet data will be allocated by the encoder
pkt.size = 0;
ret = avcodec_encode_video2(c, &pkt, frame, &got_output);
if (ret < 0) {
fprintf(stderr, "Error encoding video frame: %s\n", av_err2str(ret));
exit(1);
}
/* If size is zero, it means the image was buffered. */
if (got_output) {
if (c->coded_frame->key_frame)
pkt.flags |= AV_PKT_FLAG_KEY;
pkt.stream_index = st->index;
/* Write the compressed frame to the media file. */
ret = av_interleaved_write_frame(oc, &pkt);
} else {
ret = 0;
}
}
if (ret != 0) {
fprintf(stderr, "Error while writing video frame: %s\n", av_err2str(ret));
exit(1);
}
frame_count++;
}
static void close_video(AVFormatContext *oc, AVStream *st)
{
avcodec_close(st->codec);
av_free(src_picture.data[0]);
av_free(dst_picture.data[0]);
av_free(frame);
}
/**************************************************************/
/* media file output */
int main(int argc, char **argv)
{
const char *filename;
AVOutputFormat *fmt;
AVFormatContext *oc;
AVStream *audio_st, *video_st;
AVCodec *audio_codec, *video_codec;
double audio_pts, video_pts;
int ret;
char errbuf[50];
int i = 0;
/* Initialize libavcodec, and register all codecs and formats. */
av_register_all();
if (argc != 3) {
printf("usage: %s input_file out_file|stream\n"
"API example program to output a media file with libavformat.\n"
"This program generates a synthetic audio and video stream, encodes and\n"
"muxes them into a file named output_file.\n"
"The output format is automatically guessed according to the file extension.\n"
"Raw images can also be output by using '%%d' in the filename.\n"
"\n", argv[0]);
return 1;
}
filename = argv[2];
/* allocate the output media context */
avformat_alloc_output_context2(&oc, NULL, "rtp_mpegts", filename);
if (!oc) {
printf("Could not deduce output format from file extension: using MPEG.\n");
avformat_alloc_output_context2(&oc, NULL, "mpeg", filename);
}
if (!oc) {
return 1;
}
fmt = oc->oformat;
//Find input stream info.
video_st = NULL;
audio_st = NULL;
avformat_open_input( &oc, argv[1], 0, 0);
if ((ret = avformat_find_stream_info(oc, 0))< 0)
{
av_strerror(ret, errbuf,sizeof(errbuf));
printf("Not Able to find stream info::%s ", errbuf);
ret = -1;
return ret;
}
for (i = 0; i < oc->nb_streams; i++)
{
if(oc->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO)
{
AVCodecContext *codec_ctx;
unsigned int tag = 0;
printf("Found Video Stream ");
video_st = oc->streams[i];
codec_ctx = video_st->codec;
// m_num_frames = oc->streams[i]->nb_frames;
video_codec = avcodec_find_decoder(codec_ctx->codec_id);
ret = avcodec_open2(codec_ctx, video_codec, NULL);
if (ret < 0)
{
av_log(NULL, AV_LOG_ERROR, "Failed to open decoder for stream #%u\n", i);
return ret;
}
if (av_codec_get_tag2(oc->oformat->codec_tag, video_codec->id, &tag) == 0)
{
av_log(NULL, AV_LOG_ERROR, "could not find codec tag for codec id %d, default to 0.\n", audio_codec->id);
}
video_st->codec = avcodec_alloc_context3(video_codec);
video_st->codec->codec_tag = tag;
}
if(oc->streams[i]->codec->codec_type == AVMEDIA_TYPE_AUDIO)
{
AVCodecContext *codec_ctx;
unsigned int tag = 0;
printf("Found Audio Stream ");
audio_st = oc->streams[i];
// aud_dts = audio_st->cur_dts;
// aud_pts = audio_st->last_IP_pts;
codec_ctx = audio_st->codec;
audio_codec = avcodec_find_decoder(codec_ctx->codec_id);
ret = avcodec_open2(codec_ctx, audio_codec, NULL);
if (ret < 0)
{
av_log(NULL, AV_LOG_ERROR, "Failed to open decoder for stream #%u\n", i);
return ret;
}
if (av_codec_get_tag2(oc->oformat->codec_tag, audio_codec->id, &tag) == 0)
{
av_log(NULL, AV_LOG_ERROR, "could not find codec tag for codec id %d, default to 0.\n", audio_codec->id);
}
audio_st->codec = avcodec_alloc_context3(audio_codec);
audio_st->codec->codec_tag = tag;
}
}
/* Add the audio and video streams using the default format codecs
* and initialize the codecs. */
/*
if (fmt->video_codec != AV_CODEC_ID_NONE) {
video_st = add_stream(oc, &video_codec, fmt->video_codec);
}
if (fmt->audio_codec != AV_CODEC_ID_NONE) {
audio_st = add_stream(oc, &audio_codec, fmt->audio_codec);
}
*/
/* Now that all the parameters are set, we can open the audio and
* video codecs and allocate the necessary encode buffers. */
if (video_st)
open_video(oc, video_codec, video_st);
if (audio_st)
open_audio(oc, audio_codec, audio_st);
av_dump_format(oc, 0, filename, 1);
/* open the output file, if needed */
if (!(fmt->flags & AVFMT_NOFILE)) {
ret = avio_open(&oc->pb, filename, AVIO_FLAG_WRITE);
if (ret < 0) {
fprintf(stderr, "Could not open '%s': %s\n", filename,
av_err2str(ret));
return 1;
}
}
/* Write the stream header, if any. */
ret = avformat_write_header(oc, NULL);
if (ret < 0) {
fprintf(stderr, "Error occurred when opening output file: %s\n",
av_err2str(ret));
return 1;
}
if (frame)
frame->pts = 0;
for (;;) {
/* Compute current audio and video time. */
if (audio_st)
audio_pts = (double)audio_st->pts.val * audio_st->time_base.num / audio_st->time_base.den;
else
audio_pts = 0.0;
if (video_st)
video_pts = (double)video_st->pts.val * video_st->time_base.num /
video_st->time_base.den;
else
video_pts = 0.0;
if ((!audio_st || audio_pts >= STREAM_DURATION) &&
(!video_st || video_pts >= STREAM_DURATION))
break;
/* write interleaved audio and video frames */
if (!video_st || (video_st && audio_st && audio_pts < video_pts)) {
write_audio_frame(oc, audio_st);
} else {
write_video_frame(oc, video_st);
frame->pts += av_rescale_q(1, video_st->codec->time_base, video_st->time_base);
}
}
/* Write the trailer, if any. The trailer must be written before you
* close the CodecContexts open when you wrote the header; otherwise
* av_write_trailer() may try to use memory that was freed on
* av_codec_close(). */
av_write_trailer(oc);
/* Close each codec. */
if (video_st)
close_video(oc, video_st);
if (audio_st)
close_audio(oc, audio_st);
if (!(fmt->flags & AVFMT_NOFILE))
/* Close the output file. */
avio_close(oc->pb);
/* free the stream */
avformat_free_context(oc);
return 0;
}
Accepted answer
I notice you have hardcoded a lot of codec parameters. Have you verified that the codec actually supports them? For example (borrowed from ffmpeg):
#include <stdbool.h>              /* bool, true, false */
#include <libavcodec/avcodec.h>   /* AVCodec, AVSampleFormat, FFMAX */

static bool check_sample_fmt(AVCodec *codec, enum AVSampleFormat sample_fmt)
{
const enum AVSampleFormat *p = codec->sample_fmts;
while (*p != AV_SAMPLE_FMT_NONE) {
if (*p == sample_fmt) {
return true;
}
p++;
}
return false;
}
/* just pick the highest supported samplerate */
static int select_sample_rate(AVCodec *codec)
{
if (!codec->supported_samplerates) {
return 44100;
}
int best_samplerate = 0;
const int *p = codec->supported_samplerates;
while (*p) {
best_samplerate = FFMAX(*p, best_samplerate);
p++;
}
return best_samplerate;
}
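A hedged sketch of how those helpers would be used when configuring an encoder, before calling avcodec_open2(). This is a fragment under assumptions: the codec ID is a placeholder, and AV_SAMPLE_FMT_S16 is only used here because it is what muxing.c hardcodes.

/* Hypothetical encoder setup: validate hardcoded parameters against the
 * codec's advertised capabilities instead of assuming them. */
AVCodec *codec = avcodec_find_encoder(AV_CODEC_ID_AC3);  /* placeholder codec */
if (!codec)
    exit(1);
AVCodecContext *ctx = avcodec_alloc_context3(codec);
ctx->sample_rate = select_sample_rate(codec);  /* highest supported rate */
ctx->sample_fmt  = AV_SAMPLE_FMT_S16;          /* the value muxing.c hardcodes */
if (!check_sample_fmt(codec, ctx->sample_fmt)) {
    fprintf(stderr, "Encoder %s does not support sample format %s\n",
            codec->name, av_get_sample_fmt_name(ctx->sample_fmt));
    exit(1);
}
/* only now is it safe to call avcodec_open2(ctx, codec, NULL) */

This matters because an unsupported sample format or rate makes avcodec_open2() or the encode call fail even when the hardcoded values look reasonable on paper.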
Regarding "c - ffmpeg error 'Could not allocate picture: Invalid argument Found Video Stream Found Audio Stream'", we found a similar question on Stack Overflow: https://stackoverflow.com/questions/47405405/
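One further note: the command at the top of the question is a pure stream copy, so no encoder or decoder needs to be opened at all. Below is a minimal stream-copy remux sketch under that assumption, loosely following FFmpeg's doc/examples/remuxing.c from the same API era as the question. The function name is made up, and error handling and timestamp handling are simplified; treat it as an outline, not as the asker's fixed program.

#include <libavformat/avformat.h>

static int remux_stream_copy(const char *in_name, const char *out_url)
{
    AVFormatContext *ic = NULL, *oc = NULL;
    AVPacket pkt;
    int ret, i;

    av_register_all();
    avformat_network_init();                 /* needed for rtp:// output */

    if ((ret = avformat_open_input(&ic, in_name, NULL, NULL)) < 0)
        return ret;
    if ((ret = avformat_find_stream_info(ic, NULL)) < 0)
        return ret;

    /* A second, separate context for the output: the input and the
     * output each need their own AVFormatContext. */
    avformat_alloc_output_context2(&oc, NULL, "rtp_mpegts", out_url);
    if (!oc)
        return AVERROR_UNKNOWN;

    for (i = 0; i < ic->nb_streams; i++) {
        AVStream *out_st = avformat_new_stream(oc, NULL);
        if (!out_st)
            return AVERROR_UNKNOWN;
        /* Copy codec parameters; this is what -acodec/-vcodec copy do. */
        if ((ret = avcodec_copy_context(out_st->codec, ic->streams[i]->codec)) < 0)
            return ret;
        out_st->codec->codec_tag = 0;        /* let the muxer choose the tag */
        if (oc->oformat->flags & AVFMT_GLOBALHEADER)
            out_st->codec->flags |= CODEC_FLAG_GLOBAL_HEADER;
    }

    if (!(oc->oformat->flags & AVFMT_NOFILE) &&
        (ret = avio_open(&oc->pb, out_url, AVIO_FLAG_WRITE)) < 0)
        return ret;
    if ((ret = avformat_write_header(oc, NULL)) < 0)
        return ret;

    while (av_read_frame(ic, &pkt) >= 0) {
        AVStream *in_st  = ic->streams[pkt.stream_index];
        AVStream *out_st = oc->streams[pkt.stream_index];
        /* Rescale timestamps to the output time base (assumes valid pts/dts). */
        pkt.pts      = av_rescale_q(pkt.pts, in_st->time_base, out_st->time_base);
        pkt.dts      = av_rescale_q(pkt.dts, in_st->time_base, out_st->time_base);
        pkt.duration = av_rescale_q(pkt.duration, in_st->time_base, out_st->time_base);
        pkt.pos = -1;
        /* av_interleaved_write_frame() takes ownership of the packet data. */
        if ((ret = av_interleaved_write_frame(oc, &pkt)) < 0)
            break;
    }

    av_write_trailer(oc);
    avformat_close_input(&ic);
    if (!(oc->oformat->flags & AVFMT_NOFILE))
        avio_close(oc->pb);
    avformat_free_context(oc);
    return ret < 0 ? ret : 0;
}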