ios - avcodec_receive_packet() sees no output

I am trying to create a converter that will make a video out of a set of images. Everything is in place: the AVFormatContext, the AVCodecContext, and the AVCodec. I create a YUV AVFrame out of a UIImage and send it to the encoder with avcodec_send_frame(). Everything goes fine until I try to get an AVPacket with avcodec_receive_packet(). Every time it returns -35, which means AVERROR(EAGAIN): output is not available in the current state and the user must try to send input. As I said, I do send an input before trying to receive anything, and the send succeeds.
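
For reference, a raw FFmpeg return code can be turned into a readable message with av_strerror(); a minimal sketch, assuming ret holds the value returned by avcodec_receive_packet():

char errbuf[AV_ERROR_MAX_STRING_SIZE];
av_strerror(ret, errbuf, sizeof(errbuf));
//on Darwin EAGAIN is 35, so AVERROR(EAGAIN) typically logs as "Resource temporarily unavailable (-35)"
NSLog(@"avcodec_receive_packet: %s (%d)", errbuf, ret);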

Here is my code:

Initialization of the FFmpeg entities:

- (BOOL)setupForConvert:(DummyFVPVideoFile *)videoFile outputPath:(NSString *)path
{
    if (!videoFile) {
        [self.delegate convertationFailed:@"VideoFile is nil!"];
        return NO;
    }
    currentVideoFile = videoFile;
    outputPath = path;
    BOOL success = NO;

    success = [self initFormatCtxAndCodecs:path];
    if (!success) {
        return NO;
    }

    success = [self addCameraStreams:videoFile];
    if (!success) {
        return NO;
    }

    success = [self openIOContext:path];
    if (!success) {
        return NO;
    }

    return YES;
}

- (BOOL)initFormatCtxAndCodecs:(NSString *)path
{
    //AVOutputFormat *fmt = av_guess_format("mp4", NULL, NULL);
    int ret = avformat_alloc_output_context2(&pFormatCtx, NULL, NULL, [path UTF8String]);
    if (ret < 0) {
        NSLog(@"Couldn't create output context");
        return NO;
    }

    //encoder codec init
    pCodec = avcodec_find_encoder(AV_CODEC_ID_H264);
    if (!pCodec) {
        NSLog(@"Couldn't find an encoder codec!");
        return NO;
    }

    pCodecCtx = avcodec_alloc_context3(pCodec);
    if (!pCodecCtx) {
        NSLog(@"Couldn't alloc encoder codec context!");
        return NO;
    }

    pCodecCtx->codec_tag = AV_CODEC_ID_H264;
    pCodecCtx->bit_rate = 400000;
    pCodecCtx->width = currentVideoFile.size.width;
    pCodecCtx->height = currentVideoFile.size.height;
    pCodecCtx->time_base = (AVRational){1, (int)currentVideoFile.framerate};
    pCodecCtx->framerate = (AVRational){(int)currentVideoFile.framerate, 1};
    pCodecCtx->gop_size = 10;
    pCodecCtx->max_b_frames = 1;
    pCodecCtx->pix_fmt = AV_PIX_FMT_YUV420P;

    if (avcodec_open2(pCodecCtx, pCodec, NULL) < 0) {
        NSLog(@"Couldn't open the encoder codec!");
        return NO;
    }

    pPacket = av_packet_alloc();

    return YES;
}

- (BOOL)addCameraStreams:(DummyFVPVideoFile *)videoFile
{
    AVCodecParameters *params = avcodec_parameters_alloc();
    if (!params) {
        NSLog(@"Couldn't allocate codec parameters!");
        return NO;
    }

    if (avcodec_parameters_from_context(params, pCodecCtx) < 0) {
        NSLog(@"Couldn't copy parameters from context!");
        return NO;
    }

    for (int i = 0; i < videoFile.idCameras.count - 1; i++)
    {
        NSString *path = [videoFile.url URLByAppendingPathComponent:videoFile.idCameras[i]].path;
        AVStream *stream = avformat_new_stream(pFormatCtx, pCodec);
        if (!stream) {
            NSLog(@"Couldn't alloc stream!");
            return NO;
        }

        if (avcodec_parameters_copy(stream->codecpar, params) < 0) {
            NSLog(@"Couldn't copy parameters into stream!");
            return NO;
        }

        stream->avg_frame_rate.num = videoFile.framerate;
        stream->avg_frame_rate.den = 1;
        stream->codecpar->codec_tag = 0; //some silly workaround
        stream->index = i;
        streams[path] = [[VideoStream alloc] initWithStream:stream];
    }

    return YES;
}

- (BOOL)openIOContext:(NSString *)path
{
    AVIOContext *ioCtx = nil;
    if (avio_open(&ioCtx, [path UTF8String], AVIO_FLAG_WRITE) < 0) {
        return NO;
    }
    pFormatCtx->pb = ioCtx;

    return YES;
}

And the conversion process looks like this:

- (void)launchConvert:(DummyFVPVideoFile *)videoFile
{
    BOOL convertInProgress = YES;
    unsigned int frameCount = 1;
    unsigned long pts = 0;
    BOOL success = NO;

    success = [self writeHeader];
    if (!success) {
        NSLog(@"Couldn't write header!");
        return;
    }

    AVRational defaultTimeBase;
    defaultTimeBase.num = 1;
    defaultTimeBase.den = videoFile.framerate;
    AVRational streamTimeBase = streams.allValues.firstObject.stream->time_base;

    while (convertInProgress)
    {
        pts += av_rescale_q(1, defaultTimeBase, streamTimeBase);
        for (NSString *path in streams.allKeys)
        {
            UIImage *img = [UIImage imageWithContentsOfFile:[NSString stringWithFormat:@"%@/%u.jpg", path, frameCount]];
            AVPacket *pkt = [self getAVPacket:img withPts:pts];
            if (!pkt->data) { continue; }
            pkt->stream_index = streams[path].stream->index;
            //check all settings of pkt

            if (![self writePacket:pkt]) {
                NSLog(@"Couldn't write packet!");
                convertInProgress = NO;
                break;
            }
        }

        frameCount++;
    }

    success = [self writeTrailer];
    if (!success) {
        NSLog(@"Couldn't write trailer!");
        return;
    }

    NSLog(@"Convertation finished!");
    //delegate convertationFinished method
}

- (BOOL)writeHeader
{
    if (avformat_write_header(pFormatCtx, NULL) < 0) {
        return NO;
    }

    return YES;
}

- (BOOL)writePacket:(AVPacket *)pkt
{
    if (av_interleaved_write_frame(pFormatCtx, pkt) != 0) {
        return NO;
    }

    return YES;
}

- (BOOL)writeTrailer
{
    if (av_write_trailer(pFormatCtx) != 0) {
        return NO;
    }

    return YES;
}


/**
 This method will create an AVPacket out of a UIImage.

 @return AVPacket
 */
- (AVPacket *)getAVPacket:(UIImage *)img withPts:(unsigned long)pts
{
    if (!img) {
        NSLog(@"imgData is nil!");
        return nil;
    }
    uint8_t *imgData = [self getPixelDataFromImage:img];

    AVFrame *frame_yuv = av_frame_alloc();
    if (!frame_yuv) {
        NSLog(@"frame_yuv is nil!");
        return nil;
    }
    frame_yuv->format = AV_PIX_FMT_YUV420P;
    frame_yuv->width = (int)img.size.width;
    frame_yuv->height = (int)img.size.height;

    int ret = av_image_alloc(frame_yuv->data,
                             frame_yuv->linesize,
                             frame_yuv->width,
                             frame_yuv->height,
                             frame_yuv->format,
                             32);
    if (ret < 0) {
        NSLog(@"Couldn't alloc yuv frame!");
        return nil;
    }

    struct SwsContext *sws_ctx = nil;
    sws_ctx = sws_getContext((int)img.size.width, (int)img.size.height, AV_PIX_FMT_RGB24,
                             (int)img.size.width, (int)img.size.height, AV_PIX_FMT_YUV420P,
                             0, NULL, NULL, NULL);
    const uint8_t *scaleData[1] = { imgData };
    int inLineSize[1] = { 4 * img.size.width };
    sws_scale(sws_ctx, scaleData, inLineSize, 0, (int)img.size.height, frame_yuv->data, frame_yuv->linesize);

    frame_yuv->pict_type = AV_PICTURE_TYPE_I;
    frame_yuv->pts = pCodecCtx->frame_number;

    ret = avcodec_send_frame(pCodecCtx, frame_yuv); //every time everything is fine
    if (ret != 0) {
        NSLog(@"Couldn't send yuv frame!");
        return nil;
    }

    av_init_packet(pPacket);
    pPacket->dts = pPacket->pts = pts;
    do {
        ret = avcodec_receive_packet(pCodecCtx, pPacket); //every time -35 error
        NSLog(@"ret = %d", ret);
        if (ret == AVERROR_EOF) {
            NSLog(@"AVERROR_EOF!");
        } else if (ret == AVERROR(EAGAIN)) {
            NSLog(@"AVERROR(EAGAIN)");
        } else if (ret == AVERROR(EINVAL)) {
            NSLog(@"AVERROR(EINVAL)");
        }
        if (ret != 0) {
            NSLog(@"Couldn't receive packet!");
            //return nil;
        }
    } while ( ret == 0 );

    free(imgData);
    av_packet_unref(pPacket);
    av_packet_free(&pPacket);
    av_frame_unref(frame_yuv);
    av_frame_free(&frame_yuv);
    //perform other clean up and test dat shit

    return pPacket;
}

Any insight would be helpful. Thanks!

Best Answer

There could be two reasons.

  1. According to FFmpeg's documentation, you may need to feed several frames to avcodec_send_frame() before avcodec_receive_packet() can return a packet successfully (see the sketch after this list).

  2. I cannot confirm whether you allocated a buffer of sufficient size for pPacket. The av_packet_alloc() and av_init_packet() functions do not allocate any payload buffer; they leave it set to NULL, so the allocation has to happen after the init. You should allocate the buffer somewhere, either manually or with av_new_packet(pPacket, SIZE) (see the second sketch below).
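
To illustrate the first point: FFmpeg's own encoding examples drain the encoder in a receive loop, treat AVERROR(EAGAIN) as "feed the next frame", and flush the delayed packets by sending a NULL frame once the input is exhausted. Below is a minimal sketch of that pattern, not the asker's code: it reuses the question's pCodecCtx and pFormatCtx names, the encodeFrame:toStream: method is hypothetical, and error handling is trimmed.

//Returns NO on a real encoder error; EAGAIN/EOF are normal control flow.
- (BOOL)encodeFrame:(AVFrame *)frame toStream:(AVStream *)stream
{
    //frame == NULL switches the encoder into draining (flush) mode
    int ret = avcodec_send_frame(pCodecCtx, frame);
    if (ret < 0) {
        return NO;
    }

    while (ret >= 0) {
        AVPacket pkt;
        av_init_packet(&pkt);
        pkt.data = NULL; //the encoder allocates the payload itself
        pkt.size = 0;

        ret = avcodec_receive_packet(pCodecCtx, &pkt);
        if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) {
            break; //not an error: send more frames, or the stream is finished
        } else if (ret < 0) {
            return NO;
        }

        //rescale from the codec time base to the stream time base before muxing
        av_packet_rescale_ts(&pkt, pCodecCtx->time_base, stream->time_base);
        pkt.stream_index = stream->index;
        av_interleaved_write_frame(pFormatCtx, &pkt);
        av_packet_unref(&pkt);
    }

    return YES;
}

With gop_size = 10 and max_b_frames = 1, the encoder buffers frames for lookahead, so AVERROR(EAGAIN) on the first few calls is expected behavior; the last packets only come out during the NULL-frame flush.

On the second point, av_new_packet() is how you would allocate a payload yourself; a small sketch follows (the 4096 size is an arbitrary placeholder). Note that this is only required when you fill pkt->data by hand: when receiving from an encoder, avcodec_receive_packet() sets the packet's data and size itself.

AVPacket *pkt = av_packet_alloc(); //fields initialized, data == NULL, size == 0
if (av_new_packet(pkt, 4096) < 0) { //allocates and zero-pads a 4096-byte payload
    NSLog(@"Couldn't allocate packet payload!");
}
//... fill pkt->data manually ...
av_packet_free(&pkt);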

Hope this helps.

Regarding ios - avcodec_receive_packet() sees no output, we found a similar question on Stack Overflow: https://stackoverflow.com/questions/49016511/
