
ios - Setting up an Audio Queue for ffmpeg RTSP stream playback


I am developing an RTSP streaming client (AAC format) for iOS using ffmpeg. Right now all I can say is that my application works, but the streamed sound is very noisy and even somewhat distorted, far worse than when the stream is played by VLC or MPlayer.

The stream is read by av_read_frame() and decoded by avcodec_decode_audio3(). I then send the decoded raw audio to an Audio Queue.
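(For context, a simplified sketch of such a read/decode loop against the old ffmpeg API the question uses; fmtCtx, codecCtx, audioStreamIndex and pushAudioRing are placeholder names, not the question's actual code:)

AVPacket pkt;
int16_t samples[AVCODEC_MAX_AUDIO_FRAME_SIZE / sizeof(int16_t)];

while (av_read_frame(fmtCtx, &pkt) >= 0)
{
    if (pkt.stream_index == audioStreamIndex)
    {
        int outSize = sizeof(samples); // in: buffer capacity in bytes; out: bytes of PCM produced
        int used = avcodec_decode_audio3(codecCtx, samples, &outSize, &pkt);

        if (used >= 0 && outSize > 0)
            pushAudioRing(samples, outSize, pkt.dts); // hypothetical helper: append PCM + dts to the ring buffer
    }
    av_free_packet(&pkt);
}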

When my application decodes a local AAC file, the sound is not noisy at all. I know the initial encoding dramatically affects the result, but at the very least I should try to make it sound comparable to other streaming clients...

Many parts of my implementation/modifications actually came from trial and error. I believe I am doing something wrong in setting up the Audio Queue and in the callback function that fills the audio buffers.

Any hints, suggestions, or help are greatly appreciated.

//-- Info for the test material dumped by av_dump_format() --

Metadata:
    title           : /demo/test.3gp
  Duration: 00:00:30.11, start: 0.000000, bitrate: N/A
    Stream #0:0: Audio: aac, 32000 Hz, stereo, s16
aac  Advanced Audio Coding

//-- Audio Queue setup procedure --

- (void) startPlayback
{
    OSStatus err = 0;
    if (playState.playing) return;

    playState.started = false;

    if (!playState.queue)
    {
        UInt32 bufferSize;

        // Canonical 16-bit interleaved linear PCM, matching the decoder output
        playState.format.mSampleRate       = _av->audio.sample_rate;
        playState.format.mFormatID         = kAudioFormatLinearPCM;
        playState.format.mFormatFlags      = kAudioFormatFlagsCanonical;
        playState.format.mChannelsPerFrame = _av->audio.channels_per_frame;
        playState.format.mBytesPerPacket   = sizeof(AudioSampleType) * _av->audio.channels_per_frame;
        playState.format.mBytesPerFrame    = sizeof(AudioSampleType) * _av->audio.channels_per_frame;
        playState.format.mBitsPerChannel   = 8 * sizeof(AudioSampleType);
        playState.format.mFramesPerPacket  = 1;
        playState.format.mReserved         = 0;

        pauseStart = 0;
        DeriveBufferSize(playState.format, playState.format.mBytesPerPacket, BUFFER_DURATION, &bufferSize, &numPacketsToRead);
        err = AudioQueueNewOutput(&playState.format, aqCallback, &playState, NULL, kCFRunLoopCommonModes, 0, &playState.queue);

        if (err != 0)
        {
            printf("AQHandler.m startPlayback: Error creating new AudioQueue: %d \n", (int)err);
        }

        for (int i = 0; i < NUM_BUFFERS; i++)
        {
            err = AudioQueueAllocateBufferWithPacketDescriptions(playState.queue, bufferSize, numPacketsToRead, &playState.buffers[i]);
            if (err != 0)
                printf("AQHandler.m startPlayback: Error allocating buffer %d", i);

            fillAudioBuffer(&playState, playState.queue, playState.buffers[i]);
        }
    }

    startTime = mu_currentTimeInMicros();

    err = AudioQueueStart(playState.queue, NULL);

    if (err)
    {
        char sErr[8]; // [4] was likely too small for a formatted four-char code plus terminator
        printf("AQHandler.m startPlayback: Could not start queue %ld %s.", (long)err, FormatError(sErr, err));

        playState.playing = NO;
    }
    else
    {
        AudioSessionSetActive(true);
        playState.playing = YES;
    }
}
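
(DeriveBufferSize itself is not shown in the question; a sketch along the lines of the version in Apple's Audio Queue Services Programming Guide, whose suggested size bounds are used below, would be:)

static void DeriveBufferSize(AudioStreamBasicDescription ASBDesc,
                             UInt32 maxPacketSize,
                             Float64 seconds,
                             UInt32 *outBufferSize,
                             UInt32 *outNumPacketsToRead)
{
    static const int maxBufferSize = 0x50000; // upper bound, 320 KB
    static const int minBufferSize = 0x4000;  // lower bound, 16 KB

    if (ASBDesc.mFramesPerPacket != 0)
    {
        // Fixed frames per packet: size the buffer for 'seconds' worth of packets
        Float64 numPacketsForTime = ASBDesc.mSampleRate / ASBDesc.mFramesPerPacket * seconds;
        *outBufferSize = numPacketsForTime * maxPacketSize;
    }
    else
    {
        // No fixed packet duration: fall back to the larger of the two sizes
        *outBufferSize = maxBufferSize > maxPacketSize ? maxBufferSize : maxPacketSize;
    }

    if (*outBufferSize > maxBufferSize && *outBufferSize > maxPacketSize)
        *outBufferSize = maxBufferSize;
    else if (*outBufferSize < minBufferSize)
        *outBufferSize = minBufferSize;

    *outNumPacketsToRead = *outBufferSize / maxPacketSize;
}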

//-- Callback that fills the audio buffers --

static int ct = 0;

static void fillAudioBuffer(void *info, AudioQueueRef queue, AudioQueueBufferRef buffer)
{
    int lengthCopied = INT32_MAX;
    int dts = 0;
    int isDone = 0;

    buffer->mAudioDataByteSize = 0;
    buffer->mPacketDescriptionCount = 0;

    OSStatus err = 0;
    AudioTimeStamp bufferStartTime;

    AudioQueueGetCurrentTime(queue, NULL, &bufferStartTime, NULL);

    PlayState *ps = (PlayState *)info;

    if (!ps->started)
        ps->started = true;

    while (buffer->mPacketDescriptionCount < numPacketsToRead && lengthCopied > 0)
    {
        lengthCopied = getNextAudio(_av,
                                    buffer->mAudioDataBytesCapacity - buffer->mAudioDataByteSize,
                                    (uint8_t *)buffer->mAudioData + buffer->mAudioDataByteSize,
                                    &dts, &isDone);

        ct += lengthCopied;

        if (lengthCopied < 0 || isDone)
        {
            printf("nothing to read....\n\n");
            ps->finished = true;
            ps->started = false;
            break;
        }

        if (aqStartDts < 0) aqStartDts = dts;

        if (buffer->mPacketDescriptionCount == 0)
        {
            bufferStartTime.mFlags = kAudioTimeStampSampleTimeValid;
            bufferStartTime.mSampleTime = (Float64)(dts - aqStartDts); // * _av->audio.frame_size;

            if (bufferStartTime.mSampleTime < 0)
                bufferStartTime.mSampleTime = 0;

            printf("AQHandler.m fillAudioBuffer: DTS for %p: %lf time base: %lf StartDTS: %d\n",
                   buffer,
                   bufferStartTime.mSampleTime,
                   _av->audio.time_base,
                   aqStartDts);
        }

        buffer->mPacketDescriptions[buffer->mPacketDescriptionCount].mStartOffset = buffer->mAudioDataByteSize;
        buffer->mPacketDescriptions[buffer->mPacketDescriptionCount].mDataByteSize = lengthCopied;
        buffer->mPacketDescriptions[buffer->mPacketDescriptionCount].mVariableFramesInPacket = 0;

        buffer->mPacketDescriptionCount++;
        buffer->mAudioDataByteSize += lengthCopied;
    }

    int audioBufferCount, audioBufferTotal, videoBufferCount, videoBufferTotal;
    bufferCheck(_av, &videoBufferCount, &videoBufferTotal, &audioBufferCount, &audioBufferTotal);

    if (buffer->mAudioDataByteSize)
    {
        err = AudioQueueEnqueueBufferWithParameters(queue, buffer, 0, NULL, 0, 0, 0, NULL, &bufferStartTime, NULL);

        if (err)
        {
            char sErr[10];
            printf("AQHandler.m fillAudioBuffer: Could not enqueue buffer %p: %d %s.", buffer, (int)err, FormatError(sErr, err));
        }
    }
}
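
(Since the queue format is constant-bitrate linear PCM, with a fixed mBytesPerPacket and mFramesPerPacket = 1, Audio Queue Services does not actually require per-packet descriptions; a plain enqueue, sketched here rather than taken from the question, would also work:)

err = AudioQueueEnqueueBuffer(queue, buffer, 0, NULL); // CBR data needs no packet descriptions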




int getNextAudio(video_data_t *vInst, int maxlength, uint8_t *buf, int *pts, int *isDone)
{
    struct video_context_t *ctx = vInst->context;
    int datalength = 0;

    while (ctx->audio_ring.lock || (ctx->audio_ring.count <= 0 && ((ctx->play_state & STATE_DIE) != STATE_DIE)))
    {
        if (ctx->play_state & STATE_EOF) return -1;
        usleep(100);
    }

    *pts = 0;
    ctx->audio_ring.lock = kLocked;

    if (ctx->audio_ring.count > 0 && maxlength > ctx->audio_buffer[ctx->audio_ring.read].size)
    {
        memcpy(buf, ctx->audio_buffer[ctx->audio_ring.read].data, ctx->audio_buffer[ctx->audio_ring.read].size);

        *pts = ctx->audio_buffer[ctx->audio_ring.read].pts;
        datalength = ctx->audio_buffer[ctx->audio_ring.read].size;

        ctx->audio_ring.read++;
        ctx->audio_ring.read %= ABUF_SIZE;
        ctx->audio_ring.count--;
    }
    ctx->audio_ring.lock = kUnlocked;

    if ((ctx->play_state & STATE_EOF) == STATE_EOF && ctx->audio_ring.count == 0) *isDone = 1;

    return datalength;
}
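
(One thing to watch in getNextAudio(): audio_ring.lock is a plain flag polled with usleep(), not a real lock, so the decoder thread and this consumer can race on the ring. A sketch of the same wait guarded by a pthread mutex and condition variable, where audio_mutex and audio_cond are assumed members not present in the original struct:)

pthread_mutex_lock(&ctx->audio_mutex);
while (ctx->audio_ring.count <= 0 && !(ctx->play_state & (STATE_DIE | STATE_EOF)))
    pthread_cond_wait(&ctx->audio_cond, &ctx->audio_mutex); // producer signals after each push
/* ...copy one packet out of the ring exactly as above... */
pthread_mutex_unlock(&ctx->audio_mutex);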

Best Answer

The most likely cause of the distorted sound is simple packet loss, something RTSP is susceptible to, especially over wireless connections.

I suggest you look into configuring ffmpeg to use a TCP-based connection wherever possible, instead of the default RTP/UDP.
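
(With a reasonably recent libavformat, the transport can be forced to TCP through the demuxer's "rtsp_transport" option when the input is opened; a minimal sketch, with a placeholder URL:)

AVFormatContext *fmtCtx = NULL;
AVDictionary *opts = NULL;

av_dict_set(&opts, "rtsp_transport", "tcp", 0); // RTP over TCP instead of the default UDP

if (avformat_open_input(&fmtCtx, "rtsp://example.com/demo/test.3gp", NULL, &opts) < 0)
{
    // handle the error
}
av_dict_free(&opts); // any options the demuxer did not consume remain in opts

The command-line tools take the same option, e.g. ffplay -rtsp_transport tcp rtsp://...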

Regarding ios - Setting up an Audio Queue for ffmpeg RTSP stream playback, we found a similar question on Stack Overflow: https://stackoverflow.com/questions/8193569/
