
ffmpeg - CoreAudio output sample rate discrepancy


I'm using ffmpeg to pull the audio out of a .mov file. As far as I can tell from my setup, I'm not converting the sample rate of the audio buffers I generate, so that is unlikely to be the cause of the problem. Regardless of the sample rate I set on the built-in output, my 44.1 kHz audio files play back at the correct rate. If I play a 48 kHz file, it plays back slowly (at roughly 91% of normal speed), which suggests it is actually being rendered at 44.1 kHz. I can switch the built-in output to 44.1, 48, or 96 kHz and the same thing happens. I changed my default output rate with the Audio MIDI Setup app, then verified the sample rate on my outputAudioUnit with AudioUnitGetProperty; it matches the rate shown in Audio MIDI Setup.
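
For reference, the file's native rate can be confirmed straight from ffmpeg's demuxer before any playback. A minimal sketch using libavformat (the helper name print_audio_sample_rate is made up for illustration; it is not part of the code below):

#include <stdio.h>
#include <libavformat/avformat.h>

/* Hypothetical helper: report the native sample rate of the first
   audio stream in a file, as seen by the demuxer. */
static int print_audio_sample_rate(const char *path)
{
    AVFormatContext *fmt = NULL;
    if (avformat_open_input(&fmt, path, NULL, NULL) < 0)
        return -1;
    if (avformat_find_stream_info(fmt, NULL) < 0) {
        avformat_close_input(&fmt);
        return -1;
    }
    int idx = av_find_best_stream(fmt, AVMEDIA_TYPE_AUDIO, -1, -1, NULL, 0);
    if (idx >= 0)
        printf("audio sample rate: %d Hz\n",
               fmt->streams[idx]->codecpar->sample_rate);
    avformat_close_input(&fmt);
    return idx >= 0 ? 0 : -1;
}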

Any ideas? My audio graph code is included below.

CheckError(NewAUGraph(&fp.graph), "Couldn't create a new AUGraph");

// varispeedNode has an input render callback
// the varispeed node feeds an output node which runs
// at the sample rate of the system default output

AUNode outputNode;
AudioComponentDescription outputcd = [self defaultOutputComponent];
CheckError(AUGraphAddNode(fp.graph, &outputcd, &outputNode),
           "AUGraphAddNode[kAudioUnitSubType_DefaultOutput] failed");

AUNode varispeedNode;
AudioComponentDescription varispeedcd = [self variSpeedComponent];
CheckError(AUGraphAddNode(fp.graph, &varispeedcd, &varispeedNode),
           "AUGraphAddNode[kAudioUnitSubType_Varispeed] failed");

CheckError(AUGraphOpen(fp.graph),
           "Couldn't Open AudioGraph");

CheckError(AUGraphNodeInfo(fp.graph, outputNode, NULL, &fp.outputAudioUnit),
           "Couldn't Retrieve output node");

CheckError(AUGraphNodeInfo(fp.graph, varispeedNode, NULL, &fp.variSpeedAudioUnit),
           "Couldn't Retrieve Varispeed Audio Unit");

AURenderCallbackStruct input;
input.inputProc = CBufferProviderCallback;
input.inputProcRefCon = &playerStruct;
CheckError(AudioUnitSetProperty(fp.variSpeedAudioUnit,
                                kAudioUnitProperty_SetRenderCallback,
                                kAudioUnitScope_Input,
                                0,
                                &input,
                                sizeof(input)),
           "AudioUnitSetProperty failed");

CheckError(AUGraphConnectNodeInput(fp.graph, varispeedNode, 0, outputNode, 0),
           "Couldn't Connect varispeed to output");

CheckError(AUGraphInitialize(fp.graph),
           "Couldn't Initialize AUGraph");

// check output sample rate
Float64 outputSampleRate = 0.0;
UInt32 sizeOfFloat64 = sizeof(Float64);

CheckError(AudioUnitGetProperty(fp.outputAudioUnit,
                                kAudioUnitProperty_SampleRate,
                                kAudioUnitScope_Global,
                                0,
                                &outputSampleRate,
                                &sizeOfFloat64),
           "Couldn't get output sampleRate");

Best Answer

I solved the problem. When building the audio graph, you need to specify the Varispeed audio unit's input sample rate before connecting it to the output node inside the AUGraph. See the sample code:
https://developer.apple.com/library/content/samplecode/CAPlayThrough/Listings/ReadMe_txt.html

CheckError(NewAUGraph(&fp.graph), "BuildGraphError");

AUNode outputNode;
AudioComponentDescription outputcd = [self defaultOutputComponent];
CheckError(AUGraphAddNode(fp.graph, &outputcd, &outputNode),
           "AUGraphAddNode[kAudioUnitSubType_DefaultOutput] failed");

AUNode varispeedNode;
AudioComponentDescription varispeedcd = [self variSpeedComponent];
CheckError(AUGraphAddNode(fp.graph, &varispeedcd, &varispeedNode),
           "AUGraphAddNode[kAudioUnitSubType_Varispeed] failed");

CheckError(AUGraphOpen(fp.graph),
           "Couldn't Open AudioGraph");

CheckError(AUGraphNodeInfo(fp.graph, outputNode, NULL, &fp.outputAudioUnit),
           "Couldn't Retrieve Output Audio Unit");

CheckError(AUGraphNodeInfo(fp.graph, varispeedNode, NULL, &fp.variSpeedAudioUnit),
           "Couldn't Retrieve Varispeed Audio Unit");

AURenderCallbackStruct input;
input.inputProc = CBufferProviderCallback;
input.inputProcRefCon = &playerStruct;
CheckError(AudioUnitSetProperty(fp.variSpeedAudioUnit,
                                kAudioUnitProperty_SetRenderCallback,
                                kAudioUnitScope_Input,
                                0,
                                &input,
                                sizeof(input)),
           "AudioUnitSetProperty failed");

// You have to set the Varispeed unit's input rate before you connect it
// see CAPlayThrough

AudioStreamBasicDescription asbd = {0};
UInt32 size;
Boolean outWritable;

// Get the size of the stream format property and whether it is writable
OSStatus result = AudioUnitGetPropertyInfo(fp.variSpeedAudioUnit,
                                           kAudioUnitProperty_StreamFormat,
                                           kAudioUnitScope_Output,
                                           0,
                                           &size,
                                           &outWritable);

// Get the current stream format of the Varispeed unit's output
result = AudioUnitGetProperty(fp.variSpeedAudioUnit,
                              kAudioUnitProperty_StreamFormat,
                              kAudioUnitScope_Output,
                              0,
                              &asbd,
                              &size);

// targetSampleRate is the sample rate of the incoming audio
// (e.g. 48000.0 for a 48 kHz file)
asbd.mSampleRate = targetSampleRate;

// Set the Varispeed unit's input stream format to the source sample rate
result = AudioUnitSetProperty(fp.variSpeedAudioUnit,
                              kAudioUnitProperty_StreamFormat,
                              kAudioUnitScope_Input,
                              0,
                              &asbd,
                              size);

printf("AudioUnitSetProperty result %d %d\n", (int)result, (int)noErr);

CheckError(AUGraphConnectNodeInput(fp.graph, varispeedNode, 0, outputNode, 0),
           "Couldn't Connect varispeed to output");

CheckError(AUGraphInitialize(fp.graph),
           "Couldn't Initialize AUGraph");

Float64 outputSampleRate = 0.0;
UInt32 sizeOfFloat64 = sizeof(Float64);

CheckError(AudioUnitGetProperty(fp.outputAudioUnit,
                                kAudioUnitProperty_SampleRate,
                                kAudioUnitScope_Global,
                                0,
                                &outputSampleRate,
                                &sizeOfFloat64),
           "Couldn't get output sampleRate");

NSLog(@"Output sample rate -> %f", outputSampleRate);

The original question, "ffmpeg - CoreAudio output sample rate discrepancy", can be found on Stack Overflow: https://stackoverflow.com/questions/50083388/
