gpt4 book ai didi

core-audio - iOS RemoteIO - AudioUnitAddRenderNotify 回调

转载 作者:行者123 更新时间:2023-12-02 02:10:20 25 4
gpt4 key购买 nike

我正在尝试像下面这样使用 AudioUnitAddRenderNotify 从 RemoteIO 进行录音。问题是：我无法从我的输入总线（bus 1）获取样本，recordingCallback 的执行始终在下面这段提前返回的代码处停止，不会继续往下走：

   if (*ioActionFlags & kAudioUnitRenderAction_PostRender || inBusNumber != 1) {
return noErr;
}

但是有人告诉我，recordingCallback 应该在每个渲染周期中为每条总线各调用一次，即先以 inBusNumber == 0 调用，再以 inBusNumber == 1 调用，分别对应输出总线（RemoteIO 输出）和输入总线（录音总线）。

我该怎么做才能在我的输入总线上调用 recordingCallback 以便我可以录音?谢谢。

—— Pier（提问者署名，原文 “Pier.” 被机器误译为“码头”）。

这是回调。

// Render-notify / input callback for the RemoteIO unit.
// Pulls recorded samples from the input bus (bus 1) via AudioUnitRender and,
// when the recording flag is set, appends them to a TPCircularBuffer.
// NOTE(review): RIO, NUMBER_OF_SAMPLES and TPCircularBufferProduceBytes are
// declared elsewhere in the project — not visible in this excerpt.
static OSStatus recordingCallback(void *inRefCon,
                                  AudioUnitRenderActionFlags *ioActionFlags,
                                  const AudioTimeStamp *inTimeStamp,
                                  UInt32 inBusNumber,
                                  UInt32 inNumberFrames,
                                  AudioBufferList *ioData) {
    // NOTE(review): NSLog inside a real-time render callback can cause audio
    // glitches; kept because the original used it for debugging.
    NSLog(@"Entered recording callback");

    // Only act pre-render, and only on bus 1 (the input/recording bus).
    if (*ioActionFlags & kAudioUnitRenderAction_PostRender || inBusNumber != 1) {
        return noErr;
    }

    RIO *rio = (RIO *)inRefCon;
    AudioUnit rioUnit = rio->theAudioUnit;
    //ExtAudioFileRef eaf = rio->outEAF;
    AudioBufferList abl = rio->audioBufferList;

    // The stream format configured on the unit is 16-bit mono PCM, so the
    // scratch buffer must be SInt16. (The original declared SInt32, which
    // mismatched the sizeof(SInt16) byte size set below.)
    SInt16 samples[NUMBER_OF_SAMPLES]; // Large enough to avoid buffer overrun
    abl.mNumberBuffers = 1;
    abl.mBuffers[0].mData = samples; // decay to pointer; &samples had the wrong type
    abl.mBuffers[0].mNumberChannels = 1;
    abl.mBuffers[0].mDataByteSize = inNumberFrames * sizeof(SInt16);

    OSStatus result = AudioUnitRender(rioUnit,
                                      ioActionFlags,
                                      inTimeStamp,
                                      inBusNumber,
                                      inNumberFrames,
                                      &abl);

    // OSStatus is SInt32; %ld without a cast is wrong on 64-bit — cast explicitly.
    if (noErr != result) {
        NSLog(@"Obtain recorded samples error! Error : %d", (int)result);
    }

    NSLog(@"Bus %lu", (unsigned long)inBusNumber);

    // If recording, copy the freshly rendered samples into our circular buffer.
    if (rio->recording) {
        TPCircularBufferProduceBytes(&rio->buffer,
                                     abl.mBuffers[0].mData,
                                     inNumberFrames * sizeof(SInt16));
        //rio->timeIncurred += ((float)inNumberFrames) / 44100.0;
        //NSLog(@"Self-calculated time incurred: %f", rio->timeIncurred);
    }

    return noErr;
}

这是调用回调的代码。

// Creates and configures the RemoteIO audio unit for simultaneous recording
// (input bus 1) and playback (output bus 0), then registers recordingCallback
// as a render-notify callback on the unit.
// NOTE(review): `status`, `audioFormat`, `rio`, `_remoteIOUnit`,
// `_graphSampleRate`, `kInputBus` and `kOutputBus` are declared elsewhere in
// the file (ivars/globals) — not visible in this excerpt. The method's closing
// brace is also outside this excerpt.
- (void)setupAudioUnitRemoteIO {

UInt32 framesPerSlice = 0;
UInt32 framesPerSlicePropertySize = sizeof (framesPerSlice);
// NOTE(review): sampleRatePropertySize is never used in the code shown here.
UInt32 sampleRatePropertySize = sizeof (_graphSampleRate);

// Describe audio component: the RemoteIO (hardware) I/O unit.
AudioComponentDescription desc;
desc.componentType = kAudioUnitType_Output;
desc.componentSubType = kAudioUnitSubType_RemoteIO;
desc.componentFlags = 0;
desc.componentFlagsMask = 0;
desc.componentManufacturer = kAudioUnitManufacturer_Apple;

// Get component
AudioComponent inputComponent = AudioComponentFindNext(NULL, &desc);

// Get audio units
status = AudioComponentInstanceNew(inputComponent, &_remoteIOUnit);

if (noErr != status) { NSLog(@"Get audio units error"); return; }

// Enable IO for recording: input scope of the input element (bus 1),
// i.e. the microphone side of the unit.
UInt32 flag = 1;
status = AudioUnitSetProperty(_remoteIOUnit,
kAudioOutputUnitProperty_EnableIO,
kAudioUnitScope_Input,
kInputBus,
&flag,
sizeof(flag));
if (noErr != status) { NSLog(@"Enable IO for recording error"); return; }

// Enable IO for playback: output scope of the output element (bus 0),
// i.e. the speaker side of the unit.
status = AudioUnitSetProperty(_remoteIOUnit,
kAudioOutputUnitProperty_EnableIO,
kAudioUnitScope_Output,
kOutputBus,
&flag,
sizeof(flag));

if (noErr != status) { NSLog(@"Enable IO for playback error"); return; }

// Obtain the value of the maximum-frames-per-slice from the I/O unit.
// NOTE(review): the returned status is not checked, and framesPerSlice is
// not used afterwards in the code shown here.
status = AudioUnitGetProperty (
_remoteIOUnit,
kAudioUnitProperty_MaximumFramesPerSlice,
kAudioUnitScope_Global,
0,
&framesPerSlice,
&framesPerSlicePropertySize
);

// Describe format: 44.1 kHz, mono, packed 16-bit signed-integer linear PCM
// (2 bytes per frame / per packet, 1 frame per packet).

audioFormat.mSampleRate = 44100.00;
audioFormat.mFormatID = kAudioFormatLinearPCM;
audioFormat.mFormatFlags = kAudioFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked;
audioFormat.mFramesPerPacket = 1;
audioFormat.mChannelsPerFrame = 1;
audioFormat.mBitsPerChannel = 16;
audioFormat.mBytesPerPacket = 2;
audioFormat.mBytesPerFrame = 2;

// Apply format to the output scope of the input bus — the side of bus 1
// that delivers captured audio to the app.
status = AudioUnitSetProperty(_remoteIOUnit,
kAudioUnitProperty_StreamFormat,
kAudioUnitScope_Output,
kInputBus,
&audioFormat,
sizeof(audioFormat));

if (noErr != status) { NSLog(@"Apply format to input bus error"); return; }

// Apply the same format to the input scope of the output bus — the side of
// bus 0 that receives audio from the app for playback.
status = AudioUnitSetProperty(_remoteIOUnit,
kAudioUnitProperty_StreamFormat,
kAudioUnitScope_Input,
kOutputBus,
&audioFormat,
sizeof(audioFormat));
if (noErr != status) { NSLog(@"Apply format to output bus error"); return; }




rio.theAudioUnit = _remoteIOUnit; // Need this, as used in callbacks to refer to remoteIO

// Register recordingCallback as a render-notify callback.
// NOTE(review): render-notify callbacks fire around the output render cycle;
// per the question and accepted answer below, this approach did not deliver
// input-bus samples — kAudioOutputUnitProperty_SetInputCallback did.
AURenderCallbackStruct callbackStruct;
callbackStruct.inputProc = recordingCallback;
callbackStruct.inputProcRefCon = &rio;
status = AudioUnitAddRenderNotify(_remoteIOUnit, callbackStruct.inputProc, callbackStruct.inputProcRefCon);

NSAssert (status == noErr, @"Problem adding recordingCallback to RemoteIO. Error code: %d '%.4s'", (int) status, (const char *)&status);

最佳答案

我设法通过不使用 AudioUnitAddRenderNotify 并使用以下代码解决了这个问题。

// Working alternative from the accepted answer: instead of a render-notify,
// install recordingCallback as the RemoteIO unit's dedicated input callback
// via kAudioOutputUnitProperty_SetInputCallback on the input bus. The callback
// then fires whenever new microphone data is available on bus 1.
AURenderCallbackStruct callbackStruct;
callbackStruct.inputProc = recordingCallback;
callbackStruct.inputProcRefCon = &rio;
status = AudioUnitSetProperty(_remoteIOUnit,
kAudioOutputUnitProperty_SetInputCallback,
kAudioUnitScope_Global,
kInputBus,
&callbackStruct,
sizeof(callbackStruct));
if (noErr != status) { NSLog(@"Set input callback error"); return; }

改为在输入总线上

关于core-audio - iOS RemoteIO - AudioUnitAddRenderNotify 回调,我们在Stack Overflow上找到一个类似的问题: https://stackoverflow.com/questions/13263064/

25 4 0
Copyright 2021 - 2024 cfsdn All Rights Reserved 蜀ICP备2022000587号
广告合作:1813099741@qq.com 6ren.com