
iphone - Audio Unit, setting format fails, returns -10851


I don't understand why the following returns OSStatus -10851:

status = AudioUnitSetProperty(*audioUnit,
                              kAudioUnitProperty_StreamFormat,
                              kAudioUnitScope_Output,
                              kInputBus,
                              &outFormat,
                              sizeof(outFormat));

It works in the Simulator but not on the device.
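For context, -10851 corresponds to kAudioUnitErr_InvalidPropertyValue in AUComponent.h, i.e. the RemoteIO unit rejected the value supplied for kAudioUnitProperty_StreamFormat. A small illustrative helper like the one below (LogAudioUnitError is not part of the project, just a sketch) maps a raw OSStatus to its named constant:

#import <Foundation/Foundation.h>
#import <AudioToolbox/AudioToolbox.h>

// Illustrative helper (not from the original project): print the named
// Audio Unit error constant for a raw OSStatus returned by AudioUnitSetProperty.
static void LogAudioUnitError(OSStatus status)
{
    if (status == kAudioUnitErr_InvalidPropertyValue)       // -10851: value rejected for the property
        NSLog(@"kAudioUnitErr_InvalidPropertyValue (%d)", (int)status);
    else if (status == kAudioUnitErr_InvalidProperty)       // -10879: property not recognized
        NSLog(@"kAudioUnitErr_InvalidProperty (%d)", (int)status);
    else if (status == kAudioUnitErr_FormatNotSupported)    // -10868: format refused by the unit
        NSLog(@"kAudioUnitErr_FormatNotSupported (%d)", (int)status);
    else
        NSLog(@"OSStatus %d", (int)status);
}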

Here is the rest of the code:

#import "VoipRecorder.h"
#import <AudioToolbox/AudioToolbox.h>
#import <CoreAudio/CoreAudioTypes.h>

#define kOutputBus 0
#define kInputBus 1

void SetAUCanonical(AudioStreamBasicDescription *format, UInt32 nChannels, bool interleaved)
// note: leaves sample rate untouched
{
    format->mFormatID = kAudioFormatLinearPCM;
#if TARGET_IPHONE_SIMULATOR
    int sampleSize = sizeof(Float32);
    format->mFormatFlags = kAudioFormatFlagsNativeFloatPacked;
#else
    int sampleSize = sizeof(AudioSampleType);
    format->mFormatFlags = kAudioFormatFlagsCanonical;
#endif
    format->mBitsPerChannel = 8 * sampleSize;
    format->mChannelsPerFrame = nChannels;
    format->mFramesPerPacket = 1;
    if (interleaved)
        format->mBytesPerPacket = format->mBytesPerFrame = nChannels * sampleSize;
    else {
        format->mBytesPerPacket = format->mBytesPerFrame = sampleSize;
        format->mFormatFlags |= kAudioFormatFlagIsNonInterleaved;
    }
}

int SetupRemoteIO (AudioUnit *audioUnit, AURenderCallbackStruct inRenderProc, AURenderCallbackStruct inOutputProc, AudioStreamBasicDescription * outFormat)
{
    OSStatus status;

    // Open the output unit
    AudioComponentDescription desc;
    desc.componentType = kAudioUnitType_Output;
    desc.componentSubType = kAudioUnitSubType_RemoteIO;
    desc.componentManufacturer = kAudioUnitManufacturer_Apple;
    desc.componentFlags = 0;
    desc.componentFlagsMask = 0;

    AudioComponent comp = AudioComponentFindNext(NULL, &desc);

    AudioComponentInstanceNew(comp, audioUnit);

    UInt32 flag = 1;
    // Enable IO for recording
    status = AudioUnitSetProperty(*audioUnit,
                                  kAudioOutputUnitProperty_EnableIO,
                                  kAudioUnitScope_Input,
                                  kInputBus,
                                  &flag,
                                  sizeof(flag));

    assert(status == 0);

    // Enable IO for playback
    status = AudioUnitSetProperty(*audioUnit,
                                  kAudioOutputUnitProperty_EnableIO,
                                  kAudioUnitScope_Output,
                                  kOutputBus,
                                  &flag,
                                  sizeof(flag));

    assert(status == 0);

    // set our required format - Canonical AU format: LPCM non-interleaved 8.24 fixed point
    SetAUCanonical(outFormat, 1, NO);

    outFormat->mSampleRate = 44100.00; //8000;

    //Apply format
    status = AudioUnitSetProperty(*audioUnit,
                                  kAudioUnitProperty_StreamFormat,
                                  kAudioUnitScope_Output,
                                  kInputBus,
                                  &outFormat,
                                  sizeof(outFormat));

    assert(status == 0);

    status = AudioUnitSetProperty(*audioUnit,
                                  kAudioUnitProperty_StreamFormat,
                                  kAudioUnitScope_Input,
                                  kOutputBus,
                                  &outFormat,
                                  sizeof(outFormat));

    // Setup callbacks
    // Recording
    status = AudioUnitSetProperty(*audioUnit,
                                  kAudioOutputUnitProperty_SetInputCallback,
                                  kAudioUnitScope_Input,
                                  kInputBus,
                                  &inRenderProc,
                                  sizeof(inRenderProc));
    assert(status == 0);

    // Playback
    status = AudioUnitSetProperty(*audioUnit,
                                  kAudioUnitProperty_SetRenderCallback,
                                  kAudioUnitScope_Output,
                                  kOutputBus,
                                  &inOutputProc,
                                  sizeof(inOutputProc));
    assert(status == 0);

    status = AudioUnitInitialize(*audioUnit);
    assert(status == 0);

    return 0;
}

@implementation VoipRecorder

@synthesize audioUnit;

- (id)init
{
    self = [super init];
    if (self) {

    }

    return self;
}

void rioInterruptionListener(void *inClientData, UInt32 inInterruption)
{
    printf("Session interrupted! --- %s ---", inInterruption == kAudioSessionBeginInterruption ? "Begin Interruption" : "End Interruption");

    VoipRecorder *THIS = (VoipRecorder*)inClientData;

    if (inInterruption == kAudioSessionEndInterruption) {
        // make sure we are again the active session
        AudioSessionSetActive(true);
        AudioOutputUnitStart(THIS.audioUnit);
    }

    if (inInterruption == kAudioSessionBeginInterruption) {
        AudioOutputUnitStop(THIS.audioUnit);
    }
}

int buffer[1000000];
int bufferSize = 2;

static OSStatus PerformSpeaker(
    void *inRefCon,
    AudioUnitRenderActionFlags *ioActionFlags,
    const AudioTimeStamp *inTimeStamp,
    UInt32 inBusNumber,
    UInt32 inNumberFrames,
    AudioBufferList *ioData)
{
    NSLog(@"Speaker");

    if (bufferSize == 0) {
        return 0;
    }

    if (ioData == NULL) {
        NSLog(@"err");
        return 0;
    }

    return 0;
}

AudioBufferList *AllocateBuffers(UInt32 nBytes)
{
    int channelCount = 2;

    AudioBufferList *audioBufferList;
    audioBufferList = (AudioBufferList *)malloc(sizeof(AudioBufferList));
    audioBufferList->mNumberBuffers = 1;
    audioBufferList->mBuffers[0].mNumberChannels = channelCount;
    audioBufferList->mBuffers[0].mDataByteSize = nBytes;
    audioBufferList->mBuffers[0].mData = (AudioUnitSampleType *)malloc(nBytes);

    return audioBufferList;
}

static OSStatus PerformThru(
    void *inRefCon,
    AudioUnitRenderActionFlags *ioActionFlags,
    const AudioTimeStamp *inTimeStamp,
    UInt32 inBusNumber,
    UInt32 inNumberFrames,
    AudioBufferList *ioData)
{
    VoipRecorder *THIS = (VoipRecorder *)inRefCon;

    AudioBufferList *bufferList = AllocateBuffers(inNumberFrames*2);

    OSStatus err = AudioUnitRender(THIS.audioUnit, ioActionFlags, inTimeStamp, 1, inNumberFrames, bufferList);
    if (err) {
        printf("PerformThru: error %d\n", (int)err);
        free(bufferList);
        return err;
    }

    free(bufferList);

    return 0;
}


- (void)setupAudio {

    OSStatus status;

    inputProc.inputProc = PerformThru;
    inputProc.inputProcRefCon = self;

    outputProc.inputProc = PerformSpeaker;
    outputProc.inputProcRefCon = self;

    buffer[0] = 0x4444;
    buffer[1] = 0xffff;
    status = AudioSessionInitialize(NULL, NULL, rioInterruptionListener, self);
    assert(status == 0);

    UInt32 audioCategory = kAudioSessionCategory_PlayAndRecord;
    status = AudioSessionSetProperty(kAudioSessionProperty_AudioCategory, sizeof(audioCategory), &audioCategory);
    assert(status == 0);

    Float32 preferredBufferSize = .005;
    status = AudioSessionSetProperty(kAudioSessionProperty_PreferredHardwareIOBufferDuration, sizeof(preferredBufferSize), &preferredBufferSize);
    assert(status == 0);

    UInt32 size = sizeof(hwSampleRate);
    status = AudioSessionGetProperty(kAudioSessionProperty_CurrentHardwareSampleRate, &size, &hwSampleRate);
    assert(status == 0);

    status = AudioSessionSetActive(true);
    assert(status == 0);

    status = SetupRemoteIO(&audioUnit, inputProc, outputProc, &thruFormat);
    assert(status == 0);

    status = AudioOutputUnitStart(audioUnit);
    assert(status == 0);

    size = sizeof(thruFormat);
    status = AudioUnitGetProperty(audioUnit, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Output, 1, &thruFormat, &size);
    assert(status == 0);

    //NSLog(@"0x%X", status);
}

@end

Best Answer

Two possible things to check: the posted code mixes AudioSampleType and AudioUnitSampleType, which are two data types of different sizes. You are also specifying the kAudioFormatFlagIsNonInterleaved flag for only a single channel of data, which may not be necessary.
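To make the first point concrete, here is a minimal sketch that builds and applies a mono format using AudioUnitSampleType throughout, assuming the 8.24 fixed-point route that the comment in SetupRemoteIO describes; FillAUCanonicalMono and ApplyMonoFormat are illustrative names, not from the posted code:

#import <AudioToolbox/AudioToolbox.h>
#import <CoreAudio/CoreAudioTypes.h>
#include <string.h>

// Sketch: a mono stream format built consistently around AudioUnitSampleType
// (SInt32, 8.24 fixed point on the device) instead of mixing it with the
// 16-bit AudioSampleType used by SetAUCanonical's device branch.
static void FillAUCanonicalMono(AudioStreamBasicDescription *asbd, Float64 sampleRate)
{
    memset(asbd, 0, sizeof(*asbd));
    asbd->mSampleRate       = sampleRate;
    asbd->mFormatID         = kAudioFormatLinearPCM;
    asbd->mFormatFlags      = kAudioFormatFlagsAudioUnitCanonical; // signed int, packed, non-interleaved, 8.24
    asbd->mChannelsPerFrame = 1;
    asbd->mFramesPerPacket  = 1;
    asbd->mBitsPerChannel   = 8 * sizeof(AudioUnitSampleType);
    asbd->mBytesPerFrame    = sizeof(AudioUnitSampleType);         // per channel, since the format is non-interleaved
    asbd->mBytesPerPacket   = sizeof(AudioUnitSampleType);
}

static OSStatus ApplyMonoFormat(AudioUnit unit)
{
    AudioStreamBasicDescription asbd;
    FillAUCanonicalMono(&asbd, 44100.0);

    // The property data is the ASBD struct itself (&asbd, sizeof(asbd)),
    // set on the output scope of the input element (bus 1) for recording.
    return AudioUnitSetProperty(unit,
                                kAudioUnitProperty_StreamFormat,
                                kAudioUnitScope_Output,
                                1 /* kInputBus */,
                                &asbd,
                                sizeof(asbd));
}

With a single channel, the kAudioFormatFlagIsNonInterleaved bit included in kAudioFormatFlagsAudioUnitCanonical does not change the byte layout, so staying with AudioSampleType and kAudioFormatFlagsCanonical would work just as well; the important part is that the same sample type is also used when sizing the buffers allocated in the render callback.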

Regarding "iphone - Audio Unit, setting format fails, returns -10851", we found a similar question on Stack Overflow: https://stackoverflow.com/questions/6755277/
