
iOS Core Audio lifecycle - AVAudioIONodeImpl.mm:365 - required condition is false: hwFormat


I am developing an iOS application that consists of two main modules: an audio analysis module based on Core Audio, and an output module that uses AudioKit.

Here is the audio input class:

import AVFoundation

typealias AudioInputCallback = (
    _ timeStamp: Double,
    _ numberOfFrames: Int,
    _ samples: [Float]
) -> Void

/// Sets up an audio input session and notifies when new buffer data is available.
class AudioInputUtility: NSObject {

    private(set) var audioUnit: AudioUnit!
    var audioSession: AVAudioSession = AVAudioSession.sharedInstance()
    var sampleRate: Float
    var numberOfChannels: Int

    /// When true, performs DC offset rejection on the incoming buffer before invoking the audioInputCallback.
    var shouldPerformDCOffsetRejection: Bool = false

    private let outputBus: UInt32 = 0
    private let inputBus: UInt32 = 1
    private var audioInputCallback: AudioInputCallback!

    /// Instantiate an AudioInput.
    /// - Parameter audioInputCallback: Invoked when audio data is available.
    /// - Parameter sampleRate: The sample rate to set up the audio session with.
    /// - Parameter numberOfChannels: The number of channels to set up the audio session with.
    init(audioInputCallback callback: @escaping AudioInputCallback, sampleRate: Float = 44100.0, numberOfChannels: Int = 1) { // default values if not specified
        self.sampleRate = sampleRate
        self.numberOfChannels = numberOfChannels
        audioInputCallback = callback
    }

    /// Start recording. Prompts for access to microphone if necessary.
    func startRecording() {
        do {
            if self.audioUnit == nil {
                setupAudioSession()
                setupAudioUnit()
            }

            try self.audioSession.setActive(true)

            var osErr: OSStatus = 0

            osErr = AudioUnitInitialize(self.audioUnit)
            assert(osErr == noErr, "*** AudioUnitInitialize err \(osErr)")

            osErr = AudioOutputUnitStart(self.audioUnit)
            assert(osErr == noErr, "*** AudioOutputUnitStart err \(osErr)")
        } catch {
            print("*** startRecording error: \(error)")
        }
    }

    /// Stop recording.
    func stopRecording() {
        do {
            var osErr: OSStatus = 0

            osErr = AudioOutputUnitStop(self.audioUnit)
            assert(osErr == noErr, "*** AudioOutputUnitStop err \(osErr)")

            osErr = AudioUnitUninitialize(self.audioUnit)
            assert(osErr == noErr, "*** AudioUnitUninitialize err \(osErr)")

            try self.audioSession.setActive(false)
        } catch {
            print("*** error: \(error)")
        }
    }

    private let recordingCallback: AURenderCallback = { (inRefCon, ioActionFlags, inTimeStamp, inBusNumber, inNumberFrames, ioData) -> OSStatus in

        let audioInput = unsafeBitCast(inRefCon, to: AudioInputUtility.self)
        var osErr: OSStatus = 0

        // We've asked CoreAudio to allocate buffers for us, so just set mData to nil and it will be populated on AudioUnitRender().
        var bufferList = AudioBufferList(
            mNumberBuffers: 1,
            mBuffers: AudioBuffer(
                mNumberChannels: UInt32(audioInput.numberOfChannels),
                mDataByteSize: 4,
                mData: nil))

        osErr = AudioUnitRender(audioInput.audioUnit,
                                ioActionFlags,
                                inTimeStamp,
                                inBusNumber,
                                inNumberFrames,
                                &bufferList)
        assert(osErr == noErr, "*** AudioUnitRender err \(osErr)")

        // Move samples from mData into our native [Float] format.
        var monoSamples = [Float]()
        let ptr = bufferList.mBuffers.mData?.assumingMemoryBound(to: Float.self)
        monoSamples.append(contentsOf: UnsafeBufferPointer(start: ptr, count: Int(inNumberFrames)))

        if audioInput.shouldPerformDCOffsetRejection {
            DCRejectionFilterProcessInPlace(&monoSamples, count: Int(inNumberFrames))
        }

        // Not compatible with Obj-C...
        audioInput.audioInputCallback(inTimeStamp.pointee.mSampleTime / Double(audioInput.sampleRate),
                                      Int(inNumberFrames),
                                      monoSamples)

        return 0
    }

    private func setupAudioSession() {

        if !audioSession.availableCategories.contains(AVAudioSessionCategoryRecord) {
            print("can't record! bailing.")
            return
        }

        do {
            // https://developer.apple.com/reference/avfoundation/avaudiosession/1669963-audio_session_categories
            try audioSession.setCategory(AVAudioSessionCategoryRecord)

            // "Appropriate for applications that wish to minimize the effect of system-supplied signal processing for input and/or output audio signals."
            // NB: This turns off the high-pass filter that CoreAudio normally applies.
            try audioSession.setMode(AVAudioSessionModeMeasurement)

            try audioSession.setPreferredSampleRate(Double(sampleRate))

            // NB: This is considered a 'hint' and more often than not is just ignored.
            // Preferred buffer duration in seconds -> I want 1024 samples.
            try audioSession.setPreferredIOBufferDuration(0.05)

            audioSession.requestRecordPermission { (granted) -> Void in
                if !granted {
                    print("*** record permission denied")
                }
            }
        } catch {
            print("*** audioSession error: \(error)")
        }
    }

    private func setupAudioUnit() {

        var componentDesc: AudioComponentDescription = AudioComponentDescription(
            componentType: OSType(kAudioUnitType_Output),
            componentSubType: OSType(kAudioUnitSubType_RemoteIO), // Always this for iOS.
            componentManufacturer: OSType(kAudioUnitManufacturer_Apple),
            componentFlags: 0,
            componentFlagsMask: 0)

        var osErr: OSStatus = 0

        // Get an audio component matching our description.
        let component: AudioComponent! = AudioComponentFindNext(nil, &componentDesc)
        assert(component != nil, "Couldn't find a default component")

        // Create an instance of the AudioUnit
        var tempAudioUnit: AudioUnit?
        osErr = AudioComponentInstanceNew(component, &tempAudioUnit)
        self.audioUnit = tempAudioUnit
        assert(osErr == noErr, "*** AudioComponentInstanceNew err \(osErr)")

        // Enable I/O for input.
        var one: UInt32 = 1

        osErr = AudioUnitSetProperty(audioUnit,
                                     kAudioOutputUnitProperty_EnableIO,
                                     kAudioUnitScope_Input,
                                     inputBus,
                                     &one,
                                     UInt32(MemoryLayout<UInt32>.size))
        assert(osErr == noErr, "*** AudioUnitSetProperty err \(osErr)")

        osErr = AudioUnitSetProperty(audioUnit,
                                     kAudioOutputUnitProperty_EnableIO,
                                     kAudioUnitScope_Output,
                                     outputBus,
                                     &one,
                                     UInt32(MemoryLayout<UInt32>.size))
        assert(osErr == noErr, "*** AudioUnitSetProperty err \(osErr)")

        // Set format to 32 bit, floating point, linear PCM
        var streamFormatDesc: AudioStreamBasicDescription = AudioStreamBasicDescription(
            mSampleRate: Double(sampleRate),
            mFormatID: kAudioFormatLinearPCM,
            mFormatFlags: kAudioFormatFlagsNativeFloatPacked | kAudioFormatFlagIsNonInterleaved, // floating point data - docs say this is fastest
            mBytesPerPacket: 4,
            mFramesPerPacket: 1,
            mBytesPerFrame: 4,
            mChannelsPerFrame: UInt32(self.numberOfChannels),
            mBitsPerChannel: 4 * 8,
            mReserved: 0
        )

        // Set format for input and output busses
        osErr = AudioUnitSetProperty(audioUnit,
                                     kAudioUnitProperty_StreamFormat,
                                     kAudioUnitScope_Input,
                                     outputBus,
                                     &streamFormatDesc,
                                     UInt32(MemoryLayout<AudioStreamBasicDescription>.size))
        assert(osErr == noErr, "*** AudioUnitSetProperty err \(osErr)")

        osErr = AudioUnitSetProperty(audioUnit,
                                     kAudioUnitProperty_StreamFormat,
                                     kAudioUnitScope_Output,
                                     inputBus,
                                     &streamFormatDesc,
                                     UInt32(MemoryLayout<AudioStreamBasicDescription>.size))
        assert(osErr == noErr, "*** AudioUnitSetProperty err \(osErr)")

        // Set up our callback.
        var inputCallbackStruct = AURenderCallbackStruct(inputProc: recordingCallback,
                                                         inputProcRefCon: UnsafeMutableRawPointer(Unmanaged.passUnretained(self).toOpaque()))
        osErr = AudioUnitSetProperty(audioUnit,
                                     AudioUnitPropertyID(kAudioOutputUnitProperty_SetInputCallback),
                                     AudioUnitScope(kAudioUnitScope_Global),
                                     inputBus,
                                     &inputCallbackStruct,
                                     UInt32(MemoryLayout<AURenderCallbackStruct>.size))
        assert(osErr == noErr, "*** AudioUnitSetProperty err \(osErr)")

        // Ask CoreAudio to allocate buffers for us on render. (This is true by default but just to be explicit about it...)
        osErr = AudioUnitSetProperty(audioUnit,
                                     AudioUnitPropertyID(kAudioUnitProperty_ShouldAllocateBuffer),
                                     AudioUnitScope(kAudioUnitScope_Output),
                                     inputBus,
                                     &one,
                                     UInt32(MemoryLayout<UInt32>.size))
        assert(osErr == noErr, "*** AudioUnitSetProperty err \(osErr)")
    }
}

private func DCRejectionFilterProcessInPlace(_ audioData: inout [Float], count: Int) {

    let defaultPoleDist: Float = 0.975
    var mX1: Float = 0
    var mY1: Float = 0

    for i in 0..<count {
        let xCurr: Float = audioData[i]
        audioData[i] = audioData[i] - mX1 + (defaultPoleDist * mY1)
        mX1 = xCurr
        mY1 = audioData[i]
    }
}
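
For reference, a minimal sketch of how this class might be wired up; the variable name audioInput and the print statement are illustrative assumptions, not part of the actual project:

// Illustrative only: instantiating AudioInputUtility and starting/stopping capture.
let audioInput = AudioInputUtility(audioInputCallback: { timeStamp, numberOfFrames, samples in
    // Invoked on Core Audio's I/O thread whenever a new buffer is available.
    print("t = \(timeStamp), frames = \(numberOfFrames), first sample = \(samples.first ?? 0)")
}, sampleRate: 44100.0, numberOfChannels: 1)

audioInput.startRecording()
// ... later ...
audioInput.stopRecording()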

Here is the output class:

private func initPlayer() {
    do {
        /*
        let audioSession: AVAudioSession = AVAudioSession.sharedInstance()
        //try audioSession.setActive(false)
        try audioSession.setCategory(AVAudioSessionCategoryPlayback)
        */

        // http://audiokit.io/playgrounds/Playback/Reading%20and%20Writing%20Audio%20Files/
        let file = try AKAudioFile(readFileName: self.soundPath,
                                   baseDir: .resources)

        self.player = try AKAudioPlayer(file: file)

        // player options
        self.player!.looping = true

        AKSettings.playbackWhileMuted = true
        try AKSettings.setSession(category: .playback)
        AudioKit.output = self.player
    } catch {
        print("Unresolved error \(error)")
    }
}

public func stopMaskingSound() {

    if player!.isPlaying {
        self.player!.stop()
    }

    if audioKitIsStarted == true {
        AudioKit.stop()
        self.audioKitIsStarted = false
    }
}
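
The function that starts playback is not shown in the question; as a rough sketch of what its counterpart might look like with this AudioKit version (the name playMaskingSound is an assumption, and newer AudioKit releases require try AudioKit.start()):

// Hypothetical counterpart to stopMaskingSound(), for illustration only.
public func playMaskingSound() {

    if audioKitIsStarted == false {
        AudioKit.start() // newer AudioKit versions: try AudioKit.start()
        self.audioKitIsStarted = true
    }

    if !player!.isPlaying {
        self.player!.play()
    }
}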

As you can see, audio input and output are managed by two different classes.

The problem I am having is that if I perform the following steps:

1) Initialize the player and the recorder -> stop them
2) Play the output -> stop it
3) Re-initialize the player

at step 3 I get this exception:

[central] 54:   ERROR:    [0x16dfc3000] >avae> AVAudioIONodeImpl.mm:365: _GetHWFormat: required condition is false: hwFormat
*** Terminating app due to uncaught exception 'com.apple.coreaudio.avfaudio', reason: 'required condition is false: hwFormat'

Does anyone know what it is related to? Is there any lifecycle issue between AudioKit and Core Audio?

Best answer

Stopping and restarting audio units can be problematic, because parts of the audio processing actually stop on one or more other threads. One possible workaround is to allow a delay of roughly one second between stopping and restarting, to give the RemoteIO unit time to stop asynchronously before you attempt to re-initialize it.
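
A rough sketch of that workaround, assuming an AudioInputUtility instance named audioInput and the initPlayer()/stopMaskingSound() functions from the question (the one-second figure is the suggested allowance, not a measured value):

// Stop both sides first...
audioInput.stopRecording()
stopMaskingSound()

// ...then wait about one second before re-initializing, so the RemoteIO unit
// can finish shutting down asynchronously on its own threads.
DispatchQueue.main.asyncAfter(deadline: .now() + 1.0) {
    self.initPlayer()
    self.audioInput.startRecording()
}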

Regarding "iOS Core Audio lifecycle - AVAudioIONodeImpl.mm:365 - required condition is false: hwFormat", there is a similar question on Stack Overflow: https://stackoverflow.com/questions/42282929/
