So, I have the following classes:

- A recorder (SpeechRecorder, using AudioToolbox and CoreAudio) that records audio.
- An audio player (AudioPlayer, using AVFoundation) that plays audio.

The recorder captures audio and sends it to a server; the player then plays the audio the server sends back. Recording works fine until the player touches the shared audio session with these lines:

let sharedSession = AVAudioSession.sharedInstance()
try sharedSession.setCategory(AVAudioSessionCategoryPlayback)
import UIKit
import CoreAudio
import AudioToolbox

// File-scope debug flag: the code below checks `verbose` before printing,
// and it must live at global scope to be visible inside the C audio-queue callback.
let verbose = true
class SpeechRecorder: NSObject {
static let sharedInstance = SpeechRecorder()
// MARK:- properties
@objc enum Status: Int {
case ready
case busy
case error
}
internal struct RecordState {
var format: AudioStreamBasicDescription
var queue: UnsafeMutablePointer<AudioQueueRef?>
var buffers: [AudioQueueBufferRef?]
var file: AudioFileID?
var currentPacket: Int64
var recording: Bool
};
private var _recordState: RecordState?
private var _audioURL:URL?
var format: AudioFormatID {
get { return _recordState!.format.mFormatID }
set { _recordState!.format.mFormatID = newValue }
}
var sampleRate: Float64 {
get { return _recordState!.format.mSampleRate }
set { _recordState!.format.mSampleRate = newValue }
}
var formatFlags: AudioFormatFlags {
get { return _recordState!.format.mFormatFlags }
set { _recordState!.format.mFormatFlags = newValue }
}
var channelsPerFrame: UInt32 {
get { return _recordState!.format.mChannelsPerFrame }
set { _recordState!.format.mChannelsPerFrame = newValue }
}
var bitsPerChannel: UInt32 {
get { return _recordState!.format.mBitsPerChannel }
set { _recordState!.format.mBitsPerChannel = newValue }
}
var framesPerPacket: UInt32 {
get { return _recordState!.format.mFramesPerPacket }
set { _recordState!.format.mFramesPerPacket = newValue }
}
var bytesPerFrame: UInt32 {
get { return _recordState!.format.mBytesPerFrame }
set { _recordState!.format.mBytesPerFrame = newValue }
}
var bytesPerPacket: UInt32 {
get { return _recordState!.format.mBytesPerPacket }
set { _recordState!.format.mBytesPerPacket = newValue }
}
//MARK: - Handlers
public var handler: ((_ status:Status, _ data:NSData?, _ errorDesc:String?) -> Void)?
// MARK:- Init
override init()
{
super.init()
self._recordState = RecordState(format: AudioStreamBasicDescription(),
queue: UnsafeMutablePointer<AudioQueueRef?>.allocate(capacity: 1),
buffers: [AudioQueueBufferRef?](repeating: nil, count: 1),
file: nil,
currentPacket: 0,
recording: false)
}//eom
// MARK:- OutputFile
private func getDocumentsPath()->URL
{
let paths = FileManager.default.urls(for: .documentDirectory, in: .userDomainMask)
let documentsDirectory = paths[0]
return documentsDirectory
}
func setOutputFileNameWithDocumentsDirectory(nameDesired:String)
{
_audioURL = getDocumentsPath().appendingPathComponent(nameDesired)
setOutputFile(url: _audioURL!)
}//eom
func setOutputFileNameWithTempDirectory(nameDesired:String)
{
let tempDir = NSTemporaryDirectory()
let tempURLdir = URL(fileURLWithPath: tempDir)
_audioURL = tempURLdir.appendingPathComponent(nameDesired)
setOutputFile(url: _audioURL!)
}//eom
private func setOutputFile(path: String)
{
setOutputFile(url: URL(fileURLWithPath: path))
}//eom
private func setOutputFile(url: URL)
{
AudioFileCreateWithURL(url as CFURL,
kAudioFileWAVEType,
&_recordState!.format,
AudioFileFlags.dontPageAlignAudioData.union(.eraseFile),
&_recordState!.file)
}
// MARK:- Start / Stop Recording
func start()
{
handler?(.busy, nil, nil)
self._recordState?.currentPacket = 0
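// The AudioQueue input callback is a C function pointer: it cannot capture
// self, so all state has to come in through the userData pointer.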
let inputAudioQueue: AudioQueueInputCallback =
{ (userData: UnsafeMutableRawPointer?,
audioQueue: AudioQueueRef,
bufferQueue: AudioQueueBufferRef,
startTime: UnsafePointer<AudioTimeStamp>,
packets: UInt32,
packetDescription: UnsafePointer<AudioStreamPacketDescription>?) in
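// Recover the recorder's state from the opaque userData pointer
// passed to AudioQueueNewInput below.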
let internalRSP = unsafeBitCast(userData, to: UnsafeMutablePointer<RecordState>.self)
if packets > 0
{
var packetsReceived = packets
let outputStream:OSStatus = AudioFileWritePackets(internalRSP.pointee.file!,
false,
bufferQueue.pointee.mAudioDataByteSize,
packetDescription,
internalRSP.pointee.currentPacket,
&packetsReceived,
bufferQueue.pointee.mAudioData)
if outputStream != 0
{
if verbose
{
print("Error with AudioFileWritePackets")
//<----DEBUG
switch outputStream
{
case kAudioFilePermissionsError:
print("kAudioFilePermissionsError")
break
case kAudioFileNotOptimizedError:
print("kAudioFileNotOptimizedError")
break
case kAudioFileInvalidChunkError:
print("kAudioFileInvalidChunkError")
break
case kAudioFileDoesNotAllow64BitDataSizeError:
print("kAudioFileDoesNotAllow64BitDataSizeError")
break
case kAudioFileInvalidPacketOffsetError:
print("kAudioFileInvalidPacketOffsetError")
break
case kAudioFileInvalidFileError:
print("kAudioFileInvalidFileError")
break
case kAudioFileOperationNotSupportedError:
print("kAudioFileOperationNotSupportedError")
break
case kAudioFileNotOpenError:
print("kAudioFileNotOpenError")
break
case kAudioFileEndOfFileError:
print("kAudioFileEndOfFileError")
break
case kAudioFilePositionError:
print("kAudioFilePositionError")
break
case kAudioFileFileNotFoundError:
print("kAudioFileFileNotFoundError")
break
case kAudioFileUnspecifiedError:
print("kAudioFileUnspecifiedError")
break
case kAudioFileUnsupportedFileTypeError:
print("kAudioFileUnsupportedFileTypeError")
break
case kAudioFileUnsupportedDataFormatError:
print("kAudioFileUnsupportedDataFormatError")
break
case kAudioFileUnsupportedPropertyError:
print("kAudioFileUnsupportedPropertyError")
break
case kAudioFileBadPropertySizeError:
print("kAudioFileBadPropertySizeError")
break
default:
print("unknown error")
break
}
//<----DEBUG
}
}
internalRSP.pointee.currentPacket += Int64(packetsReceived)
}
if internalRSP.pointee.recording
{
let outputStream:OSStatus = AudioQueueEnqueueBuffer(audioQueue, bufferQueue, 0, nil)
if outputStream != 0
{
if verbose
{
print("Error with AudioQueueEnqueueBuffer")
//<----DEBUG
switch outputStream
{
case kAudioFilePermissionsError:
print("kAudioFilePermissionsError")
break
case kAudioFileNotOptimizedError:
print("kAudioFileNotOptimizedError")
break
case kAudioFileInvalidChunkError:
print("kAudioFileInvalidChunkError")
break
case kAudioFileDoesNotAllow64BitDataSizeError:
print("kAudioFileDoesNotAllow64BitDataSizeError")
break
case kAudioFileInvalidPacketOffsetError:
print("kAudioFileInvalidPacketOffsetError")
break
case kAudioFileInvalidFileError:
print("kAudioFileInvalidFileError")
break
case kAudioFileOperationNotSupportedError:
print("kAudioFileOperationNotSupportedError")
break
case kAudioFileNotOpenError:
print("kAudioFileNotOpenError")
break
case kAudioFileEndOfFileError:
print("kAudioFileEndOfFileError")
break
case kAudioFilePositionError:
print("kAudioFilePositionError")
break
case kAudioFileFileNotFoundError:
print("kAudioFileFileNotFoundError")
break
case kAudioFileUnspecifiedError:
print("kAudioFileUnspecifiedError")
break
case kAudioFileUnsupportedFileTypeError:
print("kAudioFileUnsupportedFileTypeError")
break
case kAudioFileUnsupportedDataFormatError:
print("kAudioFileUnsupportedDataFormatError")
break
case kAudioFileUnsupportedPropertyError:
print("kAudioFileUnsupportedPropertyError")
break
case kAudioFileBadPropertySizeError:
print("kAudioFileBadPropertySizeError")
break
default:
print("unknown error")
break
//<----DEBUG
}
}
}
}
}
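// Caveat: &_recordState points at an Optional<RecordState>, while the callback
// bit-casts userData to UnsafeMutablePointer<RecordState>. The layouts happen
// to line up here, but this is a fragile pattern.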
let queueResults = AudioQueueNewInput(&_recordState!.format, inputAudioQueue, &_recordState, nil, nil, 0, _recordState!.queue)
if queueResults == 0
{
let bufferByteSize: Int = calculate(format: _recordState!.format, seconds: 0.5)
for index in (0..<_recordState!.buffers.count)
{
AudioQueueAllocateBuffer(_recordState!.queue.pointee!, UInt32(bufferByteSize), &_recordState!.buffers[index])
AudioQueueEnqueueBuffer(_recordState!.queue.pointee!, _recordState!.buffers[index]!, 0, nil)
}
AudioQueueStart(_recordState!.queue.pointee!, nil)
_recordState?.recording = true
}
else
{
handler?(.error, nil, "Error setting audio input.")
}
}//eom
func stop()
{
_recordState?.recording = false
if let recordingState: RecordState = _recordState
{
AudioQueueStop(recordingState.queue.pointee!, true)
AudioQueueDispose(recordingState.queue.pointee!, true)
AudioFileClose(recordingState.file!)
let audioData:NSData? = NSData(contentsOf: _audioURL!)
handler?(.ready, audioData, nil)
}
}//eom
// MARK:- Helper methods
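// Returns a buffer size in bytes large enough to hold `seconds` of audio in
// the given format, falling back to packet-based math for variable-rate formats.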
func calculate(format: AudioStreamBasicDescription, seconds: Double) -> Int
{
let framesRequiredForBufferTime = Int(ceil(seconds * format.mSampleRate))
if framesRequiredForBufferTime > 0
{
return (framesRequiredForBufferTime * Int(format.mBytesPerFrame))
}
else
{
var maximumPacketSize = UInt32(0)
if format.mBytesPerPacket > 0
{
maximumPacketSize = format.mBytesPerPacket
}
else
{
audioQueueProperty(propertyId: kAudioQueueProperty_MaximumOutputPacketSize, value: &maximumPacketSize)
}
var packets = 0
if format.mFramesPerPacket > 0
{
packets = (framesRequiredForBufferTime / Int(format.mFramesPerPacket))
} else
{
packets = framesRequiredForBufferTime
}
if packets == 0
{
packets = 1
}
return (packets * Int(maximumPacketSize))
}
}//eom
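// Generic helper: reads an audio queue property of type T into `value`.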
func audioQueueProperty<T>(propertyId: AudioQueuePropertyID, value: inout T)
{
let propertySize = UnsafeMutablePointer<UInt32>.allocate(capacity: 1)
propertySize.pointee = UInt32(MemoryLayout<T>.size)
let queueResults = AudioQueueGetProperty(_recordState!.queue.pointee!, propertyId, &value, propertySize)
propertySize.deallocate(capacity: 1)
if queueResults != 0 {
handler?(.error, nil, "Unable to get audio queue property.")
}
}//eom
}
import UIKit
import AVFoundation
protocol AudioPlayerDelegate {
func audioPlayer_playbackError(playerItemID:String, error:String)
func audioPlayer_playbackSuccess(playerItemID:String)
}
class AudioPlayer: NSObject, AVAudioPlayerDelegate
{
//properties
private var _audioPlayer:AVAudioPlayer?
var delegate:AudioPlayerDelegate?
var playerItemID:String = ""
var volume:Float?
//MARK: - Play Audio
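// Note: the audio session calls in the methods below are what the accepted
// answer identifies as interfering with the AudioToolbox recorder.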
func playAudioFromData(_ playerItemID:String, dataToPlay:Data)
{
do {
let sharedSession = AVAudioSession.sharedInstance()
try sharedSession.setCategory(AVAudioSessionCategoryPlayback)
try sharedSession.setActive(true)
_audioPlayer = try AVAudioPlayer(data: dataToPlay)
_audioPlayer?.numberOfLoops = 0
_audioPlayer?.isMeteringEnabled = true
_audioPlayer?.delegate = self
//volume
if volume != nil {
_audioPlayer?.volume = volume!
}
//id
self.playerItemID = playerItemID
_audioPlayer?.play()
}
catch let error {
self.delegate?.audioPlayer_playbackError(playerItemID: self.playerItemID, error: error.localizedDescription)
}
}//eom
func playAudioFromUrl(_ url:URL)
{
do {
let sharedSession = AVAudioSession.sharedInstance()
try sharedSession.setCategory(AVAudioSessionCategoryPlayback)
try sharedSession.setActive(true)
if FileManager.default.fileExists(atPath: url.path) {
_audioPlayer = try AVAudioPlayer(contentsOf: url)
_audioPlayer?.numberOfLoops = 0
_audioPlayer?.isMeteringEnabled = true
_audioPlayer?.delegate = self
//volume
if volume != nil {
_audioPlayer?.volume = volume!
}
//id
self.playerItemID = url.absoluteString
_audioPlayer?.play()
}
else {
self.delegate?.audioPlayer_playbackError(playerItemID: self.playerItemID, error: "audio file does not exist")
}
}
catch let error {
self.delegate?.audioPlayer_playbackError(playerItemID: self.playerItemID, error: error.localizedDescription)
}
}//eom
//MARK: - Player Options
func pausePlay()
{
_audioPlayer?.pause()
}//eom
func stopPlay()
{
_audioPlayer?.stop()
do {
let sharedSession = AVAudioSession.sharedInstance()
try sharedSession.setActive(false)
}
catch let error {
if verbose { print("un-able to set session to inactive, error: \(error)") }
}
}//eom
//MARK: - Delegates
func audioPlayerDecodeErrorDidOccur(_ player: AVAudioPlayer, error: Error?) {
//inactive session
do {
let sharedSession = AVAudioSession.sharedInstance()
try sharedSession.setActive(false)
}
catch let error {
if verbose { print("un-able to set session to inactive, error: \(error)") }
}
//report status
if error != nil {
self.delegate?.audioPlayer_playbackError(playerItemID: self.playerItemID, error: error!.localizedDescription)
}
else {
self.delegate?.audioPlayer_playbackError(playerItemID: self.playerItemID, error: "a decode error occurred")
}
//reset
self._audioPlayer?.delegate = nil
self._audioPlayer = nil
self.playerItemID = ""
}//eom
func audioPlayerDidFinishPlaying(_ player: AVAudioPlayer, successfully flag: Bool) {
//inactive session
do {
let sharedSession = AVAudioSession.sharedInstance()
try sharedSession.setActive(false)
}
catch let error {
if verbose { print("un-able to set session to inactive, error: \(error)") }
}
//report status
if flag {
delegate?.audioPlayer_playbackSuccess(playerItemID: self.playerItemID)
}
else {
delegate?.audioPlayer_playbackError(playerItemID: self.playerItemID, error: "player finished playing with an error")
}
//reset
self._audioPlayer?.delegate = nil
self._audioPlayer = nil
self.playerItemID = ""
}//eom
}//eoc
Best Answer
If you are going to use AudioToolbox and AVFoundation at the same time, you may want to be careful with the audio session: AVFoundation makes a lot of changes to the audio session under the hood.
A quick fix for your player is to remove all of the audio session calls, like this:
// let sharedSession = AVAudioSession.sharedInstance()
// try sharedSession.setCategory(AVAudioSessionCategoryPlayback)
// try sharedSession.setActive(true)
_audioPlayer?.numberOfLoops = 0
_audioPlayer?.isMeteringEnabled = true
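If you do still need explicit session management, an alternative worth trying (a minimal sketch, not part of the accepted answer; the function name and option set are assumptions) is to configure a single PlayAndRecord session once, so the AudioToolbox recorder and the AVAudioPlayer stop reconfiguring the session out from under each other:

import AVFoundation

// Sketch: configure one shared session up front (Swift 3 era API, matching
// the question's code). PlayAndRecord covers both recording and playback.
func configureSharedAudioSession() {
    let session = AVAudioSession.sharedInstance()
    do {
        // .defaultToSpeaker is only valid with the PlayAndRecord category.
        try session.setCategory(AVAudioSessionCategoryPlayAndRecord,
                                with: [.defaultToSpeaker])
        try session.setActive(true)
    } catch {
        print("Unable to configure audio session: \(error)")
    }
}

Call this once (for example at app launch) and drop the per-play setCategory/setActive calls from both the recorder and the player.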
Regarding "ios - AudioToolBox recorder gets affected by AVFoundation AudioPlayer", we found a similar question on Stack Overflow: https://stackoverflow.com/questions/41230997/