So, I have the following classes: a SpeechRecorder (which uses AudioToolbox and CoreAudio) to record audio, and an AudioPlayer (which uses AVFoundation) to play the audio back. As the title says, the recorder gets affected whenever the AVFoundation player is used. The player configures the audio session like this:
let sharedSession = AVAudioSession.sharedInstance()
try sharedSession.setCategory(AVAudioSessionCategoryPlayback)
Here is the recorder class:
import UIKit
import CoreAudio
import AudioToolbox
class SpeechRecorder: NSObject {
static let sharedInstance = SpeechRecorder()
// MARK:- properties
private let verbose = true // assumed: debug-logging flag used by the error printing below; not defined elsewhere in this excerpt
@objc enum Status: Int {
case ready
case busy
case error
}
internal struct RecordState {
var format: AudioStreamBasicDescription
var queue: UnsafeMutablePointer<AudioQueueRef?>
var buffers: [AudioQueueBufferRef?]
var file: AudioFileID?
var currentPacket: Int64
var recording: Bool
};
private var _recordState: RecordState?
private var _audioURL:URL?
var format: AudioFormatID {
get { return _recordState!.format.mFormatID }
set { _recordState!.format.mFormatID = newValue }
}
var sampleRate: Float64 {
get { return _recordState!.format.mSampleRate }
set { _recordState!.format.mSampleRate = newValue }
}
var formatFlags: AudioFormatFlags {
get { return _recordState!.format.mFormatFlags }
set { _recordState!.format.mFormatFlags = newValue }
}
var channelsPerFrame: UInt32 {
get { return _recordState!.format.mChannelsPerFrame }
set { _recordState!.format.mChannelsPerFrame = newValue }
}
var bitsPerChannel: UInt32 {
get { return _recordState!.format.mBitsPerChannel }
set { _recordState!.format.mBitsPerChannel = newValue }
}
var framesPerPacket: UInt32 {
get { return _recordState!.format.mFramesPerPacket }
set { _recordState!.format.mFramesPerPacket = newValue }
}
var bytesPerFrame: UInt32 {
get { return _recordState!.format.mBytesPerFrame }
set { _recordState!.format.mBytesPerFrame = newValue }
}
var bytesPerPacket: UInt32 {
get { return _recordState!.format.mBytesPerPacket }
set { _recordState!.format.mBytesPerPacket = newValue }
}
//MARK: - Handlers
public var handler: ((_ status:Status, _ data:NSData?, _ errorDesc:String?) -> Void)?
// MARK:- Init
override init()
{
super.init()
self._recordState = RecordState(format: AudioStreamBasicDescription(),
queue: UnsafeMutablePointer<AudioQueueRef?>.allocate(capacity: 1),
buffers: [AudioQueueBufferRef?](repeating: nil, count: 1),
file: nil,
currentPacket: 0,
recording: false)
}//eom
// MARK:- OutputFile
private func getDocumentsPath()->URL
{
let paths = FileManager.default.urls(for: .documentDirectory, in: .userDomainMask)
let documentsDirectory = paths[0]
return documentsDirectory
}
func setOutputFileNameWithDocumentsDirectory(nameDesired:String)
{
_audioURL = getDocumentsPath().appendingPathComponent(nameDesired)
setOutputFile(url: _audioURL!)
}//eom
func setOutputFileNameWithTempDirectory(nameDesired:String)
{
let tempDir = NSTemporaryDirectory()
let tempURLdir = URL(fileURLWithPath: tempDir)
_audioURL = tempURLdir.appendingPathComponent(nameDesired)
setOutputFile(url: _audioURL!)
}//eom
private func setOutputFile(path: String)
{
setOutputFile(url: URL(fileURLWithPath: path))
}//eom
private func setOutputFile(url: URL)
{
AudioFileCreateWithURL(url as CFURL,
kAudioFileWAVEType,
&_recordState!.format,
AudioFileFlags.dontPageAlignAudioData.union(.eraseFile),
&_recordState!.file)
}
// MARK:- Start / Stop Recording
func start()
{
handler?(.busy, nil, nil)
self._recordState?.currentPacket = 0
let inputAudioQueue: AudioQueueInputCallback =
{ (userData: UnsafeMutableRawPointer?,
audioQueue: AudioQueueRef,
bufferQueue: AudioQueueBufferRef,
startTime: UnsafePointer<AudioTimeStamp>,
packets: UInt32,
packetDescription: UnsafePointer<AudioStreamPacketDescription>?) in
let internalRSP = unsafeBitCast(userData, to: UnsafeMutablePointer<RecordState>.self)
if packets > 0
{
var packetsReceived = packets
let outputStream:OSStatus = AudioFileWritePackets(internalRSP.pointee.file!,
false,
bufferQueue.pointee.mAudioDataByteSize,
packetDescription,
internalRSP.pointee.currentPacket,
&packetsReceived,
bufferQueue.pointee.mAudioData)
if outputStream != 0
{
if verbose
{
print("Error with AudioFileWritePackets")
//<----DEBUG
switch outputStream
{
case kAudioFilePermissionsError:
print("kAudioFilePermissionsError")
break
case kAudioFileNotOptimizedError:
print("kAudioFileNotOptimizedError")
break
case kAudioFileInvalidChunkError:
print("kAudioFileInvalidChunkError")
break
case kAudioFileDoesNotAllow64BitDataSizeError:
print("kAudioFileDoesNotAllow64BitDataSizeError")
break
case kAudioFileInvalidPacketOffsetError:
print("kAudioFileInvalidPacketOffsetError")
break
case kAudioFileInvalidFileError:
print("kAudioFileInvalidFileError")
break
case kAudioFileOperationNotSupportedError:
print("kAudioFileOperationNotSupportedError")
break
case kAudioFileNotOpenError:
print("kAudioFileNotOpenError")
break
case kAudioFileEndOfFileError:
print("kAudioFileEndOfFileError")
break
case kAudioFilePositionError:
print("kAudioFilePositionError")
break
case kAudioFileFileNotFoundError:
print("kAudioFileFileNotFoundError")
break
case kAudioFileUnspecifiedError:
print("kAudioFileUnspecifiedError")
break
case kAudioFileUnsupportedFileTypeError:
print("kAudioFileUnsupportedFileTypeError")
break
case kAudioFileUnsupportedDataFormatError:
print("kAudioFileUnsupportedDataFormatError")
break
case kAudioFileUnsupportedPropertyError:
print("kAudioFileUnsupportedPropertyError")
break
case kAudioFileBadPropertySizeError:
print("kAudioFileBadPropertySizeError")
break
default:
print("unknown error")
break
}
//<----DEBUG
}
}
internalRSP.pointee.currentPacket += Int64(packetsReceived)
}
if internalRSP.pointee.recording
{
let outputStream:OSStatus = AudioQueueEnqueueBuffer(audioQueue, bufferQueue, 0, nil)
if outputStream != 0
{
if verbose
{
print("Error with AudioQueueEnqueueBuffer")
//<----DEBUG
switch outputStream
{
case kAudioFilePermissionsError:
print("kAudioFilePermissionsError")
break
case kAudioFileNotOptimizedError:
print("kAudioFileNotOptimizedError")
break
case kAudioFileInvalidChunkError:
print("kAudioFileInvalidChunkError")
break
case kAudioFileDoesNotAllow64BitDataSizeError:
print("kAudioFileDoesNotAllow64BitDataSizeError")
break
case kAudioFileInvalidPacketOffsetError:
print("kAudioFileInvalidPacketOffsetError")
break
case kAudioFileInvalidFileError:
print("kAudioFileInvalidFileError")
break
case kAudioFileOperationNotSupportedError:
print("kAudioFileOperationNotSupportedError")
break
case kAudioFileNotOpenError:
print("kAudioFileNotOpenError")
break
case kAudioFileEndOfFileError:
print("kAudioFileEndOfFileError")
break
case kAudioFilePositionError:
print("kAudioFilePositionError")
break
case kAudioFileFileNotFoundError:
print("kAudioFileFileNotFoundError")
break
case kAudioFileUnspecifiedError:
print("kAudioFileUnspecifiedError")
break
case kAudioFileUnsupportedFileTypeError:
print("kAudioFileUnsupportedFileTypeError")
break
case kAudioFileUnsupportedDataFormatError:
print("kAudioFileUnsupportedDataFormatError")
break
case kAudioFileUnsupportedPropertyError:
print("kAudioFileUnsupportedPropertyError")
break
case kAudioFileBadPropertySizeError:
print("kAudioFileBadPropertySizeError")
break
default:
print("unknown error")
break
//<----DEBUG
}
}
}
}
}
let queueResults = AudioQueueNewInput(&_recordState!.format, inputAudioQueue, &_recordState, nil, nil, 0, _recordState!.queue)
if queueResults == 0
{
let bufferByteSize: Int = calculate(format: _recordState!.format, seconds: 0.5)
for index in (0..<_recordState!.buffers.count)
{
AudioQueueAllocateBuffer(_recordState!.queue.pointee!, UInt32(bufferByteSize), &_recordState!.buffers[index])
AudioQueueEnqueueBuffer(_recordState!.queue.pointee!, _recordState!.buffers[index]!, 0, nil)
}
AudioQueueStart(_recordState!.queue.pointee!, nil)
_recordState?.recording = true
}
else
{
handler?(.error, nil, "Error setting audio input.")
}
}//eom
func stop()
{
_recordState?.recording = false
if let recordingState: RecordState = _recordState
{
AudioQueueStop(recordingState.queue.pointee!, true)
AudioQueueDispose(recordingState.queue.pointee!, true)
AudioFileClose(recordingState.file!)
let audioData:NSData? = NSData(contentsOf: _audioURL!)
handler?(.ready, audioData, nil)
}
}//eom
// MARK:- Helper methods
func calculate(format: AudioStreamBasicDescription, seconds: Double) -> Int
{
let framesRequiredForBufferTime = Int(ceil(seconds * format.mSampleRate))
if framesRequiredForBufferTime > 0
{
return (framesRequiredForBufferTime * Int(format.mBytesPerFrame))
}
else
{
var maximumPacketSize = UInt32(0)
if format.mBytesPerPacket > 0
{
maximumPacketSize = format.mBytesPerPacket
}
else
{
audioQueueProperty(propertyId: kAudioQueueProperty_MaximumOutputPacketSize, value: &maximumPacketSize)
}
var packets = 0
if format.mFramesPerPacket > 0
{
packets = (framesRequiredForBufferTime / Int(format.mFramesPerPacket))
} else
{
packets = framesRequiredForBufferTime
}
if packets == 0
{
packets = 1
}
return (packets * Int(maximumPacketSize))
}
}//eom
func audioQueueProperty<T>(propertyId: AudioQueuePropertyID, value: inout T)
{
let propertySize = UnsafeMutablePointer<UInt32>.allocate(capacity: 1)
propertySize.pointee = UInt32(MemoryLayout<T>.size)
let queueResults = AudioQueueGetProperty(_recordState!.queue.pointee!, propertyId, &value, propertySize)
propertySize.deallocate(capacity: 1)
if queueResults != 0 {
handler?(.error, nil, "Unable to get audio queue property.")
}
}//eom
}
And here is the player class:
import UIKit
import AVFoundation
protocol AudioPlayerDelegate {
func audioPlayer_playbackError(playerItemID:String, error:String)
func audioPlayer_playbackSuccess(playerItemID:String)
}
class AudioPlayer: NSObject, AVAudioPlayerDelegate
{
//properties
private var _audioPlayer:AVAudioPlayer?
var delegate:AudioPlayerDelegate?
var playerItemID:String = ""
var volume:Float?
private let verbose = true // assumed: debug-logging flag used by the error printing below; not defined elsewhere in this excerpt
//MARK: - Play Audio
func playAudioFromData(_ playerItemID:String, dataToPlay:Data)
{
do {
let sharedSession = AVAudioSession.sharedInstance()
try sharedSession.setCategory(AVAudioSessionCategoryPlayback)
try sharedSession.setActive(true)
_audioPlayer = try AVAudioPlayer(data: dataToPlay)
_audioPlayer?.numberOfLoops = 0
_audioPlayer?.isMeteringEnabled = true
_audioPlayer?.delegate = self
//volume
if volume != nil {
_audioPlayer?.volume = volume!
}
//id
self.playerItemID = playerItemID
_audioPlayer?.play()
}
catch let error {
self.delegate?.audioPlayer_playbackError(playerItemID: self.playerItemID, error: error.localizedDescription)
}
}//eom
func playAudioFromUrl(_ url:URL)
{
do {
let sharedSession = AVAudioSession.sharedInstance()
try sharedSession.setCategory(AVAudioSessionCategoryPlayback)
try sharedSession.setActive(true)
if FileManager.default.fileExists(atPath: url.path) {
_audioPlayer = try AVAudioPlayer(contentsOf: url)
_audioPlayer?.numberOfLoops = 0
_audioPlayer?.isMeteringEnabled = true
_audioPlayer?.delegate = self
//volume
if volume != nil {
_audioPlayer?.volume = volume!
}
//id
self.playerItemID = url.absoluteString
_audioPlayer?.play()
}
else {
self.delegate?.audioPlayer_playbackError(playerItemID: self.playerItemID, error: "audio file does not exist")
}
}
catch let error {
self.delegate?.audioPlayer_playbackError(playerItemID: self.playerItemID, error: error.localizedDescription)
}
}//eom
//MARK: - Player Options
func pausePlay()
{
_audioPlayer?.pause()
}//eom
func stopPlay()
{
_audioPlayer?.stop()
do {
let sharedSession = AVAudioSession.sharedInstance()
try sharedSession.setActive(false)
}
catch let error {
if verbose { print("un-able to set session to inactive, error: \(error)") }
}
}//eom
//MARK: - Delegates
func audioPlayerDecodeErrorDidOccur(_ player: AVAudioPlayer, error: Error?) {
//inactive session
do {
let sharedSession = AVAudioSession.sharedInstance()
try sharedSession.setActive(false)
}
catch let error {
if verbose { print("un-able to set session to inactive, error: \(error)") }
}
//report status
if error != nil {
self.delegate?.audioPlayer_playbackError(playerItemID: self.playerItemID, error: error!.localizedDescription)
}
else {
self.delegate?.audioPlayer_playbackError(playerItemID: self.playerItemID, error: "decode error did occurred")
}
//reset
self._audioPlayer?.delegate = nil
self._audioPlayer = nil
self.playerItemID = ""
}//eom
func audioPlayerDidFinishPlaying(_ player: AVAudioPlayer, successfully flag: Bool) {
//inactive session
do {
let sharedSession = AVAudioSession.sharedInstance()
try sharedSession.setActive(false)
}
catch let error {
if verbose { print("un-able to set session to inactive, error: \(error)") }
}
//report status
if flag {
delegate?.audioPlayer_playbackSuccess(playerItemID: self.playerItemID)
}
else {
delegate?.audioPlayer_playbackError(playerItemID: self.playerItemID, error: "player finish playing with error")
}
//reset
self._audioPlayer?.delegate = nil
self._audioPlayer = nil
self.playerItemID = ""
}//eom
}//eoc
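For reference, here is a minimal sketch of how the two classes above might be wired together. The view controller, the 16 kHz / 16-bit mono LPCM settings, and the file name are hypothetical illustrations; the handler and delegate signatures come from the classes shown above.
import UIKit
import AudioToolbox
class RecorderDemoViewController: UIViewController, AudioPlayerDelegate {
    let player = AudioPlayer()
    override func viewDidLoad() {
        super.viewDidLoad()
        player.delegate = self
        let recorder = SpeechRecorder.sharedInstance
        // hypothetical 16 kHz, 16-bit mono LPCM settings for the WAV output
        recorder.format = kAudioFormatLinearPCM
        recorder.formatFlags = kAudioFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked
        recorder.sampleRate = 16000
        recorder.channelsPerFrame = 1
        recorder.bitsPerChannel = 16
        recorder.framesPerPacket = 1
        recorder.bytesPerFrame = 2
        recorder.bytesPerPacket = 2
        recorder.setOutputFileNameWithTempDirectory(nameDesired: "recording.wav")
        recorder.handler = { [weak self] status, data, errorDesc in
            switch status {
            case .busy:
                print("recording...")
            case .ready:
                // stop() delivers the recorded bytes; hand them to the player
                if let data = data {
                    self?.player.playAudioFromData("recording", dataToPlay: data as Data)
                }
            case .error:
                print("recorder error: \(errorDesc ?? "unknown")")
            }
        }
        recorder.start()
        // later, e.g. from a button action: recorder.stop()
    }
    // MARK:- AudioPlayerDelegate
    func audioPlayer_playbackSuccess(playerItemID: String) {
        print("finished playing \(playerItemID)")
    }
    func audioPlayer_playbackError(playerItemID: String, error: String) {
        print("playback error for \(playerItemID): \(error)")
    }
}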
Best answer
If you are going to use AudioToolbox and AVFoundation at the same time, you need to be careful with the audio session: AVFoundation makes a lot of changes to the shared AudioSession behind the scenes.
A quick fix for your player is to remove any audio session calls from it, such as the following:
let sharedSession = AVAudioSession.sharedInstance()
try sharedSession.setCategory(AVAudioSessionCategoryPlayback)
try sharedSession.setActive(true)
_audioPlayer?.numberOfLoops = 0
_audioPlayer?.isMeteringEnabled = true
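With those calls stripped out, the player method ends up looking roughly like this (a sketch based on the playAudioFromData method above; the same removal applies to the setActive calls in playAudioFromUrl, stopPlay and the delegate callbacks):
func playAudioFromData(_ playerItemID: String, dataToPlay: Data)
{
    do {
        // no AVAudioSession calls here -- AVFoundation updates the session itself
        _audioPlayer = try AVAudioPlayer(data: dataToPlay)
        _audioPlayer?.numberOfLoops = 0
        _audioPlayer?.isMeteringEnabled = true
        _audioPlayer?.delegate = self
        if volume != nil {
            _audioPlayer?.volume = volume!
        }
        self.playerItemID = playerItemID
        _audioPlayer?.play()
    }
    catch let error {
        self.delegate?.audioPlayer_playbackError(playerItemID: self.playerItemID, error: error.localizedDescription)
    }
}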
Regarding "ios - AudioToolBox Recorder gets affected by AVFoundation AudioPlayer", a similar question can be found on Stack Overflow: https://stackoverflow.com/questions/41230997/