The recorder below only works the first time. If you try to record a second time, it fails with the error "kAudioFileInvalidPacketOffsetError" when AudioFileWritePackets is called.
Any idea why this happens?
Thanks in advance.
Recorder
import UIKit
import CoreAudio
import AudioToolbox
class SpeechRecorder: NSObject {
static let sharedInstance = SpeechRecorder()
// MARK:- properties
@objc enum Status: Int {
case ready
case busy
case error
}
internal struct RecordState {
var format: AudioStreamBasicDescription
var queue: UnsafeMutablePointer<AudioQueueRef?>
var buffers: [AudioQueueBufferRef?]
var file: AudioFileID?
var currentPacket: Int64
var recording: Bool
};
private var recordState: RecordState?
var format: AudioFormatID {
get { return recordState!.format.mFormatID }
set { recordState!.format.mFormatID = newValue }
}
var sampleRate: Float64 {
get { return recordState!.format.mSampleRate }
set { recordState!.format.mSampleRate = newValue }
}
var formatFlags: AudioFormatFlags {
get { return recordState!.format.mFormatFlags }
set { recordState!.format.mFormatFlags = newValue }
}
var channelsPerFrame: UInt32 {
get { return recordState!.format.mChannelsPerFrame }
set { recordState!.format.mChannelsPerFrame = newValue }
}
var bitsPerChannel: UInt32 {
get { return recordState!.format.mBitsPerChannel }
set { recordState!.format.mBitsPerChannel = newValue }
}
var framesPerPacket: UInt32 {
get { return recordState!.format.mFramesPerPacket }
set { recordState!.format.mFramesPerPacket = newValue }
}
var bytesPerFrame: UInt32 {
get { return recordState!.format.mBytesPerFrame }
set { recordState!.format.mBytesPerFrame = newValue }
}
var bytesPerPacket: UInt32 {
get { return recordState!.format.mBytesPerPacket }
set { recordState!.format.mBytesPerPacket = newValue }
}
//MARK: - Handlers
public var handler: ((Status) -> Void)?
// MARK:- Init
override init()
{
super.init()
self.recordState = RecordState(format: AudioStreamBasicDescription(),
queue: UnsafeMutablePointer<AudioQueueRef?>.allocate(capacity: 1),
buffers: [AudioQueueBufferRef?](repeating: nil, count: 1),
file: nil,
currentPacket: 0,
recording: false)
}//eom
// MARK:- OutputFile
func setOutputFile(path: String)
{
setOutputFile(url: URL(fileURLWithPath: path))
}
func setOutputFile(url: URL)
{
AudioFileCreateWithURL(url as CFURL,
kAudioFileWAVEType,
&recordState!.format,
AudioFileFlags.dontPageAlignAudioData.union(.eraseFile),
&recordState!.file)
}
// MARK:- Start / Stop Recording
func start()
{
handler?(.busy)
let inputAudioQueue: AudioQueueInputCallback =
{ (userData: UnsafeMutableRawPointer?,
audioQueue: AudioQueueRef,
bufferQueue: AudioQueueBufferRef,
startTime: UnsafePointer<AudioTimeStamp>,
packets: UInt32,
packetDescription: UnsafePointer<AudioStreamPacketDescription>?) in
let internalRSP = unsafeBitCast(userData, to: UnsafeMutablePointer<RecordState>.self)
if packets > 0
{
var packetsReceived = packets
let outputStream:OSStatus = AudioFileWritePackets(internalRSP.pointee.file!,
false,
bufferQueue.pointee.mAudioDataByteSize,
packetDescription,
internalRSP.pointee.currentPacket,
&packetsReceived,
bufferQueue.pointee.mAudioData)
if outputStream != 0
{
// This is where the error occurs when recording after the first time
//<----DEBUG
switch outputStream
{
case kAudioFilePermissionsError:
print("kAudioFilePermissionsError")
break
case kAudioFileNotOptimizedError:
print("kAudioFileNotOptimizedError")
break
case kAudioFileInvalidChunkError:
print("kAudioFileInvalidChunkError")
break
case kAudioFileDoesNotAllow64BitDataSizeError:
print("kAudioFileDoesNotAllow64BitDataSizeError")
break
case kAudioFileInvalidPacketOffsetError:
print("kAudioFileInvalidPacketOffsetError")
break
case kAudioFileInvalidFileError:
print("kAudioFileInvalidFileError")
break
case kAudioFileOperationNotSupportedError:
print("kAudioFileOperationNotSupportedError")
break
case kAudioFileNotOpenError:
print("kAudioFileNotOpenError")
break
case kAudioFileEndOfFileError:
print("kAudioFileEndOfFileError")
break
case kAudioFilePositionError:
print("kAudioFilePositionError")
break
case kAudioFileFileNotFoundError:
print("kAudioFileFileNotFoundError")
break
case kAudioFileUnspecifiedError:
print("kAudioFileUnspecifiedError")
break
case kAudioFileUnsupportedFileTypeError:
print("kAudioFileUnsupportedFileTypeError")
break
case kAudioFileUnsupportedDataFormatError:
print("kAudioFileUnsupportedDataFormatError")
break
case kAudioFileUnsupportedPropertyError:
print("kAudioFileUnsupportedPropertyError")
break
case kAudioFileBadPropertySizeError:
print("kAudioFileBadPropertySizeError")
break
default:
print("unknown error")
break
}
//<----DEBUG
}
internalRSP.pointee.currentPacket += Int64(packetsReceived)
}
if internalRSP.pointee.recording
{
let outputStream:OSStatus = AudioQueueEnqueueBuffer(audioQueue, bufferQueue, 0, nil)
if outputStream != 0
{
// This is where the error occurs when recording after the first time
//<----DEBUG
switch outputStream
{
case kAudioFilePermissionsError:
print("kAudioFilePermissionsError")
break
case kAudioFileNotOptimizedError:
print("kAudioFileNotOptimizedError")
break
case kAudioFileInvalidChunkError:
print("kAudioFileInvalidChunkError")
break
case kAudioFileDoesNotAllow64BitDataSizeError:
print("kAudioFileDoesNotAllow64BitDataSizeError")
break
case kAudioFileInvalidPacketOffsetError:
print("kAudioFileInvalidPacketOffsetError")
break
case kAudioFileInvalidFileError:
print("kAudioFileInvalidFileError")
break
case kAudioFileOperationNotSupportedError:
print("kAudioFileOperationNotSupportedError")
break
case kAudioFileNotOpenError:
print("kAudioFileNotOpenError")
break
case kAudioFileEndOfFileError:
print("kAudioFileEndOfFileError")
break
case kAudioFilePositionError:
print("kAudioFilePositionError")
break
case kAudioFileFileNotFoundError:
print("kAudioFileFileNotFoundError")
break
case kAudioFileUnspecifiedError:
print("kAudioFileUnspecifiedError")
break
case kAudioFileUnsupportedFileTypeError:
print("kAudioFileUnsupportedFileTypeError")
break
case kAudioFileUnsupportedDataFormatError:
print("kAudioFileUnsupportedDataFormatError")
break
case kAudioFileUnsupportedPropertyError:
print("kAudioFileUnsupportedPropertyError")
break
case kAudioFileBadPropertySizeError:
print("kAudioFileBadPropertySizeError")
break
default:
print("unknown error")
break
}
//<----DEBUG
}
}
}
let queueResults = AudioQueueNewInput(&recordState!.format, inputAudioQueue, &recordState, nil, nil, 0, recordState!.queue)
if queueResults == 0
{
let bufferByteSize: Int = calculate(format: recordState!.format, seconds: 0.5)
for index in (0..<recordState!.buffers.count)
{
AudioQueueAllocateBuffer(recordState!.queue.pointee!, UInt32(bufferByteSize), &recordState!.buffers[index])
AudioQueueEnqueueBuffer(recordState!.queue.pointee!, recordState!.buffers[index]!, 0, nil)
}
AudioQueueStart(recordState!.queue.pointee!, nil)
recordState?.recording = true
}
else
{
print("Error setting audio input.")
handler?(.error)
}
}//eom
func stop()
{
recordState?.recording = false
if let recordingState: RecordState = recordState
{
AudioQueueStop(recordingState.queue.pointee!, true)
AudioQueueDispose(recordingState.queue.pointee!, true)
AudioFileClose(recordingState.file!)
handler?(.ready)
}
}//eom
// MARK:- Helper methods
func calculate(format: AudioStreamBasicDescription, seconds: Double) -> Int
{
let framesRequiredForBufferTime = Int(ceil(seconds * format.mSampleRate))
if framesRequiredForBufferTime > 0
{
return (framesRequiredForBufferTime * Int(format.mBytesPerFrame))
}
else
{
var maximumPacketSize = UInt32(0)
if format.mBytesPerPacket > 0
{
maximumPacketSize = format.mBytesPerPacket
}
else
{
audioQueueProperty(propertyId: kAudioQueueProperty_MaximumOutputPacketSize, value: &maximumPacketSize)
}
var packets = 0
if format.mFramesPerPacket > 0
{
packets = (framesRequiredForBufferTime / Int(format.mFramesPerPacket))
} else
{
packets = framesRequiredForBufferTime
}
if packets == 0
{
packets = 1
}
return (packets * Int(maximumPacketSize))
}
}//eom
func audioQueueProperty<T>(propertyId: AudioQueuePropertyID, value: inout T)
{
let propertySize = UnsafeMutablePointer<UInt32>.allocate(capacity: 1)
propertySize.pointee = UInt32(MemoryLayout<T>.size)
let queueResults = AudioQueueGetProperty(recordState!.queue.pointee!, propertyId, &value, propertySize)
propertySize.deallocate(capacity: 1)
if queueResults != 0 {
print("Unable to get audio queue property.")
}
}//eom
}
View Controller
import UIKit
import AudioToolbox
class ViewController: UIViewController {
//MARK: - Properties
var recorder:SpeechRecorder?
@IBOutlet weak var startStopRecordingButton: UIButton!
//MARK: - Lifecycle
override func viewDidLoad() {
super.viewDidLoad()
//having same recorder gives error
recorder = SpeechRecorder()
}
//MARK: - Start / End Recording
func startRecording()
{
//alloc/init recorder everytime we start recording gives no error
//recorder = SpeechRecorder()
//settings
recorder?.format = kAudioFormatLinearPCM
recorder?.sampleRate = 16000;
recorder?.channelsPerFrame = 1
recorder?.bitsPerChannel = 16
recorder?.framesPerPacket = 1
recorder?.bytesPerFrame = ((recorder!.channelsPerFrame * recorder!.bitsPerChannel) / 8)
recorder?.bytesPerPacket = recorder!.bytesPerFrame * recorder!.framesPerPacket
recorder?.formatFlags = kLinearPCMFormatFlagIsSignedInteger | kLinearPCMFormatFlagIsPacked
//outputfile
let outputfilePath:String = MyFileManager().createTempFilePathWithUniqueName("recorderAudio", andExtension: "wav")
print("temp filepath: ", outputfilePath)
recorder?.setOutputFile(path: outputfilePath)
//handler
recorder?.handler = { [weak self] status in
switch status
{
case .busy:
print("started Recording\n\n")
break
case .ready:
print("finish recorder, ready to start recording\n\n")
break
case .error:
print("error occur with recorder\n\n")
DispatchQueue.main.async
{
self?.startStopRecordingButton.isSelected = false
self?.view.backgroundColor = UIColor.white
}
break
}
}//
recorder?.start()
}//eom
func stopRecording()
{
recorder?.stop()
}//eom
//MARK: - Actions
@IBAction func startStopRecording()
{
if startStopRecordingButton.isSelected
{
startStopRecordingButton.isSelected = false
self.view.backgroundColor = UIColor.white
startStopRecordingButton.setTitle("Start Recording", for: UIControlState.normal)
self.stopRecording()
}
else
{
startStopRecordingButton.isSelected = true
self.view.backgroundColor = UIColor.green
startStopRecordingButton.setTitle("Stop Recording", for: UIControlState.normal)
self.startRecording()
}
}//eom
//MARK: - Memory
override func didReceiveMemoryWarning() {
super.didReceiveMemoryWarning()
// Dispose of any resources that can be recreated.
}
}
FileManager (creates temporary file paths)
import Foundation
@objc class MyFileManager:NSObject
{
private let unique_debug = true
private var _temporyDirectory:String = ""
//MARK: - Properties
var directory:String {
return _temporyDirectory
}
//MARK: - Init
override init() {
super.init()
_temporyDirectory = NSTemporaryDirectory()
}//eom
func createHomeDirFileUniqueWithName(_ myFileName:String, andExtension fileExtension:String)->URL
{
//filename
let time:Date = Date.init()
let dateformatter:DateFormatter = DateFormatter()
dateformatter .dateFormat = "ddMMyyyy-hh-mm-ss-a"
let tempDate:String = dateformatter .string(from: time)
let tempFileName = "\(myFileName)-\(tempDate).\(fileExtension)"
//directory
var documentsDirectory = FileManager.default.urls(for: .documentDirectory, in: .userDomainMask)[0]
documentsDirectory.appendPathComponent(tempFileName)
if unique_debug { print("\(documentsDirectory)") }
return documentsDirectory
}//eom
//MARK: - Names
func createGlobalUniqueFileName(_ myFileName:String)->String
{
let guid = ProcessInfo.processInfo.globallyUniqueString
let uniqueFileName = ("\(myFileName)_\(guid)")
if unique_debug { print("\(uniqueFileName)") }
return uniqueFileName
}//eom
func createUniqueNameWithFilename(_ myFileName:String, andExtension fileExtension:String)->String
{
//filename
let time:Date = Date.init()
let dateformatter:DateFormatter = DateFormatter()
dateformatter .dateFormat = "ddMMyyyy-hh-mm-ss-a"
let currentDateString = dateformatter .string(from: time)
let finalName = myFileName + currentDateString + "." + fileExtension
if unique_debug { print("\(finalName)") }
return finalName
}//eom
//MARK: - Paths
func createTempFilePathWithUniqueName(_ myFileName:String, andExtension fileExtension:String)->String
{
let tempFileName = self.createUniqueNameWithFilename(myFileName, andExtension: fileExtension)
let tempFile = _temporyDirectory + tempFileName
if unique_debug { print("\(tempFile)") }
return tempFile
}//eom
//MARK: - Helpers
func enumerateDirectory(directory:String)
{
do
{
let filesInDir:[String] = try FileManager.default.contentsOfDirectory(atPath: directory)
for currFile in filesInDir {
print(currFile)
}//eofl
}
catch let error
{
print("error: \(error.localizedDescription)")
}
}//eom
func doesFileExistInDirectory(filename:String) -> Bool {
do
{
let filesInDir:[String] = try FileManager.default.contentsOfDirectory(atPath: _temporyDirectory)
for currFile in filesInDir
{
print(currFile)
if currFile == filename {
return true
}
}//eofl
}
catch let error
{
print("error: \(error.localizedDescription)")
}
return false
}//eom
}//eoc
Best answer
You are not resetting the currentPacket count to zero, so on subsequent recordings you are asking AudioFileWritePackets to write to its new file starting from a non-zero packet, which it refuses to do.
The correct solution is (probably) to recreate the RecordState each time you start recording, although, somewhat inelegantly, setting
recordState!.currentPacket = 0
before the call to AudioQueueNewInput also seems to work.
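As a minimal sketch of that simpler workaround (the exact placement is an assumption, not code from the original answer), the packet counter can be cleared at the top of start(), before the audio queue is created:
// Hypothetical fix sketch: reset the packet counter before each new recording,
// so the first AudioFileWritePackets call writes at packet offset 0 again.
func start()
{
    handler?(.busy)
    recordState?.currentPacket = 0   // reset here, before AudioQueueNewInput is called
    // ... the rest of start() stays exactly as posted above ...
}
Alternatively, allocating a fresh SpeechRecorder (and with it a fresh RecordState) in startRecording(), as the commented-out line in the view controller suggests, avoids the stale counter entirely.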
For this question on ios - Swift 3 LPCM recorder | error: kAudioFileInvalidPacketOffsetError, a similar question can be found on Stack Overflow: https://stackoverflow.com/questions/40558374/