
Swift AVAssetWriter: recording video with microphone audio and device audio into a file with ONE audio AVAssetTrack


I am recording the screen and I want to combine the microphone audio and the in-app (device) audio into a video with ONE stereo audio track. With AVAssetWriter, the setup I have creates a video file containing two separate audio tracks: a stereo track for the device audio and a mono track for the microphone audio. That is no good.

I have also tried taking the resulting video file and rebuilding a new one with the separate audio AVAssetTracks merged into a single track, using AVMutableCompositionTrack's insertTimeRange (shown below). But no matter what I try, this never merges the tracks; it only concatenates them (in sequence, not overlapping).

Can someone please tell me how to record the tracks already merged with AVAssetWriter, or how to merge them afterwards? Nothing online discusses this and actually accomplishes it. Many articles refer to using insertTimeRange, but that function concatenates the tracks. Please help.

The code I have so far:

func startRecording(withFileName fileName: String, recordingHandler: @escaping (Error?) -> Void) {

    let sharedRecorder = RPScreenRecorder.shared()
    currentlyRecordingURL = URL(fileURLWithPath: CaptureArchiver.filePath(fileName))
    guard currentlyRecordingURL != nil else { return }
    desiredMicEnabled = RPScreenRecorder.shared().isMicrophoneEnabled
    assetWriter = try! AVAssetWriter(outputURL: currentlyRecordingURL!, fileType: AVFileType.mp4)

    let appAudioOutputSettings = [
        AVFormatIDKey: kAudioFormatMPEG4AAC,
        AVNumberOfChannelsKey: 2,
        AVSampleRateKey: 44100.0,
        AVEncoderBitRateKey: 192000
    ] as [String: Any]

    let micAudioOutputSettings = [
        AVFormatIDKey: kAudioFormatMPEG4AAC,
        AVNumberOfChannelsKey: 1,
        AVSampleRateKey: 44100.0,
        AVEncoderBitRateKey: 192000
    ] as [String: Any]

    // Round the width up to a multiple of 4 to satisfy the H.264 encoder.
    let adjustedWidth = ceil(UIScreen.main.bounds.size.width / 4) * 4

    let videoOutputSettings: [String: Any] = [
        AVVideoCodecKey: AVVideoCodecType.h264,
        AVVideoWidthKey: adjustedWidth,
        AVVideoHeightKey: UIScreen.main.bounds.size.height
    ]

    let audioInput_app = AVAssetWriterInput(mediaType: AVMediaType.audio, outputSettings: appAudioOutputSettings)
    audioInput_app.expectsMediaDataInRealTime = true
    if assetWriter.canAdd(audioInput_app) { assetWriter.add(audioInput_app) }
    self.audioInput_app = audioInput_app

    let audioInput_mic = AVAssetWriterInput(mediaType: AVMediaType.audio, outputSettings: micAudioOutputSettings)
    audioInput_mic.expectsMediaDataInRealTime = true
    if assetWriter.canAdd(audioInput_mic) { assetWriter.add(audioInput_mic) }
    self.audioInput_mic = audioInput_mic

    let videoInput = AVAssetWriterInput(mediaType: AVMediaType.video, outputSettings: videoOutputSettings)
    videoInput.expectsMediaDataInRealTime = true
    if assetWriter.canAdd(videoInput) { assetWriter.add(videoInput) }
    self.videoInput = videoInput

    RPScreenRecorder.shared().startCapture(handler: { [unowned self] (sample, bufferType, error) in

        if CMSampleBufferDataIsReady(sample) {

            DispatchQueue.main.async { [unowned self] in

                if self.assetWriter.status == AVAssetWriter.Status.unknown {

                    self.assetWriter.startWriting()

                    #if DEBUG
                    let status = self.assetWriter.status
                    log(self, message: "LAUNCH assetWriter.status[\(status.rawValue)]:\(String(describing: self.readable(status)))")
                    #endif

                    self.assetWriter.startSession(atSourceTime: CMSampleBufferGetPresentationTimeStamp(sample))

                } else if self.assetWriter.status == AVAssetWriter.Status.failed {

                    recordingHandler(error)
                    return

                } else {

                    // Each buffer type goes to its own AVAssetWriterInput.
                    switch bufferType {
                    case .audioApp:
                        if let audioInput_app = self.audioInput_app {
                            if audioInput_app.isReadyForMoreMediaData { audioInput_app.append(sample) }
                        }
                    case .audioMic:
                        if let audioInput_mic = self.audioInput_mic {
                            if audioInput_mic.isReadyForMoreMediaData { audioInput_mic.append(sample) }
                        }
                    case .video:
                        if let videoInput = self.videoInput {
                            if videoInput.isReadyForMoreMediaData { videoInput.append(sample) }
                        }
                    @unknown default:
                        fatalError("Unknown RPSampleBufferType: \(bufferType)")
                    }
                }
            }
        }

    }) { [unowned self] (error) in

        recordingHandler(error)

        if error == nil && self.desiredMicEnabled == true && RPScreenRecorder.shared().isMicrophoneEnabled == false {
            self.viewController.mic_cap_denied = true
        } else {
            self.viewController.mic_cap_denied = false
        }
    }
}
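
For reference, each AVAssetWriterInput added to an AVAssetWriter becomes its own AVAssetTrack in the finished file, which is why this setup always produces two audio tracks. A quick way to confirm this, assuming recordedURL (a hypothetical name) points at the finished recording:

import AVFoundation

let recorded = AVURLAsset(url: recordedURL)
// Prints 2: one track per AVAssetWriterInput (app audio + mic audio).
print(recorded.tracks(withMediaType: .audio).count)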


func mergeAudioTracksInVideo(_ videoURL: URL, completion: @escaping ((Bool) -> Void)) {

    let sourceAsset = AVURLAsset(url: videoURL)

    let sourceVideoTrack: AVAssetTrack = sourceAsset.tracks(withMediaType: AVMediaType.video)[0]
    let sourceAudioTrackApp: AVAssetTrack = sourceAsset.tracks(withMediaType: AVMediaType.audio)[0]
    let sourceAudioTrackMic: AVAssetTrack = sourceAsset.tracks(withMediaType: AVMediaType.audio)[1]

    let comp = AVMutableComposition()

    guard let newVideoTrack: AVMutableCompositionTrack = comp.addMutableTrack(withMediaType: AVMediaType.video,
                                                                              preferredTrackID: kCMPersistentTrackID_Invalid) else {
        completion(false)
        return
    }

    newVideoTrack.preferredTransform = sourceVideoTrack.preferredTransform

    guard let newAudioTrack: AVMutableCompositionTrack = comp.addMutableTrack(withMediaType: AVMediaType.audio,
                                                                              preferredTrackID: kCMPersistentTrackID_Invalid) else {
        completion(false)
        return
    }

    // THE MIXING — this still results in two separate audio tracks;
    // AVMutableAudioMix only controls volume levels, it does not merge tracks.

    let mix = AVMutableAudioMix()

    let audioMixInputParamsMic = AVMutableAudioMixInputParameters()
    audioMixInputParamsMic.trackID = sourceAudioTrackMic.trackID
    audioMixInputParamsMic.setVolume(1.0, at: CMTime.zero)

    let audioMixInputParamsApp = AVMutableAudioMixInputParameters()
    audioMixInputParamsApp.trackID = sourceAudioTrackApp.trackID
    audioMixInputParamsApp.setVolume(1.0, at: CMTime.zero)

    mix.inputParameters.append(audioMixInputParamsMic)
    mix.inputParameters.append(audioMixInputParamsApp)

    ///////

    let timeRange: CMTimeRange = CMTimeRangeMake(start: CMTime.zero, duration: sourceAsset.duration)

    do {
        try newVideoTrack.insertTimeRange(timeRange, of: sourceVideoTrack, at: CMTime.zero)
        // Both inserts target the same composition track: a composition track is a
        // single timeline, so the second insert displaces the first instead of
        // overlapping it — hence the concatenation described above.
        try newAudioTrack.insertTimeRange(timeRange, of: sourceAudioTrackMic, at: CMTime.zero)
        try newAudioTrack.insertTimeRange(timeRange, of: sourceAudioTrackApp, at: CMTime.zero)
    } catch {
        completion(false)
        return
    }

    let exporter: AVAssetExportSession = AVAssetExportSession(asset: comp, presetName: AVAssetExportPresetHighestQuality)!
    exporter.audioMix = mix
    exporter.outputFileType = AVFileType.mp4
    // NOTE: this exports over the source file; it is safer to export to a
    // temporary URL and replace the original after the export succeeds.
    exporter.outputURL = videoURL
    removeFileAtURLIfExists(url: videoURL)

    exporter.exportAsynchronously(completionHandler: {

        switch exporter.status {
        case AVAssetExportSession.Status.failed:
            #if DEBUG
            log(self, message: "1000000000failed \(String(describing: exporter.error))")
            #endif
        case AVAssetExportSession.Status.cancelled:
            #if DEBUG
            log(self, message: "1000000000cancelled \(String(describing: exporter.error))")
            #endif
        case AVAssetExportSession.Status.unknown:
            #if DEBUG
            log(self, message: "1000000000unknown \(String(describing: exporter.error))")
            #endif
        case AVAssetExportSession.Status.waiting:
            #if DEBUG
            log(self, message: "1000000000waiting \(String(describing: exporter.error))")
            #endif
        case AVAssetExportSession.Status.exporting:
            #if DEBUG
            log(self, message: "1000000000exporting \(String(describing: exporter.error))")
            #endif
        default:
            #if DEBUG
            log(self, message: "1000000000-----Mutable video exportation complete.")
            #endif
        }

        completion(true)
    })
}
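
For an after-the-fact merge, one approach that actually collapses the audio into a single track is a render pass: AVAssetReaderAudioMixOutput decodes and mixes ALL audio tracks of an asset into one uncompressed stream, which can then be re-encoded through a single AVAssetWriterInput (one writer input = one track in the result). Below is a minimal sketch of that idea; renderSingleAudioTrack, destinationURL, and the drain helper are hypothetical names, not from the original post, and error handling is kept to a bare minimum:

import AVFoundation

// Hypothetical helper: re-renders all audio tracks of `sourceURL` into ONE
// stereo AAC track while passing the video track through untouched.
func renderSingleAudioTrack(from sourceURL: URL, to destinationURL: URL,
                            completion: @escaping (Error?) -> Void) {
    let asset = AVURLAsset(url: sourceURL)
    do {
        let reader = try AVAssetReader(asset: asset)
        let writer = try AVAssetWriter(outputURL: destinationURL, fileType: .mp4)

        // Video: nil settings on both sides = pass compressed samples through.
        let videoTrack = asset.tracks(withMediaType: .video)[0]
        let videoOutput = AVAssetReaderTrackOutput(track: videoTrack, outputSettings: nil)
        reader.add(videoOutput)
        let videoInput = AVAssetWriterInput(mediaType: .video, outputSettings: nil)
        writer.add(videoInput)

        // Audio: mix ALL audio tracks into one stream (nil = default PCM).
        let audioOutput = AVAssetReaderAudioMixOutput(audioTracks: asset.tracks(withMediaType: .audio),
                                                      audioSettings: nil)
        reader.add(audioOutput)

        // A single writer input produces a single audio AVAssetTrack.
        let audioInput = AVAssetWriterInput(mediaType: .audio, outputSettings: [
            AVFormatIDKey: kAudioFormatMPEG4AAC,
            AVNumberOfChannelsKey: 2,
            AVSampleRateKey: 44100.0,
            AVEncoderBitRateKey: 192000
        ])
        writer.add(audioInput)

        reader.startReading()
        writer.startWriting()
        writer.startSession(atSourceTime: .zero)

        let group = DispatchGroup()
        let queue = DispatchQueue(label: "audio.mixdown")

        // Pump every sample from a reader output into the matching writer input.
        func drain(_ output: AVAssetReaderOutput, into input: AVAssetWriterInput) {
            group.enter()
            input.requestMediaDataWhenReady(on: queue) {
                while input.isReadyForMoreMediaData {
                    guard let buffer = output.copyNextSampleBuffer() else {
                        input.markAsFinished()
                        group.leave()
                        return
                    }
                    input.append(buffer)
                }
            }
        }

        drain(videoOutput, into: videoInput)
        drain(audioOutput, into: audioInput)

        group.notify(queue: queue) {
            writer.finishWriting { completion(writer.error ?? reader.error) }
        }
    } catch {
        completion(error)
    }
}

On success the merged file is at destinationURL; since AVAssetWriter refuses to overwrite an existing file, write to a fresh temporary URL and replace the original afterwards.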

Best answer

Use the RPScreenWriter class from the gist below to do the writing, together with the following functions to start and stop recording:

https://gist.github.com/mspvirajpatel/f7e1e258f3c1fff96917d82fa9c4c137

import AVFoundation
import ReplayKit

var rpScreenRecorder = RPScreenRecorder.shared()
var rpScreenWriter = RPScreenWriter()

func startRecord() {
    rpScreenRecorder.isMicrophoneEnabled = true
    rpScreenRecorder.startCapture(handler: { cmSampleBuffer, rpSampleBufferType, error in
        if let error = error {
            // Handle the per-buffer capture error.
            print(error)
        } else {
            rpScreenWriter.writeBuffer(cmSampleBuffer, rpSampleType: rpSampleBufferType)
        }
    }) { error in
        // Handle the start-capture error (e.g. permission denied).
        if let error = error { print(error) }
    }
}

func stopRecording() {
    rpScreenRecorder.stopCapture { error in
        if let error = error {
            // Handle the stop-capture error.
            print(error)
        } else {
            rpScreenWriter.finishWriting(completionHandler: { url, error in
                if let url = url {
                    print("\(url)")
                }
            })
        }
    }
}
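
One caveat, not verified here: depending on how RPScreenWriter in the gist is implemented internally, it may still use one AVAssetWriterInput per audio source, in which case its output would again contain separate app and mic tracks. If strictly ONE audio track is required, a render pass like the renderSingleAudioTrack sketch above can be run on the finished URL as a final step.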

Regarding "Swift AVAssetWriter: recording video with microphone audio and device audio into a file with ONE audio AVAssetTrack", we found a similar question on Stack Overflow: https://stackoverflow.com/questions/56518897/
