gpt4 book ai didi

ios - 为什么在 Swift 中合并两个视频或更改背景音乐后视频变成黑屏?

转载 作者:行者123 更新时间:2023-12-05 06:26:15 24 4
gpt4 key购买 nike

在我的 iOS 应用程序中,我想合并两个视频并更改背景音乐。我试过了,它适用于普通视频。但是当我选择任何延时视频然后尝试合并或更改背景音乐时,视频变成全黑屏。

对于我的应用程序,我使用的是 swift 4.2 和 xcode-10。我还尝试了 swift 4 和 swift 5,两者都返回相同的结果。

这是我的代码:

class Export: NSObject {

    /// Default render bound (currently not referenced by `mergeVideos`; kept for API compatibility).
    let defaultSize = CGSize(width: 1920, height: 1920)

    /// Called exactly once when the merge finishes: `(outputURL, nil)` on success,
    /// `(nil, error)` on failure.
    typealias Completion = (URL?, Error?) -> Void

    /// Wraps a plain message in an `NSError` so failures travel through `Completion`
    /// with a readable description. (The original wrote `"…" as? Error`, which is
    /// always nil — callers received `(nil, nil)` and could not tell what failed.)
    private func makeError(_ message: String) -> Error {
        return NSError(domain: "Export",
                       code: -1,
                       userInfo: [NSLocalizedDescriptionKey: message])
    }

    /// Concatenates the given clips back-to-back into a single movie at `exportURL`.
    /// - Parameters:
    ///   - arrayVideos: URLs of the clips to merge, in playback order.
    ///   - exportURL: Destination for the exported `.mov` file.
    ///   - completion: Invoked (on the exporter's queue) when the export finishes or fails.
    func mergeVideos(arrayVideos: [URL], exportURL: URL, completion: @escaping Completion) -> Void {

        var insertTime = kCMTimeZero
        var arrayLayerInstructions: [AVMutableVideoCompositionLayerInstruction] = []
        var outputSize = CGSize(width: 0, height: 0)

        // Determine the output size: the largest width/height among all inputs,
        // measured after applying each track's preferredTransform so rotated
        // (portrait) clips report their on-screen dimensions.
        for url in arrayVideos {
            let videoAsset = AVAsset(url: url)
            // Guard instead of `[0]`: an asset with no video track must not crash here.
            guard let videoTrack = videoAsset.tracks(withMediaType: AVMediaType.video).first else {
                continue
            }

            var videoSize = videoTrack.naturalSize.applying(videoTrack.preferredTransform)
            // The transform can produce negative extents; only the magnitude matters.
            videoSize.width = fabs(videoSize.width)
            videoSize.height = fabs(videoSize.height)

            if outputSize.height == 0 || videoSize.height > outputSize.height {
                outputSize.height = videoSize.height
            }
            if outputSize.width == 0 || videoSize.width > outputSize.width {
                outputSize.width = videoSize.width
            }
        }

        // A zero render size would make the composition output nothing (black frames).
        guard outputSize.width > 0, outputSize.height > 0 else {
            completion(nil, makeError("No usable video track found in the supplied URLs"))
            return
        }

        // Silent audio used as a stand-in when a clip has no audio track,
        // keeping the audio composition timeline continuous.
        guard let silenceURL = Bundle.main.url(forResource: "silence", withExtension: "mp3") else {
            completion(nil, makeError("silence.mp3 is missing from the app bundle"))
            return
        }
        let silenceAsset = AVAsset(url: silenceURL)
        let silenceSoundTrack = silenceAsset.tracks(withMediaType: AVMediaType.audio).first

        // Init composition
        let mixComposition = AVMutableComposition()

        for url in arrayVideos {
            let videoAsset = AVAsset(url: url)

            guard let videoTrack = videoAsset.tracks(withMediaType: AVMediaType.video).first else {
                print("video asset track not found")
                continue
            }

            // Fall back to the bundled silence when the clip carries no audio.
            let audioTrack = videoAsset.tracks(withMediaType: AVMediaType.audio).first ?? silenceSoundTrack

            // One composition track per clip so each can get its own layer instruction.
            guard let videoCompositionTrack = mixComposition.addMutableTrack(
                withMediaType: AVMediaType.video,
                preferredTrackID: Int32(kCMPersistentTrackID_Invalid)) else {
                completion(nil, makeError("Could not add a video track to the composition"))
                return
            }
            guard let audioCompositionTrack = mixComposition.addMutableTrack(
                withMediaType: AVMediaType.audio,
                preferredTrackID: Int32(kCMPersistentTrackID_Invalid)) else {
                completion(nil, makeError("Could not add an audio track to the composition"))
                return
            }

            do {
                let startTime = kCMTimeZero
                let duration = videoAsset.duration

                // Add video track to video composition at the current insert point.
                try videoCompositionTrack.insertTimeRange(CMTimeRangeMake(startTime, duration),
                                                          of: videoTrack,
                                                          at: insertTime)

                // Add audio track (real or silence) at the same insert point.
                if let audioTrack = audioTrack {
                    try audioCompositionTrack.insertTimeRange(CMTimeRangeMake(startTime, duration),
                                                              of: audioTrack,
                                                              at: insertTime)
                }

                // Per-clip transform/centering instruction.
                let layerInstruction = videoCompositionInstructionForTrack(track: videoCompositionTrack,
                                                                           asset: videoAsset,
                                                                           standardSize: outputSize,
                                                                           atTime: insertTime)

                // Hide this clip once it ends so the next clip is not obscured.
                let endTime = CMTimeAdd(insertTime, duration)
                layerInstruction.setOpacity(0, at: endTime)
                arrayLayerInstructions.append(layerInstruction)

                // Advance the insert point past this clip.
                insertTime = endTime
            } catch {
                // Include the underlying error so failed inserts are diagnosable
                // (the original printed a fixed string and dropped the error).
                print("Load track error: \(error)")
            }
        }

        // One instruction spanning the whole timeline, layering every clip.
        let mainInstruction = AVMutableVideoCompositionInstruction()
        mainInstruction.timeRange = CMTimeRangeMake(kCMTimeZero, insertTime)
        mainInstruction.layerInstructions = arrayLayerInstructions

        // Main video composition
        let mainComposition = AVMutableVideoComposition()
        mainComposition.instructions = [mainInstruction]
        mainComposition.frameDuration = CMTimeMake(1, 30) // 30 fps output
        mainComposition.renderSize = outputSize

        // Init exporter
        guard let exporter = AVAssetExportSession(asset: mixComposition,
                                                  presetName: AVAssetExportPresetHighestQuality) else {
            completion(nil, makeError("AVAssetExportSession initialization failed"))
            return
        }
        exporter.outputURL = exportURL
        exporter.outputFileType = AVFileType.mov
        exporter.shouldOptimizeForNetworkUse = true
        exporter.videoComposition = mainComposition

        // Do export. The original left this handler empty, so `completion` was
        // never called once the export started — report the actual outcome.
        exporter.exportAsynchronously {
            if exporter.status == .completed {
                completion(exportURL, nil)
            } else {
                let fallback = NSError(domain: "Export",
                                       code: -1,
                                       userInfo: [NSLocalizedDescriptionKey:
                                           "Export finished with status \(exporter.status.rawValue)"])
                completion(nil, exporter.error ?? fallback)
            }
        }
    }
}

// MARK:- Private methods
// MARK: - Private methods
extension Export {

    /// Derives the capture orientation from a track's preferred transform.
    /// Returns the matching `UIImageOrientation` plus whether the clip is portrait.
    /// Only exact 90°/180° rotation matrices are recognized; any other transform
    /// is treated as upright landscape.
    fileprivate func orientationFromTransform(transform: CGAffineTransform) -> (orientation: UIImageOrientation, isPortrait: Bool) {
        if transform.a == 0 && transform.b == 1.0 && transform.c == -1.0 && transform.d == 0 {
            // 90° rotation → portrait, home button on the left
            return (.right, true)
        }
        if transform.a == 0 && transform.b == -1.0 && transform.c == 1.0 && transform.d == 0 {
            // -90° rotation → portrait, home button on the right
            return (.left, true)
        }
        if transform.a == -1.0 && transform.b == 0 && transform.c == 0 && transform.d == -1.0 {
            // 180° rotation → upside-down landscape
            return (.down, false)
        }
        // Identity (a=d=1, b=c=0) and every unrecognized matrix: upright landscape.
        return (.up, false)
    }

    /// Builds a layer instruction that applies the source track's preferred
    /// transform and centers the clip inside `standardSize`, starting at `atTime`.
    fileprivate func videoCompositionInstructionForTrack(track: AVCompositionTrack, asset: AVAsset, standardSize: CGSize, atTime: CMTime) -> AVMutableVideoCompositionLayerInstruction {

        let instruction = AVMutableVideoCompositionLayerInstruction(assetTrack: track)
        let sourceTrack = asset.tracks(withMediaType: AVMediaType.video)[0]
        let naturalSize = sourceTrack.naturalSize
        let preferred = sourceTrack.preferredTransform
        let info = orientationFromTransform(transform: preferred)

        // Scale is fixed at 1: no aspect-fill zoom, only centering is applied.
        let ratio: CGFloat = 1
        let scale = CGAffineTransform(scaleX: ratio, y: ratio)

        if info.isPortrait {
            // Rotated clips swap width/height on screen, so center with swapped extents.
            let dx = standardSize.width / 2 - (naturalSize.height * ratio) / 2
            let dy = standardSize.height / 2 - (naturalSize.width * ratio) / 2
            let center = CGAffineTransform(translationX: dx, y: dy)
            instruction.setTransform(preferred.concatenating(scale).concatenating(center), at: atTime)
        } else {
            let dx = standardSize.width / 2 - (naturalSize.width * ratio) / 2
            let dy = standardSize.height / 2 - (naturalSize.height * ratio) / 2
            let center = CGAffineTransform(translationX: dx, y: dy)

            var combined = preferred.concatenating(scale).concatenating(center)
            if info.orientation == .down {
                // Upside-down clips: substitute an explicit 180° rotation for the
                // preferred transform before scaling and centering.
                let flip = CGAffineTransform(rotationAngle: CGFloat(Double.pi))
                combined = flip.concatenating(scale).concatenating(center)
            }
            instruction.setTransform(combined, at: atTime)
        }

        return instruction
    }
}

我期望延时(time-lapse)视频能像普通视频一样正常合并,而不会出现黑屏。

最佳答案

//主视频合成说明

我将上述「主视频合成说明」部分的代码替换为下面的代码:

    // Build one composition instruction spanning the entire merged timeline.
    // NOTE(review): `mutableComposition` and `videoComposition` are declared in the
    // answer's surrounding context, not in this snippet — it is not self-contained.
    let instruction = AVMutableVideoCompositionInstruction()
instruction.timeRange = CMTimeRangeMake(start: CMTime.zero, duration: mutableComposition.duration)

// Attach a layer instruction to the first (merged) video track of the composition.
let videotrack = mutableComposition.tracks(withMediaType: AVMediaType.video)[0] as AVAssetTrack
let layerinstruction = AVMutableVideoCompositionLayerInstruction(assetTrack: videotrack)

// Set a white background color for the instruction so that areas the video
// frame does not cover render white instead of the default black — this is
// the change the answer proposes against the "black screen" symptom.
let rgb = CGColorSpaceCreateDeviceRGB()
let myColor : [CGFloat] = [1.0, 1.0, 1.0, 1.0] //white
let ref = CGColor(colorSpace: rgb, components: myColor)
instruction.backgroundColor = ref


// Install the single layer instruction and hand the instruction to the composition.
instruction.layerInstructions = NSArray(object: layerinstruction) as [AnyObject] as! [AVVideoCompositionLayerInstruction]
videoComposition.instructions = [instruction]

关于ios - 为什么在 Swift 中合并两个视频或更改背景音乐后视频变成黑屏?,我们在Stack Overflow上找到一个类似的问题: https://stackoverflow.com/questions/56359931/

24 4 0
Copyright 2021 - 2024 cfsdn All Rights Reserved 蜀ICP备2022000587号
广告合作:1813099741@qq.com 6ren.com