
ios - What causes AVMutableComposition to drastically increase the size of the video? - iOS, swift, AVFoundation


Suppose we have two video assets (AVAsset objects), call them blank and main, where main is a video of random but limited length, say 2-5 minutes, and blank is always a 4-second video. We want to merge the videos in the following order:

blank - main - blank

// Create AVMutableComposition object. This object will hold our multiple AVMutableCompositionTrack instances.

let mixComposition = AVMutableComposition()

let assets = [blank, main, blank]
var totalTime: CMTime = kCMTimeZero // CMTimeMake(0, 0) has an invalid timescale of 0, so use kCMTimeZero
var atTimeM: CMTime = kCMTimeZero

Utils.log([blank.duration, main.duration])

// VIDEO TRACK
let videoTrack = mixComposition.addMutableTrack(withMediaType: AVMediaTypeVideo, preferredTrackID: Int32(kCMPersistentTrackID_Invalid))

for (index, asset) in assets.enumerated() {

    do {
        if index == 0 {
            atTimeM = kCMTimeZero
        } else {
            atTimeM = totalTime // <-- Use the total time for all the videos seen so far.
        }

        try videoTrack.insertTimeRange(CMTimeRangeMake(kCMTimeZero, asset.duration), of: asset.tracks(withMediaType: AVMediaTypeVideo)[0], at: atTimeM)
    } catch let error as NSError {
        Utils.log("error: \(error)")
    }

    totalTime = CMTimeAdd(totalTime, asset.duration)
}

// AUDIO TRACK
let audioTrack = mixComposition.addMutableTrack(withMediaType: AVMediaTypeAudio, preferredTrackID: kCMPersistentTrackID_Invalid)
do {
    try audioTrack.insertTimeRange(CMTimeRangeMake(kCMTimeZero, main.duration), of: main.tracks(withMediaType: AVMediaTypeAudio)[0], at: blank.duration)
} catch _ {
    completionHandler(nil, ErrorType(rawValue: "Unable to add audio in composition."))
    return
}

let outputURL = mainVideoObject.getDirectoryURL()?.appendingPathComponent("video-with-blank.mp4")

guard let exporter = AVAssetExportSession(asset: mixComposition, presetName: AVAssetExportPreset1280x720) else {
    completionHandler(nil, ErrorType(rawValue: "Unable to create export session."))
    return
}
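// NOTE (editorial aside): with any preset other than AVAssetExportPresetPassthrough
// (which cannot be combined with a videoComposition), the exporter re-encodes every
// frame at the bitrate the preset chooses, so the output size is governed by that
// bitrate and the render size rather than by the size of the source files.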

let mainInstruction = AVMutableVideoCompositionInstruction()

mainInstruction.timeRange = CMTimeRangeMake(kCMTimeZero, CMTimeAdd(blank.duration, CMTimeAdd(main.duration, blank.duration)))

// Fixing orientation
let firstLayerInstruction = AVMutableVideoCompositionLayerInstruction(assetTrack: videoTrack)
let firstAssetTrack = blank.tracks(withMediaType: AVMediaTypeVideo)[0]
firstLayerInstruction.setTransform(firstAssetTrack.preferredTransform, at: kCMTimeZero)
firstLayerInstruction.setOpacity(0.0, at: blank.duration)

let secondLayerInstruction = AVMutableVideoCompositionLayerInstruction(assetTrack: videoTrack)
let secondAssetTrack = main.tracks(withMediaType: AVMediaTypeVideo)[0]
var isSecondAssetPortrait = false
let secondTransform = secondAssetTrack.preferredTransform
if secondTransform.a == 0 && secondTransform.b == 1.0 && secondTransform.c == -1.0 && secondTransform.d == 0 {
    isSecondAssetPortrait = true
}
if secondTransform.a == 0 && secondTransform.b == -1.0 && secondTransform.c == 1.0 && secondTransform.d == 0 {
    isSecondAssetPortrait = true
}
secondLayerInstruction.setTransform(secondAssetTrack.preferredTransform, at: blank.duration)
secondLayerInstruction.setOpacity(0.0, at: CMTimeAdd(blank.duration, main.duration))

let thirdLayerInstruction = AVMutableVideoCompositionLayerInstruction(assetTrack: videoTrack)
let thirdAssetTrack = blank.tracks(withMediaType: AVMediaTypeVideo)[0]
thirdLayerInstruction.setTransform(thirdAssetTrack.preferredTransform, at: CMTimeAdd(blank.duration, main.duration))

mainInstruction.layerInstructions = [firstLayerInstruction, secondLayerInstruction, thirdLayerInstruction]

var naturalSize = CGSize()
if isSecondAssetPortrait {
    naturalSize = CGSize(width: secondAssetTrack.naturalSize.height, height: secondAssetTrack.naturalSize.width)
} else {
    naturalSize = secondAssetTrack.naturalSize
}

let renderWidth = naturalSize.width
let renderHeight = naturalSize.height

let mainCompositionInst = AVMutableVideoComposition()
mainCompositionInst.instructions = [mainInstruction]
mainCompositionInst.frameDuration = CMTimeMake(1, 30)
mainCompositionInst.renderSize = CGSize(width: renderWidth, height: renderHeight)

exporter.outputURL = outputURL
exporter.outputFileType = AVFileTypeMPEG4
exporter.videoComposition = mainCompositionInst
//exporter.shouldOptimizeForNetworkUse = true

exporter.exportAsynchronously {
    if exporter.status == .completed {
        completionHandler(AVAsset(url: outputURL!), nil)
    } else {
        completionHandler(nil, ErrorType(rawValue: "Unable to export video."))
        if let error = exporter.error {
            Utils.log("Unable to export video. \(error)")
        }
    }
}

Given that 5 minutes of original video recorded at 720p quality takes about 200 MB of space, adding a 4-second blank video at the beginning and end of the main video should not change the size significantly, and the processing should complete very quickly.

However, the resulting video is 2 to 2.5 times the size of the original (i.e. 400-500 MB), and the processing takes far too long.
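To see where the extra megabytes come from, the bitrates of the two files can be compared directly. A minimal diagnostic sketch (the helper name is made up for illustration; `main` and `outputURL` are the same objects as in the code above, and it must run after the export has finished):

// Compare the video bitrate of the source with that of the exported file.
// `estimatedDataRate` is in bits per second.
func logVideoDataRate(of asset: AVAsset, label: String) {
    guard let track = asset.tracks(withMediaType: AVMediaTypeVideo).first else { return }
    print("\(label): \(track.estimatedDataRate / 1_000_000) Mbit/s")
}

logVideoDataRate(of: main, label: "original")
logVideoDataRate(of: AVAsset(url: outputURL!), label: "exported")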

Please advise,

Thanks

Best Answer

I have prepared a custom class here to which you pass your video file names, with the videos saved in the bundle. Once you run the app, it will generate a new video file merged to your requirements and place it in the app's Documents directory.

I prepared this demo using Swift 4.

//
// ViewController.swift
// SOVideoMergingDemo
//
// Created by iOS Test User on 03/01/18.
// Copyright © 2018 Test User. Ltd. All rights reserved.
//

import UIKit
import AVFoundation
import MediaPlayer
import Photos
import AssetsLibrary
import AVKit


class ViewController: UIViewController {

    //--------------------------------------------------
    //MARK:
    //MARK: - IBOutlets
    //--------------------------------------------------


    //--------------------------------------------------
    //MARK:
    //MARK: - Properties
    //--------------------------------------------------

    var videoUrls: [URL] = []
    var arrVideoAsset: [AVAsset] = []
    let video1 = "1"
    let video2 = "2"
    let outPutVideo = "MergedVideo.mp4"

    let semaphore = DispatchSemaphore(value: 1)

    //--------------------------------------------------
    //MARK:
    //MARK: - Custom Methods
    //--------------------------------------------------

    func getVideoURL(forVideo: String) -> URL {
        let videoPath = Bundle.main.path(forResource: forVideo, ofType: "mp4")
        let vidURL = URL(fileURLWithPath: videoPath!)
        return vidURL
    }

    //--------------------------------------------------

    func mergeVideos(arrVideoAsset: [AVAsset]) {

        let mixComposition = AVMutableComposition()

        // Tracks to insert in the composition for merging
        // Create video tracks
        let firstTrack = mixComposition.addMutableTrack(withMediaType: .video, preferredTrackID: kCMPersistentTrackID_Invalid)
        let secondTrack = mixComposition.addMutableTrack(withMediaType: .video, preferredTrackID: kCMPersistentTrackID_Invalid)
        let thirdTrack = mixComposition.addMutableTrack(withMediaType: .video, preferredTrackID: kCMPersistentTrackID_Invalid)

        do {
            try firstTrack?.insertTimeRange(CMTimeRangeMake(kCMTimeZero, arrVideoAsset[0].duration), of: arrVideoAsset[0].tracks(withMediaType: .video)[0], at: kCMTimeZero)
        } catch {
            print("Failed to load first track")
        }

        do {
            try secondTrack?.insertTimeRange(CMTimeRangeMake(kCMTimeZero, arrVideoAsset[1].duration), of: arrVideoAsset[1].tracks(withMediaType: .video)[0], at: arrVideoAsset[0].duration)
        } catch {
            print("Failed to load second track")
        }

        do {
            // The third clip starts after the first two, i.e. at blank.duration + main.duration.
            try thirdTrack?.insertTimeRange(CMTimeRangeMake(kCMTimeZero, arrVideoAsset[2].duration), of: arrVideoAsset[2].tracks(withMediaType: .video)[0], at: CMTimeAdd(arrVideoAsset[0].duration, arrVideoAsset[1].duration))
        } catch {
            print("Failed to load third track")
        }

        // This instruction is created for merging the video tracks
        let compositionInstruction = AVMutableVideoCompositionInstruction()
        compositionInstruction.timeRange = CMTimeRangeMake(kCMTimeZero, CMTimeAdd(arrVideoAsset[0].duration, CMTimeAdd(arrVideoAsset[1].duration, arrVideoAsset[2].duration)))

        // Creating a layer instruction for each video
        let firstInstruction = videoCompositionInstructionForTrack(firstTrack!, asset: arrVideoAsset[0])
        firstInstruction.setOpacity(0.0, at: arrVideoAsset[0].duration)
        let secondInstruction = videoCompositionInstructionForTrack(secondTrack!, asset: arrVideoAsset[1])
        // Hide the second clip once it ends, i.e. at blank.duration + main.duration.
        secondInstruction.setOpacity(0.0, at: CMTimeAdd(arrVideoAsset[0].duration, arrVideoAsset[1].duration))
        let thirdInstruction = videoCompositionInstructionForTrack(thirdTrack!, asset: arrVideoAsset[2])

        compositionInstruction.layerInstructions = [firstInstruction, secondInstruction, thirdInstruction]

        // By changing this height and width you can affect the size of the merged video.
        // Calculate them carefully, as per your needs.
        let height = min(firstTrack!.naturalSize.height, secondTrack!.naturalSize.height)
        let width = min(firstTrack!.naturalSize.width, secondTrack!.naturalSize.width)

        let mainComposition = AVMutableVideoComposition()
        mainComposition.instructions = [compositionInstruction]
        mainComposition.frameDuration = CMTimeMake(1, 30)
        mainComposition.renderSize = CGSize(width: width, height: height)

        let exporter = AVAssetExportSession(asset: mixComposition, presetName: AVAssetExportPresetHighestQuality)
        exporter?.outputURL = URL(fileURLWithPath: getDocumentDirectoryPath() + "/" + outPutVideo)
        exporter?.outputFileType = AVFileType.mp4
        exporter?.shouldOptimizeForNetworkUse = true
        exporter?.videoComposition = mainComposition
        print(self.getDocumentDirectoryPath())

        exporter?.exportAsynchronously(completionHandler: {
            DispatchQueue.main.async {
                if exporter?.status == AVAssetExportSessionStatus.completed {
                    // The exporter writes the merged file to outputURL in the Documents
                    // directory itself, so no extra copy is needed here.
                    print("Merged video saved at: \(String(describing: exporter?.outputURL))")

                    // Uncomment this if you want to save the video to the Photos library
                    // PHPhotoLibrary.shared().performChanges({
                    //     PHAssetChangeRequest.creationRequestForAssetFromVideo(atFileURL: (exporter?.outputURL)!)
                    // }, completionHandler: { (success, error) in
                    //     if success {
                    //         let fetchOptions = PHFetchOptions()
                    //         fetchOptions.sortDescriptors = [NSSortDescriptor(key: "creationDate", ascending: false)]
                    //         _ = PHAsset.fetchAssets(with: .video, options: fetchOptions).firstObject
                    //     } else {
                    //         print("Error in saving file to the Photos library -> \(String(describing: error?.localizedDescription))")
                    //     }
                    // })
                } else {
                    print("Error -> \(String(describing: exporter?.error?.localizedDescription))")
                }
            }
        })
    }

    //--------------------------------------------------

    // This method is used to build the layer instruction for a particular video
    func videoCompositionInstructionForTrack(_ track: AVCompositionTrack, asset: AVAsset) -> AVMutableVideoCompositionLayerInstruction {
        let instruction = AVMutableVideoCompositionLayerInstruction(assetTrack: track)
        let assetTrack = asset.tracks(withMediaType: AVMediaType.video)[0]
        // Apply the source track's preferred transform so the orientation is preserved.
        instruction.setTransform(assetTrack.preferredTransform, at: kCMTimeZero)
        return instruction
    }

    //--------------------------------------------------

    func getDocumentDirectoryPath() -> String {
        let arrPaths = NSSearchPathForDirectoriesInDomains(.documentDirectory, .userDomainMask, true)
        return arrPaths[0]
    }

    //--------------------------------------------------
    //MARK:
    //MARK: - View Life Cycle Methods
    //--------------------------------------------------

    override func viewDidLoad() {
        super.viewDidLoad()

        // Prepare the video assets in blank - main - blank order
        arrVideoAsset.append(AVAsset(url: getVideoURL(forVideo: video1)))
        arrVideoAsset.append(AVAsset(url: getVideoURL(forVideo: video2)))
        arrVideoAsset.append(AVAsset(url: getVideoURL(forVideo: video1)))

        // Merge these videos
        mergeVideos(arrVideoAsset: arrVideoAsset)
    }
}
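A note on the size question itself: with AVAssetExportPresetHighestQuality (or any other fixed preset) the exporter re-encodes the whole composition at the bitrate the preset chooses, so the output can easily come out larger than the source; the extra 8 seconds of blank footage are not the cause. If the file needs to stay near a target size, the session's file length can be capped. A minimal sketch under stated assumptions (the helper name and the 200 MB figure are illustrative, and fileLengthLimit is only honored on a best-effort basis):

func makeSizeCappedExporter(for composition: AVMutableComposition,
                            videoComposition: AVVideoComposition,
                            outputURL: URL) -> AVAssetExportSession? {
    // A fixed-resolution preset re-encodes at its own target bitrate; the cap
    // below asks the session to keep the result under roughly 200 MB.
    guard let exporter = AVAssetExportSession(asset: composition,
                                              presetName: AVAssetExportPreset1280x720) else {
        return nil
    }
    exporter.outputURL = outputURL
    exporter.outputFileType = AVFileType.mp4
    exporter.videoComposition = videoComposition
    exporter.fileLengthLimit = 200 * 1024 * 1024 // bytes; best effort, not an exact guarantee
    return exporter
}

For exact control over the output bitrate you would have to drop down to AVAssetReader/AVAssetWriter and pass AVVideoAverageBitRateKey inside AVVideoCompressionPropertiesKey in the writer's video settings.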

Regarding "ios - What causes AVMutableComposition to drastically increase the size of the video? - iOS, swift, AVFoundation", we found a similar question on Stack Overflow: https://stackoverflow.com/questions/45610327/
