- r - 以节省内存的方式增长 data.frame
- ruby-on-rails - ruby/ruby on rails 内存泄漏检测
- android - 无法解析导入android.support.v7.app
- UNIX 域套接字与共享内存(映射文件)
假设我们有两个视频 Asset(AVAsset 对象),分别称为 blank 和 main:main 是一段长度随机但有限的视频(比如 2-5 分钟),而 blank 始终是一段 4 秒的视频。我们希望按以下顺序合并这些视频:
空白 - 主要 - 空白
// Create the AVMutableComposition that will hold our AVMutableCompositionTrack(s).
// Assets are appended on the timeline in the order: blank, main, blank.
let mixComposition = AVMutableComposition()
let assets = [blank, main, blank]
// Running insertion cursor: accumulated duration of all assets placed so far.
var totalTime : CMTime = CMTimeMake(0, 0)
var atTimeM: CMTime = CMTimeMake(0, 0)
Utils.log([blank.duration, main.duration])
// VIDEO TRACK
// NOTE(review): a SINGLE composition video track receives all three segments
// back to back; see the layer-instruction notes further down.
let videoTrack = mixComposition.addMutableTrack(withMediaType: AVMediaTypeVideo, preferredTrackID: Int32(kCMPersistentTrackID_Invalid))
for (index,asset) in assets.enumerated() {
do {
if index == 0 {
atTimeM = kCMTimeZero
} else {
atTimeM = totalTime // <-- Use the total time for all the videos seen so far.
}
// Insert the asset's first video track at the running cursor.
try videoTrack.insertTimeRange(CMTimeRangeMake(kCMTimeZero, asset.duration), of: asset.tracks(withMediaType: AVMediaTypeVideo)[0], at: atTimeM)
} catch let error as NSError {
// NOTE(review): the loop keeps going after a failed insert, yet totalTime still
// advances below, so every later segment lands at a wrong offset — confirm intended.
Utils.log("error: \(error)")
}
totalTime = CMTimeAdd(totalTime, asset.duration)
}
// AUDIO TRACK
// Only `main`'s audio is used, offset by the leading blank's duration so it
// lines up with the main video segment.
let audioTrack = mixComposition.addMutableTrack(withMediaType: AVMediaTypeAudio, preferredTrackID: kCMPersistentTrackID_Invalid)
do {
try audioTrack.insertTimeRange(CMTimeRangeMake(kCMTimeZero, main.duration), of: main.tracks(withMediaType: AVMediaTypeAudio)[0], at: blank.duration)
} catch _ {
completionHandler(nil, ErrorType(rawValue: "Unable to add audio in composition."))
return
}
let outputURL = mainVideoObject.getDirectoryURL()?.appendingPathComponent("video-with-blank.mp4")
guard let exporter = AVAssetExportSession(asset: mixComposition, presetName: AVAssetExportPreset1280x720) else {
completionHandler(nil, ErrorType(rawValue: "Unable to create export session."))
return
}
// One instruction spanning the whole timeline (blank + main + blank).
let mainInstruction = AVMutableVideoCompositionInstruction()
mainInstruction.timeRange = CMTimeRangeMake(kCMTimeZero, CMTimeAdd(blank.duration, CMTimeAdd(main.duration, blank.duration)))
// Fixing orientation
// NOTE(review): all three layer instructions below reference the SAME
// composition track (videoTrack). Within one instruction's time range a track
// is expected to have at most one layer instruction; with a single track,
// `setOpacity(0.0, at: blank.duration)` affects the whole track — not just the
// first segment — which likely contributes to the wrong output. Confirm against
// the accepted answer, which uses one track per segment.
let firstLayerInstruction = AVMutableVideoCompositionLayerInstruction(assetTrack: videoTrack)
let firstAssetTrack = blank.tracks(withMediaType: AVMediaTypeVideo)[0]
firstLayerInstruction.setTransform(firstAssetTrack.preferredTransform, at: kCMTimeZero)
firstLayerInstruction.setOpacity(0.0, at: blank.duration)
let secondLayerInstruction = AVMutableVideoCompositionLayerInstruction(assetTrack: videoTrack)
let secondAssetTrack = main.tracks(withMediaType: AVMediaTypeVideo)[0]
// Detect a portrait recording by inspecting the preferred transform's rotation
// components (90° either way).
var isSecondAssetPortrait = false
let secondTransform = secondAssetTrack.preferredTransform
if (secondTransform.a == 0 && secondTransform.b == 1.0 && secondTransform.c == -1.0 && secondTransform.d == 0) {
isSecondAssetPortrait = true
}
if (secondTransform.a == 0 && secondTransform.b == -1.0 && secondTransform.c == 1.0 && secondTransform.d == 0) {
isSecondAssetPortrait = true
}
secondLayerInstruction.setTransform(secondAssetTrack.preferredTransform, at: blank.duration)
secondLayerInstruction.setOpacity(0.0, at: CMTimeAdd(blank.duration, main.duration))
let thirdLayerInstruction = AVMutableVideoCompositionLayerInstruction(assetTrack: videoTrack)
let thirdAssetTrack = blank.tracks(withMediaType: AVMediaTypeVideo)[0]
thirdLayerInstruction.setTransform(thirdAssetTrack.preferredTransform, at: CMTimeAdd(blank.duration, main.duration))
mainInstruction.layerInstructions = [firstLayerInstruction, secondLayerInstruction, thirdLayerInstruction]
// Render size follows `main`; swap width/height when it was shot in portrait.
var naturalSize = CGSize()
if(isSecondAssetPortrait) {
naturalSize = CGSize(width: secondAssetTrack.naturalSize.height, height: secondAssetTrack.naturalSize.width)
} else {
naturalSize = secondAssetTrack.naturalSize
}
let renderWidth = naturalSize.width
let renderHeight = naturalSize.height
let mainCompositionInst = AVMutableVideoComposition()
mainCompositionInst.instructions = [mainInstruction]
mainCompositionInst.frameDuration = CMTimeMake(1, 30)
mainCompositionInst.renderSize = CGSize(width: renderWidth, height: renderHeight)
exporter.outputURL = outputURL
exporter.outputFileType = AVFileTypeMPEG4
// NOTE(review): assigning a videoComposition forces the exporter to decode and
// RE-ENCODE every frame at the preset's bitrate instead of passing samples
// through — presumably this is what makes the export slow and the output file
// 2-2.5x larger than the source; confirm by exporting once without it.
exporter.videoComposition = mainCompositionInst
//exporter.shouldOptimizeForNetworkUse = true
exporter.exportAsynchronously {
if exporter.status == .completed {
completionHandler(AVAsset(url: outputURL!), nil)
} else {
completionHandler(nil, ErrorType(rawValue: "Unable to export video."))
if let error = exporter.error {
Utils.log("Unable to export video. \(error)")
}
}
}
假设以 720p 质量录制 5 分钟的原始视频需要大约 200MB 的空间,在主视频的开头和结尾添加 4 秒的空白视频应该不会显着改变大小,并且应该可以非常快地完成处理。
然而,结果是视频的大小是原始视频的 2 到 2.5 倍(即 400 - 500 MB)并且处理时间太长。
请指教,
谢谢
最佳答案
我在这里准备了一个自定义类:先把视频文件放入 bundle,再把视频名称传给它即可。运行应用后,它会按要求生成一个新的视频文件,并将其保存到应用的文档目录路径中。
我使用 Swift 4 准备了这个演示
//
// ViewController.swift
// SOVideoMergingDemo
//
// Created by iOS Test User on 03/01/18.
// Copyright © 2018 Test User. Ltd. All rights reserved.
//
import UIKit
import AVFoundation
import MediaPlayer
import Photos
import AssetsLibrary
import AVKit
class ViewController : UIViewController {

    //--------------------------------------------------
    //MARK:
    //MARK: - Properties
    //--------------------------------------------------

    var videoUrls : [URL] = []
    var arrVideoAsset : [AVAsset] = []
    // Bundled resource names (without the ".mp4" extension).
    let video1 = "1"
    let video2 = "2"
    // File name of the merged output placed in the documents directory.
    let outPutVideo = "MergedVideo.mp4"
    let semaphore = DispatchSemaphore(value: 1)

    //--------------------------------------------------
    //MARK:
    //MARK: - Custom Methods
    //--------------------------------------------------

    /// Returns the file URL of a bundled ".mp4" resource.
    /// - Parameter forVideo: Resource name without extension.
    /// - Note: Force-unwraps on purpose — a missing bundled resource is a
    ///   programmer error and should crash during development.
    func getVideoURL(forVideo : String) -> URL {
        let videoPath = Bundle.main.path(forResource: forVideo, ofType:"mp4")
        let vidURL = URL(fileURLWithPath: videoPath!)
        return vidURL
    }

    //--------------------------------------------------

    /// Merges three assets back to back (expected order: blank, main, blank)
    /// and exports the result to `outPutVideo` in the documents directory.
    /// - Parameter arrVideoAsset: At least three assets, each with a video track.
    func mergeVideos(arrVideoAsset : [AVAsset]) {
        // The method indexes assets 0...2 below; bail out early on bad input.
        guard arrVideoAsset.count >= 3 else {
            print("mergeVideos requires at least 3 assets")
            return
        }
        let mixComposition = AVMutableComposition()

        // One composition track per segment, so each segment can carry its own
        // layer instruction (transform + opacity) independently of the others.
        guard
            let firstTrack  = mixComposition.addMutableTrack(withMediaType: .video, preferredTrackID: kCMPersistentTrackID_Invalid),
            let secondTrack = mixComposition.addMutableTrack(withMediaType: .video, preferredTrackID: kCMPersistentTrackID_Invalid),
            let thirdTrack  = mixComposition.addMutableTrack(withMediaType: .video, preferredTrackID: kCMPersistentTrackID_Invalid)
        else {
            print("Unable to create composition tracks")
            return
        }

        let firstDuration  = arrVideoAsset[0].duration
        let secondDuration = arrVideoAsset[1].duration
        let thirdDuration  = arrVideoAsset[2].duration
        let secondStart = firstDuration
        // FIX: the third segment starts after the first TWO segments. The
        // original inserted it at arrVideoAsset[1].duration, overlapping segment 2.
        let thirdStart = CMTimeAdd(firstDuration, secondDuration)

        do {
            try firstTrack.insertTimeRange(CMTimeRangeMake(kCMTimeZero, firstDuration), of: arrVideoAsset[0].tracks(withMediaType: .video)[0], at: kCMTimeZero)
        } catch _ {
            print("Failed to load first track")
        }
        do {
            try secondTrack.insertTimeRange(CMTimeRangeMake(kCMTimeZero, secondDuration), of: arrVideoAsset[1].tracks(withMediaType: .video)[0], at: secondStart)
        } catch _ {
            print("Failed to load second track")
        }
        do {
            // FIX: use asset index 2 for the third segment (the original reused
            // index 0, which only worked because both happened to be the same clip).
            try thirdTrack.insertTimeRange(CMTimeRangeMake(kCMTimeZero, thirdDuration), of: arrVideoAsset[2].tracks(withMediaType: .video)[0], at: thirdStart)
        } catch _ {
            // FIX: message previously said "second track".
            print("Failed to load third track")
        }

        // One instruction spanning the whole merged timeline.
        let compositionInstruction = AVMutableVideoCompositionInstruction()
        let totalDuration = CMTimeAdd(firstDuration, CMTimeAdd(secondDuration, thirdDuration))
        compositionInstruction.timeRange = CMTimeRangeMake(kCMTimeZero, totalDuration)

        // Layer instructions: each track is hidden once its segment ends so the
        // next track shows through.
        let firstInstruction = videoCompositionInstructionForTrack(firstTrack, asset: arrVideoAsset[0])
        firstInstruction.setOpacity(0.0, at: firstDuration)
        let secondInstruction = videoCompositionInstructionForTrack(secondTrack, asset: arrVideoAsset[1])
        // FIX: the original hid this track at arrVideoAsset[1].duration — an
        // offset measured from time zero — blanking the middle segment before it
        // finished. It must stay visible until the third segment starts.
        secondInstruction.setOpacity(0.0, at: thirdStart)
        let thirdInstruction = videoCompositionInstructionForTrack(thirdTrack, asset: arrVideoAsset[2])
        compositionInstruction.layerInstructions = [firstInstruction, secondInstruction, thirdInstruction]

        // By changing this render size you directly affect the merged video's
        // dimensions (and hence its file size). Calculate it carefully for your needs;
        // here we render at the smaller of the first two tracks' natural sizes.
        let renderSize = CGSize(
            width:  min(firstTrack.naturalSize.width,  secondTrack.naturalSize.width),
            height: min(firstTrack.naturalSize.height, secondTrack.naturalSize.height))

        let mainComposition = AVMutableVideoComposition()
        mainComposition.instructions = [compositionInstruction]
        mainComposition.frameDuration = CMTimeMake(1, 30)
        mainComposition.renderSize = renderSize

        let outputPath = getDocumentDirectoryPath() + "/" + outPutVideo
        guard let exporter = AVAssetExportSession(asset: mixComposition, presetName: AVAssetExportPresetHighestQuality) else {
            print("Unable to create export session")
            return
        }
        exporter.outputURL = URL(fileURLWithPath: outputPath)
        exporter.outputFileType = AVFileType.mp4
        exporter.shouldOptimizeForNetworkUse = true
        exporter.videoComposition = mainComposition
        print(outputPath)
        exporter.exportAsynchronously(completionHandler: {
            DispatchQueue.main.async {
                if exporter.status == AVAssetExportSessionStatus.completed {
                    // FIX: the original re-read the exported file into memory and
                    // wrote it back to the very same path — a pointless double pass
                    // over a potentially large file. The exporter has already
                    // written the finished movie to `outputPath`.
                    print("Merged video saved at \(outputPath)")
                    //Uncomment This If you want to save video in Photos Library
                    // PHPhotoLibrary.shared().performChanges({
                    // PHAssetChangeRequest.creationRequestForAssetFromVideo(atFileURL: exporter.outputURL!)
                    // }, completionHandler: { (success, error) in
                    // if success {
                    // let fetchOptions = PHFetchOptions()
                    // fetchOptions.sortDescriptors = [NSSortDescriptor.init(key:"creationDate", ascending: false)]
                    // _ = PHAsset.fetchAssets(with: .video, options:fetchOptions).firstObject
                    // } else {
                    // print("Error in Saving File in Photo Libaray -> \(String(describing: error?.localizedDescription))")
                    // }
                    // })
                } else {
                    print("Error -> \(String(describing: exporter.error?.localizedDescription))")
                }
            }
        })
    }

    //--------------------------------------------------

    /// Builds a layer instruction for `track` that applies the source asset's
    /// preferred transform at time zero (fixes recorded orientation).
    /// - Note: the identity-scale transform the original concatenated here was a
    ///   no-op and has been removed.
    func videoCompositionInstructionForTrack(_ track: AVCompositionTrack, asset: AVAsset) -> AVMutableVideoCompositionLayerInstruction {
        let instruction = AVMutableVideoCompositionLayerInstruction(assetTrack: track)
        let assetTrack = asset.tracks(withMediaType: AVMediaType.video)[0]
        instruction.setTransform(assetTrack.preferredTransform, at: kCMTimeZero)
        return instruction
    }

    //--------------------------------------------------

    /// Returns the app's documents directory path as a string.
    func getDocumentDirectoryPath() -> String {
        let arrPaths = NSSearchPathForDirectoriesInDomains(.documentDirectory, .userDomainMask, true)
        return arrPaths[0]
    }

    //--------------------------------------------------
    //MARK:
    //MARK: - View Life Cycle Methods
    //--------------------------------------------------

    override func viewDidLoad() {
        super.viewDidLoad()
        // Prepare video assets in playback order: blank (1), main (2), blank (1).
        arrVideoAsset.append(AVAsset(url:getVideoURL(forVideo:video1)))
        arrVideoAsset.append(AVAsset(url:getVideoURL(forVideo:video2)))
        arrVideoAsset.append(AVAsset(url:getVideoURL(forVideo:video1)))
        // Merge these videos.
        mergeVideos(arrVideoAsset:arrVideoAsset)
    }
}
关于ios - 是什么导致 AVMutableComposition 大幅增加视频的大小? - iOS、 swift 、AVFoundation,我们在Stack Overflow上找到一个类似的问题: https://stackoverflow.com/questions/45610327/
我正在尝试在嵌套层次结构中使用 AVMutableComposition。因此我必须将一个作品放入另一个作品中。这样做会导致以下错误消息: AVCompositionTest[45347:10703]
当源视频为纵向时,此功能会将合并的合成导出为横向。我将原始视频以纵向保存到我的文档目录中,然后将其保存到相机胶卷中并且工作正常。然后,我将保存的视频的 URL 传递给该函数,它会以某种方式将其旋转为横
我在 iOS 应用程序中使用 AVCaptureConnection 捕获视频。之后,我在视频中添加一些图像作为 CALayers。一切工作正常,但在添加图像后,我在生成的视频的最后出现黑框。实际音频
我一直在尝试使用 AVMutableComposition 将音频(录制的)与视频(.mp4)混合,这意味着我需要 2 个文件在混合后并行播放,这是我使用的代码:temporaryRecFile 为录
我正在使用 PryntTrimmerView 来修剪视频文件。有我的导出修剪视频文件和生成视频缩略图的代码: func prepareAssetComposition() throws { g
我有点困惑为什么每次出现这一行都被视为内存泄漏: let asset = AVMutableComposition() 查看 SO 并没有真正找到任何答案。只是想在我提交到应用商店之前清除一些内存泄漏
我的应用程序结合了视频序列,并根据情况将音乐添加到最终序列。添加音乐后,该应用会检查用户是否选择了一段视频作为开场,然后将其组合成最终视频。 在运行 profiler 时,我在第二次和第三次执行此操作
我一天中的大部分时间都在围绕 StackOverflow 进行深入研究,虽然有很多关于该主题的精彩帖子,但我还没有找到解决我的问题的方法。 p> 我正在使用 AVAssetWriter 编写视频文件,
我想在我的 AVMutableComposition 的末尾渲染一些图形,就像电影结尾的演职员表一样。我怎样才能创建一个空白 Assets 来延长合成时间,给我一些可以渲染的空白空间? 最佳答案 我找
我有一个与 AVPlayer 关联的 AVMutableComposition。似乎如果我将轨道添加到这个组合中,AVPlayer 不会播放添加的轨道,除非我再次在播放器上设置组合。现在我只是通过用已
这个问题与AVMutableComposition - Blank/Black frame between videos assets 很相关但由于我没有使用 AVAssetExportSession
我正在使用 AVMutableComposition 和 AVAssetExportSession 来剪辑视频。随机地,我的意思是随机地(我无法始终如一地重现)用户的视频在修剪视频的开头有几个黑框。音
我正在使用 AVMutableComposition 创建视频并使用 AVAssetExportSession 导出它并为其提供预设 AVAssetExportPresetHighestQuality
我正在尝试使用 AVMutableComposition 将图像渲染到使用前置摄像头拍摄的视频中。生成的视频(包括图像)的大小非常好。 但是,初始视频将调整大小,如下图所示: 我正在使用 NextLe
为了修剪视频,我使用了AVMutableCompositionTrack的removeTimeRange方法。 [mCachedCompositionTrack removeTimeRange:CMT
我正在尝试将 4 个音轨组合成一个乐曲,然后将该乐曲导出到一个文件。到目前为止,我的文件已成功创建,但所有音轨都以全音量播放,而不是我尝试设置的音量级别。这是我现在正在做的事情: AVMutableC
我正在尝试合并和裁剪一系列视频。一切正常,但生成的视频旋转了 90 度。我试图应用旋转变换,但没有任何 react 。我已经查看了至少 4 个其他线程,看看他们的解决方案是什么,而且他们几乎都在做和我
我在视频上添加水印。我设置了原点水印层(10;10),但是导出后,水印位于左下角。如果我增加 y,水印会向上移动。好像y轴自下而上增加。 任何人都可以提出一些建议吗? NSString *path =
我在此处发布的问题的答案中使用代码(在下面添加): Swift 3: How to add watermark on video ? AVVideoCompositionCoreAnimationTo
我构建的应用程序需要以下功能: 将视频组合成单个 AVMutableComposition 并将该单个视频组合呈现给用户。 以多个 block 的形式导出视频合成。例如,如果整个作品时长为 60 秒,
我是一名优秀的程序员,十分优秀!