I am developing a video-based application in Swift, in which I export a video clip with a watermark logo and a fade-in/fade-out effect. Here is my code:
func watermark(video videoAsset: AVAsset, videoModal: VideoModel, watermarkText text: String!, imageName name: String!, saveToLibrary flag: Bool, watermarkPosition position: PDWatermarkPosition, withMode mode: SpeedoVideoMode, completion: ((_ status: AVAssetExportSessionStatus?, _ session: AVAssetExportSession?, _ outputURL: URL?) -> ())?) {

    var arrayLayerInstructions: [AVMutableVideoCompositionLayerInstruction] = []
    let servicemodel = ServiceModel()

    DispatchQueue.global(qos: DispatchQoS.QoSClass.default).sync {
        let mixComposition = AVMutableComposition()
        let compositionVideoTrack = mixComposition.addMutableTrack(withMediaType: AVMediaTypeVideo, preferredTrackID: Int32(kCMPersistentTrackID_Invalid))

        if videoAsset.tracks(withMediaType: AVMediaTypeVideo).count == 0 {
            completion!(nil, nil, nil)
            return
        }

        let clipVideoTrack = videoAsset.tracks(withMediaType: AVMediaTypeVideo)[0]

        self.addAudioTrack(composition: mixComposition, videoAsset: videoAsset as! AVURLAsset, withMode: mode, videoModal: videoModal)

        do {
            try compositionVideoTrack.insertTimeRange(CMTimeRangeMake(kCMTimeZero, videoAsset.duration), of: clipVideoTrack, at: kCMTimeZero)
        } catch {
            print(error.localizedDescription)
        }

        let videoSize = clipVideoTrack.naturalSize //CGSize(width: 375, height: 300)

        // to add watermark
        let parentLayer = CALayer()
        let videoLayer = CALayer()
        parentLayer.frame = CGRect(x: 0, y: 0, width: videoSize.width, height: videoSize.height)
        videoLayer.frame = CGRect(x: 0, y: 0, width: videoSize.width, height: videoSize.height)
        //videoLayer.backgroundColor = UIColor.red.cgColor
        parentLayer.addSublayer(videoLayer)

        if name != nil {
            let watermarkImage = UIImage(named: name)
            let imageLayer = CALayer()
            //imageLayer.backgroundColor = UIColor.purple.cgColor
            imageLayer.contents = watermarkImage?.cgImage

            var xPosition: CGFloat = 0.0
            var yPosition: CGFloat = 0.0
            let imageSize: CGFloat = 150

            switch position {
            case .TopLeft:
                xPosition = 0
                yPosition = 0
            case .TopRight:
                xPosition = videoSize.width - imageSize - 100
                yPosition = 80
            case .BottomLeft:
                xPosition = 0
                yPosition = videoSize.height - imageSize
            case .BottomRight, .Default:
                xPosition = videoSize.width - imageSize
                yPosition = videoSize.height - imageSize
            }

            imageLayer.frame = CGRect(x: xPosition, y: yPosition, width: imageSize, height: imageSize)
            imageLayer.opacity = 0.75
            parentLayer.addSublayer(imageLayer)

            if text != nil {
                let titleLayer = CATextLayer()
                titleLayer.backgroundColor = UIColor.clear.cgColor
                titleLayer.string = text
                titleLayer.font = "Helvetica" as CFTypeRef
                titleLayer.fontSize = 20
                titleLayer.alignmentMode = kCAAlignmentRight
                titleLayer.frame = CGRect(x: 0, y: yPosition - imageSize, width: videoSize.width - imageSize / 2 - 4, height: 57)
                titleLayer.foregroundColor = UIColor.lightGray.cgColor
                parentLayer.addSublayer(titleLayer)
            }
        }

        let videoComp = AVMutableVideoComposition()
        videoComp.renderSize = videoSize
        videoComp.frameDuration = CMTimeMake(1, 30)
        videoComp.animationTool = AVVideoCompositionCoreAnimationTool(postProcessingAsVideoLayer: videoLayer, in: parentLayer)

        let instruction = AVMutableVideoCompositionInstruction()
        instruction.timeRange = CMTimeRangeMake(kCMTimeZero, mixComposition.duration)
        _ = mixComposition.tracks(withMediaType: AVMediaTypeVideo)[0] as AVAssetTrack

        let layerInstruction = self.videoCompositionInstructionForTrack(track: compositionVideoTrack, asset: videoAsset)

        // Add fade-in/fade-out effects
        let startTime = CMTime(seconds: Double(0), preferredTimescale: 1000)
        let endTime = CMTime(seconds: Double(1), preferredTimescale: 1000)
        let timeRange = CMTimeRange(start: startTime, end: endTime)
        layerInstruction.setOpacityRamp(fromStartOpacity: 0.1, toEndOpacity: 1.0, timeRange: timeRange)

        let startTime1 = CMTime(seconds: videoAsset.duration.seconds - 1, preferredTimescale: 1000)
        let endTime1 = CMTime(seconds: videoAsset.duration.seconds, preferredTimescale: 1000)
        let timeRange1 = CMTimeRange(start: startTime1, end: endTime1)
        layerInstruction.setOpacityRamp(fromStartOpacity: 1.0, toEndOpacity: 0.1, timeRange: timeRange1)

        arrayLayerInstructions.append(layerInstruction)
        instruction.layerInstructions = arrayLayerInstructions
        videoComp.instructions = [instruction]

        let documentDirectory = NSSearchPathForDirectoriesInDomains(.documentDirectory, .userDomainMask, true)[0]
        let url = URL(fileURLWithPath: documentDirectory).appendingPathComponent("\(videoModal.fileID).mov")
        let filePath = url.path
        let fileManager = FileManager.default
        do {
            if fileManager.fileExists(atPath: filePath) {
                print("FILE AVAILABLE")
                try fileManager.removeItem(atPath: filePath)
            } else {
                print("FILE NOT AVAILABLE")
            }
        } catch _ {
        }

        let exporter = AVAssetExportSession(asset: mixComposition, presetName: AVAssetExportPresetHighestQuality)
        exporter?.outputURL = url
        exporter?.outputFileType = AVFileTypeQuickTimeMovie

        let timeRangetoTrim = CMTimeRange(start: CMTime(seconds: Double(videoModal.leftRangeValue), preferredTimescale: 1000),
                                          end: CMTime(seconds: Double(videoModal.rightRangeValue), preferredTimescale: 1000))
        exporter?.timeRange = timeRangetoTrim

        exporter?.shouldOptimizeForNetworkUse = false
        exporter?.videoComposition = videoComp

        exporter?.exportAsynchronously() {
            DispatchQueue.main.async {
                if exporter?.status == AVAssetExportSessionStatus.completed {
                    let outputURL = exporter?.outputURL
                    if flag {
                        if UIVideoAtPathIsCompatibleWithSavedPhotosAlbum(outputURL!.path) {
                            PHPhotoLibrary.shared().performChanges({
                                PHAssetChangeRequest.creationRequestForAssetFromVideo(atFileURL: outputURL!)
                            }) { saved, error in
                                if saved {
                                    completion!(AVAssetExportSessionStatus.completed, exporter, outputURL)
                                }
                            }
                        }
                    } else {
                        completion!(AVAssetExportSessionStatus.completed, exporter, outputURL)
                    }
                } else {
                    // Error
                    completion!(exporter?.status, exporter, nil) // Getting error here
                }
            }
        }
    }
}
func addAudioTrack(composition: AVMutableComposition, videoAsset: AVURLAsset, withMode mode: SpeedoVideoMode, videoModal: VideoModel) {
    let compositionAudioTrack: AVMutableCompositionTrack = composition.addMutableTrack(withMediaType: AVMediaTypeAudio, preferredTrackID: CMPersistentTrackID())
    let audioTracks = videoAsset.tracks(withMediaType: AVMediaTypeAudio)
    for audioTrack in audioTracks {
        try! compositionAudioTrack.insertTimeRange(audioTrack.timeRange, of: audioTrack, at: kCMTimeZero)
    }
}
func videoCompositionInstructionForTrack(track: AVCompositionTrack, asset: AVAsset) -> AVMutableVideoCompositionLayerInstruction {
    let instruction = AVMutableVideoCompositionLayerInstruction(assetTrack: track)
    let assetTrack = asset.tracks(withMediaType: AVMediaTypeVideo)[0]

    let transform = assetTrack.preferredTransform
    let assetInfo = orientationFromTransform(transform: transform)

    var scaleToFitRatio = UIScreen.main.bounds.width / 375
    if assetInfo.isPortrait {
        scaleToFitRatio = UIScreen.main.bounds.width / assetTrack.naturalSize.height
        let scaleFactor = CGAffineTransform(scaleX: scaleToFitRatio, y: scaleToFitRatio)
        instruction.setTransform(assetTrack.preferredTransform.concatenating(scaleFactor),
                                 at: kCMTimeZero)
    } else {
        let scaleFactor = CGAffineTransform(scaleX: scaleToFitRatio, y: scaleToFitRatio)
        var concat = assetTrack.preferredTransform.concatenating(scaleFactor).concatenating(CGAffineTransform(translationX: 0, y: 0))
        if assetInfo.orientation == .down {
            let fixUpsideDown = CGAffineTransform(rotationAngle: CGFloat(Double.pi))
            let windowBounds = UIScreen.main.bounds
            let yFix = 375 + windowBounds.height
            let centerFix = CGAffineTransform(translationX: assetTrack.naturalSize.width, y: CGFloat(yFix))
            concat = fixUpsideDown.concatenating(centerFix).concatenating(scaleFactor)
        }
        instruction.setTransform(concat, at: kCMTimeZero)
    }

    return instruction
}
private func orientationFromTransform(transform: CGAffineTransform) -> (orientation: UIImageOrientation, isPortrait: Bool) {
    var assetOrientation = UIImageOrientation.up
    var isPortrait = false
    if transform.a == 0 && transform.b == 1.0 && transform.c == -1.0 && transform.d == 0 {
        assetOrientation = .right
        isPortrait = true
    } else if transform.a == 0 && transform.b == -1.0 && transform.c == 1.0 && transform.d == 0 {
        assetOrientation = .left
        isPortrait = true
    } else if transform.a == 1.0 && transform.b == 0 && transform.c == 0 && transform.d == 1.0 {
        assetOrientation = .up
    } else if transform.a == -1.0 && transform.b == 0 && transform.c == 0 && transform.d == -1.0 {
        assetOrientation = .down
    }
    return (assetOrientation, isPortrait)
}
My code works fine for some videos, but for others it fails. Because the AVAssetExportSessionStatus is .failed, I get the following error:
Error Domain=AVFoundationErrorDomain Code=-11800 "The operation could not be completed" UserInfo={NSLocalizedFailureReason=An unknown error occurred (-12780), NSLocalizedDescription=The operation could not be completed, NSUnderlyingError=0x28262c240 {Error Domain=NSOSStatusErrorDomain Code=-12780 "(null)"}}
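As a side note, the underlying OSStatus code (here -12780) is usually more specific than the generic -11800. A minimal sketch of pulling it out of the failure, assuming exporter is the AVAssetExportSession created in the code above:

// Debugging sketch: AVFoundation wraps the real failure in an underlying
// NSError; its domain/code narrow the problem down better than -11800.
if exporter?.status == .failed, let error = exporter?.error as NSError? {
    print("Export failed: \(error.localizedDescription)")
    if let underlying = error.userInfo[NSUnderlyingErrorKey] as? NSError {
        print("Underlying error: \(underlying.domain) code \(underlying.code)")
    }
}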
Can anyone help me solve this problem? Thanks in advance.
Best Answer
The method func videoCompositionInstructionForTrack(track: AVCompositionTrack, asset: AVAsset) -> AVMutableVideoCompositionLayerInstruction is wrong, because you need to supply an AVAssetTrack that holds the actual video. Instead, you are passing an AVCompositionTrack, which still has to be composed. So replace your method signature with func videoCompositionInstructionForTrack(track: AVAssetTrack, asset: AVAsset) -> AVMutableVideoCompositionLayerInstruction. Then, when you call the method, pass clipVideoTrack instead: let layerInstruction = self.videoCompositionInstructionForTrack(track: clipVideoTrack, asset: videoAsset).
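Putting the fix together, a minimal sketch of the corrected signature and call site (the method body stays exactly as in the question; only the parameter type and the argument change):

// Corrected signature: accept the raw AVAssetTrack that carries the actual
// video, not the not-yet-composed AVCompositionTrack.
func videoCompositionInstructionForTrack(track: AVAssetTrack, asset: AVAsset) -> AVMutableVideoCompositionLayerInstruction {
    let instruction = AVMutableVideoCompositionLayerInstruction(assetTrack: track)
    // ... body unchanged from the question's method ...
    return instruction
}

// Call site inside watermark(video:...): pass clipVideoTrack, the track read
// straight from the source asset, instead of compositionVideoTrack.
let layerInstruction = self.videoCompositionInstructionForTrack(track: clipVideoTrack, asset: videoAsset)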
Let me know if you still face the error!
Regarding ios - AVAssetExportSession AVFoundationErrorDomain code -11800 "The operation could not be completed", NSOSStatusErrorDomain code -12780 "(null)" in Swift iOS, we found a similar question on Stack Overflow: https://stackoverflow.com/questions/56930602/