
ios - How to add a watermark to a video captured on iOS - Swift programming


Can someone help me add a watermark to a recorded video using the Swift programming language? For reference, I have been looking into the AVFoundation framework.

Here are the two requirements:

  1. A text watermark using a UILabel
  2. A watermark using a UIImage

Thanks in advance.

Best answer

I have found the solution by updating the answer given by @m177312 — check the link.

import UIKit
import AssetsLibrary
import AVFoundation

enum QUWatermarkPosition {
    case TopLeft
    case TopRight
    case BottomLeft
    case BottomRight
    case Default
}

class QUWatermarkManager: NSObject {

    func watermark(video videoAsset: AVAsset, watermarkText text: String, saveToLibrary flag: Bool, watermarkPosition position: QUWatermarkPosition, completion: ((status: AVAssetExportSessionStatus!, session: AVAssetExportSession!, outputURL: NSURL!) -> ())?) {
        self.watermark(video: videoAsset, watermarkText: text, imageName: nil, saveToLibrary: flag, watermarkPosition: position) { (status, session, outputURL) -> () in
            completion?(status: status, session: session, outputURL: outputURL)
        }
    }

    func watermark(video videoAsset: AVAsset, imageName name: String, saveToLibrary flag: Bool, watermarkPosition position: QUWatermarkPosition, completion: ((status: AVAssetExportSessionStatus!, session: AVAssetExportSession!, outputURL: NSURL!) -> ())?) {
        self.watermark(video: videoAsset, watermarkText: nil, imageName: name, saveToLibrary: flag, watermarkPosition: position) { (status, session, outputURL) -> () in
            completion?(status: status, session: session, outputURL: outputURL)
        }
    }

    private func watermark(video videoAsset: AVAsset, watermarkText text: String!, imageName name: String!, saveToLibrary flag: Bool, watermarkPosition position: QUWatermarkPosition, completion: ((status: AVAssetExportSessionStatus!, session: AVAssetExportSession!, outputURL: NSURL!) -> ())?) {

        dispatch_async(dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_DEFAULT, 0), { () -> Void in
            // 1 - Create an AVMutableComposition object. This will hold the AVMutableCompositionTrack instances.
            let mixComposition = AVMutableComposition()

            // 2 - Copy the source video track into the composition
            let compositionVideoTrack = mixComposition.addMutableTrackWithMediaType(AVMediaTypeVideo, preferredTrackID: Int32(kCMPersistentTrackID_Invalid))
            let clipVideoTrack = videoAsset.tracksWithMediaType(AVMediaTypeVideo)[0] as! AVAssetTrack
            compositionVideoTrack.insertTimeRange(CMTimeRangeMake(kCMTimeZero, videoAsset.duration), ofTrack: clipVideoTrack, atTime: kCMTimeZero, error: nil)

            // Video size
            let videoSize = clipVideoTrack.naturalSize

            // Layer hierarchy: the video layer plus the watermark layer(s) inside one parent layer
            let parentLayer = CALayer()
            let videoLayer = CALayer()
            parentLayer.frame = CGRectMake(0, 0, videoSize.width, videoSize.height)
            videoLayer.frame = CGRectMake(0, 0, videoSize.width, videoSize.height)
            parentLayer.addSublayer(videoLayer)

            if text != nil {
                // Add a text watermark
                let titleLayer = CATextLayer()
                titleLayer.backgroundColor = UIColor.redColor().CGColor
                titleLayer.string = text
                titleLayer.font = "Helvetica"
                titleLayer.fontSize = 15
                titleLayer.alignmentMode = kCAAlignmentCenter
                titleLayer.bounds = CGRectMake(0, 0, videoSize.width, videoSize.height)
                parentLayer.addSublayer(titleLayer)

                println("video size: \(videoSize.width) x \(videoSize.height)")
            } else if name != nil {
                // Add an image watermark
                let watermarkImage = UIImage(named: name)
                let imageLayer = CALayer()
                imageLayer.contents = watermarkImage?.CGImage

                var xPosition: CGFloat = 0.0
                var yPosition: CGFloat = 0.0
                let imageSize: CGFloat = 57.0

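                // Note: AVVideoCompositionCoreAnimationTool renders this layer tree in a
                // bottom-left-origin coordinate space, so yPosition = 0 is the bottom of
                // the frame; if the watermark appears vertically mirrored, invert the y
                // values or set parentLayer.geometryFlipped = true.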
                switch position {
                case .TopLeft:
                    xPosition = 0
                    yPosition = 0
                case .TopRight:
                    xPosition = videoSize.width - imageSize
                    yPosition = 0
                case .BottomLeft:
                    xPosition = 0
                    yPosition = videoSize.height - imageSize
                case .BottomRight, .Default:
                    xPosition = videoSize.width - imageSize
                    yPosition = videoSize.height - imageSize
                }

                println("watermark position: (\(xPosition), \(yPosition))")

                imageLayer.frame = CGRectMake(xPosition, yPosition, imageSize, imageSize)
                imageLayer.opacity = 0.65
                parentLayer.addSublayer(imageLayer)
            }

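            // 3 - Create a video composition and composite the watermark layer tree
            //     over the video frames via AVVideoCompositionCoreAnimationTool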
            let videoComp = AVMutableVideoComposition()
            videoComp.renderSize = videoSize
            videoComp.frameDuration = CMTimeMake(1, 30)
            videoComp.animationTool = AVVideoCompositionCoreAnimationTool(postProcessingAsVideoLayer: videoLayer, inLayer: parentLayer)

            // Instruction that covers the whole composition and scales/orients the video track
            let instruction = AVMutableVideoCompositionInstruction()
            instruction.timeRange = CMTimeRangeMake(kCMTimeZero, mixComposition.duration)

            let layerInstruction = self.videoCompositionInstructionForTrack(compositionVideoTrack, asset: videoAsset)

            instruction.layerInstructions = [layerInstruction]
            videoComp.instructions = [instruction]

            // 4 - Build an output path in the Documents directory
            let documentDirectory = NSSearchPathForDirectoriesInDomains(.DocumentDirectory, .UserDomainMask, true)[0] as! String
            let dateFormatter = NSDateFormatter()
            dateFormatter.dateStyle = .LongStyle
            dateFormatter.timeStyle = .ShortStyle
            let date = dateFormatter.stringFromDate(NSDate())
            let savePath = documentDirectory.stringByAppendingPathComponent("watermarkVideo-\(date).mov")
            let url = NSURL(fileURLWithPath: savePath)

            // 5 - Create the exporter
            let exporter = AVAssetExportSession(asset: mixComposition, presetName: AVAssetExportPresetHighestQuality)
            exporter.outputURL = url
            exporter.outputFileType = AVFileTypeQuickTimeMovie
            exporter.shouldOptimizeForNetworkUse = true
            exporter.videoComposition = videoComp

            // 6 - Perform the export
            exporter.exportAsynchronouslyWithCompletionHandler() {
                dispatch_async(dispatch_get_main_queue(), { () -> Void in
                    if exporter.status == AVAssetExportSessionStatus.Completed {
                        let outputURL = exporter.outputURL
                        if flag {
                            // Save to the photo library
                            let library = ALAssetsLibrary()
                            if library.videoAtPathIsCompatibleWithSavedPhotosAlbum(outputURL) {
                                library.writeVideoAtPathToSavedPhotosAlbum(outputURL,
                                    completionBlock: { (assetURL: NSURL!, error: NSError!) -> Void in
                                        completion?(status: AVAssetExportSessionStatus.Completed, session: exporter, outputURL: outputURL)
                                })
                            }
                        } else {
                            // Don't save to the library; just hand back the file URL
                            completion?(status: AVAssetExportSessionStatus.Completed, session: exporter, outputURL: outputURL)
                        }
                    } else {
                        // Export failed or was cancelled
                        completion?(status: exporter.status, session: exporter, outputURL: nil)
                    }
                })
            }
        })
    }


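    // Maps a track's preferredTransform onto a UIImageOrientation and reports
    // whether the clip was recorded in portrait.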
    private func orientationFromTransform(transform: CGAffineTransform) -> (orientation: UIImageOrientation, isPortrait: Bool) {
        var assetOrientation = UIImageOrientation.Up
        var isPortrait = false
        if transform.a == 0 && transform.b == 1.0 && transform.c == -1.0 && transform.d == 0 {
            assetOrientation = .Right
            isPortrait = true
        } else if transform.a == 0 && transform.b == -1.0 && transform.c == 1.0 && transform.d == 0 {
            assetOrientation = .Left
            isPortrait = true
        } else if transform.a == 1.0 && transform.b == 0 && transform.c == 0 && transform.d == 1.0 {
            assetOrientation = .Up
        } else if transform.a == -1.0 && transform.b == 0 && transform.c == 0 && transform.d == -1.0 {
            assetOrientation = .Down
        }
        return (assetOrientation, isPortrait)
    }

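    // Builds a layer instruction that applies the track's preferred transform and
    // scales the frame to fit the screen width.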
    private func videoCompositionInstructionForTrack(track: AVCompositionTrack, asset: AVAsset) -> AVMutableVideoCompositionLayerInstruction {
        let instruction = AVMutableVideoCompositionLayerInstruction(assetTrack: track)
        let assetTrack = asset.tracksWithMediaType(AVMediaTypeVideo)[0] as! AVAssetTrack

        let transform = assetTrack.preferredTransform
        let assetInfo = orientationFromTransform(transform)

        var scaleToFitRatio = UIScreen.mainScreen().bounds.width / assetTrack.naturalSize.width
        if assetInfo.isPortrait {
            scaleToFitRatio = UIScreen.mainScreen().bounds.width / assetTrack.naturalSize.height
            let scaleFactor = CGAffineTransformMakeScale(scaleToFitRatio, scaleToFitRatio)
            instruction.setTransform(CGAffineTransformConcat(assetTrack.preferredTransform, scaleFactor),
                atTime: kCMTimeZero)
        } else {
            let scaleFactor = CGAffineTransformMakeScale(scaleToFitRatio, scaleToFitRatio)
            var concat = CGAffineTransformConcat(CGAffineTransformConcat(assetTrack.preferredTransform, scaleFactor), CGAffineTransformMakeTranslation(0, UIScreen.mainScreen().bounds.width / 2))
            if assetInfo.orientation == .Down {
                let fixUpsideDown = CGAffineTransformMakeRotation(CGFloat(M_PI))
                let windowBounds = UIScreen.mainScreen().bounds
                let yFix = assetTrack.naturalSize.height + windowBounds.height
                let centerFix = CGAffineTransformMakeTranslation(assetTrack.naturalSize.width, yFix)
                concat = CGAffineTransformConcat(CGAffineTransformConcat(fixUpsideDown, centerFix), scaleFactor)
            }
            instruction.setTransform(concat, atTime: kCMTimeZero)
        }

        return instruction
    }
}
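
For reference, here is a minimal usage sketch that is not part of the original answer. It assumes a hypothetical NSURL named videoURL that points at a recorded movie file, and an image named "watermark" in the app bundle; the calls match the Swift 1.x signatures defined above:

// Hypothetical input: videoURL is an NSURL pointing at a recorded movie file
let videoAsset = AVURLAsset(URL: videoURL, options: nil)
let manager = QUWatermarkManager()

// Requirement 1: text watermark
manager.watermark(video: videoAsset, watermarkText: "© MyApp", saveToLibrary: true, watermarkPosition: .Default) { (status, session, outputURL) -> () in
    if status == AVAssetExportSessionStatus.Completed {
        println("watermarked video written to \(outputURL)")
    }
}

// Requirement 2: image watermark ("watermark" must exist in the bundle)
manager.watermark(video: videoAsset, imageName: "watermark", saveToLibrary: false, watermarkPosition: .BottomRight) { (status, session, outputURL) -> () in
    if status == AVAssetExportSessionStatus.Completed {
        println("watermarked video written to \(outputURL)")
    }
}

Note that the listing targets the Swift 1.x era SDKs of the original answer: dispatch_async, NSURL, println, and ALAssetsLibrary have all since been replaced (by DispatchQueue, URL, print, and the Photos framework, respectively), so the code needs mechanical updates on current toolchains.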

Thanks to the many people on Stack Overflow who helped me. I have taken a lot of reference material from Stack Overflow, and I apologize to anyone whose answer I did not credit here. Thank you for helping me; I hope this answer helps others as well.

For more on ios - How to add a watermark to a video captured on iOS - Swift programming, see the similar question on Stack Overflow: https://stackoverflow.com/questions/33729772/
