Best Answer
Swift 4 - Adding a blurred background to videos

1. Single-video support ❤️
2. Merging multiple videos ❤️
3. Any canvas, in any aspect ratio ❤️
4. Saving the final video to the camera roll ❤️
5. Handling all video orientations ❤️

I may be late with this answer, but I still haven't found any existing solution for this requirement, so I'm sharing my work: 👍👍

⭐ Download Sample Code Here ⭐

Steps to add a blurred background to videos (each step maps to one of the functions below):

1. Merge all the videos into one video that aspect-fills the target canvas (mergeVideos).
2. Apply a Gaussian blur to the merged video (addBlurEffect).
3. Composite the original videos, aspect-fit, at the center of the blurred video (addAllVideosAtCenterOfBlur).
func mergeVideos(_ videos: Array<AVURLAsset>, inArea area: CGSize, completion: @escaping (_ error: Error?, _ url: URL?) -> Swift.Void) {

    // Create an AVMutableComposition. This object will hold our multiple AVMutableCompositionTracks.
    let mixComposition = AVMutableComposition()

    var instructionLayers: Array<AVMutableVideoCompositionLayerInstruction> = []

    for asset in videos {

        // Create an AVMutableCompositionTrack: a new video track added to our AVMutableComposition.
        let track = mixComposition.addMutableTrack(withMediaType: AVMediaType.video, preferredTrackID: kCMPersistentTrackID_Invalid)

        // Insert each asset at the current end of the composition (kCMTimeZero for the first clip,
        // mixComposition.duration for the following ones) so the clips play back to back.
        if let videoTrack = asset.tracks(withMediaType: AVMediaType.video).first {

            /// Time at which this video's layer should be hidden
            let opacityStartTime: CMTime = CMTimeMakeWithSeconds(0, asset.duration.timescale)
            let opacityEndTime: CMTime = CMTimeAdd(mixComposition.duration, asset.duration)
            let hideAfter: CMTime = CMTimeAdd(opacityStartTime, opacityEndTime)

            let timeRange = CMTimeRangeMake(kCMTimeZero, asset.duration)
            try? track?.insertTimeRange(timeRange, of: videoTrack, at: mixComposition.duration)

            /// Layer instruction
            let layerInstruction = AVMutableVideoCompositionLayerInstruction(assetTrack: track!)
            layerInstruction.setOpacity(0.0, at: hideAfter)

            /// Scale and position the track so it aspect-fills the given area
            let properties = scaleAndPositionInAspectFillMode(forTrack: videoTrack, inArea: area)

            /// Check the orientation so the transform below can compensate for it
            let videoOrientation: UIImageOrientation = self.getVideoOrientation(forTrack: videoTrack)
            let assetSize = self.assetSize(forTrack: videoTrack)

            if videoOrientation == .down {
                /// Rotate
                let defaultTransform = asset.preferredTransform
                let rotateTransform = CGAffineTransform(rotationAngle: -CGFloat(Double.pi / 2.0))

                // Scale
                let scaleTransform = CGAffineTransform(scaleX: properties.scale.width, y: properties.scale.height)

                // Translate
                var ytranslation: CGFloat = assetSize.height
                var xtranslation: CGFloat = 0
                if properties.position.y == 0 {
                    xtranslation = -(assetSize.width - ((area.width / area.height) * assetSize.height)) / 2.0
                }
                else {
                    ytranslation = assetSize.height - (assetSize.height - ((area.height / area.width) * assetSize.width)) / 2.0
                }
                let translationTransform = CGAffineTransform(translationX: xtranslation, y: ytranslation)

                // Final transformation - concatenation
                let finalTransform = defaultTransform.concatenating(rotateTransform).concatenating(translationTransform).concatenating(scaleTransform)
                layerInstruction.setTransform(finalTransform, at: kCMTimeZero)
            }
            else if videoOrientation == .left {
                /// Rotate
                let defaultTransform = asset.preferredTransform
                let rotateTransform = CGAffineTransform(rotationAngle: -CGFloat(Double.pi))

                // Scale
                let scaleTransform = CGAffineTransform(scaleX: properties.scale.width, y: properties.scale.height)

                // Translate
                var ytranslation: CGFloat = assetSize.height
                var xtranslation: CGFloat = assetSize.width
                if properties.position.y == 0 {
                    xtranslation = assetSize.width - (assetSize.width - ((area.width / area.height) * assetSize.height)) / 2.0
                }
                else {
                    ytranslation = assetSize.height - (assetSize.height - ((area.height / area.width) * assetSize.width)) / 2.0
                }
                let translationTransform = CGAffineTransform(translationX: xtranslation, y: ytranslation)

                // Final transformation - concatenation
                let finalTransform = defaultTransform.concatenating(rotateTransform).concatenating(translationTransform).concatenating(scaleTransform)
                layerInstruction.setTransform(finalTransform, at: kCMTimeZero)
            }
            else if videoOrientation == .right {
                /// No need to rotate
                // Scale
                let scaleTransform = CGAffineTransform(scaleX: properties.scale.width, y: properties.scale.height)

                // Translate
                let translationTransform = CGAffineTransform(translationX: properties.position.x, y: properties.position.y)

                let finalTransform = scaleTransform.concatenating(translationTransform)
                layerInstruction.setTransform(finalTransform, at: kCMTimeZero)
            }
            else {
                /// Rotate
                let defaultTransform = asset.preferredTransform
                let rotateTransform = CGAffineTransform(rotationAngle: CGFloat(Double.pi / 2.0))

                // Scale
                let scaleTransform = CGAffineTransform(scaleX: properties.scale.width, y: properties.scale.height)

                // Translate
                var ytranslation: CGFloat = 0
                var xtranslation: CGFloat = assetSize.width
                if properties.position.y == 0 {
                    xtranslation = assetSize.width - (assetSize.width - ((area.width / area.height) * assetSize.height)) / 2.0
                }
                else {
                    ytranslation = -(assetSize.height - ((area.height / area.width) * assetSize.width)) / 2.0
                }
                let translationTransform = CGAffineTransform(translationX: xtranslation, y: ytranslation)

                // Final transformation - concatenation
                let finalTransform = defaultTransform.concatenating(rotateTransform).concatenating(translationTransform).concatenating(scaleTransform)
                layerInstruction.setTransform(finalTransform, at: kCMTimeZero)
            }

            instructionLayers.append(layerInstruction)
        }
    }

    let mainInstruction = AVMutableVideoCompositionInstruction()
    mainInstruction.timeRange = CMTimeRangeMake(kCMTimeZero, mixComposition.duration)
    mainInstruction.layerInstructions = instructionLayers

    let mainCompositionInst = AVMutableVideoComposition()
    mainCompositionInst.instructions = [mainInstruction]
    mainCompositionInst.frameDuration = CMTimeMake(1, 30)
    mainCompositionInst.renderSize = area

    //let url = URL(fileURLWithPath: "/Users/enacteservices/Desktop/final_video.mov")
    let url = self.videoOutputURL
    try? FileManager.default.removeItem(at: url)

    let exporter = AVAssetExportSession(asset: mixComposition, presetName: AVAssetExportPresetHighestQuality)
    exporter?.outputURL = url
    exporter?.outputFileType = .mp4
    exporter?.videoComposition = mainCompositionInst
    exporter?.shouldOptimizeForNetworkUse = true
    exporter?.exportAsynchronously(completionHandler: {
        if let anError = exporter?.error {
            completion(anError, nil)
        }
        else if exporter?.status == AVAssetExportSessionStatus.completed {
            completion(nil, url)
        }
    })
}
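Note that mergeVideos (and addAllVideosAtCenterOfBlur below) rely on several helpers that are not included in this answer: getVideoOrientation(forTrack:), assetSize(forTrack:), scaleAndPositionInAspectFillMode(forTrack:inArea:), scaleAndPositionInAspectFitMode(forTrack:inArea:), and the size and videoOutputURL properties. Below is a minimal sketch of what they might look like, assuming orientation is read from the track's preferredTransform; the exact math and names here are my assumptions, not the original author's code. In the original they presumably live on the same class (the code calls them via self); they are sketched here as plain functions and file-scope properties.

import AVFoundation
import UIKit

/// Hypothetical properties assumed by the snippets in this answer.
let size = CGSize(width: 1280, height: 720) // render canvas size (placeholder)
var videoOutputURL: URL {
    // A fresh output path per export (assumption; an export must not overwrite its own input)
    return FileManager.default.temporaryDirectory
        .appendingPathComponent("export_\(UUID().uuidString).mp4")
}

func getVideoOrientation(forTrack track: AVAssetTrack) -> UIImageOrientation {
    // Map the track's preferredTransform onto the four 90-degree orientations
    let t = track.preferredTransform
    if t.a == 0 && t.b == 1.0 && t.c == -1.0 && t.d == 0 { return .right } // 90°, shot in portrait
    if t.a == 0 && t.b == -1.0 && t.c == 1.0 && t.d == 0 { return .left }  // 270°
    if t.a == -1.0 && t.d == -1.0 { return .down }                         // 180°, upside down
    return .up                                                             // 0°, landscape
}

func assetSize(forTrack track: AVAssetTrack) -> CGSize {
    // naturalSize is reported before rotation; apply preferredTransform to get the display size
    let s = track.naturalSize.applying(track.preferredTransform)
    return CGSize(width: abs(s.width), height: abs(s.height))
}

func scaleAndPositionInAspectFillMode(forTrack track: AVAssetTrack, inArea area: CGSize) -> (scale: CGSize, position: CGPoint) {
    let trackSize = assetSize(forTrack: track)
    let factor = max(area.width / trackSize.width, area.height / trackSize.height) // fill: overflow is cropped
    let scaled = CGSize(width: trackSize.width * factor, height: trackSize.height * factor)
    return (CGSize(width: factor, height: factor),
            CGPoint(x: (area.width - scaled.width) / 2.0, y: (area.height - scaled.height) / 2.0))
}

func scaleAndPositionInAspectFitMode(forTrack track: AVAssetTrack, inArea area: CGSize) -> (scale: CGSize, position: CGPoint) {
    let trackSize = assetSize(forTrack: track)
    let factor = min(area.width / trackSize.width, area.height / trackSize.height) // fit: letterboxed
    let scaled = CGSize(width: trackSize.width * factor, height: trackSize.height * factor)
    return (CGSize(width: factor, height: factor),
            CGPoint(x: (area.width - scaled.width) / 2.0, y: (area.height - scaled.height) / 2.0))
}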
func addBlurEffect(toVideo asset: AVURLAsset, completion: @escaping (_ error: Error?, _ url: URL?) -> Swift.Void) {

    let filter = CIFilter(name: "CIGaussianBlur")
    let composition = AVVideoComposition(asset: asset, applyingCIFiltersWithHandler: { request in
        // Clamp to avoid blurring transparent pixels at the image edges
        let source: CIImage? = request.sourceImage.clampedToExtent()
        filter?.setValue(source, forKey: kCIInputImageKey)
        filter?.setValue(10.0, forKey: kCIInputRadiusKey)

        // Crop the blurred output to the bounds of the original image
        let output: CIImage? = filter?.outputImage?.cropped(to: request.sourceImage.extent)

        // Provide the filter output to the composition
        if let anOutput = output {
            request.finish(with: anOutput, context: nil)
        }
    })

    //let url = URL(fileURLWithPath: "/Users/enacteservices/Desktop/final_video.mov")
    let url = self.videoOutputURL
    // Remove any previous video at that path
    try? FileManager.default.removeItem(at: url)

    let exporter = AVAssetExportSession(asset: asset, presetName: AVAssetExportPresetHighestQuality)
    // Assign the video composition (in this case the Gaussian blur filter pipeline)
    exporter?.videoComposition = composition
    exporter?.outputFileType = .mp4
    exporter?.outputURL = url
    exporter?.exportAsynchronously(completionHandler: {
        if let anError = exporter?.error {
            completion(anError, nil)
        }
        else if exporter?.status == AVAssetExportSessionStatus.completed {
            completion(nil, url)
        }
    })
}
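If you want to tweak the blur strength, the radius can be made a parameter. A small variant sketch (the function and parameter names are mine, not from the original answer); creating the filter inside the handler also avoids sharing mutable filter state across frames, which the compositor may process concurrently:

func makeBlurComposition(for asset: AVAsset, radius: Double = 10.0) -> AVVideoComposition {
    return AVVideoComposition(asset: asset, applyingCIFiltersWithHandler: { request in
        // A fresh filter per frame: no shared mutable state between concurrent requests
        guard let filter = CIFilter(name: "CIGaussianBlur") else {
            request.finish(with: NSError(domain: "BlurComposition", code: -1, userInfo: nil))
            return
        }
        filter.setValue(request.sourceImage.clampedToExtent(), forKey: kCIInputImageKey)
        filter.setValue(radius, forKey: kCIInputRadiusKey)
        if let output = filter.outputImage?.cropped(to: request.sourceImage.extent) {
            request.finish(with: output, context: nil)
        }
        else {
            request.finish(with: NSError(domain: "BlurComposition", code: -1, userInfo: nil))
        }
    })
}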
func addAllVideosAtCenterOfBlur(videos: Array<AVURLAsset>, blurVideo: AVURLAsset, completion: @escaping (_ error: Error?, _ url: URL?) -> Swift.Void) {

    // Create an AVMutableComposition. This object will hold our multiple AVMutableCompositionTracks.
    let mixComposition = AVMutableComposition()

    var instructionLayers: Array<AVMutableVideoCompositionLayerInstruction> = []

    // Add the blurred video first so it ends up behind the sharp videos
    let blurVideoTrack = mixComposition.addMutableTrack(withMediaType: AVMediaType.video, preferredTrackID: kCMPersistentTrackID_Invalid)
    if let videoTrack = blurVideo.tracks(withMediaType: AVMediaType.video).first {
        let timeRange = CMTimeRangeMake(kCMTimeZero, blurVideo.duration)
        try? blurVideoTrack?.insertTimeRange(timeRange, of: videoTrack, at: kCMTimeZero)
    }

    /// Add the other videos at the center of the blurred video
    var startAt = kCMTimeZero
    for asset in videos {

        /// Time range of this asset
        let timeRange = CMTimeRangeMake(kCMTimeZero, asset.duration)

        // Create an AVMutableCompositionTrack: a new video track added to our AVMutableComposition.
        let track = mixComposition.addMutableTrack(withMediaType: AVMediaType.video, preferredTrackID: kCMPersistentTrackID_Invalid)

        // Insert each asset at startAt so the clips play back to back on top of the blurred background.
        if let videoTrack = asset.tracks(withMediaType: AVMediaType.video).first {

            /// Time at which this video's layer should be hidden
            let opacityStartTime: CMTime = CMTimeMakeWithSeconds(0, asset.duration.timescale)
            let opacityEndTime: CMTime = CMTimeAdd(startAt, asset.duration)
            let hideAfter: CMTime = CMTimeAdd(opacityStartTime, opacityEndTime)

            /// Add the video track
            try? track?.insertTimeRange(timeRange, of: videoTrack, at: startAt)

            /// Layer instruction
            let layerInstruction = AVMutableVideoCompositionLayerInstruction(assetTrack: track!)
            layerInstruction.setOpacity(0.0, at: hideAfter)

            /// Scale and position the track so it aspect-fits the render canvas
            /// (`size` is assumed to be a property of the enclosing class holding the canvas size)
            let properties = scaleAndPositionInAspectFitMode(forTrack: videoTrack, inArea: size)

            /// Check the orientation so the transform below can compensate for it
            let videoOrientation: UIImageOrientation = self.getVideoOrientation(forTrack: videoTrack)
            let assetSize = self.assetSize(forTrack: videoTrack)

            if videoOrientation == .down {
                /// Rotate
                let defaultTransform = asset.preferredTransform
                let rotateTransform = CGAffineTransform(rotationAngle: -CGFloat(Double.pi / 2.0))

                // Scale
                let scaleTransform = CGAffineTransform(scaleX: properties.scale.width, y: properties.scale.height)

                // Translate
                var ytranslation: CGFloat = assetSize.height
                var xtranslation: CGFloat = 0
                if properties.position.y == 0 {
                    xtranslation = -(assetSize.width - ((size.width / size.height) * assetSize.height)) / 2.0
                }
                else {
                    ytranslation = assetSize.height - (assetSize.height - ((size.height / size.width) * assetSize.width)) / 2.0
                }
                let translationTransform = CGAffineTransform(translationX: xtranslation, y: ytranslation)

                // Final transformation - concatenation
                let finalTransform = defaultTransform.concatenating(rotateTransform).concatenating(translationTransform).concatenating(scaleTransform)
                layerInstruction.setTransform(finalTransform, at: kCMTimeZero)
            }
            else if videoOrientation == .left {
                /// Rotate
                let defaultTransform = asset.preferredTransform
                let rotateTransform = CGAffineTransform(rotationAngle: -CGFloat(Double.pi))

                // Scale
                let scaleTransform = CGAffineTransform(scaleX: properties.scale.width, y: properties.scale.height)

                // Translate
                var ytranslation: CGFloat = assetSize.height
                var xtranslation: CGFloat = assetSize.width
                if properties.position.y == 0 {
                    xtranslation = assetSize.width - (assetSize.width - ((size.width / size.height) * assetSize.height)) / 2.0
                }
                else {
                    ytranslation = assetSize.height - (assetSize.height - ((size.height / size.width) * assetSize.width)) / 2.0
                }
                let translationTransform = CGAffineTransform(translationX: xtranslation, y: ytranslation)

                // Final transformation - concatenation
                let finalTransform = defaultTransform.concatenating(rotateTransform).concatenating(translationTransform).concatenating(scaleTransform)
                layerInstruction.setTransform(finalTransform, at: kCMTimeZero)
            }
            else if videoOrientation == .right {
                /// No need to rotate
                // Scale
                let scaleTransform = CGAffineTransform(scaleX: properties.scale.width, y: properties.scale.height)

                // Translate
                let translationTransform = CGAffineTransform(translationX: properties.position.x, y: properties.position.y)

                let finalTransform = scaleTransform.concatenating(translationTransform)
                layerInstruction.setTransform(finalTransform, at: kCMTimeZero)
            }
            else {
                /// Rotate
                let defaultTransform = asset.preferredTransform
                let rotateTransform = CGAffineTransform(rotationAngle: CGFloat(Double.pi / 2.0))

                // Scale
                let scaleTransform = CGAffineTransform(scaleX: properties.scale.width, y: properties.scale.height)

                // Translate
                var ytranslation: CGFloat = 0
                var xtranslation: CGFloat = assetSize.width
                if properties.position.y == 0 {
                    xtranslation = assetSize.width - (assetSize.width - ((size.width / size.height) * assetSize.height)) / 2.0
                }
                else {
                    ytranslation = -(assetSize.height - ((size.height / size.width) * assetSize.width)) / 2.0
                }
                let translationTransform = CGAffineTransform(translationX: xtranslation, y: ytranslation)

                // Final transformation - concatenation
                let finalTransform = defaultTransform.concatenating(rotateTransform).concatenating(translationTransform).concatenating(scaleTransform)
                layerInstruction.setTransform(finalTransform, at: kCMTimeZero)
            }

            instructionLayers.append(layerInstruction)
        }

        /// Add this asset's audio
        if let audioTrack = asset.tracks(withMediaType: AVMediaType.audio).first {
            let aTrack = mixComposition.addMutableTrack(withMediaType: AVMediaType.audio, preferredTrackID: kCMPersistentTrackID_Invalid)
            try? aTrack?.insertTimeRange(timeRange, of: audioTrack, at: startAt)
        }

        // Advance startAt past this asset
        startAt = CMTimeAdd(startAt, asset.duration)
    }

    /// Layer instruction for the blurred background
    let layerInstruction = AVMutableVideoCompositionLayerInstruction(assetTrack: blurVideoTrack!)
    instructionLayers.append(layerInstruction)

    let mainInstruction = AVMutableVideoCompositionInstruction()
    mainInstruction.timeRange = CMTimeRangeMake(kCMTimeZero, blurVideo.duration)
    mainInstruction.layerInstructions = instructionLayers

    let mainCompositionInst = AVMutableVideoComposition()
    mainCompositionInst.instructions = [mainInstruction]
    mainCompositionInst.frameDuration = CMTimeMake(1, 30)
    mainCompositionInst.renderSize = size

    //let url = URL(fileURLWithPath: "/Users/enacteservices/Desktop/final_video.mov")
    let url = self.videoOutputURL
    try? FileManager.default.removeItem(at: url)

    let exporter = AVAssetExportSession(asset: mixComposition, presetName: AVAssetExportPresetHighestQuality)
    exporter?.outputURL = url
    exporter?.outputFileType = .mp4
    exporter?.videoComposition = mainCompositionInst
    exporter?.shouldOptimizeForNetworkUse = true
    exporter?.exportAsynchronously(completionHandler: {
        if let anError = exporter?.error {
            completion(anError, nil)
        }
        else if exporter?.status == AVAssetExportSessionStatus.completed {
            completion(nil, url)
        }
    })
}
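Putting it together, the three functions chain merge → blur → overlay. A hedged usage sketch (the clip URLs and canvas size are placeholders; this also assumes videoOutputURL produces a fresh path per call, since each export step must not write over its own input):

let urls: [URL] = [] // your source clip URLs (placeholder)
let videos = urls.map { AVURLAsset(url: $0) }
let canvas = CGSize(width: 1280, height: 720) // target render size (assumption)

mergeVideos(videos, inArea: canvas) { error, mergedURL in
    guard let mergedURL = mergedURL, error == nil else { return }
    // Blur the merged full-canvas video
    self.addBlurEffect(toVideo: AVURLAsset(url: mergedURL)) { error, blurURL in
        guard let blurURL = blurURL, error == nil else { return }
        // Overlay the sharp clips, aspect-fit, at the center of the blurred background
        self.addAllVideosAtCenterOfBlur(videos: videos, blurVideo: AVURLAsset(url: blurURL)) { error, finalURL in
            // finalURL is the finished video; save it to the camera roll if desired
            print(finalURL ?? error ?? "unknown result")
        }
    }
}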
Regarding ios - AVFoundation - adding a blurred background to videos, we found a similar question on Stack Overflow: https://stackoverflow.com/questions/44105717/