I'm building a Swift video application.
In my app, I need to crop and horizontally flip a CVPixelBuffer and return the result, which must also be a CVPixelBuffer.
I tried several things.
First, I used CVPixelBufferCreateWithBytes:
func resizePixelBuffer(_ pixelBuffer: CVPixelBuffer, destSize: CGSize) -> CVPixelBuffer? {
    CVPixelBufferLockBaseAddress(pixelBuffer, CVPixelBufferLockFlags(rawValue: 0))
    let baseAddress = CVPixelBufferGetBaseAddress(pixelBuffer)
    let bytesPerRow = CVPixelBufferGetBytesPerRow(pixelBuffer)
    let pixelFormat = CVPixelBufferGetPixelFormatType(pixelBuffer)
    let width = CVPixelBufferGetWidth(pixelBuffer)
    let height = CVPixelBufferGetHeight(pixelBuffer)
    var destPixelBuffer: CVPixelBuffer?
    let topMargin = (height - Int(destSize.height)) / 2
    let leftMargin = (width - Int(destSize.width)) / 2 * 4 // bytesPerPixel
    let offset = topMargin * bytesPerRow + leftMargin
    CVPixelBufferCreateWithBytes(kCFAllocatorDefault,
                                 Int(destSize.width),
                                 Int(destSize.height),
                                 pixelFormat,
                                 baseAddress!.advanced(by: offset),
                                 bytesPerRow,
                                 nil, nil, nil,
                                 &destPixelBuffer)
    CVPixelBufferUnlockBaseAddress(pixelBuffer, CVPixelBufferLockFlags(rawValue: 0))
    return destPixelBuffer
}
Next, I tried cropping a CIImage and rendering it into a new buffer:
func resizePixelBuffer(_ pixelBuffer: CVPixelBuffer, destSize: CGSize) -> CVPixelBuffer? {
    let bufferWidth = CVPixelBufferGetWidth(pixelBuffer)
    let bufferHeight = CVPixelBufferGetHeight(pixelBuffer)
    let ciImage = CIImage(cvPixelBuffer: pixelBuffer)
    let rect = CGRect(x: (CGFloat(bufferWidth) - destSize.width) / 2,
                      y: (CGFloat(bufferHeight) - destSize.height) / 2,
                      width: destSize.width,
                      height: destSize.height)
    let croppedImage = ciImage.cropped(to: rect)
    croppedImage.transformed(by: CGAffineTransform(translationX: -1, y: 0))
    var destPixelBuffer: CVPixelBuffer?
    CVPixelBufferCreate(kCFAllocatorDefault, Int(destSize.width), Int(destSize.height),
                        CVPixelBufferGetPixelFormatType(pixelBuffer), nil,
                        &destPixelBuffer)
    CIContext().render(croppedImage, to: destPixelBuffer!, bounds: croppedImage.extent, colorSpace: croppedImage.colorSpace)
    return destPixelBuffer
}
Finally, I tried going through a CGImage and drawing it into the destination buffer with a CGContext:
func resizePixelBuffer(_ pixelBuffer: CVPixelBuffer, destSize: CGSize) -> CVPixelBuffer? {
    let bufferWidth = CVPixelBufferGetWidth(pixelBuffer)
    let bufferHeight = CVPixelBufferGetHeight(pixelBuffer)
    let ciImage = CIImage(cvPixelBuffer: pixelBuffer)
    guard let cgImage = CIContext().createCGImage(ciImage, from: ciImage.extent) else { return nil }
    let rect = CGRect(x: (CGFloat(bufferWidth) - destSize.width) / 2,
                      y: (CGFloat(bufferHeight) - destSize.height) / 2,
                      width: destSize.width,
                      height: destSize.height)
    guard let croppedImage = cgImage.cropping(to: rect) else { return nil }
    let width = croppedImage.width
    let height = croppedImage.height
    let pixelFormat = CVPixelBufferGetPixelFormatType(pixelBuffer)
    var destPixelBuffer: CVPixelBuffer?
    CVPixelBufferCreate(kCFAllocatorDefault, width, height, pixelFormat, nil, &destPixelBuffer)
    guard let destBuffer = destPixelBuffer else { return nil }
    CVPixelBufferLockBaseAddress(destBuffer, CVPixelBufferLockFlags(rawValue: 0))
    let destBaseAddress = CVPixelBufferGetBaseAddress(destBuffer)
    let destBytesPerRow = CVPixelBufferGetBytesPerRow(destBuffer)
    let context = CGContext(data: destBaseAddress,
                            width: width,
                            height: height,
                            bitsPerComponent: 8,
                            bytesPerRow: destBytesPerRow,
                            space: croppedImage.colorSpace ?? CGColorSpaceCreateDeviceRGB(),
                            bitmapInfo: CGImageAlphaInfo.premultipliedFirst.rawValue | CGBitmapInfo.byteOrder32Little.rawValue)
    context?.concatenate(CGAffineTransform(a: 1, b: 0, c: 0, d: -1, tx: 0, ty: CGFloat(height)))
    context?.draw(croppedImage, in: CGRect(x: 0, y: 0, width: CGFloat(width), height: CGFloat(height)))
    CVPixelBufferUnlockBaseAddress(destBuffer, CVPixelBufferLockFlags(rawValue: 0))
    return destPixelBuffer
}
Best Answer
Here is a way to do this with an extension on CVPixelBuffer, using vImage.
import Cocoa
import Accelerate
extension CVPixelBuffer {
    func crop(to rect: CGRect) -> CVPixelBuffer? {
        CVPixelBufferLockBaseAddress(self, .readOnly)
        defer { CVPixelBufferUnlockBaseAddress(self, .readOnly) }
        guard let baseAddress = CVPixelBufferGetBaseAddress(self) else {
            return nil
        }
        let inputImageRowBytes = CVPixelBufferGetBytesPerRow(self)
        let imageChannels = 4
        let startPos = Int(rect.origin.y) * inputImageRowBytes + imageChannels * Int(rect.origin.x)
        let outWidth = UInt(rect.width)
        let outHeight = UInt(rect.height)
        let croppedImageRowBytes = Int(outWidth) * imageChannels
        var inBuffer = vImage_Buffer()
        inBuffer.height = outHeight
        inBuffer.width = outWidth
        inBuffer.rowBytes = inputImageRowBytes
        inBuffer.data = baseAddress + UnsafeMutableRawPointer.Stride(startPos)
        guard let croppedImageBytes = malloc(Int(outHeight) * croppedImageRowBytes) else {
            return nil
        }
        var outBuffer = vImage_Buffer(data: croppedImageBytes, height: outHeight, width: outWidth, rowBytes: croppedImageRowBytes)
        let scaleError = vImageScale_ARGB8888(&inBuffer, &outBuffer, nil, vImage_Flags(0))
        guard scaleError == kvImageNoError else {
            free(croppedImageBytes)
            return nil
        }
        return croppedImageBytes.toCVPixelBuffer(pixelBuffer: self, targetWidth: Int(outWidth), targetHeight: Int(outHeight), targetImageRowBytes: croppedImageRowBytes)
    }
    func flip() -> CVPixelBuffer? {
        CVPixelBufferLockBaseAddress(self, .readOnly)
        defer { CVPixelBufferUnlockBaseAddress(self, .readOnly) }
        guard let baseAddress = CVPixelBufferGetBaseAddress(self) else {
            return nil
        }
        let width = UInt(CVPixelBufferGetWidth(self))
        let height = UInt(CVPixelBufferGetHeight(self))
        let inputImageRowBytes = CVPixelBufferGetBytesPerRow(self)
        let outputImageRowBytes = inputImageRowBytes
        var inBuffer = vImage_Buffer(
            data: baseAddress,
            height: height,
            width: width,
            rowBytes: inputImageRowBytes)
        guard let targetImageBytes = malloc(Int(height) * outputImageRowBytes) else {
            return nil
        }
        var outBuffer = vImage_Buffer(data: targetImageBytes, height: height, width: width, rowBytes: outputImageRowBytes)
        // See https://developer.apple.com/documentation/accelerate/vimage/vimage_operations/image_reflection for other transformations
        let reflectError = vImageHorizontalReflect_ARGB8888(&inBuffer, &outBuffer, vImage_Flags(0))
        // let reflectError = vImageVerticalReflect_ARGB8888(&inBuffer, &outBuffer, vImage_Flags(0))
        guard reflectError == kvImageNoError else {
            free(targetImageBytes)
            return nil
        }
        return targetImageBytes.toCVPixelBuffer(pixelBuffer: self, targetWidth: Int(width), targetHeight: Int(height), targetImageRowBytes: outputImageRowBytes)
    }
}
extension UnsafeMutableRawPointer {
    // Converts the vImage buffer to CVPixelBuffer
    func toCVPixelBuffer(pixelBuffer: CVPixelBuffer, targetWidth: Int, targetHeight: Int, targetImageRowBytes: Int) -> CVPixelBuffer? {
        let pixelBufferType = CVPixelBufferGetPixelFormatType(pixelBuffer)
        let releaseCallBack: CVPixelBufferReleaseBytesCallback = { mutablePointer, pointer in
            if let pointer = pointer {
                free(UnsafeMutableRawPointer(mutating: pointer))
            }
        }
        var targetPixelBuffer: CVPixelBuffer?
        let conversionStatus = CVPixelBufferCreateWithBytes(nil, targetWidth, targetHeight, pixelBufferType, self, targetImageRowBytes, releaseCallBack, nil, nil, &targetPixelBuffer)
        guard conversionStatus == kCVReturnSuccess else {
            free(self)
            return nil
        }
        return targetPixelBuffer
    }
}
// Change this to your input pixelBuffer
var pixelBuffer: CVPixelBuffer?
// The result would be stored in resultPixelBuffer
let resultPixelBuffer = pixelBuffer?.crop(to: CGRect(x: 50, y: 50, width: 100, height: 100))?.flip()
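As a usage sketch (not part of the original answer): assuming frames arrive from an AVCaptureVideoDataOutput configured to deliver a packed 4-byte pixel format such as kCVPixelFormatType_32BGRA (the ARGB8888 vImage routines above expect 4 bytes per pixel), the extension could be driven once per frame like this. The delegate class, the 512x512 crop size, and what happens to the result are hypothetical.

import AVFoundation
import CoreGraphics
import CoreVideo

final class FrameProcessor: NSObject, AVCaptureVideoDataOutputSampleBufferDelegate {
    // Called once per frame by an AVCaptureVideoDataOutput (hypothetical setup).
    func captureOutput(_ output: AVCaptureOutput,
                       didOutput sampleBuffer: CMSampleBuffer,
                       from connection: AVCaptureConnection) {
        // Extract the CVPixelBuffer carried by the sample buffer.
        guard let pixelBuffer = CMSampleBufferGetImageBuffer(sampleBuffer) else { return }

        // Hypothetical centered 512x512 crop, followed by a horizontal mirror.
        let width = CVPixelBufferGetWidth(pixelBuffer)
        let height = CVPixelBufferGetHeight(pixelBuffer)
        let cropRect = CGRect(x: (width - 512) / 2, y: (height - 512) / 2,
                              width: 512, height: 512)

        guard let result = pixelBuffer.crop(to: cropRect)?.flip() else { return }
        // Hand `result` (a CVPixelBuffer) to the rest of the video pipeline.
        _ = result
    }
}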
Regarding "swift - How to crop and flip a CVPixelBuffer and return a CVPixelBuffer?", a similar question can be found on Stack Overflow: https://stackoverflow.com/questions/55287140/