- html - 出于某种原因,IE8 对我的 Sass 文件中继承的 html5 CSS 不友好?
- JMeter 在响应断言中使用 span 标签的问题
- html - 在 :hover and :active? 上具有不同效果的 CSS 动画
- html - 相对于居中的 html 内容固定的 CSS 重复背景?
所以我使用了一些在 Objective C 中执行此操作的代码,并且我一直在将其转换为 swift,并且我正在努力从 AVCaptureStillImageOutput
创建一个 CIImage
。因此,如果有人可以查看此代码并告诉我哪里出错了,那就太好了。
这是 objective-c 代码
- (void)captureImageWithCompletionHander:(void(^)(NSString *fullPath))completionHandler
{
dispatch_suspend(_captureQueue);
AVCaptureConnection *videoConnection = nil;
for (AVCaptureConnection *connection in self.stillImageOutput.connections)
{
for (AVCaptureInputPort *port in connection.inputPorts)
{
if ([port.mediaType isEqual:AVMediaTypeVideo] )
{
videoConnection = connection;
break;
}
}
if (videoConnection) break;
}
__weak typeof(self) weakSelf = self;
[self.stillImageOutput captureStillImageAsynchronouslyFromConnection:videoConnection completionHandler: ^(CMSampleBufferRef imageSampleBuffer, NSError *error)
{
if (error)
{
dispatch_resume(_captureQueue);
return;
}
__block NSArray *filePath = NSSearchPathForDirectoriesInDomains(NSDocumentDirectory, NSUserDomainMask, YES); //create an array and store result of our search for the documents directory in it
NSString *documentsDirectory = [filePath objectAtIndex:0]; //create NSString object, that holds our exact path to the documents directory
NSString *fullPath = [documentsDirectory stringByAppendingPathComponent:[NSString stringWithFormat:@"/iScan_img_%i.pdf",(int)[NSDate date].timeIntervalSince1970]];
@autoreleasepool
{
NSData *imageData = [AVCaptureStillImageOutput jpegStillImageNSDataRepresentation:imageSampleBuffer];
CIImage *enhancedImage = [[CIImage alloc] initWithData:imageData options:@{kCIImageColorSpace:[NSNull null]}];
imageData = nil;
if (weakSelf.cameraViewType == DocScannerCameraViewTypeBlackAndWhite)
{
enhancedImage = [self filteredImageUsingEnhanceFilterOnImage:enhancedImage];
}
else
{
enhancedImage = [self filteredImageUsingContrastFilterOnImage:enhancedImage];
}
if (weakSelf.isBorderDetectionEnabled && rectangleDetectionConfidenceHighEnough(_imageDedectionConfidence))
{
CIRectangleFeature *rectangleFeature = [self biggestRectangleInRectangles:[[self highAccuracyRectangleDetector] featuresInImage:enhancedImage]];
if (rectangleFeature)
{
enhancedImage = [self correctPerspectiveForImage:enhancedImage withFeatures:rectangleFeature];
}
}
CIFilter *transform = [CIFilter filterWithName:@"CIAffineTransform"];
[transform setValue:enhancedImage forKey:kCIInputImageKey];
NSValue *rotation = [NSValue valueWithCGAffineTransform:CGAffineTransformMakeRotation(-90 * (M_PI/180))];
[transform setValue:rotation forKey:@"inputTransform"];
enhancedImage = transform.outputImage;
if (!enhancedImage || CGRectIsEmpty(enhancedImage.extent)) return;
static CIContext *ctx = nil;
if (!ctx)
{
ctx = [CIContext contextWithOptions:@{kCIContextWorkingColorSpace:[NSNull null]}];
}
CGSize bounds = enhancedImage.extent.size;
bounds = CGSizeMake(floorf(bounds.width / 4) * 4,floorf(bounds.height / 4) * 4);
CGRect extent = CGRectMake(enhancedImage.extent.origin.x, enhancedImage.extent.origin.y, bounds.width, bounds.height);
static int bytesPerPixel = 8;
uint rowBytes = bytesPerPixel * bounds.width;
uint totalBytes = rowBytes * bounds.height;
uint8_t *byteBuffer = malloc(totalBytes);
CGColorSpaceRef colorSpace = CGColorSpaceCreateDeviceRGB();
[ctx render:enhancedImage toBitmap:byteBuffer rowBytes:rowBytes bounds:extent format:kCIFormatRGBA8 colorSpace:colorSpace];
CGContextRef bitmapContext = CGBitmapContextCreate(byteBuffer,bounds.width,bounds.height,bytesPerPixel,rowBytes,colorSpace,kCGImageAlphaNoneSkipLast);
CGImageRef imgRef = CGBitmapContextCreateImage(bitmapContext);
CGColorSpaceRelease(colorSpace);
CGContextRelease(bitmapContext);
free(byteBuffer);
if (imgRef == NULL)
{
CFRelease(imgRef);
return;
}
saveCGImageAsJPEGToFilePath(imgRef, fullPath);
CFRelease(imgRef);
dispatch_async(dispatch_get_main_queue(), ^
{
completionHandler(fullPath);
dispatch_resume(_captureQueue);
});
_imageDedectionConfidence = 0.0f;
}
}];
现在基本上它捕获内容,如果某些 if
语句为真,那么它捕获显示的 CIRectangleFeature
中的内容,然后转换 CIImage
到要在保存函数中调用的 CGImage
。
我已经把它翻译成这样了。
func captureImage(completionHandler: @escaping (_ imageFilePath: String) -> Void) {
self.captureQueue?.suspend()
var videoConnection: AVCaptureConnection!
for connection in self.stillImageOutput.connections{
for port in (connection as! AVCaptureConnection).inputPorts {
if (port as! AVCaptureInputPort).mediaType.isEqual(AVMediaTypeVideo) {
videoConnection = connection as! AVCaptureConnection
break
}
}
if videoConnection != nil {
break
}
}
weak var weakSelf = self
self.stillImageOutput.captureStillImageAsynchronously(from: videoConnection) { (sampleBuffer, error) -> Void in
if error != nil {
self.captureQueue?.resume()
return
}
let filePath = NSSearchPathForDirectoriesInDomains(.documentDirectory, .userDomainMask, true)
let documentsDirectory: String = filePath[0]
let fullPath: String = URL(fileURLWithPath: documentsDirectory).appendingPathComponent("iScan_img_\(Int(Date().timeIntervalSince1970)).pdf").absoluteString
autoreleasepool {
let imageData = Data(AVCaptureStillImageOutput.jpegStillImageNSDataRepresentation(sampleBuffer))
var enhancedImage = CIImage(data: imageData, options: [kCIImageColorSpace: NSNull()])
if weakSelf?.cameraViewType == DocScannerCameraViewType.blackAndWhite {
enhancedImage = self.filteredImageUsingEnhanceFilter(on: enhancedImage!)
}
else {
enhancedImage = self.filteredImageUsingContrastFilter(on: enhancedImage!)
}
if (weakSelf?.isEnableBorderDetection == true) && self.rectangleDetectionConfidenceHighEnough(confidence: self.imageDedectionConfidence) {
let rectangleFeature: CIRectangleFeature? = self.biggestRectangles(rectangles: self.highAccuracyRectangleDetector().features(in: enhancedImage!))
if rectangleFeature != nil {
enhancedImage = self.correctPerspective(for: enhancedImage!, withFeatures: rectangleFeature!)
}
}
let transform = CIFilter(name: "CIAffineTransform")
let rotation = NSValue(cgAffineTransform: CGAffineTransform(rotationAngle: -90 * (.pi / 180)))
transform?.setValue(rotation, forKey: "inputTransform")
enhancedImage = transform?.outputImage
if (enhancedImage == nil) || (enhancedImage?.extent.isEmpty)! {
return
}
var ctx: CIContext?
if (ctx != nil) {
ctx = CIContext(options: [kCIContextWorkingColorSpace: NSNull()])
}
var bounds: CGSize = (enhancedImage?.extent.size)!
bounds = CGSize(width: CGFloat((floorf(Float(bounds.width)) / 4) * 4), height: CGFloat((floorf(Float(bounds.height)) / 4) * 4))
let extent = CGRect(x: CGFloat((enhancedImage?.extent.origin.x)!), y: CGFloat((enhancedImage?.extent.origin.y)!), width: CGFloat(bounds.width), height: CGFloat(bounds.height))
let bytesPerPixel: CGFloat = 8
let rowBytes = bytesPerPixel * bounds.width
let totalBytes = rowBytes * bounds.height
let byteBuffer = malloc(Int(totalBytes))
let colorSpace = CGColorSpaceCreateDeviceRGB()
ctx!.render(enhancedImage!, toBitmap: byteBuffer!, rowBytes: Int(rowBytes), bounds: extent, format: kCIFormatRGBA8, colorSpace: colorSpace)
let bitmapContext = CGContext(data: byteBuffer, width: Int(bounds.width), height: Int(bounds.height), bitsPerComponent: Int(bytesPerPixel), bytesPerRow: Int(rowBytes), space: colorSpace, bitmapInfo: CGImageAlphaInfo.noneSkipLast.rawValue)
let imgRef = bitmapContext?.makeImage()
free(byteBuffer)
self.saveCGImageAsJPEGToFilePath(imgRef: imgRef!, filePath: fullPath)
DispatchQueue.main.async(execute: {() -> Void in
completionHandler(fullPath)
self.captureQueue?.resume()
})
self.imageDedectionConfidence = 0.0
}
}
}
因此它需要 AVCaptureStillImageOutput
将其转换为 CIImage
用于所有需要的用途,然后将其转换为 CGImage
以进行保存。我在翻译中到底做错了什么?或者有更好的方法吗?
我真的不想问这个,但我似乎找不到任何像这样的问题,或者至少找不到任何涉及从 AVCaptureStillImageOutput 捕获为
。CIImage
的问题
感谢您的帮助!
最佳答案
这是 swift 中的正确翻译 再次感谢 Prientus 帮助我找到我的错误
func captureImage(completionHandler: @escaping (_ imageFilePath: String) -> Void) {
self.captureQueue?.suspend()
var videoConnection: AVCaptureConnection!
for connection in self.stillImageOutput.connections{
for port in (connection as! AVCaptureConnection).inputPorts {
if (port as! AVCaptureInputPort).mediaType.isEqual(AVMediaTypeVideo) {
videoConnection = connection as! AVCaptureConnection
break
}
}
if videoConnection != nil {
break
}
}
weak var weakSelf = self
self.stillImageOutput.captureStillImageAsynchronously(from: videoConnection) { (sampleBuffer: CMSampleBuffer?, error) -> Void in
if error != nil {
self.captureQueue?.resume()
return
}
let filePath = NSSearchPathForDirectoriesInDomains(.documentDirectory, .userDomainMask, true)
let documentsDirectory: String = filePath[0]
let fullPath: String = documentsDirectory.appending("/iScan_img_\(Int(Date().timeIntervalSince1970)).pdf")
autoreleasepool {
let imageData = Data(AVCaptureStillImageOutput.jpegStillImageNSDataRepresentation(sampleBuffer))
var enhancedImage = CIImage(data: imageData, options: [kCIImageColorSpace: NSNull()])
if weakSelf?.cameraViewType == DocScannerCameraViewType.blackAndWhite {
enhancedImage = self.filteredImageUsingEnhanceFilter(on: enhancedImage!)
}
else {
enhancedImage = self.filteredImageUsingContrastFilter(on: enhancedImage!)
}
if (weakSelf?.isEnableBorderDetection == true) && self.rectangleDetectionConfidenceHighEnough(confidence: self.imageDedectionConfidence) {
let rectangleFeature: CIRectangleFeature? = self.biggestRectangles(rectangles: self.highAccuracyRectangleDetector().features(in: enhancedImage!))
if rectangleFeature != nil {
enhancedImage = self.correctPerspective(for: enhancedImage!, withFeatures: rectangleFeature!)
}
}
let transform = CIFilter(name: "CIAffineTransform")
transform?.setValue(enhancedImage, forKey: kCIInputImageKey)
let rotation = NSValue(cgAffineTransform: CGAffineTransform(rotationAngle: -90 * (.pi / 180)))
transform?.setValue(rotation, forKey: "inputTransform")
enhancedImage = (transform?.outputImage)!
if (enhancedImage == nil) || (enhancedImage?.extent.isEmpty)! {
return
}
var ctx: CIContext?
if (ctx == nil) {
ctx = CIContext(options: [kCIContextWorkingColorSpace: NSNull()])
}
var bounds: CGSize = (enhancedImage!.extent.size)
bounds = CGSize(width: CGFloat((floorf(Float(bounds.width)) / 4) * 4), height: CGFloat((floorf(Float(bounds.height)) / 4) * 4))
let extent = CGRect(x: CGFloat((enhancedImage?.extent.origin.x)!), y: CGFloat((enhancedImage?.extent.origin.y)!), width: CGFloat(bounds.width), height: CGFloat(bounds.height))
let bytesPerPixel: CGFloat = 8
let rowBytes = bytesPerPixel * bounds.width
let totalBytes = rowBytes * bounds.height
let byteBuffer = malloc(Int(totalBytes))
let colorSpace = CGColorSpaceCreateDeviceRGB()
ctx!.render(enhancedImage!, toBitmap: byteBuffer!, rowBytes: Int(rowBytes), bounds: extent, format: kCIFormatRGBA8, colorSpace: colorSpace)
let bitmapContext = CGContext(data: byteBuffer, width: Int(bounds.width), height: Int(bounds.height), bitsPerComponent: Int(bytesPerPixel), bytesPerRow: Int(rowBytes), space: colorSpace, bitmapInfo: CGImageAlphaInfo.noneSkipLast.rawValue)
let imgRef = bitmapContext?.makeImage()
free(byteBuffer)
if imgRef == nil {
return
}
self.saveCGImageAsJPEGToFilePath(imgRef: imgRef!, filePath: fullPath)
DispatchQueue.main.async(execute: {() -> Void in
completionHandler(fullPath)
self.captureQueue?.resume()
})
self.imageDedectionConfidence = 0.0
}
}
}
关于ios - 如何快速从 AVCaptureStillImageOutput 创建 CIImage?,我们在Stack Overflow上找到一个类似的问题: https://stackoverflow.com/questions/42539424/
我在 iPhone SDK 中的新 AVFoundation 类中遇到了奇怪的行为。 我有一个用于拍照的 AVCaptureStillImageOutput,并且我正在根据自己的喜好设置其输出设置。代
我是第一次使用 AVCaptureStillImageOutput,我在某个时候保存了一张 JPEG 图像。我想保存 PNG 图像而不是 JPEG 图像。我需要为此做什么? 我在应用程序中有这 3 行
我正在使用 captureStillImageAsynchronouslyFromConnection 从相机捕捉图像 图像正在倒置保存。我尝试使用 保存图像 UIImage *flippedImag
我在做一些我认为不应该那么困难的事情上遇到了困难,所以我想我一定是从错误的角度看待问题。为了了解 AVCaptureStillImageOutput 和相机的工作原理,我制作了一个小应用程序。 这个应
我正在使用 AVCaptureStillImageOutput 捕获图像。一切都做得很好。但我尝试导航到新的 View Controller ,快门声很困难。有什么解决方法可以避免这种情况吗? 最佳答
我正在尝试裁剪从 AVCaptureStillImageOutput 获取的图像,但无法在正确的矩形处正确裁剪。 我的相机视频预览为 320x458 帧,裁剪矩形出现在该预览帧内,其坐标和大小为 CG
如何设置完成处理程序: captureStillImageAsynchronouslyFromConnection:completionHandler: 用于 AVCaptureStillImageO
我正在使用 AVCaptureConnection和 AVCaptureStillImageOutput创建覆盖屏幕并捕获图像的类。在 View 中,我有一个自定义选项卡栏,其中包含一些自定义控件,如
这是我迄今为止尝试的相机配置: AVCaptureSession *session = [[AVCaptureSession alloc] init]; [session setSes
我正在做一个项目,我想遮盖用户刚刚用他们的相机拍摄的照片。 mask 以特定的纵横比创建,以将信箱添加到照片。 我可以成功创建图像、创建蒙版并将两者都保存到相机胶卷,但我无法将蒙版应用于图像。这是我现
我目前正在编写一段代码,如下所示: if error == nil && (captureSession?.canAddInput(input))! { captureSession?.add
所以我使用了一些在 Objective C 中执行此操作的代码,并且我一直在将其转换为 swift,并且我正在努力从 AVCaptureStillImageOutput 创建一个 CIImage。因此
我正在尝试从 AVCaptureStillImageOutput 捕获像素数据,并注意到在将图像裁剪为 CGImage 后,它会重新定向。为了对此进行测试,我将临时图像输出到照片库。现在我注意到,即使
我正在使用 AVFoundation 捕捉图像。我正在使用 AVCaptureVideoPreviewLayer 在屏幕上显示摄像头画面。此预览层的框架获取具有动态尺寸的 UIView 的边界: AV
我尝试构建一个从 iPhone 相机捕获帧并对这些帧进行一些处理的应用程序。我尝试了一些在互联网上找到的代码,例如这里:How to capture image without displaying
我正在尝试使用 AVFoundation 制作照片。当我将 obj-c 代码翻译成 swift 来执行此操作时,我的程序在运行我尝试查找视频连接的部分时卡住了。有什么线索吗? let captureS
使用jpegStillImageNSDataRepresentation:方法可以从AVCapureStillImageOutput获取NSData,然后我可以将数据写入文件。 NSData * im
我使用 AVFoundation 在 UIView 中捕获图像并向其添加注释。现在,我需要获取拍摄图像的地理位置。我当前的代码片段如下所示。元数据没有地理位置。也许我将不得不使用 CLLocation
我想捕获Mac的屏幕,并且我知道AVCaptureStillImageOutput可以工作。但我不知道如何在Mac中使用它。 我希望有人能给我一些有关使用该类捕获屏幕的示例代码。或者一些建议也可以。
我正在尝试使用我正在使用的应用程序发送照片,我已经让该应用程序拍照,然后当您点击发送时,它会发送您刚刚拍摄的照片以通过邮件发送。 但我不知道如何将 AVCaptureStillImageOutput
我是一名优秀的程序员,十分优秀!