
opencv - iOS still image capture using CvVideoCamera (OpenCV)


I'm using OpenCV 2.4.9 on iOS and need some help.

I want to use CvVideoCamera for the live preview while also being able to take a high-resolution photo. I need the camera's processImage method so I can add live document detection via edge detection. That part works well, but as soon as a document is detected I need a high-resolution photo of the recognized document.
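For illustration (this snippet is not from the original post), here is a minimal sketch of the kind of edge-based document detection meant here, as it might look inside a CvVideoCameraDelegate processImage: callback; the Canny thresholds and the minimum-area filter are placeholder values:

// Sketch only: look for a large convex quadrilateral via edge detection.
// Requires #import <opencv2/imgproc/imgproc.hpp>.
- (void)processImage:(cv::Mat&)image
{
    cv::Mat gray, edges;
    cv::cvtColor(image, gray, CV_BGRA2GRAY);         // preview frames arrive as BGRA
    cv::GaussianBlur(gray, gray, cv::Size(5, 5), 0); // suppress noise before edge detection
    cv::Canny(gray, edges, 50, 150);                 // placeholder thresholds

    std::vector<std::vector<cv::Point> > contours;
    cv::findContours(edges, contours, CV_RETR_EXTERNAL, CV_CHAIN_APPROX_SIMPLE);

    for (size_t i = 0; i < contours.size(); i++) {
        std::vector<cv::Point> quad;
        cv::approxPolyDP(contours[i], quad, 0.02 * cv::arcLength(contours[i], true), true);
        // A document candidate: four corners, convex, and reasonably large.
        if (quad.size() == 4 && cv::isContourConvex(quad) && cv::contourArea(quad) > 10000) {
            // Document detected: this is the point where the high-resolution capture is needed.
        }
    }
}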

I found this: http://code.opencv.org/svn/gsoc2012/ios/trunk/HelloWorld_iOS/HelloWorld_iOS/VideoCameraController.m

It handles both photos and video, but its processImage method differs from the CvVideoCamera delegate's, so my algorithm doesn't work with that class :-/
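For reference, the two delegate protocols declared in OpenCV 2.4's opencv2/highgui/cap_ios.h differ roughly like this (reconstructed here for illustration): the video delegate receives every preview frame as a mutable cv::Mat, while the photo delegate is only called back with a finished UIImage after takePicture.

// CvVideoCameraDelegate: called for every preview frame.
@protocol CvVideoCameraDelegate <NSObject>
- (void)processImage:(cv::Mat&)image;
@end

// CvPhotoCameraDelegate: called only once a still image has been taken.
@protocol CvPhotoCameraDelegate <NSObject>
- (void)photoCamera:(CvPhotoCamera*)photoCamera capturedImage:(UIImage *)image;
- (void)photoCameraCancel:(CvPhotoCamera*)photoCamera;
@end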

So I'm searching for a combined photo-camera/video-camera solution in OpenCV along the lines of CvVideoCamera.

I hope someone can help. Sorry for my bad English.

Best Answer

I solved this by adding some of the CvVideoCamera class's methods to a CvPhotoCamera subclass. It works for me, but it may need some adjustments for your code.

CvPhotoCameraMod.h:

#import <UIKit/UIKit.h>
#import <opencv2/highgui/cap_ios.h>
#import <opencv2/highgui/ios.h>

#define DEGREES_RADIANS(angle) ((angle) / 180.0 * M_PI)

@class CvPhotoCameraMod;

// Photo-camera delegate extended with the per-frame callback known from CvVideoCameraDelegate.
@protocol CvPhotoCameraDelegateMod <CvPhotoCameraDelegate>
- (void)processImage:(cv::Mat&)image;
@end

@interface CvPhotoCameraMod : CvPhotoCamera <AVCaptureVideoDataOutputSampleBufferDelegate>

// Layer into which the processed preview frames are rendered.
@property (nonatomic, retain) CALayer *customPreviewLayer;
@property (nonatomic, retain) AVCaptureVideoDataOutput *videoDataOutput;
// Re-declared with the extended protocol so processImage: can be forwarded to it.
@property (nonatomic, weak) id <CvPhotoCameraDelegateMod> delegate;

- (void)createCustomVideoPreview;

@end

CvPhotoCameraMod.mm:

#import "CvPhotoCameraMod.h"
#import <CoreGraphics/CoreGraphics.h>
#define DEGREES_RADIANS(angle) ((angle) / 180.0 * M_PI)

@implementation CvPhotoCameraMod


-(void)createCaptureOutput;
{
[super createCaptureOutput];
[self createVideoDataOutput];
}
- (void)createCustomVideoPreview;
{
[self.parentView.layer addSublayer:self.customPreviewLayer];
}


// Method mostly taken from this source: https://github.com/Itseez/opencv/blob/b46719b0931b256ab68d5f833b8fadd83737ddd1/modules/videoio/src/cap_ios_video_camera.mm
- (void)createVideoDataOutput
{
    // Make a video data output
    self.videoDataOutput = [AVCaptureVideoDataOutput new];

    // Drop grayscale support here
    self.videoDataOutput.videoSettings = [NSDictionary dictionaryWithObject:[NSNumber numberWithUnsignedInt:kCVPixelFormatType_32BGRA]
                                                                     forKey:(id)kCVPixelBufferPixelFormatTypeKey];

    // discard if the data output queue is blocked (as we process the still image)
    [self.videoDataOutput setAlwaysDiscardsLateVideoFrames:YES];
    if ([self.captureSession canAddOutput:self.videoDataOutput]) {
        [self.captureSession addOutput:self.videoDataOutput];
    }
    [[self.videoDataOutput connectionWithMediaType:AVMediaTypeVideo] setEnabled:YES];

    // set video mirroring for front camera (more intuitive)
    if ([self.videoDataOutput connectionWithMediaType:AVMediaTypeVideo].supportsVideoMirroring) {
        if (self.defaultAVCaptureDevicePosition == AVCaptureDevicePositionFront) {
            [self.videoDataOutput connectionWithMediaType:AVMediaTypeVideo].videoMirrored = YES;
        } else {
            [self.videoDataOutput connectionWithMediaType:AVMediaTypeVideo].videoMirrored = NO;
        }
    }

    // set default video orientation
    if ([self.videoDataOutput connectionWithMediaType:AVMediaTypeVideo].supportsVideoOrientation) {
        [self.videoDataOutput connectionWithMediaType:AVMediaTypeVideo].videoOrientation = self.defaultAVCaptureVideoOrientation;
    }

    // create a custom preview layer
    self.customPreviewLayer = [CALayer layer];
    self.customPreviewLayer.bounds = CGRectMake(0, 0, self.parentView.frame.size.width, self.parentView.frame.size.height);
    self.customPreviewLayer.position = CGPointMake(self.parentView.frame.size.width / 2., self.parentView.frame.size.height / 2.);

    // create a serial dispatch queue used for the sample buffer delegate as well as when a still image is captured
    // a serial dispatch queue must be used to guarantee that video frames will be delivered in order
    // see the header doc for setSampleBufferDelegate:queue: for more information
    dispatch_queue_t videoDataOutputQueue = dispatch_queue_create("VideoDataOutputQueue", DISPATCH_QUEUE_SERIAL);
    [self.videoDataOutput setSampleBufferDelegate:self queue:videoDataOutputQueue];
}

- (void)captureOutput:(AVCaptureOutput *)captureOutput didOutputSampleBuffer:(CMSampleBufferRef)sampleBuffer fromConnection:(AVCaptureConnection *)connection
{
    (void)captureOutput;
    (void)connection;
    if (self.delegate) {

        // convert from Core Media to Core Video
        CVImageBufferRef imageBuffer = CMSampleBufferGetImageBuffer(sampleBuffer);
        CVPixelBufferLockBaseAddress(imageBuffer, 0);

        void *bufferAddress;
        size_t width;
        size_t height;
        size_t bytesPerRow;

        CGColorSpaceRef colorSpace;
        CGContextRef context;

        int format_opencv;

        OSType format = CVPixelBufferGetPixelFormatType(imageBuffer);
        if (format == kCVPixelFormatType_420YpCbCr8BiPlanarFullRange) {

            format_opencv = CV_8UC1;

            bufferAddress = CVPixelBufferGetBaseAddressOfPlane(imageBuffer, 0);
            width = CVPixelBufferGetWidthOfPlane(imageBuffer, 0);
            height = CVPixelBufferGetHeightOfPlane(imageBuffer, 0);
            bytesPerRow = CVPixelBufferGetBytesPerRowOfPlane(imageBuffer, 0);

        } else { // expect kCVPixelFormatType_32BGRA

            format_opencv = CV_8UC4;

            bufferAddress = CVPixelBufferGetBaseAddress(imageBuffer);
            width = CVPixelBufferGetWidth(imageBuffer);
            height = CVPixelBufferGetHeight(imageBuffer);
            bytesPerRow = CVPixelBufferGetBytesPerRow(imageBuffer);
        }

        // delegate image processing to the delegate
        cv::Mat image((int)height, (int)width, format_opencv, bufferAddress, bytesPerRow);

        CGImageRef dstImage;

        if ([self.delegate respondsToSelector:@selector(processImage:)]) {
            [self.delegate processImage:image];
        }

        // check if matrix data pointer or dimensions were changed by the delegate
        bool iOSimage = false;
        if (height == (size_t)image.rows && width == (size_t)image.cols && format_opencv == image.type() && bufferAddress == image.data && bytesPerRow == image.step) {
            iOSimage = true;
        }

        // (create color space, create graphics context, render buffer)
        CGBitmapInfo bitmapInfo;

        // basically we decide if it's a grayscale, rgb or rgba image
        if (image.channels() == 1) {
            colorSpace = CGColorSpaceCreateDeviceGray();
            bitmapInfo = kCGImageAlphaNone;
        } else if (image.channels() == 3) {
            colorSpace = CGColorSpaceCreateDeviceRGB();
            bitmapInfo = kCGImageAlphaNone;
            if (iOSimage) {
                bitmapInfo |= kCGBitmapByteOrder32Little;
            } else {
                bitmapInfo |= kCGBitmapByteOrder32Big;
            }
        } else {
            colorSpace = CGColorSpaceCreateDeviceRGB();
            bitmapInfo = kCGImageAlphaPremultipliedFirst;
            if (iOSimage) {
                bitmapInfo |= kCGBitmapByteOrder32Little;
            } else {
                bitmapInfo |= kCGBitmapByteOrder32Big;
            }
        }

        if (iOSimage) {
            context = CGBitmapContextCreate(bufferAddress, width, height, 8, bytesPerRow, colorSpace, bitmapInfo);
            dstImage = CGBitmapContextCreateImage(context);
            CGContextRelease(context);
        } else {

            NSData *data = [NSData dataWithBytes:image.data length:image.elemSize() * image.total()];
            CGDataProviderRef provider = CGDataProviderCreateWithCFData((__bridge CFDataRef)data);

            // Creating CGImage from cv::Mat
            dstImage = CGImageCreate(image.cols,                // width
                                     image.rows,                // height
                                     8,                         // bits per component
                                     8 * image.elemSize(),      // bits per pixel
                                     image.step,                // bytes per row
                                     colorSpace,                // colorspace
                                     bitmapInfo,                // bitmap info
                                     provider,                  // CGDataProviderRef
                                     NULL,                      // decode
                                     false,                     // should interpolate
                                     kCGRenderingIntentDefault  // intent
                                     );

            CGDataProviderRelease(provider);
        }

        // render buffer
        dispatch_sync(dispatch_get_main_queue(), ^{
            self.customPreviewLayer.contents = (__bridge id)dstImage;
        });

        // cleanup
        CGImageRelease(dstImage);
        CGColorSpaceRelease(colorSpace);
        CVPixelBufferUnlockBaseAddress(imageBuffer, 0);
    }
}


@end
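For completeness, here is a minimal sketch (not part of the original answer) of how a view controller might drive this class. DocScanViewController is hypothetical; the CvPhotoCamera calls (initWithParentView:, start, takePicture) and the delegate callbacks are the ones from OpenCV 2.4's cap_ios.h:

#import "CvPhotoCameraMod.h"

@interface DocScanViewController : UIViewController <CvPhotoCameraDelegateMod>
@property (nonatomic, strong) CvPhotoCameraMod *camera;
@end

@implementation DocScanViewController

- (void)viewDidLoad
{
    [super viewDidLoad];
    self.camera = [[CvPhotoCameraMod alloc] initWithParentView:self.view];
    self.camera.delegate = self;
    self.camera.defaultAVCaptureSessionPreset = AVCaptureSessionPresetPhoto;
    [self.camera start];
    // The custom layer only exists after the session has been set up by -start.
    [self.camera createCustomVideoPreview];
}

// Per-frame callback from CvPhotoCameraDelegateMod.
- (void)processImage:(cv::Mat&)image
{
    // Run the edge-based document detection here; once a document is found,
    // trigger the high-resolution still capture.
    [self.camera takePicture];
}

// Called with the full-resolution still (from CvPhotoCameraDelegate).
- (void)photoCamera:(CvPhotoCamera *)photoCamera capturedImage:(UIImage *)image
{
    // Process or save the high-resolution photo of the detected document.
}

- (void)photoCameraCancel:(CvPhotoCamera *)photoCamera
{
    // Handle a cancelled capture.
}

@end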

Regarding "opencv - iOS still image capture using CvVideoCamera (OpenCV)", we found a similar question on Stack Overflow: https://stackoverflow.com/questions/23495107/
