- html - 出于某种原因,IE8 对我的 Sass 文件中继承的 html5 CSS 不友好?
- JMeter 在响应断言中使用 span 标签的问题
- html - 在 :hover 和 :active 上具有不同效果的 CSS 动画?
- html - 相对于居中的 html 内容固定的 CSS 重复背景?
我只想提取YUV 420图像的一小部分。也就是说,从CVImageBufferRef创建一个CVImageBufferRef,它仅包含原始图像的矩形部分。
这是我到目前为止尝试过的:
// AVCaptureVideoDataOutputSampleBufferDelegate callback. Copies a centred
// 320x320 crop out of the incoming bi-planar YUV 420 frame, row by row, into
// freshly malloc'd planes and wraps them in a new CVPixelBufferRef (outBuff).
// This is the broken attempt: outBuff renders gray (see review notes below).
- (void) captureOutput:(AVCaptureOutput *)captureOutput didOutputSampleBuffer:(CMSampleBufferRef)sampleBufferRef fromConnection:(AVCaptureConnection *)connection
{
// callback from AVCaptureOutput
//
CVImageBufferRef imageBufferRef = CMSampleBufferGetImageBuffer(sampleBufferRef);
if (imageBufferRef)
{
// Take a subset of buffer to create a smaller image
// Lock the buffer before reading plane base addresses.
CVPixelBufferLockBaseAddress(imageBufferRef, 0);
// Plane 0 is luma (Y); plane 1 is the interleaved chroma (CbCr) plane.
size_t widthY = CVPixelBufferGetWidthOfPlane(imageBufferRef, 0);
size_t widthUV = CVPixelBufferGetWidthOfPlane(imageBufferRef, 1);
size_t heightY = CVPixelBufferGetHeightOfPlane(imageBufferRef, 0);
size_t heightUV = CVPixelBufferGetHeightOfPlane(imageBufferRef, 1);
// Crop size expressed in Y-plane pixels; chroma plane is half the height.
size_t cropHeightY = 320;
size_t cropWidthY = 320;
size_t cropHeightUV = cropHeightY / 2;
// NOTE(review): cropWidthUV equals cropWidthY, but the CbCr plane is only
// half the Y width in pixels — this looks like it crops twice the intended
// horizontal chroma area; verify against the plane geometry.
size_t cropWidthUV = cropWidthY;
// Top-left corner of a centred crop rectangle in each plane.
size_t cropY_X0 = widthY / 2 - (cropWidthY / 2);
size_t cropY_Y0 = heightY / 2 - (cropHeightY / 2);
size_t cropUV_X0 = widthUV / 2 - (cropWidthUV / 2);
size_t cropUV_Y0 = heightUV / 2 - (cropHeightUV / 2);
void *baseAddressY = CVPixelBufferGetBaseAddressOfPlane(imageBufferRef, 0);
void *baseAddressUV = CVPixelBufferGetBaseAddressOfPlane(imageBufferRef, 1);
size_t bytesPerRowY = CVPixelBufferGetBytesPerRowOfPlane(imageBufferRef, 0);
size_t bytesPerRowUV = CVPixelBufferGetBytesPerRowOfPlane(imageBufferRef, 1);
// NOTE(review): integer division; bytesPerRow may include row padding, so
// these are only estimates of bytes-per-pixel — confirm for this format.
size_t pixelBytesY = bytesPerRowY / widthY;
size_t pixelBytesUV = bytesPerRowUV / widthUV;
// First byte of the crop rectangle within each source plane.
void *startPosY = baseAddressY + (cropY_Y0 * bytesPerRowY + cropY_X0 * pixelBytesY);
void *startPosUV = baseAddressUV + (cropUV_Y0 * bytesPerRowUV + cropUV_X0 * pixelBytesUV);
// NOTE(review): the same row stride (derived from the Y plane) is reused
// for the UV output plane below — verify this matches the UV layout.
size_t bytesPerRowOut = cropWidthY * pixelBytesY;
size_t sizeY = bytesPerRowOut * cropHeightY;
size_t sizeUV = bytesPerRowOut * cropHeightUV;
unsigned char * pixelY = (unsigned char *)malloc(sizeY);
unsigned char * pixelUV = (unsigned char *)malloc(sizeUV);
// Copy the crop row by row into tightly packed output planes.
for (int i = 0; i < cropHeightY; ++i) {
memcpy(pixelY + i * bytesPerRowOut, startPosY + i * bytesPerRowY, bytesPerRowOut);
}
for (int i = 0; i < cropHeightUV; ++i) {
memcpy(pixelUV + i * bytesPerRowOut, startPosUV + i * bytesPerRowUV, bytesPerRowOut);
}
void *baseAddresses[2] = {pixelY, pixelUV};
size_t planeWidths[2] = {cropWidthY, cropWidthUV};
size_t planeHeights[2] = {cropHeightY, cropHeightUV};
size_t planeBytesPerRow[2] = {bytesPerRowOut, bytesPerRowOut};
// create a new CVImageBufferRef from pixelY and pixelUV
// NOTE(review): '420v' is the four-char code for
// kCVPixelFormatType_420YpCbCr8BiPlanarVideoRange — prefer the named
// constant. Also: no releaseCallback is passed, so the new buffer does NOT
// copy or take ownership of pixelY/pixelUV, the CVReturn result is ignored,
// and outBuff is never CVPixelBufferRelease'd (leak).
CVPixelBufferRef outBuff;
CVPixelBufferCreateWithPlanarBytes(NULL, cropWidthY, cropHeightY, '420v', NULL, 0, 2, baseAddresses, planeWidths, planeHeights, planeBytesPerRow, NULL, NULL, NULL, &outBuff);
if(logCameraSettings) {
NSLog(@"Original Image Size:\n width:%zu\n height:%zu\n", widthY, heightY);
size_t outWidthY = CVPixelBufferGetWidthOfPlane(outBuff, 0);
size_t outHeightY = CVPixelBufferGetHeightOfPlane(outBuff, 0);
NSLog(@"Modified Image Size:\n width:%zu\n height:%zu\n", outWidthY, outHeightY);
}
// Here would be the place where I actually want to do something with the image
// TEST: show image (in debugger in following method)
[self convertToUIImage:imageBufferRef]; // --> works
[self convertToUIImage:outBuff]; // --> only gray, does not work
// Release the allocated memory
CVPixelBufferUnlockBaseAddress(imageBufferRef,0);
// NOTE(review): outBuff still points at pixelY/pixelUV (no copy was made),
// so freeing them here leaves outBuff dangling — a likely cause of the
// gray output above.
free(pixelY);
free(pixelUV);
}
}
// Debug helper: renders the given pixel buffer into a UIImage purely so it
// can be inspected in the debugger. The resulting image is intentionally
// discarded; the method returns nothing.
-(void) convertToUIImage:(CVImageBufferRef)imageBuffer
{
size_t pixelWidth = CVPixelBufferGetWidth(imageBuffer);
size_t pixelHeight = CVPixelBufferGetHeight(imageBuffer);
CGRect fullFrame = CGRectMake(0, 0, pixelWidth, pixelHeight);
CIImage *coreImage = [CIImage imageWithCVPixelBuffer:imageBuffer];
CIContext *renderContext = [CIContext contextWithOptions:nil];
CGImageRef renderedFrame = [renderContext createCGImage:coreImage fromRect:fullFrame];
// Inspect the following UIImage in debugger.
UIImage *inspectableImage = [[UIImage alloc] initWithCGImage:renderedFrame];
CGImageRelease(renderedFrame);
}
最佳答案
这是我解决的方法
// "Solved" variant of the capture callback: instead of relying on the
// hand-copied planes directly, it crops via UIImage/CGImage and then renders
// the crop back into a CVPixelBufferRef with CIContext. The malloc'd planes
// below serve as the backing memory for that output buffer.
- (void) captureOutput:(AVCaptureOutput *)captureOutput didOutputSampleBuffer:(CMSampleBufferRef)sampleBufferRef fromConnection:(AVCaptureConnection *)connection
{
// callback from AVCaptureOutput
//
CVImageBufferRef imageBufferRef = CMSampleBufferGetImageBuffer(sampleBufferRef);
if (imageBufferRef)
{
// Take a subset of buffer to create a smaller image
// Lock the buffer before reading plane base addresses.
CVPixelBufferLockBaseAddress(imageBufferRef, 0);
// Plane 0 is luma (Y); plane 1 is the interleaved chroma (CbCr) plane.
size_t widthY = CVPixelBufferGetWidthOfPlane(imageBufferRef, 0);
size_t widthUV = CVPixelBufferGetWidthOfPlane(imageBufferRef, 1);
size_t heightY = CVPixelBufferGetHeightOfPlane(imageBufferRef, 0);
size_t heightUV = CVPixelBufferGetHeightOfPlane(imageBufferRef, 1);
// Crop size expressed in Y-plane pixels; chroma plane is half the height.
size_t cropHeightY = 500;
size_t cropWidthY = 500;
size_t cropHeightUV = cropHeightY / 2;
size_t cropWidthUV = cropWidthY;
// Top-left corner of a centred crop rectangle in each plane.
size_t cropY_X0 = widthY / 2 - (cropWidthY / 2);
size_t cropY_Y0 = heightY / 2 - (cropHeightY / 2);
size_t cropUV_X0 = widthUV / 2 - (cropWidthUV / 2);
size_t cropUV_Y0 = heightUV / 2 - (cropHeightUV / 2);
void *baseAddressY = CVPixelBufferGetBaseAddressOfPlane(imageBufferRef, 0);
void *baseAddressUV = CVPixelBufferGetBaseAddressOfPlane(imageBufferRef, 1);
size_t bytesPerRowY = CVPixelBufferGetBytesPerRowOfPlane(imageBufferRef, 0);
size_t bytesPerRowUV = CVPixelBufferGetBytesPerRowOfPlane(imageBufferRef, 1);
// NOTE(review): integer division; bytesPerRow may include row padding, so
// these are only estimates of bytes-per-pixel — confirm for this format.
size_t pixelBytesY = bytesPerRowY / widthY;
size_t pixelBytesUV = bytesPerRowUV / widthUV;
// First byte of the crop rectangle within each source plane.
void *startPosY = baseAddressY + (cropY_Y0 * bytesPerRowY + cropY_X0 * pixelBytesY);
void *startPosUV = baseAddressUV + (cropUV_Y0 * bytesPerRowUV + cropUV_X0 * pixelBytesUV);
size_t bytesPerRowOut = cropWidthY * pixelBytesY;
size_t sizeY = bytesPerRowOut * cropHeightY;
size_t sizeUV = bytesPerRowOut * cropHeightUV;
unsigned char * pixelY = (unsigned char *)malloc(sizeY);
unsigned char * pixelUV = (unsigned char *)malloc(sizeUV);
// Copy the crop row by row; the render: call below overwrites this data,
// so these copies mainly size and back the output buffer's planes.
for (int i = 0; i < cropHeightY; ++i) {
memcpy(pixelY + i * bytesPerRowOut, startPosY + i * bytesPerRowY, bytesPerRowOut);
}
for (int i = 0; i < cropHeightUV; ++i) {
memcpy(pixelUV + i * bytesPerRowOut, startPosUV + i * bytesPerRowUV, bytesPerRowOut);
}
void *baseAddresses[2] = {pixelY, pixelUV};
size_t planeWidths[2] = {cropWidthY, cropWidthUV};
size_t planeHeights[2] = {cropHeightY, cropHeightUV};
size_t planeBytesPerRow[2] = {bytesPerRowOut, bytesPerRowOut};
// Transform input to UIImage
UIImage *inputAsUIImage = [self convertToUIImage:imageBufferRef];
// Extract subimage of UIImage
CGRect fromRect = CGRectMake(cropY_X0, cropY_Y0, cropWidthY, cropHeightY); // or whatever rectangle
CGImageRef drawImage = CGImageCreateWithImageInRect(inputAsUIImage.CGImage, fromRect);
UIImage *newImage = [UIImage imageWithCGImage:drawImage];
CGImageRelease(drawImage);
// Convert UIImage back to CVImageBufferRef
// 1. Create a CIImage with the underlying CGImage encapsulated by the UIImage (referred to as 'image'):
CIImage *inputImage = [CIImage imageWithCGImage:newImage.CGImage];
// 2. Create a CIContext:
// NOTE(review): UIGraphicsGetCurrentContext() is normally NULL outside a
// UIKit drawing cycle, and this is a capture callback — confirm this
// context is actually valid here.
CIContext *ciContext = [CIContext contextWithCGContext:UIGraphicsGetCurrentContext() options:nil];
// 3. Render the CIImage to a CVPixelBuffer (referred to as 'outputBuffer'):
// NOTE(review): '420v' is the four-char code for
// kCVPixelFormatType_420YpCbCr8BiPlanarVideoRange — prefer the named
// constant. No releaseCallback is passed, so outputBuffer does not own or
// copy pixelY/pixelUV, and the CVReturn result is ignored.
CVPixelBufferRef outputBuffer;
CVPixelBufferCreateWithPlanarBytes(NULL, cropWidthY, cropHeightY, '420v', NULL, 0, 2, baseAddresses, planeWidths, planeHeights, planeBytesPerRow, NULL, NULL, NULL, &outputBuffer);
[ciContext render:inputImage toCVPixelBuffer:outputBuffer];
if(logCameraSettings) {
NSLog(@"Original Image Size:\n width:%zu\n height:%zu\n", widthY, heightY);
size_t outWidthY = CVPixelBufferGetWidthOfPlane(outputBuffer, 0);
size_t outHeightY = CVPixelBufferGetHeightOfPlane(outputBuffer, 0);
NSLog(@"Modified Image Size:\n width:%zu\n height:%zu\n", outWidthY, outHeightY);
}
// Do something with it here
// Release the allocated memory
CVPixelBufferUnlockBaseAddress(imageBufferRef,0);
// NOTE(review): outputBuffer is never CVPixelBufferRelease'd (leak), and
// freeing its backing planes here would leave it dangling if it escaped
// this scope — safe only because it is not used afterwards.
free(pixelY);
free(pixelUV);
}
}
// Renders the entire extent of a CVImageBufferRef into a new UIImage and
// returns it. Uses a throwaway CIContext for the CGImage rendering step.
-(UIImage*) convertToUIImage:(CVImageBufferRef)imageBuffer
{
size_t pixelWidth = CVPixelBufferGetWidth(imageBuffer);
size_t pixelHeight = CVPixelBufferGetHeight(imageBuffer);
CGRect fullFrame = CGRectMake(0, 0, pixelWidth, pixelHeight);
CIContext *renderContext = [CIContext contextWithOptions:nil];
CIImage *coreImage = [CIImage imageWithCVPixelBuffer:imageBuffer];
CGImageRef renderedFrame = [renderContext createCGImage:coreImage fromRect:fullFrame];
UIImage *result = [[UIImage alloc] initWithCGImage:renderedFrame];
// The intermediate CGImage is no longer needed once wrapped in a UIImage.
CGImageRelease(renderedFrame);
return result;
}
关于ios - 提取CVImageBufferRef的子图像,我们在Stack Overflow上找到一个类似的问题: https://stackoverflow.com/questions/38038620/
- (void)processPixelBuffer: (CVImageBufferRef)pixelBuffer { CVPixelBufferLockBaseAddress( pixelB
我想使用 QTKit.framework 将所有帧写入磁盘以进行相机输入。但我得到的所有图像都是半透明的并且没有一些颜色,也许是色彩空间问题? ;(它们在预览 View 中看起来不错,但是当我编写它们
我正在尝试从 QT 电影的各个帧中提取像素数据。 我想我需要使用 CV,因为 QTKit 和 NSImage 会 太慢了... 我需要比较包含网络摄像头 (iSight) 当前帧的缓冲区 (CVIma
我正在制作一个简单的管道,从 AVCaptureSession 获取图像,在 OpenCV 中处理它们,然后在 OpenGL 中渲染它们。它基于 RosyWriter 但没有音频和录音功能。 Open
我是 iOS 编程和多媒体的新手,我正在研究一个名为 RosyWriter 的示例项目,该项目由 apple 在 this link 提供。 .在这里,我看到在代码中有一个名为 captureOutp
在 objective-c 中,您可以轻松地将 CVImageBufferRef 转换为 CVPixelBufferRef: CMSampleBufferRef sampleBuffer = some
此代码大部分都有效,但生成的数据似乎失去了颜色 channel (这是我的想法),因为生成的图像数据在显示时是蓝色的! 代码如下: UIImage* myImage=[UIImage imageNam
我正在使用 AVFoundation 并从 AVCaptureVideoDataOutput 获取样本缓冲区,我可以使用以下方法将其直接写入 videoWriter: - (void)writeBuf
我正在尝试从相机捕获视频。我已经获得了要触发的 captureOutput:didOutputSampleBuffer: 回调,它为我提供了一个示例缓冲区,然后我将其转换为 CVImageBuffer
我想分析来自 iOS 相机的每帧像素。我想知道是否有任何理由从 CVImageBufferRef 转到 CGImageRef,然后从 CGImageRef 获取像素,或者这些数据是否基本相同。想到的事
我有 CMSampleBufferRef(s),我使用 VTDecompressionSessionDecodeFrame 对其进行解码,这会在帧解码完成后生成 CVImageBufferRef,所以
我正在开发一个使用 AVFoundation 和 iDevices 后置摄像头进行实时视频处理的应用程序。 AVCaptureSession 配置了 sessionPreset AVCaputeSes
我现在想合并两个成功的概念。我成功地将 CATextLayer 分层到 CVImageBufferRef 相机框架上,然后使用 AVAssetWriterInputPixelBufferAdaptor
我尝试从 CVImageBufferRef 加载纹理: func createTextureFromCVImageBufferRef(buffer: CVImageBufferRef) { l
我有两种算法可以检测 AR 标记(ARToolKit 和 Infi)。我让他们在 iPhone 上实时检测标记。在这个阶段,我想比较一下它们的速度和准确性。我想准备一系列图像并对其进行测试识别。我可以
我是一名优秀的程序员,十分优秀!