I have a small augmented reality app in development, and I would like to know how to save a screenshot of what the user is seeing, triggered either by a button tap or a timer.
The app works by overlaying a live camera feed with another UIView. I can save a screenshot with the power button + home button combination, and those shots end up in the camera roll. However, even when I ask the window to render itself, Apple will not render the AVCaptureVideoPreviewLayer; it just leaves a transparent canvas where the preview layer sits.
What is the proper way for an augmented reality app to save a screenshot, including the transparency and the subviews?
//displaying a live preview on one of the views
- (void)startCapture
{
    captureSession = [[AVCaptureSession alloc] init];

    // NOTE: despite its name, this variable holds the *video* capture device
    AVCaptureDevice *audioCaptureDevice = nil;
    // AVCaptureDevice *audioCaptureDevice = [AVCaptureDevice defaultDeviceWithMediaType:AVMediaTypeVideo];
    NSArray *videoDevices = [AVCaptureDevice devicesWithMediaType:AVMediaTypeVideo];
    for (AVCaptureDevice *device in videoDevices) {
        if (useFrontCamera) {
            if (device.position == AVCaptureDevicePositionFront) {
                // front-facing camera exists
                audioCaptureDevice = device;
                break;
            }
        } else {
            if (device.position == AVCaptureDevicePositionBack) {
                // rear-facing camera exists
                audioCaptureDevice = device;
                break;
            }
        }
    }

    NSError *error = nil;
    AVCaptureDeviceInput *audioInput = [AVCaptureDeviceInput deviceInputWithDevice:audioCaptureDevice error:&error];
    if (audioInput) {
        [captureSession addInput:audioInput];
    }
    else {
        // Handle the failure.
    }

    // create and configure the video data output, then add it to the session
    captureOutput = [[AVCaptureVideoDataOutput alloc] init];
    [captureOutput setAlwaysDiscardsLateVideoFrames:YES];
    dispatch_queue_t queue = dispatch_queue_create("com.AugmentedRealityGlamour.ImageCaptureQueue", NULL);
    [captureOutput setSampleBufferDelegate:self queue:queue];
    [captureOutput setVideoSettings:videoSettings];
    dispatch_release(queue);
    if ([captureSession canAddOutput:captureOutput]) {
        [captureSession addOutput:captureOutput];
    } else {
        // handle failure
    }

    previewLayer = [AVCaptureVideoPreviewLayer layerWithSession:captureSession];
    UIView *aView = arOverlayView;
    previewLayer.frame = CGRectMake(0, 0, arOverlayView.frame.size.width, arOverlayView.frame.size.height); // Assume you want the preview layer to fill the view.
    [aView.layer addSublayer:previewLayer];
    [captureSession startRunning];
}
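If all the screenshot really needs is the camera frame itself, to be composited with the overlay afterwards, one option that the question does not show is to also attach an AVCaptureStillImageOutput to the same session and grab a full-resolution still on demand. This is only a sketch of that idea, not part of the original code; stillImageOutput is an assumed extra ivar:

    // Optional sketch: attach a still image output to the same session so a
    // full-resolution frame can be grabbed on demand (stillImageOutput is an assumed ivar).
    stillImageOutput = [[AVCaptureStillImageOutput alloc] init];
    [stillImageOutput setOutputSettings:[NSDictionary dictionaryWithObject:AVVideoCodecJPEG
                                                                    forKey:AVVideoCodecKey]];
    if ([captureSession canAddOutput:stillImageOutput])
        [captureSession addOutput:stillImageOutput];

    // Later, when the user taps the screenshot button:
    AVCaptureConnection *connection = [stillImageOutput connectionWithMediaType:AVMediaTypeVideo];
    [stillImageOutput captureStillImageAsynchronouslyFromConnection:connection
                                                   completionHandler:^(CMSampleBufferRef buffer, NSError *err) {
        NSData *jpegData = [AVCaptureStillImageOutput jpegStillImageNSDataRepresentation:buffer];
        UIImage *cameraFrame = [UIImage imageWithData:jpegData];
        // composite cameraFrame with a snapshot of the overlay view here
    }];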
//ask the entire window to draw itself in a graphics context. This call will not render
//an AVCaptureVideoPreviewLayer; it has to be replaced with a UIImageView or a GL-based view.
//see the code below for creating a dynamically updated UIImageView
- (void)saveScreenshot
{
    UIGraphicsBeginImageContext(appDelegate.window.bounds.size);
    [appDelegate.window.layer renderInContext:UIGraphicsGetCurrentContext()];
    UIImage *screenshot = UIGraphicsGetImageFromCurrentImageContext();
    UIGraphicsEndImageContext();
    UIImageWriteToSavedPhotosAlbum(screenshot, self,
                                   @selector(image:didFinishSavingWithError:contextInfo:), nil);
}
//image saved to camera roll callback
- (void)image:(UIImage *)image didFinishSavingWithError:(NSError *)error
        contextInfo:(void *)contextInfo
{
    // Was there an error?
    if (error != NULL)
    {
        // Show error message...
        NSLog(@"save failed");
    }
    else // No errors
    {
        NSLog(@"save successful");
        // Show message image successfully saved
    }
}
Here is the code that creates the images:
//you need to add your view controller as a delegate of the camera output in order to receive the buffered data notifications
- (void)activateCameraFeed
{
    //this is the code responsible for capturing the feed for still image processing
    dispatch_queue_t queue = dispatch_queue_create("com.AugmentedRealityGlamour.ImageCaptureQueue", NULL);

    captureOutput = [[AVCaptureVideoDataOutput alloc] init];
    [captureOutput setAlwaysDiscardsLateVideoFrames:YES];
    [captureOutput setSampleBufferDelegate:self queue:queue];
    [captureOutput setVideoSettings:videoSettings];
    dispatch_release(queue);

    //......configure audio feed, add inputs and outputs
}
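The videoSettings dictionary used above is never shown in the snippets. Since performImageCaptureFrom: below bails out unless the pixel buffer is kCVPixelFormatType_32BGRA, it presumably asks the output for BGRA frames. A minimal sketch of what it might contain:

    // Assumed contents of the videoSettings ivar: request 32-bit BGRA frames so the
    // CGBitmapContext code in performImageCaptureFrom: can consume them directly.
    NSDictionary *videoSettings = [NSDictionary dictionaryWithObject:
                                       [NSNumber numberWithUnsignedInt:kCVPixelFormatType_32BGRA]
                                                               forKey:(id)kCVPixelBufferPixelFormatTypeKey];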
//buffer delegate callback
- (void)captureOutput:(AVCaptureOutput *)captureOutput didOutputSampleBuffer:(CMSampleBufferRef)sampleBuffer fromConnection:(AVCaptureConnection *)connection
{
    if ( ignoreImageStream )
        return;
    [self performImageCaptureFrom:sampleBuffer];
}
Creating a UIImage:
- (void) performImageCaptureFrom:(CMSampleBufferRef)sampleBuffer
{
    CVImageBufferRef imageBuffer;

    if ( CMSampleBufferGetNumSamples(sampleBuffer) != 1 )
        return;
    if ( !CMSampleBufferIsValid(sampleBuffer) )
        return;
    if ( !CMSampleBufferDataIsReady(sampleBuffer) )
        return;

    imageBuffer = CMSampleBufferGetImageBuffer(sampleBuffer);
    if ( CVPixelBufferGetPixelFormatType(imageBuffer) != kCVPixelFormatType_32BGRA )
        return;

    CVPixelBufferLockBaseAddress(imageBuffer, 0);
    uint8_t *baseAddress = (uint8_t *)CVPixelBufferGetBaseAddress(imageBuffer);
    size_t bytesPerRow = CVPixelBufferGetBytesPerRow(imageBuffer);
    size_t width = CVPixelBufferGetWidth(imageBuffer);
    size_t height = CVPixelBufferGetHeight(imageBuffer);

    CGImageRef newImage = nil;
    if ( cameraDeviceSetting == CameraDeviceSetting640x480 )
    {
        CGColorSpaceRef colorSpace = CGColorSpaceCreateDeviceRGB();
        CGContextRef newContext = CGBitmapContextCreate(baseAddress, width, height, 8, bytesPerRow, colorSpace, kCGBitmapByteOrder32Little | kCGImageAlphaPremultipliedFirst);
        newImage = CGBitmapContextCreateImage(newContext);
        CGColorSpaceRelease( colorSpace );
        CGContextRelease(newContext);
    }
    else
    {
        // copy the pixel data into a scratch buffer large enough for the whole frame
        uint8_t *tempAddress = malloc( bytesPerRow * height );
        memcpy( tempAddress, baseAddress, bytesPerRow * height );
        baseAddress = tempAddress;

        CGColorSpaceRef colorSpace = CGColorSpaceCreateDeviceRGB();
        CGContextRef newContext = CGBitmapContextCreate(baseAddress, width, height, 8, bytesPerRow, colorSpace, kCGBitmapByteOrder32Little | kCGImageAlphaNoneSkipFirst);
        newImage = CGBitmapContextCreateImage(newContext);
        CGContextRelease(newContext);

        // scale the full-size frame down into a 640x480 bitmap
        newContext = CGBitmapContextCreate(baseAddress, 640, 480, 8, 640*4, colorSpace, kCGBitmapByteOrder32Little | kCGImageAlphaPremultipliedFirst);
        CGContextScaleCTM( newContext, (CGFloat)640/(CGFloat)width, (CGFloat)480/(CGFloat)height );
        CGContextDrawImage(newContext, CGRectMake(0, 0, 640, 480), newImage);
        CGImageRelease(newImage);
        newImage = CGBitmapContextCreateImage(newContext);

        CGColorSpaceRelease( colorSpace );
        CGContextRelease(newContext);
        free( tempAddress );
    }

    if ( newImage != nil )
    {
        //modified for iOS5.0 with ARC
        tempImage = [[UIImage alloc] initWithCGImage:newImage scale:(CGFloat)1.0 orientation:cameraImageOrientation];
        CGImageRelease(newImage);

        //this call creates the illusion of a preview layer, while we are actively switching images created with this method
        [self performSelectorOnMainThread:@selector(newCameraImageNotification:) withObject:tempImage waitUntilDone:YES];
    }
    CVPixelBufferUnlockBaseAddress(imageBuffer, 0);
}
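As an aside, on iOS 5 and later the same BGRA buffer can be turned into a UIImage with much less Core Graphics plumbing by going through Core Image. This is only a hedged sketch, not the original author's code; it assumes a long-lived CIContext ivar (here called ciContext) created once, for example with [CIContext contextWithOptions:nil]:

    // Minimal Core Image alternative (assumes a reusable CIContext ivar named ciContext)
    CVPixelBufferRef pixelBuffer = CMSampleBufferGetImageBuffer(sampleBuffer);
    CIImage *ciImage = [CIImage imageWithCVPixelBuffer:pixelBuffer];
    CGImageRef cgImage = [ciContext createCGImage:ciImage fromRect:[ciImage extent]];
    UIImage *frame = [UIImage imageWithCGImage:cgImage scale:1.0 orientation:cameraImageOrientation];
    CGImageRelease(cgImage);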
Update the interface with a UIView that can actually be rendered in a graphics context:
- (void) newCameraImageNotification:(UIImage*)newImage
{
    if ( newImage == nil )
        return;
    [arOverlayView setImage:newImage];
    //or do more advanced processing of the image
}
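To make the pieces above fit together, arOverlayView has to be a UIImageView that sits underneath the transparent AR content instead of hosting an AVCaptureVideoPreviewLayer; with that in place, the renderInContext: call in saveScreenshot captures the camera frame along with every subview. The view name arContentView in this sketch is a placeholder, not part of the original code:

    // Hypothetical view setup: camera frames go into arOverlayView (a UIImageView),
    // and the transparent AR content is layered on top of it.
    arOverlayView = [[UIImageView alloc] initWithFrame:self.view.bounds];
    arOverlayView.contentMode = UIViewContentModeScaleAspectFill;
    [self.view addSubview:arOverlayView];   // live camera frames land here
    [self.view addSubview:arContentView];   // transparent AR overlay on top
    [self activateCameraFeed];              // start receiving sample buffers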
Best Answer
If you want a snapshot of what is on screen, this is what I do in one of my camera apps. I haven't touched this code in a long time, so there may well be a better iOS 5.0 way of doing it now, but it has shipped in over a million downloads. There is one function for grabbing a UIView-based screen and one for grabbing an OpenGL ES 1 screen:
//
// ScreenCapture.m
// LiveEffectsCam
//
// Created by John Carter on 10/8/10.
//
#import "ScreenCapture.h"
#import <QuartzCore/CABase.h>
#import <QuartzCore/CATransform3D.h>
#import <QuartzCore/CALayer.h>
#import <QuartzCore/CAScrollLayer.h>
#import <OpenGLES/EAGL.h>
#import <OpenGLES/ES1/gl.h>
#import <OpenGLES/ES1/glext.h>
#import <QuartzCore/QuartzCore.h>
#import <OpenGLES/EAGLDrawable.h>
@implementation ScreenCapture
+ (UIImage *) GLViewToImage:(GLView *)glView
{
    UIImage *glImage = [GLView snapshot:glView]; // returns an autoreleased image
    return glImage;
}
+ (UIImage *) GLViewToImage:(GLView *)glView withOverlayImage:(UIImage *)overlayImage
{
    UIImage *glImage = [GLView snapshot:glView]; // returns an autoreleased image

    // Merge Image and Overlay
    //
    CGRect imageRect = CGRectMake((CGFloat)0.0, (CGFloat)0.0, glImage.size.width*glImage.scale, glImage.size.height*glImage.scale);
    CGImageRef overlayCopy = CGImageCreateCopy( overlayImage.CGImage );
    CGColorSpaceRef colorSpace = CGColorSpaceCreateDeviceRGB();
    CGContextRef context = CGBitmapContextCreate(NULL, (int)glImage.size.width*glImage.scale, (int)glImage.size.height*glImage.scale, 8, (int)glImage.size.width*4*glImage.scale, colorSpace, kCGImageAlphaPremultipliedLast);
    CGContextDrawImage(context, imageRect, glImage.CGImage);
    CGContextDrawImage(context, imageRect, overlayCopy);
    CGImageRef newImage = CGBitmapContextCreateImage(context);
    UIImage *combinedViewImage = [[[UIImage alloc] initWithCGImage:newImage] autorelease];
    CGImageRelease(newImage);
    CGImageRelease(overlayCopy);
    CGContextRelease(context);
    CGColorSpaceRelease(colorSpace);

    return combinedViewImage;
}
+ (UIImage *) UIViewToImage:(UIView *)view withOverlayImage:(UIImage *)overlayImage
{
    UIImage *viewImage = [ScreenCapture UIViewToImage:view]; // returns an autoreleased image

    // Merge Image and Overlay
    //
    CGRect imageRect = CGRectMake((CGFloat)0.0, (CGFloat)0.0, viewImage.size.width*viewImage.scale, viewImage.size.height*viewImage.scale);
    CGImageRef overlayCopy = CGImageCreateCopy( overlayImage.CGImage );
    CGColorSpaceRef colorSpace = CGColorSpaceCreateDeviceRGB();
    CGContextRef context = CGBitmapContextCreate(NULL, (int)viewImage.size.width*viewImage.scale, (int)viewImage.size.height*viewImage.scale, 8, (int)viewImage.size.width*4*viewImage.scale, colorSpace, kCGImageAlphaPremultipliedLast);
    CGContextDrawImage(context, imageRect, viewImage.CGImage);
    CGContextDrawImage(context, imageRect, overlayCopy);
    CGImageRef newImage = CGBitmapContextCreateImage(context);
    UIImage *combinedViewImage = [[[UIImage alloc] initWithCGImage:newImage] autorelease];
    CGImageRelease(newImage);
    CGImageRelease(overlayCopy);
    CGContextRelease(context);
    CGColorSpaceRelease(colorSpace);

    return combinedViewImage;
}
+ (UIImage *) UIViewToImage:(UIView *)view
{
    // Create a graphics context with the target size
    // On iOS 4 and later, use UIGraphicsBeginImageContextWithOptions to take the scale into consideration
    // On iOS prior to 4, fall back to use UIGraphicsBeginImageContext
    //
    // CGSize imageSize = [[UIScreen mainScreen] bounds].size;
    CGSize imageSize = CGSizeMake( (CGFloat)480.0, (CGFloat)640.0 ); // camera image size

    if (NULL != UIGraphicsBeginImageContextWithOptions)
        UIGraphicsBeginImageContextWithOptions(imageSize, NO, 0);
    else
        UIGraphicsBeginImageContext(imageSize);

    CGContextRef context = UIGraphicsGetCurrentContext();

    // Start with the view...
    //
    CGContextSaveGState(context);
    CGContextTranslateCTM(context, [view center].x, [view center].y);
    CGContextConcatCTM(context, [view transform]);
    CGContextTranslateCTM(context, -[view bounds].size.width * [[view layer] anchorPoint].x, -[view bounds].size.height * [[view layer] anchorPoint].y);
    [[view layer] renderInContext:context];
    CGContextRestoreGState(context);

    // ...then repeat for every subview from back to front
    //
    for (UIView *subView in [view subviews])
    {
        if ( [subView respondsToSelector:@selector(screen)] )
            if ( [(UIWindow *)subView screen] == [UIScreen mainScreen] )
                continue;

        CGContextSaveGState(context);
        CGContextTranslateCTM(context, [subView center].x, [subView center].y);
        CGContextConcatCTM(context, [subView transform]);
        CGContextTranslateCTM(context, -[subView bounds].size.width * [[subView layer] anchorPoint].x, -[subView bounds].size.height * [[subView layer] anchorPoint].y);
        [[subView layer] renderInContext:context];
        CGContextRestoreGState(context);
    }

    UIImage *image = UIGraphicsGetImageFromCurrentImageContext(); // autoreleased image
    UIGraphicsEndImageContext();

    return image;
}
+ (UIImage *) snapshot:(GLView *)eaglview
{
    NSInteger x = 0;
    NSInteger y = 0;
    NSInteger width = [eaglview backingWidth];
    NSInteger height = [eaglview backingHeight];
    NSInteger dataLength = width * height * 4;

    NSUInteger i;
    for ( i = 0; i < 100; i++ )
    {
        glFlush();
        CFRunLoopRunInMode(kCFRunLoopDefaultMode, (float)1.0/(float)60.0, FALSE);
    }

    GLubyte *data = (GLubyte*)malloc(dataLength * sizeof(GLubyte));

    // Read pixel data from the framebuffer
    //
    glPixelStorei(GL_PACK_ALIGNMENT, 4);
    glReadPixels(x, y, width, height, GL_RGBA, GL_UNSIGNED_BYTE, data);

    // Create a CGImage with the pixel data
    // If your OpenGL ES content is opaque, use kCGImageAlphaNoneSkipLast to ignore the alpha channel
    // otherwise, use kCGImageAlphaPremultipliedLast
    //
    CGDataProviderRef ref = CGDataProviderCreateWithData(NULL, data, dataLength, NULL);
    CGColorSpaceRef colorspace = CGColorSpaceCreateDeviceRGB();
    CGImageRef iref = CGImageCreate(width, height, 8, 32, width * 4, colorspace, kCGBitmapByteOrder32Big | kCGImageAlphaPremultipliedLast, ref, NULL, true, kCGRenderingIntentDefault);

    // OpenGL ES measures data in PIXELS
    // Create a graphics context with the target size measured in POINTS
    //
    NSInteger widthInPoints;
    NSInteger heightInPoints;
    if (NULL != UIGraphicsBeginImageContextWithOptions)
    {
        // On iOS 4 and later, use UIGraphicsBeginImageContextWithOptions to take the scale into consideration
        // Set the scale parameter to your OpenGL ES view's contentScaleFactor
        // so that you get a high-resolution snapshot when its value is greater than 1.0
        //
        CGFloat scale = eaglview.contentScaleFactor;
        widthInPoints = width / scale;
        heightInPoints = height / scale;
        UIGraphicsBeginImageContextWithOptions(CGSizeMake(widthInPoints, heightInPoints), NO, scale);
    }
    else
    {
        // On iOS prior to 4, fall back to use UIGraphicsBeginImageContext
        //
        widthInPoints = width;
        heightInPoints = height;
        UIGraphicsBeginImageContext(CGSizeMake(widthInPoints, heightInPoints));
    }

    CGContextRef cgcontext = UIGraphicsGetCurrentContext();

    // UIKit coordinate system is upside down to GL/Quartz coordinate system
    // Flip the CGImage by rendering it to the flipped bitmap context
    // The size of the destination area is measured in POINTS
    //
    CGContextSetBlendMode(cgcontext, kCGBlendModeCopy);
    CGContextDrawImage(cgcontext, CGRectMake(0.0, 0.0, widthInPoints, heightInPoints), iref);

    // Retrieve the UIImage from the current context
    UIImage *image = UIGraphicsGetImageFromCurrentImageContext(); // autoreleased image
    UIGraphicsEndImageContext();

    // Clean up
    free(data);
    CFRelease(ref);
    CFRelease(colorspace);
    CGImageRelease(iref);

    return image;
}

@end
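For reference, a possible call site for the UIKit path, assuming arOverlayView holds the AR content and hudImage is the overlay image to merge in (both names are placeholders, not from the original answer):

    // Hypothetical usage: grab the UIKit-based AR screen, merge the overlay,
    // and write the result to the camera roll.
    UIImage *snapshot = [ScreenCapture UIViewToImage:arOverlayView withOverlayImage:hudImage];
    UIImageWriteToSavedPhotosAlbum(snapshot, nil, NULL, NULL);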
This question about taking an augmented reality screenshot on iPhone with AVCaptureVideoPreviewLayer was adapted from a similar question on Stack Overflow: https://stackoverflow.com/questions/8980847/