ios - AVCapturePhotoOutput iOS camera super dark

I have an app set up to take pictures with the camera (on a timer) to detect the presence of a face. The detection process works fine when I feed the app photos that I have added to Assets. However, when I try to use the camera's output directly, or even after saving the image to a file, the resulting image is so dark that face recognition is completely unreliable.

If I display what the camera sees, the image looks correct. I captured the following two images: one of the live camera view, and one of the same view after creating the image from AVCapturePhotoOutput. The same darkness appears if I simply display the captured image in a UIImageView.

Note the comment "I put a breakpoint here and took a screen shot". I took the second screenshot when the code had finished. Both were taken in bright light. (The two screenshots appear in the original post.) Here is the basic code:

import UIKit
import AVFoundation
import Vision

class CRSFaceRecognitionViewController: UIViewController, UIImagePickerControllerDelegate {

    var sentBy: String?

    //timers
    var faceTimer: Timer?
    var frvcTimer: Timer?

    //capture
    var captureSession = AVCaptureSession()
    var settings = AVCapturePhotoSettings()
    var backCamera: AVCaptureDevice?
    var frontCamera: AVCaptureDevice?
    var currentCamera: AVCaptureDevice?

    var photoOutput: AVCapturePhotoOutput?
    var cameraPreviewLayer: AVCaptureVideoPreviewLayer?

    var image: UIImage?
    var outputImage: UIImage?
    @IBOutlet weak var imageView: UIImageView!

    //MARK: - Setup

    override func viewDidLoad() {
        super.viewDidLoad()
    }//viewDidLoad

    override func viewWillAppear(_ animated: Bool) {
        super.viewWillAppear(animated)
    }//viewWillAppear

    override func viewDidAppear(_ animated: Bool) {
        super.viewDidAppear(animated)

        //check for camera
        if UIImagePickerController.isSourceTypeAvailable(UIImagePickerControllerSourceType.camera) {

            setupCaptureSession()
            setupDevices()
            setupInputOutput()
            setupPreviewLayer()

            startRunningCaptureSession()

            photoOutput?.capturePhoto(with: settings, delegate: self)

        } else {
            print("Camera not present")
        }

    }//viewDidAppear

    //MARK: - Video

    @objc func showFaceRecognitionViewController() {
        //all this does is present the image in a new ViewController imageView
        performSegue(withIdentifier: "showSavedCameraPhoto", sender: self)
    }//showFaceRecognitionViewController

    func setupCaptureSession() {
        captureSession.sessionPreset = AVCaptureSession.Preset.photo
    }//setupCaptureSession

    func setupDevices() {

        let deviceDiscoverySession = AVCaptureDevice.DiscoverySession(deviceTypes: [AVCaptureDevice.DeviceType.builtInWideAngleCamera], mediaType: .video, position: .unspecified)

        let devices = deviceDiscoverySession.devices
        for device in devices {

            if device.position == AVCaptureDevice.Position.back {
                backCamera = device
            } else if device.position == AVCaptureDevice.Position.front {
                frontCamera = device
            }//if else

        }//for in

        currentCamera = frontCamera

    }//setupDevices

    func setupInputOutput() {

        do {
            let captureDeviceInput = try AVCaptureDeviceInput(device: currentCamera!)
            captureSession.addInput(captureDeviceInput)
            photoOutput = AVCapturePhotoOutput()
            photoOutput?.setPreparedPhotoSettingsArray([AVCapturePhotoSettings(format: [AVVideoCodecKey: AVVideoCodecType.jpeg])], completionHandler: { (success, error) in
                print("in photoOutput completion handler")
            })
            captureSession.addOutput(photoOutput!)
        } catch {
            print("Error creating AVCaptureDeviceInput:", error)
        }//do catch

    }//setupInputOutput

    func setupPreviewLayer() {
        cameraPreviewLayer = AVCaptureVideoPreviewLayer(session: captureSession)
        cameraPreviewLayer?.videoGravity = AVLayerVideoGravity.resizeAspectFill
        cameraPreviewLayer?.connection?.videoOrientation = AVCaptureVideoOrientation.portrait
        cameraPreviewLayer?.frame = view.frame
        view.layer.insertSublayer(cameraPreviewLayer!, at: 0)
    }//setupPreviewLayer

    func startRunningCaptureSession() {
        captureSession.startRunning()
    }//startRunningCaptureSession

    //MARK: - Segue

    override func prepare(for segue: UIStoryboardSegue, sender: Any?) {
        if segue.identifier == "showSavedCameraPhoto" {
            let controller = segue.destination as! JustToSeeThePhotoViewController
            controller.inImage = outputImage
        }//if segue
    }//prepare

    //MARK: - Look for Faces

    func findTheFaces() {
        let myView: UIView = self.view

        guard let outImage = outputImage else { return }

        let imageView = UIImageView(image: outImage)
        imageView.contentMode = .scaleAspectFit

        let scaledHeight = myView.frame.width / outImage.size.width * outImage.size.height

        imageView.frame = CGRect(x: 0, y: 0, width: myView.frame.width, height: myView.frame.height)
        imageView.backgroundColor = UIColor.blue

        myView.addSubview(imageView)

        let request = VNDetectFaceRectanglesRequest { (req, err) in

            if let err = err {
                print("VNDetectFaceRectanglesRequest failed to run:", err)
                return
            }//if let err

            print(req.results ?? "req.results is empty")

            req.results?.forEach({ (res) in

                DispatchQueue.main.async {

                    guard let faceObservation = res as? VNFaceObservation else { return }

                    //convert the normalized bounding box to view coordinates
                    let x = myView.frame.width * faceObservation.boundingBox.origin.x
                    let width = myView.frame.width * faceObservation.boundingBox.width
                    let height = scaledHeight * faceObservation.boundingBox.height

                    //Vision's y axis is flipped relative to UIKit's
                    let y = scaledHeight * (1 - faceObservation.boundingBox.origin.y) - height

                    let redView = UIView()
                    redView.backgroundColor = .red
                    redView.alpha = 0.4
                    redView.frame = CGRect(x: x, y: y, width: width, height: height)
                    myView.addSubview(redView)

                    print("faceObservation bounding box:")
                    print(faceObservation.boundingBox)

                    //if you get here, then you have a face bounding box

                }//main
            })//forEach

        }//let request

        guard let cgImage = outImage.cgImage else { return }

        DispatchQueue.global(qos: .utility).async {
            let handler = VNImageRequestHandler(cgImage: cgImage, options: [:])

            do {
                try handler.perform([request])

                print("handler request was successful")
                //UIKit work, including segues, must happen on the main thread
                DispatchQueue.main.async {
                    self.performSegue(withIdentifier: "showSavedCameraPhoto", sender: self)
                }

            } catch let reqErr {
                print("Failed to perform request:", reqErr)
            }
        }//DispatchQueue

    }//findTheFaces

    //MARK: - Memory

    override func didReceiveMemoryWarning() {
        super.didReceiveMemoryWarning()
    }//didReceiveMemoryWarning

}//class


extension CRSFaceRecognitionViewController: AVCapturePhotoCaptureDelegate {

    func photoOutput(_ output: AVCapturePhotoOutput, didFinishProcessingPhoto photo: AVCapturePhoto, error: Error?) {

        if let imageData = photo.fileDataRepresentation() {

            print(imageData)
            outputImage = UIImage(data: imageData)

            //
            //I put a breakpoint here and took a screen shot
            //

            if let outImage = outputImage?.updateImageOrientionUpSide() {
                self.outputImage = outImage
            }

            DispatchQueue.main.async {
                self.findTheFaces()
            }

        }//if let imageData
    }//photoOutput

}//extension

extension UIImage {

    //you need to do this to ensure that the image is in portrait mode
    //the face recognition method will not work if the face is horizontal
    func updateImageOrientionUpSide() -> UIImage? {
        if self.imageOrientation == .up {
            return self
        }

        UIGraphicsBeginImageContextWithOptions(self.size, false, self.scale)
        self.draw(in: CGRect(x: 0, y: 0, width: self.size.width, height: self.size.height))
        if let normalizedImage: UIImage = UIGraphicsGetImageFromCurrentImageContext() {
            UIGraphicsEndImageContext()
            return normalizedImage
        }
        UIGraphicsEndImageContext()
        return nil
    }//updateImageOrientionUpSide

}//image
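
An alternative to redrawing the bitmap is to tell Vision the image's orientation directly: VNImageRequestHandler(cgImage:orientation:options:) accepts a CGImagePropertyOrientation. A minimal sketch of that approach, assuming the Swift 4-era UIImageOrientation type used above; cgOrientation(from:) is a hypothetical helper, not part of the original post, and the name-for-name mapping follows Apple's documented correspondence between the two types:

import UIKit
import ImageIO
import Vision

//hypothetical helper: map UIKit's image orientation to the
//EXIF-style value Vision expects
func cgOrientation(from uiOrientation: UIImageOrientation) -> CGImagePropertyOrientation {
    switch uiOrientation {
    case .up: return .up
    case .upMirrored: return .upMirrored
    case .down: return .down
    case .downMirrored: return .downMirrored
    case .left: return .left
    case .leftMirrored: return .leftMirrored
    case .right: return .right
    case .rightMirrored: return .rightMirrored
    }
}

//usage: build the handler with the orientation instead of rotating first
//let handler = VNImageRequestHandler(cgImage: cgImage,
//                                    orientation: cgOrientation(from: outImage.imageOrientation),
//                                    options: [:])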

I must be doing something wrong in the way I capture from the camera. Any help would be much appreciated. Swift 4, iOS 11.2.5, Xcode 9.2.

Best Answer

I would try adding a delay between startRunningCaptureSession() and photoOutput?.capturePhoto(with: settings, delegate: self) so that the camera's auto-exposure has time to settle.

For example,

startRunningCaptureSession()

//take the photo once the camera has had a few seconds to adjust
DispatchQueue.main.asyncAfter(deadline: .now() + .seconds(4), execute: {
    self.photoOutput?.capturePhoto(with: self.settings, delegate: self)
})
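
If a fixed delay feels too arbitrary, a hedged alternative (a sketch, not from the original answer) is to key off the camera's auto-exposure state: AVCaptureDevice's isAdjustingExposure property is key-value observable, so you can capture as soon as it settles. This assumes the currentCamera, photoOutput, and settings properties from the question's class, plus a new stored property for the observation:

import AVFoundation

var exposureObservation: NSKeyValueObservation?

//call this right after startRunningCaptureSession(), in place of the
//immediate capturePhoto call
func captureWhenExposureSettles() {
    guard let device = currentCamera else { return }
    exposureObservation = device.observe(\.isAdjustingExposure, options: [.new]) { [weak self] device, _ in
        //ignore changes while auto-exposure is still adjusting
        guard let strongSelf = self, !device.isAdjustingExposure else { return }
        strongSelf.exposureObservation = nil //fire once, then stop observing
        strongSelf.photoOutput?.capturePhoto(with: strongSelf.settings, delegate: strongSelf)
    }
}

A production version would still want a timeout fallback, since the handler only fires when the value actually changes.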

Regarding ios - AVCapturePhotoOutput iOS camera super dark, we found a similar question on Stack Overflow: https://stackoverflow.com/questions/48478430/
