
ios - Real-time face detection using the camera in Swift 3


How can I do real-time face detection just like the built-in Camera app, with a white circle drawn around and over the face? I am using AVCaptureSession. So far I have only managed face detection on images after saving them. My current code is attached below; it only captures an image when I press the button and saves it to the photo library. Please help me draw the circle over the face in real time!

Code

class CameraFaceRecongnitionVC: UIViewController {

    @IBOutlet weak var imgOverlay: UIImageView!
    @IBOutlet weak var btnCapture: UIButton!

    let captureSession = AVCaptureSession()
    let stillImageOutput = AVCaptureStillImageOutput()
    var previewLayer : AVCaptureVideoPreviewLayer?

    // If we find a device we'll store it here for later use
    var captureDevice : AVCaptureDevice?

    override func viewDidLoad() {
        super.viewDidLoad()
        btnCapture.CameraButton()
        roundButton.RoundButtonForFaceRecong()

        // Do any additional setup after loading the view, typically from a nib.
        captureSession.sessionPreset = AVCaptureSessionPresetHigh

        if let devices = AVCaptureDevice.devices() as? [AVCaptureDevice] {
            // Loop through all the capture devices on this phone
            for device in devices {
                // Make sure this particular device supports video
                if (device.hasMediaType(AVMediaTypeVideo)) {
                    // Finally check the position and confirm we've got the front camera
                    if (device.position == AVCaptureDevicePosition.front) {
                        captureDevice = device
                        if captureDevice != nil {
                            print("Capture device found")
                            beginSession()
                        }
                    }
                }
            }
        }
    }

    @IBAction func actionCameraCapture(_ sender: AnyObject) {
        print("Camera button pressed")
        saveToCamera()
    }

    func beginSession() {
        do {
            try captureSession.addInput(AVCaptureDeviceInput(device: captureDevice))
            stillImageOutput.outputSettings = [AVVideoCodecKey: AVVideoCodecJPEG]

            if captureSession.canAddOutput(stillImageOutput) {
                captureSession.addOutput(stillImageOutput)
            }
        }
        catch {
            print("error: \(error.localizedDescription)")
        }

        guard let previewLayer = AVCaptureVideoPreviewLayer(session: captureSession) else {
            print("no preview layer")
            return
        }

        self.view.layer.addSublayer(previewLayer)
        previewLayer.frame = self.view.layer.frame
        captureSession.startRunning()

        // self.view.addSubview(navigationBar)
        self.view.addSubview(imgOverlay)
        self.view.addSubview(btnCapture)
    }

    func saveToCamera() {
        if let videoConnection = stillImageOutput.connection(withMediaType: AVMediaTypeVideo) {
            stillImageOutput.captureStillImageAsynchronously(from: videoConnection, completionHandler: { (CMSampleBuffer, Error) in
                if let imageData = AVCaptureStillImageOutput.jpegStillImageNSDataRepresentation(CMSampleBuffer) {
                    if let cameraImage = UIImage(data: imageData) {
                        UIImageWriteToSavedPhotosAlbum(cameraImage, nil, nil, nil)
                    }
                }
            })
        }
    }

    override func didReceiveMemoryWarning() {
        super.didReceiveMemoryWarning()
        // Dispose of any resources that can be recreated.
    }

}

Best Answer

Swift 3



I found a solution using AVFoundation that draws a square face-tracking overlay in real time on iOS. I have modified some of the code here.

import UIKit
import AVFoundation

class DetailsView: UIView {
    func setup() {
        layer.borderColor = UIColor.red.withAlphaComponent(0.7).cgColor
        layer.borderWidth = 5.0
    }
}


class ViewController: UIViewController {

    let stillImageOutput = AVCaptureStillImageOutput()

    var session: AVCaptureSession?
    var stillOutput = AVCaptureStillImageOutput()
    var borderLayer: CAShapeLayer?

    let detailsView: DetailsView = {
        let detailsView = DetailsView()
        detailsView.setup()

        return detailsView
    }()

    lazy var previewLayer: AVCaptureVideoPreviewLayer? = {
        var previewLay = AVCaptureVideoPreviewLayer(session: self.session!)
        previewLay?.videoGravity = AVLayerVideoGravityResizeAspectFill

        return previewLay
    }()

    lazy var frontCamera: AVCaptureDevice? = {
        guard let devices = AVCaptureDevice.devices(withMediaType: AVMediaTypeVideo) as? [AVCaptureDevice] else { return nil }

        return devices.filter { $0.position == .front }.first
    }()

    let faceDetector = CIDetector(ofType: CIDetectorTypeFace, context: nil, options: [CIDetectorAccuracy: CIDetectorAccuracyLow])

    override func viewDidLayoutSubviews() {
        super.viewDidLayoutSubviews()
        previewLayer?.frame = view.frame
    }

    override func viewDidAppear(_ animated: Bool) {
        super.viewDidAppear(animated)
        guard let previewLayer = previewLayer else { return }

        view.layer.addSublayer(previewLayer)
        view.addSubview(detailsView)
        view.bringSubview(toFront: detailsView)
    }

    override func viewDidLoad() {
        super.viewDidLoad()
        sessionPrepare()
        session?.startRunning()
    }

    // Function to capture a still image and store it in the photo library
    func saveToCamera() {
        if let videoConnection = stillImageOutput.connection(withMediaType: AVMediaTypeVideo) {
            stillImageOutput.captureStillImageAsynchronously(from: videoConnection, completionHandler: { (CMSampleBuffer, Error) in
                if let imageData = AVCaptureStillImageOutput.jpegStillImageNSDataRepresentation(CMSampleBuffer) {
                    if let cameraImage = UIImage(data: imageData) {
                        UIImageWriteToSavedPhotosAlbum(cameraImage, nil, nil, nil)
                    }
                }
            })
        }
    }
}

extension ViewController {

    func sessionPrepare() {
        session = AVCaptureSession()

        guard let session = session, let captureDevice = frontCamera else { return }

        session.sessionPreset = AVCaptureSessionPresetPhoto

        do {
            let deviceInput = try AVCaptureDeviceInput(device: captureDevice)
            session.beginConfiguration()
            stillImageOutput.outputSettings = [AVVideoCodecKey: AVVideoCodecJPEG]

            if session.canAddOutput(stillImageOutput) {
                session.addOutput(stillImageOutput)
            }

            if session.canAddInput(deviceInput) {
                session.addInput(deviceInput)
            }

            let output = AVCaptureVideoDataOutput()
            output.videoSettings = [kCVPixelBufferPixelFormatTypeKey as String: NSNumber(value: kCVPixelFormatType_420YpCbCr8BiPlanarFullRange)]

            output.alwaysDiscardsLateVideoFrames = true

            if session.canAddOutput(output) {
                session.addOutput(output)
            }

            session.commitConfiguration()

            let queue = DispatchQueue(label: "output.queue")
            output.setSampleBufferDelegate(self, queue: queue)

        } catch {
            print("error with creating AVCaptureDeviceInput")
        }
    }
}

extension ViewController: AVCaptureVideoDataOutputSampleBufferDelegate {

    func captureOutput(_ captureOutput: AVCaptureOutput!, didOutputSampleBuffer sampleBuffer: CMSampleBuffer!, from connection: AVCaptureConnection!) {
        let pixelBuffer = CMSampleBufferGetImageBuffer(sampleBuffer)
        let attachments = CMCopyDictionaryOfAttachments(kCFAllocatorDefault, sampleBuffer, kCMAttachmentMode_ShouldPropagate)
        let ciImage = CIImage(cvImageBuffer: pixelBuffer!, options: attachments as! [String : Any]?)
        let options: [String : Any] = [CIDetectorImageOrientation: exifOrientation(orientation: UIDevice.current.orientation),
                                       CIDetectorSmile: true,
                                       CIDetectorEyeBlink: true]
        let allFeatures = faceDetector?.features(in: ciImage, options: options)

        let formatDescription = CMSampleBufferGetFormatDescription(sampleBuffer)
        let cleanAperture = CMVideoFormatDescriptionGetCleanAperture(formatDescription!, false)

        guard let features = allFeatures else { return }

        for feature in features {
            if let faceFeature = feature as? CIFaceFeature {
                let faceRect = calculateFaceRect(facePosition: faceFeature.mouthPosition, faceBounds: faceFeature.bounds, clearAperture: cleanAperture)
                update(with: faceRect)
            }
        }

        // Hide the overlay when no face is detected in the current frame
        if features.count == 0 {
            DispatchQueue.main.async {
                self.detailsView.alpha = 0.0
            }
        }
    }

    // Map the device orientation to the EXIF orientation value expected by CIDetector
    func exifOrientation(orientation: UIDeviceOrientation) -> Int {
        switch orientation {
        case .portraitUpsideDown:
            return 8
        case .landscapeLeft:
            return 3
        case .landscapeRight:
            return 1
        default:
            return 6
        }
    }

    // Compute the rect that the video occupies inside the preview layer
    func videoBox(frameSize: CGSize, apertureSize: CGSize) -> CGRect {
        let apertureRatio = apertureSize.height / apertureSize.width
        let viewRatio = frameSize.width / frameSize.height

        var size = CGSize.zero

        if (viewRatio > apertureRatio) {
            size.width = frameSize.width
            size.height = apertureSize.width * (frameSize.width / apertureSize.height)
        } else {
            size.width = apertureSize.height * (frameSize.height / apertureSize.width)
            size.height = frameSize.height
        }

        var videoBox = CGRect(origin: .zero, size: size)

        if (size.width < frameSize.width) {
            videoBox.origin.x = (frameSize.width - size.width) / 2.0
        } else {
            videoBox.origin.x = (size.width - frameSize.width) / 2.0
        }

        if (size.height < frameSize.height) {
            videoBox.origin.y = (frameSize.height - size.height) / 2.0
        } else {
            videoBox.origin.y = (size.height - frameSize.height) / 2.0
        }

        return videoBox
    }

    // Convert the detected face bounds from image coordinates to preview-layer coordinates
    func calculateFaceRect(facePosition: CGPoint, faceBounds: CGRect, clearAperture: CGRect) -> CGRect {
        let parentFrameSize = previewLayer!.frame.size
        let previewBox = videoBox(frameSize: parentFrameSize, apertureSize: clearAperture.size)

        var faceRect = faceBounds

        swap(&faceRect.size.width, &faceRect.size.height)
        swap(&faceRect.origin.x, &faceRect.origin.y)

        let widthScaleBy = previewBox.size.width / clearAperture.size.height
        let heightScaleBy = previewBox.size.height / clearAperture.size.width

        faceRect.size.width *= widthScaleBy
        faceRect.size.height *= heightScaleBy
        faceRect.origin.x *= widthScaleBy
        faceRect.origin.y *= heightScaleBy

        faceRect = faceRect.offsetBy(dx: 0.0, dy: previewBox.origin.y)
        let frame = CGRect(x: parentFrameSize.width - faceRect.origin.x - faceRect.size.width - previewBox.origin.x / 2.0,
                           y: faceRect.origin.y,
                           width: faceRect.width,
                           height: faceRect.height)

        return frame
    }

}

extension ViewController {
    func update(with faceRect: CGRect) {
        DispatchQueue.main.async {
            UIView.animate(withDuration: 0.2) {
                self.detailsView.alpha = 1.0
                self.detailsView.frame = faceRect
            }
        }
    }
}
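
The question asks for a round overlay like the stock Camera app rather than a square. A minimal sketch of one option (my own addition, not part of the original answer): round the corners of detailsView each time its frame is updated, and optionally switch the border color to white in DetailsView.setup().

extension ViewController {
    func update(with faceRect: CGRect) {
        DispatchQueue.main.async {
            UIView.animate(withDuration: 0.2) {
                self.detailsView.alpha = 1.0
                self.detailsView.frame = faceRect
                // Half of the shorter side turns the square border into a circle/ellipse
                self.detailsView.layer.cornerRadius = min(faceRect.width, faceRect.height) / 2.0
            }
        }
    }
}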

** EDIT **

Swift 4



Apple's own Vision framework can be used for real-time face detection from Swift 4 onward; click the link for the documentation and a sample application.
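
For reference, here is a minimal sketch of the Vision approach (my own illustration, not taken from the linked sample): run a VNDetectFaceRectanglesRequest on each camera frame, for example from the same AVCaptureVideoDataOutput delegate used above.

import Vision

// Call this from captureOutput(_:didOutputSampleBuffer:from:) with the frame's pixel buffer
func detectFaces(in pixelBuffer: CVPixelBuffer) {
    let request = VNDetectFaceRectanglesRequest { request, error in
        guard let faces = request.results as? [VNFaceObservation] else { return }
        for face in faces {
            // boundingBox is normalized (0...1) with a lower-left origin;
            // convert it to preview-layer coordinates before drawing an overlay
            print("Face at \(face.boundingBox)")
        }
    }
    let handler = VNImageRequestHandler(cvPixelBuffer: pixelBuffer, options: [:])
    try? handler.perform([request])
}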

Regarding "ios - Real-time face detection using the camera in Swift 3", we found a similar question on Stack Overflow: https://stackoverflow.com/questions/46068383/
