gpt4 book ai didi

ios - 如何在 iOS 上将深度数据捕获为 kCVPixelFormatType_DepthFloat16?

转载 作者:行者123 更新时间:2023-12-01 19:43:51 25 4
gpt4 key购买 nike

以下代码配置 TrueDepth 相机以提供深度数据。

configureCaptureDevices() 将 AVCaptureDevice 配置为以 kCVPixelFormatType_DepthFloat16 或 kCVPixelFormatType_DepthFloat32 格式提供深度数据。

但是，当我调用 CVPixelBufferGetPixelFormatType(depthMap) 时，得到的 CVPixelBuffer 缓冲区类型总是 kCVPixelFormatType_DisparityFloat16。
如何让 depthMap 在 kCVPixelFormatType_DepthFloat16 中?

import AVFoundation
import UIKit

/// Configures and owns an AVFoundation capture pipeline for the TrueDepth
/// camera, delivering HEVC photos with depth data.
///
/// Fixes over the original: capture-device/input/output failures now throw
/// `CameraControllerError` values (previously `fatalError`, which crashed the
/// app instead of reaching `prepare`'s completion handler), force-unwraps of
/// `videoDevice` are removed, and a failed `lockForConfiguration()` now
/// propagates instead of being printed and silently swallowed.
class CameraController: NSObject {

    // MARK: - Capture state

    var captureSession: AVCaptureSession?
    var videoDevice: AVCaptureDevice?
    var previewLayer: AVCaptureVideoPreviewLayer?

    var videoOutput = AVCaptureVideoDataOutput()
    var photoOutput = AVCapturePhotoOutput()

    /// Stored by `captureImage(delegate:completion:)` for the photo delegate
    /// to invoke once capture finishes.
    var photoCaptureCompletionBlock: ((UIImage?, Error?) -> Void)?

    /// Builds the whole capture pipeline on a background queue, then calls
    /// `completionHandler` on the main queue with `nil` on success or the
    /// failure that occurred.
    func prepare(completionHandler: @escaping (Error?) -> Void) {
        func createCaptureSession() {
            captureSession = AVCaptureSession()
        }

        /// Selects the TrueDepth camera and a *depth* (not disparity) format.
        /// - Throws: `CameraControllerError.noCamerasAvailable` when no
        ///   TrueDepth device exists, or any error from
        ///   `lockForConfiguration()`.
        func configureCaptureDevices() throws {
            guard let device = AVCaptureDevice.default(.builtInTrueDepthCamera,
                                                       for: .video,
                                                       position: .unspecified) else {
                // Was fatalError("No dual camera."); throwing lets prepare()
                // report the failure through completionHandler instead.
                throw CameraControllerError.noCamerasAvailable
            }
            videoDevice = device

            // Prefer a depth (not disparity) format compatible with the
            // active color format. Note: delivered buffers may still be
            // disparity; convert the received AVDepthData with
            // converting(toDepthDataType:) if an exact type is required.
            let depthFormat = device.activeFormat.supportedDepthDataFormats.first { format in
                let subType = CMFormatDescriptionGetMediaSubType(format.formatDescription)
                return subType == kCVPixelFormatType_DepthFloat16
                    || subType == kCVPixelFormatType_DepthFloat32
            }

            // Propagate lock failures; previously the error was printed and
            // the function returned as if configuration had succeeded.
            try device.lockForConfiguration()
            device.activeDepthDataFormat = depthFormat
            device.unlockForConfiguration()
        }

        /// Attaches the selected camera to the session as a video input.
        /// - Throws: `.captureSessionIsMissing` or `.inputsAreInvalid`.
        func configureDeviceInputs() throws {
            guard let session = captureSession else {
                throw CameraControllerError.captureSessionIsMissing
            }
            session.beginConfiguration()
            defer { session.commitConfiguration() }

            guard let device = videoDevice,
                  let videoDeviceInput = try? AVCaptureceInputOrNil(device: device),
                  session.canAddInput(videoDeviceInput) else {
                throw CameraControllerError.inputsAreInvalid
            }
            session.addInput(videoDeviceInput)
        }

        /// Creates the photo output, enables depth delivery, and starts the
        /// session running.
        /// - Throws: `.captureSessionIsMissing` or `.invalidOperation`.
        func configurePhotoOutput() throws {
            guard let session = captureSession else {
                throw CameraControllerError.captureSessionIsMissing
            }
            session.beginConfiguration()

            photoOutput = AVCapturePhotoOutput()
            photoOutput.setPreparedPhotoSettingsArray(
                [AVCapturePhotoSettings(format: [AVVideoCodecKey: AVVideoCodecType.hevc])],
                completionHandler: nil)

            guard session.canAddOutput(photoOutput) else {
                // Was fatalError("Can't add photo output.").
                throw CameraControllerError.invalidOperation
            }
            session.addOutput(photoOutput)
            // isDepthDataDeliverySupported is false until the output is
            // attached to a session, because support depends on the session's
            // configuration — hence this must come after addOutput.
            photoOutput.isDepthDataDeliveryEnabled = photoOutput.isDepthDataDeliverySupported
            session.sessionPreset = .photo
            session.commitConfiguration()

            session.startRunning()
        }

        DispatchQueue(label: "prepare").async {
            do {
                createCaptureSession()
                try configureCaptureDevices()
                try configureDeviceInputs()
                try configurePhotoOutput()
            } catch {
                DispatchQueue.main.async {
                    completionHandler(error)
                }
                return
            }
            DispatchQueue.main.async {
                completionHandler(nil)
            }
        }
    }

    /// Installs a full-screen preview layer for the running session at the
    /// back of `view`'s layer hierarchy.
    /// - Throws: `.captureSessionIsMissing` when the session is absent or
    ///   not yet running.
    func displayPreview(on view: UIView) throws {
        guard let session = captureSession, session.isRunning else {
            throw CameraControllerError.captureSessionIsMissing
        }

        let layer = AVCaptureVideoPreviewLayer(session: session)
        layer.videoGravity = .resizeAspectFill
        layer.connection?.videoOrientation = .portrait
        layer.frame = view.frame

        view.layer.insertSublayer(layer, at: 0)
        previewLayer = layer
    }

    /// Requests one HEVC photo with unfiltered depth data. Capture results
    /// arrive on `delegate`; `completion` is stored in
    /// `photoCaptureCompletionBlock` for the delegate to call.
    func captureImage(delegate: AVCapturePhotoCaptureDelegate, completion: @escaping (UIImage?, Error?) -> Void) {
        let photoSettings = AVCapturePhotoSettings(format: [AVVideoCodecKey: AVVideoCodecType.hevc])
        photoSettings.isDepthDataDeliveryEnabled = true
        photoSettings.isDepthDataFiltered = false
        photoOutput.capturePhoto(with: photoSettings, delegate: delegate)
        photoCaptureCompletionBlock = completion
    }
}

// MARK: - Nested types
extension CameraController {
// Identifies which physical camera a caller wants. (Declared for the public
// API surface; nothing in the visible code reads it yet.)
public enum CameraPosition {
case front
case rear
}

// Failures thrown by the configuration and preview methods above.
enum CameraControllerError: Swift.Error {
case captureSessionAlreadyRunning
case captureSessionIsMissing
case inputsAreInvalid
case invalidOperation
case noCamerasAvailable
case unknown
}
}

最佳答案

AVDepthData 提供了 converting(toDepthDataType:) 方法，它返回一个转换为目标格式的新 AVDepthData 实例（注意要使用其返回值）。只需调用：

avDepth.converting(toDepthDataType: kCVPixelFormatType_DepthFloat16)

关于ios - 如何在 iOS 上将深度数据捕获为 kCVPixelFormatType_DepthFloat16?,我们在Stack Overflow上找到一个类似的问题: https://stackoverflow.com/questions/51383224/

25 4 0
Copyright 2021 - 2024 cfsdn All Rights Reserved 蜀ICP备2022000587号
广告合作:1813099741@qq.com 6ren.com