ios - How do you create a new AVAsset video that consists only of frames from given `CMTimeRange`s of another video?


Apple's sample code Identifying Trajectories in Video contains the following delegate callback:

func cameraViewController(_ controller: CameraViewController, didReceiveBuffer buffer: CMSampleBuffer, orientation: CGImagePropertyOrientation) {
    let visionHandler = VNImageRequestHandler(cmSampleBuffer: buffer, orientation: orientation, options: [:])

    if gameManager.stateMachine.currentState is GameManager.TrackThrowsState {
        DispatchQueue.main.async {
            // Get the frame of rendered view
            let normalizedFrame = CGRect(x: 0, y: 0, width: 1, height: 1)
            self.jointSegmentView.frame = controller.viewRectForVisionRect(normalizedFrame)
            self.trajectoryView.frame = controller.viewRectForVisionRect(normalizedFrame)
        }
        // Perform the trajectory request in a separate dispatch queue.
        trajectoryQueue.async {
            do {
                try visionHandler.perform([self.detectTrajectoryRequest])
                if let results = self.detectTrajectoryRequest.results {
                    DispatchQueue.main.async {
                        self.processTrajectoryObservations(controller, results)
                    }
                }
            } catch {
                AppError.display(error, inViewController: self)
            }
        }
    }
}

However, instead of drawing UI whenever detectTrajectoryRequest.results exist ( https://developer.apple.com/documentation/vision/vndetecttrajectoriesrequest/3675672-results ), I'm interested in using the CMTimeRange provided with each result to construct a new video. In effect, this would filter the original video down to only the frames that contain trajectories.
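For reference, the CMTimeRange in question is the timeRange property that every VNTrajectoryObservation inherits from VNObservation. A minimal sketch of collecting those ranges from a request's results (collectedRanges and handleTrajectoryResults are hypothetical names used for illustration, not part of Apple's sample):

import Vision
import CoreMedia

// Hypothetical accumulator for the time ranges worth keeping.
var collectedRanges: [CMTimeRange] = []

func handleTrajectoryResults(_ request: VNRequest) {
    guard let observations = request.results as? [VNTrajectoryObservation] else { return }
    for observation in observations {
        // Each observation's timeRange locates its trajectory in the source footage.
        collectedRanges.append(observation.timeRange)
    }
}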

What would be a good way to transfer only the frames that contain trajectories from an AVAssetReader to an AVAssetWriter?

Best Answer

By the time you identify a trajectory in captured video frames (or in frames decoded from a file), you may no longer have the earlier frames in memory. So the simplest way to create a file containing only the trajectories is to keep the original file on hand, insert its trajectory segments into an AVComposition, and then export that composition with an AVAssetExportSession.

This example captures frames from the camera and encodes them to a file while simultaneously analyzing them for trajectories. After 20 seconds, it closes that file and then creates a new file containing only the trajectory segments.

If you're interested in detecting trajectories in a pre-existing file instead, it shouldn't be too hard to rewire this code; a sketch of that variant follows the listing below.

import UIKit
import AVFoundation
import Vision

class ViewController: UIViewController, AVCaptureVideoDataOutputSampleBufferDelegate {
    let session = AVCaptureSession()

    var assetWriter: AVAssetWriter!
    var assetWriterInput: AVAssetWriterInput!
    var assetWriterStartTime: CMTime = .zero
    var assetWriterStarted = false

    var referenceFileURL: URL!
    // Keyed by start time in seconds so that repeated observations of the
    // same trajectory (whose time ranges grow as more frames arrive)
    // overwrite each other instead of accumulating duplicates.
    var timeRangesOfInterest: [Double: CMTimeRange] = [:]

    func startWritingFile(outputURL: URL, initialSampleBuffer: CMSampleBuffer) {
        try? FileManager.default.removeItem(at: outputURL)
        assetWriter = try! AVAssetWriter(outputURL: outputURL, fileType: .mov)

        let dimensions = initialSampleBuffer.formatDescription!.dimensions
        assetWriterInput = AVAssetWriterInput(mediaType: .video, outputSettings: [
            AVVideoCodecKey: AVVideoCodecType.h264,
            AVVideoWidthKey: dimensions.width,
            AVVideoHeightKey: dimensions.height
        ])
        // Recommended for real-time sources so the writer doesn't stall the capture pipeline.
        assetWriterInput.expectsMediaDataInRealTime = true

        assetWriter.add(assetWriterInput)

        assetWriter.startWriting()

        self.assetWriterStartTime = CMSampleBufferGetPresentationTimeStamp(initialSampleBuffer)
        assetWriter.startSession(atSourceTime: self.assetWriterStartTime)
    }

    func stopWritingFile(completion: @escaping (() -> Void)) {
        let assetWriterToFinish = self.assetWriter!
        self.assetWriterInput = nil
        self.assetWriter = nil

        assetWriterToFinish.finishWriting {
            print("finished writing: \(assetWriterToFinish.status.rawValue)")
            completion()
        }
    }

    func exportVideoTimeRanges(inputFileURL: URL, outputFileURL: URL, timeRanges: [CMTimeRange]) {
        let inputAsset = AVURLAsset(url: inputFileURL)
        let inputVideoTrack = inputAsset.tracks(withMediaType: .video).first!

        let composition = AVMutableComposition()

        let compositionTrack = composition.addMutableTrack(withMediaType: .video, preferredTrackID: kCMPersistentTrackID_Invalid)!

        // Splice the trajectory segments back to back at the head of the composition.
        var insertionPoint: CMTime = .zero
        for timeRange in timeRanges {
            try! compositionTrack.insertTimeRange(timeRange, of: inputVideoTrack, at: insertionPoint)
            insertionPoint = insertionPoint + timeRange.duration
        }

        let exportSession = AVAssetExportSession(asset: composition, presetName: AVAssetExportPresetHighestQuality)!
        try? FileManager.default.removeItem(at: outputFileURL)
        exportSession.outputURL = outputFileURL
        exportSession.outputFileType = .mov
        exportSession.exportAsynchronously {
            print("export finished: \(exportSession.status.rawValue) - \(String(describing: exportSession.error))")
        }
    }

    override func viewDidLoad() {
        super.viewDidLoad()

        let inputDevice = AVCaptureDevice.default(for: .video)!
        let input = try! AVCaptureDeviceInput(device: inputDevice)
        let output = AVCaptureVideoDataOutput()

        output.setSampleBufferDelegate(self, queue: DispatchQueue.main)

        session.addInput(input)
        session.addOutput(output)

        session.startRunning()

        DispatchQueue.main.asyncAfter(deadline: .now() + 20) {
            self.stopWritingFile {
                print("finished writing")

                let trajectoriesFileURL = FileManager.default.urls(for: .documentDirectory, in: .userDomainMask)[0].appendingPathComponent("trajectories.mov")

                // Sort by start time: dictionary iteration order is unspecified,
                // and out-of-order segments would scramble the exported video.
                let sortedRanges = self.timeRangesOfInterest.sorted { $0.key < $1.key }.map { $0.value }
                self.exportVideoTimeRanges(inputFileURL: self.referenceFileURL, outputFileURL: trajectoriesFileURL, timeRanges: sortedRanges)
            }
        }
    }

    // Lazily create a single instance of VNDetectTrajectoriesRequest.
    private lazy var request: VNDetectTrajectoriesRequest = {
        return VNDetectTrajectoriesRequest(frameAnalysisSpacing: .zero,
                                           trajectoryLength: 10,
                                           completionHandler: completionHandler)
    }()

    // AVCaptureVideoDataOutputSampleBufferDelegate callback.
    func captureOutput(_ output: AVCaptureOutput,
                       didOutput sampleBuffer: CMSampleBuffer,
                       from connection: AVCaptureConnection) {
        if !assetWriterStarted {
            self.referenceFileURL = FileManager.default.urls(for: .documentDirectory, in: .userDomainMask)[0].appendingPathComponent("reference.mov")

            startWritingFile(outputURL: self.referenceFileURL, initialSampleBuffer: sampleBuffer)
            assetWriterStarted = true
        }

        if assetWriterInput != nil && assetWriterInput.isReadyForMoreMediaData {
            assetWriterInput.append(sampleBuffer)
        }

        do {
            let requestHandler = VNImageRequestHandler(cmSampleBuffer: sampleBuffer)
            try requestHandler.perform([request])
        } catch {
            // Handle the error.
        }
    }

    func completionHandler(request: VNRequest, error: Error?) {
        guard let request = request as? VNDetectTrajectoriesRequest else { return }

        if let results = request.results,
           results.count > 0 {
            NSLog("\(results)")
            for result in results {
                // Observation timestamps come from the capture clock; rebase them
                // so they're relative to the start of the reference file.
                var fileRelativeTimeRange = result.timeRange
                fileRelativeTimeRange.start = fileRelativeTimeRange.start - self.assetWriterStartTime
                self.timeRangesOfInterest[fileRelativeTimeRange.start.seconds] = fileRelativeTimeRange
            }
        }
    }
}
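
For the pre-existing-file variant mentioned above, the same request can be driven from an AVAssetReader instead of the camera. The following is an untested outline, assuming a single video track; analyzeExistingFile is an invented helper name, and its result is meant to be handed to exportVideoTimeRanges from the listing above:

import AVFoundation
import Vision

// Hypothetical helper: detect trajectories in an existing file and
// return their time ranges, ready for exportVideoTimeRanges(...).
func analyzeExistingFile(at url: URL) throws -> [CMTimeRange] {
    let asset = AVURLAsset(url: url)
    guard let videoTrack = asset.tracks(withMediaType: .video).first else { return [] }

    let reader = try AVAssetReader(asset: asset)
    let output = AVAssetReaderTrackOutput(track: videoTrack, outputSettings: [
        kCVPixelBufferPixelFormatTypeKey as String: kCVPixelFormatType_420YpCbCr8BiPlanarFullRange
    ])
    reader.add(output)

    var rangesByStart: [Double: CMTimeRange] = [:]
    let request = VNDetectTrajectoriesRequest(frameAnalysisSpacing: .zero,
                                              trajectoryLength: 10) { request, _ in
        for observation in request.results as? [VNTrajectoryObservation] ?? [] {
            // File-based timestamps are already relative to the file start,
            // so no rebasing against a capture clock is needed here.
            rangesByStart[observation.timeRange.start.seconds] = observation.timeRange
        }
    }

    guard reader.startReading() else { return [] }
    while let sampleBuffer = output.copyNextSampleBuffer() {
        let handler = VNImageRequestHandler(cmSampleBuffer: sampleBuffer)
        try handler.perform([request])
    }

    return rangesByStart.sorted { $0.key < $1.key }.map { $0.value }
}

Note that VNDetectTrajectoriesRequest is stateful and expects samples in increasing timestamp order, which sequential reading naturally provides.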

Regarding ios - How do you create a new AVAsset video that consists only of frames from given `CMTimeRange`s of another video?, we found a similar question on Stack Overflow: https://stackoverflow.com/questions/70134980/
