
python - How to capture frames from the Apple iSight using Python and PyObjC?


I am trying to capture a single frame from the Apple iSight camera built into a MacBook Pro, using Python (version 2.7 or 2.6) and PyObjC (version 2.2).

As a starting point, I used this old StackOverflow question. To verify that it made sense, I cross-referenced it against Apple's MyRecorder example, which it appears to be based on. Unfortunately, my script does not work.

My main questions are:

  • Am I initializing the camera correctly?
  • Am I starting the event loop correctly?
  • Is there any other setup I should be doing?

In the example script pasted below, the intended behavior is that after startImageCapture() is called, I should start seeing "Got a frame..." messages printed from the CaptureDelegate. However, the camera's light never turns on and the delegate's callback is never executed.

Also, nothing fails during startImageCapture(): every call claims to succeed, and the iSight device is found without problems. Analyzing the session object in pdb shows that it has valid input and output objects, the output has a delegate assigned, the device is not in use by another process, and the session reports itself as running after startRunning() is called.
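
For reference, the pdb inspection described above looked roughly like the following. This is only an illustrative sketch (run with a breakpoint set at the end of startImageCapture()); the accessors are standard QTKit methods:

# Illustrative only: break at the end of startImageCapture() and poke at the
# session from pdb to confirm it was set up.
import pdb; pdb.set_trace()
# (Pdb) session.inputs()                   # lists the QTCaptureDeviceInput
# (Pdb) session.outputs()                  # lists the QTCaptureDecompressedVideoOutput
# (Pdb) session.outputs()[0].delegate()    # the CaptureDelegate instance
# (Pdb) dev.isInUseByAnotherApplication()  # False, so nothing else holds the camera
# (Pdb) session.isRunning()                # True after startRunning()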

Here is the code:

#!/usr/bin/env python2.7

import sys
import os
import time
import objc
import QTKit
import AppKit
from Foundation import NSObject
from Foundation import NSTimer
from PyObjCTools import AppHelper
objc.setVerbose(True)

class CaptureDelegate(NSObject):
    def captureOutput_didOutputVideoFrame_withSampleBuffer_fromConnection_(self, captureOutput,
                                                                           videoFrame, sampleBuffer,
                                                                           connection):
        # This should get called for every captured frame
        print "Got a frame: %s" % videoFrame

class QuitClass(NSObject):
    def quitMainLoop_(self, aTimer):
        # Just stop the main loop.
        print "Quitting main loop."
        AppHelper.stopEventLoop()


def startImageCapture():
    error = None

    # Create a QT Capture session
    session = QTKit.QTCaptureSession.alloc().init()

    # Find iSight device and open it
    dev = QTKit.QTCaptureDevice.defaultInputDeviceWithMediaType_(QTKit.QTMediaTypeVideo)
    print "Device: %s" % dev
    if not dev.open_(error):
        print "Couldn't open capture device."
        return

    # Create an input instance with the device we found and add to session
    input = QTKit.QTCaptureDeviceInput.alloc().initWithDevice_(dev)
    if not session.addInput_error_(input, error):
        print "Couldn't add input device."
        return

    # Create an output instance with a delegate for callbacks and add to session
    output = QTKit.QTCaptureDecompressedVideoOutput.alloc().init()
    delegate = CaptureDelegate.alloc().init()
    output.setDelegate_(delegate)
    if not session.addOutput_error_(output, error):
        print "Failed to add output delegate."
        return

    # Start the capture
    print "Initiating capture..."
    session.startRunning()


def main():
    # Open camera and start capturing frames
    startImageCapture()

    # Setup a timer to quit in 10 seconds (hack for now)
    quitInst = QuitClass.alloc().init()
    NSTimer.scheduledTimerWithTimeInterval_target_selector_userInfo_repeats_(10.0,
                                                                             quitInst,
                                                                             'quitMainLoop:',
                                                                             None,
                                                                             False)
    # Start Cocoa's main event loop
    AppHelper.runConsoleEventLoop(installInterrupt=True)

    print "After event loop"


if __name__ == "__main__":
    main()

Thanks for any help you can provide!

Best Answer

Alright, I spent a day diving deep into PyObjC and got it working.

For the record, here is why the code in the question did not work: variable scope and garbage collection. The session variable was deleted when it went out of scope, which happened before the event processor ever ran. Something must be done to retain it so that it is not freed before it has time to run.
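
The essence of the fix is to hold the session on an object that outlives the function call. A minimal sketch of that pattern (the class name here is illustrative; the full working script follows below):

# Minimal sketch (illustrative): retain the session on a long-lived object
# instead of a local variable so it survives until the event loop runs.
class FrameGrabber(NSObject):
    def startImageCapture(self):
        self.session = QTKit.QTCaptureSession.alloc().init()  # retained by self
        # ... add the input and output exactly as in the question's code ...
        self.session.startRunning()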

Moving everything into a class and making the session an instance variable made the callbacks start working. In addition, the code below demonstrates how to get the frame's pixel data into bitmap format and save it with Cocoa calls, and how to copy it back into Python's world-view as a buffer or a string.

The script below will capture a single frame:

#!/usr/bin/env python2.7
#
# camera.py -- by Trevor Bentley (02/04/2011)
#
# This work is licensed under a Creative Commons Attribution 3.0 Unported License.
#
# Run from the command line on an Apple laptop running OS X 10.6, this script will
# take a single frame capture using the built-in iSight camera and save it to disk
# using three methods.
#

import sys
import os
import time
import objc
import QTKit
from AppKit import *
from Foundation import NSObject
from Foundation import NSTimer
from PyObjCTools import AppHelper

class NSImageTest(NSObject):
    def init(self):
        self = super(NSImageTest, self).init()
        if self is None:
            return None

        self.session = None
        self.running = True

        return self

    def captureOutput_didOutputVideoFrame_withSampleBuffer_fromConnection_(self, captureOutput,
                                                                           videoFrame, sampleBuffer,
                                                                           connection):
        self.session.stopRunning()  # I just want one frame

        # Get a bitmap representation of the frame using CoreImage and Cocoa calls
        ciimage = CIImage.imageWithCVImageBuffer_(videoFrame)
        rep = NSCIImageRep.imageRepWithCIImage_(ciimage)
        bitrep = NSBitmapImageRep.alloc().initWithCIImage_(ciimage)
        bitdata = bitrep.representationUsingType_properties_(NSBMPFileType, objc.NULL)

        # Save image to disk using Cocoa
        t0 = time.time()
        bitdata.writeToFile_atomically_("grab.bmp", False)
        t1 = time.time()
        print "Cocoa saved in %.5f seconds" % (t1-t0)

        # Save a read-only buffer of image to disk using Python
        t0 = time.time()
        bitbuf = bitdata.bytes()
        f = open("python.bmp", "w")
        f.write(bitbuf)
        f.close()
        t1 = time.time()
        print "Python saved buffer in %.5f seconds" % (t1-t0)

        # Save a string-copy of the buffer to disk using Python
        t0 = time.time()
        bitbufstr = str(bitbuf)
        f = open("python2.bmp", "w")
        f.write(bitbufstr)
        f.close()
        t1 = time.time()
        print "Python saved string in %.5f seconds" % (t1-t0)

        # Will exit on next execution of quitMainLoop_()
        self.running = False

    def quitMainLoop_(self, aTimer):
        # Stop the main loop after one frame is captured. Call rapidly from timer.
        if not self.running:
            AppHelper.stopEventLoop()

    def startImageCapture(self, aTimer):
        error = None
        print "Finding camera"

        # Create a QT Capture session
        self.session = QTKit.QTCaptureSession.alloc().init()

        # Find iSight device and open it
        dev = QTKit.QTCaptureDevice.defaultInputDeviceWithMediaType_(QTKit.QTMediaTypeVideo)
        print "Device: %s" % dev
        if not dev.open_(error):
            print "Couldn't open capture device."
            return

        # Create an input instance with the device we found and add to session
        input = QTKit.QTCaptureDeviceInput.alloc().initWithDevice_(dev)
        if not self.session.addInput_error_(input, error):
            print "Couldn't add input device."
            return

        # Create an output instance with a delegate for callbacks and add to session
        output = QTKit.QTCaptureDecompressedVideoOutput.alloc().init()
        output.setDelegate_(self)
        if not self.session.addOutput_error_(output, error):
            print "Failed to add output delegate."
            return

        # Start the capture
        print "Initiating capture..."
        self.session.startRunning()


    def main(self):
        # Callback that quits after a frame is captured
        NSTimer.scheduledTimerWithTimeInterval_target_selector_userInfo_repeats_(0.1,
                                                                                 self,
                                                                                 'quitMainLoop:',
                                                                                 None,
                                                                                 True)

        # Turn on the camera and start the capture
        self.startImageCapture(None)

        # Start Cocoa's main event loop
        AppHelper.runConsoleEventLoop(installInterrupt=True)

        print "Frame capture completed."

if __name__ == "__main__":
    test = NSImageTest.alloc().init()
    test.main()
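
If it helps, the captured frame can then be pulled back into Python for further processing. A rough sketch, assuming the Python Imaging Library (PIL) is installed (this is not part of the answer above):

# Illustrative follow-up (assumes PIL is installed): load the frame that
# camera.py wrote to disk and inspect it from Python.
from PIL import Image

img = Image.open("grab.bmp")
print "Size: %s, mode: %s" % (img.size, img.mode)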

Regarding "python - How to capture frames from the Apple iSight using Python and PyObjC?", the original question can be found on Stack Overflow: https://stackoverflow.com/questions/4892555/
