
python - Camera integration with OpenCV for Raspberry Pi 4


I am running OpenCV on a Raspberry Pi 4 Model B to do some vehicle detection. I bought an IDS camera (https://en.ids-imaging.com/download-ueye-emb-hardfloat.html), but integrating it into my code has proven too much of a hassle: cv2.VideoCapture cannot detect any device, the ueye Python library is not plug-and-play, and every example I found online for integrating it with OpenCV runs fine on my Windows PC but causes memory leaks and segmentation faults on the Pi.
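For context, the standard OpenCV capture path that fails here would look like the following generic sketch (the device index 0 is an assumption):

import cv2

cap = cv2.VideoCapture(0)   # first V4L2 device; for the uEye camera this finds nothing
if not cap.isOpened():
    print("cv2.VideoCapture could not open any device")
else:
    ret, frame = cap.read()
    cap.release()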

Are there any recommendations for a camera that integrates well with OpenCV on a Raspberry Pi 4, without the hassle of allocating memory and so on?

Added my integration attempt:

from pyueye import ueye
import numpy as np
import cv2
import sys

class Rect:
    def __init__(self, x=0, y=0, width=0, height=0):
        self.x = x
        self.y = y
        self.width = width
        self.height = height


#---------------------------------------------------------------------------------------------------------------------------------------
class UeyeCamSetup():
    # Variables
    hCam = ueye.HIDS(0)             # 0: first available camera; 1-254: the camera with the specified camera ID
    sInfo = ueye.SENSORINFO()
    cInfo = ueye.CAMINFO()
    pcImageMemory = ueye.c_mem_p()
    MemID = ueye.int()
    rectAOI = ueye.IS_RECT()
    pitch = ueye.INT()
    nBitsPerPixel = ueye.INT(24)    # 24: bits per pixel for color mode; take 8 bits per pixel for monochrome
    channels = 3                    # 3: channels for color mode (RGB); take 1 channel for monochrome
    m_nColorMode = ueye.INT()       # Y8/RGB16/RGB24/RGB32
    bytes_per_pixel = int(nBitsPerPixel / 8)
    nRet = None
    width = None
    height = None
#---------------------------------------------------------------------------------------------------------------------------------------
    def startUeye(self):

        # Starts the driver and establishes the connection to the camera
        nRet = ueye.is_InitCamera(self.hCam, None)
        if nRet != ueye.IS_SUCCESS:
            print("is_InitCamera ERROR")

        # Reads out the data hard-coded in the non-volatile camera memory and writes it to the data structure that cInfo points to
        nRet = ueye.is_GetCameraInfo(self.hCam, self.cInfo)
        if nRet != ueye.IS_SUCCESS:
            print("is_GetCameraInfo ERROR")

        # You can query additional information about the sensor type used in the camera
        nRet = ueye.is_GetSensorInfo(self.hCam, self.sInfo)
        if nRet != ueye.IS_SUCCESS:
            print("is_GetSensorInfo ERROR")

        nRet = ueye.is_ResetToDefault(self.hCam)
        if nRet != ueye.IS_SUCCESS:
            print("is_ResetToDefault ERROR")

        # Set display mode to DIB
        nRet = ueye.is_SetDisplayMode(self.hCam, ueye.IS_SET_DM_DIB)

        # Set the right color mode
        if int.from_bytes(self.sInfo.nColorMode.value, byteorder='big') == ueye.IS_COLORMODE_BAYER:
            # setup the color depth to the current windows setting
            ueye.is_GetColorDepth(self.hCam, self.nBitsPerPixel, self.m_nColorMode)
            self.bytes_per_pixel = int(self.nBitsPerPixel / 8)
            print("IS_COLORMODE_BAYER: ")
            print("\tm_nColorMode: \t\t", self.m_nColorMode)
            print("\tnBitsPerPixel: \t\t", self.nBitsPerPixel)
            print("\tbytes_per_pixel: \t", self.bytes_per_pixel)
            print()

        elif int.from_bytes(self.sInfo.nColorMode.value, byteorder='big') == ueye.IS_COLORMODE_CBYCRY:
            # for color camera models use RGB32 mode
            self.m_nColorMode = ueye.IS_CM_BGRA8_PACKED
            self.nBitsPerPixel = ueye.INT(32)
            self.bytes_per_pixel = int(self.nBitsPerPixel / 8)
            print("IS_COLORMODE_CBYCRY: ")
            print("\tm_nColorMode: \t\t", self.m_nColorMode)
            print("\tnBitsPerPixel: \t\t", self.nBitsPerPixel)
            print("\tbytes_per_pixel: \t\t", self.bytes_per_pixel)
            print()

        elif int.from_bytes(self.sInfo.nColorMode.value, byteorder='big') == ueye.IS_COLORMODE_MONOCHROME:
            # for monochrome camera models use Y8 mode
            self.m_nColorMode = ueye.IS_CM_MONO8
            self.nBitsPerPixel = ueye.INT(8)
            self.bytes_per_pixel = int(self.nBitsPerPixel / 8)
            print("IS_COLORMODE_MONOCHROME: ")
            print("\tm_nColorMode: \t\t", self.m_nColorMode)
            print("\tnBitsPerPixel: \t\t", self.nBitsPerPixel)
            print("\tbytes_per_pixel: \t\t", self.bytes_per_pixel)
            print()

        else:
            # fall back to Y8 (monochrome) mode
            self.m_nColorMode = ueye.IS_CM_MONO8
            self.nBitsPerPixel = ueye.INT(8)
            self.bytes_per_pixel = int(self.nBitsPerPixel / 8)
            print("else")

        # Can be used to set the size and position of an "area of interest" (AOI) within an image
        nRet = ueye.is_AOI(self.hCam, ueye.IS_AOI_IMAGE_GET_AOI, self.rectAOI, ueye.sizeof(self.rectAOI))
        if nRet != ueye.IS_SUCCESS:
            print("is_AOI ERROR")

        self.width = self.rectAOI.s32Width
        self.height = self.rectAOI.s32Height

        # Prints out some information about the camera and the sensor
        print("Camera model:\t\t", self.sInfo.strSensorName.decode('utf-8'))
        print("Camera serial no.:\t", self.cInfo.SerNo.decode('utf-8'))
        print("Maximum image width:\t", self.width)
        print("Maximum image height:\t", self.height)
        print()

        #---------------------------------------------------------------------------------------------------------------------------------------

        # Allocates an image memory for an image having its dimensions defined by width and height and its color depth defined by nBitsPerPixel
        nRet = ueye.is_AllocImageMem(self.hCam, self.width, self.height, self.nBitsPerPixel, self.pcImageMemory, self.MemID)
        if nRet != ueye.IS_SUCCESS:
            print("is_AllocImageMem ERROR")
        else:
            # Makes the specified image memory the active memory
            nRet = ueye.is_SetImageMem(self.hCam, self.pcImageMemory, self.MemID)
            if nRet != ueye.IS_SUCCESS:
                print("is_SetImageMem ERROR")
            else:
                # Set the desired color mode
                nRet = ueye.is_SetColorMode(self.hCam, self.m_nColorMode)

        # Activates the camera's live video mode (free run mode)
        nRet = ueye.is_CaptureVideo(self.hCam, ueye.IS_DONT_WAIT)
        if nRet != ueye.IS_SUCCESS:
            print("is_CaptureVideo ERROR")

        # Enables the queue mode for existing image memory sequences
        # nRet = ueye.is_InquireImageMem(self.hCam, self.pcImageMemory, self.MemID, self.width, self.height, self.nBitsPerPixel, self.pitch)
        # if nRet != ueye.IS_SUCCESS:
        #     print("is_InquireImageMem ERROR")
        # else:
        #     print("Press ESC to leave the programm")
        self.nRet = nRet

    def readOpenCVImg(self):
        if self.nRet == ueye.IS_SUCCESS:

            # In order to display the image in an OpenCV window we need to...
            # ...extract the data of our image memory
            array = ueye.get_data(self.pcImageMemory, self.width, self.height, self.nBitsPerPixel, self.pitch, copy=False)

            # bytes_per_pixel = int(nBitsPerPixel / 8)

            # ...reshape it into a numpy array...
            frame = np.reshape(array, (self.height.value, self.width.value, self.bytes_per_pixel))

            # ...resize the image by a half
            return cv2.resize(frame, (0, 0), fx=0.5, fy=0.5)
#---------------------------------------------------------------------------------------------------------------------------------------

    def releaseCam(self):
        # Releases an image memory that was allocated using is_AllocImageMem() and removes it from the driver management
        ueye.is_FreeImageMem(self.hCam, self.pcImageMemory, self.MemID)

        # Disables the hCam camera handle and releases the data structures and memory areas taken up by the uEye camera
        ueye.is_ExitCamera(self.hCam)

        print()
        print("END")

I have also tried these open-source resources to no avail: https://en.ids-imaging.com/techtipps-detail/en_techtip-embedded-vision-kit.html and https://en.ids-imaging.com/store/customer/account/login/referer/aHR0cHM6Ly9lbi5pZHMtaW1hZ2luZy5jb20vcHJvZ3JhbW1pbmctZXhhbXBsZXMuaHRtbA

Best Answer

When ueye.get_data is called, pitch is 0, so the resulting numpy array has size 0. You can obtain the pitch by calling ueye.is_InquireImageMem:

x = ueye.int()
y = ueye.int()
bits = ueye.int()
pitch = ueye.int()
# Query the dimensions, bit depth and line pitch of the allocated image memory;
# the driver fills in pitch, which was left at 0 in the original code
ueye.is_InquireImageMem(self.hCam, self.pcImageMemory, self.MemID, x, y, bits, pitch)

# Pass the queried pitch to get_data instead of the never-initialised self.pitch
array = ueye.get_data(self.pcImageMemory, self.width, self.height, self.nBitsPerPixel, pitch, copy=False)

I tried this code with ueye 4.95 and it works.
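
Folded back into the questioner's class, the fixed readOpenCVImg method could look roughly like this (a sketch based on the accepted fix; the rest of the class is assumed unchanged):

def readOpenCVImg(self):
    if self.nRet == ueye.IS_SUCCESS:
        # Query the real line pitch of the allocated image memory
        x = ueye.int()
        y = ueye.int()
        bits = ueye.int()
        pitch = ueye.int()
        ueye.is_InquireImageMem(self.hCam, self.pcImageMemory, self.MemID, x, y, bits, pitch)

        # Extract the image memory contents using the queried pitch
        array = ueye.get_data(self.pcImageMemory, self.width, self.height, self.nBitsPerPixel, pitch, copy=False)

        # Reshape into a numpy array and downscale by half
        frame = np.reshape(array, (self.height.value, self.width.value, self.bytes_per_pixel))
        return cv2.resize(frame, (0, 0), fx=0.5, fy=0.5)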

Regarding python - Camera integration with OpenCV for Raspberry Pi 4, we found a similar question on Stack Overflow: https://stackoverflow.com/questions/60139711/
