gpt4 book ai didi

python - 使用python-opencv进行质心跟踪以进行背景子跟踪

转载 作者:行者123 更新时间:2023-12-02 17:24:33 45 4
gpt4 key购买 nike

我正在使用backgoundsubtraction进行对象定位,并且将本教程https://www.pyimagesearch.com/2018/07/23/simple-object-tracking-with-opencv/应用于实时流视频,并正确输出了视频。但是当我给出图像帧序列时,所有ID都将被打印在新图像上,并且旧ID和OLD ID不会被删除The current img output

下一帧将获得新的ID,并且先前的ID也将被打印在其上
我该如何解决

def detection():
backsub = cv2. createBackgroundSubtractorMOG2(128, cv2.THRESH_BINARY, 1)
minarea = 50
counter = 0
counter = 0
counter1 = 0
ct = CentroidTracker()
rects = []
#cx = 0
#cy = 0
(H, W) = (None, None)
filenames = [img for img in glob.glob("img location/*.jpg")]
filenames.sort()
print("start2")
for img in filenames:
frame = cv2.imread(img)

t = time.localtime()
timestamp = int(round(time.time() * 1000))
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
blur = cv2.medianBlur(frame, 21)
#blur = cv2.GaussianBlur(frame, (21,21), 0)
fgmask = backsub.apply(blur)
fgmask[fgmask==127] = 0
thresh = cv2.threshold(fgmask, 25, 255, cv2.THRESH_BINARY)[1]
thresh = cv2.dilate(thresh, None, iterations=4)
#cv2.line(frame, (138, 265), (344, 640), (0, 255, 0),1)
cv2.line(frame, (103, 209), (332, 607), (0, 255, 0),1)

#pts = np.array([[0, 640], [0, 264], [138, 265], [344, 640]], np.int32)
pts = np.array([[0, 607], [0, 215], [103, 209], [332, 607]], np.int32)

#bbPath = mplPath.Path(np.array([[0, 640], [0, 264], [138, 265], [344, 640]]))
bbPath = mplPath.Path(np.array([[0, 607], [0, 215], [103, 209], [332, 607]]))

frame = cv2.polylines(frame, [pts], True, (0, 255,0))
cnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
cnts = imutils.grab_contours(cnts)
for c in cnts:
M = cv2.moments(c)
A = cv2.contourArea(c)
if cv2.contourArea(c) >= minarea:
(x, y, w, h) = cv2.boundingRect(c)
cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
rectang = [x,y,(x + w),(y + h)]

rects.append(rectang)

if M["m00"] != 0:
cx = int(M["m10"] / M["m00"])
cy = int(M["m01"] / M["m00"])
cv2.circle(frame, (cx, cy),5, (255, 255, 255), -1)
centroid1 = (cx, cy)

cv2.imwrite("Writelocation/I%s.jpg"%timestamp,frame)

print("Area is : ",A)
objects = ct.update(rects)
if object is not None:
for (objectID, centroid) in objects.items():
# draw both the ID of the object and the centroid of the
# object on the output frame
text = "ID:{}".format(objectID)
cv2.putText(frame, text, (centroid[0], centroid[1] - 10),
cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
cv2.circle(frame, (centroid[0], centroid[1]), 4, (0, 255, 0), -1)
cv2.imshow("fgmask", thresh)
cv2.imshow("img",frame)

if cv2.waitKey(1) & 0xFF == ord('q'):
break
cv2.imwrite("Writelocation/V%s.jpg"%timestamp,frame)


print("Done")
detection()

上面是BackgroundSubtraction pgm
以下是质心跟踪pgm
    from scipy.spatial import distance as dist
from collections import OrderedDict
import numpy as np

class CentroidTracker():
def __init__(self, maxDisappeared=3):
self.nextObjectID = 0
self.objects = OrderedDict()
self.disappeared = OrderedDict()

self.maxDisappeared = maxDisappeared

def register(self, centroid):
self.objects[self.nextObjectID] = centroid
self.disappeared[self.nextObjectID] = 0
self.nextObjectID += 1

def deregister(self, objectID):
del self.objects[objectID]
del self.disappeared[objectID]

def update(self, rects):
if len(rects) == 0:
for objectID in list(self.disappeared.keys()):
self.disappeared[objectID] += 1

if self.disappeared[objectID] > self.maxDisappeared:
self.deregister(objectID)

return self.objects

inputCentroids = np.zeros((len(rects), 2), dtype="int")

for (i, (startX, startY, endX, endY)) in enumerate(rects):
cX = int((startX + endX) / 2.0)
cY = int((startY + endY) / 2.0)
inputCentroids[i] = (cX, cY)

if len(self.objects) == 0:
for i in range(0, len(inputCentroids)):
self.register(inputCentroids[i])

else:
objectIDs = list(self.objects.keys())
objectCentroids = list(self.objects.values())

D = dist.cdist(np.array(objectCentroids), inputCentroids)

rows = D.min(axis=1).argsort()

cols = D.argmin(axis=1)[rows]

usedRows = set()
usedCols = set()

for (row, col) in zip(rows, cols):
if row in usedRows or col in usedCols:
continue

objectID = objectIDs[row]
self.objects[objectID] = inputCentroids[col]
self.disappeared[objectID] = 0

usedRows.add(row)
usedCols.add(col)

unusedRows = set(range(0, D.shape[0])).difference(usedRows)
unusedCols = set(range(0, D.shape[1])).difference(usedCols)

if D.shape[0] >= D.shape[1]:
for row in unusedRows:
objectID = objectIDs[row]
self.disappeared[objectID] += 1

if self.disappeared[objectID] > self.maxDisappeared:
self.deregister(objectID)

else:
for col in unusedCols:
self.register(inputCentroids[col])

return self.objects

最佳答案

尝试将rects = []行放在第一个for循环内的detection()中,即在for img in filenames:行之后

关于python - 使用python-opencv进行质心跟踪以进行背景子跟踪,我们在Stack Overflow上找到一个类似的问题: https://stackoverflow.com/questions/60183243/

45 4 0
Copyright 2021 - 2024 cfsdn All Rights Reserved 蜀ICP备2022000587号
广告合作:1813099741@qq.com 6ren.com