
python - OpenCV streaming is too slow


I retrained my model with TensorFlow following the "TensorFlow for Poets" (Inception) tutorial. The prediction takes 0.4 seconds and the sorting takes 2 seconds. Because this takes so long, the video frames lag and get jumbled while a prediction is running. Even though the prediction itself takes time, is there any way to keep the displayed frames smooth?
Here is my code...

import os
import timeit

import cv2
import numpy as np
import tensorflow as tf

camera = cv2.VideoCapture(0)

# Loads label file, strips off carriage return
label_lines = [line.rstrip() for line
               in tf.gfile.GFile('retrained_labels.txt')]

def grabVideoFeed():
    grabbed, frame = camera.read()
    return frame if grabbed else None

def initialSetup():
    os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
    start_time = timeit.default_timer()

    # This takes 2-5 seconds to run
    # Unpersists graph from file
    with tf.gfile.FastGFile('retrained_graph.pb', 'rb') as f:
        graph_def = tf.GraphDef()
        graph_def.ParseFromString(f.read())
        tf.import_graph_def(graph_def, name='')

    print 'Took {} seconds to unpersist the graph'.format(timeit.default_timer() - start_time)

initialSetup()

with tf.Session() as sess:
    start_time = timeit.default_timer()

    # Feed the image_data as input to the graph and get first prediction
    softmax_tensor = sess.graph.get_tensor_by_name('final_result:0')

    print 'Took {} seconds to feed data to graph'.format(timeit.default_timer() - start_time)

    while True:
        frame = grabVideoFeed()

        if frame is None:
            raise SystemError('Issue grabbing the frame')

        frame = cv2.resize(frame, (299, 299), interpolation=cv2.INTER_CUBIC)

        cv2.imshow('Main', frame)

        # Adhere to the TF graph input structure
        numpy_frame = np.asarray(frame)
        numpy_frame = cv2.normalize(numpy_frame.astype('float'), None, -0.5, .5, cv2.NORM_MINMAX)
        numpy_final = np.expand_dims(numpy_frame, axis=0)

        start_time = timeit.default_timer()

        # This takes 2-5 seconds as well
        predictions = sess.run(softmax_tensor, {'Mul:0': numpy_final})

        print 'Took {} seconds to perform prediction'.format(timeit.default_timer() - start_time)

        start_time = timeit.default_timer()

        # Sort to show labels of first prediction in order of confidence
        top_k = predictions[0].argsort()[-len(predictions[0]):][::-1]

        print 'Took {} seconds to sort the predictions'.format(timeit.default_timer() - start_time)

        for node_id in top_k:
            human_string = label_lines[node_id]
            score = predictions[0][node_id]
            print('%s (score = %.5f)' % (human_string, score))

        print '********* Session Ended *********'

        if cv2.waitKey(1) & 0xFF == ord('q'):
            sess.close()
            break

camera.release()
cv2.destroyAllWindows()

Best Answer

@dat-tran is right: even Faster R-CNN will still lag a little. You can use YOLO or SSD models, which run without noticeable lag; I have used YOLO and it works well.
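As a rough illustration of the YOLO route (not part of the original answer), a single-frame detection loop with OpenCV's dnn module might look like the sketch below. The file names yolov3.cfg and yolov3.weights are placeholder paths for a model you would download separately, and the 0.5 confidence threshold is an arbitrary choice.

# Hypothetical sketch: per-frame YOLO inference with OpenCV's dnn module.
# 'yolov3.cfg' and 'yolov3.weights' are placeholder paths, not files from the answer.
import cv2
import numpy as np

net = cv2.dnn.readNetFromDarknet('yolov3.cfg', 'yolov3.weights')
output_layers = net.getUnconnectedOutLayersNames()  # names of the YOLO output layers

cap = cv2.VideoCapture(0)

while True:
    grabbed, frame = cap.read()
    if not grabbed:
        break

    # YOLOv3 expects a 416x416 blob scaled to [0, 1] with RGB channel order
    blob = cv2.dnn.blobFromImage(frame, 1 / 255.0, (416, 416), swapRB=True, crop=False)
    net.setInput(blob)
    outputs = net.forward(output_layers)

    # Each detection row is [cx, cy, w, h, objectness, class scores...]
    for output in outputs:
        for detection in output:
            scores = detection[5:]
            class_id = int(np.argmax(scores))
            confidence = float(scores[class_id])
            if confidence > 0.5:
                print('class %d (score = %.5f)' % (class_id, confidence))

    cv2.imshow('YOLO', frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

cap.release()
cv2.destroyAllWindows()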

For the queue and multiprocessing approach, you can use the following code.

import argparse
import os
import timeit
from multiprocessing import Queue, Pool

import cv2
import numpy as np
import tensorflow as tf

from utils import FPS, WebcamVideoStream  # threaded capture helpers (not shown; see the sketch below)

# The labels are needed inside the worker process as well
label_lines = [line.rstrip() for line in tf.gfile.GFile('retrained_labels.txt')]

def worker(input_q, output_q):
    os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
    start_time = timeit.default_timer()

    # This takes 2-5 seconds to run
    # Unpersists graph from file
    graph = tf.Graph()
    with graph.as_default():
        graph_def = tf.GraphDef()
        with tf.gfile.FastGFile('retrained_graph.pb', 'rb') as f:
            graph_def.ParseFromString(f.read())
            tf.import_graph_def(graph_def, name='')

        sess = tf.Session(graph=graph)

    softmax_tensor = sess.graph.get_tensor_by_name('final_result:0')

    fps = FPS().start()
    while True:
        fps.update()
        frame = input_q.get()
        numpy_frame = np.asarray(frame)
        numpy_frame = cv2.normalize(numpy_frame.astype('float'), None, -0.5, .5, cv2.NORM_MINMAX)
        numpy_final = np.expand_dims(numpy_frame, axis=0)

        start_time = timeit.default_timer()

        # This takes 2-5 seconds as well
        predictions = sess.run(softmax_tensor, {'Mul:0': numpy_final})

        print 'Took {} seconds to perform prediction'.format(timeit.default_timer() - start_time)

        start_time = timeit.default_timer()

        # Sort to show labels of first prediction in order of confidence
        top_k = predictions[0].argsort()[-len(predictions[0]):][::-1]

        print 'Took {} seconds to sort the predictions'.format(timeit.default_timer() - start_time)

        for node_id in top_k:
            human_string = label_lines[node_id]
            score = predictions[0][node_id]
            print('%s (score = %.5f)' % (human_string, score))

        output_q.put(frame)

    fps.stop()
    sess.close()

if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--width', type=int, default=480)   # default sizes are placeholders
    parser.add_argument('--height', type=int, default=360)
    args = parser.parse_args()

    input_q = Queue(maxsize=10)
    output_q = Queue(maxsize=10)

    # A pool with a single worker process runs the TensorFlow session in the
    # background, so the capture/display loop below never blocks on inference
    pool = Pool(1, worker, (input_q, output_q))

    video_capture = WebcamVideoStream(src=0,
                                      width=args.width,
                                      height=args.height).start()

    fps = FPS().start()

    while video_capture.isOpened():
        _, frame = video_capture.read()
        input_q.put(frame)
        cv2.namedWindow('Image', cv2.WINDOW_NORMAL)
        cv2.resizeWindow('Image', 600, 600)
        cv2.imshow('Image', output_q.get())
        fps.update()
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    fps.stop()
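The FPS and WebcamVideoStream helpers above come from an external utils module that is not included in the answer. If you do not have that file, a minimal stand-in with the interface used above (a camera reader running on a background thread, plus a simple frame counter) could look like the sketch below; the class and method names mirror how they are called in the answer's main loop and are otherwise assumptions. The background thread serves the same purpose as the multiprocessing queue: frame grabbing and display never wait on the slow part.

# Hypothetical stand-in for the external utils module (not the answer author's code).
import time
from threading import Thread

import cv2

class WebcamVideoStream:
    def __init__(self, src=0, width=480, height=360):
        self.stream = cv2.VideoCapture(src)
        self.stream.set(cv2.CAP_PROP_FRAME_WIDTH, width)
        self.stream.set(cv2.CAP_PROP_FRAME_HEIGHT, height)
        self.grabbed, self.frame = self.stream.read()
        self.stopped = False

    def start(self):
        # Daemon thread keeps grabbing frames so read() never blocks on the camera
        t = Thread(target=self.update, args=())
        t.daemon = True
        t.start()
        return self

    def update(self):
        while not self.stopped:
            self.grabbed, self.frame = self.stream.read()

    def read(self):
        # Returns the (grabbed, frame) pair expected by the answer's main loop
        return self.grabbed, self.frame

    def isOpened(self):
        return self.stream.isOpened()

    def stop(self):
        self.stopped = True
        self.stream.release()

class FPS:
    def __init__(self):
        self._start = None
        self._end = None
        self._frames = 0

    def start(self):
        self._start = time.time()
        return self

    def update(self):
        self._frames += 1

    def stop(self):
        self._end = time.time()

    def fps(self):
        return self._frames / (self._end - self._start)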

Regarding python - OpenCV streaming is too slow, there is a similar question on Stack Overflow: https://stackoverflow.com/questions/45409937/
