
Python gets stuck at pipe.stdin.write(image.tostring())


I am reading each frame of a video and adding a timestamp to it, as shown below.

command = ['ffmpeg',
           '-y',                 # (optional) overwrite output file if it exists
           '-f', 'rawvideo',     # input is raw video
           '-pix_fmt', 'bgr24',  # raw video format
           '-s', str(int(width)) + 'x' + str(int(height)),  # size of one frame
           '-i', '-',            # the input comes from a pipe
           '-an',                # tells FFmpeg not to expect any audio
           '-vcodec', 'mpeg4',
           '-b:v', '10M',        # sets a maximum bit rate
           Output_name]

# Open the pipe
pipe = sp.Popen(command, stdin=sp.PIPE, stderr=sp.PIPE)

print('Processing....')
print(' ')
#Reads through each frame, calculates the timestamp, places it on the frame and exports the frame to the output video.
#import pdb
#pdb.set_trace()
while current_frame < total_frames:
    success, image = video.read()
    if success:
        elapsed_time = video.get(cv2.CAP_PROP_POS_MSEC)
        current_frame = video.get(cv2.CAP_PROP_POS_FRAMES)
        timestamp = initial + dt.timedelta(microseconds=elapsed_time*1000)
        cv2.putText(image, 'Date: ' + str(timestamp)[0:10], (50, int(height-150)),
                    cv2.FONT_HERSHEY_COMPLEX_SMALL, 2, (255, 255, 255), 3)
        cv2.putText(image, 'Time: ' + str(timestamp)[11:-4], (50, int(height-100)),
                    cv2.FONT_HERSHEY_COMPLEX_SMALL, 2, (255, 255, 255), 3)
        pipe.stdin.write(image.tostring())
        print('frame number', current_frame)
    else:
        print('video reader fail')

video.release()
pipe.stdin.close()
pipe.stderr.close()

However, after roughly 18k frames, Python gets stuck on pipe.stdin.write(image.tostring()). It does not raise any error, it just hangs. How can I fix this?

Thanks in advance.

Best answer

I think I have solved the mystery:

The stderr buffer fills up, and the process gets stuck.
I managed to reproduce the problem under Windows 10.

  • FFmpeg writes status information to stderr from time to time.
  • You are using stderr=sp.PIPE, but you are not reading the data from stderr.
  • After many frames have been encoded, the stderr buffer fills up and the process gets stuck.

You can remove stderr=sp.PIPE, or make sure to read the data from stderr.

Reading the data from stderr can be done in a separate thread:

# Read from pipe.stderr to "drain the pipe"
def drain_stderr():
    while True:
        try:
            stderr_output = pipe.stderr.readline()
        except:
            pass
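
If you do not need FFmpeg's log output at all, the first option is even simpler: do not capture stderr in the first place, so there is no buffer that can fill up. A minimal sketch of that variant (not part of the original answer; it reuses the command list defined above and the standard subprocess.DEVNULL):

import subprocess as sp

# Discard FFmpeg's status messages instead of piping them,
# so no stderr buffer can fill up and block the encoder.
pipe = sp.Popen(command, stdin=sp.PIPE, stderr=sp.DEVNULL)

Keeping stderr=sp.PIPE together with a drain thread, as in the full example below, is preferable when you still want to be able to inspect FFmpeg's log messages.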

I created a "self-contained" code sample that generates a synthetic video file and then executes the code using the synthetic video as input.

Here is the complete test code sample:

import numpy as np
import cv2
import subprocess as sp
import threading
import datetime as dt

# Generate a synthetic video file - resolution 640x480, 30000 frames, 1 fps,
# H.264 encoded video (for testing):
#########################################################################
input_name = 'test.mp4'
width, height = 640, 480
total_frames = 30000
sp.run('ffmpeg -y -f lavfi -i testsrc=size={}x{}:rate=1 -vcodec libx264 -crf 23 -t {} {}'.format(width, height, total_frames, input_name))
#########################################################################


# Read from pipe.stderr to "drain the pipe"
def drain_stderr():
    while keep_drain_stderr:
        try:
            stderr_output = pipe.stderr.readline()
        except:
            pass


Output_name = 'out.mp4'

command = ['ffmpeg',
           '-y',                 # (optional) overwrite output file if it exists
           '-f', 'rawvideo',     # input is raw video
           '-pix_fmt', 'bgr24',  # raw video format
           '-s', str(int(width)) + 'x' + str(int(height)),  # size of one frame
           '-i', '-',            # the input comes from a pipe
           '-an',                # tells FFmpeg not to expect any audio
           '-vcodec', 'mpeg4',
           '-b:v', '10M',        # sets a maximum bit rate
           Output_name]

# Open the pipe
pipe = sp.Popen(command, stdin=sp.PIPE, stderr=sp.PIPE)

keep_drain_stderr = True
thread = threading.Thread(target=drain_stderr)
thread.start()


# Open video file for reading
video = cv2.VideoCapture(input_name)

print('Processing....')
print(' ')

# Read through each frame, calculate the timestamp, place it on the frame
# and export the frame to the output video.
#import pdb
#pdb.set_trace()
initial = dt.timedelta(microseconds=0*1000)
current_frame = 0
while current_frame < total_frames:
    success, image = video.read()
    if success:
        elapsed_time = video.get(cv2.CAP_PROP_POS_MSEC)
        current_frame = video.get(cv2.CAP_PROP_POS_FRAMES)
        timestamp = initial + dt.timedelta(microseconds=elapsed_time*1000)
        cv2.putText(image, 'Date: ' + str(timestamp)[0:10], (50, int(height-150)),
                    cv2.FONT_HERSHEY_COMPLEX_SMALL, 2, (255, 255, 255), 3)
        cv2.putText(image, 'Time: ' + str(timestamp)[11:-4], (50, int(height-100)),
                    cv2.FONT_HERSHEY_COMPLEX_SMALL, 2, (255, 255, 255), 3)
        pipe.stdin.write(image.tostring())

        print('frame number', current_frame)
    else:
        print('video reader fail')

keep_drain_stderr = False
video.release()
pipe.stdin.close()
pipe.stderr.close()

# Wait up to 3 seconds before killing FFmpeg
try:
    pipe.wait(3)
except sp.TimeoutExpired:
    pipe.kill()

thread.join()
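
As a side note, on recent NumPy versions image.tostring() is deprecated in favour of image.tobytes(); both return the same raw frame bytes, so pipe.stdin.write(image.tobytes()) works as a drop-in replacement.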

Regarding "Python gets stuck at pipe.stdin.write(image.tostring())", there is a similar question on Stack Overflow: https://stackoverflow.com/questions/60606499/
