
python - How to receive a byte stream with GStreamer using the Python subprocess module or the gst-launch-1.0 command?


I want to receive a byte stream by running GStreamer through the Python subprocess module.
Right now I can pull the byte stream successfully with ffmpeg, as shown below.

import cv2
import numpy as np
import subprocess as sp


height = 714
width = 420
rtsp_url = 'rtsp://127.0.0.1:8554/video'

# ffmpeg command: decode the RTSP stream and write raw BGR frames to stdout
command = ['ffmpeg',
'-i', rtsp_url,
'-f', 'rawvideo',
'-s', '{}x{}'.format(width, height),
'-pix_fmt', 'bgr24',
'-fflags', 'nobuffer',
'-']

p = sp.Popen(command, stdout=sp.PIPE, bufsize=10**8)

while True:
    raw_image = p.stdout.read(width*height*3)  # one BGR frame = width*height*3 bytes
    image = np.frombuffer(raw_image, dtype='uint8')
    image = image.reshape((height, width, 3)).copy()
    cv2.imshow('image', image)
    key = cv2.waitKey(20)
I want to use a GStreamer command instead of ffmpeg. So far, I have managed to write the byte stream to a file with the gst-launch-1.0 command line:
gst-launch-1.0 rtspsrc location=rtsp://127.0.0.1:8554/video latency=0 drop-on-latency=true ! rtph264depay ! video/x-h264, stream-format='byte-stream' ! filesink location=/home/name/stdout
But it does not write the byte stream to a pipe, so the terminal shows no byte stream, unlike the ffmpeg command. How can I change this command so that it sends the byte stream through a pipe that I can read from?
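
A minimal sketch of one possible direction, assuming fdsink (which writes to its file descriptor, stdout by default) can take the place of filesink; the answer below uses fdsink in the same way:

import subprocess as sp
import shlex

rtsp_url = 'rtsp://127.0.0.1:8554/video'

# fdsink writes to fd 1 (stdout) by default, so the depayloaded H.264 byte-stream
# can be read from p.stdout; --quiet keeps GStreamer's own messages out of the data.
cmd = (f'gst-launch-1.0 --quiet rtspsrc location={rtsp_url} latency=0 drop-on-latency=true '
       '! rtph264depay ! video/x-h264, stream-format=byte-stream ! fdsink')
p = sp.Popen(shlex.split(cmd), stdout=sp.PIPE)

chunk = p.stdout.read(4096)  # raw (still encoded) H.264 bytes, not decoded frames
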
Thank you for taking the time to answer!
Here is the RTSP streaming code.
import cv2
import time
import subprocess as sp
import numpy as np


rtsp_url = 'rtsp://127.0.0.1:8554/video'
video_path = r'test.mp4'
cap = cv2.VideoCapture(video_path)

# Get video information
fps = int(cap.get(cv2.CAP_PROP_FPS))
width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
print('fps={}'.format(fps))

# command
command = ['ffmpeg',
'-re',
'-y',
'-stream_loop', '-1',
'-f', 'rawvideo',
'-vcodec', 'rawvideo',
'-pix_fmt', 'bgr24',
'-s', "{}x{}".format(width, height),
'-r', str(fps),
'-i', '-',
'-c:v', 'libx264',
'-pix_fmt', 'yuv420p',
'-preset', 'ultrafast',
# '-flags2', 'local_header',
'-bsf:v', 'dump_extra=freq=k',  # re-insert SPS/PPS extradata at keyframes
'-keyint_min', '60',
'-g', '60',
'-sc_threshold', '0',
'-f', 'rtsp',
'-rtsp_transport', 'tcp',
'-muxdelay', '0.1',
rtsp_url]

p = sp.Popen(command, stdin=sp.PIPE)

cnt = 0
t_start = time.time()
while cap.isOpened():
    t_cur = time.time() - t_start

    ret, frame = cap.read()
    if not ret:
        # Reopen the file to loop the video
        cnt += 1
        print("count: {}".format(cnt))
        cap = cv2.VideoCapture(video_path)
        continue

    p.stdin.write(frame.tobytes())

    cv2.imshow('real_time', frame)

    key = cv2.waitKey(20)
    if key == 27:  # Esc stops streaming
        p.terminate()
        break

Best answer

I managed to create a sample that works on Linux.
I could not simulate an RTSP camera, so I used an MP4 file as input.
Create the MP4 input file (for testing) with the FFmpeg CLI from Python:

sp.run(shlex.split(f'ffmpeg -y -f lavfi -i testsrc=size={width}x{height}:rate=25:duration=100 -vcodec libx264 -pix_fmt yuv420p {input_file_name}'))
The GStreamer command is:

p = sp.Popen(shlex.split(f'{gstreamer_exe} --quiet filesrc location={input_file_name} ! qtdemux ! video/x-h264 ! avdec_h264 ! videoconvert ! capsfilter caps="video/x-raw, format=BGR" ! filesink location={stdout_file_name}'), stdout=sp.PIPE)
  • --quiet is used because GStreamer prints its own messages to stdout, which would otherwise mix with the video data.
  • filesrc location=... reads the MP4 input; replace it with an RTSP pipeline for a camera.
  • videoconvert ! capsfilter caps="video/x-raw, format=BGR" converts the video to raw BGR format, so one frame is exactly width*height*3 bytes (see the sketch after this list).
  • filesink location=/dev/stdout redirects the output to stdout (on Linux).
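
  Since the caps force raw BGR, one frame on the pipe is exactly width*height*3 bytes. A minimal sketch of that read logic (read_bgr_frame is a hypothetical helper name):

    import numpy as np

    def read_bgr_frame(pipe, width, height):
        # One raw BGR frame is width * height * 3 bytes (3 bytes per pixel).
        frame_size = width * height * 3
        raw = pipe.read(frame_size)
        if len(raw) < frame_size:
            return None  # stream ended (or was cut mid-frame)
        return np.frombuffer(raw, np.uint8).reshape((height, width, 3))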

    Code sample:
    import cv2
    import numpy as np
    import subprocess as sp
    import shlex
    from sys import platform

    width = 714
    height = 420

    input_file_name = 'input.mp4' # For testing, use MP4 input file instead of RTSP input.

    # Build MP4 synthetic input video file for testing:
    sp.run(shlex.split(f'ffmpeg -y -f lavfi -i testsrc=size={width}x{height}:rate=25:duration=100 -vcodec libx264 -pix_fmt yuv420p {input_file_name}'))

    if platform == "win32":
        # stdout_file_name = "con:"
        # gstreamer_exe = 'c:/gstreamer/1.0/msvc_x86_64/bin/gst-launch-1.0.exe'
        raise Exception('win32 system is not supported')
    else:
        stdout_file_name = "/dev/stdout"
        gstreamer_exe = 'gst-launch-1.0'

    # https://stackoverflow.com/questions/29794053/streaming-mp4-video-file-on-gstreamer
    p = sp.Popen(shlex.split(f'{gstreamer_exe} --quiet filesrc location={input_file_name} ! qtdemux ! video/x-h264 ! avdec_h264 ! videoconvert ! capsfilter caps="video/x-raw, format=BGR" ! filesink location={stdout_file_name}'), stdout=sp.PIPE)

    while True:
        raw_image = p.stdout.read(width * height * 3)

        if len(raw_image) < width*height*3:
            break

        image = np.frombuffer(raw_image, dtype='uint8').reshape((height, width, 3))
        cv2.imshow('image', image)
        key = cv2.waitKey(1)

    p.stdout.close()
    p.wait()
    cv2.destroyAllWindows()

    Update:
    Based on your new question, I managed to create an RTSP capture sample:
    import cv2
    import numpy as np
    import subprocess as sp
    import shlex

    width = 240
    height = 160

    rtsp_url = 'rtsp://wowzaec2demo.streamlock.net/vod/mp4:BigBuckBunny_115k.mp4' # For testing, use public RTSP input.

    gstreamer_exe = 'gst-launch-1.0' # '/usr/bin/gst-launch-1.0'

    # https://stackoverflow.com/questions/29794053/streaming-mp4-video-file-on-gstreamer
    p = sp.Popen(shlex.split(f'{gstreamer_exe} --quiet rtspsrc location={rtsp_url} ! queue2 ! rtph264depay ! avdec_h264 ! videoconvert ! capsfilter caps="video/x-raw, format=BGR" ! fdsink'), stdout=sp.PIPE)

    while True:
        raw_image = p.stdout.read(width * height * 3)

        if len(raw_image) < width*height*3:
            break

        image = np.frombuffer(raw_image, np.uint8).reshape((height, width, 3))
        cv2.imshow('image', image)
        key = cv2.waitKey(1)

    p.stdout.close()
    p.wait()
    cv2.destroyAllWindows()
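
    The sample hard-codes width and height for the public test stream. A minimal sketch for probing them at runtime instead, reusing the cv2.VideoCapture pattern from the question (this assumes OpenCV can open the RTSP URL directly):

    import cv2

    rtsp_url = 'rtsp://wowzaec2demo.streamlock.net/vod/mp4:BigBuckBunny_115k.mp4'

    # Open the stream once just to read its frame size, then release it.
    probe = cv2.VideoCapture(rtsp_url)
    width = int(probe.get(cv2.CAP_PROP_FRAME_WIDTH))
    height = int(probe.get(cv2.CAP_PROP_FRAME_HEIGHT))
    probe.release()
    print('probed size: {}x{}'.format(width, height))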


    Regarding "python - How to receive a byte stream with GStreamer using the Python subprocess module or the gst-launch-1.0 command?", a similar question can be found on Stack Overflow: https://stackoverflow.com/questions/71907857/
