gpt4 book ai didi

python - 如何使用 ffmpeg gpu 编码将帧从视频保存到内存?

转载 作者:行者123 更新时间:2023-12-04 22:46:24 47 4
gpt4 key购买 nike

我正在尝试从视频中提取帧并将它们保存到内存 (ram)。使用 CPU 编码,我没有任何问题:

ffmpeg -i input -s 224x224 -pix_fmt bgr24 -vcodec rawvideo -an -sn -f image2pipe -

但是,当我尝试使用某些 NVIDIA GPU 编码时,我总是得到嘈杂的图像。我尝试使用不同的命令,但在 Windows 和 Ubuntu 上结果总是一样。

ffmpeg -hwaccel cuda -i 12.mp4 -s 224x224 -f image2pipe - -vcodec rawvideo

在磁盘上保存 JPG,我没有任何问题。

ffmpeg -hwaccel cuvid -c:v h264_cuvid -resize 224x224 -i {input_video} \
-vf thumbnail_cuda=2,hwdownload,format=nv12 {output_dir}/%d.jpg

我的 python 代码用于测试这些命令:

import cv2
import subprocess as sp
import numpy

# Frame size produced by ffmpeg's '-s 224x224' scaler.
IMG_W = 224
IMG_H = 224
input = '12.mp4'

# CPU decoding: ffmpeg writes raw BGR24 frames to stdout ('-').
# '-an' / '-sn' drop the audio and subtitle streams.
ffmpeg_cmd = ['ffmpeg', '-i', input, '-s', '224x224', '-pix_fmt', 'bgr24',
              '-vcodec', 'rawvideo', '-an', '-sn', '-f', 'image2pipe', '-']

# GPU variant that produced noisy images (kept for reference):
# ffmpeg_cmd = ['ffmpeg', '-hwaccel', 'cuda', '-i', '12.mp4', '-s', '224x224',
#               '-f', 'image2pipe', '-', '-vcodec', 'rawvideo']

# Fix: the original passed bufsize=10, which is far too small for raw frames;
# leaving bufsize at its default lets the pipe buffer whole frames.
pipe = sp.Popen(ffmpeg_cmd, stdout=sp.PIPE)
images = []
encode_param = [int(cv2.IMWRITE_JPEG_QUALITY), 95]  # unused below; kept from the original test code
cnt = 0

while True:
    cnt += 1
    # One frame is exactly W*H*3 bytes (8-bit BGR).
    raw_image = pipe.stdout.read(IMG_W * IMG_H * 3)
    # Fix: numpy.fromstring is deprecated for binary input (and removed in
    # newer NumPy); frombuffer performs the same conversion without a copy.
    image = numpy.frombuffer(raw_image, dtype='uint8')
    if image.shape[0] == 0:
        # EOF - ffmpeg has no more frames.
        del images
        break
    image = image.reshape((IMG_H, IMG_W, 3))

    cv2.imshow('test', image)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

# Fix: flushing a read-only pipe is a no-op; close it instead.
pipe.stdout.close()
cv2.destroyAllWindows()

最佳答案

为了加速 H.264 解码,最好选择 -c:v h264_cuvid - 它在 GPU 中使用专用视频硬件。

用 GPU-Z 监控软件测试时,-hwaccel cuda 似乎也使用了专用视频解码加速器(与 -c:v h264_cuvid 相同),但我不能完全确定。

注意:

  • NVIDIA CUVID 视频解码加速器不支持所有尺寸和像素格式。

问题:

  • bufsize=10 太小了,最好不要设置 bufsize 参数(或将其设置为足够大的值,例如 bufsize=10**8)。

  • 代替 '-f', 'image2pipe',使用 '-f', 'rawvideo'(我们正在从管道读取原始视频帧,而不是图像 [如 JPEG 或 PNG])。
    我们可以在使用 '-f', 'rawvideo' 时删除 '-vcodec', 'rawvideo'

  • 我们不需要参数 '-s', '224x224',因为输出大小从输入视频中已知。

更新的 FFmpeg 命令:

ffmpeg_cmd = ['ffmpeg', '-hwaccel', 'cuda', '-c:v', 'h264_cuvid', '-i', input, '-pix_fmt', 'bgr24', '-f', 'rawvideo', '-']

为了创建可重现的代码示例,我首先创建一个合成视频文件 'test.mp4',它将用作输入:

# Build synthetic video file for testing.
################################################################################
sp.run(['ffmpeg', '-y', '-f', 'lavfi', '-i', f'testsrc=size={IMG_W}x{IMG_H}:rate=1',
'-f', 'lavfi', '-i', 'sine=frequency=300', '-c:v', 'libx264', '-pix_fmt', 'nv12',
'-c:a', 'aac', '-ar', '22050', '-t', '50', input])
################################################################################

这是一个完整的(可执行的)代码示例:

import cv2
import subprocess as sp
import numpy


IMG_W = 224
IMG_H = 224
input = 'test.mp4'

# Build synthetic video file for testing:
# 224x224 test pattern at 1 fps with a 300 Hz sine audio track, 50 s long.
################################################################################
sp.run(['ffmpeg', '-y', '-f', 'lavfi', '-i', f'testsrc=size={IMG_W}x{IMG_H}:rate=1',
        '-f', 'lavfi', '-i', 'sine=frequency=300', '-c:v', 'libx264', '-pix_fmt', 'nv12',
        '-c:a', 'aac', '-ar', '22050', '-t', '50', input])
################################################################################

# There is no damage using both '-hwaccel cuda' and '-c:v 'h264_cuvid'.
ffmpeg_cmd = ['ffmpeg', '-hwaccel', 'cuda', '-c:v', 'h264_cuvid', '-i', input, '-pix_fmt', 'bgr24', '-f', 'rawvideo', '-']

pipe = sp.Popen(ffmpeg_cmd, stdout=sp.PIPE)

cnt = 0
while True:
    cnt += 1
    # Read exactly one raw BGR frame (W*H*3 bytes).
    raw_image = pipe.stdout.read(IMG_W * IMG_H * 3)
    # Fix: numpy.fromstring is deprecated for binary data; use frombuffer.
    image = numpy.frombuffer(raw_image, dtype='uint8')
    if image.shape[0] == 0:
        break  # EOF - no more frames
    image = image.reshape((IMG_H, IMG_W, 3))

    cv2.imshow('test', image)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

pipe.stdout.close()
pipe.wait()  # wait for ffmpeg to exit before tearing down the windows
cv2.destroyAllWindows()

更新:

生成 JPEG 而不是原始帧:

我找到的用于在内存中构建 JPEG 图像列表的解决方案应用输出流的“手动”解析。

FFmpeg 命令(选择 YUV420 像素格式):

ffmpeg_cmd = ['ffmpeg', '-hwaccel', 'cuda', '-c:v', 'h264_cuvid', '-i', input, '-c:v', 'mjpeg', '-pix_fmt', 'yuvj420p', '-f', 'image2pipe', '-']

JPEG 文件格式在 SOS 段的 header 中没有记录负载数据的长度。
查找 SOS 负载的末尾需要字节扫描,并且使用 Python 实现非常慢。

以下解决方案与大多数用户无关。
我决定发布它,因为它可能与某人相关。

这里是一个代码示例(第一部分构建用于测试的合成视频文件):

import cv2
import subprocess as sp
import numpy as np
import struct


# Frame dimensions used for the synthetic test source below.
IMG_W = 224
IMG_H = 224
input = 'test.mp4'  # NOTE(review): 'input' shadows the builtin of the same name

# Build synthetic video file for testing.
# 224x224 test pattern at 1 fps plus a 300 Hz sine audio track, 50 s long.
################################################################################
sp.run(['ffmpeg', '-y', '-f', 'lavfi', '-i', f'testsrc=size={IMG_W}x{IMG_H}:rate=1',
'-f', 'lavfi', '-i', 'sine=frequency=300', '-c:v', 'libx264', '-pix_fmt', 'nv12',
'-c:a', 'aac', '-ar', '22050', '-t', '50', input])
################################################################################

def read_from_pipe(p_stdout, n_bytes):
    """Read exactly n_bytes bytes from the p_stdout pipe and return them.

    A single pipe read may return fewer bytes than requested, so keep
    reading until the full count has been accumulated.

    Parameters:
        p_stdout: readable binary stream (e.g. Popen.stdout).
        n_bytes:  exact number of bytes to read.

    Returns:
        A bytes object of length n_bytes.

    Raises:
        EOFError: if the stream ends before n_bytes were read.
                  (Fix: the original looped forever on EOF, because
                  read() keeps returning b'' once the pipe is closed.)
    """
    chunks = [p_stdout.read(n_bytes)]
    received = len(chunks[0])
    while received < n_bytes:
        chunk = p_stdout.read(n_bytes - received)
        if not chunk:
            raise EOFError(f'pipe closed after {received} of {n_bytes} bytes')
        chunks.append(chunk)
        received += len(chunk)
    return b''.join(chunks)


# Decode with CUVID and re-encode every frame as MJPEG; the JPEG images are
# streamed back-to-back over stdout and must be split manually.
ffmpeg_cmd = ['ffmpeg', '-hwaccel', 'cuda', '-c:v', 'h264_cuvid', '-i', input, '-c:v', 'mjpeg', '-pix_fmt', 'yuvj420p', '-f', 'image2pipe', '-']

pipe = sp.Popen(ffmpeg_cmd, stdout=sp.PIPE)

jpg_list = []  # one NumPy uint8 array per complete JPEG image

cnt = 0
while True:
    if pipe.poll() is not None:
        break  # ffmpeg has exited - no more images

    # https://en.wikipedia.org/wiki/JPEG_File_Interchange_Format
    jpeg_parts = []  # collected byte chunks of the current JPEG

    # SOI
    soi = read_from_pipe(pipe.stdout, 2)  # Read Start of Image (FF D8)
    assert soi == b'\xff\xd8', 'Error: first two bytes are not FF D8'
    jpeg_parts.append(soi)

    # JFIF APP0 marker segment
    marker = read_from_pipe(pipe.stdout, 2)  # APP0 marker (FF E0)
    assert marker == b'\xff\xe0', 'Error: APP0 marker is not FF E0'
    jpeg_parts.append(marker)

    xx = 0

    # Keep reading markers and segments until marker is EOI (0xFFD9)
    while xx != 0xD9:
        # Segment length: big-endian uint16 that includes its own 2 bytes
        # but excludes the preceding marker.
        length_of_segment = read_from_pipe(pipe.stdout, 2)
        jpeg_parts.append(length_of_segment)
        length_of_segment = struct.unpack('>H', length_of_segment)[0]

        # Read the segment payload (minus the 2 length bytes already read).
        segment = read_from_pipe(pipe.stdout, length_of_segment - 2)
        jpeg_parts.append(segment)

        marker = read_from_pipe(pipe.stdout, 2)  # next marker: APP0/SOF/DHT/COM/SOS/EOI
        jpeg_parts.append(marker)

        if marker == b'\xff\xda':  # SOS marker (0xFFDA)
            # The SOS payload carries no length field, so its end must be
            # found by scanning for the next marker, see
            # https://stackoverflow.com/questions/26715684/parsing-jpeg-sos-marker
            # Procedure (per Table B.1 of https://www.w3.org/Graphics/JPEG/itu-t81.pdf):
            #   1. Skip 3 bytes after the SOS marker (2 bytes header size +
            #      1 byte number of image components in scan).
            #   2. Search for the next FFxx marker, skipping FF00 and
            #      FFD0-FFD7 (restart markers) as well as FF01-FFBF, which
            #      may occur inside the entropy-coded scan data.
            first3bytes = read_from_pipe(pipe.stdout, 3)
            jpeg_parts.append(first3bytes)

            xx = 0

            while (xx < 0xBF) or ((xx >= 0xD0) and (xx <= 0xD7)):
                # Scan forward to the next 0xFF byte.
                b = 0
                while b != 0xFF:
                    b = read_from_pipe(pipe.stdout, 1)
                    jpeg_parts.append(b)
                    b = b[0]

                xx = read_from_pipe(pipe.stdout, 1)  # byte following the FF
                jpeg_parts.append(xx)
                xx = xx[0]

    # Join the chunks into one bytes object and store it as a NumPy array.
    jpg_list.append(np.frombuffer(b''.join(jpeg_parts), np.uint8))

    cnt += 1


pipe.stdout.close()
pipe.wait()


# Decode and show images for testing.
for im in jpg_list:
    # imdecode accepts the in-memory JPEG byte array directly.
    image = cv2.imdecode(im, cv2.IMREAD_UNCHANGED)

    cv2.imshow('test', image)
    if cv2.waitKey(100) & 0xFF == ord('q'):
        break

cv2.destroyAllWindows()

关于python - 如何使用 ffmpeg gpu 编码将帧从视频保存到内存?,我们在Stack Overflow上找到一个类似的问题: https://stackoverflow.com/questions/68196266/

47 4 0
Copyright 2021 - 2024 cfsdn All Rights Reserved 蜀ICP备2022000587号
广告合作:1813099741@qq.com 6ren.com