gpt4 book ai didi

qt - 如何使用 QMediaPlayer 保存帧?

转载 作者:行者123 更新时间:2023-12-03 22:56:00 28 4
gpt4 key购买 nike

我想从 QMediaPlayer 中保存帧的图像.阅读文档后,我明白我应该使用 QVideoProbe .我正在使用以下代码:

// Attempted approach: attach a QVideoProbe to a QMediaPlayer to intercept frames.
QMediaPlayer *player = new QMediaPlayer();
QVideoProbe *probe = new QVideoProbe;

// Slot should fire once per decoded frame — but only if setSource() succeeds.
connect(probe, SIGNAL(videoFrameProbed(QVideoFrame)), this, SLOT(processFrame(QVideoFrame)));

// NOTE: returns false when the backend does not support monitoring —
// which is exactly the problem described in this question.
qDebug()<<probe->setSource(player); // Returns true, hopefully.

player->setVideoOutput(myVideoSurface);
player->setMedia(QUrl::fromLocalFile("observation.mp4"));
player->play(); // Start receiving frames as they get presented to myVideoSurface

但不幸的是,probe->setSource(player) 对我来说总是返回 false,因此我的槽函数 processFrame 从未被触发。

我究竟做错了什么 ?有人有 QVideoProbe 的工作示例吗? ?

最佳答案

你没有做错任何事。正如 @DYangu 指出的那样,你的媒体对象实例不支持监控视频。我遇到了同样的问题(QAudioProbe 也一样,但这里我们不关心它)。我通过查看 this 回答和 this one 找到了解决方案。

主要思路是子类化 QAbstractVideoSurface。完成后,Qt 会调用你的 QAbstractVideoSurface 实现中的 QAbstractVideoSurface::present(const QVideoFrame & frame) 方法,你就能够处理视频的每一帧。

据说here ,通常你只需要重新实现两个方法:

  • supportedPixelFormats以便生产者可以为 QVideoFrame 选择合适的格式
  • present,用于接收并处理(或显示)传入的帧

  • 但当时我在Qt源码中搜索,开心地找到了 this piece of code这帮助我进行了全面实现。所以, 这是完整的代码 用于使用“视频帧采集器”。

    VideoFrameGrabber.cpp:
    #include "VideoFrameGrabber.h"

    #include <QtWidgets>
    #include <qabstractvideosurface.h>
    #include <qvideosurfaceformat.h>

    // Constructs the grabber. Frames presented to this surface are mirrored
    // onto `widget`; the image format stays invalid until start() negotiates one.
    VideoFrameGrabber::VideoFrameGrabber(QWidget *widget, QObject *parent) :
        QAbstractVideoSurface(parent),
        widget(widget),
        imageFormat(QImage::Format_Invalid)
    {
    }

    // Advertise every CPU-readable pixel format so the producer can pick
    // whatever the decoder outputs natively (no handle-type restriction here;
    // isFormatSupported() filters on handle type later).
    QList<QVideoFrame::PixelFormat> VideoFrameGrabber::supportedPixelFormats(QAbstractVideoBuffer::HandleType handleType) const
    {
        Q_UNUSED(handleType);

        QList<QVideoFrame::PixelFormat> formats;

        // RGB family
        formats << QVideoFrame::Format_ARGB32
                << QVideoFrame::Format_ARGB32_Premultiplied
                << QVideoFrame::Format_RGB32
                << QVideoFrame::Format_RGB24
                << QVideoFrame::Format_RGB565
                << QVideoFrame::Format_RGB555
                << QVideoFrame::Format_ARGB8565_Premultiplied;

        // BGR family
        formats << QVideoFrame::Format_BGRA32
                << QVideoFrame::Format_BGRA32_Premultiplied
                << QVideoFrame::Format_BGR32
                << QVideoFrame::Format_BGR24
                << QVideoFrame::Format_BGR565
                << QVideoFrame::Format_BGR555
                << QVideoFrame::Format_BGRA5658_Premultiplied;

        // YUV family
        formats << QVideoFrame::Format_AYUV444
                << QVideoFrame::Format_AYUV444_Premultiplied
                << QVideoFrame::Format_YUV444
                << QVideoFrame::Format_YUV420P
                << QVideoFrame::Format_YV12
                << QVideoFrame::Format_UYVY
                << QVideoFrame::Format_YUYV
                << QVideoFrame::Format_NV12
                << QVideoFrame::Format_NV21
                << QVideoFrame::Format_IMC1
                << QVideoFrame::Format_IMC2
                << QVideoFrame::Format_IMC3
                << QVideoFrame::Format_IMC4;

        // Grayscale and raw/compressed formats
        formats << QVideoFrame::Format_Y8
                << QVideoFrame::Format_Y16
                << QVideoFrame::Format_Jpeg
                << QVideoFrame::Format_CameraRaw
                << QVideoFrame::Format_AdobeDng;

        return formats;
    }

    bool VideoFrameGrabber::isFormatSupported(const QVideoSurfaceFormat &format) const
    {
    const QImage::Format imageFormat = QVideoFrame::imageFormatFromPixelFormat(format.pixelFormat());
    const QSize size = format.frameSize();

    return imageFormat != QImage::Format_Invalid
    && !size.isEmpty()
    && format.handleType() == QAbstractVideoBuffer::NoHandle;
    }

    bool VideoFrameGrabber::start(const QVideoSurfaceFormat &format)
    {
    const QImage::Format imageFormat = QVideoFrame::imageFormatFromPixelFormat(format.pixelFormat());
    const QSize size = format.frameSize();

    if (imageFormat != QImage::Format_Invalid && !size.isEmpty()) {
    this->imageFormat = imageFormat;
    imageSize = size;
    sourceRect = format.viewport();

    QAbstractVideoSurface::start(format);

    widget->updateGeometry();
    updateVideoRect();

    return true;
    } else {
    return false;
    }
    }

    // Stop presenting: release the held frame, reset the paint target and
    // schedule a repaint so the widget clears the last frame.
    void VideoFrameGrabber::stop()
    {
        targetRect = QRect();
        currentFrame = QVideoFrame();

        QAbstractVideoSurface::stop();

        widget->update();
    }

    // Called by Qt for every decoded frame. Converts the frame to a QImage and
    // emits frameAvailable(), then stores the frame for paint().
    // Returns false (with IncorrectFormatError) when the frame no longer
    // matches the negotiated surface format.
    bool VideoFrameGrabber::present(const QVideoFrame &frame)
    {
        if (frame.isValid())
        {
            // Map a shallow copy so the original frame's state is untouched.
            QVideoFrame cloneFrame(frame);
            // FIX: check map() — reading bits() from an unmapped frame is UB.
            if (cloneFrame.map(QAbstractVideoBuffer::ReadOnly))
            {
                // FIX: pass bytesPerLine(). Frame rows are frequently padded;
                // omitting the stride shears/corrupts the produced image.
                // (paint() below already did this correctly.)
                const QImage image(cloneFrame.bits(),
                                   cloneFrame.width(),
                                   cloneFrame.height(),
                                   cloneFrame.bytesPerLine(),
                                   QVideoFrame::imageFormatFromPixelFormat(cloneFrame.pixelFormat()));
                // FIX: emit a deep copy — `image` merely aliases the mapped
                // buffer, which is invalid after unmap() (e.g. with queued
                // connections or if a slot stores the image).
                emit frameAvailable(image.copy()); // this is very important
                cloneFrame.unmap();
            }
        }

        if (surfaceFormat().pixelFormat() != frame.pixelFormat()
                || surfaceFormat().frameSize() != frame.size()) {
            setError(IncorrectFormatError);
            stop();

            return false;
        } else {
            currentFrame = frame;

            widget->repaint(targetRect);

            return true;
        }
    }

    void VideoFrameGrabber::updateVideoRect()
    {
    QSize size = surfaceFormat().sizeHint();
    size.scale(widget->size().boundedTo(size), Qt::KeepAspectRatio);

    targetRect = QRect(QPoint(0, 0), size);
    targetRect.moveCenter(widget->rect().center());
    }

    // Draw the most recently presented frame into targetRect, honouring
    // bottom-to-top scan-line order by flipping the painter vertically.
    void VideoFrameGrabber::paint(QPainter *painter)
    {
        if (!currentFrame.map(QAbstractVideoBuffer::ReadOnly))
            return; // nothing to draw if the frame cannot be mapped

        const QTransform savedTransform = painter->transform();

        if (surfaceFormat().scanLineDirection() == QVideoSurfaceFormat::BottomToTop) {
            // Mirror around the horizontal axis, then shift back into view.
            painter->scale(1, -1);
            painter->translate(0, -widget->height());
        }

        // Zero-copy view over the mapped frame data (stride included).
        const QImage frameImage(currentFrame.bits(),
                                currentFrame.width(),
                                currentFrame.height(),
                                currentFrame.bytesPerLine(),
                                imageFormat);

        painter->drawImage(targetRect, frameImage, sourceRect);

        painter->setTransform(savedTransform);

        currentFrame.unmap();
    }

    VideoFrameGrabber.h
    #ifndef VIDEOFRAMEGRABBER_H
    #define VIDEOFRAMEGRABBER_H

    #include <QtWidgets>
    // FIX: QAbstractVideoSurface and friends live in the QtMultimedia module;
    // <QtWidgets> alone does not declare them, so this header did not compile
    // standalone.
    #include <QAbstractVideoSurface>
    #include <QVideoFrame>
    #include <QVideoSurfaceFormat>

    // Video surface that hands every presented frame to client code via the
    // frameAvailable() signal while also mirroring the frame onto a widget.
    // Use it as the player's video output: player->setVideoOutput(grabber).
    class VideoFrameGrabber : public QAbstractVideoSurface
    {
    Q_OBJECT

    public:
    // `widget` receives the painted frames; `parent` owns this surface.
    explicit VideoFrameGrabber(QWidget *widget, QObject *parent = 0);

    // Formats the producer may choose for delivered QVideoFrames.
    QList<QVideoFrame::PixelFormat> supportedPixelFormats(
    QAbstractVideoBuffer::HandleType handleType = QAbstractVideoBuffer::NoHandle) const override;
    bool isFormatSupported(const QVideoSurfaceFormat &format) const override;

    bool start(const QVideoSurfaceFormat &format) override;
    void stop() override;

    // Invoked once per decoded frame; emits frameAvailable().
    bool present(const QVideoFrame &frame) override;

    // Rectangle the video is currently painted into.
    QRect videoRect() const { return targetRect; }
    void updateVideoRect();

    void paint(QPainter *painter);

    private:
    QWidget *widget;             // non-owning: widget frames are painted on
    QImage::Format imageFormat;  // negotiated in start()
    QRect targetRect;            // scaled+centered paint destination
    QSize imageSize;             // native frame size
    QRect sourceRect;            // viewport within the frame
    QVideoFrame currentFrame;    // last presented frame, drawn by paint()

    signals:
    // Emitted for every presented frame; connect to process frames anywhere.
    void frameAvailable(QImage frame);
    };
    #endif //VIDEOFRAMEGRABBER_H

    备注:在 .h 中,你会看到我添加了一个 signal,它以图像作为参数。这将允许你在代码中的任何地方处理你的帧。当时这个信号以 QImage 作为参数,但如果你愿意,当然也可以改用 QVideoFrame。

    现在,我们准备使用这个视频帧采集器:
    // Wire the grabber up as the player's video output — no QVideoProbe needed.
    QMediaPlayer* player = new QMediaPlayer(this);
    // no more QVideoProbe
    VideoFrameGrabber* grabber = new VideoFrameGrabber(this);
    player->setVideoOutput(grabber);

    // processFrame(QImage) is invoked once per presented frame.
    connect(grabber, SIGNAL(frameAvailable(QImage)), this, SLOT(processFrame(QImage)));

    现在你只需要声明一个名为 processFrame(QImage image) 的槽函数,每当一帧进入你的 VideoFrameGrabber 的 present 方法时,你就会收到一个 QImage。

    我希望这会帮助你!

    关于qt - 如何使用 QMediaPlayer 保存帧?,我们在Stack Overflow上找到一个类似的问题: https://stackoverflow.com/questions/37724602/

    28 4 0
    Copyright 2021 - 2024 cfsdn All Rights Reserved 蜀ICP备2022000587号
    广告合作:1813099741@qq.com 6ren.com