
opencv - Poor OpenCV video streaming performance after applying the SURF algorithm

Repost. Author: 行者123. Updated: 2023-12-02 17:53:04

I have applied the SURF algorithm to detect an object in the camera stream, but the streaming has become quite slow.
Using the Windows API GetTickCount(), I found that these two calls

detector.detect( image, kp_image );
extractor.compute( image, kp_image, des_image );

take about 1200 ms per frame.

Is there any way to speed this up?
Thanks in advance.

Here is the complete code:
#include "stdafx.h"
#include <windows.h>
#include <stdio.h>
#include <iostream>
#include <fstream>
#include <string>
#include "opencv2/core/core.hpp"
#include "opencv2/nonfree/features2d.hpp"
#include "opencv2/features2d/features2d.hpp"
//#include "opencv2/legacy/legacy.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/calib3d/calib3d.hpp"



using namespace cv;
using namespace std;

int main()
{
    //reference image
    Mat object = imread( "jus.png", CV_LOAD_IMAGE_GRAYSCALE );
    if( !object.data )
    {
        std::cout << "Error reading object " << std::endl;
        return -1;
    }

    char key = 'a';
    int framecount = 0;

    SurfFeatureDetector detector( 400 );
    SurfDescriptorExtractor extractor;
    FlannBasedMatcher matcher;

    Mat frame, des_object, image;
    Mat des_image, img_matches, H;

    std::vector<KeyPoint> kp_object;
    std::vector<Point2f> obj_corners(4);
    std::vector<KeyPoint> kp_image;
    std::vector<std::vector<DMatch> > matches;
    std::vector<DMatch> good_matches;
    std::vector<Point2f> obj;
    std::vector<Point2f> scene;
    std::vector<Point2f> scene_corners(4);

    //compute keypoints and descriptors of the reference image
    detector.detect( object, kp_object );
    extractor.compute( object, kp_object, des_object );
    //cout<<"Info de lobjet: "<<object.dims<<" des_object, "<<des_object.dims<<" and kp_object: "<<kp_object.size()<<endl;

    //create video capture object
    VideoCapture cap(1);

    //Get the corners from the object
    obj_corners[0] = cvPoint( 0, 0 );
    obj_corners[1] = cvPoint( object.cols, 0 );
    obj_corners[2] = cvPoint( object.cols, object.rows );
    obj_corners[3] = cvPoint( 0, object.rows );

    int before, after;
    //while loop for real-time detection
    while (1)
    {
        //capture one frame from the video and store it in the Mat named 'frame'
        cap >> frame;
        if (framecount < 5)
        {
            framecount++;
            continue;
        }

        //convert the captured frame to grayscale
        cvtColor(frame, image, CV_RGB2GRAY);

        //extract keypoints and descriptors of the captured frame
        before = GetTickCount();
        detector.detect( image, kp_image );
        extractor.compute( image, kp_image, des_image );
        after = GetTickCount();

        cout << "Time of detection and extraction is: " << after - before << endl;
        //cout<<"Info de limage: "<<image.dims<<" des_image, "<<des_image.dims<<" and kp_image: "<<kp_image.size()<<endl;

        //find matching descriptors between the reference and the captured image
        matcher.knnMatch(des_object, des_image, matches, 2);

        //keep a match only if its distance is below 0.6 times the distance
        //of the second-best match (ratio test, used to filter out wrong matches)
        for (int i = 0; i < min(des_image.rows - 1, (int)matches.size()); i++)
        {
            if ((matches[i][0].distance < 0.6 * (matches[i][1].distance)) && ((int)matches[i].size() <= 2 && (int)matches[i].size() > 0))
            {
                good_matches.push_back(matches[i][0]);
            }
        }

        //drawKeypoints(object, kp_object, object);

        //Draw only "good" matches
        //drawMatches( object, kp_object, frame, kp_image, good_matches, img_matches,
        //Scalar::all(-1), Scalar::all(-1), vector<char>(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS );

        //at least 4 good matches are needed to estimate the homography
        if (good_matches.size() >= 4)
        {
            for (size_t i = 0; i < good_matches.size(); i++)
            {
                //Get the keypoints from the good matches
                obj.push_back( kp_object[ good_matches[i].queryIdx ].pt );
                scene.push_back( kp_image[ good_matches[i].trainIdx ].pt );
            }
            try
            {
                H = findHomography( obj, scene, CV_RANSAC );
            }
            catch (const Exception& e) {}

            //only project the corners if a homography was actually found
            if (!H.empty())
            {
                perspectiveTransform( obj_corners, scene_corners, H );

                //Draw lines between the corners (the mapped object in the scene image)
                line( frame, scene_corners[0] /*+ Point2f( object.cols, 0)*/, scene_corners[1] /*+ Point2f( object.cols, 0)*/, Scalar(100, 0, 0), 4 );
                line( frame, scene_corners[1] /*+ Point2f( object.cols, 0)*/, scene_corners[2] /*+ Point2f( object.cols, 0)*/, Scalar(100, 0, 0), 4 );
                line( frame, scene_corners[2] /*+ Point2f( object.cols, 0)*/, scene_corners[3] /*+ Point2f( object.cols, 0)*/, Scalar(100, 0, 0), 4 );
                line( frame, scene_corners[3] /*+ Point2f( object.cols, 0)*/, scene_corners[0] /*+ Point2f( object.cols, 0)*/, Scalar(100, 0, 0), 4 );
            }
        }

        //Show detected matches
        imshow( "Good Matches", frame );

        //clear the per-frame containers
        good_matches.clear();
        obj.clear();
        scene.clear();

        key = waitKey(33);
    }
    return 0;
}

Best answer

  • Resize the frame to a smaller size before calling feature detection. For example, scaling the image by 0.5 in each dimension makes these functions run roughly 4 times faster (see the first sketch below).
  • Note that the SURF detector has several optional parameters: http://docs.opencv.org/modules/nonfree/doc/feature_detection.html#surf-surf. You can reduce the number of octaves and the number of layers per octave to gain speed, but you may have to trade off some detection quality (see the second sketch below).
  • A similar question about poor OpenCV video streaming performance after applying SURF can be found on Stack Overflow: https://stackoverflow.com/questions/15172259/
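
As an illustration of the first suggestion, here is a minimal sketch against the loop above; the 0.5 factor and the name small_image are only for illustration, and keypoint coordinates will then refer to the downscaled frame:

//Downscale before detection: SURF cost grows with the pixel count,
//so 0.5x per dimension is roughly a 4x speed-up.
Mat small_image;
resize(image, small_image, Size(), 0.5, 0.5, INTER_AREA);

before = GetTickCount();
detector.detect( small_image, kp_image );
extractor.compute( small_image, kp_image, des_image );
after = GetTickCount();

//kp_image coordinates now live in small_image; multiply each .pt by 2.0f
//before pushing it into 'scene' or drawing on the full-size frame.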
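
And a sketch of the second suggestion, assuming the OpenCV 2.x nonfree SURF interface that the code above already uses; the values 800/2/2 are only a starting point, not tuned ones:

//SurfFeatureDetector( hessianThreshold, nOctaves, nOctaveLayers, ... )
//A higher Hessian threshold keeps fewer, stronger keypoints; fewer octaves
//and layers per octave mean less work per frame, at the cost of robustness
//to scale changes.
SurfFeatureDetector detector( 800, 2, 2 );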
