
visual-studio-2010 - OpenCV: problem using color frames with SURF features


I am trying to use the code below so that I can run SURF on color frames/images, and then use the code from Kalman_Color_Object_Track (linked here) to track the detected object by its color values with a Kalman filter. These are the steps I intend to perform, but I am stuck because this SURF detection code does not accept/handle color images:

  1. "book1.png"为彩色图片
  2. 在从传入帧中检测到图像周围的矩形后,由于 Kalman_Color_Object_Track 代码在 C++ 中,Mat 结构更改为 IplImage

    dest_image=cvCloneImage(&(IplImage)image);

    mat_frame=cvCloneImage(&(IplImage)frame);

  3. 调用 Kalman_Color_Object_Track( mat_frame,dest_image,30); 方法。

Questions: (A) How can I make this code work so that SURF features can be extracted and detected on color images? (B) I am not sure what should be passed in the function signature of Kalman_Color_Object_Track(), and (C) where exactly should it be called inside the object detection module?
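As a reference for steps 2 and 3, here is a minimal sketch of the intended Mat → IplImage hand-off. The three-argument signature of Kalman_Color_Object_Track and its parameter names are only assumed from step 3 above; the real declaration comes from the linked Kalman_Color_Object_Track code:

    #include "opencv2/core/core.hpp"
    #include "opencv2/core/core_c.h" // IplImage, cvCloneImage, cvReleaseImage (OpenCV 2.x C API)

    // Assumed declaration, taken from step 3 above; replace it with the real one
    // from the Kalman_Color_Object_Track code.
    void Kalman_Color_Object_Track(IplImage* frame, IplImage* object, int param);

    void track_detected_object(const cv::Mat& frame, const cv::Mat& image)
    {
        // Shallow IplImage headers over the Mat data (valid in OpenCV 2.x)
        IplImage ipl_image = image;
        IplImage ipl_frame = frame;

        // Deep copies for the C API, as in step 2
        IplImage* dest_image = cvCloneImage(&ipl_image);
        IplImage* mat_frame  = cvCloneImage(&ipl_frame);

        // Step 3: hand the color frame and the detected object to the tracker
        Kalman_Color_Object_Track(mat_frame, dest_image, 30);

        cvReleaseImage(&dest_image);
        cvReleaseImage(&mat_frame);
    }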

#include <stdio.h>
#include <iostream>
#include "opencv2/core/core.hpp"
#include "opencv2/features2d/features2d.hpp"
#include "opencv2/nonfree/features2d.hpp" // SurfFeatureDetector/SurfDescriptorExtractor live here in OpenCV 2.4+ (link opencv_nonfree)
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/calib3d/calib3d.hpp"

using namespace cv;
IplImage *mat_dest_image=0;
IplImage *mat_frame=0;
/* Object Detection and recognition from video*/



int main()
{
Mat object = imread( "book1.png", CV_LOAD_IMAGE_COLOR ); // load the object template as a 3-channel color image

if( !object.data )
{
std::cout<< "Error reading object " << std::endl;
return -1;
}

//Detect the keypoints using SURF Detector
int minHessian = 500;

SurfFeatureDetector detector( minHessian );
std::vector<KeyPoint> kp_object;

detector.detect( object, kp_object );

//Calculate descriptors (feature vectors)
SurfDescriptorExtractor extractor;
Mat des_object;

extractor.compute( object, kp_object, des_object );

FlannBasedMatcher matcher;



namedWindow("Good Matches");
namedWindow("Tracking");

std::vector<Point2f> obj_corners(4);

//Get the corners from the object
obj_corners[0] = cvPoint(0,0);
obj_corners[1] = cvPoint( object.cols, 0 );
obj_corners[2] = cvPoint( object.cols, object.rows );
obj_corners[3] = cvPoint( 0, object.rows );

char key = 'a';
int framecount = 0;
VideoCapture cap("booksvideo.avi");

for(;;)
{
Mat frame;
cap >> frame;
if (frame.empty()) break; // stop when the video ends or a frame cannot be read
imshow("Good Matches", frame);


Mat des_image, img_matches;
std::vector<KeyPoint> kp_image;
std::vector<std::vector<DMatch> > matches;
std::vector<DMatch > good_matches;
std::vector<Point2f> obj;
std::vector<Point2f> scene;
std::vector<Point2f> scene_corners(4);
Mat H;
Mat image;

//cvtColor(frame, image, CV_RGB2GRAY); // grayscale conversion disabled to keep color;
// note that with it commented out, `image` is still empty when passed to detect()/compute() below

detector.detect( image, kp_image );
extractor.compute( image, kp_image, des_image );

matcher.knnMatch(des_object, des_image, matches, 2);

for(int i = 0; i < min(des_image.rows-1,(int) matches.size()); i++) //THIS LOOP IS SENSITIVE TO SEGFAULTS
{
if((matches[i][0].distance < 0.6*(matches[i][1].distance)) && ((int) matches[i].size()<=2 && (int) matches[i].size()>0))
{
good_matches.push_back(matches[i][0]);
}
}

//Draw only "good" matches
drawMatches( object, kp_object, image, kp_image, good_matches, img_matches, Scalar::all(-1), Scalar::all(-1), std::vector<char>(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS );

if (good_matches.size() >= 4)
{
for( int i = 0; i < good_matches.size(); i++ )
{
//Get the keypoints from the good matches
obj.push_back( kp_object[ good_matches[i].queryIdx ].pt );
scene.push_back( kp_image[ good_matches[i].trainIdx ].pt );
}

H = findHomography( obj, scene, CV_RANSAC );

perspectiveTransform( obj_corners, scene_corners, H);

//Draw lines between the corners (the mapped object in the scene image )
line( img_matches, scene_corners[0] + Point2f( object.cols, 0), scene_corners[1] + Point2f( object.cols, 0), Scalar(0, 255, 0), 4 );
line( img_matches, scene_corners[1] + Point2f( object.cols, 0), scene_corners[2] + Point2f( object.cols, 0), Scalar( 0, 255, 0), 4 );
line( img_matches, scene_corners[2] + Point2f( object.cols, 0), scene_corners[3] + Point2f( object.cols, 0), Scalar( 0, 255, 0), 4 );
line( img_matches, scene_corners[3] + Point2f( object.cols, 0), scene_corners[0] + Point2f( object.cols, 0), Scalar( 0, 255, 0), 4 );
mat_dest_image=cvCloneImage(&(IplImage)image);
mat_frame=cvCloneImage(&(IplImage)frame);

Kalman_Color_Object_Track( ); // The tracking method
}

//Show detected matches
imshow( "Good Matches", img_matches );
for( int i = 0; i < good_matches.size(); i++ )
{ printf( "-- Good Match [%d] Keypoint 1: %d -- Keypoint 2: %d \n", i, good_matches[i].queryIdx, good_matches[i].trainIdx ); }
waitKey(0);

}
return 0;

}

Best answer

This paper implements the SIFT descriptor on color images by computing a gradient histogram for each channel independently. Perhaps you can try the same approach with SURF features.
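If you want to try that idea with the code above, here is a minimal sketch, assuming OpenCV 2.4's SurfFeatureDetector/SurfDescriptorExtractor (the function colorSurfDescriptors is made up for illustration, not from the paper): detect keypoints once on a grayscale copy, compute a SURF descriptor on every color plane at those keypoints, and concatenate the per-channel descriptors side by side:

    #include <vector>
    #include "opencv2/core/core.hpp"
    #include "opencv2/imgproc/imgproc.hpp"
    #include "opencv2/nonfree/features2d.hpp" // SURF in OpenCV 2.4+

    // Per-channel SURF: one descriptor matrix per color plane, concatenated column-wise.
    cv::Mat colorSurfDescriptors(const cv::Mat& bgr,
                                 std::vector<cv::KeyPoint>& keypoints,
                                 int minHessian = 500)
    {
        cv::Mat gray;
        cv::cvtColor(bgr, gray, CV_BGR2GRAY);

        cv::SurfFeatureDetector detector(minHessian);
        detector.detect(gray, keypoints); // detect once, on intensity

        std::vector<cv::Mat> channels;
        cv::split(bgr, channels); // B, G, R planes

        cv::SurfDescriptorExtractor extractor;
        cv::Mat all;
        for (size_t c = 0; c < channels.size(); ++c)
        {
            cv::Mat des;
            // compute() may drop keypoints too close to the border; since every
            // channel has the same geometry, the surviving set stays consistent.
            extractor.compute(channels[c], keypoints, des);
            if (all.empty())
                all = des;
            else
                cv::hconcat(all, des, all); // append this channel's descriptor columns
        }
        return all; // keypoints.size() x (3 * descriptor length), CV_32F
    }

In main() you would call this for both the object image and each frame (in place of the grayscale detect/compute pair) and feed the two descriptor matrices to the FlannBasedMatcher as before.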

Regarding visual-studio-2010 - OpenCV: problem using color frames with SURF features, we found a similar question on Stack Overflow: https://stackoverflow.com/questions/14884049/
