
c++ - SURF feature extraction and keypoint matching with FlannBasedMatcher


Below is my code. It extracts features using SURF and matches the keypoints with a FlannBasedMatcher.

Mat object = imread("S6E0.bmp",  CV_LOAD_IMAGE_GRAYSCALE);

if( !object.data )
{
    // std::cout << "Error reading object " << std::endl;
    return -2;
}

//Detect the keypoints using SURF Detector

int minHessian = 500;

SurfFeatureDetector detector( minHessian );

std::vector<KeyPoint> kp_object;

detector.detect( object, kp_object );

//Calculate descriptors (feature vectors)
SurfDescriptorExtractor extractor;

Mat des_object;

extractor.compute( object, kp_object, des_object );

FlannBasedMatcher matcher;
char key = 'a';
//VideoCapture cap(0);

namedWindow("Good Matches");

std::vector<Point2f> obj_corners(4);

//Get the corners from the object
obj_corners[0] = cvPoint(0,0);
obj_corners[1] = cvPoint( object.cols, 0 );
obj_corners[2] = cvPoint( object.cols, object.rows );
obj_corners[3] = cvPoint( 0, object.rows );

Mat image = imread("S6E0.bmp", CV_LOAD_IMAGE_GRAYSCALE);
Mat des_image, img_matches;

std::vector<KeyPoint> kp_image;
std::vector<vector<DMatch >> matches;

std::vector<std::vector<cv::DMatch>> matches1;
std::vector<std::vector<cv::DMatch>> matches2;
std::vector<cv::DMatch> matches3;
std::vector<DMatch > good_matches;
std::vector<Point2f> obj;
std::vector<Point2f> scene;

std::vector<Point2f> scene_corners(4);

Mat H;

//cvtColor(frame, image, CV_RGB2GRAY);
detector.detect( image, kp_image );
extractor.compute( image, kp_image, des_image );


matcher.knnMatch(des_object, des_image, matches, 2);



for(int i = 0; i < min(des_image.rows-1,(int) matches.size()); i++) //THIS LOOP IS SENSITIVE TO SEGFAULTS
{
    if((matches[i][0].distance < 0.6*(matches[i][1].distance)) && ((int) matches[i].size()<=2 && (int) matches[i].size()>0))
    {
        good_matches.push_back(matches[i][0]);
    }
}

//Draw only "good" matches

drawMatches( object, kp_object, image, kp_image, good_matches, img_matches, Scalar::all(-1), Scalar::all(-1), vector<char>(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS );

if (good_matches.size() >= 4)
{
    printf("Images matching %d , %d\n", (int) good_matches.size(), (int) kp_object.size());

    //return 1;

    for( int i = 0; i < (int) good_matches.size(); i++ )
    {
        //Get the keypoints from the good matches
        obj.push_back( kp_object[ good_matches[i].queryIdx ].pt );
        scene.push_back( kp_image[ good_matches[i].trainIdx ].pt );
    }

    //H = findHomography( obj, scene, CV_RANSAC );
    //printf("Size : %d", H.size());
    //perspectiveTransform( obj_corners, scene_corners, H);
    //printf("Size : %d --- %d --- %d", H.size(), scene_corners.size());
}
else
{
    printf("Images matching %d , %d\n", (int) good_matches.size(), (int) kp_object.size());
}

//Show detected matches

imshow( "Good Matches", img_matches );
waitKey(0);
return 0;

In this code, I would like to know what exactly happens inside this method:

matcher.knnMatch(des_object, des_image, matches, 2);

As far as I know, I pass in the descriptors of the two images to be matched, and the matches vector gets filled with the 2 nearest neighbours. I would like to know what exactly happens inside the method, how the matches vector is populated, and with which points.
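
Conceptually, knnMatch treats each row of des_object as a query and searches des_image (the train set) for its k = 2 closest descriptors, so matches[i] holds up to two DMatch entries for the i-th object descriptor, best match first. A minimal sketch, reusing the variables from the code above, of how the contents of matches can be inspected (queryIdx, trainIdx and distance are the standard DMatch fields):

// Sketch only: assumes `matches`, `kp_object` and `kp_image` from the snippet above.
for (size_t i = 0; i < matches.size(); i++)         // one entry per query descriptor (row of des_object)
{
    for (size_t k = 0; k < matches[i].size(); k++)  // at most 2 neighbours, closest first
    {
        const DMatch &m = matches[i][k];
        // m.queryIdx : index into kp_object / row of des_object
        // m.trainIdx : index into kp_image  / row of des_image
        // m.distance : L2 distance between the two SURF descriptors
        printf("query %d -> train %d, distance %f\n", m.queryIdx, m.trainIdx, m.distance);
    }
}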

And in this piece of code:

for(int i = 0; i < min(des_image.rows-1,(int) matches.size()); i++) //THIS LOOP IS SENSITIVE TO SEGFAULTS
{
    if((matches[i][0].distance < 0.6*(matches[i][1].distance)) && ((int) matches[i].size()<=2 && (int) matches[i].size()>0))
    {
        good_matches.push_back(matches[i][0]);
    }
}

I use a nearest neighbour distance ratio (nndr) of 0.6, and I would like to know how the good_matches are found and how changing the nndr value would affect the result.
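
One way to see the effect of the nndr value is to re-run the same ratio test at several thresholds on the matches vector computed above and count how many matches survive: a higher ratio is more permissive (more matches, including more false positives), a lower ratio keeps only matches whose best neighbour is much closer than the second best. A small sketch under that assumption (the threshold values are illustrative, not from the original code):

// Sketch only: reuses `matches` from the code above; thresholds are illustrative.
float ratios[] = { 0.4f, 0.6f, 0.8f };
for (int r = 0; r < 3; r++)
{
    int kept = 0;
    for (size_t i = 0; i < matches.size(); i++)
    {
        if (matches[i].size() == 2 &&
            matches[i][0].distance < ratios[r] * matches[i][1].distance)
            kept++;
    }
    printf("nndr = %.1f -> %d good matches\n", ratios[r], kept);
}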

It would be a great help if I could get this code sorted out. Thanks.

Best Answer

FlannBasedMatcher is based on the paper by Muja et al.; there you can find the exact algorithm and how it proceeds.
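
For reference, the matcher in the question is built with default parameters; the same FLANN machinery (randomized kd-trees searched with a bounded number of leaf checks, as described by Muja and Lowe) can also be configured explicitly. A sketch with illustrative parameter values, not taken from the question:

// Sketch only: explicit FLANN parameters instead of the defaults; values are illustrative.
FlannBasedMatcher matcher(
    new cv::flann::KDTreeIndexParams(4),   // build 4 randomized kd-trees over the train descriptors
    new cv::flann::SearchParams(32));      // check up to 32 leaves per query (speed/accuracy trade-off)
// matcher.knnMatch(des_object, des_image, matches, 2) then returns approximate nearest neighbours.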

As for good_matches, you can see in the code snippet itself that it is the set of best matches, selected according to your criterion, the nndr. It is essentially a threshold that decides how far apart a match is allowed to be before it is discarded altogether. The higher the threshold, the more points are accepted and the more positive matches you get (whether they are true positives or not will depend on your dataset and on how you set the nndr level).

Hope this helps.

About c++ - SURF feature extraction and keypoint matching with FlannBasedMatcher, a similar question was found on Stack Overflow: https://stackoverflow.com/questions/19750239/
