
c++ - OpenCV : How to find the center of mass/centroid for motion information


The problem is that I cannot figure out how to implement the centroid with my existing code: which image or object to use once the detected object has been bounded by the rectangle, and so on, so that I can obtain the trajectory of its path. I am using OpenCV 2.3. I found two approaches: Link1 and Link2 discuss using moments, while another approach uses the bounding-box information (Link3). The moments method needs a thresholded image, but with SURF the image is grayscale, and thresholding the grey image just shows a white image! So now I am having trouble understanding how I should calculate the centroid with the code below (in particular, what I should use instead of points[i].x, since I am using

obj.push_back( kp_object[ good_matches[i].queryIdx ].pt );
scene.push_back( kp_image[ good_matches[i].trainIdx ].pt );

with numPoints = good_matches.size() in my case, i.e. the number of feature points) as described in the documentation. It would be very helpful if anyone could suggest how to implement the centroid together with SURF.

#include <stdio.h>
#include <iostream>
#include "opencv2/core/core.hpp"
#include "opencv2/features2d/features2d.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/calib3d/calib3d.hpp"

using namespace cv;

int main()
{
    Mat object = imread( "object.png", CV_LOAD_IMAGE_GRAYSCALE );

    if( !object.data )
    {
        std::cout << "Error reading object " << std::endl;
        return -1;
    }

    //Detect the keypoints using SURF Detector
    int minHessian = 500;

    SurfFeatureDetector detector( minHessian );
    std::vector<KeyPoint> kp_object;

    detector.detect( object, kp_object );

    //Calculate descriptors (feature vectors)
    SurfDescriptorExtractor extractor;
    Mat des_object;

    extractor.compute( object, kp_object, des_object );

    FlannBasedMatcher matcher;

    VideoCapture cap(0);

    namedWindow("Good Matches");

    std::vector<Point2f> obj_corners(4);

    //Get the corners from the object
    obj_corners[0] = cvPoint(0,0);
    obj_corners[1] = cvPoint( object.cols, 0 );
    obj_corners[2] = cvPoint( object.cols, object.rows );
    obj_corners[3] = cvPoint( 0, object.rows );

    char key = 'a';
    int framecount = 0;
    while (key != 27)
    {
        Mat frame;
        cap >> frame;

        if (framecount < 5)
        {
            framecount++;
            continue;
        }

        Mat des_image, img_matches;
        std::vector<KeyPoint> kp_image;
        std::vector<std::vector<DMatch> > matches;
        std::vector<DMatch> good_matches;
        std::vector<Point2f> obj;
        std::vector<Point2f> scene;
        std::vector<Point2f> scene_corners(4);
        Mat H;
        Mat image;

        cvtColor(frame, image, CV_RGB2GRAY);

        detector.detect( image, kp_image );
        extractor.compute( image, kp_image, des_image );

        matcher.knnMatch(des_object, des_image, matches, 2);

        for(int i = 0; i < min(des_image.rows-1,(int) matches.size()); i++) //THIS LOOP IS SENSITIVE TO SEGFAULTS
        {
            //Ratio test: keep the best match only if it is clearly better than the second-best one
            if((matches[i][0].distance < 0.6*(matches[i][1].distance)) && ((int) matches[i].size()<=2 && (int) matches[i].size()>0))
            {
                good_matches.push_back(matches[i][0]);
            }
        }

        //Draw only "good" matches
        drawMatches( object, kp_object, image, kp_image, good_matches, img_matches, Scalar::all(-1), Scalar::all(-1), std::vector<char>(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS );

        if (good_matches.size() >= 4)
        {
            for( size_t i = 0; i < good_matches.size(); i++ )
            {
                //Get the keypoints from the good matches
                obj.push_back( kp_object[ good_matches[i].queryIdx ].pt );
                scene.push_back( kp_image[ good_matches[i].trainIdx ].pt );
            }

            H = findHomography( obj, scene, CV_RANSAC );

            perspectiveTransform( obj_corners, scene_corners, H);

            //Draw lines between the corners (the mapped object in the scene image)
            line( img_matches, scene_corners[0] + Point2f( object.cols, 0), scene_corners[1] + Point2f( object.cols, 0), Scalar(0, 255, 0), 4 );
            line( img_matches, scene_corners[1] + Point2f( object.cols, 0), scene_corners[2] + Point2f( object.cols, 0), Scalar( 0, 255, 0), 4 );
            line( img_matches, scene_corners[2] + Point2f( object.cols, 0), scene_corners[3] + Point2f( object.cols, 0), Scalar( 0, 255, 0), 4 );
            line( img_matches, scene_corners[3] + Point2f( object.cols, 0), scene_corners[0] + Point2f( object.cols, 0), Scalar( 0, 255, 0), 4 );
        }

        //Show detected matches
        imshow( "Good Matches", img_matches );

        key = waitKey(1);
    }
    return 0;
}

Best Answer

So, you already have your list of points:

obj.push_back( kp_object[ good_matches[i].queryIdx ].pt );
scene.push_back( kp_image[ good_matches[i].trainIdx ].pt );

Computing the centroid from these points is, I think, perfectly valid; no further image processing is needed.

There are two ways to do it. The "center of mass" way is simply the average position of all the points:

Point2f cen(0,0);
for ( size_t i=0; i<scene.size(); i++ )
{
    cen.x += scene[i].x;
    cen.y += scene[i].y;
}
cen.x /= scene.size();
cen.y /= scene.size();

And the "bbox center" way:

Point2f pmin(1000000,1000000);
Point2f pmax(0,0);
for ( size_t i=0; i<scene.size(); i++ )
{
    if ( scene[i].x < pmin.x ) pmin.x = scene[i].x;
    if ( scene[i].y < pmin.y ) pmin.y = scene[i].y;
    if ( scene[i].x > pmax.x ) pmax.x = scene[i].x;
    if ( scene[i].y > pmax.y ) pmax.y = scene[i].y;
}
// the box center is the midpoint between the min and max corners
Point2f cen( (pmin.x+pmax.x)/2, (pmin.y+pmax.y)/2 );

Note that the two results will differ! They coincide only for point-symmetric objects such as circles and squares; if, say, most of the keypoints cluster on one side of the object, the mean shifts toward that cluster while the bbox center does not.

// now draw a circle around the centroid
// (img is whatever image you are displaying, e.g. the scene frame):
cv::circle( img, cen, 10, Scalar(0,0,255), 2 );

// and a line connecting the query and train points
// (do this inside the loop over the good matches, where i is valid):
cv::line( img, scene[i], obj[i], Scalar(255,0,0), 2 );
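
Since the original goal was to recover the motion path, a natural next step (not part of the original answer, just a sketch) is to compute this centroid once per frame and collect the values over time. A minimal helper along those lines is below; the names updateTrajectory and trajectory are made up for illustration, and it assumes it is called inside the asker's while loop right after scene has been filled from the good matches.

#include <vector>
#include "opencv2/core/core.hpp"
#include "opencv2/imgproc/imgproc.hpp"

// Append the centroid of this frame's matched scene points to the trajectory
// and draw the path accumulated so far onto the display image.
void updateTrajectory( const std::vector<cv::Point2f>& scene,
                       std::vector<cv::Point2f>& trajectory,
                       cv::Mat& display )
{
    if ( scene.empty() ) return;

    // mean of the matched scene points, as in the answer above
    cv::Point2f cen(0,0);
    for ( size_t i=0; i<scene.size(); i++ )
    {
        cen.x += scene[i].x;
        cen.y += scene[i].y;
    }
    cen.x /= scene.size();
    cen.y /= scene.size();
    trajectory.push_back(cen);

    // connect consecutive centroids to visualise the path of the object
    for ( size_t i=1; i<trajectory.size(); i++ )
        cv::line( display, trajectory[i-1], trajectory[i], cv::Scalar(0,0,255), 2 );
    cv::circle( display, cen, 10, cv::Scalar(0,0,255), 2 );
}

To use it, declare std::vector<cv::Point2f> trajectory; once before the while loop, call updateTrajectory(scene, trajectory, frame); inside the good_matches.size() >= 4 block, and show frame with imshow as usual (or draw into img_matches instead, adding the Point2f(object.cols, 0) offset to the points as done for the corners).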

Regarding c++ - OpenCV : How to find the center of mass/centroid for motion information, we found a similar question on Stack Overflow: https://stackoverflow.com/questions/14995512/
