
c++ - OpenCV undistortPoints does not give the exact inverse of the distortion model


I am running some tests on OpenCV's distortion model. Basically, I implemented the distortion equations myself and checked whether cv::undistortPoints gives the inverse of those equations. I realized that cv::undistortPoints does not give you the exact inverse of the distortion equations. Looking at the implementation of cv::undistortPoints, I noticed that in the termination condition of its iterative process, OpenCV always performs 5 iterations when inverting the distortion model (and effectively 0 iterations if no distortion coefficients are provided to the function), and it does not apply any error metric to the undistorted point to check whether the point is accurately undistorted. With this in mind, I copied the function and modified the termination condition of the iterative process to take an error metric into account. This gives me the exact inverse of the distortion model. The code showing this is attached at the end of this post. My question is:
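For reference, the model being inverted is OpenCV's rational distortion model (as given in the calib3d documentation). In normalized coordinates, with $r^2 = x^2 + y^2$:

$$x_d = x\,\mathrm{cdist}(r^2) + 2p_1 x y + p_2(r^2 + 2x^2)$$
$$y_d = y\,\mathrm{cdist}(r^2) + p_1(r^2 + 2y^2) + 2p_2 x y$$
$$\mathrm{cdist}(r^2) = \frac{1 + k_1 r^2 + k_2 r^4 + k_3 r^6}{1 + k_4 r^2 + k_5 r^4 + k_6 r^6}$$

cv::undistortPoints inverts it with the fixed-point iteration

$$x \leftarrow \frac{x_d - \Delta_x(x,y)}{\mathrm{cdist}(r^2)}, \qquad y \leftarrow \frac{y_d - \Delta_y(x,y)}{\mathrm{cdist}(r^2)},$$

where $\Delta_x, \Delta_y$ are the tangential terms above, and it always runs exactly 5 of these steps when distortion coefficients are given.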

Does this happen because OpenCV prefers performance (less computation time) over accuracy (more computation time), or is it just a "bug"? (Obviously, with the termination condition I propose, the function will take more time to undistort each point.)

Thanks a lot!

#include <opencv2/core.hpp>
#include <opencv2/core/core_c.h> // legacy C API (CvMat, cvConvert, ...), OpenCV 2.x/3.x
#include <opencv2/imgproc.hpp>
#include <cmath>
#include <iostream>
#include <sstream>
#include <stdexcept>
#include <vector>

using namespace cv;

// This is a copy of the OpenCV implementation
void cvUndistortPoints_copy( const CvMat* _src, CvMat* _dst, const CvMat* _cameraMatrix,
                             const CvMat* _distCoeffs,
                             const CvMat* matR, const CvMat* matP )
{
    double A[3][3], RR[3][3], k[8]={0,0,0,0,0,0,0,0}, fx, fy, ifx, ify, cx, cy;
    CvMat matA=cvMat(3, 3, CV_64F, A), _Dk;
    CvMat _RR=cvMat(3, 3, CV_64F, RR);
    const CvPoint2D32f* srcf;
    const CvPoint2D64f* srcd;
    CvPoint2D32f* dstf;
    CvPoint2D64f* dstd;
    int stype, dtype;
    int sstep, dstep;
    int i, j, n, iters = 1;

    CV_Assert( CV_IS_MAT(_src) && CV_IS_MAT(_dst) &&
        (_src->rows == 1 || _src->cols == 1) &&
        (_dst->rows == 1 || _dst->cols == 1) &&
        _src->cols + _src->rows - 1 == _dst->rows + _dst->cols - 1 &&
        (CV_MAT_TYPE(_src->type) == CV_32FC2 || CV_MAT_TYPE(_src->type) == CV_64FC2) &&
        (CV_MAT_TYPE(_dst->type) == CV_32FC2 || CV_MAT_TYPE(_dst->type) == CV_64FC2));

    CV_Assert( CV_IS_MAT(_cameraMatrix) &&
        _cameraMatrix->rows == 3 && _cameraMatrix->cols == 3 );

    cvConvert( _cameraMatrix, &matA );

    if( _distCoeffs )
    {
        CV_Assert( CV_IS_MAT(_distCoeffs) &&
            (_distCoeffs->rows == 1 || _distCoeffs->cols == 1) &&
            (_distCoeffs->rows*_distCoeffs->cols == 4 ||
             _distCoeffs->rows*_distCoeffs->cols == 5 ||
             _distCoeffs->rows*_distCoeffs->cols == 8));

        _Dk = cvMat( _distCoeffs->rows, _distCoeffs->cols,
            CV_MAKETYPE(CV_64F,CV_MAT_CN(_distCoeffs->type)), k);

        cvConvert( _distCoeffs, &_Dk );
        iters = 5;
    }
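    // Note: `iters` is preserved from the original implementation but is no
    // longer used below; the modified loop relies on max_iters and the error
    // metric `e` instead.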

    if( matR )
    {
        CV_Assert( CV_IS_MAT(matR) && matR->rows == 3 && matR->cols == 3 );
        cvConvert( matR, &_RR );
    }
    else
        cvSetIdentity(&_RR);

    if( matP )
    {
        double PP[3][3];
        CvMat _P3x3, _PP=cvMat(3, 3, CV_64F, PP);
        CV_Assert( CV_IS_MAT(matP) && matP->rows == 3 && (matP->cols == 3 || matP->cols == 4));
        cvConvert( cvGetCols(matP, &_P3x3, 0, 3), &_PP );
        cvMatMul( &_PP, &_RR, &_RR );
    }

    srcf = (const CvPoint2D32f*)_src->data.ptr;
    srcd = (const CvPoint2D64f*)_src->data.ptr;
    dstf = (CvPoint2D32f*)_dst->data.ptr;
    dstd = (CvPoint2D64f*)_dst->data.ptr;
    stype = CV_MAT_TYPE(_src->type);
    dtype = CV_MAT_TYPE(_dst->type);
    sstep = _src->rows == 1 ? 1 : _src->step/CV_ELEM_SIZE(stype);
    dstep = _dst->rows == 1 ? 1 : _dst->step/CV_ELEM_SIZE(dtype);

    n = _src->rows + _src->cols - 1;

    fx = A[0][0];
    fy = A[1][1];
    ifx = 1./fx;
    ify = 1./fy;
    cx = A[0][2];
    cy = A[1][2];

    for( i = 0; i < n; i++ )
    {
        double x, y, x0, y0;
        if( stype == CV_32FC2 )
        {
            x = srcf[i*sstep].x;
            y = srcf[i*sstep].y;
        }
        else
        {
            x = srcd[i*sstep].x;
            y = srcd[i*sstep].y;
        }

        x0 = x = (x - cx)*ifx;
        y0 = y = (y - cy)*ify;

        // compensate distortion iteratively
        int max_iters(500);
        double e(1);
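        // k[] layout: k[0..1] = k1,k2 (radial), k[2..3] = p1,p2 (tangential),
        // k[4..7] = k3..k6. Each fixed-point step multiplies by icdist, the
        // reciprocal of the forward radial factor, and subtracts the tangential
        // offset. The loop stops when two consecutive iterates are identical to
        // machine precision (e == 0) or after max_iters steps, rather than
        // after OpenCV's fixed 5 iterations.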
        for( j = 0; j < max_iters && e > 0; j++ )
        {
            double r2 = x*x + y*y;
            double icdist = (1 + ((k[7]*r2 + k[6])*r2 + k[5])*r2)/(1 + ((k[4]*r2 + k[1])*r2 + k[0])*r2);
            double deltaX = 2*k[2]*x*y + k[3]*(r2 + 2*x*x);
            double deltaY = k[2]*(r2 + 2*y*y) + 2*k[3]*x*y;
            double xant = x;
            double yant = y;
            x = (x0 - deltaX)*icdist;
            y = (y0 - deltaY)*icdist;
            e = pow(xant - x,2) + pow(yant - y,2);
        }

        double xx = RR[0][0]*x + RR[0][1]*y + RR[0][2];
        double yy = RR[1][0]*x + RR[1][1]*y + RR[1][2];
        double ww = 1./(RR[2][0]*x + RR[2][1]*y + RR[2][2]);
        x = xx*ww;
        y = yy*ww;

        if( dtype == CV_32FC2 )
        {
            dstf[i*dstep].x = (float)x;
            dstf[i*dstep].y = (float)y;
        }
        else
        {
            dstd[i*dstep].x = x;
            dstd[i*dstep].y = y;
        }
    }
}
void undistortPoints_copy( InputArray _src, OutputArray _dst,
                           InputArray _cameraMatrix,
                           InputArray _distCoeffs,
                           InputArray _Rmat=noArray(),
                           InputArray _Pmat=noArray() )
{
    Mat src = _src.getMat(), cameraMatrix = _cameraMatrix.getMat();
    Mat distCoeffs = _distCoeffs.getMat(), R = _Rmat.getMat(), P = _Pmat.getMat();

    CV_Assert( src.isContinuous() && (src.depth() == CV_32F || src.depth() == CV_64F) &&
        ((src.rows == 1 && src.channels() == 2) || src.cols*src.channels() == 2));

    _dst.create(src.size(), src.type(), -1, true);
    Mat dst = _dst.getMat();

    CvMat _csrc = src, _cdst = dst, _ccameraMatrix = cameraMatrix;
    CvMat matR, matP, _cdistCoeffs, *pR=0, *pP=0, *pD=0;
    if( R.data )
        pR = &(matR = R);
    if( P.data )
        pP = &(matP = P);
    if( distCoeffs.data )
        pD = &(_cdistCoeffs = distCoeffs);
    cvUndistortPoints_copy(&_csrc, &_cdst, &_ccameraMatrix, pD, pR, pP);
}

// Distortion model implementation
cv::Point2d distortPoint(cv::Point2d undistorted_point, cv::Mat camera_matrix,
                         std::vector<double> distort_coefficients){

    // Check that the camera matrix is double
    if (!(camera_matrix.type() == CV_64F || camera_matrix.type() == CV_64FC1)){
        std::ostringstream oss;
        oss<<"distortPoint(): Camera matrix type is wrong. It has to be a double matrix (CV_64F)";
        throw std::runtime_error(oss.str());
    }

    // Create distorted point
    cv::Point2d distortedPoint;
    distortedPoint.x = (undistorted_point.x - camera_matrix.at<double>(0,2))/camera_matrix.at<double>(0,0);
    distortedPoint.y = (undistorted_point.y - camera_matrix.at<double>(1,2))/camera_matrix.at<double>(1,1);

    // Get model
    if (distort_coefficients.size() < 4 || distort_coefficients.size() > 8){
        throw std::runtime_error("distortPoint(): Invalid number of distortion coefficients.");
    }
    double k1(distort_coefficients[0]);
    double k2(distort_coefficients[1]);
    double p1(distort_coefficients[2]); // tangential distortion, first coefficient
    double p2(distort_coefficients[3]); // tangential distortion, second coefficient
    double k3(0);
    double k4(0);
    double k5(0);
    double k6(0);
    if (distort_coefficients.size() > 4)
        k3 = distort_coefficients[4];
    if (distort_coefficients.size() > 5)
        k4 = distort_coefficients[5];
    if (distort_coefficients.size() > 6)
        k5 = distort_coefficients[6];
    if (distort_coefficients.size() > 7)
        k6 = distort_coefficients[7];

    // Distort
    double xcx = distortedPoint.x;
    double ycy = distortedPoint.y;
    double r2 = pow(xcx, 2) + pow(ycy, 2);
    double r4 = pow(r2, 2);
    double r6 = pow(r2, 3);
    double k = (1 + k1*r2 + k2*r4 + k3*r6)/(1 + k4*r2 + k5*r4 + k6*r6);
    distortedPoint.x = xcx*k + 2*p1*xcx*ycy + p2*(r2 + 2*pow(xcx,2));
    distortedPoint.y = ycy*k + p1*(r2 + 2*pow(ycy,2)) + 2*p2*xcx*ycy;
    distortedPoint.x = distortedPoint.x*camera_matrix.at<double>(0,0) + camera_matrix.at<double>(0,2);
    distortedPoint.y = distortedPoint.y*camera_matrix.at<double>(1,1) + camera_matrix.at<double>(1,2);

    // Exit
    return distortedPoint;
}

int main(int argc, char** argv){

    // Camera matrix
    double cam_mat_da[] = {1486.58092,0,1046.72507,0,1489.8659,545.374244,0,0,1};
    cv::Mat cam_mat(3,3,CV_64FC1,cam_mat_da);

    // Distortion coefficients
    double dist_coefs_da[] = {-0.13827409,0.29240721,-0.00088197,-0.00090189,0};
    std::vector<double> dist_coefs(dist_coefs_da, dist_coefs_da+5);

    // Distorted point
    cv::Point2d p0(0,0);
    std::vector<cv::Point2d> p0_v;
    p0_v.push_back(p0);

    // Undistort point
    std::vector<cv::Point2d> ud_p_v;
    cv::undistortPoints(p0_v, ud_p_v, cam_mat, dist_coefs);
    cv::Point2d ud_p = ud_p_v[0];
    ud_p.x = ud_p.x*cam_mat.at<double>(0,0) + cam_mat.at<double>(0,2);
    ud_p.y = ud_p.y*cam_mat.at<double>(1,1) + cam_mat.at<double>(1,2);

    // Redistort point
    cv::Point2d p = distortPoint(ud_p, cam_mat, dist_coefs);

    // Undistort point using our own termination of the iterative process
    std::vector<cv::Point2d> ud_p_v_local;
    undistortPoints_copy(p0_v, ud_p_v_local, cam_mat, dist_coefs);
    cv::Point2d ud_p_local = ud_p_v_local[0];
    ud_p_local.x = ud_p_local.x*cam_mat.at<double>(0,0) + cam_mat.at<double>(0,2);
    ud_p_local.y = ud_p_local.y*cam_mat.at<double>(1,1) + cam_mat.at<double>(1,2);

    // Redistort point
    cv::Point2d p_local = distortPoint(ud_p_local, cam_mat, dist_coefs);

    // Display results
    std::cout<<"Distorted original point: "<<p0<<std::endl;
    std::cout<<"Undistorted point (CV): "<<ud_p<<std::endl;
    std::cout<<"Distorted point (CV): "<<p<<std::endl;
    std::cout<<"Error in the distorted point (CV): "<<sqrt(pow(p.x-p0.x,2)+pow(p.y-p0.y,2))<<std::endl;
    std::cout<<"Undistorted point (Local): "<<ud_p_local<<std::endl;
    std::cout<<"Distorted point (Local): "<<p_local<<std::endl;
    std::cout<<"Error in the distorted point (Local): "<<sqrt(pow(p_local.x-p0.x,2)+pow(p_local.y-p0.y,2))<<std::endl;

    // Exit
    return 0;
}

Best Answer

As suggested, you can get the actual motivation from the OpenCV forums. Note, however, that historically OpenCV has been developed with real-time or near-real-time applications in mind (for example, the DARPA Grand Challenge), so you will easily find code in it that is optimized for speed over accuracy.
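A side note: OpenCV 4.x adds an overload of cv::undistortPoints that accepts a cv::TermCriteria (exposed in Python as undistortPointsIter), so the iteration count and tolerance can be chosen without copying the implementation. A minimal sketch, assuming an OpenCV 4.x build:

#include <opencv2/calib3d.hpp> // undistortPoints overload with TermCriteria
#include <opencv2/core.hpp>
#include <iostream>
#include <vector>

int main(){
    // Same intrinsics and distortion coefficients as in the question
    cv::Mat K = (cv::Mat_<double>(3,3) << 1486.58092, 0, 1046.72507,
                                          0, 1489.8659, 545.374244,
                                          0, 0, 1);
    std::vector<double> dist = {-0.13827409, 0.29240721, -0.00088197, -0.00090189, 0};
    std::vector<cv::Point2d> src = {cv::Point2d(0,0)}, dst;

    // Run up to 100 iterations, or stop once the update falls below 1e-12,
    // instead of the fixed 5 iterations of the default overload.
    cv::undistortPoints(src, dst, K, dist, cv::noArray(), cv::noArray(),
                        cv::TermCriteria(cv::TermCriteria::COUNT + cv::TermCriteria::EPS,
                                         100, 1e-12));
    std::cout << dst[0] << std::endl;
    return 0;
}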

Regarding "c++ - OpenCV undistortPoints does not give the exact inverse of the distortion model", a similar question can be found on Stack Overflow: https://stackoverflow.com/questions/44024133/
