
c++ - Computing 3D world points from 2D image points using OpenCV


I am developing an application for iOS. I am using the camera matrix according to the book Mastering OpenCV. In my scenario I have a well-known box: I know its real dimensions and I know exactly the pixel coordinates of its corners. Using this information I compute the camera rotation and the translation vector. From these parameters I am able to compute the camera position. I check my computation by projecting the 3D world coordinates back onto the image, and I get very accurate results.
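To make the camera-position step explicit (this is the standard pose inversion used by the cameraPosition expression in the code below, stated here as a sketch):

R = Rodrigues(rvec)            // 3x3 rotation matrix from the rotation vector returned by solvePnP
cameraPosition = -R^T * tvec   // camera center expressed in world coordinates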

In my case, the world origin is the middle of the bottom line of the box. The box is open on one side, and the image is taken in that direction, so I can see what is inside the box.

Now I have an object inside the box. I know exactly the image coordinates (2D) of its corners, and I know the real height of those corners (the real Y, with Y ≠ 0). How can I compute the world X and Z of the object's corners?

Here is my code:

#include "opencv2/core/core.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/calib3d/calib3d.hpp"
#include "opencv2/highgui/highgui.hpp"

#include <iostream>
#include <ctype.h>

using namespace cv;
using namespace std;


Point2f point;
vector<vector<Point2f>> objectPoints(1);
vector<vector<Point2f>> boxPoints(1);

Point3f calc3DPointOutOf2DwithYknown(double u, double v, float worldY, double fx, double fy, double cx, double cy, Mat tvec, Mat rotMat)
{
    Point3f tmpPoint;

    // This is the function I need to complete
    return tmpPoint;
}

int main( int argc, char** argv )
{
    ///////// Loading image
    Mat sourceImage = imread("/Users/Ilan/Xcode/LK Test/LK Test/images/box_center640X480.jpg");

    namedWindow( "Source", 1 );

    ///// Setting box corners /////
    point = Point2f((float)102,(float)367.5); //640X480
    boxPoints[0].push_back(point);
    circle( sourceImage, boxPoints[0][0], 3, Scalar(0,255,0), -1, 8);

    point = Point2f((float)83,(float)90.5); //640X480
    boxPoints[0].push_back(point);
    circle( sourceImage, boxPoints[0][1], 3, Scalar(0,255,0), -1, 8);

    point = Point2f((float)520,(float)82.5); //640X480
    boxPoints[0].push_back(point);
    circle( sourceImage, boxPoints[0][2], 3, Scalar(0,255,0), -1, 8);

    point = Point2f((float)510.5,(float)361); //640X480
    boxPoints[0].push_back(point);
    circle( sourceImage, boxPoints[0][3], 3, Scalar(0,255,0), -1, 8);

    ///// Setting object corners /////
    point = Point2f((float)403.5,(float)250); //640X480
    objectPoints[0].push_back(point);
    circle( sourceImage, objectPoints[0][0], 3, Scalar(0,255,0), -1, 8);

    point = Point2f((float)426.5,(float)251.5); //640X480
    objectPoints[0].push_back(point);
    circle( sourceImage, objectPoints[0][1], 3, Scalar(0,255,0), -1, 8);

    imshow("Source", sourceImage);

    vector<vector<Point3f>> worldBoxPoints(1);
    Point3f tmpPoint;

    tmpPoint = Point3f((float)-100,(float)0,(float)0);
    worldBoxPoints[0].push_back(tmpPoint);
    tmpPoint = Point3f((float)-100,(float)-150,(float)0);
    worldBoxPoints[0].push_back(tmpPoint);
    tmpPoint = Point3f((float)100,(float)-150,(float)0);
    worldBoxPoints[0].push_back(tmpPoint);
    tmpPoint = Point3f((float)100,(float)0,(float)0);
    worldBoxPoints[0].push_back(tmpPoint);

    std::cout << "There are " << boxPoints[0].size() << " roomPoints and " << worldBoxPoints[0].size() << " worldRoomPoints." << std::endl;

    cv::Mat cameraMatrix1(3,3,cv::DataType<double>::type);
    cv::setIdentity(cameraMatrix1);

    cv::Mat distCoeffs1(4,1,cv::DataType<double>::type);
    distCoeffs1.at<double>(0) = 0;
    distCoeffs1.at<double>(1) = 0;
    distCoeffs1.at<double>(2) = 0;
    distCoeffs1.at<double>(3) = 0;

    // Taken from Mastering OpenCV
    double fx = 6.24860291e+02 * ((float)(sourceImage.cols)/352.);
    double fy = 6.24860291e+02 * ((float)(sourceImage.rows)/288.);
    double cx = (float)(sourceImage.cols)/2.;
    double cy = (float)(sourceImage.rows)/2.;

    cameraMatrix1.at<double>(0, 0) = fx;
    cameraMatrix1.at<double>(1, 1) = fy;
    cameraMatrix1.at<double>(0, 2) = cx;
    cameraMatrix1.at<double>(1, 2) = cy;

    std::cout << "After calib cameraMatrix --- 1: " << cameraMatrix1 << std::endl;
    std::cout << "After calib distCoeffs: --- 1" << distCoeffs1 << std::endl;

    cv::Mat rvec1(3,1,cv::DataType<double>::type);
    cv::Mat tvec1(3,1,cv::DataType<double>::type);

    cv::solvePnP(worldBoxPoints[0], boxPoints[0], cameraMatrix1, distCoeffs1, rvec1, tvec1);

    std::cout << "rvec --- 1: " << rvec1 << std::endl;
    std::cout << "tvec --- 1: " << tvec1 << std::endl;

    cv::Mat rvecM1(3,3,cv::DataType<double>::type);
    cv::Rodrigues(rvec1,rvecM1);

    std::cout << "cameraRotation --- 1 : " << rvecM1 << std::endl;
    std::cout << "cameraPosition --- 1 : " << (rvecM1.t())*((-1.0)*tvec1) << std::endl;

    std::vector<cv::Point2f> projectedPoints1;
    cv::projectPoints(worldBoxPoints[0], rvec1, tvec1, cameraMatrix1, distCoeffs1, projectedPoints1);

    for(unsigned int i = 0; i < projectedPoints1.size(); ++i)
    {
        std::cout << "box point --- 1: " << boxPoints[0][i] << " Projected to --- 1: " << projectedPoints1[i] << std::endl;
    }

    vector<vector<Point3f>> worldObjectPoints(1);

    tmpPoint = calc3DPointOutOf2DwithYknown(objectPoints[0][0].x, objectPoints[0][0].y, /*the real Y of the object*/ -40.0, fx, fy, cx, cy, tvec1, rvecM1);
    worldObjectPoints[0].push_back(tmpPoint);

    tmpPoint = calc3DPointOutOf2DwithYknown(objectPoints[0][1].x, objectPoints[0][1].y, /*the real Y of the object*/ -40.0, fx, fy, cx, cy, tvec1, rvecM1);
    worldObjectPoints[0].push_back(tmpPoint);

    cv::projectPoints(worldObjectPoints[0], rvec1, tvec1, cameraMatrix1, distCoeffs1, projectedPoints1);
    for(unsigned int i = 0; i < projectedPoints1.size(); ++i)
    {
        std::cout << "object point --- 1: " << objectPoints[0][i] << " Projected to --- 1: " << projectedPoints1[i] << std::endl;
    }

    waitKey(0);

    return 0;
}

So, I would like to implement the calc3DPointOutOf2DwithYknown function. The parameters are, of course, according to my current understanding; if other parameters are needed, I will use them instead.

Thanks a lot, Ilan

Best Answer

I managed to solve it myself. In case it helps anyone, here is the code:

Point3f calc3DPointOutOf2DwithYknown(double u, double v, float worldY, double fx, double fy, double cx, double cy, Mat tvec, Mat rotMat)
{
    Point3f tmpPoint;

    // Rotation matrix entries (row-major)
    float r1 = rotMat.at<double>(0,0);
    float r2 = rotMat.at<double>(0,1);
    float r3 = rotMat.at<double>(0,2);

    float r4 = rotMat.at<double>(1,0);
    float r5 = rotMat.at<double>(1,1);
    float r6 = rotMat.at<double>(1,2);

    float r7 = rotMat.at<double>(2,0);
    float r8 = rotMat.at<double>(2,1);
    float r9 = rotMat.at<double>(2,2);

    // Translation vector entries
    float t1 = tvec.at<double>(0,0);
    float t2 = tvec.at<double>(1,0);
    float t3 = tvec.at<double>(2,0);

    // Normalized image coordinates
    float xt = (u/fx) - (cx/fx);
    float yt = (v/fy) - (cy/fy);

    // From the first projection equation (x direction), with Y known: X*K3 = K1 + K2*Z
    float K1 = xt*r8*worldY + xt*t3 - r2*worldY - t1;
    float K2 = xt*r9 - r3;
    float K3 = r1 - xt*r7;

    // Substitute X = (K1 + K2*Z)/K3 into the second projection equation (y direction) and solve for Z
    float worldZ = (yt*r7*K1 + yt*K3*r8*worldY + yt*K3*t3 - r4*K1 - K3*r5*worldY - K3*t2)/
                   (r4*K2 + K3*r6 - yt*r7*K2 - yt*K3*r9);

    // Back-substitute to get X
    float worldX = (K1 + worldZ*K2)/K3;

    tmpPoint = Point3f(worldX, worldY, worldZ);

    return tmpPoint;
}
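For anyone who wants to follow the algebra, here is a sketch of the derivation behind these formulas (same variable names as the code; it assumes the standard pinhole model with rotation entries r1..r9 and translation entries t1..t3):

xt = (u - cx)/fx = (r1*X + r2*Y + r3*Z + t1) / (r7*X + r8*Y + r9*Z + t3)
yt = (v - cy)/fy = (r4*X + r5*Y + r6*Z + t2) / (r7*X + r8*Y + r9*Z + t3)

With Y = worldY known, the first equation rearranges to X*K3 = K1 + K2*Z, where K1, K2 and K3 are exactly the terms computed above. Substituting X = (K1 + K2*Z)/K3 into the second equation and solving for Z gives the worldZ expression, and worldX then follows by back-substitution.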

Regarding c++ - Computing 3D world points from 2D image points using OpenCV, a similar question can be found on Stack Overflow: https://stackoverflow.com/questions/24944266/
