
c++ - OpenCV decode Gray code pattern camera calibration error. How do I format the intrinsic and extrinsic results?


This is my first question after lurking for years. Be brutal, because I want to be a good member of a site that has helped me a lot over the years.

I started out with OpenCV in Python, but I'm now using C++. I ran the code below to generate the intrinsics and extrinsics yml files. (The calibration was done with 14 chessboard image pairs.)

The problem I'm having is that when I feed the output yml files into the OpenCV decode Gray code pattern sample (http://docs.opencv.org/master/dc/da9/tutorial_decode_graycode_pattern.html), I get the following output:

------------------------COPY/PASTE from windows 10 powershell---------------------

.\SL_GREYDECODE_V00.exe .\cam1list.yml .\intrinsics.yml 1280 720

cam1intrinsics
[]
cam1distCoeffs
[]
cam2intrinsics
[]
cam2distCoeffs
[]
T
[15.77367340108225;
0.04283622292590028;
5.022783328785999]
R
[0.8813890844929032, -0.01214882539600122, -0.4722347803564023;
0.01599865793973636, 0.9998634526563561, 0.004137509666229421;
0.472120032069059, -0.01120187857496709, 0.8814630980565792]
Failed to load cameras calibration parameters

------------------------------------------------------------------------------

Is there some flaw in the yml files? Am I missing something obvious? I know the values aren't great, but at this point I want to make sure I can actually get through the whole process, and then work on improving the calibration accuracy.
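
In case it helps narrow it down: my understanding is that the decode sample just looks the matrices up by node name with cv::FileStorage, and a node name that isn't present in the file reads back as an empty Mat. A minimal sketch of that behaviour (the node name "cam1intrinsics" is only my guess at what the sample asks for, based on the output above):

    #include <opencv2/core.hpp>
    #include <iostream>

    int main()
    {
        // Open the file produced by the stereo calibration program.
        cv::FileStorage fs("intrinsics.yml", cv::FileStorage::READ);

        cv::Mat m;
        fs["cam1intrinsics"] >> m;                 // no node with this name; the file only has "M1"
        std::cout << "cam1intrinsics " << m << std::endl;   // an empty Mat prints as []

        fs["M1"] >> m;                             // the node that actually exists
        std::cout << "M1 " << m << std::endl;      // prints the 3x3 camera matrix

        fs.release();
        return 0;
    }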

Any help would be greatly appreciated.

Full stereo calibration code:

   /* This is sample from the OpenCV book. The copyright notice is below */

/* *************** License:**************************
Oct. 3, 2008
Right to use this code in any way you want without warranty, support or any guarantee of it working.

BOOK: It would be nice if you cited it:
Learning OpenCV: Computer Vision with the OpenCV Library
by Gary Bradski and Adrian Kaehler
Published by O'Reilly Media, October 3, 2008

AVAILABLE AT:
http://www.amazon.com/Learning-OpenCV-Computer-Vision-Library/dp/0596516134
Or: http://oreilly.com/catalog/9780596516130/
ISBN-10: 0596516134 or: ISBN-13: 978-0596516130

OPENCV WEBSITES:
Homepage: http://opencv.org
Online docs: http://docs.opencv.org
Q&A forum: http://answers.opencv.org
Issue tracker: http://code.opencv.org
GitHub: https://github.com/opencv/opencv/
************************************************** */

#include "opencv2/calib3d.hpp"
#include "opencv2/imgcodecs.hpp"
#include "opencv2/highgui.hpp"
#include "opencv2/imgproc.hpp"

#include <vector>
#include <string>
#include <algorithm>
#include <iostream>
#include <iterator>
#include <stdio.h>
#include <stdlib.h>
#include <ctype.h>

using namespace cv;
using namespace std;

static int print_help()
{
cout <<
" Given a list of chessboard images, the number of corners (nx, ny)\n"
" on the chessboards, and a flag: useCalibrated for \n"
" calibrated (0) or\n"
" uncalibrated \n"
" (1: use cvStereoCalibrate(), 2: compute fundamental\n"
" matrix separately) stereo. \n"
" Calibrate the cameras and display the\n"
" rectified results along with the computed disparity images. \n" << endl;
cout << "Usage:\n ./stereo_calib -w=<board_width default=9> -h=<board_height default=6> -s=<square_size default=1.0> <image list XML/YML file default=../data/stereo_calib.xml>\n" << endl;
return 0;
}


static void
StereoCalib(const vector<string>& imagelist, Size boardSize, float squareSize, bool displayCorners = false, bool useCalibrated=true, bool showRectified=true)
{
if( imagelist.size() % 2 != 0 )
{
cout << "Error: the image list contains odd (non-even) number of elements\n";
return;
}

const int maxScale = 2;
// ARRAY AND VECTOR STORAGE:

vector<vector<Point2f> > imagePoints[2];
vector<vector<Point3f> > objectPoints;
Size imageSize;

int i, j, k, nimages = (int)imagelist.size()/2;

imagePoints[0].resize(nimages);
imagePoints[1].resize(nimages);
vector<string> goodImageList;

for( i = j = 0; i < nimages; i++ )
{
for( k = 0; k < 2; k++ )
{
const string& filename = imagelist[i*2+k];
Mat img = imread(filename, 0);
if(img.empty())
break;
if( imageSize == Size() )
imageSize = img.size();
else if( img.size() != imageSize )
{
cout << "The image " << filename << " has the size different from the first image size. Skipping the pair\n";
break;
}
bool found = false;
vector<Point2f>& corners = imagePoints[k][j];
for( int scale = 1; scale <= maxScale; scale++ )
{
Mat timg;
if( scale == 1 )
timg = img;
else
resize(img, timg, Size(), scale, scale);
found = findChessboardCorners(timg, boardSize, corners,
CALIB_CB_ADAPTIVE_THRESH | CALIB_CB_NORMALIZE_IMAGE);
if( found )
{
if( scale > 1 )
{
Mat cornersMat(corners);
cornersMat *= 1./scale;
}
break;
}
}
if( displayCorners )
{
cout << filename << endl;
Mat cimg, cimg1;
cvtColor(img, cimg, COLOR_GRAY2BGR);
drawChessboardCorners(cimg, boardSize, corners, found);
double sf = 640./MAX(img.rows, img.cols);
resize(cimg, cimg1, Size(), sf, sf);
imshow("corners", cimg1);
char c = (char)waitKey(500);
if( c == 27 || c == 'q' || c == 'Q' ) //Allow ESC to quit
exit(-1);
}
else
putchar('.');
if( !found )
break;
cornerSubPix(img, corners, Size(11,11), Size(-1,-1),
TermCriteria(TermCriteria::COUNT+TermCriteria::EPS,
30, 0.01));
}
if( k == 2 )
{
goodImageList.push_back(imagelist[i*2]);
goodImageList.push_back(imagelist[i*2+1]);
j++;
}
}
cout << j << " pairs have been successfully detected.\n";
nimages = j;
if( nimages < 2 )
{
cout << "Error: too little pairs to run the calibration\n";
return;
}

imagePoints[0].resize(nimages);
imagePoints[1].resize(nimages);
objectPoints.resize(nimages);

for( i = 0; i < nimages; i++ )
{
for( j = 0; j < boardSize.height; j++ )
for( k = 0; k < boardSize.width; k++ )
objectPoints[i].push_back(Point3f(k*squareSize, j*squareSize, 0));
}

cout << "Running stereo calibration ...\n";

Mat cameraMatrix[2], distCoeffs[2];
cameraMatrix[0] = initCameraMatrix2D(objectPoints,imagePoints[0],imageSize,0);
cameraMatrix[1] = initCameraMatrix2D(objectPoints,imagePoints[1],imageSize,0);
Mat R, T, E, F;

double rms = stereoCalibrate(objectPoints, imagePoints[0], imagePoints[1],
cameraMatrix[0], distCoeffs[0],
cameraMatrix[1], distCoeffs[1],
imageSize, R, T, E, F,
CALIB_FIX_ASPECT_RATIO +
CALIB_ZERO_TANGENT_DIST +
CALIB_USE_INTRINSIC_GUESS +
CALIB_SAME_FOCAL_LENGTH +
CALIB_RATIONAL_MODEL +
CALIB_FIX_K3 + CALIB_FIX_K4 + CALIB_FIX_K5,
TermCriteria(TermCriteria::COUNT+TermCriteria::EPS, 100, 1e-5) );
cout << "done with RMS error=" << rms << endl;

// CALIBRATION QUALITY CHECK
// because the output fundamental matrix implicitly
// includes all the output information,
// we can check the quality of calibration using the
// epipolar geometry constraint: m2^t*F*m1=0
double err = 0;
int npoints = 0;
vector<Vec3f> lines[2];
for( i = 0; i < nimages; i++ )
{
int npt = (int)imagePoints[0][i].size();
Mat imgpt[2];
for( k = 0; k < 2; k++ )
{
imgpt[k] = Mat(imagePoints[k][i]);
undistortPoints(imgpt[k], imgpt[k], cameraMatrix[k], distCoeffs[k], Mat(), cameraMatrix[k]);
computeCorrespondEpilines(imgpt[k], k+1, F, lines[k]);
}
for( j = 0; j < npt; j++ )
{
double errij = fabs(imagePoints[0][i][j].x*lines[1][j][0] +
imagePoints[0][i][j].y*lines[1][j][1] + lines[1][j][2]) +
fabs(imagePoints[1][i][j].x*lines[0][j][0] +
imagePoints[1][i][j].y*lines[0][j][1] + lines[0][j][2]);
err += errij;
}
npoints += npt;
}
cout << "average epipolar err = " << err/npoints << endl;

// save intrinsic parameters
FileStorage fs("intrinsics.yml", FileStorage::WRITE);
if( fs.isOpened() )
{
fs << "M1" << cameraMatrix[0] << "D1" << distCoeffs[0] <<
"M2" << cameraMatrix[1] << "D2" << distCoeffs[1];
fs.release();
}
else
cout << "Error: can not save the intrinsic parameters\n";

Mat R1, R2, P1, P2, Q;
Rect validRoi[2];

stereoRectify(cameraMatrix[0], distCoeffs[0],
cameraMatrix[1], distCoeffs[1],
imageSize, R, T, R1, R2, P1, P2, Q,
CALIB_ZERO_DISPARITY, 1, imageSize, &validRoi[0], &validRoi[1]);

fs.open("extrinsics.yml", FileStorage::WRITE);
if( fs.isOpened() )
{
fs << "R" << R << "T" << T << "R1" << R1 << "R2" << R2 << "P1" << P1 << "P2" << P2 << "Q" << Q;
fs.release();
}
else
cout << "Error: can not save the extrinsic parameters\n";

// OpenCV can handle left-right
// or up-down camera arrangements
bool isVerticalStereo = fabs(P2.at<double>(1, 3)) > fabs(P2.at<double>(0, 3));

// COMPUTE AND DISPLAY RECTIFICATION
if( !showRectified )
return;

Mat rmap[2][2];
// IF BY CALIBRATED (BOUGUET'S METHOD)
if( useCalibrated )
{
// we already computed everything
}
// OR ELSE HARTLEY'S METHOD
else
// use intrinsic parameters of each camera, but
// compute the rectification transformation directly
// from the fundamental matrix
{
vector<Point2f> allimgpt[2];
for( k = 0; k < 2; k++ )
{
for( i = 0; i < nimages; i++ )
std::copy(imagePoints[k][i].begin(), imagePoints[k][i].end(), back_inserter(allimgpt[k]));
}
F = findFundamentalMat(Mat(allimgpt[0]), Mat(allimgpt[1]), FM_8POINT, 0, 0);
Mat H1, H2;
stereoRectifyUncalibrated(Mat(allimgpt[0]), Mat(allimgpt[1]), F, imageSize, H1, H2, 3);

R1 = cameraMatrix[0].inv()*H1*cameraMatrix[0];
R2 = cameraMatrix[1].inv()*H2*cameraMatrix[1];
P1 = cameraMatrix[0];
P2 = cameraMatrix[1];
}

//Precompute maps for cv::remap()
initUndistortRectifyMap(cameraMatrix[0], distCoeffs[0], R1, P1, imageSize, CV_16SC2, rmap[0][0], rmap[0][1]);
initUndistortRectifyMap(cameraMatrix[1], distCoeffs[1], R2, P2, imageSize, CV_16SC2, rmap[1][0], rmap[1][1]);

Mat canvas;
double sf;
int w, h;
if( !isVerticalStereo )
{
sf = 600./MAX(imageSize.width, imageSize.height);
w = cvRound(imageSize.width*sf);
h = cvRound(imageSize.height*sf);
canvas.create(h, w*2, CV_8UC3);
}
else
{
sf = 300./MAX(imageSize.width, imageSize.height);
w = cvRound(imageSize.width*sf);
h = cvRound(imageSize.height*sf);
canvas.create(h*2, w, CV_8UC3);
}

for( i = 0; i < nimages; i++ )
{
for( k = 0; k < 2; k++ )
{
Mat img = imread(goodImageList[i*2+k], 0), rimg, cimg;
remap(img, rimg, rmap[k][0], rmap[k][1], INTER_LINEAR);
cvtColor(rimg, cimg, COLOR_GRAY2BGR);
Mat canvasPart = !isVerticalStereo ? canvas(Rect(w*k, 0, w, h)) : canvas(Rect(0, h*k, w, h));
resize(cimg, canvasPart, canvasPart.size(), 0, 0, INTER_AREA);
if( useCalibrated )
{
Rect vroi(cvRound(validRoi[k].x*sf), cvRound(validRoi[k].y*sf),
cvRound(validRoi[k].width*sf), cvRound(validRoi[k].height*sf));
rectangle(canvasPart, vroi, Scalar(0,0,255), 3, 8);
}
}

if( !isVerticalStereo )
for( j = 0; j < canvas.rows; j += 16 )
line(canvas, Point(0, j), Point(canvas.cols, j), Scalar(0, 255, 0), 1, 8);
else
for( j = 0; j < canvas.cols; j += 16 )
line(canvas, Point(j, 0), Point(j, canvas.rows), Scalar(0, 255, 0), 1, 8);
imshow("rectified", canvas);
char c = (char)waitKey();
if( c == 27 || c == 'q' || c == 'Q' )
break;
}
}


static bool readStringList( const string& filename, vector<string>& l )
{
l.resize(0);
FileStorage fs(filename, FileStorage::READ);
if( !fs.isOpened() )
return false;
FileNode n = fs.getFirstTopLevelNode();
if( n.type() != FileNode::SEQ )
return false;
FileNodeIterator it = n.begin(), it_end = n.end();
for( ; it != it_end; ++it )
l.push_back((string)*it);
return true;
}

int main(int argc, char** argv)
{
Size boardSize;
string imagelistfn;
bool showRectified;
cv::CommandLineParser parser(argc, argv, "{w|9|}{h|6|}{s|1.0|}{nr||}{help||}{@input|../data/stereo_calib.xml|}");
if (parser.has("help"))
return print_help();
showRectified = !parser.has("nr");
imagelistfn = parser.get<string>("@input");
boardSize.width = parser.get<int>("w");
boardSize.height = parser.get<int>("h");
float squareSize = parser.get<float>("s");
if (!parser.check())
{
parser.printErrors();
return 1;
}
vector<string> imagelist;
bool ok = readStringList(imagelistfn, imagelist);
if(!ok || imagelist.empty())
{
cout << "can not open " << imagelistfn << " or the string list is empty" << endl;
return print_help();
}

StereoCalib(imagelist, boardSize, squareSize, false, true, showRectified);
return 0;
}
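
For completeness, the calibration program is invoked the way print_help() above describes; an illustrative run (the board size, square size and image-list file name here are placeholders for my actual setup) looks like:

    .\stereo_calib.exe -w=9 -h=6 -s=1.0 .\stereo_calib_images.xml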

And the output yml files:

intrinsics.yml

%YAML:1.0
---
M1: !!opencv-matrix
   rows: 3
   cols: 3
   dt: d
   data: [ 7.9985689637206394e+02, 0., 3.1888931960018391e+02, 0.,
       7.9531749551802511e+02, 2.4016473855341377e+02, 0., 0., 1. ]
D1: !!opencv-matrix
   rows: 1
   cols: 14
   dt: d
   data: [ -2.9279927390873359e-02, -1.7234478154581664e-02, 0., 0., 0.,
       0., 0., -6.8058126545379194e-01, 0., 0., 0., 0., 0., 0. ]
M2: !!opencv-matrix
   rows: 3
   cols: 3
   dt: d
   data: [ 7.9985689637206394e+02, 0., 3.2120481600280135e+02, 0.,
       7.9531749551802511e+02, 2.3825084123786758e+02, 0., 0., 1. ]
D2: !!opencv-matrix
   rows: 1
   cols: 14
   dt: d
   data: [ -8.2357568517112279e-03, -3.0119285678826862e-02, 0., 0., 0.,
       0., 0., -7.5797854621684968e-01, 0., 0., 0., 0., 0., 0. ]

extrinsics.yml

R: !!opencv-matrix
   rows: 3
   cols: 3
   dt: d
   data: [ 8.8138908449290321e-01, -1.2148825396001222e-02,
       -4.7223478035640226e-01, 1.5998657939736358e-02,
       9.9986345265635612e-01, 4.1375096662294207e-03,
       4.7212003206905900e-01, -1.1201878574967093e-02,
       8.8146309805657919e-01 ]
T: !!opencv-matrix
   rows: 3
   cols: 1
   dt: d
   data: [ 1.5773673401082249e+01, 4.2836222925900280e-02,
       5.0227833287859989e+00 ]
R1: !!opencv-matrix
   rows: 3
   cols: 3
   dt: d
   data: [ 9.8312585634867622e-01, -1.2387599744347073e-02,
       -1.8251054202773007e-01, 1.2955703518228558e-02,
       9.9991422663040130e-01, 1.9207104087663354e-03,
       1.8247109449178447e-01, -4.2528525368786835e-03,
       9.8320202040082783e-01 ]
R2: !!opencv-matrix
   rows: 3
   cols: 3
   dt: d
   data: [ 9.5285471898202379e-01, 2.5876469050964915e-03,
       3.0341586741168125e-01, -1.6357939454294122e-03,
       9.9999291182572214e-01, -3.3912352442119049e-03,
       -3.0342249206651850e-01, 2.7350286667662269e-03,
       9.5285219783885455e-01 ]
P1: !!opencv-matrix
   rows: 3
   cols: 4
   dt: d
   data: [ 4.0573853248682479e+02, 0., 2.5764449977874756e+02, 0., 0.,
       4.0573853248682479e+02, 2.3996722984313965e+02, 0., 0., 0., 1.,
       0. ]
P2: !!opencv-matrix
   rows: 3
   cols: 4
   dt: d
   data: [ 4.0573853248682479e+02, 0., 2.5764449977874756e+02,
       6.7166452242782234e+03, 0., 4.0573853248682479e+02,
       2.3996722984313965e+02, 0., 0., 0., 1., 0. ]
Q: !!opencv-matrix
   rows: 4
   cols: 4
   dt: d
   data: [ 1., 0., 0., -2.5764449977874756e+02, 0., 1., 0.,
       -2.3996722984313965e+02, 0., 0., 0., 4.0573853248682479e+02, 0.,
       0., -6.0407914805478774e-02, 0. ]

Best Answer

I found the answer to the yml problem.

It turns out the M1, D1, M2 and D2 entries written to intrinsics.yml by the stereo calibration program need to be renamed by hand (although this could also be done in the program) as follows:

M1 changed to cam1_intrinsics
D1 changed to cam1_distorsion
M2 changed to cam2_intrinsics
D2 changed to cam2_distorsion

After renaming, I copied and pasted those sections into the top of the extrinsics.yml file.

That got me to the point where I can make progress.
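
Rather than editing the files by hand after every calibration run, the same merge can be scripted. Below is a minimal, untested sketch that reads the intrinsics.yml and extrinsics.yml written by the calibration program and writes one file with the renamed nodes (the node names and the output file name calibration_graycode.yml are my own choices here; check the names against the decode sample you are actually building):

    // merge_calib.cpp - merge intrinsics.yml and extrinsics.yml into one file
    // using the renamed nodes described above.
    #include <opencv2/core.hpp>
    #include <iostream>

    int main()
    {
        cv::Mat M1, D1, M2, D2, R, T;

        // Read the per-camera intrinsics and distortion coefficients.
        cv::FileStorage fin("intrinsics.yml", cv::FileStorage::READ);
        fin["M1"] >> M1;  fin["D1"] >> D1;
        fin["M2"] >> M2;  fin["D2"] >> D2;
        fin.release();

        // Read the stereo rotation and translation.
        cv::FileStorage fex("extrinsics.yml", cv::FileStorage::READ);
        fex["R"] >> R;  fex["T"] >> T;
        fex.release();

        if (M1.empty() || D1.empty() || M2.empty() || D2.empty() || R.empty() || T.empty())
        {
            std::cout << "Failed to read one of the calibration matrices" << std::endl;
            return 1;
        }

        // Write a single file with the renamed nodes.
        cv::FileStorage fout("calibration_graycode.yml", cv::FileStorage::WRITE);
        fout << "cam1_intrinsics" << M1 << "cam1_distorsion" << D1
             << "cam2_intrinsics" << M2 << "cam2_distorsion" << D2
             << "R" << R << "T" << T;
        fout.release();

        std::cout << "Wrote calibration_graycode.yml" << std::endl;
        return 0;
    }

Alternatively, the equivalent fs << ... lines could simply be added to StereoCalib() right after stereoCalibrate(), so the merged file is produced directly by the calibration program.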

Original question on Stack Overflow: https://stackoverflow.com/questions/42285108/
