
c++ - Active Contour Model in OpenCV 3.0


I am trying to implement the Active Contour Model algorithm with OpenCV 3.0 in C++. The algorithm is based on a script I wrote for MATLAB, but it is not behaving as expected. The two images below show the results of the two runs.

The MATLAB script:

And the OpenCV one:

In both runs I used the same values for all of the ACM parameters, so they should return the same thing, the contour of the white circle. I suspect the problem is my image energy function, since the gradient operations in OpenCV and MATLAB are not the same (see the note after the listing). The MATLAB script for the image energy is:

function [Eext] = get_eext(wl, we, wt, image)

%External Energy
[row,col] = size(image);
eline = image; %eline is simply the image intensities

[grady,gradx] = gradient(image);
eedge = -1 *(gradx .* gradx + grady .* grady);



%masks for taking various derivatives
m1 = [-1 1];
m2 = [-1;1];
m3 = [1 -2 1];
m4 = [1;-2;1];
m5 = [1 -1;-1 1];

cx = conv2(image,m1,'same');
cy = conv2(image,m2,'same');
cxx = conv2(image,m3,'same');
cyy = conv2(image,m4,'same');
cxy = conv2(image,m5,'same');

eterm = zeros(row, col);

for i = 1:row;
for j= 1:col;
% eterm as defined in Kass et al. Snakes paper
eterm(i,j) = (cyy(i,j)*cx(i,j)*cx(i,j) -2 *cxy(i,j)*cx(i,j)...
*cy(i,j) + cxx(i,j)*cy(i,j)*cy(i,j))/((1+cx(i,j)*cx(i,j)...
+ cy(i,j)*cy(i,j))^1.5);
end;
end;

Eext = (wl*eline + we*eedge + wt*eterm);
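
A note on the porting pitfalls here: MATLAB's conv2 computes a true convolution (the mask is rotated by 180 degrees) and conv2(..., 'same') pads with zeros, whereas OpenCV's filter2D computes a correlation and by default extrapolates the border with BORDER_REFLECT_101. A straight port of the derivative masks therefore needs the kernel flipped and the padding made explicit, which is what the accepted answer below does by hand for the asymmetric masks m1 and m2 (the symmetric masks m3, m4 and m5 are unchanged by a 180-degree rotation). A minimal sketch of a conv2-style wrapper, assuming a single-channel CV_32F image; the helper name conv2Same is made up:

#include <opencv2/opencv.hpp>
using namespace cv;

// Rough stand-in for conv2(image, kernel, 'same'). filter2D correlates, so a
// true convolution needs the kernel rotated by 180 degrees and the anchor
// mirrored (the recipe from the filter2D documentation); BORDER_CONSTANT
// reproduces conv2's zero padding. For even-sized masks such as m1, m2 and m5
// the 'same' cropping may still be off by one pixel, so check the result
// against MATLAB on a small test matrix.
Mat conv2Same(const Mat& img32f, const Mat& kernel)
{
    Point anchor(kernel.cols / 2, kernel.rows / 2);  // default correlation anchor
    Point mirrored(kernel.cols - anchor.x - 1, kernel.rows - anchor.y - 1);

    Mat flipped, dst;
    flip(kernel, flipped, -1);                       // rotate the mask 180 degrees
    filter2D(img32f, dst, CV_32F, flipped, mirrored, 0, BORDER_CONSTANT);
    return dst;
}

MATLAB's gradient() is a separate case: it uses central differences with weights of ±0.5 in the interior, which the kernelx and kernely below reproduce, but it switches to one-sided differences on the outermost rows and columns, so gradx and grady will still differ along the image border.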

In C++, my function became this:

Mat get_eext(float wl, float we, float wt, Mat image){

Mat eline, gradx, grady, img_gray, eedge;

//bitdepth defined as CV_32F
image.convertTo(img_gray, bitdepth);

//Convolution Kernels
Mat m1, m2, m3, m4, m5;
m1 = (Mat_<float>(1, 2) << -1, 1);
m2 = (Mat_<float>(2, 1) << -1, 1);
m3 = (Mat_<float>(1, 3) << 1, -2, 1);
m4 = (Mat_<float>(3, 1) << 1, -2, 1);
m5 = (Mat_<float>(2, 2) << 1, -1, -1, 1);

//cvtColor(image, img_gray, CV_BGR2GRAY); <- Not required since image already in grayscale
img_gray.copyTo(eline);

Mat kernelx = (Mat_<float>(1, 3) << -0.5, 0, 0.5);
Mat kernely = (Mat_<float>(3, 1) << -0.5, 0, 0.5);

filter2D(img_gray, gradx, -1, kernelx);
filter2D(img_gray, grady, -1, kernely);

//Edge Energy
eedge = -1 * (gradx.mul(gradx) + grady.mul(grady));

//Termination Energy Convolution
Mat cx, cy, cxx, cyy, cxy, eterm, cxm1, den, cxcx, cxcxm1, cxcxcy, cxcycxy, cycycxx;
filter2D(img_gray, cx, bitdepth, m1);
filter2D(img_gray, cy, bitdepth, m2);
filter2D(img_gray, cxx, bitdepth, m3);
filter2D(img_gray, cyy, bitdepth, m4);
filter2D(img_gray, cxy, bitdepth, m5);

//element wise operations to find Eterm
cxcx = cx.mul(cx);
cxcx.convertTo(cxcxm1, -1, 1, 1);
den = cxcxm1 + cy.mul(cy);
cv::pow(den, 1.5, den);
cxcxcy = cxcx.mul(cy);
cxcycxy = cx.mul(cy);
cxcycxy = cxcycxy.mul(cxy);
cycycxx = cy.mul(cy);
cycycxx = cycycxx.mul(cxx);
eterm = (cxcxcy - 2 * cxcycxy + cycycxx);
cv::divide(eterm,den,eterm,-1);

//Image energy
Mat eext;
eext = wl*eline + we*eedge + wt*eterm;
return eext;
}

Does anyone know what went wrong?
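
One way to narrow this down is to compare the individual derivative images against MATLAB on a tiny matrix, where the correlation-versus-convolution and border-handling differences show up immediately. A minimal sketch (the test values are made up):

#include <iostream>
#include <opencv2/opencv.hpp>
using namespace cv;

int main()
{
    // Made-up 3x3 test matrix; use the same values on the MATLAB side.
    Mat test = (Mat_<float>(3, 3) << 1, 2, 3,
                                     4, 5, 6,
                                     7, 8, 9);
    Mat m1 = (Mat_<float>(1, 2) << -1, 1);

    Mat cx;
    filter2D(test, cx, CV_32F, m1);
    std::cout << cx << std::endl;
    return 0;
}

Comparing the printed matrix element by element with conv2(test, [-1 1], 'same') in MATLAB shows exactly where the two operations diverge.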

Best Answer

As David Doria asked, here is the final version of the energy function (get_eext, renamed config_eext below) after a few corrections. This version works fine for me.

Mat config_eext(float wl, float we, float wt, Mat image)
{
Mat eline, gradx, grady, img_gray, eedge;

//bitdepth defined as CV_32F
image.convertTo(img_gray, bitdepth);

//Convolution Kernels
Mat m1, m2, m3, m4, m5;
m1 = (Mat_<float>(1, 2) << 1, -1);
m2 = (Mat_<float>(2, 1) << 1, -1);
m3 = (Mat_<float>(1, 3) << 1, -2, 1);
m4 = (Mat_<float>(3, 1) << 1, -2, 1);
m5 = (Mat_<float>(2, 2) << 1, -1, -1, 1);

img_gray.copyTo(eline);

//Gradient kernels
Mat kernelx = (Mat_<float>(1, 3) << -1, 0, 1);
Mat kernely = (Mat_<float>(3, 1) << -1, 0, 1);

//Gradient in x and y
filter2D(img_gray, gradx, -1, kernelx);
filter2D(img_gray, grady, -1, kernely);

//Edge energy as defined by Kass
eedge = -1 * (gradx.mul(gradx) + grady.mul(grady));

//Termination Energy Convolution
Mat cx, cy, cxx, cyy, cxy, eterm(img_gray.rows, img_gray.cols, bitdepth), cxm1, den, cxcx, cxcxm1, cxcxcy, cxcycxy, cycycxx;
filter2D(img_gray, cx, bitdepth, m1);
filter2D(img_gray, cy, bitdepth, m2);
filter2D(img_gray, cxx, bitdepth, m3);
filter2D(img_gray, cyy, bitdepth, m4);
filter2D(img_gray, cxy, bitdepth, m5);

//element wise operations to find Eterm
cxcx = cx.mul(cx);
cxcx.convertTo(cxcxm1, -1, 1, 1);
den = cxcxm1 + cy.mul(cy);
cv::pow(den, 1.5, den);
cxcxcy = cxcx.mul(cy);
cxcycxy = cx.mul(cy);
cxcycxy = cxcycxy.mul(cxy);
cycycxx = cy.mul(cy);
cycycxx = cycycxx.mul(cxx);
eterm = (cxcxcy - 2 * cxcycxy + cycycxx);
cv::divide(eterm, den, eterm, -1);

//Image energy
Mat eext;
eext = wl*eline + we*eedge + wt*eterm;
return eext;
}

Hope it helps!
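
For completeness, a hypothetical usage sketch: the input file name and the weight values passed as wl, we and wt are made up, and bitdepth is the CV_32F constant that the comments inside the function refer to.

#include <opencv2/opencv.hpp>
using namespace cv;

const int bitdepth = CV_32F;                                // referenced inside config_eext

Mat config_eext(float wl, float we, float wt, Mat image);   // defined in the answer above

int main()
{
    // Hypothetical grayscale input containing the white circle.
    Mat img = imread("circle.png", IMREAD_GRAYSCALE);
    if (img.empty()) return -1;

    // Made-up weights for the line, edge and termination energy terms.
    Mat eext = config_eext(0.04f, 2.0f, 0.01f, img);

    // Normalization is only for display; the snake iterations use the raw values.
    Mat show;
    normalize(eext, show, 0, 1, NORM_MINMAX);
    imshow("Eext", show);
    waitKey(0);
    return 0;
}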

The original question, "c++ - Active Contour Model in OpenCV 3.0", can be found on Stack Overflow: https://stackoverflow.com/questions/32894542/
