
c++ - OpenCV unwanted color blending


我编写了一个简短的程序来演示使用 OpenCV 进行霍夫线检测。

In the final step, the code takes the original blurred greyscale image, overlays the Canny edge-detection result on it, and then overlays the lines detected by the Hough transform.

The Hough lines are rendered as solid red (R = 255), 3 px wide lines, but when I overlay them the image underneath shows through for some reason. Examples below.

Original image: [image]

Blurred greyscale image with the Canny edges + Hough lines overlaid: [image]

Zoomed-in fragment: [image]

As can be seen, the greyscale image shows through the (apparently) solid red. Why is that?

The full code is below:

houghtest.cpp

#include <stdlib.h>
#include <iostream>
#include <stdio.h>

#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/highgui/highgui.hpp"

#include "toolbarconfig.h"


using namespace cv;


// Global variables
const char* window_name = "Hough Line Detection";

ToolbarConfig
    gaussian = ToolbarConfig(0, 15, 1, 6),
    canny = ToolbarConfig(20, 150, 2, 40),
    hough = ToolbarConfig(50, 400, 10, 200);

Mat input;

// Function prototypes
void update(int, void*);
void chromakey(const Mat under, const Mat over, Mat *dst, const Scalar& color);
void help();


/**
* Creates an interactive example of running hough line detection on a
* sample image
*/
int main( int argc, char** argv ) {
    const char* filename = argc >= 2 ? argv[1] : "pic1.png";

    input = imread(filename, CV_LOAD_IMAGE_COLOR);
    if(input.empty()) {
        help();

        std::cout << "Can not open " << filename << std::endl;
        return -1;
    }

    // Convert the image to grayscale
    cvtColor(input, input, CV_BGR2GRAY);

    // Create a window
    namedWindow(window_name, CV_WINDOW_AUTOSIZE);

    // Create trackbars for the user to enter thresholds
    createTrackbar("Gaussian Kernel Size", window_name, &(gaussian.t_current), gaussian.tmax(), update);
    createTrackbar("Canny Min Threshold", window_name, &(canny.t_current), canny.tmax(), update);
    createTrackbar("Hough Line Threshold", window_name, &(hough.t_current), hough.tmax(), update);

    // Show the image
    update(NULL, NULL);

    // Wait until user exit program by pressing a key
    waitKey(0);

    return 0;
}


/**
* Trackbar callback - updates the display
*/
void update(int, void*) {
    const int CANNY_RATIO = 3, CANNY_KERNEL_SIZE = 3;

    Mat blurred_input, canny_edges, hough_lines;

    // Reduce noise with a gaussian kernel
    if(gaussian.current() != 0) {
        blur(input, blurred_input, Size(gaussian.current(), gaussian.current()));
    } else {
        blurred_input = input;
    }

    // Run Canny edge detector
    Canny(blurred_input, canny_edges, canny.current(), canny.current()*CANNY_RATIO, CANNY_KERNEL_SIZE);

    // ==== Begin Hough line detector phase

    // Create a vector to store the located lines in
    vector<Vec2f> line_vector;

    // Run the transform
    HoughLines(canny_edges, line_vector, 1, CV_PI/180, hough.current(), 0, 0);

    //std::cout << lines.size() << " lines detected" << std::endl;

    // Prepare the hough_lines image
    hough_lines = Mat::zeros(canny_edges.rows, canny_edges.cols, CV_8UC3);

    // Draw detected lines into an image
    for(size_t i = 0; i < line_vector.size(); i++) {
        float rho = line_vector[i][0], theta = line_vector[i][1];
        Point pt1, pt2;

        double a = cos(theta), b = sin(theta);
        double x0 = a*rho, y0 = b*rho;

        pt1.x = cvRound(x0 + 1000*(-b));
        pt1.y = cvRound(y0 + 1000*(a));
        pt2.x = cvRound(x0 - 1000*(-b));
        pt2.y = cvRound(y0 - 1000*(a));

        line(hough_lines, pt1, pt2, Scalar(0, 0, 255), 3, 0);
    }

    // Overlay the hough lines onto the original blurred image
    Mat blurred_input_color, canny_edges_color, input_with_canny, combined_images;
    cvtColor(blurred_input, blurred_input_color, CV_GRAY2BGR);
    cvtColor(canny_edges, canny_edges_color, CV_GRAY2BGR);

    chromakey(blurred_input_color, canny_edges_color, &input_with_canny, Scalar(0, 0, 0));
    chromakey(input_with_canny, hough_lines, &combined_images, Scalar(0, 0, 0));

    // Display the result
    imshow(window_name, combined_images);
}

/**
* Takes two images and overlays them, using color as a chroma-key
* Any pixels in the 'over' image that match the given color value will
* effectively be transparent - the 'under' image will show through
*
* @precondition: All passed images must first be in BGR format
*/
void chromakey(const Mat under, const Mat over, Mat *dst, const Scalar& color) {
    // Mats must be the same size
    if(under.rows != over.rows || under.cols != over.cols) {
        std::cout << "Error, image dimensions must match" << std::endl;
        return;
    }

    // Create the destination matrix
    *dst = Mat::zeros(under.rows, under.cols, CV_8UC3);

    for(int y=0; y<under.rows; y++) {
        for(int x=0; x<under.cols; x++) {
            dst->at<Vec3b>(y,x)[0] = over.at<Vec3b>(y,x)[0] == color[0] ? under.at<Vec3b>(y,x)[0] : over.at<Vec3b>(y,x)[0];
            dst->at<Vec3b>(y,x)[1] = over.at<Vec3b>(y,x)[1] == color[1] ? under.at<Vec3b>(y,x)[1] : over.at<Vec3b>(y,x)[1];
            dst->at<Vec3b>(y,x)[2] = over.at<Vec3b>(y,x)[2] == color[2] ? under.at<Vec3b>(y,x)[2] : over.at<Vec3b>(y,x)[2];
        }
    }
}


/**
* Prints usage information
*/
void help() {
    std::cout << "\nThis program demonstrates line finding with the Hough transform.\n"
                 "Usage:\n"
                 "./houghlines <image_name>, Default is pic1.png\n" << std::endl;
}

toolbarconfig.h

#ifndef TOOLBARCONFIG_H
#define TOOLBARCONFIG_H

class ToolbarConfig {
public:
    ToolbarConfig(int min, int max, int stepsize, int current);

    int w2t(int world_value);
    int t2w(int toolbar_value);

    int current();
    int tmax();
    int tmin();

    int min;
    int max;
    int stepsize;

    int t_current;
};

#endif

toolbarconfig.cpp

#include <algorithm>

#include "toolbarconfig.h"

ToolbarConfig::ToolbarConfig(int min, int max, int stepsize, int current) {
    this->min = min;
    this->max = max;
    this->stepsize = stepsize;
    this->t_current = this->w2t(current);
}

int ToolbarConfig::w2t(int world_value) {
    return int((std::min(std::max(world_value, min), max) - min) / stepsize);
}

int ToolbarConfig::t2w(int toolbar_value) {
    return toolbar_value * stepsize + min;
}

int ToolbarConfig::current() {
    return t2w(t_current);
}

int ToolbarConfig::tmax() {
    return w2t(max);
}

int ToolbarConfig::tmin() {
    return w2t(min);
}

I am happy to supply my Makefile if needed.

Thanks in advance.

Best Answer

The error must be here:

dst->at<Vec3b>(y,x)[0] = over.at<Vec3b>(y,x)[0] == color[0] ? under.at<Vec3b>(y,x)[0] : over.at<Vec3b>(y,x)[0];
dst->at<Vec3b>(y,x)[1] = over.at<Vec3b>(y,x)[1] == color[1] ? under.at<Vec3b>(y,x)[1] : over.at<Vec3b>(y,x)[1];
dst->at<Vec3b>(y,x)[2] = over.at<Vec3b>(y,x)[2] == color[2] ? under.at<Vec3b>(y,x)[2] : over.at<Vec3b>(y,x)[2];

When you first call

chromakey(blurred_input_color, canny_edges_color, &input_with_canny, Scalar(0, 0, 0));

the white pixels of canny_edges_color have the value (255, 255, 255), so in the comparisons above you get the over value for every channel; the pixel therefore stays (255, 255, 255) and the image displays correctly.

However, in the second case:

chromakey(input_with_canny, hough_lines, &combined_images, Scalar(0, 0, 0));

the red pixels of your hough_lines have the value (0, 0, 255), so for the first two comparisons they take the value from under, because

over.at<Vec3b>(y,x)[0] == color[0]

and over.at<Vec3b>(y,x)[1] == color[1].

Only dst->at<Vec3b>(y,x)[2] gets the value 255. For the lines to show up as solid red, that case would instead need dst->at<Vec3b>(y,x)[0] = 0 and dst->at<Vec3b>(y,x)[1] = 0.
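In other words, the fix is to treat a pixel of over as transparent only when all three of its channels match the key colour at once, and otherwise copy the whole over pixel. Below is a minimal sketch of such a chromakey(), keeping the signature of the one in houghtest.cpp above; it is an illustration, not the original poster's code:

void chromakey(const Mat under, const Mat over, Mat *dst, const Scalar& color) {
    // Mats must be the same size
    if(under.rows != over.rows || under.cols != over.cols) {
        std::cout << "Error, image dimensions must match" << std::endl;
        return;
    }

    *dst = Mat::zeros(under.rows, under.cols, CV_8UC3);

    for(int y = 0; y < under.rows; y++) {
        for(int x = 0; x < under.cols; x++) {
            const Vec3b& o = over.at<Vec3b>(y, x);
            // Transparent only if the whole pixel equals the key colour, so a
            // red (0, 0, 255) line pixel is copied in full rather than per channel
            bool is_key = o[0] == color[0] && o[1] == color[1] && o[2] == color[2];
            dst->at<Vec3b>(y, x) = is_key ? under.at<Vec3b>(y, x) : o;
        }
    }
}

With that change both overlay calls behave the same way: the white Canny pixels and the red Hough pixels are each copied whole, and only exactly-black pixels of over let under show through.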


Also, according to this answer, you should initialize *dst like this:

*dst = Mat(under.rows,under.cols,CV_8UC3,CV_RGB(0,0,0));

since it is a 3-channel Mat.
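As a side note that goes beyond the original answer, OpenCV can also do this whole-pixel keying without a hand-written loop, which sidesteps both the per-channel comparison and the manual initialization of *dst. The following sketch using inRange() and a masked copyTo() is my own suggestion, not something the answer proposes:

void chromakey(const Mat under, const Mat over, Mat *dst, const Scalar& color) {
    Mat key_mask, keep_mask;
    inRange(over, color, color, key_mask);   // 255 where the 'over' pixel equals the key colour exactly
    bitwise_not(key_mask, keep_mask);        // 255 where the 'over' pixel should be kept

    under.copyTo(*dst);                      // start from the background image
    over.copyTo(*dst, keep_mask);            // paste every non-key foreground pixel on top
}

Because inRange() compares all channels against the bounds at once, a red line pixel never partially matches a black key, so the lines stay solid.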

Regarding c++ - OpenCV unwanted color blending, we found a similar question on Stack Overflow: https://stackoverflow.com/questions/12454134/
